query: string (lengths 9 to 3.4k)
document: string (lengths 9 to 87.4k)
metadata: dict
negatives: list (lengths 4 to 101)
negative_scores: list (lengths 4 to 101)
document_score: string (lengths 3 to 10)
document_rank: string (102 classes)
Returns the set of all objects instead of just live objects
def queryset(self, request):
    qs = self.model.all_objects.get_query_set()
    ordering = self.ordering or ()
    if ordering:
        qs = qs.order_by(*ordering)
    return qs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def objects_in_use(self):\n return set()", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return (self.__objects)", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def get_all_objects():\n gc.collect()\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def objects(self):\n\t\treturn self._objects", "def objects(self):\r\n return self._objects", "def all_objects(self):\n if self._all_objs is None:\n objs = OrderedDict()\n for o in self._objects:\n if not o.enabled:\n continue\n for k,v in o.all_objects().items():\n if k in objs:\n raise NameError('Multiple objects with same name \"%s\": %s, %s' % (k, objs[k], v))\n objs[k] = v\n self._all_objs = objs\n return self._all_objs", "def get_objects(self):\n return self._objects", "def GetObjects(self): \r\n return self.model.GetObjects()", "def all_objects(self):\n objs = OrderedDict()\n objs[self.name] = self\n for o in self._sub_objs:\n if not o.enabled:\n continue\n objs.update(o.all_objects())\n return objs", "def objects(self):\n if not self._objects:\n id_set = {}\n for x in self.addition_events():\n if 'id' in x: id_set[UUID(x['id'])] = 1\n self._objects = id_set.keys()\n\n return self._objects", "def _get_all_tracked_objects(self):\n all = []\n for obj in gc.get_objects():\n if any([mod.is_module_object(obj) for mod in self.tracked_modules]):\n all.append(TrackedObject(obj))\n return all", "def all(self):\n return list(self)", "def all_objects(self) -> List[StorageObject]:\n return [item for item in self._store.values()]", "def active_objects(self):\n return self._active_objects", "def __iter__(self):\n # Ripped off from elasticutils\n return (self.objects[id] for id in self.ids if id in self.objects)", "def objects(self):", "def get_all(self):\n return [_ for _ in self]", "def all(self):\n return list(self.iterator())", "def objects(self):\n existing = {obj for obj in {self.key, self.door} if obj is not None}\n alive = {obj for obj in {self.skull, self.joe} if not obj.dead}\n return existing | alive", "def objects (self):\n return InternalObjectList (self)", "def all(self):\n return self[:]", "def all(self):\n return self._clone()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def all(self):\n return self.filter()", "def get_all(self):\n\n return self._items[:]", "def gather_entities(self):\n entitylist = set()\n for entity in self.entities.all():\n entitylist.add(entity)\n entitylist.update(entity.get_ancestors())\n return entitylist #set(entity for entity in entitylist if not entity.abstract_entity)", "def _remove_initial_objects_from_list(self, all):\n\n new_list = []\n for obj in all:\n if obj not in self.initial_set_of_objects:\n new_list.append(obj)\n\n return new_list", "def 
_get_children(self):\n return set()", "def all(self, cls=None):\n if cls:\n dic = {}\n for key, val in self.__objects.items():\n if type(val) == cls:\n dic[key] = self.__objects[key]\n return dic\n else:\n return self.__objects", "def my_objects(self):\n\n matches = [_object for _object in self.object_store if str(_object.OwnerID) == str(self.agent.agent_id)]\n\n return matches", "def getinstances(cls):\n\t\t\tdead = set()\n\t\t\tfor ref in cls._instances:\n\t\t\t\tobj = ref()\n\t\t\t\tif obj is not None:\n\t\t\t\t\tyield obj\n\t\t\t\telse:\n\t\t\t\t\tdead.add(ref)\n\t\t\tcls._instances -= dead", "def get_all(self):\n return self.__items", "def all(cls):\n return []", "def all(self, filter_deleted=False):\n objects = self.matching_objects(filter_deleted=filter_deleted)\n return objects", "def objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def dataObjects(self):\n\t\treturn self._objects", "def get_non_inheriting_objects(self):\n return get_non_inheriting_objects(self)", "def all():\n return current().values", "def objects(self):\n return self.obj_to_id.keys()", "def fetch_all(self):\n return list(iter(self))", "def get_all_object_classes(cls) -> Dict[str, Type[objects.BaseObject]]:\n cls._refresh_registry()\n return copy.deepcopy(cls.objects_dict)", "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "def get_all(cls):\n if Model.data_connector:\n with Model.data_connector.u_lock:\n return Model.data_connector.get_all_objects(cls)\n \n return []", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def all(self) -> List[Optional[U]]:\n return list(self._store.values())", "def get_all_cached_instances(cls):\n return list(cls.__dbclass__.__instance_cache__.values())", "def hbObjects(self):\r\n return self.__hbObjs", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def unique(objects):\n uniques = []\n for obj in objects:\n if obj not in uniques:\n uniques.append(obj)\n return uniques", "def unique(self):\n return frozenset(self)", "def find_set(self):\n return self._set_set(self._find_set())", "def set(self) -> set:\n return set(self)", "def _all_names_on_object(obj: Any) -> Set[str]:\n nameset = set(obj.__dict__.keys())\n for cls in obj.__class__.__mro__:\n nameset = nameset.union(set(cls.__dict__.keys()))\n return nameset", "def obj_list(self):\n return self._obj_list", "def all(self):\n if not self._cache:\n self.load()\n\n return self._cache", "def get_greenlets(cls):\n return { obj for obj in gc.get_objects() if isinstance(obj, greenlet) and not obj.dead }", "def get_alive(self):\n return ReadingSet(set([x for x in self._set if x.alive]))", "def get_object_references(self, value):\n return set()", "def all(self):", "def all(self):", "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects", "def get_all(self):\n return self.__fetcher.get_fetched()", "def find_all(self):\n pass", "def get_lockable_objects(self):\n if not self.__lockable_objects:\n return set([\"lock\"])\n return self.__lockable_objects", "def elements(self):\n return 
set(self._elements)", "def managed_objects(self):\n return self._managed_object_list", "def get_all_entities(self):\n return Artifact.get_all()", "def all(self):\n return list(six.iteritems(self))", "def copy(self):\n return set(self)", "def get_all(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).all()\n self.lock.release()\n return result", "def all(self):\n return self._summarize(lambda c: c.all)", "def all(self):\n list_of_objs = []\n for obj in self.__session.query(User):\n obj = obj.__dict__\n del obj[\"_sa_instance_state\"]\n list_of_objs.append(obj)\n return list_of_objs", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def get_downstream_objects(obj):\n # gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n # seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr([obj], olist, seen)\n return olist", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def all(self):\n return self.owner.hydrate(\n self.connection.query(\n self.to_qmark(), self._bindings) or self.owner.new_collection([])\n )", "def collect_all(self) -> list:\n raise NotImplementedError()", "def getSets():", "def get_all(self):\n return ReadingSet(self._set)", "def get_leaks(self):\n _run_garbage_collection()\n\n remaining_objects = self._get_all_tracked_objects()\n remaining_objects = self._remove_initial_objects_from_list(remaining_objects)\n\n return remaining_objects", "def associated_objects(self):\n return self._associated_objects", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def _get_objects(self, object_query):\n object_name = object_query[\"object_name\"]\n expression = object_query.get(\"filters\", {}).get(\"expression\")\n\n if expression is None:\n return set()\n object_class = self.object_map[object_name]\n\n query = object_class.query\n filter_expression = self._build_expression(\n expression,\n object_class,\n object_query.get('fields', []),\n )\n if filter_expression is not None:\n query = query.filter(filter_expression)\n requested_permissions = object_query.get(\"permissions\", \"read\")\n if requested_permissions == \"update\":\n objs = [o for o in query if permissions.is_allowed_update_for(o)]\n else:\n objs = [o for o in query if permissions.is_allowed_read_for(o)]\n\n return objs", "def associatedObjects (self):\n return self.__associatedObjects", "def sets(self):\n return self._loaded_and_cached(gdxcc.GMS_DT_SET)", "def all(self):\n return self._chain()", "def nodes(self):\n return set(self.values())", "def retrive(self):\n already_seen_videos = set()\n # create a generator function that spits out video objects one at a time\n for each_query in self.db_query_stack:\n results_of_query = DB.find(each_query)\n print('results_of_query = ', type(results_of_query))\n # this only cares about the keys (video id's)\n unseen_videos = set(results_of_query) - already_seen_videos \n for each_video_id in unseen_videos:\n # output full objects\n yield DatabaseVideo(each_video_id)\n # all the unseen have now been seen\n already_seen_videos |= unseen_videos", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def sets(self):\n return self._sets", "def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while 
True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)", "def get_nodeset(self):\n return set(self.nodeset) # return the nodeset" ]
[ "0.80215394", "0.7922002", "0.7922002", "0.7922002", "0.7922002", "0.7922002", "0.7922002", "0.7845179", "0.7754362", "0.7715909", "0.7468645", "0.74566925", "0.7332164", "0.7307938", "0.72778046", "0.71881217", "0.707433", "0.7026089", "0.7010139", "0.7000168", "0.6991465", "0.6954297", "0.694908", "0.6898439", "0.6849597", "0.68384093", "0.68045735", "0.6773125", "0.6764959", "0.67560375", "0.6678876", "0.6678876", "0.6678876", "0.6675759", "0.66685814", "0.66538966", "0.6643269", "0.6621363", "0.6576129", "0.6571409", "0.6555815", "0.6536918", "0.6527113", "0.6518015", "0.6517919", "0.65070087", "0.65034676", "0.6491891", "0.6433708", "0.64263046", "0.6372758", "0.63564456", "0.63342255", "0.63189906", "0.63045025", "0.63030654", "0.62989473", "0.62921906", "0.62852305", "0.6274765", "0.62647134", "0.62630165", "0.6260287", "0.62547964", "0.62461686", "0.62426925", "0.6231178", "0.6229322", "0.6229322", "0.62239534", "0.62058574", "0.620149", "0.6199364", "0.619584", "0.6189041", "0.6184779", "0.6184767", "0.6184516", "0.6171942", "0.61716515", "0.61634064", "0.6161368", "0.6150955", "0.6144164", "0.6142937", "0.6141954", "0.61331713", "0.61280996", "0.61280066", "0.61198944", "0.6118882", "0.61155313", "0.61080414", "0.610293", "0.6102464", "0.6102429", "0.6097463", "0.6090644", "0.60704994", "0.6069573", "0.606833" ]
0.0
-1
Creating a custom time entry; at minimum, an hour duration and a project parameter are required
def createTimeEntry(self, hourduration, description=None, projectid=None, projectname=None,
                    taskid=None, clientname=None, year=None, month=None, day=None, hour=None,
                    billable=False, hourdiff=-2):
    data = {
        "time_entry": {}
    }
    if not projectid:
        if projectname and clientname:
            projectid = (self.getClientProject(clientname, projectname))['data']['id']
        elif projectname:
            projectid = (self.searchClientProject(projectname))['data']['id']
        else:
            print('Too many missing parameters for query')
            exit(1)
    if description:
        data['time_entry']['description'] = description
    if taskid:
        data['time_entry']['tid'] = taskid
    year = datetime.now().year if not year else year
    month = datetime.now().month if not month else month
    day = datetime.now().day if not day else day
    hour = datetime.now().hour if not hour else hour
    timestruct = datetime(year, month, day, hour + hourdiff).isoformat() + '.000Z'
    data['time_entry']['start'] = timestruct
    data['time_entry']['duration'] = hourduration * 3600
    data['time_entry']['pid'] = projectid
    data['time_entry']['created_with'] = 'NAME'
    data['time_entry']['billable'] = billable
    response = self.postRequest(Endpoints.TIME_ENTRIES, parameters=data)
    return self.decodeJSON(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def running_custom_hour(arg):\n pass", "def __init__(self, hour, minute=0, second=0, microsecond=0, tzinfo=None):", "def timeField(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, changeCommand: Script=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dragCommand:\n Script=None, dropCallback: Script=None, editable: bool=True, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, enterCommand:\n Script=None, exists: bool=True, fullPathName: bool=True, height: Union[int,\n bool]=0, highlightColor: Union[List[float, float, float], bool]=None, isObscured:\n bool=True, manage: bool=True, noBackground: bool=True, numberOfPopupMenus:\n bool=True, parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, precision:\n Union[int, bool]=0, preventOverride: bool=True, receiveFocusCommand: Script=None,\n statusBarMessage: AnyStr=\"\", step: Union[time, bool]=None, useTemplate: AnyStr=\"\",\n value: Union[time, bool]=None, visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_issue_add_time(self):\n pass", "def __init__(__self__, *,\n duration_hours: pulumi.Input[int],\n schedule: pulumi.Input['ScheduleArgs'],\n start_time: pulumi.Input[str],\n not_allowed_dates: Optional[pulumi.Input[Sequence[pulumi.Input['DateSpanArgs']]]] = None,\n start_date: Optional[pulumi.Input[str]] = None,\n utc_offset: Optional[pulumi.Input[str]] = None):\n if duration_hours is None:\n duration_hours = 24\n pulumi.set(__self__, \"duration_hours\", duration_hours)\n pulumi.set(__self__, \"schedule\", schedule)\n pulumi.set(__self__, \"start_time\", start_time)\n if not_allowed_dates is not None:\n pulumi.set(__self__, \"not_allowed_dates\", not_allowed_dates)\n if start_date is not None:\n pulumi.set(__self__, \"start_date\", start_date)\n if utc_offset is not None:\n pulumi.set(__self__, \"utc_offset\", utc_offset)", "def __init__(self, hour=0, minute=0, second=0):\n self.hour = hour\n self.minute = minute\n self.second = second", "def _setup_volunteer_hours(\n volunteer,\n npf_admin,\n org,\n project,\n datetime_start,\n datetime_end,\n description=\"Manually tracked time \",\n event_type=\"MN\",\n is_verified=False,\n action_type='req'\n):\n event = Event.objects.create(\n project=project,\n is_public=True,\n description=\"finished event\",\n location=\"test_location\",\n coordinator=npf_admin,\n event_type=event_type,\n datetime_start=datetime_start,\n datetime_end=datetime_end\n )\n\n volunteer_timelog = UserTimeLog.objects.create(\n user=volunteer,\n event=event,\n datetime_start=datetime_start,\n datetime_end=datetime_end,\n is_verified=is_verified\n )\n\n actiontimelog = AdminActionUserTime.objects.create(\n user=npf_admin,\n usertimelog=volunteer_timelog,\n action_type=action_type\n )\n\n return volunteer_timelog, actiontimelog, event", "def constructTimeLineItem(self):\n\t\treturn", "def __init__(self,\n label=None,\n validators=None,\n format='%I:%M%p', # 1:45PM\n **kwargs):\n super(TimeField, self).__init__(label, validators, **kwargs)\n self.format = format", "def __init__(self, offset_hours: int) -> None:\r\n self.offset = datetime.timedelta(hours=offset_hours)", "def __init__(self, name=\"\", time=None):\n super().__init__(\"time\", name)\n self.time = time", "def create_base_entry(vin=\"INVALID\", time_unix=None):\n\t\treturn 
LogEntry(vin=vin, app_id=\"INVALID\", time_unix=time_unix)", "def create_time(given_time: Any | None) -> str | None:\n if not given_time:\n return None\n if datetime_time := arg_to_datetime(given_time):\n return datetime_time.strftime(DATE_FORMAT)\n else:\n raise DemistoException(\"Time parameter supplied in invalid, make sure to supply a valid argument\")", "def __init__(self, negative: bool, hours: int, minutes: int):\n self.negative = negative\n self.hours = hours\n self.minutes = minutes", "def make_entries(self, user=None, projects=None, dates=None,\n hours=1, minutes=0):\n if not user:\n user = self.user\n if not projects:\n projects = self.default_projects\n if not dates:\n dates = self.default_dates\n for project in projects:\n for day in dates:\n self.log_time(project=project, start=day,\n delta=(hours, minutes), user=user)", "def __init__(self, day, hour, minute):\n self.day = day\n self.hour = hour\n self.minute = minute", "def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]", "def __init__(self, dt=60*60*24):\n pass", "def __init__(self, time, metadata):\n self.time = time\n self.metadata = metadata", "def setSubmitTime(t):", "def test_time_entry_creation(self):\n #customer data\n customer_name = u'RFCCustomer'\n #project data\n project_name = u'A new project'\n project_id = 'a-new-project'\n #entry data\n entry_date = datetime.date(2011, 05, 26)\n entry_hours = '2:30'\n entry_location = u'RedTurtle Technology'\n entry_description = u'Trying to create ticket for API tests'\n entry_ticket = '45'\n\n #Start to create customer, project and time entry for project\n session = DBSession()\n project = Project(name=project_name, id=project_id)\n customer = Customer(name=customer_name)\n customer.add_project(project)\n session.add(customer)\n transaction.commit()\n\n #Try to get errors\n resp = self.proxy.create_new_simple_time_entry(1, entry_date,\n entry_hours, entry_description,\n entry_location, project_id)\n self.assertEqual(resp['message'], u\"'int' object has no attribute 'decode'\")\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n u'9000',\n entry_description,\n entry_location,\n project_id)\n\n self.assertEqual(resp['message'], u'Cannot parse time (must be HH:MM)')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n u'19:40',\n entry_description,\n entry_location,\n project_id)\n\n self.assertEqual(resp['message'], u'Time value too big (must be <= 16:00)')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u'Not able to get the project with id 100')\n\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n '2011 01 01',\n entry_hours,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u\"time data '2011 01 01' does not match format '%Y-%m-%d'\")\n\n #Let's try to create a simple time entry\n resp = self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n entry_description,\n entry_location,\n project_id)\n\n self.assertRegexpMatches(resp['message'], u'Correctly added time entry \\d+ for %s ticket #%s' %(project_id, entry_ticket))\n\n resp = 
self.proxy.create_new_simple_time_entry(entry_ticket,\n entry_date,\n entry_hours,\n '',\n entry_location,\n project_id)\n self.assertEqual(resp['message'], u\"Description is required.\")\n\n #Now try to create a more complex time entry\n entry_start = datetime.datetime(2011, 01, 01, 15, 30)\n entry_end = datetime.datetime(2011, 01, 01, 17, 30)\n entry_ticket = '#99'\n\n resp = self.proxy.create_new_advanced_time_entry(99,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n 10)\n self.assertEqual(resp['message'], u\"'int' object has no attribute 'decode'\")\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n 100)\n self.assertEqual(resp['message'], u'Not able to get the project with id 100')\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n '2011 08 24',\n entry_end,\n entry_description,\n entry_location,\n 10)\n self.assertEqual(resp['message'], u\"time data '2011 08 24' does not match format '%Y-%m-%d %H:%M:%S'\")\n\n resp = self.proxy.create_new_advanced_time_entry(entry_ticket,\n entry_start,\n entry_end,\n entry_description,\n entry_location,\n project_id)\n self.assertRegexpMatches(resp['message'], u'Correctly added time entry \\d+ for %s ticket #%s' %(project_id, entry_ticket))", "def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction", "def __add__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___add__(self, *args, **kwargs)", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Take the local timezone into account when calculating datetimes\n self.local_tz = pytz.timezone(settings.TIME_ZONE)\n\n self.end_time = self.start_time + timedelta(minutes=self.duration)", "def add_time_to_title( self, title ):\n begin = self.begin; end = self.end\n if 'span' in self.metadata:\n interval = self.metadata['span']\n elif 'given_kw' in self.metadata and 'span' in self.metadata['given_kw']:\n interval = self.metadata['given_kw']['span']\n else:\n interval = self.time_interval( )\n formatting_interval = self.time_interval()\n if formatting_interval == 600:\n format_str = '%H:%M:%S'\n elif formatting_interval == 3600:\n format_str = '%Y-%m-%d %H:%M'\n elif formatting_interval == 86400:\n format_str = '%Y-%m-%d'\n elif formatting_interval == 86400*7:\n format_str = 'Week %U of %Y'\n\n if interval < 600:\n format_name = 'Seconds'\n time_slice = 1\n elif interval < 3600 and interval >= 600:\n format_name = 'Minutes'\n time_slice = 60\n elif interval >= 3600 and interval < 86400:\n format_name = 'Hours'\n time_slice = 3600\n elif interval >= 86400 and interval < 86400*7:\n format_name = 'Days'\n time_slice = 86400\n elif interval >= 86400*7:\n format_name = 'Weeks'\n time_slice = 86400*7\n else:\n format_str = '%x %X'\n format_name = 'Seconds'\n time_slice = 1\n\n begin_tuple = time.gmtime(begin); end_tuple = time.gmtime(end)\n added_title = '\\n%i %s from ' % (int((end-begin)/time_slice), format_name)\n added_title += time.strftime('%s to' % format_str, begin_tuple)\n if time_slice < 86400:\n add_utc = ' UTC'\n else:\n add_utc = ''\n added_title += time.strftime(' %s%s' % (format_str, add_utc), end_tuple)\n return title + added_title", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n 
s.hour_from = 3\n s.min_from = 0\n s.hour_to = 3\n s.min_to = 59\n s.interval = 60*60*6 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 0 )", "def test_format_optional_time_field(self):\n formatted_time = jiratimereport.format_optional_time_field(99960, \"\")\n expected_result = \"27:46:00\"\n self.assertEqual(expected_result, formatted_time)", "def __init__(self, hrs=0, mins=0, secs=0):\n #Calculate total seconds to represent\n totalsecs = hrs*3600 + mins*60 + secs\n self.hours = totalsecs //3600\n leftoversecs = totalsecs % 3600\n self.minutes = leftoversecs // 60\n self.seconds = leftoversecs % 60", "def __init__(self, year, month, day, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):", "async def test_process_set_custom_time(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n broadcast_type=\"TIME\",\n localtime=False,\n )\n assert self.datetime.remote_value.value is None\n\n test_time = time.strptime(\"9:13:14\", \"%H:%M:%S\")\n await self.datetime.set(test_time)\n telegram = xknx.telegrams.get_nowait()\n assert telegram == Telegram(\n destination_address=GroupAddress(\"1/2/3\"),\n payload=GroupValueWrite(DPTArray((0x9, 0xD, 0xE))),\n )\n await self.datetime.process(telegram)\n assert self.datetime.remote_value.value == test_time", "def test_time_field():", "def __init__(self, hrs=0, mins=0, secs=0):\n # self.hours = hrs\n # self.minutes = mins\n # self.seconds = secs\n # Calculate total seconds to represent\n totalseconds = hrs * 3600 + mins * 60 + secs\n self.hours = totalseconds // 3600\n leftoversecs = totalseconds % 3600\n self.minutes = leftoversecs // 60\n self.seconds = leftoversecs % 60", "def add_entry(name, title, duration, notes):\n clear()\n print('Entry added to work log!')\n return Entry.create(\n employee_name=name,\n task_title=title,\n time_spent=duration,\n task_notes=notes\n )", "def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second", "def get_time(entry):\n entry_data = entry['data']\n time_str = entry_data['start']\n duration = entry_data['duration']\n \n # time_str example string:\n # 2018-05-25T18:21:13+00:00\n date = time_str[:10]\n \n # Adds the timezone to the given hour.\n start_hour = int(time_str[11:13]) + timezone\n start_time = str(start_hour) + time_str[13:19]\n\n # Calculates running duration.\n run_time = time.time() + duration\n hours = int(run_time // 3600)\n minutes = int((run_time // 60) - hours * 60)\n seconds = int(run_time % 60)\n \n run_time_str = (\"%02d\" % hours) + ':' + (\"%02d\" % minutes) + ':' + (\"%02d\" % seconds)\n \n return start_time, run_time_str", "def test_fields_effort_time_units_dictionary_success(self, _mock_check):\n field = EffortField(time_units={\"minute\": (\"minute\", \"minutes\")})\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 0\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60*3 \n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def get_time():\n return {\n 'timestamp': datetime.now()+ timedelta(hours=-1)\n }", "def __init__(self, date, startTime, endTime, 
summary, offset):\n\n self.summary = summary\n self.start = datetime.datetime.strptime(\n date + startTime, \"%B %d, %Y%I:%M %p\")\n self.end = datetime.datetime.strptime(\n date + endTime, \"%B %d, %Y%I:%M %p\")\n self.length = self.end - self.start\n self.offset = offset\n\n self.description = \"Ingen lunchrast!\"", "def __init__(self, offset):\n if isinstance(offset, int):\n if offset >= 720 or offset <= -720:\n raise ValueError(\n 'Offset cannot be >= 720 minutes, try #fromSeconds if creating a fixed offset from seconds')\n\n self.__offset = timedelta(minutes=offset)\n self.__name = self.timezoneminutes_to_fixed_offset_string(offset)\n return\n\n if not offset.startswith(\"UTC\"):\n raise ValueError(\n \"Please supply offset in minutes as integer or as string in form 'UTC[+-]HH:MM', received: %s\" % offset)\n\n try:\n sign_char = offset[3]\n sign_multiplier = 1 if sign_char == '+' else -1\n hours = offset[4:6]\n minutes = offset[7:]\n self.__offset = timedelta(\n minutes=(int(minutes) + int(hours) * 60 * sign_multiplier))\n self.__name = offset\n except:\n raise ValueError(\n \"Please supply offset in minutes as integer or as string in form 'UTC[+-]HH:MM', received: %s\" % offset)", "def ht_meeting_create(values, uid):\n\n\tprint 'ht_meeting_create: enter()'\n\t#\tStripe fields should be validated.\n\t#\tv1 - end time is reasonable (after start time)\n\t#\tv2 - hero exists.\n\t#\tv3 - place doesn't matter as much..\n\t#\tv4 - cost..\n\ttry:\n\t\tstripe_tokn = values.get('stripe_tokn')\t\t# used to charge\n\t\tstripe_card = values.get('stripe_card')\t\t# card to add to insprite_customer.\n\t\tstripe_cust = values.get('stripe_cust')\n\t\t#stripe_fngr = values.get('stripe_fngr')\t#card_fingerprint\n\n\t\tprop_mentor = values.get('prop_mentor')\n\t\tprop_cost = values.get('prop_cost')\n\t\tprop_desc = values.get('prop_desc')\n\t\tprop_location = values.get('prop_location')\n\t\tprop_lesson = values.get('prop_lesson')\n\t\tprop_groupsize = values.get('prop_groupsize')\n\n\t\tprint \"ht_meeting_create: mentor:\", prop_mentor\n\t\tprint \"ht_meeting_create: cost:\", prop_cost\n\t\tprint \"ht_meeting_create: desc:\", prop_desc\n\t\tprint \"ht_meeting_create: location:\", prop_location\n\t\tprint \"ht_meeting_create: lesson:\", prop_lesson\n\t\tprint \"ht_meeting_create: groupsize:\", prop_groupsize\n\n\t\t# validate start/end times via conversion.\n\t\tprop_s_date = values.get('prop_s_date')\n\t\tprop_s_time = values.get('prop_s_time')\n\t\tprop_f_date = values.get('prop_f_date')\n\t\tprop_f_time = values.get('prop_f_time')\n\t\tdt_start = dt.strptime(prop_s_date + \" \" + prop_s_time, '%A, %b %d, %Y %H:%M')\n\t\tdt_finsh = dt.strptime(prop_f_date + \" \" + prop_f_time, '%A, %b %d, %Y %H:%M')\n\n\t\t# convert to user's local TimeZone.\n\t\tdt_start_pacific = timezone('US/Pacific').localize(dt_start)\n\t\tdt_finsh_pacific = timezone('US/Pacific').localize(dt_finsh)\n\n\t\tprint 'ht_meeting_create: (from stripe) token =', stripe_tokn, 'card =', stripe_card\n\t\thp\t= Profile.get_by_prof_id(prop_mentor)\n\t\tbp\t= Profile.get_by_uid(uid)\n\t\tba = Account.get_by_uid(uid)\n\t\tha = Account.get_by_uid(hp.account)\n\t\tstripe_cust = ht_get_stripe_customer(ba, cc_token=stripe_tokn, cc_card=stripe_card, cust=stripe_cust)\n\t\tprint 'ht_meeting_create: Mentee (' + str(bp.prof_name) + ', ' + str(stripe_cust) + ')'\n\t\tprint 'ht_meeting_create: Mentor (' + str(hp.prof_name) + ')'\n\n\t\tmeeting = Meeting(hp.prof_id, bp.prof_id, dt_start_pacific, dt_finsh_pacific, (int(prop_cost)/100), 
str(prop_location), str(prop_desc), str(prop_lesson), prop_groupsize, token=stripe_tokn, customer=stripe_cust, card=stripe_card)\n\n\t\tdb_session.add(meeting)\n\t\tdb_session.commit()\n\t\tprint \"ht_meeting_create: successfully committed meeting\"\n\texcept Exception as e:\n\t\t# IntegrityError, from commit()\n\t\t# SanitizedException(None), from Meeting.init()\n\t\tprint type(e), e\n\t\tdb_session.rollback()\n\t\tht_sanitize_error(e)\n\tprint \"ht_meeting_create: sending notifications\"\n\tht_send_meeting_proposed_notifications(meeting, ha, hp, ba, bp)", "def __init__(self,\n day=None,\n end_time=None,\n start_time=None,\n ):\n\n # Initialize members of the class\n self.day = day\n self.end_time = end_time\n self.start_time = start_time", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s = Schedule()\n s.hour_from = 22\n s.min_from = 0\n s.hour_to = 21\n s.min_to = 59\n s.interval = 60*60\n\n r = number_expected([s,],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 6 )", "def __init__(self, hrs=0, mins=0, secs=0):\r\n\r\n # Calculate total seconds to represent\r\n totalsecs = hrs*3600 + mins*60 + secs\r\n self.hours = totalsecs // 3600 # Split in h, m, s\r\n leftoversecs = totalsecs % 3600\r\n self.minutes = leftoversecs // 60\r\n self.seconds = leftoversecs % 60", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n program_text: Optional[pulumi.Input[str]] = None,\n start_time: Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None,\n url: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if end_time is not None:\n pulumi.set(__self__, \"end_time\", end_time)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if program_text is not None:\n pulumi.set(__self__, \"program_text\", program_text)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if time_range is not None:\n pulumi.set(__self__, \"time_range\", time_range)\n if url is not None:\n pulumi.set(__self__, \"url\", url)", "def timeCode(*args, mayaStartFrame: Union[float, bool]=0.0, productionStartFrame: Union[float,\n bool]=0.0, productionStartHour: Union[float, bool]=0.0, productionStartMinute:\n Union[float, bool]=0.0, productionStartSecond: Union[float, bool]=0.0, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[time, Any]:\n pass", "def __init__(__self__, *,\n end_time: pulumi.Input[str],\n start_time: pulumi.Input[str]):\n pulumi.set(__self__, \"end_time\", end_time)\n pulumi.set(__self__, \"start_time\", start_time)", "def running_24_hour(arg):\n pass", "def __init__(self, time_param, value):\n TimeService.__init__(self, time_param)\n self.value = value", "def showClock(hour: int, min: int):\n pass", "def make_release_time(date_time, hour, release):\n release_h = int(release[:2])\n release_m = int(release[2:4])\n \n if release_h == 99:\n return 0 #largest integer number int 64 \n \n else:\n if release_m == 99:\n release_m = 0\n release_date_time = date_time.replace(hour= release_h, minute= release_m) \n \n \"\"\" Here, I have to subtract one day to the release time stamp if the hour of the time stamp is in th evening,\n but the nominal time is reported at midnight 
hence in the following day. For example 2019 02 20 00 2349 from file VMM00048820 \"\"\"\n if hour == '00':\n if release_h > 20:\n release_date_time = release_date_time - timedelta(days=1)\n else:\n pass\n \n return release_date_time", "def hours_to_job_at_datetime(self, job_name, *args, **kwargs):\n # if you need to print a datetime with tz info, use this:\n # fmt = '%Y-%m-%d %H:%M:%S %Z%z'\n # my_datetime.strftime(fmt)\n\n test_dir = tempfile.mkdtemp()\n self.tmp_dirs.append(test_dir)\n test_config = config.load_config(StringIO.StringIO(self.config))\n my_mcp = mcp.MasterControlProgram(test_dir, 'config')\n\n test_config.apply(my_mcp)\n now = datetime.datetime(*args, **kwargs)\n timeutils.override_current_time(now)\n next_run = my_mcp.jobs[job_name].next_runs()[0]\n t1 = round(next_run.seconds_until_run_time()/60/60, 1)\n next_run = my_mcp.jobs[job_name].next_runs()[0]\n t2 = round(next_run.seconds_until_run_time()/60/60, 1)\n return t1, t2", "def __iadd__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___iadd__(self, *args, **kwargs)", "def test_time(self):\r\n pass", "def make_time_request(self, time_request=None, **kwargs):\n pass", "def _addTiming(self, key, duration):\n pass", "def createRow(count,curr_time, time_diff,auto_time_class):\n unit_entry = [count,curr_time.year,curr_time.month,curr_time.day,\\\n curr_time.hour,curr_time.minute,curr_time.second,\\\n curr_time.timestamp(),time_diff.total_seconds(), \\\n auto_time_class]\n return unit_entry", "def addKey(self, time, value) -> None:\n ...", "def check_time(cls, task):\n if not task:\n return\n cur_time = datetime.now().strftime(\"%H:%M %d/%m/%Y\")\n if task.period:\n # if this task has end time and period and end time has passed, we will move time this step period\n # while end time less then current time\n while datetime.strptime(cur_time, \"%H:%M %d/%m/%Y\") > datetime.strptime(task.time_last_copy, \"%H:%M %d/%m/%Y\"):\n task.time_last_copy = cls.date_translation(task.time_last_copy, task.period)\n new_task = Task()\n new_task.name = task.name\n new_task.parent = task.parent\n new_task.host = task.host\n new_task.key = cls.get_free_key_task()\n new_task.type_task = task.type_task\n new_task.admins = task.admins.copy()\n new_task.members = task.members.copy()\n new_task.priority = task.priority\n new_task.status = task.status\n new_task.start_time = task.start_time\n new_task.end_time = task.end_time\n new_task.period = ''\n cls.save_task(new_task)", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def create_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"create_time\")", "def timePort(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor: Union[List[float,\n float, float], bool]=None, defineTemplate: AnyStr=\"\", docTag: Union[AnyStr,\n bool]=\"\", dragCallback: Script=None, dropCallback: Script=None, enable: bool=True,\n enableBackground: bool=True, enableKeyboardFocus: bool=True, exists: bool=True,\n fullPathName: bool=True, globalTime: bool=True, height: Union[int, bool]=0,\n highlightColor: Union[List[float, float, float], bool]=None, isObscured: bool=True,\n manage: bool=True, noBackground: bool=True, numberOfPopupMenus: bool=True, parent:\n Union[AnyStr, bool]=\"\", popupMenuArray: bool=True, preventOverride: bool=True,\n snap: bool=True, statusBarMessage: AnyStr=\"\", useTemplate: AnyStr=\"\", visible:\n bool=True, 
visibleChangeCommand: Union[Script, bool]=None, width: Union[int,\n bool]=0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 30\n s1.hour_to = 23\n s1.min_to = 30\n s1.interval = 60*30\n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 30\n s2.hour_to = 23\n s2.min_to = 30\n s2.interval = 60*60\n\n s3 = Schedule()\n s3.hour_from = 22\n s3.min_from = 0\n s3.hour_to = 23\n s3.min_to = 30\n s3.interval = 60*5\n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 25 )", "def test_make_time_str(self):\n\n s = custom_date.make_time_str(\"23\", \"15\", \"01\", \"100\")\n self.assertEqual(s, \"23:15:01.100\")", "def __call__(self, hours=None, minutes=None, ampm=None ):\r\n if ampm != None:\r\n assert hours >= 1 and hours <= 12, \"AM/PM time selection, but given hours are not between 1 and 12\"\r\n # if ampm given as logical text, do translation\r\n if self.phone.isLogicalText(ampm):\r\n ampm = self.phone.getTranslation(ampm)\r\n else: # try getting AM/PM translated\r\n if ampm == \"AM\":\r\n translated = self.phone.getTranslation('n4PLb8v5_KE2WCIh0eKo7Cw')\r\n if translated != None:\r\n ampm = translated\r\n elif ampm == \"PM\":\r\n translated = self.phone.getTranslation('nQOWjTH6UiUCkRejTnWvQPg')\r\n if translated != None:\r\n ampm = translated\r\n\r\n elif hours != None:\r\n assert hours >= 0 and hours <= 23, \"Hours are not between 0 and 23\"\r\n\r\n if minutes != None:\r\n assert int(minutes) >= 0 and int(minutes) <=59, \"Minutes are not between 0 and 59\"\r\n\r\n # angle interval in degrees, affects how many points are calculated to circle.\r\n hour_interval = 15\r\n minute_interval = 15\r\n\r\n msg = \"\"\r\n c = min = hour = None\r\n minutes_set = False\r\n\r\n for i in range(4): # try getting time picker icons 4 times\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n if c == None:\r\n status, c = self.phone.uiState.isItemVisible('widgets/hour-background')[:2]\r\n\r\n if not status == self.phone.uiState.VISIBLE:\r\n msg = \"timepicker center not visible\"\r\n continue\r\n\r\n if minutes != None:\r\n status, min = self.phone.uiState.isItemVisible('widgets/minute-pointer', refresh=False)[:2]\r\n\r\n # check whether minutes are currently wanted ones\r\n item = self.phone.uiState.searchItem('widgets/minute-pointer', touchType=False, contentType='image', refresh=False)\r\n if item:\r\n x,y,w,h = [int(p) for p in item.getAttribute('coords').split(\",\")]\r\n t = self.phone.tryExpect(\"%02d\" % minutes,refresh=False,timeout=0, fromArea=(x-2,y-2,x+w+2,y+h+2), doNotReport = True)\r\n if len(t) > 0: # minutes are already set\r\n minutes_set = True\r\n\r\n if not status == self.phone.uiState.VISIBLE:\r\n msg = \"minute picker not visible\"\r\n continue\r\n else:\r\n msg = \"minute picker not visible\"\r\n continue\r\n\r\n if hours != None:\r\n status, hour = self.phone.uiState.isItemVisible('widgets/hour-pointer', refresh=False)[:2]\r\n\r\n if not status == self.phone.uiState.VISIBLE:\r\n msg = \"hour picker not visible\"\r\n continue\r\n\r\n break\r\n\r\n if not msg == \"\":\r\n self.phone.fail(msg)\r\n\r\n success = False\r\n\r\n if minutes != None and 
not minutes_set :\r\n attemps = 8\r\n for i in range(attemps): # try to get to correct few times\r\n if i % 2 != 0:\r\n debug.out('adjust')\r\n angle_adjust = 0.07 # alter the angle radians a bit every second attemp (has effect on minutes 17, 37 etc.. )\r\n else:\r\n angle_adjust = 0\r\n \r\n steps = self._getCirclePoints(c,min,self._getMinutesAngle(minutes)+angle_adjust,minute_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n test = []\r\n test.append('%02d' % ((minutes-1+60) % 60))\r\n test.append('%02d' % (minutes))\r\n test.append('%02d' % ((minutes+1) % 60))\r\n\r\n # do exact checking with area that time has been set correctly\r\n item = self.phone.uiState.searchItem('widgets/minute-pointer', touchType=False, contentType='image')\r\n\r\n if not item:\r\n debug.vrb(\"Could not find minute pointer..\")\r\n continue\r\n\r\n x,y,w,h = [int(p) for p in item.getAttribute('coords').split(\",\")]\r\n t = self.phone.tryExpect(test,refresh=False,timeout=0, fromArea=(x-2,y-2,w+2,h+2), doNotReport = True)\r\n\r\n if len(t) > 0:\r\n if 1 in t: # tryExpect returns 0,1,2 - where 1 means that exact minutes was found\r\n success = True\r\n break\r\n\r\n # minute picker was not on correct position\r\n rand = random.randint(7,15) # move time picker a random amount anticlockwise from current minutes\r\n if minutes > rand:\r\n temp = minutes - rand\r\n else:\r\n temp = (60-rand) + minutes\r\n\r\n if i == attemps-1: # if this was the last attemp, break the loop\r\n break\r\n\r\n debug.vrb(\"Could not set minutes to %s, trying time setting again.. anticlockwise to %s and try again\" % (minutes, temp))\r\n\r\n status, min = self.phone.uiState.isItemVisible('widgets/minute-pointer')[:2]\r\n steps = self._getCirclePoints(c,min,self._getMinutesAngle(temp),minute_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n status, min = self.phone.uiState.isItemVisible('widgets/minute-pointer')[:2]\r\n\r\n if not success:\r\n self.phone.fail(\"Could not get minutes set\")\r\n success = False\r\n\r\n if hours != None:\r\n attemps = 5\r\n for i in range(attemps): # try to get to correct time few times\r\n # add 2 additional degrees to wanted angle, usually exact hour is not in exact angle\r\n steps = self._getCirclePoints(c,hour,(self._getHoursAngle(hours)+math.radians(2)),6)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n test = []\r\n if ampm == None:\r\n test.append('%02d' % ((hours-1+24) % 24))\r\n test.append('%02d' % (hours))\r\n test.append('%02d' % ((hours+1) % 24))\r\n else:\r\n test.append('%02d' % ((hours-1+12) % 12))\r\n test.append('%02d' % (hours))\r\n test.append('%02d' % ((hours+1) % 12))\r\n\r\n if ampm == None:\r\n temp_h = (hours + 12) % 24 # test also whether hour picker needs to be turned 360 degrees to get correct time\r\n test.append('%02d' % (temp_h-1))\r\n test.append('%02d' % (temp_h))\r\n test.append('%02d' % (temp_h+1))\r\n test_am = None\r\n\r\n # do exact checking with area that time has been set correctly\r\n item = self.phone.uiState.searchItem('widgets/hour-pointer', touchType=False, contentType='image')\r\n if not item:\r\n 
debug.vrb(\"Could not find hour pointer..\")\r\n continue\r\n\r\n x,y,w,h = [int(p) for p in item.getAttribute('coords').split(\",\")]\r\n t = self.phone.tryExpect(test,refresh=False, timeout=0, fromArea=(x-2,y-2,w+2,h+2), doNotReport = True)\r\n if ampm != None:\r\n test_am = self.phone.tryExpect(ampm, timeout=0, refresh=False, doNotReport = True)\r\n\r\n if len(t) > 0:\r\n if 4 in t or (ampm != None and test_am == []): # only when ampm is not enabled\r\n # picker needs to be turned 360 degrees over\r\n steps = self._getCirclePoints(c, steps[-1], self._getHoursAngle((hours + 4) % 24), hour_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n steps = self._getCirclePoints(c, steps[-1], self._getHoursAngle((hours + 8) % 24), hour_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n steps = self._getCirclePoints(c, steps[-1], (self._getHoursAngle(hours)+math.radians(2)), hour_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n item = self.phone.uiState.searchItem('widgets/hour-pointer', touchType=False, contentType='image')\r\n if item:\r\n x,y,w,h = [int(p) for p in item.getAttribute('coords').split(\",\")]\r\n t = self.phone.tryExpect(test,refresh=False, timeout=0, fromArea=(x-2,y-2,w+2,h+2), doNotReport = True)\r\n\r\n if 1 in t: # tryExpect returns 0,1,2 - where 1 means that exact hour was found\r\n success = True\r\n break\r\n\r\n # hour picker hit the wrong time, try again\r\n rand = random.randint(2,5) # move time picker a random amount anticlockwise from current hour\r\n if hours > rand:\r\n temp = hours - rand\r\n else:\r\n temp = 12 - rand + hours\r\n\r\n if i == attemps-1: # this was final attemp, break the loop\r\n break\r\n\r\n debug.vrb(\"Could not set hour to %s, trying time setting again.. 
anticlockwise to %s and try again\" % (hours, temp))\r\n\r\n status, hour = self.phone.uiState.isItemVisible('widgets/hour-pointer')[:2]\r\n steps = self._getCirclePoints(c,hour,self._getHoursAngle(temp),hour_interval)\r\n self.phone._touch.drawShape( steps )\r\n\r\n # uiChanged --> True so that dump will be taken by force\r\n # FIXME: Use event approach instead\r\n self.phone.uiState.setUIChanged(True)\r\n\r\n status, hour = self.phone.uiState.isItemVisible('widgets/hour-pointer')[:2]\r\n\r\n if not success:\r\n self.phone.fail(\"Could not get hours set\")\r\n success = False\r\n\r\n if hours != None:\r\n msg = \"Selected time %02d:\" % hours\r\n else:\r\n msg = \"Selected time xx:\"\r\n\r\n if minutes != None:\r\n msg += \"%02d\" % minutes\r\n else:\r\n msg += \"xx\"\r\n\r\n if ampm != None:\r\n msg += \" %s\" % ampm\r\n\r\n self.phone.comment(msg)", "def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"", "def dummy():\n\tplay = Play(artist = \"Artist\", album_artist = \"Album Artist\", album = \"Album\", title = \"Title\", user = users.get_current_user())\n\tplay.put()\n\tprint(datetime.datetime.now())\n\tprint(play.time)\n\tprint(play.time.hour)\n\treturn \"Success\"", "def __init__(self, t):\n\t\tself.delay = math.ceil(t / config.time_resolution)", "def new_entry():\n clear_screen()\n entry = {}\n entry['id'] = get_next_id()\n entry['name'] = input_name()\n print(\"How many minutes did you spend on {}?\".format(entry['name']))\n print(\"Or you may specify a format after the time, seperated by a comma\")\n entry['time_spent'] = input_time_spent()\n add_notes = input(\"Add notes? Y/n \").lower()\n if add_notes != 'n':\n entry['notes'] = input_notes()\n entry['date'] = datetime.now().strftime(FMT_MONTH_DAY_YEAR)\n with open(WORK_LOG_FILENAME, 'a', newline='') as work_log:\n work_log_writer = csv.DictWriter(work_log, fieldnames=FIELDNAMES)\n work_log_writer.writerow(entry)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s4 = Schedule()\n s4.interval = 60*30\n\n s5 = Schedule()\n s5.interval = 60*45\n\n r = number_expected([s4,s5],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 16 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s4 = Schedule()\n s4.interval = 60*30\n\n s5 = Schedule()\n s5.interval = 60*45\n\n r = number_expected([s4,s5],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 16 )", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n s3 = Schedule()\n s3.hour_from = 0\n s3.min_from = 0\n s3.hour_to = 21\n s3.min_to = 59\n s3.interval = 60*60*3 \n\n\n r = number_expected([s1,s2,s3],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def automate_time(f, f_unit, t, t_unit):\n f_unit, t_unit = 
clean_units([f_unit, t_unit])\n\n num_tasks = number_times_per_year(f, f_unit)\n\n time_seconds = t * converter[t_unit]\n\n total_time = calculate_automation(num_tasks, time_seconds)\n\n value, unit = usable_numbers(total_time)\n print('You are spending %d %s every 5 years on this task' % (value, unit))\n \n # print(\"%d times per %s, I spend %d %s doing the task, which is %d seconds per 5 years\" % (f, f_unit, t, t_unit, total_time))\n # print(\"num_tasks: %d, time_seconds: %d\" % (num_tasks, time_seconds))\n # print(' ')", "def test_3_time_string_conversion(time_record_factory):\n d = datetime.datetime(2018, 10, 1, 15, 26)\n t = time_record_factory(time_start=d, time_end=d)\n expected = (\n f'Time Record from {t.time_start:%I:%M %p} to '\n f'{t.time_end:%I:%M %p} on {t.time_start:%m/%d/%Y}.'\n )\n assert str(t) == expected", "def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n program_text: Optional[pulumi.Input[str]] = None,\n start_time: Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None,\n __props__=None):\n ...", "def __init__(self,id,appointment_time,description):\n self.id = id\n self.appointment_time = appointment_time\n self.description = description", "def add_minutes(self):\n r = self.minute + self.value\n x = int((r / 60))\n\n self.hour = self.hour + x\n self.minute = r - (60 * x)\n\n cycles = int(self.hour / 12)\n if cycles > 0:\n if (cycles % 2) == 0:\n pass\n else:\n if self.meridiem == 'AM':\n self.meridiem = 'PM'\n else:\n self.meridiem = 'AM'\n\n self.hour = self.hour - cycles * 12\n if self.hour == 0:\n self.hour = 1\n\n if self.minute < 10:\n self.minute = str(0) + str(self.minute)\n\n new_time: str = str(self.hour) + ':' + str(self.minute) + ' ' + self.meridiem.upper()\n return new_time", "def createTimeCalculation(toConvert):\r\n splited = toConvert.partition(':')\r\n if splited.__len__() != 3:\r\n raise ValueError\r\n hours = int(splited[0])\r\n minutes = int(splited[2])\r\n if minutes < 0:\r\n raise ValueError\r\n sign = getSign(hours)\r\n hours *= sign\r\n return TimeCalculations(hours, minutes, sign)", "def __init__(__self__, *,\n program_text: pulumi.Input[str],\n description: Optional[pulumi.Input[str]] = None,\n end_time: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n start_time: Optional[pulumi.Input[int]] = None,\n time_range: Optional[pulumi.Input[int]] = None):\n pulumi.set(__self__, \"program_text\", program_text)\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if end_time is not None:\n pulumi.set(__self__, \"end_time\", end_time)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if start_time is not None:\n pulumi.set(__self__, \"start_time\", start_time)\n if time_range is not None:\n pulumi.set(__self__, \"time_range\", time_range)", "def test_schedule(self):\n\n year = 2019\n month = 3\n day = 25 \n hour = 0\n\n mydate = datetime.datetime(year, month, day,hour)\n\n lower_boundary = mydate - timedelta(hours=3)\n upper_boundary = mydate + timedelta(hours=3)\n\n s1 = Schedule()\n s1.hour_from = 0\n s1.min_from = 0\n s1.hour_to = 21\n s1.min_to = 59\n s1.interval = 60*60*3 \n\n s2 = Schedule()\n s2.hour_from = 0\n s2.min_from = 0\n s2.hour_to = 21\n s2.min_to = 59\n s2.interval = 60*60*3 \n\n r = 
number_expected([s1,s2],lower_boundary,upper_boundary)\n\n self.assertEqual(r, 2 )", "def main():\n\n # check database for tracking options\n # if empty prompt to add subject\n\n # present tracking options\n\n # calculate timedelta\n\n # printing/updating the time", "def __init__(self, Date, TimeOfDay):\n self.date = Date\n self.time_of_day = TimeOfDay", "def set_creation_time(self, t: int) -> None:\n self.metadata.data[\"creation_time\"] = t", "def task11_time_converter(num):\n if num < 0:\n raise ValueError\n hour = num // 60\n minute = num % 60\n return f'{hour}:{minute}'", "def edit_time_spent(entry):\n entry.time_spent = get_minutes()\n entry.save()\n input(\"Edit successful. \")\n return entry", "def release_time(date_time):\n\n time_hour = int(date_time.strftime('%H'))\n\n quotient = int(time_hour / 4)\n\n if quotient == 5:\n date_time = datetime.combine(date_time.date()+timedelta(1), time(0,0))\n else:\n date_time = datetime.combine(date_time.date(), time((quotient+1)*4,0))\n \n return date_time", "def get_time_attr_map(t):\n now = datetime.datetime.now()\n if t + datetime.timedelta(hours=3) > now:\n return get_map(\"main_list_white\")\n if t + datetime.timedelta(days=3) > now:\n return get_map(\"main_list_lg\")\n else:\n return get_map(\"main_list_dg\")", "def create_record(self):\n return {\n \"date\": self.date_str,\n \"start_day\": self.args.work_hours[0].strftime(\"%H:%M\"),\n \"end_day\": self.args.work_hours[1].strftime(\"%H:%M\"),\n \"start_break\": self.args.break_time[0].strftime(\"%H:%M\"),\n \"end_break\": self.args.break_time[1].strftime(\"%H:%M\"),\n \"comment\": self.args.comment,\n \"special\": str(self.args.special)\n }", "def __init__(self, position, date, time=None, name=None):\n if time is None:\n if name is None:\n raise ValueError(\"Both time and name may not be None.\")\n else:\n time = name.value\n\n if name is None:\n name = position.lookupByValue(time)\n elif name.value != time:\n raise ValueError(\"time and name do not match: {0} != {1}\".format(time, name))\n\n self.position = position\n self.start = DateTime(year=date.year, month=date.month, day=date.day, hour=time.hour)\n self.name = name", "def test_fields_effort_time_units_dictionary_string(self, _mock_check):\n field = EffortField(time_units={\"minute\": \"minute\"})\n\n errors = field.check()\n self.assertEqual(len(errors), 1)\n\n error = errors[0]\n self.assertEqual(error.msg, \"'time_units' must be a dictionary of tuples.\")\n self.assertEqual(error.obj, field)\n self.assertEqual(error.id, \"fields.E1011\")", "def __init__(self, minutes: int):\n # TODO : IMPLEMENT __format__ method\n if minutes < 0:\n minutes = 0\n self.minutes = int(minutes)", "def __init__(self, value, time, time_type=\"int\", currency=\"mxn\"):\n self.time = Time(time, time_type)\n self.value = self.validate_value(value)\n self.currency = self.validate_currency(currency)", "def __init__(__self__, *,\n day: Optional[pulumi.Input[str]] = None,\n hour: Optional[pulumi.Input[int]] = None,\n minute: Optional[pulumi.Input[int]] = None,\n snapshots_to_keep: Optional[pulumi.Input[int]] = None,\n used_bytes: Optional[pulumi.Input[float]] = None):\n if day is not None:\n pulumi.set(__self__, \"day\", day)\n if hour is not None:\n pulumi.set(__self__, \"hour\", hour)\n if minute is not None:\n pulumi.set(__self__, \"minute\", minute)\n if snapshots_to_keep is not None:\n pulumi.set(__self__, \"snapshots_to_keep\", snapshots_to_keep)\n if used_bytes is not None:\n pulumi.set(__self__, \"used_bytes\", used_bytes)", "def postpone(self, 
dlt_time, ky_word):\n if ky_word == 'hour':\n self.work_datetime = self.work_datetime + tdelta(seconds=dlt_time * 3600)\n elif ky_word == 'day':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time)\n elif ky_word == 'week':\n self.work_datetime = self.work_datetime + tdelta(weeks=dlt_time)\n elif ky_word == 'month':\n self.work_datetime = self.work_datetime + tdelta(days=dlt_time * 30)\n self.eisenhower_priority()\n return self.work_datetime", "def create_flight_needs_task(self):\n duration = self.trip.arrival_date_time - self.trip.departure_date_time\n if duration > timedelta(hours=2):\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"It's a long flight ! Don't forget your earplugs and your sleep mask.\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))\n else:\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"Take some food and some drinks for your flight\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))", "def __init__(self, time):\n pass", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time", "def create_time(self, create_time):\n\n self._create_time = create_time" ]
[ "0.63917255", "0.63114077", "0.5988722", "0.5984777", "0.59670573", "0.59660786", "0.59379464", "0.5916419", "0.58417237", "0.58348185", "0.5833585", "0.5783263", "0.5764132", "0.5757329", "0.5752414", "0.57444865", "0.57442397", "0.5735035", "0.57336044", "0.5732334", "0.5727091", "0.5717379", "0.57122076", "0.5709428", "0.5699456", "0.56896895", "0.56881404", "0.56756496", "0.56464684", "0.5635764", "0.56332165", "0.56299305", "0.56263846", "0.56197095", "0.5615304", "0.5602801", "0.5596237", "0.55898094", "0.55866337", "0.5582441", "0.5578677", "0.5548143", "0.5535583", "0.5527109", "0.5520525", "0.55127996", "0.55058277", "0.547788", "0.54749346", "0.54237807", "0.54102933", "0.54102236", "0.5404411", "0.5391029", "0.5387438", "0.53831166", "0.53823626", "0.53641146", "0.53570026", "0.53500473", "0.53500473", "0.53500473", "0.53473264", "0.53470945", "0.53414255", "0.53181326", "0.53178644", "0.53171176", "0.5314536", "0.5308407", "0.5298557", "0.5298557", "0.5295949", "0.5287038", "0.5273542", "0.52712107", "0.5271181", "0.52672976", "0.5262678", "0.5262058", "0.5260913", "0.5255786", "0.5246518", "0.5245579", "0.5244487", "0.5242967", "0.5233812", "0.5229154", "0.52285105", "0.5223706", "0.5216514", "0.52089", "0.5206758", "0.5203181", "0.52013195", "0.51934004", "0.5181408", "0.5179547", "0.5179547", "0.5179547" ]
0.68871313
0
Return all of the projects for a given Workspace
def getWorkspaceProjects(self, id): return self.request(Endpoints.WORKSPACES + '/{0}'.format(id) + '/projects')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def get_projects(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/projects\").json()", "def get_all_projects(self, scope):\n url = \"{0}/{1}/{2}\".format(self.keystone_server_url, DEFAULT_KEYSTONE_API_VERSION, \"projects\")\n headers = {'X-Auth-Token': scope.auth_token}\n try:\n r = self._make_request_with_auth_fallback(url, headers)\n return r['projects']\n\n except Exception as e:\n self.warning('Unable to get projects: %s', e)\n raise e\n\n return None", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def find_all(self, params={}, **options):\n return self.client.get_collection(\"/workspaces\", params, **options)", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def projects(self):\r\n return p.Projects(self)", "def getprojects(self):\n resp = self.conn.request('GET', self.URLS['allprojects'], dict(api_key=self.api_key))\n data = resp.data.decode('utf-8')\n jdata = json.loads(data)['projects']\n # Convert nested JSON documents\n for project_index in range(len(jdata)):\n for field in ('options_json', 'templates_json'):\n jdata[project_index][field] = json.loads(jdata[project_index][field])\n # Pass project details dictionaries to constructors, return array\n return [PhProject(self, project) for project in jdata]", "def get_projects():\n return Project.query.all()", "def projects(self):\n ret_val = []\n params = {\"fields\": Project.FIELDS}\n projects = self._request(\"get\", \"projects\", params=params)\n\n for project in projects:\n ret_val.append(Project(project))\n\n return ret_val", "def get_projects(self):\n return conf.projects", "def get_projects(self):\n return self.jira.projects()", "def list_projects(self):\n data = self._run(\n url_path=\"projects/list\"\n )\n projects = data['result'].get('projects', [])\n return [self._project_formatter(item) for item in projects]", "def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def list(self):\n return self.rpc.call(MsfRpcMethod.DbWorkspaces)['workspaces']", "def 
get_projects(self):\n return self._gitlab.owned_projects(per_page=1000)", "def getProjects(self):\n\n return self.__projects", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def getProjects(self):\n catalog = plone.api.portal.get_tool('portal_catalog')\n path = '{}/projects'.format('/'.join(plone.api.portal.get().getPhysicalPath()))\n query = dict(portal_type='Project', sort_on='sortable_title', path=path)\n result = list()\n for brain in catalog(**query):\n result.append((brain.getId, brain.Title))\n return result", "def all_projects(self):\n projects_list = []\n for path in DAVOS_PROJECT_DIR.iterdir():\n if path.is_dir():\n projects_list.append(Project(path.name))\n return projects_list", "def get_all_projects():\n return jsonify(admin.get_all_projects(current_app.scoped_session()))", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def get_projects_data():\n wcscanner_path = context.__BASE_PATH__ + '/.wcscanner'\n\n data = []\n for project in os.listdir(wcscanner_path):\n if (os.path.isdir(os.path.join(wcscanner_path, project))):\n update_project_data(project)\n project_path = '{}/{}'.format(wcscanner_path, project)\n f = open('{}/.project'.format(project_path), 'r')\n data.append(json.load(f))\n f.close()\n return data", "def get_projects(self, refresh=False):\n if refresh:\n self._projects_lookup = self.get_project_lookup()\n\n return self._projects_lookup.keys()", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return 
ret.json()", "def get_project_list():\n return parse_list_output(Popen(\n 'openstack project list'.split(), stdout=STDOUT, stderr=STDERR\n ).communicate()[0])", "def list_projects(self) -> List['RadsProject']:\n ret = []\n base = self.fspath(\"projects\")\n for name in os.listdir(base):\n if os.path.isdir(f\"{base}/{name}/releases\"):\n ret.append(RadsProject(self, name))\n return ret", "def get_project_list(token):\n session = requests.Session()\n session.headers.update({'Authorization': f'Token {token}'})\n url = get_project_list_url()\n r = session.get(url=url)\n return r", "def list_projects(ctx):\n pprint(ctx.obj.groups.get().data)", "def list_projects(self):\n project_keys = self.client._perform_json(\"GET\", \"/project-folders/%s\" % self.project_folder_id).get(\"projectKeys\", [])\n return [DSSProject(self.client, pkey) for pkey in project_keys]", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def list_projects(arn=None, nextToken=None):\n pass", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def projects(self):\n campaigns = self.campaigns.all()\n return Project.published_objects.filter(campaigns__in=campaigns)", "def list_workspaces(client):\n return client._creoson_post(\"windchill\", \"list_workspaces\", key_data=\"workspaces\")", "def get_project_list(self, dummy_project):\n # TODO: domain scope 403 is probably to do with faulty keystone policy config -- revise?\n if not self._projects:\n self._projects = self._get_keystone_client(dummy_project).projects.list()\n\n return self._projects", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 
'name': project['name']})\n\n return project_names", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def _get_resource_projects(resource):\n resource_type = resource.get('type', '').upper()\n resource_values = resource.get('include', tuple())\n\n projects = tuple()\n if resource_type == _FOLDER:\n projects = _get_folder_projects(resource_values)\n elif resource_type == _PROJECT:\n projects = _get_projects(resource_values)\n elif resource_type == _FILTER:\n projects = _get_filtered_projects(resource_values)\n else:\n logging.info('Projects: No projects for resource %s', resource_type)\n return projects", "def getSubProjects(self):\n logger.debug(\"Func: getSubProjects\")\n\n return self._subProjectsList", "def projects():\n response = jsonify(projects_service.get_top_level_projects_ids())\n return response", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def get_projects(self):\n if not self.validate():\n raise SettingCustomVisionAccessFailed\n return self.get_trainer_obj().get_projects()", "def get_projects(self, team_id):\n endpoint = '/teams/{}/projects'.format(team_id)\n return self._api_call('get', endpoint)", "def list_projects():\n with BMI(_username, _password, constants.BMI_ADMIN_PROJECT) as bmi:\n ret = bmi.list_projects()\n if ret[constants.STATUS_CODE_KEY] == 200:\n table = PrettyTable(\n field_names=[\"Id\", \"Name\", \"Provision Network\"])\n projects = ret[constants.RETURN_VALUE_KEY]\n for project in projects:\n table.add_row(project)\n click.echo(table.get_string())\n else:\n click.echo(ret[constants.MESSAGE_KEY])", "def list_keystone_v3_projects(self):\n LOG_OBJ.debug(\"List the projects.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/projects\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating project\")\n print (\"No response from Server while creating project\")\n return response\n\n if response.status not in [200, 201, 202, 203, 
204]:\n LOG_OBJ.error(\" Creating project Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating project Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Projects list : %s \" % output)\n print (\"Projects list : %s \" % output)\n return output['projects']", "def get_projects(self):\n rps = self.start_date\n\n return Project.objects.filter(\n Q(active=True)\n & Q(\n Q(start_date__lte=rps)\n | Q(\n Q(start_date__gte=rps)\n & Q(start_date__lte=datetime.datetime.now().date())\n )\n | Q(start_date__isnull=True)\n )\n & Q(\n Q(end_date__gte=rps)\n | Q(end_date__isnull=True)\n )\n )", "def repository_projects(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository_projects\", host, owner, repo)", "def get_projects(self):\n projects = []\n for project in self.server.projects:\n projects.append({'id': utils.slugify(project),\n 'name': project})\n response.content_type = 'application/json'\n return json.dumps(projects)", "def get_sistr_results_all_projects(self):\n projects = self.irida_api.get_user_projects()\n return self._get_sistr_results(projects)", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def find_projects(self):\n\n attrs = ['name', 'remote', 'revision', 'path', 'groups', 'upstream']\n projects = defaultdict(list)\n\n for project in self.tree.findall('project'):\n values = [project.get(attr) for attr in attrs]\n project_dict = dict(zip(attrs, values))\n project_name = project_dict.pop('name')\n\n if project_dict['groups'] is not None:\n project_dict['groups'] = project_dict['groups'].split(',')\n\n if project_name is None:\n if self.fail_on_invalid:\n raise InvalidManifest(\n 'Project entry missing \"name\" attribute'\n )\n else:\n continue\n\n if project_name in projects:\n paths = [\n p_attr.get('path', project_name)\n for name, p_attrs in projects.items()\n for p_attr in p_attrs if name == project_name\n ]\n\n if project_dict['path'] in paths:\n raise InvalidManifest(\n 'Duplicate project entry with matching \"name\" '\n 'and \"path\" attributes'\n )\n\n children = project.getchildren()\n if children:\n for child in children:\n subelement = child.tag\n\n # Only run the following if the element tag\n # is a string (avoids comments)\n if isinstance(subelement, str):\n subdict = getattr(\n self, 'create_{}_dict'.format(subelement)\n )(child)\n\n if subdict is not None:\n project_dict.setdefault(subelement, []).append(\n subdict\n )\n\n projects[project_name].append(\n self.generate_data_dict(project_dict)\n )\n\n self.projects = projects", "def active_projects(self):\n return self.projects.filter(active=True)", "def index(self):\n return {'projects': [p for p in self.server.projects.values()]}", "def get_projects(self, source=\"all\"):\n self.projects = []\n self._project_indices_by_id = {}\n self._project_indices_by_name = {}\n\n if self.hub_type == self.NAMESPACES[\"a.\"]:\n if not self.auth.three_legged:\n self.logger.warning(\n \"Failed to get projects. 
'{}' hubs only supports 3-legged access token.\".format( # noqa:E501\n self.NAMESPACES[\"a.\"]\n )\n )\n else:\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n elif self.hub_type == self.NAMESPACES[\"b.\"]:\n\n if source.lower() in (\"all\", \"docs\"):\n for project in self.api.dm.get_projects():\n self.projects.append(\n Project(\n project[\"attributes\"][\"name\"],\n project[\"id\"][2:],\n data=project,\n app=self,\n )\n )\n\n self._project_indices_by_id[project[\"id\"][2:]] = (\n len(self.projects) - 1\n )\n self._project_indices_by_name[\n project[\"attributes\"][\"name\"]\n ] = (len(self.projects) - 1)\n\n if (\n source.lower() in (\"all\", \"admin\")\n and not self.auth.three_legged\n ):\n\n for project in self.api.hq.get_projects():\n if project[\"id\"] in self._project_indices_by_id:\n self.projects[\n self._project_indices_by_id[project[\"id\"]]\n ].data = project\n else:\n self.projects.append(\n Project(\n project[\"name\"],\n project[\"id\"],\n data=project,\n app=self,\n )\n )\n self._project_indices_by_id[project[\"id\"]] = (\n len(self.projects) - 1\n )\n\n self._project_indices_by_name[project[\"name\"]] = (\n len(self.projects) - 1\n )\n\n elif source.lower() in (\"all\", \"admin\"):\n self.logger.debug(\n \"Failed to get projects. The BIM 360 API only supports 2-legged access tokens\" # noqa:E501\n )", "def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))", "def request_workspace_list(self, request):\n \n user_id = request['user_id'] \n \n response = {'workspaces': []}\n response['workspaces'] = self.list_workspaces(user_id=user_id)\n \n return response", "def get_projects(self, page_size, page, sort_direction, sort_conditions):\n request_url = self.api_base_url + \"projects?\" + \"pageSize=\" + str(page_size) + \"&page=\" + str(page) + \"&sortDirection=\" + sort_direction + \"&sortConditions=\" + sort_conditions\n headers = {\"Accept\": \"application/JSON\"}\n response = Util.get_api_call(request_url, headers)\n return response.json()", "def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def 
get_project_list(config):\n eggs_dir = config.get('eggs_dir', 'eggs')\n if os.path.exists(eggs_dir):\n projects = os.listdir(eggs_dir)\n else:\n projects = []\n try:\n projects += [x[0] for x in config.cp.items('settings')]\n except NoSectionError:\n pass\n return projects", "def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]", "def get(self):\n try:\n user = None\n user_id = token_auth.current_user()\n if user_id:\n user = UserService.get_user_by_id(user_id)\n search_dto = self.setup_search_dto()\n results_dto = ProjectSearchService.search_projects(search_dto, user)\n return results_dto.to_primitive(), 200\n except NotFound:\n return {\"mapResults\": {}, \"results\": []}, 200\n except (KeyError, ValueError) as e:\n error_msg = f\"Projects GET - {str(e)}\"\n return {\"Error\": error_msg}, 400", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def getProjectsForOrgs(org_keys, limit=1000):\n q = getProjectsQueryForOrgs(org_keys)\n return q.fetch(limit)", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def projects(self, request, pk=None):\n\n obj = self.get_object()\n try:\n query = models.Project.objects.filter(\n subject=obj.subject,\n assign=obj\n )\n serializer = self.get_serializer(query, many=True)\n\n id = self.request.query_params.get('id')\n\n if id:\n query = get_object_or_404(\n models.Project,\n id=id,\n assign=obj\n )\n return self.filtering(request, query)\n\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def _page_projects(self):\n return self._open(self.app.page_projects)", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def workspaces(self):\n return WorkspaceCollection(client=self)", "def get_projects(self, include_stats, is_active_val=None):\n\n # read all kinds of project info and computed counts from the db\n # into a pandas data frame\n projects_df = self._read_projects_df_from_db(\n include_stats=include_stats)\n\n # if an active value has been provided, look only at project records\n # that have that active value. 
NB this has to be a test against None,\n # not against \"false-ish\" (if not is_active_val)\n if is_active_val is not None:\n is_active_val_mask = projects_df[p.IS_ACTIVE_KEY] == is_active_val\n filtered_df = projects_df.loc[is_active_val_mask]\n projects_df = filtered_df\n\n if include_stats:\n # cut stats columns out into own df (w same index as projects one)\n stats_keys = p.get_computed_stats_keys()\n stats_df = projects_df[stats_keys].copy()\n projects_df = projects_df.drop(stats_keys, axis=1)\n\n # within computed stats columns (ONLY--does not apply to\n # descriptive columns from the project table, where None is\n # a real, non-numeric value), NaN and None (which pandas treats as\n # interchangeable :-| ) should be converted to zero. Everything\n # else should be cast to an integer; for some weird reason pandas\n # is pulling in counts as floats\n stats_df = stats_df.fillna(0).astype(int)\n\n stats_dict = stats_df.to_dict(orient='index')\n\n result = []\n # NB: *dataframe*'s to_dict automatically converts numpy data types\n # (e.g., numpy.bool_, numpy.int64) to appropriate python-native data\n # types, but *series* to_dict does NOT do this automatic conversion\n # (at least, as of this writing). Be cautious if refactoring the below\n projects_dict = projects_df.to_dict(orient='index')\n for k, v in projects_dict.items():\n if include_stats:\n v[p.COMPUTED_STATS_KEY] = stats_dict[k]\n result.append(p.Project.from_dict(v))\n\n return result", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def get_group_projects(groupname):\n values = admin.get_group_projects(current_app.scoped_session(), groupname)\n return jsonify({\"projects\": values})", "def get(self):\n opts = PROJECTS_OPTS_PARSER.parse_args()\n filters = PROJECT_FILTERS_PARSER.parse_args()\n filters = clean_attrs(filters)\n\n query = Project.query\n\n if not current_user.is_authenticated():\n query = query.filter_by(public=True)\n\n if opts['order'] == 'recent':\n query = (\n query.\n join(Project.jobs, isouter=True).\n group_by(Project).\n order_by(sql_func.max(Job.create_ts).desc().nullslast())\n )\n\n if filters:\n query = query.filter(*[\n getattr(Project, field) == value\n for field, value in filters.items()\n ])\n\n marshaler = dict(items=ALL_LIST_ROOT_FIELDS['items'])\n values = dict(items=query.all())\n\n args = PROJECT_LIST_PARSER.parse_args()\n\n if args['meta']:\n marshaler['meta'] = ALL_LIST_ROOT_FIELDS['meta']\n values['meta'] = {'total': query.count()}\n values['meta'].update(Project.get_status_summary(filters))\n\n if args['latest_job']:\n marshaler['items'] = ITEMS_MARSHALER_LATEST_JOB\n\n return marshal(values, marshaler)", "def list_objects(self):\n objects = self.client._perform_json(\"GET\", \"/workspaces/%s/objects\" % self.workspace_key)\n return [DSSWorkspaceObject(self, object) for object in objects]", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def projects(self, langs=True) -> List['RadsProjectVersion']:\n dependencies = self.dependencies()\n if langs is False:\n return dependencies[None]\n elif langs is True:\n return list({pv for pvs in dependencies.values() for pv in pvs})\n elif 
isinstance(langs, Language):\n return dependencies[langs]\n else:\n return list({pv for lang in langs for pv in dependencies[lang]})", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def get_project_ids(self, *criterion):\n from wkcdd.models.helpers import get_project_ids\n return get_project_ids([self.id], *criterion)", "def parse_one_project(self, args, project_arg):\n project = self.linguist_worktree.get_linguist_project(project_arg, raises=True)\n return [project]", "def GetProject(self):\n errors = []\n objects = list(request_helper.MakeRequests(\n requests=[(self.compute.projects,\n 'Get',\n self.messages.ComputeProjectsGetRequest(\n project=properties.VALUES.core.project.Get(\n required=True),\n ))],\n http=self.http,\n batch_url=self.batch_url,\n errors=errors,\n custom_get_requests=None))\n if errors:\n utils.RaiseToolException(\n errors,\n error_message='Could not fetch project resource:')\n return objects[0]", "def get_projects_of_user(self, user_id):\n res = self.conn.cursor().execute(\"\"\"SELECT * FROM projects p JOIN users_projects up \n ON p.id = up.project_id \n WHERE owner=? OR up.user_id=?\n GROUP BY p.id\n ORDER BY last_update DESC\"\"\", (user_id, user_id,))\n return res.fetchall()", "def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects", "def get_projects_by_name(name): # noqa: E501\n\n value = ref.child(\"projects\").get()\n\n result = []\n for key in value:\n subkey = value[key]\n if 'name' in subkey and subkey['name'] is name:\n result.append(subkey['name'])\n return result" ]
[ "0.74932694", "0.7458052", "0.7392975", "0.73838377", "0.737569", "0.7349293", "0.730423", "0.7239146", "0.72044575", "0.7192277", "0.71775115", "0.71413183", "0.71402687", "0.713788", "0.71083647", "0.7094535", "0.7094002", "0.7023004", "0.6982993", "0.6981023", "0.6948548", "0.69416845", "0.69369155", "0.6928395", "0.6922935", "0.69040346", "0.6887113", "0.6859838", "0.6831051", "0.68020934", "0.6790076", "0.678458", "0.6751312", "0.6741645", "0.67364126", "0.67326546", "0.6726068", "0.67190355", "0.6715516", "0.669583", "0.6677975", "0.6669382", "0.66425407", "0.6632196", "0.6614678", "0.6610532", "0.66086674", "0.65737975", "0.65717715", "0.65624535", "0.65539706", "0.65489787", "0.65290993", "0.6521118", "0.6518595", "0.65153456", "0.6508651", "0.64850926", "0.64655364", "0.64632475", "0.64408505", "0.64061314", "0.6364245", "0.6326936", "0.631069", "0.628526", "0.6276114", "0.62471235", "0.6241377", "0.6199964", "0.6193563", "0.61914045", "0.61706406", "0.61637884", "0.6080672", "0.6044513", "0.6040324", "0.60288435", "0.6028375", "0.60161096", "0.6010098", "0.6001699", "0.59973484", "0.5996545", "0.59660995", "0.5954514", "0.5948334", "0.5936381", "0.5933531", "0.5925132", "0.59147257", "0.58973986", "0.58795637", "0.5877946", "0.58593714", "0.5845587", "0.58401436", "0.5807578", "0.58055687", "0.58034724" ]
0.7177935
10
Provide only a project's name for the query and search through all available names
def searchClientProject(self, name):
    for client in self.getClients():
        try:
            for project in self.getClientProjects(client['id']):
                if project['name'] == name:
                    return project
        except Exception:
            continue

    print('Could not find a project with that name')
    return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_projects_by_name(self, name):\n projects = []\n for i in storage_utils.get_proj_ids(self._storage_location):\n project = self.find_project_by_id(i)\n if name.upper() in project.name.upper():\n projects.append(project)\n return projects", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def search(request):\n if 'find_project' in request.GET and request.GET['find_project']:\n project_name=request.GET.get('find_project')\n \n searched_project=Project.search_project(project_name)\n \n return render(request,'search_results.html',{'searched_project':searched_project})", "def project_search_json():\n q = request.args.get('q')\n if q is None or len(q) < 3:\n return jsonify(projects=[])\n limit = request.args.get('limit') or 10\n q = \"%%%s%%\" % q\n projects = Project.query.filter(or_(\n Project.name.like(q),\n Project.summary.like(q),\n Project.longtext.like(q),\n Project.autotext.like(q),\n )).limit(limit).all()\n projects = expand_project_urls(\n [p.data for p in projects],\n request.host_url\n )\n return jsonify(projects=projects)", "def project_by_name(self,project_name=''):\n logger.debug(f'project_by_name project_name={project_name}')\n return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))", "def project_search(self, **kwargs):\n\n return search_api(\"special_project_search\", **kwargs)", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = [constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if 
search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def get_projects_by_name(name): # noqa: E501\n\n value = ref.child(\"projects\").get()\n\n result = []\n for key in value:\n subkey = value[key]\n if 'name' in subkey and subkey['name'] is name:\n result.append(subkey['name'])\n return result", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def _get_projects(current_project_name):\n projects = []\n\n unique_project_changes = Change.objects.order_by().values(\n 'project_name').distinct()\n for change in unique_project_changes:\n projects.append(change['project_name'])\n\n # sort alphabetically\n projects.sort()\n\n # insert 'all' option as it should be present always\n projects.insert(0, PROJECT_ALL)\n\n # if current_project_name is valid, make it the first element in list so\n # that it shows up as selected in project choice drop down\n if current_project_name != PROJECT_ALL and current_project_name in projects:\n projects.remove(current_project_name)\n projects.insert(0, current_project_name)\n elif current_project_name != PROJECT_ALL:\n logging.error(\"Currently selected project %s not found in any changes.\"\n \" Removing from list.\", current_project_name)\n logging.debug(\"Returning list of projects: %r\", projects)\n return projects", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def search_key_for_project(project):\n elements = []\n 
elements.append(project['name'])\n elements.append(project['client'])\n elements.append(project['project_state'])\n elements.append(str(project['project_code']))\n return u' '.join(elements)", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def project_filter(filename):\n return 'projects' in filename", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def dir2name(name, projects):\n name = name.replace('_', '').lower()\n for project_name in projects:\n pname = project_name.lower().replace(' ', '').replace('-', '').lower()\n if name == pname:\n return project_name\n raise ValueError(('%s does not match any project' % (name)))", "def get_projects(selection):\n project=[]\n\n # project with keyname\n\n if 'project' in selection.facets:\n project+=selection.facets['project']\n\n\n # project without keyname\n\n # WARNING\n #\n # The code below uses sdinference and query the database to retrieve ESGF parameters.\n # Doing those things here may raise circular dependencies\n # as well as making the whole thing very complex.\n #\n # We do this to make this syntax work (i.e. project value without key)\n # synda search GeoMIP\n #\n # Note that this syntax always works (i.e. load the project level default file), even without this code.\n # synda search project=GeoMIP\n #\n #\n pending_projects=sdearlystreamutils.get_facet_values_early([selection.facets],'project',extract_item=True) # project without keyname or project as part of an identifier.\n\n li=pending_projects+project\n\n li=list(set(li)) # remove duplicate\n\n return li", "def get_one_project_by_name(ctx, project_name):\n pprint(ctx.obj.groups.byName[project_name].get().data)", "def get_project_name(projects, project_id):\n for project in projects:\n if project['id'] == project_id:\n return project['name']", "def test_list_project_request(self):\n pass", "def all_projects(request):\n\n game_projects = GameProject.objects.all()\n profile = get_object_or_404(Profile, user=request.user)\n query = None\n\n if 'q' in request.GET:\n query = request.GET['q']\n if not query:\n messages.error(request, \"You didn't enter any search criteria!\")\n return redirect(reverse('all_projects'))\n\n queries = Q(title__icontains=query) | Q(description__icontains=query) \\\n | Q(owner__user__username__icontains=query)\n game_projects = game_projects.filter(queries)\n\n for game_project in game_projects:\n game_project.total_amount = 0\n for order in Order.objects.filter(\n game_project=game_project).filter(status='PA'):\n game_project.total_amount += order.donation_item.amount\n\n template = 'gameproject/all_projects.html'\n context = {\n 'game_projects': game_projects,\n 'profile': profile,\n 'search_term': query\n }\n\n return render(request, template, context)", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def test_get_projects_expanded(self):\n pass", "def filter_projects(project_services):\n return [project for project, services in 
project_services.items() if \"Travis\" in services or \"GitHub\" in services]", "def get_project_ids(self, node=None, name=None):\n project_ids = []\n queries = []\n # Return all project_ids in the data commons if no node is provided or if node is program but no name provided\n if name == None and ((node == None) or (node == \"program\")):\n print(\"Getting all project_ids you have access to in the data commons.\")\n if node == \"program\":\n print(\n \"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs.\"\n )\n queries.append(\"\"\"{project (first:0){project_id}}\"\"\")\n elif name != None and node == \"program\":\n if isinstance(name, list):\n print(\n \"Getting all project_ids in the programs '\" + \",\".join(name) + \"'\"\n )\n for program_name in name:\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (program_name)\n )\n elif isinstance(name, str):\n print(\"Getting all project_ids in the program '\" + name + \"'\")\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (name)\n )\n elif isinstance(node, str) and isinstance(name, str):\n print(\n \"Getting all project_ids for projects with a path to record '\"\n + name\n + \"' in node '\"\n + node\n + \"'\"\n )\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"%s\",submitter_id:\"%s\"}){project_id}}\"\"\"\n % (node, name)\n )\n elif isinstance(node, str) and name == None:\n print(\n \"Getting all project_ids for projects with at least one record in the node '\"\n + node\n + \"'\"\n )\n query = \"\"\"{node (first:0,of_type:\"%s\"){project_id}}\"\"\" % (node)\n df = pd.json_normalize(self.sub.query(query)[\"data\"][\"node\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n if len(queries) > 0:\n for query in queries:\n res = self.sub.query(query)\n df = pd.json_normalize(res[\"data\"][\"project\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n my_ids = sorted(project_ids, key=str.lower)\n print(my_ids)\n return my_ids", "def filter_projects(self, query, check=True):\n page_projects = self._page_projects()\n\n page_projects.field_filter_projects.value = query\n page_projects.button_filter_projects.click()\n\n if check:\n\n def check_rows():\n is_present = False\n for row in page_projects.table_projects.rows:\n if not (row.is_present and\n query in row.link_project.value):\n break\n is_present = True\n\n return waiter.expect_that(is_present, equal_to(True))\n\n waiter.wait(check_rows,\n timeout_seconds=10,\n sleep_seconds=0.1)", "def test_list_project(self):\n pass", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def test_demo_project_call(self):\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_project_names()\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'romainducarrouge-31f2' in resp", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': 
project['name']})\n\n return project_names", "def test_get_projects(self):\n pass", "def fetch_project(search_info):\n search = search_collection.find_one({\"_id\": SEARCH_ID})\n user = user_collection.find_one({\"_id\": search_info[\"USER_ID\"]})\n user_bookmarks = user[\"bookmarks\"]\n user_contributions = user[\"contributions\"]\n user_outgoing = user[\"outgoing\"]\n try:\n project_id_list = search[search_info[\"search_query\"]]\n except KeyError:\n project_id_list = None\n except AttributeError:\n project_id_list = None\n if project_id_list != None:\n projects_list = list()\n for id in project_id_list:\n project = project_collection.find_one({\"_id\": id})\n if project == None:\n continue\n if user_bookmarks == None:\n project[\"bookmark\"] = False\n else:\n project[\"bookmark\"] = True if id in user_bookmarks else False\n if user_outgoing == None:\n project[\"contribution\"] = False\n\n else:\n project[\"contribution\"] = True if id in user_outgoing else False\n projects_list.append(project)\n return projects_list\n else:\n return []", "def getProjectName(self, projectId: int) -> str:\n query = f\"SELECT name FROM projects WHERE id = {projectId}\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result[0][0]", "def find_project(self, value, key=\"name\"):\n if not value:\n return\n if key.lower() not in (\"name\", \"id\"):\n raise ValueError()\n\n if key == \"name\" and not getattr(self, \"projects\", None):\n self.get_projects()\n elif key == \"id\" and not getattr(self, \"projects\", None):\n return self.get_project(value)\n\n try:\n if key.lower() == \"name\":\n return self.projects[self._project_indices_by_name[value]]\n elif key.lower() == \"id\":\n return self.projects[self._project_indices_by_id[value]]\n except KeyError:\n self.logger.debug(\"Project {}: {} not found\".format(key, value))", "def __ui_search_persons_by_name(self):\n searched_name = input(\"Introduce the name: \").strip().lower()\n if searched_name == \"\":\n print(\"You cannot search persons by an empty name!\\n\")\n return\n\n searched_persons = self.__person_service.find_persons_by_name(searched_name)\n\n if len(searched_persons) == 0:\n print('There is no person whose name contains \"{}\"!\\n'.format(searched_name))\n else:\n print(\"\")\n for person in searched_persons:\n print(person)\n print(\"\")", "def list_projects(arn=None, nextToken=None):\n pass", "def getProjectNames(self):\n con = self.getMetadataDatabaseConnection()\n result = con.cursor()\n con.cursor().callproc('get_project_names', [result])\n projnames = [row[0] for row in result]\n return projnames", "def build_repo_query(self,name):\n\n query = URL_ROOT + 'search/repositories?q=org:'+name+'&sort=forks'\n return query", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def get_project(self, name=None):\n if not name:\n name = self.get_project_name()\n projects = self.get_projects()\n for p in projects:\n if p.name == name:\n return p\n raise NotFound(name)", "def get_projects(self):\n response = self.request(verb=requests.get, address=\"projects\")\n # FIXME: if no 
results, must we raise an exception?\n return response[\"results\"] if \"results\" in response else response", "def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def list_projects():\n if '.wcscanner' not in os.listdir(context.__BASE_PATH__):\n return []\n return os.listdir(context.__PROJECTS_PATH__)", "def projects(self):\n sql = \"\"\"SELECT project\n FROM barcodes.sample\n LEFT JOIN barcodes.project_sample_sets USING (sample_set_id)\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n UNION\n SELECT project\n FROM barcodes.project_samples\n LEFT JOIN barcodes.project USING (project_id)\n WHERE sample_id = %s\n \"\"\"\n with pm.sql.TRN:\n pm.sql.TRN.add(sql, [self.id, self.id])\n projects = pm.sql.TRN.execute_fetchflatten()\n return None if not projects else projects", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def update_project_name(self, curr_proj, proj_new_name):\r\n for proj in self.__projects:\r\n if proj == curr_proj: # Find the project with the same current name\r\n proj.update_name(proj_new_name) # Update the project's name\r", "def project_name(self):\n pass", "def get(self, name):\n try:\n return self.projects[name]\n except KeyError:\n print(\"No project called %s was found\" %name)", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def fromname(cls, name):\n return Project.get_by_key_name(name.strip().lower())", "def get(self):\n try:\n user = None\n user_id = token_auth.current_user()\n if user_id:\n user = UserService.get_user_by_id(user_id)\n search_dto = self.setup_search_dto()\n results_dto = ProjectSearchService.search_projects(search_dto, user)\n return results_dto.to_primitive(), 200\n except NotFound:\n return {\"mapResults\": {}, \"results\": []}, 200\n except (KeyError, ValueError) as e:\n error_msg = f\"Projects GET - {str(e)}\"\n return {\"Error\": error_msg}, 400", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Projects.objects.filter(username = username).order_by('-id')", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def do_search(cs, args):\n resp, data = cs.searcher.search(args.query)\n project_fields = ['id', 'name', 'public']\n print(\"Find %d Projects: \" % len(data['project']))\n utils.print_list(\n data['project'], project_fields, formatters={}, sortby='id')\n repository_fields = [\n 'repository_name', 'project_name', 'project_id', 'project_public'\n ]\n print(\"\\n\")\n print(\"Find %d Repositories: \" % len(data['repository']))\n utils.print_list(\n data['repository'],\n repository_fields,\n formatters={},\n sortby='repository_name')", "def getProjectName():", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def projects(args):\n _projects = lib.get_projects(\n args.target, username=args.username, password=args.password\n )\n if _projects:\n print(\"\\n\".join(_projects))", "def 
getProjectByName(self, name):\n\n for project in self.__projects:\n if project.getName() == name:\n return project\n\n return None", "def project_in_vc(name):\n vc3_client = get_vc3_client()\n projects = vc3_client.listProjects()\n vc = vc3_client.getRequest(requestname=name)\n vc_owner_projects = []\n\n for project in projects:\n if vc.owner == project.owner:\n vc_owner_projects.append(project)\n\n for p in vc_owner_projects:\n if (session['name'] in p.members or session['name'] == p.owner):\n return True\n else:\n return False", "def get_projects_by_keyword(keyword): # noqa: E501\n\n result = []\n\n try:\n value = ref.child(\"keywords/%s\"%keyword).get()\n\n if 'project_index' in value:\n for key in value[\"project_index\"]:\n\n proj = ref.child(\"projects/-%s\"%key).get()\n if 'name' in proj:\n result.append(proj[\"name\"])\n except:\n result.append({\"response\":\"Failure in getting keywords projects, ID: %s\"%keyword})\n return result\n\n\n\n\n return result", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def find_project(xp, **kwargs):\n path = '/search/project'\n if 'schema' not in kwargs:\n kwargs['schema'] = ProjectCollection.SCHEMA\n tag_class = {'collection': ProjectCollection, 'project': ROProject}\n return _find(path, xp, tag_class, **kwargs)", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def multiple_projects():\n message = \"\"\"\nFound {} that match your change.\nSince there is no support for tracking changes in different\nprojects, try to add more attributes to focus on a specific change\nor set of changes.\n\"\"\".format(crayons.red(\"multiple different projects\"))\n return message", "def query_project(self):\n\n # Find stylesheets.\n found = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found = True\n print(filename)\n if not found:\n print(\"not found!\")", "def list(self):\n\n for name in self.projects:\n self.projects[name].show()\n print(\"\\n\")", "def do_projects(self, arg):\n args = shlex.split(arg)\n limit = 10\n from_date = to_date = ''\n if args:\n limit = 0\n try:\n from_date, to_date = helpers.parse_date_parameters(args)\n except ValueError, msg:\n print(msg)\n return\n projects = self.db.get_projects_with_activity_field(\n from_date, to_date, limit=limit)\n refined = map(lambda x: [\n x['pid'], x['name'],\n '[Active]' if x['active'] else '[closed]',\n datetime.datetime.strftime(x['created'], '%c').decode('utf8'),\n x['description']], projects)\n print(tabulate(refined, ['ID', 'Project', 'Activity', 'Created',\n 'Description']))", "def atlas_projects():\n pass", "def _get_projects_from_userinfo(\n userinfo: typing.Dict[str, typing.Any],\n) -> typing.List[typing.Any] | None:\n if \"sdConnectProjects\" in userinfo:\n # Remove the possibly existing \"project_\" prefix\n projects = [\n p.removeprefix(\"project_\") for p in userinfo[\"sdConnectProjects\"].split(\" \")\n ]\n # we add this check in case the claim 
`sdConnectProjects does not exist`\n # and we want to enforce this at deployment\n elif setd[\"sdconnect_enabled\"] and \"sdConnectProjects\" not in userinfo:\n projects = []\n else:\n return None\n\n if len(projects) == 0:\n # No project group information received, aborting\n raise aiohttp.web.HTTPUnauthorized(reason=\"User is not a member of any project.\")\n\n return projects", "def test_search(self):\n resp = self.client.get(\n reverse('profiles:search'),\n # search with a lowercase search term\n data={'search_term': 'Test Project'})\n\n # projects that matches the search term \"Test Project\"\n self.assertContains(resp, 'Test Project')\n self.assertContains(resp, 'Django developer')\n self.assertContains(resp, str(self.project))\n # various page information\n self.assertContains(resp, 'Projects')\n self.assertContains(resp, 'All Needs')\n self.assertContains(resp, 'Projects')\n\n self.assertTemplateUsed('homepage.html')", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )", "def test_get_project_list_with_no_projects(self):\n result = get_project_list()\n self.assertQuerysetEqual(result['projects'].object_list, [])\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def name_search(self, name, args=None, operator='ilike', limit=1000):\n args = self.compute_domain_args(args)\n recs = self.search([('name', operator, name)] + args, limit=limit)\n return recs.name_get()", "def parse_one_project(self, args, project_arg):\n project = self.linguist_worktree.get_linguist_project(project_arg, raises=True)\n return [project]", "def test_search_by_skill(self):\n resp = self.client.get(\n reverse('profiles:search_by_skill',\n kwargs={'skill': 'Django developer'}))\n\n # projects that matches the search term Django developer\n self.assertContains(resp, 'Test Project')\n self.assertContains(\n resp, '1 results were found with: Django developer'\n )\n self.assertContains(resp, str(self.project))\n # various page information\n self.assertContains(resp, 'Projects')\n self.assertContains(resp, 'All Needs')\n self.assertContains(resp, 'Projects')\n\n self.assertTemplateUsed('homepage.html')", "def test_get_project_list_with_tag_filter(self):\n # Add test projects.\n tag = 'tag1'\n projects_with_tag = [\n add_project(title='1', description='1', tags=[tag]),\n add_project(title='2', description='2', tags=[tag]),\n ]\n project_without_tag = add_project(title='3', description='3', tags=[])\n\n result = get_project_list(tag=tag)\n result_projects = result['projects'].object_list\n\n # Make sure only projects with tag are retrieved.\n for project_with_tag in projects_with_tag:\n self.assertTrue(project_with_tag in result_projects)\n self.assertFalse(project_without_tag in result_projects)\n self.assertEqual(len(result_projects), len(projects_with_tag))\n self.assertTrue(result['filtered'])\n self.assertEqual(result['tag'], tag)", "def search_by_name(self, name):\r\n return self.__filter(self.get_all_persons(), lambda x: 
name.lower().strip() in x.name.lower().strip())", "def getClientProject(self, clientName, projectName):\n for client in self.getClients():\n if client['name'] == clientName:\n cid = client['id']\n\n if not cid:\n print('Could not find such client name')\n return None\n\n for projct in self.getClientProjects(cid):\n if projct['name'] == projectName:\n pid = projct['id']\n\n if not pid:\n print('Could not find such project name')\n return None\n\n return self.getProject(pid)", "def get_project_id_by_name(self, name):\n\n for project in self.api.state['projects']:\n if project['name'] == name:\n return project['id']\n\n return None", "def _get_filtered_projects(filters):\n projects_itr = (projects_lib.get_filtered(f) for f in filters)\n return itertools.chain.from_iterable(projects_itr)", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def get_projects(self):\n ret = self.v1_projects.get()\n return [each.metadata.name for each in ret.items]", "def gcp_get_instances_by_name(project, name, raw=True, credentials=None):\n return gcp_filter_projects_instances(projects=[project],\n filters=['name eq {name}'.format(name=name.replace('*', '.*'))],\n raw=raw,\n credentials=credentials)", "def test_get_project_list_with_page_filter(self):\n # Add test projects.\n projects = [\n add_project(title=str(i), description=str(i)) for i in range(10)\n ]\n pages = {\n 1: projects[5:],\n 2: projects[:5],\n }\n\n # Check first page results.\n result = get_project_list(page=1)\n first_page_results = result['projects'].object_list\n for first_page_project in pages[1]:\n self.assertTrue(first_page_project in first_page_results)\n self.assertFalse(\n any(project in first_page_results for project in pages[2]))\n\n # Check second page results.\n result = get_project_list(page=2)\n second_page_results = result['projects'].object_list\n self.assertFalse(\n any(project in second_page_results for project in pages[1]))\n for second_page_project in pages[2]:\n self.assertTrue(second_page_project in second_page_results)", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects", "def search_project_or_study(obj_type):\n\n matches = []\n response = None\n\n try:\n if obj_type not in set([\"projects\", \"studies\"]):\n raise Exception(\"Invalid object type specified\")\n\n possible_filters = filters_d[obj_type]\n \n for f in file_dict[obj_type][\"valid\"].values():\n json_file = data_dir + f\n json_s = open(json_file, \"r\").read()\n json_obj = json.loads(json_s)\n add_to_matches = True\n\n for filter_name in possible_filters:\n filter_val = request.args.get(filter_name)\n if 
filter_val:\n if json_obj[filter_name] != filter_val:\n add_to_matches = False\n \n if add_to_matches:\n matches.append(json_s)\n\n response_body = \"[\" + \",\".join(matches) + \"]\"\n response = get_response(response_body, status=200)\n\n except Exception as e:\n print(\"bad request\")\n response_body = '''{\"message\": \"invalid resource '%s'\"}''' % obj_type\n response = get_response(response_body, status=400)\n\n return response", "def projects_view(request):\n\n # The projects to be displayed. Only the ones in which the logged in user is involved\n projects = request.user.projets.all().order_by('name')\n return render(request, 'projects.html', locals())", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def get_projects(self, _is_simple=False):\n req_url = f\"{self.url}/projects\"\n if _is_simple:\n req_url += \"?simple=true\"\n ret = requests.get(req_url, headers = self.req_header)\n return ret.json()", "def getAllWhereNameIs2(table, name, orgName):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table + \" WHERE name like'\" + name + \"%' and organisationId like (SELECT organisationId FROM Organisation WHERE name like '\" + orgName + \"' )\")\n\t\tob = cur.fetchall()\n\t\tif not ob:\n\t\t\treturn \"\"\n\t\telse:\n\t\t\tobje = ob[0]\n\t\t\treturn obje\n\t\tcon.commit()\n\t\tcon.close()\n\texcept:\n\t\tprint('Could not run function getAllWhereNameIs2 from DbController')", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")", "def project_name(self) -> typing.Optional[str]:\n return self._values.get(\"project_name\")" ]
[ "0.711047", "0.6944019", "0.6936128", "0.6779344", "0.6688481", "0.66640425", "0.6621097", "0.66138625", "0.6602506", "0.6593444", "0.6547937", "0.6496374", "0.6491965", "0.6417066", "0.6392265", "0.636261", "0.635305", "0.6313007", "0.62534356", "0.6243291", "0.62387365", "0.6233204", "0.6168169", "0.61327666", "0.60866684", "0.6074057", "0.6067167", "0.6023353", "0.60110205", "0.5997505", "0.59916896", "0.59916896", "0.59626615", "0.5954459", "0.59500176", "0.593446", "0.5923244", "0.590929", "0.59057826", "0.5903607", "0.5897293", "0.589529", "0.5860444", "0.5835395", "0.58300245", "0.58207357", "0.5816396", "0.5813427", "0.5808561", "0.58049434", "0.57919633", "0.57823634", "0.57813084", "0.57781184", "0.5770395", "0.57614654", "0.5755118", "0.57534796", "0.5750768", "0.5736093", "0.5733891", "0.5722694", "0.57153946", "0.57121015", "0.56958926", "0.5693271", "0.5691232", "0.56863505", "0.5682798", "0.56804353", "0.56799656", "0.56787336", "0.56551284", "0.5651466", "0.5649978", "0.5647471", "0.5642493", "0.5625171", "0.56230456", "0.5620155", "0.5615133", "0.5608713", "0.56049234", "0.56039184", "0.55986935", "0.5594741", "0.5592935", "0.5589284", "0.5579184", "0.55786514", "0.55670303", "0.5562261", "0.55619174", "0.553667", "0.5528201", "0.5523329", "0.55221254", "0.5504323", "0.54891443", "0.54891443" ]
0.6892241
3
Fast query given the Client's name and Project's name
def getClientProject(self, clientName, projectName):
    cid = None  # client id; stays None if no client name matches
    for client in self.getClients():
        if client['name'] == clientName:
            cid = client['id']

    if not cid:
        print('Could not find such client name')
        return None

    pid = None  # project id; stays None if no project name matches
    for project in self.getClientProjects(cid):
        if project['name'] == projectName:
            pid = project['id']

    if not pid:
        print('Could not find such project name')
        return None

    return self.getProject(pid)
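A minimal usage sketch for the document above, assuming a TogglPy-style API client: the `Toggl` class, its import path, `setAPIKey`, and the client/project names are assumptions for illustration, not values taken from this dataset row.

# Hypothetical usage sketch -- assumes a TogglPy-style client; token and names are placeholders
from toggl.TogglPy import Toggl  # assumed import path

toggl = Toggl()
toggl.setAPIKey("your-api-token")  # placeholder credential

# Looks up the client by name, then the project by name within that client
project = toggl.getClientProject("Acme Corp", "Website Redesign")
if project is not None:
    print(project)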
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def searchClientProject(self, name):\n for client in self.getClients():\n try:\n for project in self.getClientProjects(client['id']):\n if project['name'] == name:\n return project\n except Exception:\n continue\n\n print('Could not find client by the name')\n return None", "def get_client_projects(self, client=None):\n if type(client) is Client:\n return [p for p in self.project_list if client.client_id == p.client_id]", "def query_project(self, project_query_options):\n\n query = \"select * from project where \"\n row_names = [\"Proj_ID\", \"Cus_ID\", \"Emp_ID\", \"Proj_Date\",\n \"Proj_Descrpt\", \"Proj_EstDateSt\", \"Proj_EstDateEnd\",\n \"Proj_EstBudget\", \"Proj_ActDateSt\",\n \"Proj_ActDateEnd\", \"Proj_ActCost\"]\n\n entries = project_query_options\n options_index = []\n arguments = []\n\n index = 0\n for item in entries:\n if item is not None:\n arguments.append(item)\n options_index.append(index)\n index += 1\n\n count = 0\n for arg in arguments:\n if count == 0:\n query = query + \"{}='{}' \".format(\n row_names[options_index[count]],\n arg)\n else:\n query = query + \"and {}='{}' \".format(\n row_names[options_index[count]],\n arg)\n count += 1\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def project_by_name(self,project_name=''):\n logger.debug(f'project_by_name project_name={project_name}')\n return self.get('{}/groups/byName/{}'.format(ApiVersion.CM1.value,project_name))", "def run(self, query, project=\"odyssey-193217\"):\n\t\tfrom google.cloud import bigquery\n\t\tjob_config = bigquery.QueryJobConfig()\n\t\tclient = bigquery.Client(project=project)\n\t\tresult = client.query(query,job_config=job_config)\n\t\tjob_config.allowLargeResults = True\n\t\tresult.__done_timeout = 99999999\n\t\treturn list(result)", "def test_list_project_request(self):\n pass", "def search_key_for_project(project):\n elements = []\n elements.append(project['name'])\n elements.append(project['client'])\n elements.append(project['project_state'])\n elements.append(str(project['project_code']))\n return u' '.join(elements)", "def _to_client_query(self, client):\n ancestor_client_key = None\n if self.ancestor is not None:\n ancestor_client_key = self.ancestor.to_client_key()\n\n # Resolve ValueProvider arguments.\n self.filters = self._set_runtime_filters()\n if isinstance(self.namespace, ValueProvider):\n self.namespace = self.namespace.get()\n\n return query.Query(\n client,\n kind=self.kind,\n project=self.project,\n namespace=self.namespace,\n ancestor=ancestor_client_key,\n filters=self.filters,\n projection=self.projection,\n order=self.order,\n distinct_on=self.distinct_on)", "def get_projects():\n return Project.query.all()", "def get_one_project_by_name(ctx, project_name):\n pprint(ctx.obj.groups.byName[project_name].get().data)", "def test_get_projects(self):\n for project in ['TEST', 'NEWTEST', 'MYPROJECT']:\n self.db.insert_single_result(generate_mock_result(project=project))\n projects = self.db.get_projects()\n self.assertItemsEqual(['MYPROJECT', 'NEWTEST', 'TEST'], projects)", "def db_projects():\n return [{\"name\": \"IT\"}, {\"name\": \"Financial\"}, {\"name\": \"Failed\"}]", "def test_get_projects_filters(fc: fetcher.Fetcher, test_project_name):\n projects = fc.get_projects(test_project_name)\n assert isinstance(projects, list)\n assert len(projects) == 1\n assert projects[0].name == test_project_name", "def request_project_by_key(cfg, project_key):\n\n url = 
cjm.request.make_cj_url(cfg, \"project\", project_key)\n response = cjm.request.make_cj_request(cfg, url)\n return response.json()", "def get_all_projects(engine): \n # Query db\n# sql = (\"SELECT a.project_id, \"\n# \" b.o_number, \"\n# \" a.project_name, \"\n# \" a.project_description \"\n# \"FROM nivadatabase.projects a, \"\n# \" nivadatabase.projects_o_numbers b \"\n# \"WHERE a.project_id = b.project_id \"\n# \"ORDER BY a.project_id\")\n sql = (\"SELECT project_id, \"\n \" project_name, \"\n \" project_description \"\n \"FROM nivadatabase.projects \"\n \"ORDER BY project_id\")\n df = pd.read_sql(sql, engine)\n\n return df", "def test_list_project(self):\n pass", "def project_search_json():\n q = request.args.get('q')\n if q is None or len(q) < 3:\n return jsonify(projects=[])\n limit = request.args.get('limit') or 10\n q = \"%%%s%%\" % q\n projects = Project.query.filter(or_(\n Project.name.like(q),\n Project.summary.like(q),\n Project.longtext.like(q),\n Project.autotext.like(q),\n )).limit(limit).all()\n projects = expand_project_urls(\n [p.data for p in projects],\n request.host_url\n )\n return jsonify(projects=projects)", "def find(self, **kwargs):\n return super(ClientsTable, self).records('clients', **kwargs)", "def get_public_projects_query():\n return Q(access_policy=AccessPolicy.OPEN)", "def get_project_ids(self, node=None, name=None):\n project_ids = []\n queries = []\n # Return all project_ids in the data commons if no node is provided or if node is program but no name provided\n if name == None and ((node == None) or (node == \"program\")):\n print(\"Getting all project_ids you have access to in the data commons.\")\n if node == \"program\":\n print(\n \"Specify a list of program names (name = ['myprogram1','myprogram2']) to get only project_ids in particular programs.\"\n )\n queries.append(\"\"\"{project (first:0){project_id}}\"\"\")\n elif name != None and node == \"program\":\n if isinstance(name, list):\n print(\n \"Getting all project_ids in the programs '\" + \",\".join(name) + \"'\"\n )\n for program_name in name:\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (program_name)\n )\n elif isinstance(name, str):\n print(\"Getting all project_ids in the program '\" + name + \"'\")\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"program\",name:\"%s\"}){project_id}}\"\"\"\n % (name)\n )\n elif isinstance(node, str) and isinstance(name, str):\n print(\n \"Getting all project_ids for projects with a path to record '\"\n + name\n + \"' in node '\"\n + node\n + \"'\"\n )\n queries.append(\n \"\"\"{project (first:0, with_path_to:{type:\"%s\",submitter_id:\"%s\"}){project_id}}\"\"\"\n % (node, name)\n )\n elif isinstance(node, str) and name == None:\n print(\n \"Getting all project_ids for projects with at least one record in the node '\"\n + node\n + \"'\"\n )\n query = \"\"\"{node (first:0,of_type:\"%s\"){project_id}}\"\"\" % (node)\n df = pd.json_normalize(self.sub.query(query)[\"data\"][\"node\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n if len(queries) > 0:\n for query in queries:\n res = self.sub.query(query)\n df = pd.json_normalize(res[\"data\"][\"project\"])\n project_ids = project_ids + list(set(df[\"project_id\"]))\n my_ids = sorted(project_ids, key=str.lower)\n print(my_ids)\n return my_ids", "def _get_projects(project_ids):\n if _ALL in project_ids:\n return projects_lib.get_all()\n return projects_lib.get_selective(project_ids)", "def project_show(ctx, args):\n 
for project_id in args:\n data = ctx.obj.get_project_by_project_id(project_id)\n output_json_data(data)", "def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)", "def query_client(self, client_id):\n try:\n return self.client_model.objects.get(client_id=client_id)\n except self.client_model.DoesNotExist:\n return None", "def query_projects(request):\n try:\n filters = request.data\n except AttributeError:\n filters = FILTER\n projects = _get_projects(filters)\n projects_as_json = serializers.serialize('json', projects)\n return HttpResponse(json.dumps(projects_as_json), content_type='json')", "def get_user_projects(username):\n\n tx = cypher_transaction()\n query = \"\"\"\n MATCH (p:project)-[:OWNED_BY]->(u:user {username:{uname}})\n RETURN p\n \"\"\"\n tx.append(query, parameters={'uname': username})\n results = _first(tx.commit())\n projects = []\n for r in results:\n proj, = r.values\n print(\"* {0}\".format(proj['name']))\n projects.append(proj)\n return projects", "def test_get_projects(self):\n pass", "def get_project(name):\n tx = cypher_transaction()\n query = \"\"\"MATCH (n:project) WHERE n.name={project_name} RETURN n\"\"\"\n tx.append(query, parameters={'project_name': name})\n result = tx.commit()\n\n # Returns a result of the form [[\n # Record(\n # columns=('n',),\n # values=(Node('http://localhost:7474/db/data/node/233'),)\n # )\n # ]]\n return _first(result)[0].values[0]", "def find_projects(self, project_name: Optional[str] = None) -> List[Project]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from projects\n WHERE (?1 IS NULL OR project_name = ?1)\n \"\"\",\n (project_name,),\n )\n rows = c.fetchall()\n return [\n Project(self, str(r[\"project_id\"]), row=r, _used_new_call=True)\n for r in rows\n ]", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def list_projects():\n\n cmd = dict()\n cmd[\"type_\"] = \"list_projects\"\n cmd[\"name_\"] = \"\"\n \n s = comm.send_and_receive_socket(cmd)\n\n msg = comm.recv_string(s)\n\n if msg != \"Success!\":\n raise Exception(msg)\n \n json_str = comm.recv_string(s) \n \n s.close() \n\n return json.loads(json_str)[\"projects\"]", "def getClientJobsInformation(client):\n # getSlaveForDispatch()\n #jobs = mongo.db.jobs.find({'owner': client, 'is_active': True})\n jobs = mongo.db.jobs.find({'is_active': True})\n\n # result = i.title()\n # if any([s.get('status')=='on progress' for s in tasks]):\n # result = 'On Progress'\n # return result\n\n result = [{\n 'name': j.get('name'),\n 'datetime': j.get('datetime'),\n 'status': getJobStatus(j),\n 'priority': j.get('priority'),\n 'progress': sum([t.get('progress') for t in mongo.db.tasks.find({'job': j.get('_id')})]) /\n (mongo.db.tasks.find({'job': j.get('_id')}).count() or -1),\n 'id': str(j.get('_id')),\n 'tasks_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True}).count(),\n 'failed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 'failed'}).count(),\n 'completed_count': mongo.db.tasks.find({'job': j.get('_id'), 'is_active': True, 'status': 'completed'}).count(),\n 'active_task': 'Frame 43',\n } for j in jobs]\n return result or {}", "def do_search(cs, args):\n resp, data = cs.searcher.search(args.query)\n project_fields = ['id', 'name', 'public']\n 
print(\"Find %d Projects: \" % len(data['project']))\n utils.print_list(\n data['project'], project_fields, formatters={}, sortby='id')\n repository_fields = [\n 'repository_name', 'project_name', 'project_id', 'project_public'\n ]\n print(\"\\n\")\n print(\"Find %d Repositories: \" % len(data['repository']))\n utils.print_list(\n data['repository'],\n repository_fields,\n formatters={},\n sortby='repository_name')", "def get_projects(session):\n cursuses = [1, 21] # cursus ids from which to get the projects\n project_names = []\n\n for cursus in cursuses:\n # Get all the projects from 1 cursus, very slow process because projects endpoint contains\n # a lot of information\n projects = get_all_pages(session, f'/cursus/{cursus}/projects', 100, {'filter[exam]': False})\n for project in projects:\n # Create dictionary containing project id and project name ans set in bigger dict\n project_names.append({'id': project['id'], 'name': project['name']})\n\n return project_names", "def get_project_stations(proj_df, engine, drop_dups=False): \n # Get proj IDs\n assert len(proj_df) > 0, 'ERROR: Please select at least one project.'\n proj_df['project_id'].drop_duplicates(inplace=True)\n proj_ids = proj_df['project_id'].values.astype(int).tolist()\n\n # Query db\n bind_pars = ','.join(':%d' % i for i in range(len(proj_ids))) \n\n sql = (\"SELECT DISTINCT a.station_id, \"\n \" a.station_code, \"\n \" a.station_name, \"\n \" c.station_type, \"\n \" d.longitude, \"\n \" d.latitude \"\n \"FROM nivadatabase.projects_stations a, \"\n \" nivadatabase.stations b, \"\n \" nivadatabase.station_types c, \"\n \" niva_geometry.sample_points d \"\n \"WHERE a.station_id IN \"\n \" (SELECT station_id \"\n \" FROM nivadatabase.projects_stations \"\n \" WHERE project_id IN (%s) \"\n \" ) \" \n \"AND a.station_id = b.station_id \"\n \"AND b.station_type_id = c.station_type_id \"\n \"AND b.geom_ref_id = d.sample_point_id \"\n \"ORDER BY a.station_id\" % bind_pars)\n df = pd.read_sql(sql, params=proj_ids, con=engine)\n\n # Drop duplictaes, if desired\n if drop_dups:\n df.drop_duplicates(subset='station_id', inplace=True)\n \n return df", "def get_queryset(self):\r\n username = self.kwargs['username']\r\n return models.Projects.objects.filter(username = username).order_by('-id')", "def project_list(cursor):\n query = \"SELECT * FROM projects\"\n try:\n cursor.execute(query, {},)\n except Exception as e:\n on_error(e)\n else:\n projects = cursor.fetchall()\n raise Return((projects, None))", "def get_projects(self):\n res = self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def get_client_data(client_name):\n log.debug('starting get_client_data')\n clients = wf.cached_data('clients', None, max_age=0)\n\n # Loop through clients and return client with a match\n for client in clients:\n if client['name'] == client_name:\n log.debug('get_client_id finished, client_data: ' + str(client))\n return client", "def test_list_projects(self):\n pass", "def test_list_projects(self):\n pass", "def get_all_project_records():\r\n records = flask.request.db_api.get_all_project_record()\r\n return flask.jsonify(records=records)", "def get_projects_of_user(self, user_id):\n res = self.conn.cursor().execute(\"\"\"SELECT * FROM projects p JOIN users_projects up \n ON p.id = up.project_id \n WHERE owner=? 
OR up.user_id=?\n GROUP BY p.id\n ORDER BY last_update DESC\"\"\", (user_id, user_id,))\n return res.fetchall()", "def get_projects():\n data = sql.list_projects()\n names = [(d['id'], d['name']) for d in data]\n return names", "def getProjectsQueryForOrgs(org_keys):\n query = getProjectsQuery()\n query.filter('org IN', org_keys)\n return query", "def getProjectsQueryForEvalForOrgs(org_keys):\n query = getProjectsQueryForOrgs(org_keys)\n query.filter(\n 'status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return query", "def query(statement, project, **kwargs):\n\n with bqapi.connect(project) as conn:\n return conn.execute(statement, **kwargs).fetchall()", "def do_project_list(cs, args):\n _, projects = cs.projects.list()\n fields = [\n 'project_id',\n 'name',\n 'owner_id',\n 'current_user_role_id',\n 'repo_count',\n 'creation_time',\n 'public',\n ]\n utils.print_list(projects, fields, formatters={}, sortby=args.sortby)", "def getProjectsQuery(keys_only=False, ancestor=None, **properties):\n q = db.Query(project_model.GSoCProject, keys_only=keys_only)\n\n if ancestor:\n q.ancestor(ancestor)\n\n for k, v in properties.items():\n q.filter(k, v)\n\n return q", "def test_get_project(self):\n pass", "def _get_project_by_manager(userid):\n return Project.objects.filter(project_open=True, manager=userid).order_by(\n \"created_at\"\n )", "def project_search(self, **kwargs):\n\n return search_api(\"special_project_search\", **kwargs)", "def getProjectsQueryForEval(keys_only=False, ancestor=None, **properties):\n q = getProjectsQuery(keys_only, ancestor, **properties)\n q.filter('status IN', [project_model.STATUS_ACCEPTED, 'failed', 'completed'])\n return q", "def test_project_custodians(self):\n user_1, user_2 = factories.UserFactory.create_batch(2)\n project = self.project_1\n project.custodians.add(user_1)\n expected_users = [self.custodian_1_user, user_1]\n for user in expected_users:\n self.assertTrue(project.is_custodian(user))\n self.assertFalse(project.is_custodian(user_2))\n\n # test by project id\n url = reverse('api:user-list')\n client = self.custodian_1_client\n filters = {\n 'project__id': project.id\n }\n resp = client.get(url, filters)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n users = resp.json()\n self.assertEqual(len(users), len(expected_users))\n self.assertEqual(sorted([u['id'] for u in users]), sorted([u.id for u in expected_users]))\n\n # test by project name\n url = reverse('api:user-list')\n client = self.custodian_1_client\n filters = {\n 'project__name': project.name\n }\n resp = client.get(url, filters)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n users = resp.json()\n self.assertEqual(len(users), len(expected_users))\n self.assertEqual(sorted([u['id'] for u in users]), sorted([u.id for u in expected_users]))\n\n # test by project code\n url = reverse('api:user-list')\n client = self.custodian_1_client\n filters = {\n 'project__code': project.code\n }\n resp = client.get(url, filters)\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n users = resp.json()\n self.assertEqual(len(users), len(expected_users))\n self.assertEqual(sorted([u['id'] for u in users]), sorted([u.id for u in expected_users]))", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def test_get_project_list_with_projects(self):\n # Add two test projects.\n projects = [\n add_project(title='1', description='1'),\n add_project(title='2', description='2'),\n ]\n\n result = get_project_list()\n 
result_projects = result['projects'].object_list\n\n # Make sure two test projects are retrieved.\n for project in projects:\n self.assertTrue(project in result_projects)\n self.assertEqual(len(result_projects), len(projects))\n self.assertIsNone(result['tag'])\n self.assertFalse(result['filtered'])", "def fetch_project(search_info):\n search = search_collection.find_one({\"_id\": SEARCH_ID})\n user = user_collection.find_one({\"_id\": search_info[\"USER_ID\"]})\n user_bookmarks = user[\"bookmarks\"]\n user_contributions = user[\"contributions\"]\n user_outgoing = user[\"outgoing\"]\n try:\n project_id_list = search[search_info[\"search_query\"]]\n except KeyError:\n project_id_list = None\n except AttributeError:\n project_id_list = None\n if project_id_list != None:\n projects_list = list()\n for id in project_id_list:\n project = project_collection.find_one({\"_id\": id})\n if project == None:\n continue\n if user_bookmarks == None:\n project[\"bookmark\"] = False\n else:\n project[\"bookmark\"] = True if id in user_bookmarks else False\n if user_outgoing == None:\n project[\"contribution\"] = False\n\n else:\n project[\"contribution\"] = True if id in user_outgoing else False\n projects_list.append(project)\n return projects_list\n else:\n return []", "def get_projects(self, *criterion):\n from wkcdd.models.helpers import get_project_list\n return get_project_list([self.id], *criterion)", "def query3() :", "def get_credentialed_projects_query(user):\n dua_signatures = DUASignature.objects.filter(user=user)\n\n completed_training = (\n Training.objects.get_valid()\n .filter(user=user)\n .values_list(\"training_type\")\n )\n not_completed_training = TrainingType.objects.exclude(pk__in=completed_training)\n required_training_complete = ~Q(required_trainings__in=not_completed_training)\n\n accepted_data_access_requests = DataAccessRequest.objects.filter(\n requester=user, status=DataAccessRequest.ACCEPT_REQUEST_VALUE\n )\n\n contributor_review_with_access = Q(\n access_policy=AccessPolicy.CONTRIBUTOR_REVIEW\n ) & Q(data_access_requests__in=accepted_data_access_requests)\n\n credentialed_with_dua_signed = Q(\n access_policy=AccessPolicy.CREDENTIALED\n ) & Q(duasignature__in=dua_signatures)\n\n query = required_training_complete & (\n contributor_review_with_access | credentialed_with_dua_signed\n )\n return query", "def projects(request):\n projects = (\n Project.objects.visible()\n .visible_for(request.user)\n .prefetch_related(\"latest_translation__user\")\n .order_by(\"name\")\n )\n\n if not projects:\n return render(request, \"no_projects.html\", {\"title\": \"Projects\"})\n\n return render(\n request,\n \"projects/projects.html\",\n {\"projects\": projects, \"top_instances\": projects.get_top_instances()},\n )", "def test_find_multi_one(self):\n result = Project.objects.find(['project', 'ThisFails'])\n self.assertEqual(len(result), 1)\n self.assertEqual(result[0], self.project)", "def get_project_name(self, project_id):\n test = \"\"\"SELECT EXISTS(\n SELECT 1\n FROM barcodes.project\n WHERE project_id=%s\n )\"\"\"\n query = \"\"\"SELECT project\n FROM barcodes.project\n WHERE project_id=%s\"\"\"\n\n with self._transaction.cursor() as cur:\n cur.execute(test, [project_id, ])\n if not cur.fetchone()[0]:\n raise NotFound(f\"Project f'{project_id}' not found\")\n else:\n cur.execute(query, [project_id, ])\n return cur.fetchone()[0]", "def project_all(request, format=None):\n if request.method == 'GET':\n projects = Project.objects.all().order_by('key')\n serializer = 
ProjectSerializer(projects, many=True)\n return Response(serializer.data)", "def test_get_projects_returns_projects(fc: fetcher.Fetcher):\n projects = fc.get_projects()\n assert isinstance(projects, list)\n assert isinstance(projects[0], models.Project)", "def query(self):", "def filter(self, name=''):\n name = name.strip()\n\n # Base query withour filtering\n app = App.get_running_app()\n query = app.session.query(Client.name, Client.id)\n\n # Filter name, if a non empty was specified.\n if name:\n query = query.filter(Client.name.ilike(f'%{name}%'))\n\n # Update RecycleView data\n clients = [{'name': c[0], 'client_id': c[1]} for c in query.all()]\n self.ids['clients'].data = clients", "def get_elements(self):\n query = f\"select name, middle_name, last_name, age from `{self.table_id}`\"\n query_job = self.client.query(query)\n clients = []\n for row in query_job:\n print('Name', row['name'], 'middle name:', row['middle_name'], 'last name: ',row['last_name'], 'age:', row['age'])\n clients.append(Client(row['name'],row['middle_name'],row['last_name'],row['age']))\n return clients", "def selectable_projects():\n\n db = current.db\n s3db = current.s3db\n\n # Lookup projects with provider self-registration\n ptable = s3db.project_project\n ttable = s3db.project_project_tag\n join = ttable.on((ttable.project_id == ptable.id) & \\\n (ttable.tag == \"APPLY\") & \\\n (ttable.value == \"Y\") & \\\n (ttable.deleted == False))\n query = (ptable.deleted == False)\n rows = db(query).select(ptable.id,\n ptable.name,\n join = join,\n )\n projects = {row.id: row.name for row in rows}\n return projects", "def cr_list(request, target, project, format=None):\n if request.method == 'GET':\n cr = CR.objects.filter(target=target, key__startswith=project + '-').order_by('keynum')\n serializer = CRSerializer(cr, many=True)\n return Response(serializer.data)", "def test_demo_project_call(self):\n resp = DemoAivenStorage(os.environ[\"AIVEN_API_URL\"],\n os.environ[\"AIVEN_TOKEN\"]).get_project_names()\n assert isinstance(resp, list)\n assert len(resp) == 1\n assert 'romainducarrouge-31f2' in resp", "def get_for(user):\n return Project.objects.filter(\n user_group__members=user\n ).distinct()", "def pull_jobs(start_date, end_date=datetime.now(), limit=50000,\n project=['proj_codem']):\n logger.info(\"Pulling jobs from QPID API.\")\n logger.info(f\"Checking {start_date} to {end_date}\")\n dfs = []\n for p in project:\n logger.info(f\"Checking project {p}\")\n jobs = requests.get(QPID_API.format(cluster='fair'), params={\n 'limit': limit,\n 'project': p,\n 'ran_after': start_date,\n 'job_prefix': 'cod_'\n }).json()\n df = pd.DataFrame(jobs)\n dfs.append(df)\n df2 = pd.concat(dfs)\n return df2", "def fetch_all(): \n client, index_name = connection_es()\n res = client.search(index = index_name+\"*\")\n return res", "def test_get_current(self, rf, projects):\n # get queryset\n request = rf.get(\"/projects/my\")\n view = MyProjectListView()\n view.setup(request)\n view.dispatch(request)\n view.get_queryset()\n qs = view.get_current()\n\n # slavic working group grant ended so it is \"past\"\n assert projects[\"derrida\"] in qs\n assert projects[\"pliny\"] in qs\n assert projects[\"ocampo\"] in qs\n assert projects[\"slavic\"] not in qs", "def search(request, is_my_list=\"False\"):\n\n search_type = request.GET.get(\"submit\")\n if search_type:\n\n # get query field\n query = ''\n if request.GET.get(search_type):\n query = request.GET.get(search_type)\n\n proj_ids = []\n cod_ids = []\n\n valid_searches = 
[constants.STRING_TITLE, constants.STRING_DESCRIPTION, constants.STRING_PROTOCOL,\n constants.STRING_CODER, constants.STRING_AREA, constants.STRING_WORKINGGROUP]\n\n search_in_all = True\n for v in valid_searches:\n if v in request.GET:\n search_in_all = False\n break\n\n if search_in_all or request.GET.get(constants.STRING_TITLE):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.title.lower():\n cod_ids.append(cod.id)\n\n if search_in_all or request.GET.get(constants.STRING_DESCRIPTION):\n codings = CodingProject.objects.all()\n for cod in codings:\n if query.lower() in cod.additional_information.lower():\n cod_ids.append(cod.id)\n\n if request.GET.get(constants.STRING_PROTOCOL):\n proj_ids += ProjectContainer.objects.filter(protocol__icontains=query).values_list('id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_CODER):\n for pr in ProjectContainer.objects.all():\n for cd in pr.codings.all():\n user = Person.objects.using('datatracker').get(id=cd.coder)\n if query.lower() in user.name.lower():\n proj_ids.append(pr.id)\n break\n\n if search_in_all or request.GET.get(constants.STRING_AREA):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(DocAlias.objects.using('datatracker').filter(name__in=keys).values_list(\n 'document__group__parent__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n # ids += ProjectContainer.objects.filter(docs__document__group__parent__name__icontains=query).values_list(\n # 'id', flat=True)\n\n if search_in_all or request.GET.get(constants.STRING_WORKINGGROUP):\n for project_container in ProjectContainer.objects.all():\n docs = []\n if not project_container.docs or project_container.docs == '':\n continue\n keys = filter(None, project_container.docs.split(';'))\n docs.extend(list(\n DocAlias.objects.using('datatracker').filter(name__in=keys).values_list('document__group__name')))\n for doc in docs:\n if query.lower() in doc[0].lower():\n proj_ids.append(project_container.id)\n break\n \n if cod_ids:\n cod_ids = list(set(cod_ids))\n proj_ids += ProjectContainer.objects.filter(codings__id__in=cod_ids).values_list('id', flat=True)\n project_containers = ProjectContainer.objects.filter(id__in=list(set(proj_ids)))\n \n request.session[constants.ALL_CODINGS] = cod_ids\n request.session[constants.ALL_PROJECTS] = project_containers\n\n request.session[constants.MAINTAIN_STATE] = True\n\n return HttpResponseRedirect(\n settings.CODESTAND_PREFIX + '/codestand/matches/show_list/' + \n is_my_list + '/{0}/'.format(constants.ATT_CREATION_DATE) + 'True')\n\n else:\n return render_page(request, constants.TEMPLATE_MATCHES_SEARCH, {\n \"form\": SearchForm()\n })", "def gcp_get_instances_by_name(project, name, raw=True, credentials=None):\n return gcp_filter_projects_instances(projects=[project],\n filters=['name eq {name}'.format(name=name.replace('*', '.*'))],\n raw=raw,\n credentials=credentials)", "async def get_objects(conn: Database, query):\n return await conn.fetch_all(query=query)", "def atlas_projects():\n pass", "def get_project(self, project):\n return Dict(self.projects.get_entry(pk=project, _fields=[\"_all\"]).result())", "def query(project, query, name, head):\n if not database_exists(name):\n return\n if query is None and project:\n if project in project_dict:\n query = 
f\"language:{project_dict[project]},sort:stars-desc:archived=False\"\n else:\n print(f\"Unknown project {project}\")\n return\n load_dotenv()\n try:\n print(\"Querying github...\", end=\"\")\n g = Github(os.getenv('github'))\n repositories = g.search_repositories(query=query)\n print(f\"got {repositories.totalCount} repositories.\")\n except BadCredentialsException as e:\n print(e)\n connection = sqlite3.connect(name)\n cursor = connection.cursor()\n cursor.execute('DELETE FROM query_results')\n print(\"Adding to database:\")\n for count, repo in tqdm(enumerate(repositories)):\n if count >= int(head):\n break\n insert_query_string = \"\"\"\n REPLACE INTO query_results\n (\n project,\n repository_owner,\n repository_name,\n query_string,\n query_timestamp,\n clone_url\n )\n VALUES\n (?, ?, ?, ?, ?, ?)\n \"\"\"\n query_timestamp = datetime.utcnow().isoformat()\n cursor.execute(insert_query_string, (project,\n repo.owner.login,\n repo.name,\n query,\n query_timestamp,\n repo.clone_url))\n connection.commit()\n connection.close()", "def get_clients():\n data = DataTable(document=ClientDocument, schema=ClientSchema).get_data\n return Response(data=data).send()", "def project_by_title(project_title):\n\n QUERY = \"\"\"SELECT title, description \n FROM Projects \n WHERE title = ?\"\"\"\n\n db_cursor.execute(QUERY, (project_title,))\n\n answer = db_cursor.fetchone() \n \n print answer[0], \":\", answer[1]\n # print \"Title: %s, Description: %s\" % answer[0], answer[1]", "def search_clients(keywords, business_id, limit=5):\n cleaned_input = clean_data(keywords)\n\n # search_data = pd.DataFrame(data, columns=data.keys())\n client_data = pd.read_sql(\n sql=\"SELECT id, name, email, phone, business_id FROM client where business_id={} and email != 'anonymous@hidden.com';\".format(\n business_id), con=db.engine, index_col='id')\n client_data = searchable_data(client_data, ['name', 'email', 'phone'])\n\n matching_results = fuzzy_match(cleaned_input, client_data.loc[:, \"searchable_data\"], fuzz.token_set_ratio, limit)\n # x[2] is the id of the client in order of matchingness. 
I hope.\n return [Client.query.get(x[2]) for x in matching_results]", "def search(request):\n if 'find_project' in request.GET and request.GET['find_project']:\n project_name=request.GET.get('find_project')\n \n searched_project=Project.search_project(project_name)\n \n return render(request,'search_results.html',{'searched_project':searched_project})", "def test_show_project_list(self):\n fake_project = FakeResource(1)\n\n # This mocks is faking keystone retrieving a defined list of\n # projects\n patch('identity.views.Keystone.project_list',\n Mock(return_value=[fake_project])).start()\n\n render_mock = patch(\n 'identity.views.ListProjectView.render_to_response').start()\n\n response = self.view(self.request)\n\n render_args = render_mock.call_args[0][0]\n computed = render_args['projects'][0]\n\n self.assertEqual(computed, fake_project.to_dict())", "async def get_all_record():\n # X_new = item.to_df()\n # item_str = item.to_string()\n # project_code = int(item_str[item_str.find('=')+1:])\n pg = PostgreSQL()\n return_json = pg.fetch_all_records()\n return return_json", "def get_client(self, clientname):\n client = self.dbsession.query(Client).filter_by(clientname=clientname).all()\n if not client:\n return self.create_client({'clientname': clientname})\n else:\n return client[0]", "def test_get_projects(client, session, models, tokens):\n response = client.get(\n \"/projects\", headers={\"Authorization\": f\"Bearer {tokens['read']}\"}\n )\n assert response.status_code == 200\n assert len(response.json) > 0", "def get(self):\n try:\n user = None\n user_id = token_auth.current_user()\n if user_id:\n user = UserService.get_user_by_id(user_id)\n search_dto = self.setup_search_dto()\n results_dto = ProjectSearchService.search_projects(search_dto, user)\n return results_dto.to_primitive(), 200\n except NotFound:\n return {\"mapResults\": {}, \"results\": []}, 200\n except (KeyError, ValueError) as e:\n error_msg = f\"Projects GET - {str(e)}\"\n return {\"Error\": error_msg}, 400", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def get_projects_by_username(username): # noqa: E501\n result = []\n try:\n value = ref.child(\"users/%s\"%username).get()\n\n if 'project_index' in value:\n for key in value[\"project_index\"]:\n\n proj = ref.child(\"projects/%s\"%key).get()\n if proj is None:\n continue\n else:\n proj[\"project_id\"] = key\n result.append(proj)\n except:\n result.append({\"response\":\"Failure in getting users projects, ID: %s\"%username})\n return result", "def get_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM projects where id=?\", (project_id,))\n return res.fetchone()", "def get_project_by_title(title):\n QUERY = \"\"\"\n SELECT * FROM Projects WHERE title = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (title,))\n row = db_cursor.fetchone()\n print \"Project: %s \\nID: %s \\nTitle: %s \\nDescription: %s \\nMax Grade: %s\" % (\n title, row[0], row[1], row[2], row[3])", "def get_station_projects(stn_df, proj_df, engine): \n # Get stn IDs\n assert len(stn_df) > 0, 'ERROR: Please select at least one station.'\n stn_df['station_id'].drop_duplicates(inplace=True)\n stn_ids = stn_df['station_id'].values.astype(int).tolist()\n\n # Get proj IDs\n assert len(proj_df) > 0, 'ERROR: At least one project must already be selected.'\n proj_df['project_id'].drop_duplicates(inplace=True)\n proj_ids = 
proj_df['project_id'].values.astype(int).tolist() \n\n # Number from 0 to n_stns\n bind_stns = ','.join(':%d' % i for i in range(len(stn_ids)))\n \n # Number from n_stns to (n_stns+n_projs)\n bind_prjs = ','.join(':%d' % i for i in range(len(stn_ids), \n len(stn_ids) + len(proj_ids)))\n \n # Query db\n sql = (\"SELECT a.project_id, \"\n \" b.o_number, \"\n \" a.project_name, \"\n \" a.project_description \"\n \"FROM nivadatabase.projects a, \"\n \" nivadatabase.projects_o_numbers b \"\n \"WHERE a.project_id = b.project_id \"\n \"AND a.project_id IN \"\n \" (SELECT project_id \"\n \" FROM nivadatabase.projects_stations \"\n \" WHERE station_id IN (%s) \"\n \" AND project_id IN (%s) \"\n \" ) \"\n \"ORDER BY a.project_id\" % (bind_stns, bind_prjs))\n\n bind_dict = {'%d' % idx:item for idx, item in enumerate(stn_ids)}\n bind_prj_dict = {'%d' % (idx + len(stn_ids)):item \n for idx, item in enumerate(proj_ids)}\n bind_dict.update(bind_prj_dict) \n df = pd.read_sql(sql, params=bind_dict, con=engine)\n \n return df", "def get(self, *args, **kwargs):\n if 'user' not in kwargs:\n self.raise401()\n\n user = kwargs['user']\n if args:\n path = parse_path(args[0])\n project = Project.objects(name=path[0]).first()\n if not project:\n self.raise404()\n if project and user not in project.members:\n self.raise401()\n project_data = document_to_json(project, filter_set=_FILTER)\n else:\n team_name = self.get_argument('team', None)\n limit = self.get_argument('limit', None)\n start = self.get_argument('start', None)\n try:\n team_name = parse_path(team_name)[0]\n except IndexError:\n team_name = None\n try:\n limit = int(limit)\n except Exception:\n limit = None\n try:\n start = int(start)\n except Exception:\n start = None\n if team_name:\n team = Team.objects(name=team_name).first()\n if not team:\n self.raise404()\n if user not in team.members:\n self.raise403()\n project = Project.objects(teams__in=[team])\n else:\n project = Project.objects(members__in=[user])\n if limit and start:\n project = project[start:start + limit]\n elif limit:\n project = project[:limit]\n elif start:\n project = project[start:]\n project_data = query_to_json(project, filter_set=_FILTER)\n self.write(project_data)", "def find_project(xp, **kwargs):\n path = '/search/project'\n if 'schema' not in kwargs:\n kwargs['schema'] = ProjectCollection.SCHEMA\n tag_class = {'collection': ProjectCollection, 'project': ROProject}\n return _find(path, xp, tag_class, **kwargs)", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def get_projects():\n if current_user.get_id() is None:\n return\n with database.engine.begin() as connection:\n result = connection.execute(select(\n [models.projects.c.project_id, models.projects.c.name, models.projects.c.path, models.projects.c.creation_date, models.projects.c.user_id, func.count(models.objects.c.object_id).label('object_count')])\n .select_from(models.projects.outerjoin(models.objects))\n .where(and_(models.projects.c.active == True, models.projects.c.user_id == current_user.id))\n .group_by(models.projects.c.project_id)\n .order_by(models.projects.c.project_id))\n projects = [dict(row) for row in result]\n for project in projects:\n user = models.User.query.filter_by(\n id=project['user_id']).first()\n if user:\n project['email'] = user.email\n return projects", "def get_project(projectname):\n return jsonify(admin.get_project_info(current_app.scoped_session(), projectname))" ]
[ "0.68639714", "0.61412024", "0.5989478", "0.5867039", "0.5781275", "0.5674645", "0.566956", "0.56622523", "0.5642613", "0.5634638", "0.55459017", "0.5535563", "0.5528021", "0.5521917", "0.5521528", "0.54804444", "0.5467839", "0.5415431", "0.5400909", "0.53992367", "0.53678286", "0.53645825", "0.53416485", "0.5323191", "0.5322763", "0.5321236", "0.5310382", "0.53022796", "0.5294313", "0.52851266", "0.5282511", "0.52753633", "0.52729917", "0.5270023", "0.52471024", "0.5221728", "0.5213666", "0.52120715", "0.5204632", "0.5189342", "0.5189342", "0.5185613", "0.5163355", "0.51532376", "0.5151157", "0.51494664", "0.514096", "0.51381904", "0.51335853", "0.51158386", "0.5109608", "0.5109245", "0.51088625", "0.5107181", "0.5105468", "0.51027423", "0.5088033", "0.50874114", "0.50831574", "0.50815696", "0.50603855", "0.5056147", "0.5050783", "0.50477797", "0.50472635", "0.5045057", "0.5038406", "0.50309527", "0.50235057", "0.5021703", "0.50209147", "0.50068414", "0.49998313", "0.4989336", "0.49879214", "0.49863625", "0.4984674", "0.4973698", "0.49726215", "0.4969211", "0.49671736", "0.49637172", "0.4958786", "0.49542865", "0.49504617", "0.49490705", "0.49474466", "0.49442467", "0.49434656", "0.4941335", "0.4939632", "0.4934604", "0.49303278", "0.4927393", "0.492493", "0.4923441", "0.49202102", "0.49199286", "0.49190953", "0.49128923" ]
0.6326702
1
return all tasks of a given project
def getProjectTasks(self, pid, archived=False):
    # NOTE: `archived` is accepted but not used when building this request.
    return self.request(Endpoints.PROJECTS + '/{0}'.format(pid) + '/tasks')
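A matching usage sketch under the same assumed TogglPy-style client from the previous sketch; the project id is illustrative, and the `or []` guard hedges against the API returning null when a project has no tasks.

# Hypothetical usage sketch (same assumed `toggl` client as above; 123456 is an illustrative id)
tasks = toggl.getProjectTasks(pid=123456)
for task in (tasks or []):  # guard: the endpoint may return None for an empty project
    print(task)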
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_tasks_of_project(self, project_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE project_id=? ORDER BY project_order\", (project_id,))\n return res.fetchall()", "def get_tasks_list(project_id):\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n permission = has_project_permission(project, g.user)\n return jsonify(\n {\n \"success\": True,\n \"result\": {\n 'created_tasks': tasks_schema.dump(Task.query.filter_by(created_by_id = g.user.id).all()),\n 'tasks_you_work_on': tasks_schema.dump(g.user.tasks).all(),\n 'all': tasks_schema.dump(Task.query.filter(or_(\n Task.created_by_id==g.user.id, Task.project_id==g.user.project.id\n )).all()),\n },\n \"message\": \"Successfully fetched all tasks.\",\n }\n )", "def get_all_tasks(self):\n \n sql = \"select * from tasks;\"\n return self._query_all(sql)", "def get_tasks(self):\n return self.tasks.all()", "def get_all_tasks(self):\r\n\t\twith self.conn:\r\n\t\t\tself.c.execute(\"\"\"SELECT task FROM goals\"\"\")\r\n\t\t\ttup_list = self.c.fetchall()\r\n\t\treturn [tup[0] for tup in tup_list]", "def get_all_projects_tasks(dump: Optional[Union[bool, str]] = None,\n get_predictions_instead: bool = False):\n\n @ray.remote\n def _iter_projects(proj_id, get_preds_instead=get_predictions_instead):\n if get_preds_instead:\n _tasks = get_tasks_from_mongodb(proj_id,\n dump=dump,\n get_predictions=True)\n else:\n _tasks = get_tasks_from_mongodb(proj_id)\n for task in _tasks:\n task.pop('_id')\n return _tasks\n\n project_ids = get_project_ids_str().split(',')\n\n futures = []\n for project_id in project_ids:\n futures.append(_iter_projects.remote(project_id))\n\n tasks = []\n for future in tqdm(futures):\n tasks.append(ray.get(future))\n\n if dump:\n with open(dump, 'w') as j:\n json.dump(sum(tasks, []), j)\n\n return sum(tasks, [])", "def get_all():\n return list(tasks.find({}))", "def get_tasks(self, project, story):\n ret_val = []\n resource = \"projects/{0:d}/stories/{1:d}/tasks\".format(project.id,\n story.id)\n tasks = self._request(\"get\", resource)\n\n for task in tasks:\n ret_val.append(Task(task))\n\n return ret_val", "def all_tasks(request):\n return Task.objects.select_related('project').filter(user=request.user).exclude(folder='trash')", "def find_tasks(\n self,\n task_name: Optional[str] = None,\n project_id: Optional[str] = None,\n parent_task_id: Optional[str] = None,\n ) -> List[Task]:\n with self.table_access_condition:\n conn = self._get_connection()\n c = conn.cursor()\n c.execute(\n \"\"\"\n SELECT * from tasks\n WHERE (?1 IS NULL OR task_name = ?1)\n AND (?2 IS NULL OR project_id = ?2)\n AND (?3 IS NULL OR parent_task_id = ?3)\n \"\"\",\n (task_name, nonesafe_int(project_id), nonesafe_int(parent_task_id)),\n )\n rows = c.fetchall()\n return [\n Task(self, str(r[\"task_id\"]), row=r, _used_new_call=True) for r in rows\n ]", "def tasks_in_project(request, project):\n return project.task_set.filter(user=request.user).exclude(folder='trash')", "def get_tasks(self):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks\")\n return res.fetchall()", "async def list_tasks():", "def get_all_tasks_in_tree(self, key_main_task=None):\n queue_for_bfs = deque()\n\n task = self.get_task(key_main_task)\n if not task:\n return None\n\n queue_for_bfs.append(task)\n list_tasks_project = [task]\n self.queue_on_project(queue_for_bfs, list_tasks_project)\n self.save_task(task)\n return 
list_tasks_project", "def get_tasks(self):\n return self.tasks", "def get_unique_project_list(self) -> List[str]:\n return self.tasks.get_project_list()", "def get_tasks(self):\n return self.stn.get_tasks()", "def get_tasks(taskid_list, module):\n tasks = module.client.api.get_tasks_by_status('Pending')\n task_list = list()\n for task in tasks:\n if task['workOrderId'] in taskid_list:\n task_list.append(task)\n return task_list", "def get(self):\n\n return task_service.get_tasks()", "def get_tasks(self):\n return self.task_collection", "def projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user)", "def get_tasks_list(self):\n return self.task_controller.get_list()", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "def get_task_list(self):\n raise NotImplementedError()", "def view_tasks():\n task_list = []\n incomplete_task_list = Tasks.objects.filter(is_complete=False)\n for task in incomplete_task_list:\n tasks = [] #create data structure\n tasks.append(task.id) #add ID \n tasks.append(task.task_text) #add text\n task_list.append(tasks) #append data structure\n\n return task_list", "def query_project_tasks(self, project_data):\n\n # Get project ID.\n project_id = project_data[0][0]\n query = \"select task_datest, task_dateend, task_info, skill_descrpt, \" \\\n \"TS_Qty \" \\\n \"from skill, task_skills, task \" \\\n \"where task_skills.task_id = task.task_id \" \\\n \"and proj_id = '{}' \" \\\n \"and task_skills.skill_id = skill.skill_id \" \\\n \"order by task_datest\".format(project_id)\n\n try:\n self.dbCursor.execute(query)\n return self.dbCursor.fetchall()\n except mysql.connector.Error as err:\n ErrorMessageWindow(err)", "def get_all_tasks(self):\n tasks = []\n\n with open(self.path_to_task_file, 'r') as file:\n for line in file:\n task = Task()\n task.load(line)\n tasks.append(task)\n with open(self.path_to_task_file, 'w'):\n pass\n\n for task in tasks:\n self.check_time(task)\n\n return tasks", "def get_tasks(self):\n assert self.cluster\n query = objects.TransactionCollection.filter_by(\n None,\n cluster_id=self.cluster.id, name=consts.TASK_NAMES.deployment\n )\n query = objects.TransactionCollection.filter_by_not(\n query, deployment_info=None\n )\n return objects.TransactionCollection.order_by(query, 'id')", "def tasks(self):\n args = Namespace(rev=self.rev)\n data = run_query('push_results', args)['data']\n\n tasks = []\n for kwargs in data:\n # Do a bit of data sanitization.\n if any(a not in kwargs for a in ('label', 'duration', 'result', 'classification')):\n continue\n\n if kwargs['duration'] <= 0:\n continue\n\n tasks.append(Task(**kwargs))\n\n return tasks", "def get_tasks(self):\n if self.tasks_url:\n resp = self._api.list_tasks(url=self.tasks_url)\n\n else:\n resp = self._api.list_tasks(job_id=self.id)\n\n if resp.success:\n self.tasks = [Task(self._api, self.id, **task_def)\n for task_def in resp.result]\n\n return self.tasks\n\n else:\n raise resp.result", "def tasks(self, tags=None, summary=True, tags_intersect=None):\n return list(self.all_tasks(summary=summary, tags=tags, tags_intersect=tags_intersect))", "def get_tasks(self, board):\n\n return 
BoardJob.get_boardjob(board.id, self.id).tasks", "def get_projects():\n return Project.query.all()", "def list(self, name=None):\n if name is not None:\n tasks = self._list_all_tasks_from_single_dataset(name)\n else:\n tasks = self._list_all_tasks_from_all_datasets()\n return tasks", "def get_archieve(self):\n all_tasks = self.task_controller.get_list()\n return [task for task in all_tasks if task.is_completed == Status.DONE]", "def task_list(self) -> List[\"Task\"]: # noqa: F821\n return list(self.tasks.values())", "async def list_tasks(fields: Set[str] = None):\n tasks = celery_app.describe_tasks()\n tasks = [TaskOut(**task).dict(include=fields) for task in tasks]\n return tasks", "def get(self, project_id):\n try:\n pagination_args = get_pagination_args(request)\n except ArgumentError as e:\n return {'message': e.message}, 500\n\n limit = pagination_args['limit'] if 'limit' in pagination_args else self.DEFAULT_LIMIT\n offset = pagination_args['offset'] if 'offset' in pagination_args else self.DEFAULT_OFFSET\n\n tasks = backend.filter(Task, {'project.pk': request.project.pk},\n include=('project',), only=TaskDetails.export_fields, raw=True\n ).sort('created_at', -1)\n\n return {'tasks': [TaskDetails.export(task) for task in tasks[offset:offset + limit]]}, 200", "def project():\n\n return M(c=\"project\", f=\"task\")(\n M(\"Tasks\", f=\"task\")(\n M(\"Create\", m=\"create\"),\n M(\"My Open Tasks\", vars={\"mine\":1}),\n ),\n )", "def get_all_tasks(self) -> APIResponse:\n return self._get(\"system_list\")", "def get_tasks():\n result = mongo.db.tasks.find()\n return json_util.dumps(result)", "def db_get_all_tasks():\n sql = \"SELECT * FROM {};\".format(TABLE_NAME)\n return db_query(sql)", "def list_tasks(ctx):\n ctx.run(\"invoke --list\")", "def list_all_tasks(self):\n task_table = Table('task', self.metadata, autoload=True)\n try:\n all_tasks = self.session.query(task_table).all()\n task_list = []\n for t in all_tasks:\n task_list.append(t._asdict())\n return task_list\n except Exception as e:\n logger.info(f\"Error retrieving list of tasks: {e}\")\n return False", "def tasks(self, flags=gdef.TASK_ENUM_HIDDEN):\n tasks = TaskCollection()\n self.GetTasks(flags, tasks)\n return tasks", "def get_all_projects(self, org):\n return [proj for proj in Project.objects.filter(org=org)]", "def get_all_tasks(username):\n if not username:\n return []\n task_notif_list = []\n for obj in TaskNotification.objects.filter(username=username):\n task_notif_list.append({\n 'id': obj.task_id,\n 'name': obj.name,\n 'status': obj.status,\n 'payload': obj.payload\n })\n return task_notif_list", "def get_list_task(self):\n task_items = self.driver.find_elements(*task_page_locators.TASK)\n return task_items", "def tasks(self):\n for task in self._get_paged(\"tasks\"):\n yield Task(task, **self._new_session_args)", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def show_tasks():\n top_level_tasks = query_with_results(\"select label, description from task where parent = ''\", [])\n for task in top_level_tasks:\n _show_task(task)", "def get_tasks_summary(self):\n columns = [\"id\", \"name\", \"state\", \"warning\", \"warning_message\", \"parent_job\", \"tags\"]\n \n cur = self.conn.cursor()\n cur.execute(\"SELECT \" + \", \".join(columns) + \" FROM tangerine;\")\n self.conn.commit()\n \n return [Task([(column,) for column in columns], task, interpolate=False) for task in cur.fetchall()]", "def get_running_task_dicts(tasks):\n 
running_task_dicts = []\n with database.engine.begin() as connection:\n for task in tasks:\n print(json.loads(task.meta))\n job = Job.fetch(task.id, connection=redis_conn)\n project = connection.execute(select([sqlalchemy.text(\n '*')]).select_from(models.projects).where(models.projects.c.project_id == task.project_id)).first()\n task_dict = dict(id=task.id, name=task.name, description=task.description,\n complete=task.complete, result=task.result, progress=task.get_progress(), project_id=task.project_id)\n task_dict['meta'] = json.loads(\n task.meta) if task.meta is not None else {}\n\n if job:\n task_dict['status'] = job.get_status()\n # task_dict['started_at'] = datetime.datetime.fromtimestamp(\n # task_dict['meta']['scheduled_at'])\n # print('scheduled_at: {}'.format(task_dict['started_at']))\n if project:\n task_dict['project_name'] = project['name']\n running_task_dicts.append(task_dict)\n return running_task_dicts", "def get_tasks():\n outbound_tasks = []\n outbound_tasks_with_due_dates = []\n creds = None\n current_path = os.path.dirname(os.path.abspath(__file__))\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first\n # time.\n picked_token_path = current_path + '/token.pickle'\n print(picked_token_path)\n if os.path.exists(picked_token_path):\n with open(picked_token_path, 'rb') as token:\n creds = pickle.load(token)\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(\n current_path + '/credentials.json', SCOPES)\n creds = flow.run_local_server(port=0)\n # Save the credentials for the next run\n with open(picked_token_path, 'wb') as token:\n pickle.dump(creds, token)\n\n service = build('tasks', 'v1', credentials=creds,\n cache=DiscoveryCache()) # https://github.com/googleapis/google-api-python-client/issues/325\n\n # Call the Tasks API\n tasks = service.tasks().list(tasklist='@default').execute()\n\n for task in tasks['items']:\n reduced = task_reducer(task)\n if reduced is not None:\n if 'due' in reduced:\n outbound_tasks_with_due_dates.append(reduced)\n else:\n outbound_tasks.append(reduced)\n\n outbound_tasks_with_due_dates.sort(key=sort_by_due_date)\n outbound_tasks[:0] = outbound_tasks_with_due_dates\n\n return outbound_tasks", "def tasks_list(self, use_json_output, **kwargs):\n tasks = self._get_tasks_list()\n log.info('jobs_id\\tproject id\\tname')\n for t in tasks:\n jobs_id = [job[\"id\"] for segment in t[\"segments\"] for job in segment[\"jobs\"]]\n if use_json_output:\n log.info(json.dumps(t, indent=4))\n else:\n log.info('{jobs_id}\\t{id}\\t{name}'.format(jobs_id=\", \".join(map(str, jobs_id)), **t))", "def get_tasks():\n tasks = []\n example_dir = os.path.normpath(os.path.join(\n os.path.dirname(__file__), '../../openshift/ansiblegen/examples/')\n )\n yaml_names = os.listdir(example_dir)\n for yaml_name in yaml_names:\n _, api_version, resource = yaml_name.split('_', 2)\n resource = resource[0:-4]\n yaml_path = os.path.join(example_dir, yaml_name)\n\n with open(yaml_path, 'r') as f:\n data = yaml.load(f)\n\n tasks.append(((api_version, resource), data))\n return tasks", "def allTask(self, isCron=False):\n if isCron:\n m = {\"task_type\": \"crontab\"}\n else:\n m = {\"task_type\": {\"$ne\": \"crontab\"}}\n tlist = []\n for doc in self.mgdb.task_library.find(m):\n 
tlist.append(doc)\n return tlist", "def tasks():", "def normalTasks(self):\n return self._tasks", "def get_subtasks(self, tid):\n return self.task_controller.get_subtasks(tid)", "def get_tasks(**filters):\n return db.task.find(filters) if filters else db.task.find()", "def get_tasks(loop):\n tasks = asyncio.all_tasks(loop)\n return \"Tasks: \" + \", \".join(\n [f\"{task.get_name()}: {task.get_coro().__name__}\" for task in tasks]\n )", "def get_tasks(self):\n return [getattr(self, k).value() for k in self._node_dict.values()]", "def get_all_tasks(self):\n return [\n self.create_virtual_environment,\n self.doc,\n self.install,\n self.lint,\n self.make_distribution,\n self.reset,\n self.setup,\n self.test,\n ]", "def project_list(self):\n try:\n ids = self.request[api.DATA][api.DATA][\"ids\"]\n return self._get_keystone_projects(ids)\n except Exception as e:\n LOG.exception(\"Error occurred: %s\" % e)", "def all(cls):\n projects_url = 'https://www.pivotaltracker.com/services/v5/projects'\n root = _perform_pivotal_get(projects_url)\n if root is not None:\n return [Project.from_json(project_node) for project_node in root]", "def get_tasks(cls):\n def _get_tasks():\n members = inspect.getmembers(cls, predicate=inspect.isfunction)\n for _, member in members:\n annotations = getattr(member, '__annotations__', {})\n if annotations.get('return', None) == Task:\n yield member\n return list(_get_tasks())", "def get_created_projects(self):\n project_ouessant1 = Project.objects.get(name='Ouessant Tidal Power Phase I')\n project_ouessant2 = Project.objects.get(name='Ouessant Tidal Power Phase II')\n project_liaoning = Project.objects.get(\n name='Liaoning Linghai China Resource Power Wind Power Wind Farm'\n )\n return [project_ouessant1, project_ouessant2, project_liaoning]", "def get_finished_task_dicts(tasks):\n finished_task_dicts = []\n with database.engine.begin() as connection:\n for task in tasks:\n try:\n download_path = url_for('data', path=task.result)\n task_dict = dict(id=task.id, name=task.name, description=task.description,\n complete=task.complete, result=task.result, download_path=download_path, status='finished', project_id=task.project_id)\n task_dict['meta'] = json.loads(\n task.meta) if task.meta is not None else {}\n finished_task_dicts.append(task_dict)\n project = connection.execute(select([sqlalchemy.text(\n '*')]).select_from(models.projects).where(models.projects.c.project_id == task.project_id)).first()\n if project:\n task_dict['project_name'] = project['name']\n except Exception as err:\n print('exception in api.get_finished_task_dicts')\n print(err)\n return finished_task_dicts", "def list(self, _request):\n serializer = TaskSerializer(instance=TASKS.values(), many=True)\n return response.Response(serializer.data)", "def get_projects(cls):\n projects = []\n for project in Project.select():\n project_dict = {\n \"id\": project.id,\n \"title\": project.title,\n \"link\": project.link,\n \"description\": project.description\n }\n projects.append(project_dict)\n return projects", "def tasks(self) -> List[TaskStatusDefinition]:\n return self._tasks", "def createTasks():\n tickets = jutdaapi.get_tickets(queues=[3]) # this works better (still not\n # perfect) if list results is set to 1000 in jutda user settings\n tasks = []\n for ticket in tickets:\n tasks.append(ticketToTask(ticket))\n return tasks", "def task_get(self):\n for task in self.task_manager.task():\n if task.status in (TASK.UNSCHEDULED, TASK.DONE):\n yield task", "def get_projects(self):\n res = 
self.conn.cursor().execute(\"SELECT * FROM projects\")\n return res.fetchall()", "def list(self, *args, **kwargs):\n projects = Project.objects.all()\n return self.list_by(projects, self.serializer_class)", "def select_all_tasks(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM tasks\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def select_all_tasks(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM tasks\")\n\n rows = cur.fetchall()\n\n for row in rows:\n print(row)", "def get_jira_tasks(\n host: str, username: str, jira_password: str, max_results: int = 1000\n) -> List:\n # options = {'server': 'https://cog-jira.ipsoft.com', 'basic_auth': ('dengvall', pwd)}\n try:\n jira = JIRA(basic_auth=(username, jira_password), server=f\"https://{host}\")\n except j.exceptions.JIRAError:\n logger.error(\"Error connecting to server - please verify credentials\")\n raise\n\n # Get all projects\n # projects = jira.projects()\n\n logger.info(\"fetching jira tickets\")\n all_tickets = jira.search_issues(\n \"assignee = currentUser() order by priority desc\", maxResults=max_results\n )\n logger.info(f\"complete fetching {len(all_tickets)} tickets\")\n return all_tickets", "def all(cls):\r\n projects_url = 'https://www.pivotaltracker.com/services/v3/projects'\r\n response = _perform_pivotal_get(projects_url)\r\n\r\n root = ET.fromstring(response.text)\r\n if root is not None:\r\n return [Project.from_node(project_node) for project_node in root]", "def get_task_runs(app_id):\r\n task_runs = db.session.query(TaskRun).filter_by(app_id=app_id).all()\r\n return task_runs", "def select_all_tasks(conn, query):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM tasks\")\n\n rows = cur.fetchall()\n\n for row in rows: print(row)", "def get_projects(self):\n session = self.session_factory()\n results = [row.project for row in session.query(PipelineRun.project.distinct().label('project')).all()]\n session.close()\n return results", "def _get_project_tina_entries(self,pool='archive',refresh=False,path_folder=None):\n\t\tif not path_folder: path_folder = self.catalog_path\n\t\tif not refresh:\n\t\t\ttry:\n\t\t\t\treturn self.tina_archive_entries\n\t\t\texcept: pass \n\t\tself.tina_archive_entries = Tina.tina_find(\n\t\t\tpath_folder=path_folder,\n\t\t\tapplication=self.application,\n\t\t\tstrat='A',\n\t\t\tskip_filter=self.skip_filter)\n\t\treturn self.tina_archive_entries", "def open_projects(request):\n return Project.objects.prefetch_related('task_set').filter(user=request.user, open=True)", "def get_tasks_by_name(self, name: str) -> Set[\"Task\"]: # noqa: F821\n find = set()\n for task in self.tasks.values():\n if task.name == name:\n find.add(task)\n return find", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def tasks(self):\n for name, content in self.connection.task_gen():\n task = self.task(name, content, self.connection)\n yield task", "def get_projects(self):\n projects = []\n page = 1\n while not len(projects) % 100:\n projects += self._get('/projects?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not projects:\n break\n page += 1\n return projects", "def get_projects(self):\n unaligned_path = self.get_unaligned_path()\n logger.debug(\"collecting list of projects\")\n return [p for p in os.listdir(unaligned_path)\n if len(parsing.get_project_label(p))]", "def GetCeleryTasks(job, frames):\r\n collection = GetTaskCollection()\r\n groupId = GetCeleryGroupId(job)\r\n query = 
Query.EQ('_id', groupId)\r\n allFrames = list(job.JobFramesList)\r\n currentFrame = frames[0]\r\n index = allFrames.index(currentFrame)\r\n packetSize = len(frames)\r\n cursor = collection.Find(query).SetFields(Fields.Slice('tasks', index, packetSize)).SetLimit(1)\r\n doc = list(cursor)[0]\r\n results = []\r\n for task in doc.GetValue('tasks'):\r\n results.append(task.ToString())\r\n return results", "def show_all_tasks(self):\n tasks = self.session.query(self.Table).order_by(self.Table.deadline).all()\n print('All tasks:')\n if tasks:\n for n, task in enumerate(tasks, 1):\n print(f'{n}. {task.task}. {task.deadline.strftime(\"%d %b\")}')\n else:\n print('Nothing to do!')\n print()", "def get_workflow_tasks(self):\n task_records = self._read_transaction(tx.get_workflow_tasks)\n tuples = self._get_task_data_tuples(task_records)\n return [_reconstruct_task(tup[0], tup[1], tup[2], tup[3], tup[4]) for tup in tuples]", "def task_gen(self):\n tasks = []\n with self.db_lock:\n tasks = self.rcon.hgetall(self.task_key)\n\n # pylint: disable=E1103\n for key in list(tasks.keys()):\n yield (key, tasks.pop(key))", "def tasks(self):\n return self._tasks + self._slices", "def get_active_tasks(self):\n qry = Task.query.filter_by(user=self.id)\n qry = qry.filter_by(completed_on=None)\n return qry.all()", "def pinned_tasks_srp(request):\n return Task.objects.select_related('project').filter(pinned=True, user=request.user).exclude(folder='trash')", "def do_tasks(self, arg):\n args = shlex.split(arg)\n if not args:\n # TODAY\n started = datetime.date.fromtimestamp(0)\n finished = datetime.date.today()\n limit = 10\n else:\n limit = 0\n try:\n started, finished = helpers.parse_date_parameters(args)\n except ValueError, err:\n print(err)\n return\n tasks = self.db.get_profiled_tasks(started, finished, limit)\n def _display_fields(task):\n return [\n task['tid'],\n u'{task}#{project}'.format(\n task=task['tname'], project=task['pname']),\n u'{delta} / {started}'.format(\n delta=helpers.timedelta_to_human(datetime.datetime.now() -\n task['started']),\n started=datetime.datetime.strftime(\n task['started'], '%c').decode('utf8')\n ) if not task['finished'] else '[closed]',\n task['description'].decode('utf8')\n ]\n refined = map(_display_fields, tasks)\n print(tabulate(refined, ['ID', 'Task', 'Activity', 'Description']))", "def task_get_all(context, filters=None, marker=None, limit=None,\n sort_key='created_at', sort_dir='desc', admin_as_user=False):\n filters = filters or {}\n\n session = get_session()\n query = session.query(models.Task)\n\n if not (context.is_admin or admin_as_user) and context.owner is not None:\n query = query.filter(models.Task.owner == context.owner)\n\n _task_soft_delete(context, session=session)\n\n showing_deleted = False\n\n if 'deleted' in filters:\n deleted_filter = filters.pop('deleted')\n query = query.filter_by(deleted=deleted_filter)\n showing_deleted = deleted_filter\n\n for (k, v) in filters.items():\n if v is not None:\n key = k\n if hasattr(models.Task, key):\n query = query.filter(getattr(models.Task, key) == v)\n\n marker_task = None\n if marker is not None:\n marker_task = _task_get(context, marker,\n force_show_deleted=showing_deleted)\n\n sort_keys = ['created_at', 'id']\n if sort_key not in sort_keys:\n sort_keys.insert(0, sort_key)\n\n query = _paginate_query(query, models.Task, limit,\n sort_keys,\n marker=marker_task,\n sort_dir=sort_dir)\n\n task_refs = query.all()\n\n tasks = []\n for task_ref in task_refs:\n tasks.append(_task_format(task_ref, 
task_info_ref=None))\n\n return tasks" ]
[ "0.7955744", "0.75765634", "0.7526731", "0.7374001", "0.735808", "0.73461324", "0.7285441", "0.72829795", "0.7264477", "0.72372395", "0.722166", "0.7205922", "0.714417", "0.7095142", "0.70789766", "0.70438063", "0.6978584", "0.6959845", "0.6954135", "0.6882514", "0.68778336", "0.6875632", "0.6867844", "0.68443865", "0.6820239", "0.67922664", "0.67725533", "0.67457074", "0.66762507", "0.66697425", "0.66478103", "0.66258836", "0.66141987", "0.6605381", "0.6597254", "0.6578336", "0.65639395", "0.6559718", "0.65596974", "0.6549132", "0.65289235", "0.6527351", "0.647577", "0.6474944", "0.64742213", "0.6471371", "0.64537156", "0.64479744", "0.6437876", "0.6374877", "0.63573366", "0.6346788", "0.63443065", "0.6341639", "0.63391876", "0.6336992", "0.6327163", "0.6315622", "0.63108593", "0.62976205", "0.6286767", "0.6256336", "0.62296176", "0.62272316", "0.6215473", "0.62109274", "0.6200982", "0.6192127", "0.6191766", "0.61812395", "0.6170177", "0.61305183", "0.6109127", "0.61083055", "0.6095536", "0.6079476", "0.60737455", "0.60699475", "0.60699475", "0.60651505", "0.6063394", "0.60616934", "0.60532707", "0.60471904", "0.60410947", "0.60335046", "0.6026126", "0.6023485", "0.6007833", "0.6007357", "0.60009485", "0.6000724", "0.5998757", "0.5992544", "0.5986037", "0.5982916", "0.5975944", "0.5966386", "0.59647745", "0.5960383" ]
0.7401329
3
Create a new client.
def createClient(self, name, wid, notes=None): data = {} data['client'] = {} data['client']['name'] = name data['client']['wid'] = wid data['client']['notes'] = notes response = self.postRequest(Endpoints.CLIENTS, parameters=data) return self.decodeJSON(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_client(self) -> None:\n pass", "def create_client(name):\n client = Client(name=name)\n print(client.client_secret)\n db.session.add(client)\n db.session.commit()\n return client", "def create(ctx, name, company, mail, age):\n client = Client(name,company,mail,age)\n client_service = ClientService(ctx.obj['clients_table']) \n client_service.create_client(client)", "def add_client(name):\n return create_client(name)", "def test_create_client(self):\n pass", "def create_client(\n body: ClientmodelClientCreateRequest,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = CreateClient.create(\n body=body,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def create_client():\n result = False\n if g.client_id in drivers:\n result = True\n return jsonify({'Success': result})", "def client():\n\n client = Client()\n return client", "def create_client(service_name: str, config_name: str = None, **client_args):\n session = get_session(config_name)\n return session.client(service_name, **client_args)", "def newClient(self, cid, **kwargs):\n client = Iourt42Client(console=self.console, cid=cid, timeAdd=self.console.time(), **kwargs)\n self[client.cid] = client\n self.resetIndex()\n\n self.console.debug('Urt42 Client Connected: [%s] %s - %s (%s)', self[client.cid].cid, self[client.cid].name,\n self[client.cid].guid, self[client.cid].data)\n\n self.console.queueEvent(self.console.getEvent('EVT_CLIENT_CONNECT', data=client, client=client))\n\n if client.guid:\n client.auth()\n elif not client.authed:\n self.authorizeClients()\n return client", "def test_client_create(self):\n pass", "def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)", "def create_new_client(main: MainApplication) -> str:\n client = main.create_window(\"client\", \"IPLMS\", main.client_ui.get_layout())\n client[\"_CLIENT_ID_\"].Update(getUUID())\n client[\"_CP_NAME_IP_\"].Update(\"\")\n client[\"_CP_PHONE_IP_\"].Update(\"\")\n client[\"_CP_ADDRESS_IP_\"].Update(\"\")\n client.un_hide()\n event, values = client.read()\n client_logic = Client(main, event, values)\n name = client_logic.run(main)\n client.hide()\n return name", "def create_client():\n host_api_id = Config.api_id\n host_api_hash = Config.api_hash\n host_user_id = Config.user_id\n host_phone = Config.phone\n\n client = TelegramClient(host_user_id, host_api_id, host_api_hash)\n client.connect()\n if not client.is_user_authorized():\n client.send_code_request(host_phone)\n client.sign_in(host_phone, input('Enter code sent to your telegram: '))\n return client", "def _create_client(p4, client_name, p4gf_dir):\n view = ['//{depot}/... 
//{client}/...'.format(depot=p4gf_const.P4GF_DEPOT,\n client=client_name)]\n spec_created = False\n if not p4gf_util.spec_exists(p4, \"client\", client_name):\n # See if the old object clients exist, in which case we will remove them.\n if p4gf_util.spec_exists(p4, \"client\", OLD_OBJECT_CLIENT):\n p4.run('client', '-df', OLD_OBJECT_CLIENT)\n if p4gf_util.spec_exists(p4, \"client\", OLDER_OBJECT_CLIENT):\n p4.run('client', '-df', OLDER_OBJECT_CLIENT)\n spec_created = p4gf_util.ensure_spec(\n p4, \"client\", spec_id=client_name,\n values={'Host': None, 'Root': p4gf_dir,\n 'Description': 'Created by Perforce Git Fusion',\n 'View': view})\n if not spec_created:\n p4gf_util.ensure_spec_values(p4, \"client\", client_name,\n {'Root': p4gf_dir, 'View': view})", "def create_client(self) -> None:\n self._client = discovery.build('ml', 'v1')", "def create_client(name):\n address = \"/run/com_handler.sock\"\n sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n sock.connect(address)\n return Client(sock, \"\", name)", "def client():", "def create_client(self, module_name, version, client_class):\n # NOTE(kiennt): Get created client rather create a new one.\n # The key is the combination of module_name and version.\n # because we can create multiple clients of a module with\n # different versions.\n client = self.created_clients.get(module_name + version)\n if client:\n return client\n module_client = self._import_client(module_name)\n try:\n client = getattr(module_client, client_class)(\n version=version,\n session=self._sess)\n self.created_clients[module_name+version] = client\n return client\n except Exception as err:\n raise err", "def create_dummy_client(index, user, client_manager = None, language = None, currency = None):\r\n \r\n if client_manager == None:\r\n client_manager = ClientManager(user)\r\n \r\n if currency is None:\r\n currency = create_dummy_currency(index)\r\n \r\n if language is None:\r\n language = create_dummy_language(index)\r\n \r\n return client_manager.add_client(\r\n name = 'client_%i' %index,\r\n address = 'address_%i' % index,\r\n email = 'corp_email_%i@email.com' % index,\r\n default_currency_id = currency.key().id(),\r\n default_language_id = language.key().id(),\r\n )", "def test_client_create(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n\n # Create client.\n ag.clients_create(\"client-name\", \"some description\")\n\n assert ag.api_key == \"some api key\"\n assert ag.api_secret == \"some secret\"", "def create_client():\n hostname = \"localhost\"\n username = \"she393\"\n password = os.getenv(\"PASSWORD\")\n\n client = paramiko.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n client.connect(hostname=hostname, username=username, password=password)\n return client", "def create_clients(client_name): # Crear nuevo Cliente\n global clients\n\n if client_name not in clients:\n clients.append(client_name)\n else:\n print('The client name is alredy in the client\\'s list')", "def client(db):\n client = ClientFactory()\n db.session.commit()\n return client", "async def create_client_async(\n body: ClientmodelClientCreateRequest,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = CreateClient.create(\n body=body,\n )\n return await 
run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def create_client(self, version=None, unstable=False, **kwargs):\n version_data = self._calculate_version(version, unstable)\n return self._create_client(version_data, **kwargs)", "def create_client(cls, values, login_details):\n client_id = None\n c, conn = connection(login_details['company_schema'])\n\n def sort_code_unique(sort_code):\n \"\"\"\n Checks to see if the sort_code is Unique\n :param sort_code:\n \"\"\"\n output = False\n sql = u'SELECT sort_code ' \\\n u'FROM client_company_TBL ' \\\n u'WHERE sort_code = %s;'\n data = (sort_code,)\n c.execute(sql, data)\n test = c.fetchone()\n\n if test is None:\n output = True\n\n return output\n\n def add_client(name, sort_code):\n \"\"\"\n Adds the client to the client company table in the user company database\n :param name: String name of the client company\n :param sort_code: String sort code for the client company\n \"\"\"\n\n sql = u'INSERT INTO client_company_TBL ' \\\n u'(name, sort_code) ' \\\n u'VALUES (%s, %s);'\n data = (name, sort_code)\n c.execute(sql, data)\n conn.commit()\n\n def get_client_id(name, sort_code):\n \"\"\"\n Will return the ID for the client company that has just been added\n :param name:\n :param sort_code:\n \"\"\"\n output = None\n\n sql = u'SELECT client_company_ID ' \\\n u'FROM client_company_TBL ' \\\n u'WHERE name = %s ' \\\n u'AND sort_code = %s'\n\n data = (name, sort_code)\n c.execute(sql, data)\n value = c.fetchone()\n if value is not None:\n output = value[0]\n\n return output\n\n try:\n if sort_code_unique(values['code']):\n add_client(values['name'], values['code'])\n client_id = get_client_id(values['name'], values['code'])\n finally:\n conn_close(c, conn)\n return cls(client_id, login_details)", "def create():\n form = request.form\n try:\n # create a new BancBox client from the input form\n resp = api.create_client(form)\n except Exception, e:\n logger.error('Error creating new client: %s', e)\n return render_template('created.html', error=e.message)\n\n if resp.status == 1:\n # If the create request was successful, let's render a success\n # message with some data about the new client and a link to the\n # detail page\n new_client = {\n 'firstName': form['firstName'],\n 'lastName': form['lastName'],\n 'clientId': resp.clientId\n }\n return render_template('created.html', new_client=new_client)\n else:\n # If an error was returned by BancBox, let's render it\n if hasattr(resp, 'errors') and hasattr(resp.errors, 'message'):\n message = resp.errors.message\n else:\n message = \"Error creating new client.\"\n return render_template('created.html', error=message)", "def create_client(self, initiator_iqn):\n client = self._get_target_client(initiator_iqn)\n if not client:\n try:\n self.client.create_client(self.target_iqn,\n initiator_iqn)\n except client_exceptions.ClientException as ex:\n raise exception.VolumeBackendAPIException(\n data=ex.get_description())", "def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n 
token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )", "def create_generated_client() -> None:\n print(\"Generating client\")\n\n delete_generated_client()\n args = [\n \"{}/../scripts/generate.sh\".format(ROOT),\n \"-i\",\n \"http://localhost:8000/openapi.json\",\n \"-p\",\n CLIENT_NAME,\n \"--include-auth\",\n \"-o\",\n ROOT,\n \"-t\",\n \"/tmp\",\n \"-m\",\n ]\n\n process_result = subprocess.run(args, capture_output=True)\n\n with open(os.path.join(LOG_DIR, \"generation.log\"), \"wb\") as file:\n file.write(process_result.stdout)\n\n with open(os.path.join(LOG_DIR, \"generation.err\"), \"wb\") as file:\n file.write(process_result.stderr)\n\n if process_result.returncode != 0: # pragma: no cover\n if process_result.stderr:\n sys.stderr.write(process_result.stderr.decode(\"utf-8\"))\n pytest.exit(\n \"Failed to generate client api, code {}\"\n \"\\nLogs are in logs/generation.log and logs/generation.err\".format(process_result.returncode),\n returncode=process_result.returncode,\n )\n\n print(\"Client created in {}, logs in logs/generation.log\\n\".format(CLIENT_DIR))", "def create_client(self):\n client = iperf3.Client()\n client.duration = self._host[CONF_DURATION]\n client.server_hostname = self._host[CONF_HOST]\n client.port = self._host[CONF_PORT]\n client.num_streams = self._host[CONF_PARALLEL]\n client.protocol = self._host[CONF_PROTOCOL]\n client.verbose = False\n return client", "def create_client(name: str, url: str, description: str, scopes: str,\n redirect_uri: str) -> None:\n app = create_web_app()\n with app.app_context():\n datastore.create_all()\n\n with datastore.util.transaction() as session:\n db_client = datastore.models.DBClient(\n name=name,\n url=url,\n description=description,\n redirect_uri=redirect_uri\n )\n secret = generate_token(48)\n hashed = hashlib.sha256(secret.encode('utf-8')).hexdigest()\n db_cred = datastore.models.DBClientCredential(client=db_client,\n client_secret=hashed)\n db_scopes = [\n datastore.models.DBClientAuthorization(\n client=db_client, authorized=datetime.now(), scope=scope\n ) for scope in scopes.split()\n ]\n db_grant_type = datastore.models.DBClientGrantType(\n client=db_client,\n grant_type='client_credentials',\n authorized=datetime.now()\n )\n db_grant_type = datastore.models.DBClientGrantType(\n client=db_client,\n grant_type='authorization_code',\n authorized=datetime.now()\n )\n\n session.add(db_client)\n session.add(db_cred)\n session.add(db_grant_type)\n for db_scope in db_scopes:\n session.add(db_scope)\n\n session.commit()\n click.echo(f'Created client {name} with ID {db_client.client_id}'\n f' and secret {secret}')", "def client_setup(self):\n self.client = Client()", "def client():\n return Client(**common_data.AUTH_ARGS)", "def create(cls, client, fields, **kwargs):\n\t\tres = cls(client, fields, **kwargs)\n\t\treturn res", "def gen_neutron_client(self):\n\n print \"\\t* Generating neutron client\"\n self.neutronclient = neutronclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)", "def add_new_client(self, host, identifier):\n client = None\n\n try:\n client = self.get_client_by_info(host, identifier)\n\n logging.debug(\"Found client matching host '%s', uuid: '%s'\",\n host, client.uuid)\n\n except NoClientFoundError:\n logging.debug(\"No client matching '%s' (%s), creating a new one\", \n host, identifier)\n\n client = AuthenticatedClient(host, identifier)\n\n 
with self.lock:\n self.clients.append(client)\n\n logging.debug(\"Created client for '%s' ('%s'). uuid: %s, token: %s\", \n host, identifier, client.uuid, client.token)\n\n finally:\n return client", "def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])", "def createClientSocket():\t\n\ttry:\n\t\tsock = socket.socket() #socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\treturn sock\n\texcept socket.error as err:\n\t\t#TODO: Handle error\n\t\tprint(\"- Peer Client: Error creating socket for client!\")\n\t\treturn -1", "def new_connection(self, transport):\n assert transport not in self.clients\n self.clients[transport] = Client(transport, self)", "def create_client():\n logger.debug(\"=====create_client fired...\")\n try:\n session = boto3.Session()\n client = session.client('dynamodb', region_name='us-east-1')\n return client\n except ClientError as err:\n logger.error(\n \"[BOTO3_ERROR]Failed to create boto3 client: %s\", str(err))", "def gen_heat_client(self):\n\n print \"\\t* Generating heat client\"\n # request a new auth token from keystone\n keystone = ksclient.Client(auth_url=self.auth_url,\n username=self.username,\n password=self.password,\n tenant_name=self.tenant_name,\n region_name=self.region_name)\n auth_token = keystone.auth_token\n heat_url = 'http://%s:8004/v1/%s' % (self.ip, self.tenant_id)\n\n # instantiate client\n self.heatclient = hClient('1', endpoint=heat_url, token=auth_token)", "def _CreatePubsubClient():\n client = pubsub_client.PubSubClient()\n client.CreateTopic(DEVICE_NOTE_PUBSUB_TOPIC)\n client.CreateTopic(HOST_NOTE_PUBSUB_TOPIC)\n return client", "def create_client(host, user, password):\n client = paramiko.client.SSHClient()\n client.set_missing_host_key_policy(paramiko.AutoAddPolicy)\n client.connect(hostname=host, username=user, password=password, timeout=60)\n return client", "def get_client():\n return Client(__address, authkey='strumamor')", "def create_client(email, password):\n gd_client = gdata.contacts.service.ContactsService()\n gd_client.email = email\n gd_client.password = password\n gd_client.source = 'syncContacts'\n gd_client.ProgrammaticLogin()\n return gd_client", "def make_client(service_key, constructor=None, options=None, **kwargs):\n cloud = get_config(service_key=service_key, options=options, **kwargs)\n if not constructor:\n constructor = cloud_config._get_client(service_key)\n return cloud.get_legacy_client(service_key, constructor)", "def CreateClient():\n client = gdata.docs.client.DocsClient(source=SampleConfig.APP_NAME)\n client.http_client.debug = SampleConfig.DEBUG\n # Authenticate the user with CLientLogin, OAuth, or AuthSub.\n try:\n gdata.sample_util.authorize_client(\n client,\n service=client.auth_service,\n source=client.source,\n scopes=client.auth_scopes\n )\n except 
gdata.client.BadAuthentication:\n exit('Invalid user credentials given.')\n except gdata.client.Error:\n exit('Login Error')\n return client", "def new(\n host: str = \"localhost\",\n port: int = 4110,\n user: str = \"pyserval\",\n passwd: str = \"pyserval\",\n ):\n connection = RestfulConnection(host=host, port=port, user=user, passwd=passwd)\n return LowLevelClient(connection=connection)", "def create():", "def create():", "def test_create_o_auth_client(self):\n pass", "def __init__(self, client):\n self.client = client", "def create_client(self) -> None:\n self._client = gapic.JobServiceClient(\n client_options=dict(api_endpoint=self._region + _UCAIP_ENDPOINT_SUFFIX))", "def create_clients(KEY, SECRET):\n \n iam = boto3.client('iam',aws_access_key_id=KEY,\n aws_secret_access_key=SECRET,\n region_name='us-west-2'\n )\n\n redshift = boto3.client('redshift',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n \n ec2 = boto3.resource('ec2',\n region_name=\"us-west-2\",\n aws_access_key_id=KEY,\n aws_secret_access_key=SECRET\n )\n \n return iam, redshift, ec2", "def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')", "def __init__(self, client):\n\n self.client = client", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_Create'))", "def __new__(cls, host=None, user=None, client=None):\n cls.__check_parameters(host=host, user=user)\n if client is None:\n raise InvalidClientException(\"Integrated Client during connection creation can't be None\")\n return super(Connection, cls).__new__(cls, host=host, user=user, client=client)", "def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client", "def create_client(service, region, access_key_id, secret_access_key):\n client = boto3.client(service,\n region_name=region,\n aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key\n )\n return client", "def _new_client(self) -> paramiko.SSHClient:\n ssh = paramiko.SSHClient()\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self._paramiko_client = ssh\n return self._paramiko_client", "def __init__(self, name, client):\n self.name = name\n self.client = client", "def test_create(self):\n isok, response = self.onep.create(\n self.cik,\n 'client',\n {\n 'writeinterval': 'inherit',\n 'name': 'testclient',\n 'visibility': 'parent',\n 'limits': {\n 'dataport': 'inherit',\n 'datarule': 'inherit',\n 'dispatch': 'inherit',\n 'disk': 'inherit',\n 'io': 'inherit',\n 'share': 'inherit',\n 'client': 'inherit',\n 'sms': 'inherit',\n 'sms_bucket': 'inherit',\n 'email': 'inherit',\n 'email_bucket': 'inherit',\n 'http': 'inherit',\n 'http_bucket': 'inherit',\n 'xmpp': 'inherit',\n 'xmpp_bucket': 'inherit'}\n })\n client_rid = response\n self.assertTrue(isok, 'client creation succeeded')\n self.assertTrue(re.match(\"^[0-9a-f]{40}$\", client_rid), 'rid is formatted correctly')\n\n isok, response = self.onep.info(\n self.cik,\n client_rid,\n {'key': True}\n )\n client_cik = response['key']\n\n # Add a dataport\n isok, response = self.onep.create(\n client_cik,\n 'dataport',\n {\n 'format': 'string',\n 'retention': {\n 'count': 'infinity',\n 'duration': 'infinity',\n },\n 'limits': {\n 
'dataport': 'inherit',\n 'datarule': 'inherit',\n 'dispatch': 'inherit',\n 'disk': 'inherit',\n 'io': 'inherit',\n 'share': 'inherit',\n 'client': 'inherit',\n 'sms': 'inherit',\n 'sms_bucket': 'inherit',\n 'email': 'inherit',\n 'email_bucket': 'inherit',\n 'http': 'inherit',\n 'http_bucket': 'inherit',\n 'xmpp': 'inherit',\n 'xmpp_bucket': 'inherit',\n }\n }\n )\n dataport_rid = response\n self.assertTrue(isok, 'dataport creation succeeded')\n self.assertTrue(re.match(\"^[0-9a-f]{40}$\", dataport_rid), 'rid is formatted correctly')", "def init_client(self, client):\n self.client = client", "def create(self):\n vpn = self.cleaned_data['vpn']\n return vpn.create_client(self.cleaned_data['host'],\n self.cleaned_data['active'])", "async def handle_new_client(self, reader, writer):\n\n log.debug(\"Got brand new client!\")\n\n self.fakeclient = Client(None, None, reader, writer)\n req = await read(self.fakeclient, 'uuid', 'username')\n self.fakeclient = None\n\n uuid = req['uuid']\n username = req['username']\n self.clients[uuid] = Client(username, PlayerPrivateStatus(), reader,\n writer)\n\n if self.state == 'waiting for owner response':\n # don't accept any request from players when a request has already\n # been send to the owner\n # So, we tell the player the owner's busy.\n log.debug(\"Send owner busy with request.\")\n await write(self.clients[uuid], {'kind': 'request state change',\n 'state': 'declined',\n 'reason': 'owner busy'})\n del self.clients[uuid]\n return\n\n if self.state == \"waiting for player\":\n # Here, we have a request from a player to join the onwer\n # the reader and the writer are the other player's, not the owner's\n log.debug(f\"Send requests infos to owner {uuid!r} {username!r}\")\n # send the uuid and username to the owner\n await write(self.clients[self.owneruuid], {'kind': 'new request',\n 'uuid': uuid,\n 'username': username})\n # feeds data because we were listening for nothing before\n # (not for nothing, just so that the server knows if the client\n # leaves)\n self.state = 'waiting for owner response'\n # wait for owner to reply\n res = await self.watch_owner\n log.debug(f\"Response from owner {res!r}\")\n # he said yes!\n if res['accepted'] is True:\n # to the client (the one that wanted to join)\n await write(self.clients[uuid], {\n 'kind': 'request state change',\n 'reason': None,\n 'accepted': True\n })\n return await self.hero_selection()\n else:\n if res['accepted'] is not False:\n log.error(\"Got unexpected value for response to request\"\n f\"{res['accepted']!r} (expecting a bool)\")\n self.state = 'waiting for player'\n await write(self.clients[uuid], {'kind': 'request state change',\n 'accepted': False,\n 'reason': 'owner declined'})\n del self.clients[uuid]\n # start all over again\n self.loop.create_task(self.handle_new_client(reader, writer))\n return\n\n if req['kind'] != 'identification':\n raise ValueError(f\"Got request of kind {req['kind']!r}, was \"\n \"expecting 'identification'\")\n # here, state must be 'waiting for owner'\n if uuid == self.owneruuid:\n self.state = \"waiting for player\"\n await write(self.clients[uuid], {\n 'kind': 'identification state change',\n 'state': 'success'\n })\n self.watch_owner = read(self.clients[self.owneruuid], 'accepted',\n kind='request state change')\n else:\n log.warning(f\"Got fake request pretenting to be owner \"\n f\"{uuid!r} {username!r}\")\n await write(self.clients[uuid], {\n 'kind': 'identification state change',\n 'state': 'failed'\n })\n writer.write_eof()\n await writer.drain()\n 
writer.close()", "def create_client(client_id, authority_url, client_secret):\n client = msal.ConfidentialClientApplication(\n client_id=client_id, authority=authority_url, client_credential=client_secret\n )\n return client", "def create_client_by_namespace(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def __init__(self, client):\n super().__init__(client)", "def __init__(self, client, name):\n if not isinstance(client, couch.Client):\n raise Exception(\"'client' arg must be instance of couch.Client\")\n\n self.client = client\n self.name = name", "def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client", "def register_client(self, client, client_name):\n self.clients[client_name] = client", "def _newClientSession(self, port, engine_id):\n return self.clientManager.newSession(engine_id, port, self)", "def start_client(self, name, address):\n if name not in self.clients:\n self.clients[name] = ProcessorClient()\n self.clients[name].connect(address)", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def add_client(self, cli):\n if self.clients.count(cli) is 0:\n self.clients.append(cli)", "async def create_client_by_namespace_async(\n body: ClientmodelClientCreateRequest,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = CreateClientByNamespace.create(\n body=body,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def client(self,context,params):\n url = f\"https://api.freshbooks.com/accounting/account/{params['account_id']}/users/clients/{params['id']}\"\n result = json.loads(util.rest(\"GET\", url, {}, context[\"headers\"][\"access_token\"]).text)\n client = result[\"response\"][\"result\"][\"client\"]\n client_obj = FreshbooksClient(\n accounting_systemid=client['accounting_systemid'], \n first_name=client['fname'],\n last_name=client['lname'],\n email=client['email'],\n vat_name=client['vat_name'],\n vat_number=client['vat_number'],\n home_phone=client['home_phone'],\n organization=client['organization'],\n username=client['username']\n )\n return client_obj.__dict__", "def post(self):\n data = request.json\n return create_cliente(data=data)", "def _generate_client(self):\n mongoConf = self._config.get('Connectivity', 'MongoDB') # type: dict\n if mongoConf.get('username') and mongoConf.get('password'):\n return pymongo.MongoClient(\n \"mongodb://{0}:{1}@{2}:{3}/{4}\".format(\n mongoConf.get('username', ''),\n mongoConf.get('password', ''),\n mongoConf.get('host', 'localhost'),\n mongoConf.get('port', 27017),\n mongoConf.get('db', 'grease')\n ),\n w=1\n )\n else:\n return pymongo.MongoClient(\n 
host=mongoConf.get('host', 'localhost'),\n port=mongoConf.get('port', 27017),\n w=1\n )", "def test_create_virtual_account_client(self):\n pass", "def create_cloudwatch_client_instance():\n try:\n cloudwatch = boto3.client('cloudwatch')\n return cloudwatch\n except Exception, e:\n responseData = { 'Reason': 'Error creating CloudWatch Client instance'}\n send(event, context, FAILED, responseData, \"CustomResourcePhysicalID\") \n return 1", "def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)", "def __init__(self, client):\n self._client = client", "def client(self, hostname_or_ip):\n hostname, aliases, ip = self.resolve(hostname_or_ip)\n try:\n client = Client.objects.get(name=hostname)\n printer_name = client.label_printer.cups_printer_name\n self.cups_server = client.label_printer.cups_server_hostname\n cups_hostname = self.cups_server.hostname\n self._label_printer = client.label_printer\n except Client.DoesNotExist:\n self.cups_server = 'localhost' # default\n cups_hostname = self.cups_server.hostname\n self._client = ClientTuple(hostname, aliases, ip, None, cups_hostname)\n try:\n printer_name = self.label_printer.cups_printer_name\n except AttributeError:\n printer_name = None\n self._client = ClientTuple(hostname, aliases, ip, printer_name, cups_hostname)", "def test_01_add_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except DBException as error:\n print(error.get_message())", "def test_client_verification_create(self):\n pass", "def create_clients(aws_key, aws_secret):\n ec2_client = boto3.resource(\n 'ec2', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n s3_client = boto3.resource(\n 's3', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n iam_client = boto3.client(\n 'iam', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n redshift_client = boto3.client(\n 'redshift', region_name=AWS_REGION, aws_access_key_id=aws_key,\n aws_secret_access_key=aws_secret\n )\n return ec2_client, s3_client, iam_client, redshift_client", "def create():\n pass", "def client(self,\n name,\n method=None,\n url=None,\n status_callback_event=None,\n status_callback_method=None,\n status_callback=None,\n **kwargs):\n return self.append(Client(\n name,\n method=method,\n url=url,\n status_callback_event=status_callback_event,\n status_callback_method=status_callback_method,\n status_callback=status_callback,\n **kwargs\n ))", "def test_get_client(self):\n pass", "def add_client(self, client):\n\n now = int(time.time())\n\n self.send_line(\"%s N %s 1 %d %s %s +ik ]]]]]] %s :%s\" %\\\n (self.config[\"numeric\"], client.nick, now, client.user,\n client.host, client.uid, client.gecos))", "def make_client(\n host_url: str, email: str, password: str, timeout: float = 30\n) -> Client:\n if 
host_url.endswith(\"/\"):\n raise ValueError('host_url must not end with \"/\"')\n host_url += \"/api/v1\"\n return Client(\n host_url,\n headers=make_auth_headers(email, password),\n timeout=timeout,\n verify_ssl=False,\n raise_on_unexpected_status=True,\n )", "def create_ssh_client(self, hostname, username, password):\n if self.ssh_client is None:\n self.ssh_client = paramiko.SSHClient()\n self.ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh_client.connect(hostname, username=username, password=password)\n else:\n print(\"SSH client session exist.\")", "def _accept_client(self, client_reader, client_writer):\n\n print(\"New client\", client_reader, client_writer)\n # start a new Task to handle this specific client connection\n task = asyncio.Task(self._handle_client(client_reader, client_writer))\n self.clients[task] = (client_reader, client_writer)\n\n def client_done(task):\n print(\"client task done:\", task, file=sys.stderr)\n del self.clients[task]\n\n task.add_done_callback(client_done)", "def _create_service_client(self, srv_name):\n if self._srv:\n self._srv.close()\n\n if srv_name in rosservice.get_service_list():\n rospy.loginfo(\"Creating proxy for service '%s'\" % srv_name)\n self._srv = rospy.ServiceProxy(srv_name, rosservice.get_service_class_by_name(srv_name))", "def admin_client():\n host = '127.0.0.1'\n port = 8126\n return TcpClient(host, port)", "def new( self, client, socket ):\n\t\tCORE.info( 'Established connection: %s' % client )\n\t\tstate = State( client, socket )\n\t\tstate.signal_connect( 'authenticated', self._authenticated )\n\t\tself.__states[ socket ] = state\n\t\tnotifier.socket_add( socket , self._receive )\n\t\tstatistics.connections.new()" ]
[ "0.84618115", "0.7922506", "0.76412994", "0.7560049", "0.72630185", "0.7126018", "0.70866805", "0.7063268", "0.7053907", "0.70391697", "0.7030181", "0.702756", "0.69890064", "0.6928916", "0.6843426", "0.68396825", "0.6788722", "0.676113", "0.67482334", "0.67339486", "0.67268497", "0.67248255", "0.6718343", "0.667431", "0.66490394", "0.6567238", "0.65228623", "0.648823", "0.64881307", "0.6478272", "0.64591175", "0.64085376", "0.6367323", "0.6304544", "0.63042", "0.62620133", "0.6255795", "0.6238943", "0.622172", "0.6207817", "0.6197496", "0.61967546", "0.61628336", "0.61519706", "0.615104", "0.6145741", "0.6141392", "0.6131794", "0.61230034", "0.6121309", "0.61167294", "0.61167294", "0.61156714", "0.6112171", "0.6089856", "0.60862374", "0.60749567", "0.606622", "0.6039485", "0.60300255", "0.6029729", "0.6029729", "0.60263395", "0.6022829", "0.6016596", "0.6007353", "0.6003545", "0.600026", "0.59877056", "0.59668416", "0.5951103", "0.5933331", "0.5933107", "0.59130156", "0.5908444", "0.5898372", "0.58974755", "0.58846354", "0.58789927", "0.5863598", "0.5863292", "0.58324903", "0.5828188", "0.5823364", "0.5811864", "0.5799497", "0.5797333", "0.5768211", "0.5765947", "0.5762496", "0.5741665", "0.5733007", "0.5731101", "0.572841", "0.5726635", "0.57194316", "0.5712755", "0.56916255", "0.5686646", "0.5658506" ]
0.69494456
13
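The record above pairs the query "Create a new client" with a Toggl API wrapper method as its positive document. As a hedged illustration only, the sketch below mounts that createClient body on a minimal harness so it can actually run: the TogglWrapper class name, the Endpoints.CLIENTS URL, and the bare urllib transport (authentication omitted) are assumptions made for this sketch, not part of the dataset; only the createClient body is taken verbatim from the record's document field.

import json
import urllib.request

class Endpoints:
    # Assumed Toggl v8 clients endpoint; not stated in the record itself.
    CLIENTS = "https://api.track.toggl.com/api/v8/clients"

class TogglWrapper:
    def __init__(self, api_token):
        # Hypothetical: real Toggl calls need auth headers, omitted here.
        self.api_token = api_token

    def postRequest(self, endpoint, parameters=None, method='POST'):
        # Minimal stand-in for the wrapper's unseen transport layer.
        data = json.dumps(parameters).encode() if parameters is not None else None
        req = urllib.request.Request(endpoint, data=data, method=method)
        req.add_header('Content-Type', 'application/json')
        with urllib.request.urlopen(req) as resp:
            return resp.read().decode()

    def decodeJSON(self, response):
        return json.loads(response)

    def createClient(self, name, wid, notes=None):
        # Body reproduced from the record's positive document.
        data = {}
        data['client'] = {}
        data['client']['name'] = name
        data['client']['wid'] = wid
        data['client']['notes'] = notes
        response = self.postRequest(Endpoints.CLIENTS, parameters=data)
        return self.decodeJSON(response)

# Hypothetical call (kept commented out: it would hit the network and
# needs real credentials to succeed):
# TogglWrapper('api-token').createClient('Acme Corp', wid=123)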
Update data for an existing client. If the name or notes parameter is not supplied, the existing data on the Toggl server will not be changed.
def updateClient(self, id, name=None, notes=None): data = {} data['client'] = {} data['client']['name'] = name data['client']['notes'] = notes response = self.postRequest(Endpoints.CLIENTS + '/{0}'.format(id), parameters=data, method='PUT') return self.decodeJSON(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
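This record's positive document defines the companion updateClient method on the same wrapper; per the query text, fields left unsupplied (sent as None) are meant to leave the server-side data unchanged. Below is a self-contained, hedged sketch that exercises that body offline: the StubToggl class, its echoing postRequest transport, and the endpoint constant are fabricated for illustration; only the updateClient body comes from the record.

import json

class Endpoints:
    CLIENTS = "https://api.track.toggl.com/api/v8/clients"  # assumed endpoint path

class StubToggl:
    def postRequest(self, endpoint, parameters=None, method='POST'):
        # Stand-in transport: echoes the request instead of calling Toggl.
        print(method, endpoint, json.dumps(parameters))
        return json.dumps({'data': parameters['client']})

    def decodeJSON(self, response):
        return json.loads(response)

    def updateClient(self, id, name=None, notes=None):
        # Body reproduced from the record's positive document.
        data = {}
        data['client'] = {}
        data['client']['name'] = name
        data['client']['notes'] = notes
        response = self.postRequest(Endpoints.CLIENTS + '/{0}'.format(id),
                                    parameters=data, method='PUT')
        return self.decodeJSON(response)

# The stub simply echoes the None notes field; against the real API the
# query text says such omitted fields would be left unchanged.
print(StubToggl().updateClient(42, name='Acme Corp'))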
[ "def update_client(client_name, updated_client_name): # Operacion modificar\n global clients\n\n if client_name in clients:\n index = clients.index(client_name)\n clients[index] = updated_client_name\n else:\n print(\"Client isn\\'t in the client list\")", "def test_update_client(self):\n url = '/api/v1/pediatras/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def update_client(\n body: ClientmodelClientUpdateRequest,\n client_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = UpdateClient.create(\n body=body,\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def do_PUT(self):\n note_details = NoteDetails\n if self.path == '/note/api/update':\n response_data=note_details.update_data(self)\n Response(self).jsonResponse(status=200, data=response_data)", "def set(self, client):\n if not client:\n raise SurvoxAPIMissingParameter('client')\n c = self.get()\n if not c:\n raise SurvoxAPIRuntime('No client available named: {name}'.format(name=self.name))\n return self.api_put(endpoint=self.url, data=client)", "def update_client(self, display_name, player=PLAYER_IDENTIFIER):\n self.state.update_client(display_name, player)", "def update(self, klient):\n try:\n # pobierz z bazy klienta\n inv_oryg = self.getById(klient.id)\n if inv_oryg != None:\n # klient jest w bazie: usuń go\n self.delete(klient)\n self.add(klient)\n\n except Exception as e:\n #print \"klient update error:\", e\n raise RepositoryException('error updating klient %s' % str(klient))", "async def update_client_async(\n body: ClientmodelClientUpdateRequest,\n client_id: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = UpdateClient.create(\n body=body,\n client_id=client_id,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def test_update_client(self):\n pass", "def test_client_update(self):\n pass", "def test_04_update_client(self):\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.modify(client)\n client = ClientsUnitTest._client_dao.get_client(client.user_id)\n self.assertEqual(client.user_id, int(test_str[0]))\n self.assertEqual(client.host_name, test_str[1])\n self.assertEqual(client.user_name, test_str[2])\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n self.assertTrue(ClientsUnitTest._client_dao.modify(client))\n\n except ClientNotFoundException as error:\n print(error.get_message())\n\n except DBException as error:\n print(error.get_message())", "def createClient(self, name, wid, notes=None):\n\n data = {}\n data['client'] = {}\n 
data['client']['name'] = name\n data['client']['wid'] = wid\n data['client']['notes'] = notes\n\n response = self.postRequest(Endpoints.CLIENTS, parameters=data)\n return self.decodeJSON(response)", "def put(self, id_cliente):\n data = request.json\n cliente = update_cliente(id_cliente, data)\n if not cliente:\n api.abort(404)\n else:\n return cliente", "def update_a_note(self, note_id, data):\n return self.client._put(\"/notes/{}\".format(note_id), json=data)", "def patch(cls, username, cad_model_name):\n client = ClientModel.find_user_by_username(username)\n jwt_id = get_jwt_identity()\n if client is None:\n return {'msg': gettext('client_profile_client_does_not_exist')}, 400\n if jwt_id != client.id:\n # Unauthorized delete\n return {'msg': gettext('cad_model_unauthorized_to_update')}, 403\n # Validate form entries\n spec_schema = CADSpecificationSchema(only=cad_spec_keys, partial=True)\n update_specs = spec_schema.load(request.form)\n # Set up a counter to check property update counts\n count = 0\n if cad_model_name != update_specs['cad_model_name']:\n return {'msg': gettext('cad_model_name_mismatch')}, 400\n\n if update_specs == {}:\n return {'msg': gettext('cad_model_update_info_empty')}\n # Defensive: Ensure a CAD model name is always submitted\n if update_specs['cad_model_name'] == '':\n return {'msg': gettext('cad_model_name_cannot_be_empty')}\n\n client_folder = f'client_{jwt_id}'\n current_cad = CADModel.find_cad_model_by_name(cad_model_name)\n\n if current_cad is None:\n return {'msg': gettext('cad_model_does_not_exist')}, 400\n\n object_key = f'{client_folder}/{update_specs[\"cad_model_name\"]}'\n # Get a presigned POST url\n ps_data = create_presigned_post_url(\n s3_client, bucket_name, object_key)\n url, fields = ps_data['url'], ps_data['fields']\n\n # Update cad_model_key\n if 'cad_model_length' in update_specs:\n current_cad.cad_model_length = update_specs['cad_model_length']\n count += 1\n\n if 'cad_model_height' in update_specs:\n current_cad.cad_model_height = update_specs['cad_model_height']\n count += 1\n\n if 'cad_model_width' in update_specs:\n current_cad.cad_model_width = update_specs['cad_model_width']\n count += 1\n\n if 'cad_model_material' in update_specs:\n current_cad.cad_model_material = update_specs['cad_model_material']\n count += 1\n\n if 'cad_model_visibility' in update_specs:\n current_cad.cad_model_visibility = update_specs['cad_model_visibility']\n count += 1\n\n if 'cad_model_mesh_percent' in update_specs:\n current_cad.cad_model_mesh_percent = update_specs['cad_model_mesh_percent']\n count += 1\n\n if count > 0:\n current_cad.save_cad_model_to_db()\n return {\n 'url': url,\n 'fields': fields\n }, 200\n # return {'msg': gettext('cad_model_update_info_empty')}, 400", "def upsert_client_rate():\n print(request)\n new_client_dict = request.json\n new_client_dict_keys = new_client_dict.keys()\n new_client_dict_values = new_client_dict.values()\n # We want to update if the client exist in the client_rate.json data\n for i in range(1, len(new_client_dict)+1):\n if new_client_dict_keys[i] is None or new_client_dict_values[i] is None:\n continue\n else:\n update_client_rates(new_client_dict_keys[i], new_client_dict_values[i])\n # Or insert a new client-rate pair into client_rate.json data\n # After getting post request - how to update json file?\n return request.get_json()", "def put(self):\n client_data = self.data\n comment_id = client_data['comment_id']\n\n try:\n comment = self.event_comment_table.get_item(CommentID=comment_id)\n except:\n 
self.write_json_with_status(400,{\n 'result' : 'fail',\n 'reason' : 'invalid comment id'\n })\n\n if self.current_userid != comment[\"CreatorID\"]:\n self.write_json_with_status(403,{\n 'result' : 'fail',\n 'reason' : 'Anthantication failed'\n })\n\n comment['Coentent'] = client_data['data']\n comment['Timestamp'] = str(time.time())\n comment.partial_save();\n\n self.write_json({\n 'comment_id' : comment_id,\n 'Timestamp' : comment['Timestamp']\n })", "def test_update_pacient(self):\n url = '/api/v1/pacientes/{}/'.format(self.app_client.id)\n\n data = {\n \"name\": \"Ernesto\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def update(self, an_id: id = None, where_key: str = None, name: str = None, data=None, notes: str = None,\n modified_by: str = None, created_by: str = None, my_conn: Optional[dict] = None,\n t_log: Optional[TimeLogger] = None, verbose: bool = None):\n\n if my_conn is None:\n my_conn = self.my_conn\n else:\n self.my_conn = my_conn\n\n if verbose is True and t_log is None:\n t_log = TimeLogger()\n\n my_conn = my_connect(my_conn=my_conn, t_log=t_log, verbose=verbose)\n conn = my_conn['conn']\n db_params = my_conn['db_params']\n\n if where_key is None:\n where_key = self.id_name()\n\n if an_id is None:\n warn(\"No Record ID Specified\", NoRecordIDError)\n else:\n if data is None:\n data = {}\n\n data.update(add_field('name', name))\n data.update(add_field('notes', notes))\n data.update(add_field('created_by', created_by))\n\n # If there is no data, then skip. Of course one could still change modified by:\n if len(data) > 0 or modified_by is not None:\n\n # Always require a modified by and because one can change data without specifying a modifer,\n # this is necessary. We don't check it before the previous if, because we don't want to create\n # a modified_by if not data was set and no modified_by was set.\n if modified_by is None:\n modified_by = db_params['user']\n\n data.update(modified_by=modified_by)\n\n fields = data.keys()\n\n sql = \"UPDATE {table} SET {fields} WHERE {pkey} = {a_value}\"\n\n if verbose:\n print('Data:\\n', data)\n print('\\nFields:\\n', fields)\n\n query = SQL(sql).format(\n table=Identifier(self.table_name),\n fields=SQL(', ').join(\n Composed([Identifier(k), SQL(' = '), Placeholder(k)]) for k in fields\n ),\n pkey=Identifier(where_key),\n a_value=Placeholder('where_key')\n )\n\n data.update(where_key=an_id)\n\n cur = conn.cursor(cursor_factory=NamedTupleCursor)\n\n if verbose:\n print(query.as_string(conn))\n print(cur.mogrify(query, data))\n\n try:\n cur.execute(query, data)\n except OperationalError as error:\n print(error)\n\n conn.commit()\n\n cur.close()\n\n self.pull_data()", "def update(self):\n self._client.patch(self)", "def update_client(self, old_client=None, new_client=None):\n old_is_client = type(old_client) is Client\n new_is_client = type(new_client) is Client\n\n # cancel if these are no clients\n if not old_is_client and not new_is_client:\n return False\n\n # try to change the id (and its files) first\n old_index = self.get_client_index(old_client)\n id_available = self.set_client_id(\n client=old_client,\n client_id=new_client.client_id\n )\n\n # only go on, if the ID is possible\n if id_available:\n self.client_list[old_index] = new_client\n return True\n else:\n return False", "def ClientUserInfoChanged(self, clientnum):\n cl = Client(clientnum)\n \n current_name = cl[\"name\"]\n new_name = current_name[::-1] #reverse the string\n cl[\"name\"] = new_name #update 
userinfo (effective in game)\n #short version : cl[\"name\"] = cl_[\"name\"][::-1]", "def fusion_api_update_client_certificate(self, aliasname, body, api=None, headers=None):\n return self.client_certificate.put(aliasname, body, api, headers)", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update(self, data):\n pass", "def update_data():\n pass", "def update(self, identifier, data):\n self.client.request_with_method(Methods.UPDATE % (self.name, identifier,),\n data=data)", "def update(self, path, **kwargs):\n client = self.connect(VAULT_TOKEN)\n\n existing = client.read(path)\n if existing is None:\n existing = {}\n else:\n existing = existing[\"data\"]\n\n existing.update(kwargs)\n\n client.write(path, **existing)", "def supplement_secondary_client_data(self, app_idx):\n data = {\n 'name': app_idx.pretty_name(),\n 'version': app_idx.version(),\n 'type': 'generic',\n }\n self.client.secondary_client.update(data)\n try:\n self.all_details['client']['secondary_client'].update(data)\n except KeyError:\n self.all_details['client']['secondary_client'] = data", "def refresh(dataset, client):\n pass", "def alterar_cliente(self, ID, nome, sobrenome, tel_list, email_list, empresa):\r\n if nome != '':\r\n print(f'Alterando nome para {nome}')\r\n self.clientes[ID].nome = nome.title()\r\n elif sobrenome != '':\r\n print(f'Alterando sobrenome para {sobrenome}')\r\n self.clientes[ID].sobrenome = sobrenome.title()\r\n elif len(tel_list) > 0:\r\n print(f'Alterando telefones para {tel_list}')\r\n self.clientes[ID].tel_list = tel_list\r\n elif len(email_list) > 0:\r\n print(f'Alterando email para {email_list}')\r\n self.clientes[ID].email_list = email_list\r\n elif empresa != '':\r\n print(f'Alterando empresa para {empresa}')\r\n self.clientes[ID].empresa = empresa.title()", "def updateNote(self, authenticationToken, note):\r\n pass", "def put(first_name,last_name,name,note):\n repository = NotationRepository()\n notation = repository.update(first_name = first_name, last_name = last_name, name = name, note = note)\n return jsonify({\"notation\": notation.json})", "def get_patient_data(self, client):\n for patient in self._monitored_patients.get_patient_list():\n # print(\"Requesting data for \" + patient.first_name+\" \"+patient.last_name+\"...\")\n patient.update_data(client.get_patient_data(patient.id))", "def change_client_name(self, name, client):\n if self.name_is_unique(name):\n client.set_name(name)\n self.send_message('Usuario actualizado exitosamente.', client.get_socket())\n else:\n self.send_message('Nombre repetido.', client.get_socket())", "def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)", "def client_details(self, 
value):\n self._client_details = value", "def _update(self, data: Dict[str, Any], fields_to_modify: List[str]):\n pass", "async def update_note(note,tags=None):\n\n assert isinstance(note, dict), note\n\n id = note.pop('id')\n title = note.pop('title')\n body = note.pop('body')\n pid = note.pop('parent_id')\n\n # fetch tags from server. There are note returned b which are not returned by `get_note`\n if tags:\n note['tags'] = ', '.join(tags)\n else:\n tags = (await joplin.get_notes_tags(id)).json()\n note['tags'] = ', '.join([t['title'] for t in tags])\n\n \n \n # see https://github.com/foxmask/joplin-api/blob/master/joplin_api/core.py\n res = await joplin.update_note(id,title, body, pid, **note)\n assert res.status_code == 200, res", "def update_note(self, new_note):\r\n self.__note = new_note", "def update(self, customerguid, name=\"\", login=\"\", password=\"\", email=\"\", address=\"\", vat=\"\", jobguid=\"\", executionparams=None):", "def client_patch(self, path, data=None, content_type=client.MULTIPART_CONTENT, follow=False, **extra):\r\n\r\n data = data or {}\r\n response = super(client.Client, self).patch(path, data=data, content_type=content_type, **extra)\r\n if follow:\r\n response = self._handle_redirects(response, **extra)\r\n return response", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def update(self, *args, **kwargs):\n pass", "def noteUpdate(ownerId, noteId, text):\n query = QUERY_UPDATE_NOTE\n query = query.format(**{'owner_id':ownerId, 'note_id':noteId, 'text':text})\n\n try:\n cursor.execute(query)\n connection.commit()\n except Exception as e:\n return False, ERROR_UPDATE_NOTE, 'Note update failed'\n\n return True, NO_ERROR, 'Updated successfuly!'", "def update(self, data):\n if self.service is not None:\n self.service.update_response(data)", "def edit_notes(entry):\n entry.notes = get_notes()\n entry.save()\n input(\"Edit successful. 
\")\n return entry", "def update(self, line_id: int, data=None, **options) -> Dict:\n data = data or {}\n return self._call(f\"{line_id}\", data=data, method=\"PUT\", **options)", "def test_update(self):\n payload = {\n 'name': 'Pecho inclinado',\n 'description': \"New description\",\n 'muscle_group': \"pecho\"\n }\n response = self.client.put(\n '/exercises/{}/'.format(self.exer1.id), data=payload)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(\n Exercise.objects.get(id=self.exer1.id).name, payload['name'])", "async def update_one(self, where, data):\n\n pass", "def test_customer_update(self):\n # first performe create\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then performe update\n data = { \n \"name\": \"Changed the name\",\n \"email\": self.customer_data[\"email\"],\n \"phone\": self.customer_data[\"phone\"]\n }\n self._update_model(\"customer\", id, data, [\"name\"])\n self.assertIsNotNone(id)", "def edit(self, hardware_id, userdata=None, hostname=None, domain=None,\r\n notes=None):\r\n\r\n obj = {}\r\n if userdata:\r\n self.hardware.setUserMetadata([userdata], id=hardware_id)\r\n\r\n if hostname:\r\n obj['hostname'] = hostname\r\n\r\n if domain:\r\n obj['domain'] = domain\r\n\r\n if notes:\r\n obj['notes'] = notes\r\n\r\n if not obj:\r\n return True\r\n\r\n return self.hardware.editObject(obj, id=hardware_id)", "def set_client_id(self, client=None, client_id=None):\n # cancel if its not a client object\n is_client = type(client) is Client\n\n if not is_client:\n return False\n\n # check id\n id_exists = client_id in [c.client_id for c in self.client_list]\n id_is_empty = client_id == ''\n id_is_own = client.client_id == client_id\n\n # cancel with true, if there's no need to change the ID\n if id_is_own:\n return True\n\n # cancel if it's no client or the client_id does already exist or is empty\n if id_exists or id_is_empty:\n return False\n\n # change every client_id of the projects of the original client\n for p in self.get_client_projects(client=client):\n # get old and new project\n old_p = p.copy()\n new_p = p.copy()\n new_p.client_id = client_id\n\n # set new projects client_id\n self.set_project_id(\n old_project=old_p,\n new_project=new_p\n )\n\n # rename the file\n self.rename_client_file(\n old_client_id=client.client_id,\n new_client_id=client_id\n )\n\n # get index\n index = self.get_client_index(client)\n\n # change the client_id of the original client to the new id\n self.client_list[index].client_id = client_id\n\n # get new client and save it\n self.save_client_to_file(client=self.client_list[index])\n\n return True", "def test_update_note(self):\n pass", "def update(self, attrs):\n if attrs.get('name'):\n self.name = string.capwords(attrs.get('name'))\n if attrs.get('description'):\n self.description = attrs.get('description')\n if attrs.get('author'):\n self.author = attrs.get('author')\n\n try:\n db.session.add(self)\n db.session.commit()\n except IntegrityError as err:\n if isinstance(err.orig, UniqueViolation):\n raise Conflict(\"Name already used by another exercise.\")\n raise UnexpectedError(DATABASE_ERROR_MSG)\n except DBAPIError as err:\n raise UnexpectedError(DATABASE_ERROR_MSG)", "def note_update(request, pk):\n try:\n note = Note.objects.get(id=pk)\n serializer = NoteSerializer(instance=note, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n except Exception:\n return Response(\"Something terrible went 
wrong. Can't update this note.\")", "def update(self, params):", "def do_update_data(self, *args):\n print(\"Provide data to update :\")\n id_field = dict()\n id_field['id'] = input(\"Provide id to update :\")\n values = {**id_field, **self.__class__.populate_data()}\n self.connection_obj.update_into_table(**values)\n print(\"Data Update Successful\")", "def update():\n return 'update api in put'", "def test_update(self):\n doctor = DoctorFactory.create(id=21)\n data = {'name': 'Joe'}\n self.assertNotEqual(doctor.name, data['name'])\n\n response = self.unath_client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.put(reverse('doctor-detail', args=[21]), data=data)\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_update_office(self):\n url = '/api/v1/consultorios/{}/'.format(self.app_client.id)\n\n data = {\n \"hospital\": \"Hospital 2\"\n }\n\n request = self.client.patch(url, data)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def test_update_customer(self):\n # create a customer to update \n test_customer = self._create_customers(\"Alex\")\n resp = self.app.post(\n \"/customers\", json=test_customer.serialize(), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_201_CREATED)\n \n # update the customer\n new_customer = resp.get_json()\n logging.debug(new_customer)\n new_customer[\"address\"] = \"unknown\"\n resp = self.app.put(\n \"/customers/{}\".format(new_customer[\"id\"]),\n json=new_customer,\n content_type=\"application/json\",\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n updated_customer = resp.get_json()\n self.assertEqual(updated_customer[\"address\"], \"unknown\")", "def update(self, data):\n self.data.update(data)", "def change_client_info(request: Request) -> Dict:\n ser = ChangeClientInfoSerializer(data=request.data)\n if ser.is_valid():\n if ser.validated_data.get('email') and request.user.client.email != ser.validated_data['email']:\n request.user.client.activated = False\n new_email = UserEmail(template_id=1, user=request.user)\n new_email.generate_code()\n is_send = send_email_to_user(1, [request.user.client.email], f'https://royal-lion.bet/activate/{new_email.code}')\n if is_send:\n new_email.save()\n request.user.client.save()\n ser.update(request.user.client, validated_data=ser.validated_data)\n return {'data': 'ok', 'success': True}\n else:\n return {'errors': ser.errors, 'success': False}", "def update_dataset(self, dataset, name=None, description=None):\n uri = URITemplate(self.baseuri + '/{owner}/{id}').expand(\n owner=self.username, id=dataset)\n return self.session.patch(uri, json=self._attribs(name, description))", "def note_update(self, upd_note_handle_list):\n for handle in upd_note_handle_list :\n if handle in self.data:\n self.rebuild()\n break", "def update(self, *args, **kw):\n pass", "def updateOne(id):\n print(inspect.stack()[1][3])\n # read data from the API call\n req_data = request.get_json()\n\n query = select([Followup]).where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n ResultSet = ResultProxy.fetchone()\n if(not ResultSet):\n return {'error': 'Unable to Find the given client'}\n\n # Update the URL\n json_data = {}\n\n for req in req_data:\n if (req in Followup.c.keys()):\n json_data[req] = req_data[req]\n\n query = (\n update(Followup).\n where(Followup.columns.id == id).\n values(json_data)\n )\n ResultProxy = 
connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to Update the given client'}\n return {'status': \"Update Succesful\"}", "def test_update_note(self):\n\n url = reverse(\n 'crm-admin:note-update',\n kwargs={\n 'pk': self.object.id\n }\n )\n\n # Test that the page load first\n response = self.c.get(url)\n self.assertEqual(response.status_code, 200)\n\n # Send data\n data = {\n 'comment': 'other value'\n }\n response = self.c.post(url, data)\n self.assertEqual(response.status_code, 302)\n\n # Get the latest added object\n obj = Note.objects.get(id=self.object.id)\n self.assertEqual(obj.comment, 'other value')", "def test_client_partial_update(self):\n pass", "def test_if_user_can_update_data_added(self):\n drink_data = self.test_data[\"drinks\"][0]\n # save a drink\n drink = Drink(**drink_data)\n drink.save()\n\n record_data = self.test_data[\"data\"][0]\n data = Data(\n favorite_drink=drink,\n consumer_name=record_data[\"consumer_name\"],\n location=record_data[\"location\"],\n collector=self.user,\n location_longitude=record_data[\"location_longitude\"],\n location_latitude=record_data[\"location_latitude\"]\n )\n # save a data record\n data.save()\n\n # retrieve the added data record\n url = \"/data/record/%s/\" % data._id\n get_response = self.client.get(url)\n\n self.assertEqual(get_response.status_code,\n status.HTTP_200_OK)\n recieved_data = get_response.json()\n self.assertEqual(recieved_data[\"consumer_name\"],\n \"dirk nowitzki\")\n\n # update the data record\n update_payload = {\n \"drink_id\": str(drink._id),\n \"consumer_name\": \"erick omondi\",\n \"location\": \"buruburu\",\n \"location_longitude\": \"55.255\",\n \"location_latitude\": \"74.2245\"\n }\n\n put_response = self.client.put(url, update_payload, format=\"json\")\n self.assertEqual(put_response.status_code,\n status.HTTP_200_OK)\n\n # retrieve the updated record\n updated_data = Data.objects.all()[0]\n # assert it has been updated\n self.assertNotEqual(updated_data.consumer_name,\n recieved_data[\"consumer_name\"])\n\n # delete the record\n delete_response = self.client.delete(url)\n # assert the status code is 204 no content\n self.assertEqual(delete_response.status_code,\n status.HTTP_204_NO_CONTENT)\n # assert the record was actually deleted from the database\n data_count = Data.objects.count()\n self.assertEqual(data_count, 0)", "def update_rec(self):\n print(\"Write phone number:\")\n update_phone_number_input = input()\n print(\"Write new name of the record:\")\n update_name_input = input()\n print(\"Write new address:\")\n update_address_input = input()\n return self.storage.update(\n update_phone_number_input, update_name_input, update_address_input\n )", "def edit_current_note():\n note_id = request.form.get(\"note_id\")\n\n edited_note = Note.query.get(note_id)\n\n edited_note.title_note = request.form.get(\"title\")\n edited_note.note = request.form.get(\"note\")\n\n\n db.session.commit()\n \n return \"note edited\"", "def test_update(client):\n rv = update(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def edit(self, instance_id, userdata=None, hostname=None, domain=None,\r\n notes=None):\r\n\r\n obj = {}\r\n if userdata:\r\n self.guest.setUserMetadata([userdata], id=instance_id)\r\n\r\n if hostname:\r\n obj['hostname'] = hostname\r\n\r\n if domain:\r\n obj['domain'] = domain\r\n\r\n if notes:\r\n obj['notes'] = notes\r\n\r\n if not obj:\r\n return True\r\n\r\n return self.guest.editObject(obj, 
id=instance_id)", "def __set_client_detail(self):\r\n ClientDetail = self.client.factory.create('ClientDetail')\r\n ClientDetail.AccountNumber = self.config_obj.account_number\r\n ClientDetail.MeterNumber = self.config_obj.meter_number\r\n ClientDetail.IntegratorId = self.config_obj.integrator_id\r\n if hasattr(ClientDetail, 'Region'):\r\n ClientDetail.Region = self.config_obj.express_region_code\r\n self.ClientDetail = ClientDetail", "def update_service_data(self, data, etag):\r\n self.service.name = data[\"service\"][\"name\"]\r\n self.service.etag = etag\r\n self.service.set_mirrors(data[\"service\"][\"filelocations\"])\r\n self.service.torrent = data[\"service\"].get(\"torrents\", \"\")\r\n self.service.save()", "def set_client_id(self):\n data = self.receive() # deserialized data\n client_id = data['clientid'] # extracts client id from data\n self.client_id = client_id # sets the client id to this client\n print(\"Successfully connected to server: \" + self.userInfo['host'] + \" / \" + str(self.userInfo['port']))\n print(\"Your client info is:\\n\" + \"Client Name: \" + self.userInfo['name'] + \"\\nClient ID: \" + str(client_id))", "def update_client_rates(client_id: str, rate: float):\n\n import pandas as pd\n df = pd.read_json(\"client_rate.json\")\n df_dict = df.to_dict()\n if client_id in df_dict:\n df_dict[str(client_id)]['rate'] = rate\n else:\n df_dict[str(client_id)]['rate'] = rate\n df_dict.to_json(\"client_rate.json\")\n\n\n\n # check if exist\n # replace or add client rate\n # re-write the file", "def write(self, client):\n client.write(self.path, **self.obj())", "def updateCustomer(self, **params):\n self.__requireParams(params, ['id'])\n return self.__req('update_customer', params)", "def update(self, name=None, password=None, host=None):\n return self.manager.update(self, name=name, password=password,\n host=host)", "def update_servicech(self, conf, phone_num, body):\n\t\tpass", "def update_server_details(self, server_id, name, user_id, external_id=None, description=None):\n\n data = {\n 'name': name,\n 'user': user_id,\n 'external_id': external_id,\n 'description': description\n }\n\n response = self._api_request(\n endpoint='application/servers/{}/details'.format(server_id),\n mode='PATCH', data=data, json=False)\n return response", "def DoUpdate(options, args):\n client = GClient.LoadCurrentConfig(options)\n\n if not client:\n raise gclient_utils.Error(\"client not configured; see 'gclient config'\")\n\n if not options.head:\n solutions = client.GetVar('solutions')\n if solutions:\n for s in solutions:\n if s.get('safesync_url', ''):\n # rip through revisions and make sure we're not over-riding\n # something that was explicitly passed\n has_key = False\n for r in options.revisions:\n if r.split('@')[0] == s['name']:\n has_key = True\n break\n\n if not has_key:\n handle = urllib.urlopen(s['safesync_url'])\n rev = handle.read().strip()\n handle.close()\n if len(rev):\n options.revisions.append(s['name']+'@'+rev)\n\n if options.verbose:\n # Print out the .gclient file. 
This is longer than if we just printed the\n # client dict, but more legible, and it might contain helpful comments.\n print(client.ConfigContent())\n return client.RunOnDeps('update', args)", "def _put(self, data, comment_id, obj):\n comment = obj\n comment_id = int(comment_id)\n\n # Ensure that user and customer have not been changed (they can only be written once)\n if data['user_id'] != comment['user_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change user ID in \"\n f\"comment '{comment_id}'\")\n if data['ticket_id'] != comment['ticket_id']:\n flask_restful.abort(400, message=f\"Bad Request - cannot change ticket ID in \"\n f\"comment '{comment_id}'\")\n\n # Remove keys that are not in the new resource\n keys_to_remove = [stored_key for stored_key in comment.keys()\n if stored_key not in data]\n for old_key in keys_to_remove:\n DB_COMMENT_TABLE.update(delete(old_key), doc_ids=[comment_id])\n DB_COMMENT_TABLE.update(data, doc_ids=[comment_id])\n return Comment.get_self_url(comment_id=comment_id)", "def update_note(id):\n\n\n note = Note.query.get_or_404(id)\n form = NoteForm(obj=note)\n\n if form.validate_on_submit():\n\n note.title = form.title.data\n note.description = form.description.data\n note.updated_on = datetime.utcnow()\n\n try:\n db.session.add(note)\n db.session.commit()\n flash('You have successfully added a new note.', 'info')\n # redirect to the client's page\n return redirect(url_for('inquiry.read_inquiry', id=id))\n except:\n flash('Error creating the note', 'error')\n\n # load note form template\n return render_template('note/form.html.j2', form=form, title='Update Note')", "def _update_from_rest_data(self) -> None:", "def remote_Update(self, data):\r\n\t\t# server doesn't need to know if this fails\r\n\t\t# the server should be infallable, so the problem is in the client\r\n\t\ttry:\r\n\t\t\treturn self.onUpdate(data)\r\n\t\texcept Exception, e:\t# the client might be out of date\r\n\t\t\tlog.err(\"Unable to handle data: %s\" % data)\r\n\t\t\tlog.err(e)", "def writeDataToClient(self, client, name):\n\n index = len(self.messagesList)\n\n while name in self.connected:\n if len(self.messagesList) > index:\n if self.messagesList[index][1] != name:\n client.send(self.messagesList[index][0])\n index += 1\n else:\n index += 1", "def update(self,request,pk = None):\n return Response({'http_method':'PUT'})", "def svn_client_ctx_t_client_name_set(svn_client_ctx_t_self, char_client_name): # real signature unknown; restored from __doc__\n pass", "def put_note(note):\n if 'id' not in note:\n note['id'] = new_note_id()\n if 'ctime' not in note:\n note['ctime'] = util.utcnow_as_str()\n db.getdb().note.update({'id': note['id']}, note, upsert=True)\n return note", "def update(self, request, pk=None):\n lot = Lot.objects.get(pk=request.data[\"lotId\"])\n\n project = Project.objects.get(pk=pk)\n project.name = request.data[\"name\"]\n project.estimatedCost = request.data[\"estimatedCost\"]\n project.estimatedCompletionDate = request.data[\"estimatedCompletionDate\"]\n #project.projectNote = Note.objects.get(pk=request.data['projectNote'])\n\n project.lotId = lot\n project.save()\n\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def edit_customer(customer_id, password, name, email, phone):\n with MY_CONNECTION as connection:\n connection.execute(\n \"\"\"\n UPDATE Customers\n SET password=?, customer_name=?, phone=?, email=?\n WHERE id_customer=?\n \"\"\",\n (password, name, phone, email, customer_id))", "def add_client(self, client):\n\n now = 
int(time.time())\n\n self.send_line(\"%s N %s 1 %d %s %s +ik ]]]]]] %s :%s\" %\\\n (self.config[\"numeric\"], client.nick, now, client.user,\n client.host, client.uid, client.gecos))", "def update(self):\n return self._api.update_customer(**to_dict(self))" ]
[ "0.67937523", "0.65493184", "0.6435495", "0.62011623", "0.6047726", "0.5984437", "0.5956502", "0.5945212", "0.5844981", "0.57898957", "0.5773103", "0.57370985", "0.5677678", "0.55478555", "0.5545997", "0.55407304", "0.55150896", "0.55113846", "0.5479551", "0.5476245", "0.5462785", "0.54450554", "0.54415727", "0.5432323", "0.5432323", "0.5432323", "0.5432323", "0.5420038", "0.54002213", "0.539261", "0.5354657", "0.5353795", "0.5346333", "0.5326683", "0.5295966", "0.52717805", "0.5257962", "0.52457875", "0.5245683", "0.5220536", "0.5213527", "0.5206474", "0.5196613", "0.51891536", "0.5153152", "0.5153152", "0.5153152", "0.5140359", "0.51376665", "0.51308876", "0.5130876", "0.5123262", "0.5122812", "0.5121818", "0.51110536", "0.51094264", "0.5102752", "0.51016283", "0.5085251", "0.5083851", "0.50824106", "0.5072267", "0.5070393", "0.5068138", "0.5067343", "0.5061855", "0.50607044", "0.50605035", "0.5055957", "0.5054163", "0.5050236", "0.50499797", "0.5036838", "0.50235325", "0.50143445", "0.4999148", "0.4997682", "0.49799415", "0.49564505", "0.49435657", "0.49411455", "0.49410328", "0.4939243", "0.49388465", "0.49335712", "0.4932001", "0.49314892", "0.49293995", "0.49194646", "0.49129543", "0.4912756", "0.4910509", "0.49018848", "0.4897578", "0.48932183", "0.48882246", "0.4884504", "0.48837495", "0.48815104", "0.48697475" ]
0.7835998
0
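Every negative in the record above is a variant of the same operation: look up a stored client record and mutate its fields. A minimal sketch of that pattern, assuming an in-memory dict store and hypothetical field names — nothing here is taken verbatim from any one snippet in the list:

    clients = {}  # assumed store: client_id -> {"name": ..., "email": ...}

    def update_client(client_id, **fields):
        """Overwrite only the fields that were actually supplied."""
        client = clients.get(client_id)
        if client is None:
            raise KeyError("Client {0!r} isn't in the client list".format(client_id))
        # Skip keys passed as None so partial updates leave other fields alone.
        client.update({k: v for k, v in fields.items() if v is not None})
        return client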
Delete the specified client
def deleteClient(self, id):
    response = self.postRequest(Endpoints.CLIENTS + '/{0}'.format(id), method='DELETE')
    return response
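The helper above is a thin wrapper around an HTTP DELETE against a /clients/{id} endpoint. A self-contained sketch of the same operation using the requests library — the base URL is an assumption for illustration, not part of the snippet:

    import requests

    BASE_URL = "https://api.example.com/clients"  # assumed endpoint

    def delete_client(client_id):
        """Issue the same DELETE the snippet above performs via postRequest()."""
        response = requests.delete('{0}/{1}'.format(BASE_URL, client_id))
        response.raise_for_status()  # surface non-2xx responses as exceptions
        return response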
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, client):\n log(\"Deleting %s\" % self, self.opt)\n client.delete(self.path)", "def delete_client(self, client):\n for c in self.clients:\n if client == c:\n self.clients.remove(c)", "def delete(self, **kwargs):\n self.dbdel('client', kwargs)", "def delete_client(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = DeleteClient.create(\n client_id=client_id,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def delete_client():\n preserve_cache = request.args.get('preserve_cache', False)\n delete_client(g.client_id, preserve_cache)\n return jsonify({'Success': True})", "def delete_client(client_name):\n global clients\n\n if client_name in clients:\n clients.remove(client_name)\n else:\n print(\"Client isn\\'t in the client list\")", "def remove_client(self, client):\n self.clients.remove(client)\n #print(\"removing:\" + str(client))", "def test_delete_client(self):\n pass", "async def delete_client_async(\n client_id: str, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs\n):\n request = DeleteClient.create(\n client_id=client_id,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def delete_client_instance(cls, session):\n if cls.SESSION_ID_KEY in session:\n client_instance_id = session[cls.SESSION_ID_KEY]\n \n if client_instance_id in cls.client_instances:\n del cls.client_instances[client_instance_id]", "def delete_client(client_id, preserve_cache):\n if client_id in drivers:\n drivers.pop(client_id).quit()\n try:\n timers[client_id].stop()\n timers[client_id] = None\n release_semaphore(client_id)\n semaphores[client_id] = None\n except:\n pass\n\n if not preserve_cache:\n pth = CHROME_CACHE_PATH + g.client_id\n shutil.rmtree(pth)", "def test_05_delete_client(self):\n try:\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n client.host_name = test_str[1]\n client.user_name = test_str[2]\n ClientsUnitTest._client_dao.add(client)\n self.assertTrue(\n ClientsUnitTest._client_dao.get_client(client.user_id))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n for k, v in self.test_data.items():\n client = Client()\n test_str = v.split(',')\n client.user_id = test_str[0]\n self.assertTrue(ClientsUnitTest._client_dao.delete(client))\n\n except ClientAlreadyExistsException as error:\n print(error.get_message())\n\n except ClientNotFoundException as error:\n print(error.get_message())\n\n except DBException as error:\n print(error.get_message())", "def delete(self, key, version=None, client=None):\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n try:\r\n return client.delete(self.make_key(key, version=version))\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def delete(self, id: int):\n\n del self.__clients[id]", "def remove_client(self, client):\n client_conn = self.all_clients[client]\n\n self.all_clients.pop(client)\n self.all_connections.remove(client_conn)\n\n # client_conn.shutdown(2)\n client_conn.close()", "def delete_generated_client() -> None:\n shutil.rmtree(CLIENT_DIR, ignore_errors=True)", "def remove_client(self, username):\n try:\n self.clients.pop(username)\n except KeyError:\n print 'Client not in server.clients{}'", "def kill_client(self, client):\n 
self.delete_client(client)\n for room in self.rooms:\n room.delete_client_from_invited(client)\n room.delete_client_from_members(client)", "def delete(self, id_cliente):\n cliente = delete_cliente(id_cliente)\n if not cliente:\n api.abort(404)\n else:\n return cliente", "def test_client_delete(self, mock_input, mock_pass):\n # Patch username and password.\n mock_input.return_value = \"user\"\n mock_pass.return_value = \"pass\"\n\n # Instantiate Agave object making reference to local mock server.\n local_uri = \"http://localhost:{port}/\".format(port=self.mock_server_port)\n ag = Agave(api_server=local_uri)\n ag.client_name = \"client-name\"\n ag.api_key = \"some api key\"\n ag.api_secret = \"some secret\"\n\n # Create client.\n ag.clients_delete()\n\n assert ag.api_key == \"\"\n assert ag.api_secret == \"\"", "async def delete_client_permission_async(\n action: int,\n client_id: str,\n resource: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = DeleteClientPermission.create(\n action=action,\n client_id=client_id,\n resource=resource,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def delete_connection_entry(self,client_id):\n del self.connections[client_id]", "def close_client(client):\r\n client.close()\r\n broadcast(bytes(\"%s ha abbandonato\" % clients[client], \"utf8\"))\r\n del clients[client]\r\n del roles[client]\r\n del score[client]\r\n del address[client]", "def delete(self, klient):\n try:\n c = self.conn.cursor()\n # usuń pozycje\n c.execute('DELETE FROM Lokaty WHERE klient_id=?', (klient.id,))\n # usuń nagłowek\n c.execute('DELETE FROM Klient WHERE id=?', (klient.id,))\n\n except Exception as e:\n #print \"klient delete error:\", e\n raise RepositoryException('error deleting klient %s' % str(klient))", "def delete_actual_plan_client(client_id):\n node_client = Params.PREFIX['client'] + str(client_id)\n res = db.child(\"chosenPlans\").child(node_client).remove()\n return res", "def delClient (self, index, remove_password=True) :\r\n old_password, old_connection = self.clients[index]\r\n \r\n if not old_connection :\r\n raise Exception(\"Slot wasn't occupied\")\r\n \r\n if remove_password :\r\n old_password = None\r\n \r\n self.clients[index] = old_password, None\r\n \r\n self._updateHeartbeat()", "def delete_client_method(self, client, schema_url, client_name):\n self.info(\"check delete method on {} client\".format(client_name))\n self.info(\"check the existence of the client in BCDB, it should be exist\")\n model = j.data.bcdb.system.model_get(url=schema_url)\n if model.get_by_name(name=client_name):\n self.info(\"try to delete the client using delete method and check again, it shouldn't be exist\")\n client.delete()\n self.info(\"check the existence of the client in BCDB\")\n try:\n model.get_by_name(name=client_name)\n except Exception:\n pass\n return True\n else:\n return False", "def delete(self):\n self._client.delete(self)", "def delete_client_permission(\n action: int,\n client_id: str,\n resource: str,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n request = DeleteClientPermission.create(\n action=action,\n client_id=client_id,\n resource=resource,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def __removeClient(self):\n client = self.sender()\n if (client in self.__clients):\n self.__clients.remove(client)\n \n print \"disconnect from\", self.__clientName(client)", "def delete_client(self, id):\n 
client = self.dbsession.query(Client).filter_by(id=id).first()\n if client is None:\n return False\n for experimentgroup in client.experimentgroups:\n experimentgroup.clients.remove(client)\n self.dbsession.delete(client)\n return [] == self.dbsession.query(Client).filter_by(id=id).all()", "def delete_client_file(self, client=None):\n if type(client) is not Client:\n return False\n\n path = self.data_path + self.client_dir\n\n # generate filenames\n filename = path + '/' + self.us(client.client_id) + '.flclient'\n\n # check if the file exists and delete it\n if os.path.isfile(filename):\n os.remove(filename)\n return True\n else:\n return False", "def test_delete_o_auth_client(self):\n pass", "def DeleteOIDCClient(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def delete(client, indices):\n return delete_indices(client, indices)", "def __del__(self) -> None:\n if self.close_client_at_del:\n self.client.close()", "async def delete_client_by_namespace_async(\n client_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DeleteClientByNamespace.create(\n client_id=client_id,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def remove_client(self, ip):\n cli = self.has_client(ip)\n if cli is not None:\n self.clients.remove(cli)", "def cmd_comment_delete(client, args):\n delete_comment = client.delete_comment(args.comment_id)\n generate_output({'delete_comment': delete_comment})", "def delete(self, path):\n client = self.connect(VAULT_TOKEN)\n client.delete(path)", "def test_client_delete_view(self):\r\n response = self.test_client.post(\r\n reverse('client_delete', kwargs={'pk': self.client1_id}))\r\n self.assertEqual(response.status_code, 302)\r\n self.assertRaises(ObjectDoesNotExist,\r\n Client.objects.get,\r\n id=self.client1_id)", "def test_client_nationlity_delete(self):\n pass", "def cmd_conversation_delete(client, args):\n delete_conversation = client.delete_conversation(args.conversation_id)\n generate_output({'delete_conversation': delete_conversation})", "def delete():", "def delete():\n # Must be logged in to perform any delete commands.\n auth_required()\n pass", "def delete_node_categories_client():\n\n firebase = pyrebase.initialize_app(config)\n db = firebase.database()\n db.child(\"categories/clients\").remove()", "def fusion_api_delete_client_certificate(self, aliasname, api=None, headers=None):\n return self.client_certificate.delete(aliasname, api, headers)", "def delete(self, *args, **kwargs):\n self.request(\"delete\", *args, **kwargs)", "def delete(self):\n # Delete from cache first\n if self._cache:\n cache_key = 'datastore_orm.{}.{}'.format(self.__class__.__name__, self.key.id_or_name)\n self._cache.delete(cache_key)\n\n # Pass the key for deleting from other clients in background\n if len(self._clients) > 1:\n Thread(target=self.background_delete, args=(self.key, self._clients[1:])).start()\n\n # Delete the key from 1st client\n self._clients[0].delete(self.key)", "def delete(self):\n if self._cache:\n cache_key = 'datastore_orm.{}.{}'.format(self.kind, self.id_or_name)\n self._cache.delete(cache_key)\n\n if len(self._clients) > 1:\n Thread(target=self.background_delete, args=(self, 
self._clients[1:])).start()\n\n self._clients[0].delete(self)", "def delete_session_entry(self,session_id,client_id):\n del self.sessions[session_id][\"USERS\"][client_id]", "def svn_client_delete(svn_client_commit_info_t_commit_info_p, apr_array_header_t_paths, svn_boolean_t_force, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def test_client_address_delete(self):\n pass", "def remove_user_from_db(choice):\n client_detail_list = sqlite3.connect('../db/client_list.db')\n client_db = client_detail_list.cursor()\n client_db.execute(\"DELETE FROM clients WHERE nickname=?\", (choice,))\n client_detail_list.commit()\n client_detail_list.close()", "def delete(self):\n raise NotImplementedError(\"Deleting not supported for servers\")", "def delete(clients, context):\n port_id = context['port_id']\n logger.info(\"Taking action port.delete {}\".format(port_id))\n neutron = clients.get_neutron()\n neutron.delete_port(port_id)", "def delete_client_by_namespace(\n client_id: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = DeleteClientByNamespace.create(\n client_id=client_id,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def delete(self):\n self.method = \"DELETE\"\n self.send()", "def test_client_delete_cascade(self):\n\n assert 12 == self.session.query(Invoice).count()\n assert 24 == self.session.query(Iitem).count()\n assert 48 == self.session.query(Citem).count()\n assert 12 == self.session.query(Contract).count()\n assert 4 == self.session.query(Employee).count()\n assert 2 == self.session.query(Client).count()\n assert 2 == self.session.query(ClientMemo).count()\n assert 2 == self.session.query(EmployeePayment).count()\n assert 4 == self.session.query(EmployeeMemo).count()\n assert 48 == self.session.query(ContractItemCommItem).count()\n assert 1 == self.session.query(Payroll).count()\n logger.debug('DELETECLIENT')\n delcl = self.session.query(Client)[0]\n logger.debug(delcl)\n\n with self.session.no_autoflush:\n self.session.delete(self.session.query(Client)[0])\n assert 4 == self.session.query(Employee).count()\n assert 8 == self.session.query(Contract).count()\n assert 8 == self.session.query(Invoice).count()\n assert 16 == self.session.query(Iitem).count()\n assert 32 == self.session.query(Citem).count()\n assert 1 == self.session.query(Client).count()\n assert 1 == self.session.query(ClientMemo).count()\n assert 1 == self.session.query(EmployeePayment).count()\n assert 4 == self.session.query(EmployeeMemo).count()\n assert 32 == self.session.query(ContractItemCommItem).count()\n assert 1 == self.session.query(Payroll).count()", "def test_delete_o_auth_client_authorization(self):\n pass", "def removeClient(self, msg):\r\n guiControlClientId = msg[Messages.FIELD_GUI_CONTROL]\r\n if guiControlClientId != None:\r\n self.controllingClient.clear()\r\n LOG(\"Removed the controlling client: \" + repr(guiControlClientId) + \" - \" +\r\n repr(msg[Messages.FIELD_GUI_CONTROL_HOST]))\r\n guiMonitoringClientId = msg[Messages.FIELD_GUI_LIST]\r\n if self.monitoringClients.has_key(guiMonitoringClientId):\r\n del self.monitoringClients[guiMonitoringClientId]\r\n LOG(\"Removed the monitoring client: \" + repr(guiMonitoringClientId) + \" - \" +\r\n repr(msg[Messages.FIELD_GUI_HOST_LIST]))", "def on_delete(self, 
**kwargs):\n self.get_client()\n self.delete(self, kwargs['sender'], kwargs['instance'])", "def test_client_document_delete(self):\n pass", "def delete(self, _id):", "def delete(self, *args, **kw):\n kw['method'] = 'DELETE'\n return self.open(*args, **kw)", "async def admin_delete_client_permission_v3_async(\n action: int,\n client_id: str,\n resource: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = AdminDeleteClientPermissionV3.create(\n action=action,\n client_id=client_id,\n resource=resource,\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def send(self, client, data):\n try:\n client.send(data)\n except Exception:\n self.clients.remove(client)", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def _delete(self, *args, **kwargs):\n return self._request('delete', *args, **kwargs)", "def delete(self, app_prefix, path):\n return self.handle_request('delete', app_prefix, path)", "def delete_client_from_experiment(self, client_id, experiment_id):\n expgroup = self.get_experimentgroup_for_client_in_experiment(client_id, experiment_id)\n client = Client.get(client_id)\n if expgroup is None or client is None:\n return None\n client.experimentgroups.remove(expgroup)\n result = expgroup not in self.dbsession.query(Client).filter_by(\n id=client_id).first().experimentgroups and client not in expgroup.clients\n return result", "def delete_server(ServerName=None):\n pass", "def cleanup_aerospike(args, client):\n client.close()", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def test_delete_works(client):\n\n # Create one\n proto_reminder['message'] = 'test_delete_works'\n res = client.post('/api/reminders', json=proto_reminder)\n print(\"Got response:\", res.data)\n reminder = json.loads(res.data)\n print(\"Got response:\", reminder)\n # Delete it\n res = client.delete('/api/reminders/{}'.format(reminder['guid']))\n assert res.status_code == 200\n assert res.content_type == 'application/json'\n # Get and ensure it's not there\n res = client.get('/api/reminders')\n print(\"Got response:\", json.loads(res.data))\n assert proto_reminder['message'].encode() not in res.data", "def delete(self, oid):\n path = '/servers/%s' % oid\n res = self.client.call(path, 'DELETE', data='', \n token=self.manager.identity.token)\n self.logger.debug('Delete openstack server: %s' % truncate(res))\n return res[0]", "def deleteOne(id):\n print(inspect.stack()[1][3])\n query = Followup.delete().where(Followup.columns.id == id)\n ResultProxy = connection.execute(query)\n if(not ResultProxy):\n return {'error': 'Unable to find the given client'}\n return {'status': \"Delete Succesful\"}", "async def delete(self, *, reason: Optional[Any] = ...):\n ...", "def delete(self):\n return self.request('', pylastica.request.Request.DELETE)", "def destroy(self) -> None:\n\n self.action_client.destroy()", "def delete_host(self, conf, tenant_id, network_id, host_id):\n\t\tpass", "def delete_key_vault_command(client: KeyVaultClient, args: dict[str, Any], params: dict[str, Any]) -> CommandResults:\n\n vault_name = args['vault_name']\n # subscription_id and resource_group_name arguments can be passed as command 
arguments or as configuration parameters,\n # if both are passed as arguments, the command arguments will be used.\n subscription_id = get_from_args_or_params(params=params, args=args, key='subscription_id')\n resource_group_name = get_from_args_or_params(params=params,\n args=args, key='resource_group_name')\n\n response = client.delete_key_vault_request(subscription_id=subscription_id,\n resource_group_name=resource_group_name,\n vault_name=vault_name)\n message = \"\"\n if response.get('status_code') == 200:\n message = f'Deleted Key Vault {vault_name} successfully.'\n elif response.get('status_code') == 204:\n message = f'Key Vault {vault_name} does not exists.'\n\n return CommandResults(\n readable_output=message\n )", "def _delValue( self, client ):\n\t\treturn client.delValue( self.schema )", "def deleteSecret(self, clientIP, not_before):\n\n return self._secretdb.execute('delete from %s where ip_address=:ip_address and not_before=:not_before' % self._table_name,\n {'ip_address': ip_address,\n 'not_before': not_before})", "async def delete(self, delete: TPayload) -> None:", "def delete(self, application_id):", "def delete(args, config):\n print('Deletes a selected HPC fleet with name \"{}\"'.format(args.fleet_name))", "def delete(self):\n self.call('DELETE', expect=error.NO_CONTENT)", "def delete_many(self, keys, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n if not keys:\r\n return\r\n\r\n keys = [self.make_key(k, version=version) for k in keys]\r\n try:\r\n return client.delete(*keys)\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def delete(self):\n self.request().delete()", "def delete(self):\n ...", "def delete_user():", "def delete(cls, client, resource) :\n try :\n if type(resource) is not list :\n deleteresource = nshttpprofile()\n if type(resource) != type(deleteresource):\n deleteresource.name = resource\n else :\n deleteresource.name = resource.name\n return deleteresource.delete_resource(client)\n else :\n if type(resource[0]) != cls :\n if (resource and len(resource) > 0) :\n deleteresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n deleteresources[i].name = resource[i]\n else :\n if (resource and len(resource) > 0) :\n deleteresources = [ nshttpprofile() for _ in range(len(resource))]\n for i in range(len(resource)) :\n deleteresources[i].name = resource[i].name\n result = cls.delete_bulk_request(client, deleteresources)\n return result\n except Exception as e :\n raise e", "def clear(self, client=None):\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n client.flushdb()", "def admin_delete_client_permission_v3(\n action: int,\n client_id: str,\n resource: str,\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = AdminDeleteClientPermissionV3.create(\n action=action,\n client_id=client_id,\n resource=resource,\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def release_client(client):\n client.disconnect()\n client.loop_stop()\n wait_for_disconnection(WAIT_CONNECTION_TIMEOUT)", "def delete_pattern(self, pattern, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n pattern = self.make_key(pattern, version=version)\r\n try:\r\n keys = 
client.keys(pattern)\r\n\r\n if keys:\r\n return client.delete(*keys)\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)", "def delete(self, customerguid, jobguid=\"\", executionparams=None):", "def delete(self, client_card_id):\r\n if client_card_id not in self.__storage:\r\n raise KeyError(\"There is no client card with the id {}\".format(client_card_id))\r\n del self.__storage[client_card_id]" ]
[ "0.8223102", "0.81987244", "0.81581897", "0.80933565", "0.8074244", "0.7924584", "0.79122", "0.75041306", "0.74830085", "0.7278975", "0.7202662", "0.7177086", "0.7141298", "0.7069998", "0.7028738", "0.6995486", "0.68752044", "0.6845381", "0.6844797", "0.6833574", "0.68101287", "0.6772382", "0.67508453", "0.6743568", "0.671709", "0.6701249", "0.6684527", "0.6657598", "0.6656325", "0.6651755", "0.66285944", "0.6613178", "0.6573043", "0.6549449", "0.65374553", "0.64993405", "0.640565", "0.6401857", "0.6398057", "0.6358317", "0.63491327", "0.634904", "0.6347544", "0.63411605", "0.63095987", "0.6301953", "0.62645555", "0.62518346", "0.6230477", "0.622217", "0.62137336", "0.61978316", "0.6130264", "0.6127842", "0.61128396", "0.6111796", "0.61097395", "0.6108918", "0.6106429", "0.610538", "0.61021966", "0.61005014", "0.6097921", "0.60900515", "0.60857254", "0.60839236", "0.6079584", "0.6071821", "0.6071548", "0.60519236", "0.6050507", "0.6045546", "0.60442424", "0.6039254", "0.6039254", "0.6034659", "0.6033395", "0.6031836", "0.60198367", "0.6013841", "0.60076135", "0.59992343", "0.5997771", "0.59972525", "0.59919745", "0.5965579", "0.5963514", "0.5963172", "0.59601665", "0.59595656", "0.59485126", "0.5943986", "0.59431833", "0.5942131", "0.59403586", "0.5928462", "0.5928001", "0.59214646", "0.59158343", "0.5900879" ]
0.78178424
7
Returns the items that overlap a bounding rectangle: the set of all items in the quadtree that overlap with a given bounding rectangle.
def hit(self, rect):
    # Find the hits at the current level
    hits = set(item for item in self.items if rect.contains(item.location))

    # Recursively check the lower quadrants
    if self.nw and rect.left < self.cx and rect.top < self.cy:
        hits |= self.nw.hit(rect)
    if self.sw and rect.left < self.cx and rect.bottom >= self.cy:
        hits |= self.sw.hit(rect)
    if self.ne and rect.right >= self.cx and rect.top < self.cy:
        hits |= self.ne.hit(rect)
    if self.se and rect.right >= self.cx and rect.bottom >= self.cy:
        hits |= self.se.hit(rect)

    return hits
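The method prunes entire quadrants by comparing the query window against the node's center (cx, cy), so only subtrees whose region can intersect the rectangle are visited. A self-contained, leaf-only sketch of the same contract — the Rect and Item shapes below are assumptions inferred from the attribute accesses in the method (left/right/top/bottom, contains, item.location):

    from collections import namedtuple

    Item = namedtuple("Item", "location")  # assumed item shape: anything with .location

    class Rect:
        """Assumed minimal Rect matching the attributes hit() reads."""
        def __init__(self, left, top, right, bottom):
            self.left, self.top, self.right, self.bottom = left, top, right, bottom

        def contains(self, point):
            x, y = point
            return self.left <= x <= self.right and self.top <= y <= self.bottom

    class Leaf:
        """Childless quadtree node: hit() reduced to the leaf case."""
        nw = ne = sw = se = None

        def __init__(self, items):
            self.items = items

        def hit(self, rect):
            return set(item for item in self.items if rect.contains(item.location))

    node = Leaf([Item((10, 10)), Item((200, 5))])
    assert node.hit(Rect(0, 0, 100, 100)) == {Item((10, 10))}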
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query(self, rect):\n if hasattr(self, 'leaves'):\n return self.leaves.values()\n else:\n results = []\n rect = Rect(rect)\n for c in self.children:\n if c.rect.intersects(rect):\n results.extend(c.query(rect))\n return set(results)", "def within_bbox(self, bbox):\n if hasattr(self, \"quadtree\"):\n indices = self.quadtree.search_within(*bbox)\n indices.sort()\n else:\n indices = [i for (i, pt) in enumerate(self)\n if (bbox[0] < pt.x < bbox[2]) and (bbox[1] < pt.y < bbox[3])]\n return self._subset(indices)", "def covers_overlaps(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.covers_overlaps(bounds)", "def intersects(self, bounds):\n bounds = tuple(float(b) for b in bounds)\n return self.numba_rtree.intersects(bounds)", "def _find_bboxes_in_rect(bboxes, left, bottom, right, top):\n result = (bboxes[:, 0] <= right) & (bboxes[:, 2] >= left) & \\\n (bboxes[:, 1] <= top) & (bboxes[:, 3] >= bottom)\n return result", "def get_bounds(self):\n log.debug(str(inspect.stack()[1][3]) + \"--> OC.get_bounds()\")\n\n # TODO: Move the operation out of here.\n\n xmin = Inf\n ymin = Inf\n xmax = -Inf\n ymax = -Inf\n\n # for obj in self.object_list:\n for obj in self.get_list():\n try:\n gxmin, gymin, gxmax, gymax = obj.bounds()\n xmin = min([xmin, gxmin])\n ymin = min([ymin, gymin])\n xmax = max([xmax, gxmax])\n ymax = max([ymax, gymax])\n except Exception as e:\n log.warning(\"DEV WARNING: Tried to get bounds of empty geometry. %s\" % str(e))\n\n return [xmin, ymin, xmax, ymax]", "def containing(*boxes):\n if not boxes:\n raise ValueError('At least one bounding box must be specified')\n boxes_objs = map(BoundingBox, boxes)\n start = boxes_objs[0].start\n end = boxes_objs[0].end\n for box in boxes_objs[1:]:\n start = np.minimum(start, box.start)\n end = np.maximum(end, box.end)\n return BoundingBox(start=start, end=end)", "def __contains__(self, item: 'BoundingBox2D') -> bool:\n top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside", "def GetOverlappingItems(self):\r\n\r\n area_bbox = self.area.GetBoundingBox()\r\n\r\n if hasattr(self.board, 'GetModules'):\r\n modules = self.board.GetModules()\r\n else:\r\n modules = self.board.GetFootprints()\r\n\r\n tracks = self.board.GetTracks()\r\n\r\n self.overlappings = []\r\n\r\n for zone in self.board.Zones():\r\n if zone.GetZoneName() != self.area.GetZoneName():\r\n if zone.GetBoundingBox().Intersects(area_bbox):\r\n self.overlappings.append(zone)\r\n\r\n for item in tracks:\r\n if (type(item) is pcbnew.PCB_VIA) and (item.GetBoundingBox().Intersects(area_bbox)):\r\n self.overlappings.append(item)\r\n if type(item) is pcbnew.PCB_TRACK:\r\n self.overlappings.append(item)\r\n\r\n for item in modules:\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n for pad in item.Pads():\r\n self.overlappings.append(pad)\r\n for zone in item.Zones():\r\n self.overlappings.append(zone)\r\n\r\n # TODO: change algorithm to 'If one of the candidate area's edges overlaps with target area declare candidate as overlapping'\r\n for i in range(0, self.board.GetAreaCount()):\r\n item = self.board.GetArea(i)\r\n if item.GetBoundingBox().Intersects(area_bbox):\r\n if item.GetNetname() != self.net:\r\n self.overlappings.append(item)", "def bbox_overlaps(boxes, query_boxes):\n n_ = boxes.shape[0]\n k_ = query_boxes.shape[0]\n overlaps = np.zeros((n_, k_), dtype=np.float)\n for k in range(k_):\n query_box_area = 
(query_boxes[k, 2] - query_boxes[k, 0] + 1) * (query_boxes[k, 3] - query_boxes[k, 1] + 1)\n for n in range(n_):\n iw = min(boxes[n, 2], query_boxes[k, 2]) - max(boxes[n, 0], query_boxes[k, 0]) + 1\n if iw > 0:\n ih = min(boxes[n, 3], query_boxes[k, 3]) - max(boxes[n, 1], query_boxes[k, 1]) + 1\n if ih > 0:\n box_area = (boxes[n, 2] - boxes[n, 0] + 1) * (boxes[n, 3] - boxes[n, 1] + 1)\n all_area = float(box_area + query_box_area - iw * ih)\n overlaps[n, k] = iw * ih / all_area\n return overlaps", "def Recursive_Rectangles(*args):\n if len(args) == 1 and isinstance(args[0], Rectangle):\n rectangle = args[0]\n else:\n rectangle = Rectangle(*args)\n dx, dy = rectangle.vertices[0]\n if (dx, dy) == (0, 0):\n return _Recursive_Rectangles(*rectangle.vertices[1])\n\n rects = _Recursive_Rectangles(*rectangle.translated(-dx, -dy).vertices[1])\n return set(r.translated(dx, dy) for r in rects)\n # return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def intersection(self, envelope):\n stack = [self.root]\n result = []\n while stack:\n node = stack.pop()\n if node.items:\n for item, active in zip(node.items, node.active):\n if not active:\n continue\n ok = True\n for axis in xrange(2):\n c = item[axis]\n low = envelope[0][axis]\n high = envelope[1][axis]\n if c < low or c > high:\n ok = False\n break\n if ok:\n result.append(item)\n else:\n axis = node.cutdim\n median = node.cutval\n low = envelope[0][axis]\n high = envelope[1][axis]\n if median <= high:\n stack.append(node.right)\n if median >= low:\n stack.append(node.left)\n return result", "def intersects(self, rect):\n\t\treturn ( rect.right >= self.left and rect.left < self.right\n\t\t\tand rect.bottom >= self.top and rect.top < self.bottom )", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])", "def clip(self, bbox):\n from shapely.geometry import Polygon, LinearRing\n poly = Polygon(LinearRing(zip(bbox[[0, 1, 1, 0]], bbox[[2, 2, 3, 3]])))\n return [g for g in self.geometries() if poly.intersects(g)]", "def items_intersect(self):\n for a, b in combinations(self.items, 2):\n if a.intersects_with(b):\n return True\n\n return False", "def find_overlap_rect_list(rect_list):\n\n\n overlap_list = []\n\n for index, item in enumerate(rect_list):\n index += 1\n\n while index < len(rect_list):\n #check item with next rectangle in the list\n x_overlap = find_overlap_range(item['left_x'], item['width'], \n rect_list[index]['left_x'],\n rect_list[index]['width'])\n \n y_overlap = find_overlap_range(item['bottom_y'], item['height'], \n rect_list[index]['bottom_y'],\n rect_list[index]['height'])\n\n if x_overlap and y_overlap:\n overlap_list.append({'left_x':x_overlap[0], \n 'bottom_y': y_overlap[0],\n 'width': x_overlap[1],\n 'height': y_overlap[1]})\n\n index += 1\n\n return overlap_list", "def _boxes_in_bbox(self, document):\n bbox = self._bbox\n boxes = document.get_boxes()\n return [box for box in boxes if bbox[0] <= box.x0 and\n box.x1 <= bbox[2] and bbox[1] <= box.y0 and\n box.y1 <= bbox[3] and self._allowed_page(box) and\n self._acceptable_dimensions(box)]", "def _overlapping_branch_list(self):\n if self._cached_overlapping_branch_list is not None:\n return self._cached_overlapping_branch_list\n\n have_overlap = set()\n for outer in p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()):\n outer_lhs = P4.Map()\n outer_lhs.insert(outer.view_p4map.lhs())\n for inner in 
p4gf_branch.iter_fp_non_deleted(self.ctx.branch_dict()):\n if outer == inner:\n continue\n overlap = P4.Map.join(outer_lhs, inner.view_p4map)\n # Any non-exclusionary lines shared between branches?\n for line in overlap.as_array():\n if line.startswith('-') or line.startswith('\"-'):\n continue\n # Yep. Non-exclusionary line implies overlap\n have_overlap.add(outer)\n have_overlap.add(inner)\n break\n\n self._cached_overlapping_branch_list = have_overlap\n return self._cached_overlapping_branch_list", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def bounds(self):\n return self._bboxes[0][0] #TODO: merge all coverages", "def get_bounds(self):\r\n left, bottom, front = 10000, 10000, 10000\r\n right, top, back = -10000, -10000, -10000\r\n for b in self.buf:\r\n for v in b.vertices:\r\n if v[0] < left:\r\n left = v[0]\r\n if v[0] > right:\r\n right = v[0]\r\n if v[1] < bottom:\r\n bottom = v[1]\r\n if v[1] > top:\r\n top = v[1]\r\n if v[2] < front:\r\n front = v[2]\r\n if v[2] > back:\r\n back = v[2]\r\n\r\n return (left, bottom, front, right, top, back)", "def quadkeys_to_bounds(quadkeys: List[str]):\n tile_bounds = [\n mercantile.bounds(mercantile.quadkey_to_tile(qk)) for qk in quadkeys\n ]\n\n minx = 180\n miny = 90\n maxx = -180\n maxy = -90\n for tb in tile_bounds:\n minx = min(minx, tb[0])\n miny = min(miny, tb[1])\n maxx = max(maxx, tb[2])\n maxy = max(maxy, tb[3])\n\n return [minx, miny, maxx, maxy]", "def split(self, thresh=0):\n\n new_tree_bounds = []\n new_tree_ids = []\n\n self.contains_null = False\n\n for qi, quad in enumerate(self.tree):\n\n left, bottom, right, top = quad.bounds\n xcenter = left + (right - left) / 2.0\n ycenter = top - (top - bottom) / 2.0\n\n quad_id = self.tree_ids[qi]\n\n for id_, bbox in zip(\n [1, 3, 0, 2],\n [\n (left, ycenter, xcenter, top),\n (xcenter, ycenter, right, top),\n (left, bottom, xcenter, ycenter),\n (xcenter, bottom, right, ycenter),\n ],\n ):\n\n id_list = list(self.sindex.intersection(bbox))\n\n if id_list:\n\n if len(id_list) > thresh:\n\n new_tree_bounds.append(bbox)\n new_tree_ids.append(quad_id + str(id_))\n\n else:\n self.contains_null = True\n\n else:\n self.contains_null = True\n\n self.tree_bounds = new_tree_bounds\n self.tree_ids = new_tree_ids\n\n return self", "def test_boundary_boxes(gt_detection_combo):\n found = False\n overlap_threshold = 0.7\n\n for found_box in gt_detection_combo.detected_boxes:\n if overlap_between(gt_detection_combo.gt_box, found_box) > overlap_threshold:\n found = True\n break\n\n assert found is True", "def intersect(self, rectangle):\n return self.contains(rectangle.corner) or rectangle.contains(self.corner)", "def test_rectangle_intersects(rectangle, big_area):\n new_room = Room(rectangle, 0, 1, 1, big_area, 'bathroom')\n rectangle = (0, 2, 15, 2, 15)\n assert new_room.contains_rectangle(rectangle[0], rectangle[1], rectangle[2], rectangle[3], rectangle[4]) is True", "def check_collisions(self, g):\n self.rects = {}\n for gc in self.sc.game_objects:\n self.load_game_object(gc)\n if g.name in self.backw_rects.keys():\n r = self.backw_rects[g.name]\n return r.collidedictall(self.rects)\n return []", "def find_common_bounds(bounds_1, bounds_2):\n new_bounds = []\n for (lower_1, upper_1), (lower_2, upper_2) in itertools.product(bounds_1, bounds_2):\n # Ignore this region if it's outside the current limits\n if upper_1 <= lower_2 or upper_2 <= lower_1:\n continue\n new_bounds.append(Region(max(lower_1, lower_2), min(upper_1, upper_2)))\n return new_bounds", "def 
overlaps(self,region):\n fs = FeatureSet()\n for f in self:\n if( f.overlaps(region) ):\n fs.append(f)\n return fs", "def overlaps(geometry, sr=None):\r\n return _filter(geometry, sr, 'esriSpatialRelOverlaps')", "def polygon_overlaps_other_polygon(self, outer_poly):\n contain_list = []\n for inner_poly in self.poly_list:\n if outer_poly == inner_poly:\n pass\n elif all(self.polygon_contains(outer_poly, inner_poly)):\n pass\n elif any(self.polygon_contains(outer_poly, inner_poly)):\n contain_list.append(inner_poly)\n return contain_list", "def overlaps(*objs):\n return set.intersection(*(set(range(*extent(obj))) for obj in objs))", "def get_intersections(self, x, y):\r\n arr = []\r\n shapes = self.get('Shapes')\r\n\r\n for n in xrange(len(shapes)):\r\n shape = shapes[n]\r\n if shape.is_visible() and shape.intersects(x, y):\r\n arr.append(shape)\r\n\r\n return arr", "def get_overlap_blocks(self):\n bv = self.base_g.new_vertex_property(\"vector<int>\")\n bc_in = self.base_g.new_vertex_property(\"vector<int>\")\n bc_out = self.base_g.new_vertex_property(\"vector<int>\")\n bc_total = self.base_g.new_vertex_property(\"vector<int>\")\n self._state.get_bv_overlap(self.base_g._Graph__graph,\n _prop(\"v\", self.base_g, bv),\n _prop(\"v\", self.base_g, bc_in),\n _prop(\"v\", self.base_g, bc_out),\n _prop(\"v\", self.base_g, bc_total))\n return bv, bc_in, bc_out, bc_total", "def get_bounds(self):\n return self._geometry.bounds", "def contained(self):\n seen = set()\n return [l.to_segment for l in self.edges_to_contained \\\n if id(l) not in seen and not seen.add(id(l))]", "def containments(self):\n return self.edges_to_contained + self.edges_to_containers", "def get_bounding_box(self):\n if len(self.elements) == 0:\n return None\n if not (self._bb_valid and\n all(ref._bb_valid for ref in self.get_dependencies(True))):\n bb = numpy.array(((1e300, 1e300), (-1e300, -1e300)))\n all_polygons = []\n for element in self.elements:\n if isinstance(element, PolygonSet):\n all_polygons.extend(element.polygons)\n elif isinstance(element, CellReference) or isinstance(\n element, CellArray):\n element_bb = element.get_bounding_box()\n if element_bb is not None:\n bb[0, 0] = min(bb[0, 0], element_bb[0, 0])\n bb[0, 1] = min(bb[0, 1], element_bb[0, 1])\n bb[1, 0] = max(bb[1, 0], element_bb[1, 0])\n bb[1, 1] = max(bb[1, 1], element_bb[1, 1])\n if len(all_polygons) > 0:\n all_points = numpy.concatenate(all_polygons).transpose()\n bb[0, 0] = min(bb[0, 0], all_points[0].min())\n bb[0, 1] = min(bb[0, 1], all_points[1].min())\n bb[1, 0] = max(bb[1, 0], all_points[0].max())\n bb[1, 1] = max(bb[1, 1], all_points[1].max())\n self._bb_valid = True\n _bounding_boxes[self] = bb\n return _bounding_boxes[self]", "def doBoundingBoxesIntersect(self, other):\n if(self.upperLeft.x <= other.lowerRight.x and\n self.lowerRight.x >= other.upperLeft.x and\n self.upperLeft.y >= other.lowerRight.y and\n self.lowerRight.y <= other.upperLeft.y):\n return True\n return False", "def get_bbox_overlap(self, that, epsilon):\n if (not isinstance(that, Annotation)):\n raise ValueError(\"Argument for intersects should be an annotation\")\n\n # find the width and height of the overlapping rectangle\n width = min(self.bbox.xmax, that.bbox.xmax) - \\\n max(self.bbox.xmin, that.bbox.xmin)\n height = min(self.bbox.ymax, that.bbox.ymax) - \\\n max(self.bbox.ymin, that.bbox.ymin)\n\n height = abs(that.bbox.ymax - self.bbox.ymin) + epsilon\n\n return (width, height)", "def get_bounds(ds):\n\n trans = get_transform(ds)\n if trans is not None:\n if 
isinstance(ds, xr.Dataset):\n dims = ds.dims\n elif isinstance(ds, xr.DataArray):\n dims = dict(zip(ds.dims, ds.shape))\n nrows = dims['y']\n ncols = dims['x']\n corners = (np.array([0, 0, ncols-1, ncols-1]),\n np.array([0, nrows-1, 0, nrows-1]))\n corner_x, corner_y = trans * corners\n return BoundingBox(\n left=corner_x.min(),\n bottom=corner_y.min(),\n right=corner_x.max(),\n top=corner_y.max()\n )\n else:\n return BoundingBox(\n left=ds['x'].min(),\n bottom=ds['y'].min(),\n right=ds['x'].max(),\n top=ds['y'].max()\n )", "def _rect_intersects(self, rect):\n\tb = (self.left() > rect.right() or \n\t\tself.right() < rect.left() or \n\t\tself.top() < rect.bottom() or \n\t\tself.bottom() > rect.top())\n\treturn not b", "def bounds(self):\n b = []\n\n for dim in self.dimensions:\n if dim.size == 1:\n b.append(dim.bounds)\n else:\n b.extend(dim.bounds)\n\n return b", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def _find_bboxes_on_rect_edge(bboxes, left, bottom, right, top):\n bboxes_left = _find_bboxes_in_rect(bboxes, left, bottom, left, top)\n bboxes_right = _find_bboxes_in_rect(bboxes, right, bottom, right, top)\n bboxes_top = _find_bboxes_in_rect(bboxes, left, top, right, top)\n bboxes_bottom = _find_bboxes_in_rect(bboxes, left, bottom, right, bottom)\n result = bboxes_left | bboxes_right | bboxes_top | bboxes_bottom\n return result", "def FindObjectsByBBox(*args, **kwargs):\n return _gdi_.PseudoDC_FindObjectsByBBox(*args, **kwargs)", "def intersection(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n # convert to a RangeSet\n rng_set = RangeSet._to_rangeset(rng_set)\n # do O(n^2) difference algorithm\n # TODO rewrite to increase efficiency by short-circuiting\n intersections = [rng1.intersection(rng2) for rng1 in self._ranges for rng2 in rng_set._ranges]\n intersections = [rng for rng in intersections if rng is not None and not rng.isempty()]\n return RangeSet(intersections)", "def intersects(self, cuboid):\n\t\treturn ( cuboid.front >= self.back and cuboid.back < self.front\n\t\t\tand cuboid.right >= self.left and cuboid.left < self.right\n\t\t\tand cuboid.bottom >= self.top and cuboid.top < self.bottom )", "def recursive_rectangles((x, y), (x0, y0)=(0, 0)):\n x, dx = max(x, x0), min(x, x0)\n y, dy = max(y, y0), min(y, y0)\n if (dx, dy) == (0, 0):\n return _recursive_rectangles(x, y)\n rects = _recursive_rectangles(x - dx, y - dy)\n # return set(map(lambda x: tuple(map(tuple, np.array(x) + (dx, dy))), rects))\n return set(((x1 + dx, y1 + dy), (x2 + dx, y2 + dy)) for ((x1, y1), (x2, y2)) in rects)", "def buildings_in_area(self, polygon):\n return [b for b in self.buildings if 
polygon.contains(b.geometry.convex_hull)]", "def overlaps(interval,intervals):\n return [x for x in intervals if interval.overlaps(x)]", "def within_polygon(self, poly):\n if hasattr(self, \"quadtree\"):\n bbox = poly.get_bbox(crs=self.crs)\n candidate_indices = self.quadtree.search_within(*bbox)\n confirmed_indices = []\n for i in candidate_indices:\n if poly.contains(self[i]):\n confirmed_indices.append(i)\n confirmed_indices.sort()\n else:\n confirmed_indices = [i for (i, point) in enumerate(self)\n if poly.contains(point)]\n return self._subset(confirmed_indices)", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def GetBounds(self):\n ...", "def under_rect(self, rect):\n x_min = self.clampx((rect.left - self._origin.x) // self._cell_size[0])\n x_max = self.clampx((rect.right - self._origin.x) // self._cell_size[0])\n y_min = self.clampy((rect.top - self._origin.y) // self._cell_size[1])\n y_max = self.clampy((rect.bottom - self._origin.y) // self._cell_size[1])\n cells = []\n for ix in range(x_min, x_max + 1):\n for iy in range(y_min, y_max + 1):\n index = iy * self._cell_count[0] + ix\n cells.append(self._cells[index])\n return cells", "def rectIntersect(rect1, rect2):\n rect = np.zeros_like(rect1)\n rect[[0, 2]] = np.maximum(rect1[[0, 2]], rect2[[0, 2]])\n rect[[1, 3]] = np.minimum(rect1[[1, 3]], rect2[[1, 3]])\n return rect", "def _intersects_3D(A, B):\n return all([_intersects_1D((A[i], A[i+3]), (B[i], B[i+3]))\n for i in range(3)])", "def bounds(self):\n \n return self.osmdb.bounds()", "def bounds(self):\n frame_ = self.to_frame().total_bounds.flatten().tolist()\n return BBox(\n left=frame_[0], bottom=frame_[1], right=frame_[2], top=frame_[3]\n )", "def canvas_bounds(self) -> utils.BoxRegion:", "def extract_bounding_boxes(self, scene):\n objs = scene[\"objects\"]\n rotation = scene[\"directions\"][\"right\"]\n\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n\n for i, obj in enumerate(objs):\n [x, y, z] = obj[\"pixel_coords\"]\n\n [x1, y1, z1] = obj[\"3d_coords\"]\n\n cos_theta, sin_theta, _ = rotation\n\n x1 = x1 * cos_theta + y1 * sin_theta\n y1 = x1 * -sin_theta + y1 * cos_theta\n\n height_d = 6.9 * z1 * (15 - y1) / 2.0\n height_u = height_d\n width_l = height_d\n width_r = height_d\n\n if obj[\"shape\"] == \"cylinder\":\n d = 9.4 + y1\n h = 6.4\n s = z1\n\n height_u *= (s * (h / d + 1)) \\\n / ((s * (h / d + 1)) - (s * (h - s) / d))\n height_d = height_u * (h - s + d) / (h + s + d)\n\n width_l *= 11 / (10 + y1)\n width_r = width_l\n\n if obj[\"shape\"] == \"cube\":\n height_u *= 1.3 * 10 / (10 + y1)\n height_d = height_u\n width_l = height_u\n width_r = height_u\n\n ymin.append((y - height_d) / 320.0)\n ymax.append((y + height_u) / 320.0)\n xmin.append((x - width_l) / 480.0)\n xmax.append((x + width_r) / 480.0)\n\n return xmin, ymin, xmax, ymax", "def get_random_rectangles(self):\n while len(self.rectangles) < self.n_rectangles:\n upper_left = [np.random.randint(0, 28) for i in range(2)] # upper-left corner coordinate\n lower_right = [np.random.randint(0, 28) for i in range(2)] # lower-right corner coordinate\n # Have upper left corner less than lower right corner of the rectangle\n if upper_left[0] < lower_right[0] and upper_left[1] < lower_right[1]:\n currentRect = Rectangle(upper_left, lower_right)\n currentArea = currentRect.area()\n # Only keep the rectangles whose area is 130 to 170\n if 130 <= currentArea <= 
170:\n self.rectangles.append(currentRect)\n #print(\"Upper Left \", upper_left, \" Lower right \", lower_right, \" Area: \", currentRect.area())", "def included_in(volume, outfile):\n return hypercubes_overlap(volume, outfile)", "def contained(query, intervalset):\n for i in intervalset:\n if query == i:\n continue\n if query[0] <= i[0] and i[1] <= query[1] and i[1]-i[0] < query[1]-query[0]:\n return True\n return False", "def boundary_polygon_by_union(self):\n cell_geoms = [None]*self.Ncells()\n\n for i in self.valid_cell_iter():\n xy = self.nodes['x'][self.cell_to_nodes(i)]\n cell_geoms[i] = geometry.Polygon(xy)\n return ops.cascaded_union(cell_geoms)", "def get_bounding_boxes(dets):\n bounding_boxes = []\n for box in dets:\n bounding_box = {'top_left_x': box.left(),\n 'top_left_y': box.top(),\n 'bottom_right_x': box.right(),\n 'bottom_right_y': box.bottom()}\n bounding_boxes.append(bounding_box)\n return bounding_boxes", "def xy_bbox(self):\n bbox = [None, None, None, None]\n for subset in self:\n if subset.is_x:\n if isinstance(subset, Trim):\n bbox[0] = subset.low\n bbox[2] = subset.high\n else:\n bbox[0] = bbox[2] = subset.value\n elif subset.is_y:\n if isinstance(subset, Trim):\n bbox[1] = subset.low\n bbox[3] = subset.high\n else:\n bbox[1] = bbox[3] = subset.value\n\n return bbox", "def get_intersections(self):\n return self.intersection_list", "def get_all_upper(self):\n all_upper = set()\n for upper in self.get_goterms_upper():\n all_upper.add(upper.id)\n all_upper |= upper.get_all_upper()\n return all_upper", "def get_boundingbox(self):\n tile_iterator = iter(self)\n (coordinate,tile) = next(tile_iterator)\n assert(tile is not None)\n min_x = coordinate[0]\n max_x = min_x + 1\n min_y = coordinate[1]\n max_y = min_y + 1\n\n for (coordinate,tile) in tile_iterator:\n\n if coordinate[0] < min_x:\n min_x = coordinate[0]\n if coordinate[0]+1> max_x:\n max_x = coordinate[0] +1\n if coordinate[1] < min_y:\n min_y = coordinate[1]\n if coordinate[1]+1> max_y:\n max_y = coordinate[1] +1\n\n return ((min_x, min_y), (max_x, max_y))", "def bounds(self):\n return self.substrates.bounds", "def fetchbounds(self):\n pnts = [x for x in [self.out_start, self.start, self.in_start, \\\n self.in_end, self.end, self.out_end] \\\n if x is not None]\n return min(pnts), max(pnts)", "def ContainsRect(*args, **kwargs):\n return _gdi_.Region_ContainsRect(*args, **kwargs)", "def getIntersections(self):\n\t\treturn self.intersections", "def bbox(self):\n lower = (self.x.min(), self.y.min())\n upper = (self.x.max(), self.y.max())\n return (lower, upper)", "def _get_intersections():\n with _get_mongo_client() as client:\n coll = client[mongo_database]['locations']\n return coll.find({'intersection_number': {'$exists': True}}, {'_id': False})", "def rectangle(self, lat1, long1, lat2, long2):\n upperlat = max(lat1, lat2)\n upperlong = max(long1, long2)\n lowerlat = min(lat1, lat2)\n lowerlong = min(long1, long2)\n return [x for x in self.points if lowerlat < x.latitude < upperlat and\n lowerlong < x.longitude < upperlong]", "def rect_intersection(rect_1, rect_2):\n \n overlap ={}\n\n x_overlap = find_overlap_range(rect_1['left_x'],\n rect_1['width'], \n rect_2['left_x'], \n rect_2['width']) \n # print x_overlap\n \n y_overlap = find_overlap_range(rect_1['bottom_y'],\n rect_1['height'], \n rect_2['bottom_y'], \n rect_2['height'])\n # print y_overlap\n\n if x_overlap and y_overlap:\n overlap['left_x'] = x_overlap[0]\n overlap['bottom_y'] = y_overlap[0]\n overlap['width'] = x_overlap[1]\n overlap['height'] = 
y_overlap[1]\n \n return overlap", "def contains(outer, inner):\n return inner.tl.x >= outer.tl.x and inner.tl.y >= outer.tl.y and \\\n inner.br.x <= outer.br.x and inner.br.y <= outer.br.y", "def bounds(self):\n return self.GetBounds()", "def get_bounds(self):\n occupied_locations = self.board.keys()\n min_x = min(p[0] for p in occupied_locations)\n max_x = max(p[0] for p in occupied_locations)\n min_y = min(p[1] for p in occupied_locations)\n max_y = max(p[1] for p in occupied_locations)\n return ((min_x, max_x), (min_y, max_y))", "def _intersect(self, interval):\n first = self.intervals.bisect_left(interval)\n last = first\n while first > 0 and \\\n self.intervals[first - 1].upper > interval.lower:\n first -= 1\n while last < len(self.intervals) and \\\n self.intervals[last].lower < interval.upper:\n last += 1\n return first, last", "def overlaps(box1, box2):\n # Get the coordinates of bounding boxes\n b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]\n b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]\n\n # get the coordinates of the intersection rectangle\n inter_rect_x1 = max(b1_x1, b2_x1)\n inter_rect_y1 = max(b1_y1, b2_y1)\n inter_rect_x2 = min(b1_x2, b2_x2)\n inter_rect_y2 = min(b1_y2, b2_y2)\n\n overlaps_touches: bool = inter_rect_x1 <= inter_rect_x2 and inter_rect_y1 <= inter_rect_y2\n return overlaps_touches", "def bounding_box(self):\n box_min = []\n box_max = []\n if self.is_empty():\n raise ValueError('empty polytope is not allowed')\n for i in range(0, self.space_dimension()):\n x = Variable(i)\n coords = [ v.coefficient(x) for v in self.generators() ]\n max_coord = max(coords)\n min_coord = min(coords)\n box_max.append(max_coord)\n box_min.append(min_coord)\n return (tuple(box_min), tuple(box_max))", "def test_bounding_rectangle(self, world):\n positions = [(0, 2), (2, 0), (3, 1), (2, 3)]\n for pos in positions:\n world.set_cell(pos)\n assert world.min_pos() == (0, 0) and world.max_pos() == (3, 3)", "def subsets(self):\n return set(self.subset_map.values())", "def get_bounding_box(self, poly=None):\n\n use_poly = poly if poly else self.res_poly\n\n # TODO: Test to comply with future values.\n # Updates the bounds\n if self.bounds_changed:\n # Gets the minimum and maximum value of each bounds.\n self.xmin = float('inf')\n self.ymin = float('inf')\n self.xmax = float('-inf')\n self.ymax = float('-inf')\n\n for points in use_poly:\n x = points[0] - self.x\n y = points[1] - self.y\n\n if x < self.xmin:\n self.xmin = x\n if x > self.xmax:\n self.xmax = x\n if y < self.ymin:\n self.ymin = y\n if y > self.ymax:\n self.ymax = y\n\n # Set bounds changed to be false\n self.bounds_changed = False\n \n return [self.xmin + self.x,\n self.ymin + self.y,\n self.xmax + self.x,\n self.ymax + self.y]", "def get_overlap_blocks(self):\n if not self.overlap:\n raise ValueError(\"overlap blocks only available if overlap == True\")\n return self.total_state.get_overlap_blocks()", "def get_overlap(self, other):\n return self.intersection_over_union(other)", "def bounds(self):\n return self._bounds", "def bounds_riodataset(raster: DatasetReader) -> box:\n return box(*list(raster.bounds))", "def bbox_intersect(a_ary, b_ary):\r\n # Do any of the 4 corners of one bbox lie inside the other bbox?\r\n # bbox format of [ll, ur]\r\n # bbx[0] is lower left\r\n # bbx[1] is upper right\r\n # bbx[0][0] is lower left longitude\r\n # bbx[0][1] is lower left latitude\r\n # bbx[1][0] is upper right longitude\r\n # bbx[1][1] is upper right latitude\r\n\r\n # Detect longitude and 
latitude overlap\r\n if is_overlap_sorted_values(a_ary[0][0], a_ary[1][0], b_ary[0][0], b_ary[1][0]) \\\r\n and is_overlap_sorted_values(a_ary[0][1], a_ary[1][1], b_ary[0][1], b_ary[1][1]):\r\n return True\r\n else:\r\n return False", "def overlaps_with_subspace(wavefunc: dict, subspace: list) -> bool:\n assert isinstance(wavefunc, dict), 'Please provide your state as a dict.'\n assert isinstance(subspace, list), 'Please provide subspace as a list of str.'\n\n # Deal with empty subspace:\n if not subspace:\n return False\n assert isinstance(subspace[0], str), 'Please provide subspace as a list of str.'\n assert len(wavefunc) >= len(subspace)\n tol = 1e-7\n\n for basisvector in subspace:\n if abs(wavefunc[basisvector]) > tol:\n return True\n\n return False" ]
[ "0.6644186", "0.6522617", "0.6465768", "0.6435466", "0.59305274", "0.59259313", "0.5883582", "0.58389163", "0.5806691", "0.5789564", "0.5719773", "0.56912875", "0.5672954", "0.5642597", "0.56199247", "0.5603152", "0.5598976", "0.55976313", "0.5585401", "0.55301195", "0.5503513", "0.5501818", "0.5495053", "0.5493269", "0.54910576", "0.54694486", "0.54485226", "0.5439337", "0.54239535", "0.54011184", "0.5387653", "0.5386995", "0.5385965", "0.5368685", "0.5353675", "0.5344374", "0.53293353", "0.53232473", "0.53177714", "0.53156185", "0.52957505", "0.52749777", "0.52668595", "0.52655226", "0.52648157", "0.525958", "0.52506995", "0.5241871", "0.5240272", "0.5239521", "0.522668", "0.52222586", "0.5198544", "0.5189219", "0.5189219", "0.5189219", "0.5189219", "0.5189219", "0.5189219", "0.5189219", "0.5189219", "0.5181673", "0.51806736", "0.51795363", "0.51783216", "0.5173412", "0.5167785", "0.51654994", "0.5164525", "0.5143396", "0.51405823", "0.5128704", "0.5126809", "0.5123931", "0.51213235", "0.51064914", "0.51051617", "0.5103132", "0.5101785", "0.5100806", "0.5091728", "0.50910115", "0.50854343", "0.50841326", "0.50829476", "0.50812066", "0.50792867", "0.5078108", "0.5076878", "0.50725985", "0.50684", "0.5060833", "0.5045998", "0.5045399", "0.5042979", "0.50424266", "0.50423807", "0.503803", "0.50332254", "0.5032887" ]
0.6004383
4
This method will run all the episodes with an epsilon-greedy strategy
def run_epsilon(env, num_of_bandits, iterations, episodes): # Initialize total mean rewards array per episode by zero epsilon_rewards = np.zeros(iterations) for i in range(episodes): print(f"Running Epsilon episode:{i}") n = 1 action_count_per_bandit = np.ones(num_of_bandits) mean_reward = 0 total_rewards = np.zeros(iterations) mean_reward_per_bandit = np.zeros(num_of_bandits) env.reset() epsilon = 0.5 for j in range(iterations): a = get_epsilon_action(epsilon, env, mean_reward_per_bandit) observation, reward, done, info = env.step(a) # Update counts n += 1 action_count_per_bandit[a] += 1 # Update mean rewards mean_reward = mean_reward + ( reward - mean_reward) / n # Update mean rewards per bandit mean_reward_per_bandit[a] = mean_reward_per_bandit[a] + ( reward - mean_reward_per_bandit[a]) / action_count_per_bandit[a] # Capture mean rewards per iteration total_rewards[j] = mean_reward # Update mean episode rewards once all the iterations of the episode are done epsilon_rewards = epsilon_rewards + (total_rewards - epsilon_rewards) / (i + 1) return epsilon_rewards
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def run_episode(self, mode=0, eps=0.):\n if mode==0:\n eps = 0.\n done = False\n score = 0 \n \n while not done:\n state = self.env_info.vector_observations[0] # get the current state\n action = self.agent.act(state, eps=eps) # get an action using epsilon greedy policy\n self.env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.env_info.vector_observations[0] # get the next state\n reward = self.env_info.rewards[0] # get the reward\n done = self.env_info.local_done[0] # see if episode has finished\n \n if mode == 1:\n self.agent.step(state, action, reward, next_state, done)\n \n score += reward\n \n self.reset_env() # reset the environment\n \n return score", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, 
self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)", "def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrase exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = 
env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def run(self):\n time.sleep(np.random.rand())\n np.random.seed(np.int32(time.time() % 1000 * self.id))\n \n # Put this in a while loop that checks a shared variable\n # Will keep running episodes until the shared variable reports False\n while(self.exit_flag == 0):\n for experience in self.run_episode():\n print(experience.state, experience.reward)\n self.training_q.put(experience)", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def episodes(self, num_episodes, num_steps_per_episode):\n for ep in range(num_episodes):\n self.start_episode()\n for step in range(num_steps_per_episode):\n next_sensation,reward = self.env(self.next_action)\n self.collect_data(self.last_sensation, self.next_action, reward, next_sensation)\n self.next_action = self.agent(next_sensation,reward)\n self.last_sensation = next_sensation\n if self.last_sensation == TERMINAL_STATE:\n break", "def dqn(n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n g_losses = []\n g_losses_window = deque(maxlen=100)\n s_losses = []\n s_losses_window = deque(maxlen=100)\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)\n score = 0\n 
ball_reward_val = 0.0\n \n g_states = env_info[g_brain_name].vector_observations # get initial state (goalies)\n s_states = env_info[s_brain_name].vector_observations # get initial state (strikers)\n# s2_states = env_info[s2_brain_name].vector_observations # get initial state (strikers)\n\n g_scores = np.zeros(num_g_agents) # initialize the score (goalies)\n s_scores = np.zeros(num_s_agents) # initialize the score (strikers) \n# s2_scores = np.zeros(num_s2_agents) # initialize the score (strikers) \n \n #for t in range(max_t):\n while True:\n action_g_0 = g_agent.act(g_states[0], eps) # always pick state index 0\n action_s_0 = s_agent.act(s_states[0], eps)\n action_s_2 = s_agent.act(s_states[2], eps)\n# action_s2_0 = s2_agent.act(s2_states[0], eps) \n# action_s2_0 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Set other team to random\n action_g_1 = np.asarray( [np.random.choice(g_action_size)] ) \n action_s_1 = np.asarray( [np.random.choice(s_action_size)] )\n action_s_3 = np.asarray( [np.random.choice(s_action_size)] )\n# action_s2_1 = np.asarray( [np.random.choice(s2_action_size)] )\n \n # Train simultaneously\n #action_g_1 = g_agent.act(g_states[1], eps) # always pick state index 1\n #action_s_1 = s_agent.act(s_states[1], eps) \n \n # Combine actions\n actions_g = np.array( (action_g_0, action_g_1) ) \n actions_s = np.array( (action_s_0, action_s_1, action_s_2, action_s_3 ) )\n# actions_s2 = np.array( (action_s2_0, action_s2_1) )\n# actions = dict( zip( [g_brain_name, s_brain_name, s2_brain_name], [actions_g, actions_s, actions_s2] ) )\n actions = dict( zip( [g_brain_name, s_brain_name], [actions_g, actions_s] ) )\n \n env_info = env.step(actions) \n # get next states\n g_next_states = env_info[g_brain_name].vector_observations \n s_next_states = env_info[s_brain_name].vector_observations\n# s2_next_states = env_info[s2_brain_name].vector_observations\n \n # check if episode finished\n done = np.any(env_info[g_brain_name].local_done)\n \n # get reward and update scores\n g_rewards = env_info[g_brain_name].rewards\n s_rewards = env_info[s_brain_name].rewards\n# s2_rewards = env_info[s2_brain_name].rewards\n \n # Modify RED striker reward -Only when goal is scored\n if done:\n new_s_reward = modify_reward(s_rewards[0])\n s_rewards[0] = new_s_reward\n new_s_reward = modify_reward(s_rewards[2])\n s_rewards[2] = new_s_reward\n# new_s2_reward = modify_reward(s2_rewards[0])\n# s2_rewards[0] = new_s2_reward\n \n # Update scores\n g_scores += g_rewards\n s_scores += s_rewards\n# s2_scores += s2_rewards\n \n # Add in ball reward for striker\n ball_reward_val += ball_reward(s_states[0])\n \n # store experiences\n g_agent.step(g_states[0], action_g_0, g_rewards[0], \n g_next_states[0], done)\n s_agent.step(s_states[0], action_s_0, s_rewards[0] + ball_reward(s_states[0]), # adding ball reward\n s_next_states[0], done)\n s_agent.step(s_states[2], action_s_2, s_rewards[2] + ball_reward(s_states[2]), # adding ball reward\n s_next_states[2], done)\n# s2_agent.step(s2_states[0], action_s2_0, s2_rewards[0] + ball_reward(s2_states[0]), # adding ball reward\n# s2_next_states[0], done)\n\n if done:\n break\n \n g_states = g_next_states\n s_states = s_next_states\n# s2_states = s2_next_states\n \n # learn\n if len(g_agent.memory) > 64: #check memory to batch size\n goalie_loss = g_agent.learn(g_agent.memory.sample(), 0.99) # discount = 0.99\n striker_loss = s_agent.learn(s_agent.memory.sample(), 0.99) # discount = 0.99 \n# _ = s2_agent.learn(s2_agent.memory.sample(), 0.99) # discount = 0.99 \n \n 
g_losses.append(goalie_loss.item())\n g_losses_window.append(goalie_loss.item())\n #print(goalie_loss.item())\n s_losses.append(striker_loss.item())\n s_losses_window.append(striker_loss.item())\n \n score = g_scores[0] + s_scores[0] #+ s2_scores[0]\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n \n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val), end=\"\")\n #print(s_states[0][0:56])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\t Goalie Loss:' \\\n '{:.5f}\\t Striker Loss: {:.5f}\\n' \\\n '\\t Ball Reward: {:.2f}'.format(i_episode, \\\n np.mean(scores_window), \\\n np.mean(g_losses_window), \\\n np.mean(s_losses_window), \\\n ball_reward_val))\n \n # TODO: ---------- CHANGE OUTPUT FILE NAMES ----------\n torch.save(g_agent.qnetwork_local.state_dict(), 'goalie3_dqn_V1_mod.pth')\n torch.save(s_agent.qnetwork_local.state_dict(), 'striker3_dqn_V1_mod.pth')\n return scores", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if 
self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))", "def run_multiple_episodes(self, episodes_no, policy):\n for _ in range(episodes_no):\n self.q_learning_episode(policy)\n policy.update_epsilon()\n\n policy.reset()\n return self.q_values", "def run_episode(self):\n # Reset environment\n self.agent.env.reset()\n done = False\n step_count = 0\n total_reward = 0\n\n while not done:\n reward, done = self.agent.explore()\n step_count += 1\n if step_count % 100 == 0:\n print('step count {}'.format(step_count))\n total_reward += self.agent.params['gamma']**step_count * reward\n return step_count, total_reward", "def train_dqn(self, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n self.scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n for t in range(max_t):\n action = self.agent.act(state, eps)\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n self.scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n # we use 15.0 just to be sure\n if np.mean(scores_window)>=self.threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return self.scores", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def executeEpisode(self, mcts, game, args):\n trainExamples = []\n board = game.getInitBoard()\n curPlayer = 1\n episodeStep = 0\n state_counter = Counter()\n\n moves = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = game.getCanonicalForm(board, curPlayer)\n temp = int(episodeStep < self.args['tempThreshold'])\n\n pi = mcts.getActionProb(canonicalBoard, temp=temp)\n sym = game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n state_counter.update(game.stringRepresentation(board)) #count the visit to the board\n\n board, curPlayer = game.getNextState(board, curPlayer, action)\n\n r = game.getGameEnded(board, curPlayer)\n \n moves += 1\n\n if moves >= self.args['maxMoves']:\n r = 1e-4\n\n if r != 0:\n return ([(x[0], x[2], r * ((-1) ** (x[1] != curPlayer))) for x in trainExamples], state_counter)", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = 
policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def run(self, num_episodes):\n for _ in xrange(num_episodes):\n self._env.reset()\n curr_state = self._env.state\n while not self._env.is_terminal(curr_state):\n reward = self._policy.take_action_and_get_reward()\n next_state = self._env.state\n self._update_parameters(curr_state, reward, next_state)\n curr_state = next_state\n # Estimate the TD-fixpoint.\n self.theta = np.dot(np.linalg.pinv(self._A), self._b)\n # Calculate current MSVE.\n self._calc_msve()", "def run_all_episodes(self, episode_count):\n # Holds final result\n step_arr = []\n reward_arr = []\n new_abstr = {}\n detached_states = []\n\n if 'abstraction_type' in self.params.keys() and self.params['abstraction_type'] == 'discretization':\n self.agent.make_abstraction()\n\n while self.episode_count < episode_count:\n # Run episode, record results\n steps, reward = self.run_episode()\n step_arr.append(steps)\n reward_arr.append(reward)\n self.episode_count += 
1\n if self.episode_count % 1 == 0:\n print('Episode {} finished with step count {}'.format(self.episode_count, steps))\n\n # Create temporal abstraction if applicable\n if 'make_abstraction' in self.params.keys() and self.episode_count in self.params['make_abstraction']:\n self.agent.make_abstraction()\n new_abstr = self.agent.params['s_a'].abstr_dict\n\n # Detach states if applicable\n if 'refine_abstraction' in self.params.keys() and self.episode_count in self.params['refine_abstraction']:\n newly_detached = self.agent.refine_abstraction()\n detached_states.extend(newly_detached)\n print('final abstraction')\n for i in range(len(self.agent.params['s_a'].cell_to_abstract_cell)):\n for key, value in self.agent.params['s_a'].cell_to_abstract_cell[i].items():\n print(key, value)\n\n return step_arr, reward_arr, new_abstr, detached_states", "def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return to top-performing agent. Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]", "def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the ff code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t til end of episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)", "def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done= self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves the transition\n memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n # Perform one step of batch gradient descent\n running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n state = next_state\n\n writer.close()", "def terminal_test(self):\n\n for self.cur_ep in tqdm.tqdm(range(1, self.episodes + 1), ascii=True, unit='episodes'):\n\n 
# Number of passes through the main loop\n step = 1\n\n cur_state = self.env.reset()\n\n done = False\n\n while not done:\n\n # Randomly choose between:\n if np.random.random() > self.epsilon:\n # Action taken from the q-table\n action = np.argmax(self.agent.get_q_values(np.array(cur_state)))\n\n else:\n # Random action\n action = np.random.randint(0, self.env.ACTION_SPACE_SIZE)\n\n # Perform an action with the snake\n new_state, reward, done = self.env.step(action)\n\n # Add an example to the memory\n self.agent.update_training_set((cur_state, action, reward, new_state, done))\n\n # Train if applicable\n self.agent.train()\n\n cur_state = new_state\n step += 1\n\n if self.epsilon > self.MIN_EPSILON:\n self.epsilon *= self.EPSILON_DECAY\n self.epsilon = max(self.MIN_EPSILON, self.epsilon)\n\n if self.save_model:\n self.agent.save_model(self.model_file_name)", "def dqn(self, n_episodes, checkpoint, eps_start=1., eps_end=0.1, eps_decay=0.995, alg=\"ddqn\"):\n\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes + 1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n\n state = self.get_state(env_info.visual_observations[0], 0) # get the current state\n score = 0\n for t in range(300):\n action = self.agent.act(state, eps).astype(np.int32) # select an action\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = self.get_state(env_info.visual_observations[0], t)\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n # get the next state\n self.agent.step(state, action, reward, next_state, done, alg)\n score += reward # update the score\n state = next_state # roll over the state to next time step\n if done: # exit loop if episode finished\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format\n (i_episode - 100, np.mean(scores_window)))\n torch.save(self.agent.q_network_local.state_dict(), checkpoint)\n break\n return scores
mean using specified window.\"\"\"\n\t\tplt.plot(scores); plt.title(\"Scores\");\n\t\trolling_mean=pd.Series(scores).rolling(rolling_window).mean()\n\t\tplt.plot(rolling_mean);\n\t\treturn rolling_mean\n\n\trolling_mean=plot_scores(scores)\n\n\t# Run in test mode and analyze scores obtained\n\ttest_scores=run(q_agent, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t    len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores, rolling_window = 10)\n\n\n\tdef plot_q_table(q_table):\n \"\"\"Visualize max Q-value for each state and corresponding action.\"\"\"\n\t q_image=np.max(q_table, axis = 2) # max Q-value for each state\n\t q_actions=np.argmax(q_table, axis = 2) # best action for each state\n\n\t fig, ax=plt.subplots(figsize = (10, 10))\n\t cax=ax.imshow(q_image, cmap = 'jet');\n\t cbar=fig.colorbar(cax)\n\t for x in range(q_image.shape[0]):\n\t for y in range(q_image.shape[1]):\n\t ax.text(x, y, q_actions[x, y], color = 'white',\n\t horizontalalignment = 'center', verticalalignment = 'center')\n\t ax.grid(False)\n\t ax.set_title(\"Q-table, size: {}\".format(q_table.shape))\n\t ax.set_xlabel('position')\n\t ax.set_ylabel('velocity')\n\n\n\tplot_q_table(q_agent.q_table)\n\n\n\tstate_grid_new=create_uniform_grid(\n\t env.observation_space.low, env.observation_space.high, bins = (20, 20))\n\tq_agent_new=QLearningAgent(env, state_grid_new)\n\tq_agent_new.scores=[]\n\n\n\tq_agent_new.scores += run(q_agent_new, env,\n\t num_episodes = 50000) # accumulate scores\n\trolling_mean_new=plot_scores(q_agent_new.scores)\n\n\ttest_scores= run(q_agent_new, env, num_episodes = 100, mode = 'test')\n\tprint(\"[TEST] Completed {} episodes with avg. score = {}\".format(\n\t len(test_scores), np.mean(test_scores)))\n\t_=plot_scores(test_scores)\n\n\tplot_q_table(q_agent_new.q_table)\n\n\tstate=env.reset()\n\tscore=0\n\timg=plt.imshow(env.render(mode='rgb_array'))\n\tfor t in range(1000):\n\t\taction=q_agent_new.act(state, mode = 'test')\n\t\timg.set_data(env.render(mode='rgb_array'))\n\t\tplt.axis('off')\n\t\tdisplay.display(plt.gcf())\n\t\tdisplay.clear_output(wait = True)\n\t\tstate, reward, done, _=env.step(action)\n\t\tscore += reward\n\t\tif done:\n\t\t\tprint('Score: ', score)\n\t\t\tbreak\n\tenv.close()
{:.2f}'.format(i_episode, np.mean(scores_deque))\r\n print(string)\r\n log.write(string)\r\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\r\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\r\n \r\n if np.mean(scores_deque)>=30.0 and i_episode>=100:\r\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))\r\n torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')\r\n torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')\r\n break\r\n log.close()\r\n return scores", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def evaluate(self, num_episodes, max_episode_length=None, gen_video=False):\n evaluation_policy = GreedyPolicy()\n eval_preprocessor = preprocessors.PreprocessorSequence()\n env_valid = gym.make(self.env_string)\n\n iter_ctr_valid = 0\n Q_sum = 0\n eval_episode_ctr_valid = 0\n total_reward_all_episodes = []\n \n # https://github.com/openai/gym/blob/master/gym/wrappers/monitoring.py video_callable takes function as arg. so we hack with true lambda\n # https://github.com/openai/gym/issues/494 \n if gen_video:\n video_dir = os.path.join(self.log_dir, 'gym_monitor', str(self.iter_ctr).zfill(7))\n os.makedirs(video_dir)\n env_valid = wrappers.Monitor(env_valid, video_dir, video_callable=lambda x:True, mode='evaluation')\n\n while eval_episode_ctr_valid < num_episodes:\n state = env_valid.reset()\n eval_preprocessor.reset_history_memory()\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0.0\n\n while num_timesteps_in_curr_episode < max_episode_length:\n num_timesteps_in_curr_episode += 1\n iter_ctr_valid += 1\n\n state_network = self.preprocessor.process_state_for_network(state)\n q_values = self.calc_q_values(state_network)\n Q_sum += np.max(q_values) # todo fix this\n\n action = evaluation_policy.select_action(q_values)\n next_state, reward, is_terminal, _ = env_valid.step(action)\n total_reward_curr_episode += reward\n # print \"Evaluation : timestep {}, episode {}, action {}, reward {}, total_reward {}\"\\\n # .format(iter_ctr_valid, eval_episode_ctr_valid, action, reward, total_reward_curr_episode)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n eval_episode_ctr_valid += 1\n print \"Evaluate() : iter_ctr_valid {}, eval_episode_ctr_valid : {}, total_reward_curr_episode : {}, num_timesteps_in_curr_episode {}\"\\\n .format(iter_ctr_valid, eval_episode_ctr_valid, total_reward_curr_episode, num_timesteps_in_curr_episode)\n total_reward_all_episodes.append(total_reward_curr_episode)\n # num_timesteps_in_curr_episode = 0\n break\n\n state = next_state\n\n Q_avg = Q_sum/float(iter_ctr_valid)\n print \" sum(total_reward_all_episodes) : {} , float(len(total_reward_all_episodes)) : {}\".format\\\n (sum(total_reward_all_episodes), float(len(total_reward_all_episodes)))\n all_episode_avg_reward = sum(total_reward_all_episodes)/float(len(total_reward_all_episodes))\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='test_mean_avg_reward', 
value=all_episode_avg_reward, step=self.iter_ctr)\n self.tf_log_scaler(tag='test_mean_Q_max', value=Q_avg, step=self.iter_ctr)\n self.dump_test_episode_reward(all_episode_avg_reward)\n self.qavg_list = np.append(self.qavg_list, Q_avg)\n self.reward_list.append(all_episode_avg_reward)\n\n pkl.dump(self.reward_list, open(\"/data/datasets/ratneshm/deeprl_hw2/eval_rewards.pkl\", \"wb\"))\n \n print \"all_episode_avg_reward \", all_episode_avg_reward\n print \"\\n\\n\\n self.reward_list \\n\\n\\n\", self.reward_list", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def test():\n env = gym.make('CartPole-v1')\n\n results = []\n for _ in range(100):\n results.append(episode(env, render=False, verbose=False))\n\n print(f'average={sum(results) / len(results)} '\n f'max={max(results)} '\n f'min={min(results)}')", "def iteration(self, results):\n\n self.db.store_episodes_results(results)\n\n samples = self.db.iter_samples(self.args.q_sample_size,\n self.args.q_learning_iters)\n for sample in samples:\n a1 = self.q1_network.predict_argmax(sample.s2, self.args.batch_size)\n v1 = self.s.run(self.q2_values, {self.states: sample.s2,\n self.actions: a1})\n q1 = sample.r + (~sample.done * self.args.gamma * v1)\n\n a2 = self.q2_network.predict_argmax(sample.s2, self.args.batch_size)\n v2 = self.s.run(self.q1_values, {self.states: sample.s2,\n self.actions: a2})\n q2 = sample.r + (~sample.done * self.args.gamma * v2)\n \n feed_dict = {self.states: sample.s1, self.actions: sample.a}\n\n feed_dict[self.q_estimation] = q1\n 
self.q2_network.train_in_batches(self.q2_train_op, feed_dict,\n self.args.num_batches, self.args.batch_size)\n\n feed_dict[self.q_estimation] = q2\n self.q1_network.train_in_batches(self.q1_train_op, feed_dict,\n self.args.num_batches, self.args.batch_size)\n\n self.ed.next()", "def RunEpisode(env, policy, eps):\n\n obs = env.reset()\n memory = []\n R = 0\n for t in range(1000):\n action = policy(obs.astype('float32').reshape(1, 4))[0]\n# pdb.set_trace()\n r = np.random.rand()\n if r<eps:\n action = np.random.random_integers(0,1, ()).tolist()\n\n new_obs, reward, done, info = env.step(action)\n memory.append((obs, action, new_obs, reward, done))\n obs = new_obs\n if done:\n break\n\n return memory", "def execute_and_get_episodes(self, num_episodes, max_timesteps_per_episode=0, deterministic=False):\n pass", "def run(self):\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']\n\n q_ind = None\n r_table = None\n xi_matrix = None\n\n best_episode = None\n best_model = {}\n\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description(\"Episode: {}\".format(episode_id))\n current_best = -1000000\n\n # Create episode\n ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)\n\n episode = Episode(self.config,\n episode_id,\n ind_exploration_factor,\n hex_attr_df,\n hex_distance_df,\n city_states,\n neighborhood,\n popular_bins,\n q_ind,\n r_table,\n xi_matrix)\n\n # Run episode\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n\n # Uncomment for logging if running a job, comment during experiments\n # otherwise it leads to insanely huge logging output which is useless\n\n # self.logger.info(\"\"\"\n # Expt: {} Episode: {} Earnings: {}\n # Pax rides: {} Relocation rides: {} Unmet demand: {}\n # \"\"\".format(self.expt_name, episode_id,\n # episode_tracker.gross_earnings,\n # episode_tracker.successful_waits,\n # episode_tracker.relocation_rides,\n # episode_tracker.unmet_demand))\n # self.logger.info(\"----------------------------------\")\n\n self.training_tracker.update_RL_tracker(\n episode_id, episode_tracker.gross_earnings,\n episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.relocation_rides,\n episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,\n episode_tracker.DRT, episode_tracker.DCT)\n\n # Keep track of the best episode\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n else: # self.objective == 'pickups':\n if episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n\n # Keep track of the best model\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n 
best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n\n # After finishing training\n self.logger.info(\"Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}\".format(self.expt_name,\n best_episode.gross_earnings,\n best_episode.successful_waits,\n best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploits the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 5 episodes\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)
this_ep_reward += reward;\n \n #Check if exceed the max_episode_length\n if max_episode_length is not None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n #Check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n\n state_this_net = np.append(obs_this_net[0:13], \n obs_this_net[14:]).reshape(1,16)\n\n average_reward = (average_reward * (episode_counter - 1) \n + this_ep_reward) / episode_counter;\n average_episode_length = (average_episode_length \n * (episode_counter - 1) \n + this_ep_length) / episode_counter;\n \n episode_counter += 1;\n if show_detail:\n logging.info ('Episode ends. Cumulative reward is %0.04f '\n 'episode length is %d, average reward by now is %0.04f,'\n ' average episode length by now is %d.' %(this_ep_reward,\n this_ep_length,\n average_reward,\n average_episode_length));\n this_ep_length = 0;\n this_ep_reward = 0;\n \n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n state_this_net = state_next_net\n time_this = time_next\n this_ep_length += 1;\n return (average_reward, average_episode_length);", "def dqn(agent, n_episodes=1500, eps_start=1.0, eps_end=0.01, eps_decay=0.995, score_threshold=13.0):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(0, n_episodes):\n brain_info = env.reset(train_mode=True)[brain_name]\n state = brain_info.vector_observations[0]\n score = 0\n while True:\n action = agent.act(state, eps, training_mode=True)\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations[0]\n reward = env_info.rewards[0]\n done = env_info.local_done[0]\n agent.step(state, action, reward, next_state, done)\n score += reward\n state = next_state\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=score_threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n break\n return scores", "def simulation(nepisodes):\n # Initialize robots\n # print('I am inside the simulation')\n agents = [] # List containing all robots\n a1 = Agent(start = [0, 0], end = [grid_size-1, grid_size-1], nr = 1) # Create agent 1\n a2 = Agent(start = [0, grid_size-1], end = [grid_size-1, 0], nr = 2) # Create agent 2\n a3 = Agent(start = [grid_size-1, 0], end = [0, grid_size-1], nr = 3) # Create agent 3\n a4 = Agent(start = [grid_size-1, grid_size-1], end = [0, 0], nr = 4) # Create agent 4\n agents.append(a1)\n agents.append(a2)\n agents.append(a3)\n agents.append(a4)\n\n # for agent in agents:\n # agent.load_target('target_weights_{}.h5'.format(agent.nr))\n # agent.load_policy('policy_weights_{}.h5'.format(agent.nr))\n # print('loaded')\n\n steps_list = [[] for i in range(len(agents))]\n reward_list = [[] for i in range(len(agents))]\n 
cumulative_rewards = [[] for i in range(len(agents))]\n collisions_list = [[] for i in range(len(agents))]\n\n t = 0 # Set time to zero\n for i in range(nepisodes):\n t = episode(agents, t, i+1) # Run one episode\n\n print('End of episode ', i+1)\n agent_index = 0\n for agent in agents:\n steps_list[agent_index].append(agent.steps)\n reward_list[agent_index].append(agent.reward)\n collisions_list[agent_index].append(agent.collisions)\n if i == 0:\n cumulative_rewards[agent_index].append(agent.reward)\n else:\n cumulative_rewards[agent_index].append(agent.reward + cumulative_rewards[agent_index][i-1])\n agent_index += 1\n\n if i % 1000 == 0:\n with open('reward_4_agents_{}'.format(i),'wb') as f:\n pickle.dump(reward_list,f)\n\n with open('steps_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(steps_list, f)\n\n with open('cols_4_agents_{}'.format(i), 'wb') as f:\n pickle.dump(collisions_list, f)\n\n\n return steps_list, reward_list, collisions_list, cumulative_rewards", "def execute_episodes(self, num_episodes, max_timesteps_per_episode=0, update_spec=None, deterministic=False):\n pass", "def qlearning(alpha, gamma, epsilon, episodes, max_steps, n_tests, render = False, test=False):\n env = gym.make('Taxi-v2')\n n_states, n_actions = env.observation_space.n, env.action_space.n\n Q = init_q(n_states, n_actions, type=\"ones\")\n timestep_reward = []\n for episode in range(episodes):\n print(f\"Episode: {episode}\")\n s = env.reset()\n a = epsilon_greedy(Q, epsilon, n_actions, s)\n t = 0\n total_reward = 0\n done = False\n while t < max_steps:\n if render:\n env.render()\n t += 1\n s_, reward, done, info = env.step(a)\n total_reward += reward\n a_ = np.argmax(Q[s_, :])\n if done:\n Q[s, a] += alpha * ( reward - Q[s, a] )\n else:\n Q[s, a] += alpha * ( reward + (gamma * Q[s_, a_]) - Q[s, a] )\n s, a = s_, a_\n if done:\n if render:\n print(f\"This episode took {t} timesteps and reward: {total_reward}\")\n timestep_reward.append(total_reward)\n break\n if render:\n print(f\"Here are the Q values:\\n{Q}\\nTesting now:\")\n if test:\n test_agent(Q, env, n_tests, n_actions)\n return timestep_reward", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience replay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} episodes\".format(episode))\n\t\t\t\tbreak", "def train(self, max_episodes= 1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n success = 
False\n i_episode = 0\n eps = eps_start\n \n print('Training in progress...')\n for i in range(max_episodes):\n score = self.run_training_episode(eps=eps)\n \n self.score_window.append(score)\n self.score_record.append(np.mean(self.score_window))\n \n i_episode += 1\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n\n if i_episode%100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n \n if i_episode>100:\n if np.mean(self.score_window)>self.criteria:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, self.score_record[-1]))\n success = True\n break\n\n if success:\n print('Criteria reached after {} episodes'.format(i_episode))\n else:\n print('Failed to reach Criteria after {} episodes'.format(i_episode))\n\n self.plot_training_progress()\n return success", "def run_episode(self):\n self.reset_episode()\n obs = self.env.reset()\n while True:\n action = self.Policy[self.env.stateDict[obs]]\n new_obs, reward, done, _ = self.env.step(action)\n if self.mode=='debug':\n print(\"PrevObs:{}, Action:{}, Obs:{}, Reward:{}, Done:{}\"\n .format(obs, action, new_obs,reward,done))\n self.totalReward += reward\n self.totalSteps += 1\n if done:\n break\n else:\n obs = new_obs\n return self.totalReward", "def q_learning(env, model, episodes, gamma=0.9,\n epsilon=0.3, eps_decay=0.99,\n replay=False, replay_size=20,\n title='DQL', double=False,\n n_update=10, soft=False, verbose=True):\n final = []\n memory = []\n episode_i = 0\n sum_total_replay_time = 0\n for episode in range(episodes):\n episode_i += 1\n if double and not soft:\n # Update target network every n_update steps\n if episode % n_update == 0:\n model.target_update()\n if double and soft:\n model.target_update()\n\n # Reset state\n state = env.reset()\n done = False\n total = 0\n\n while not done:\n # Implement greedy search policy to explore the state space\n if random.random() < epsilon:\n action = env.action_space.sample()\n else:\n q_values = model.predict(state)\n action = torch.argmax(q_values).item()\n\n # Take action and add reward to total\n next_state, reward, done, _ = env.step(action)\n\n # Update total and memory\n total += reward\n memory.append((state, action, next_state, reward, done))\n q_values = model.predict(state).tolist()\n\n if done:\n if not replay:\n q_values[action] = reward\n # Update network weights\n model.update(state, q_values)\n break\n\n if replay:\n t0 = time.time()\n # Update network weights using replay memory\n model.replay(memory, replay_size, gamma)\n t1 = time.time()\n sum_total_replay_time += (t1 - t0)\n else:\n # Update network weights using the last step only\n q_values_next = model.predict(next_state)\n q_values[action] = reward + gamma * torch.max(q_values_next).item()\n model.update(state, q_values)\n\n state = next_state\n\n # Update epsilon\n epsilon = max(epsilon * eps_decay, 0.01)\n final.append(total)\n plot_res(final, title)\n\n if verbose:\n print(\"episode: {}, total reward: {}\".format(episode_i, total))\n if replay:\n print(\"Average replay time:\", sum_total_replay_time / episode_i)\n\n return final", "def play_episodes(env, policy, render_option, num_eps, pause_time, min_steps):\n env.reset()\n if pause_time != 0:\n for _ in range(int(pause_time / 0.01)):\n env.render()\n time.sleep(0.01)\n total_reward = 0.0\n for _ in range(num_eps):\n episode_reward = play_episode(env, policy, render_option, min_steps)\n total_reward += episode_reward\n avg_reward = total_reward / num_eps\n print('finished {0} episodes with average 
reward {1}'.format(num_eps, avg_reward))\n return avg_reward", "def dqn(env, brain_name, agent, n_episodes=2000, max_t=1000, eps_start=1.0, eps_min=0.01, eps_decay=0.995, **kwargs):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n \n for i_episode in range(1, n_episodes+1):\n state = env.reset(train_mode=True)[brain_name].vector_observations[0]\n score = 0\n for t in range(max_t):\n #print('\\rt: ' + str(t))\n# if (t % 4) == 0:\n# action = agent.act(state, eps)\n action = agent.act(state, eps)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n# if (t % 4) == 0:\n# agent.step(state, action, reward, next_state, done)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_min, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=200.0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n break\n\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pth')\n\n return scores", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def play(self, episodes=99, max_steps=99):\n rewards = []\n\n for episode in range(episodes):\n state = self.env.reset()\n total_reward = 0\n display = '\\n----------------------------\\n' + \\\n f'Episode: {episode}'\n print(display)\n\n for 
step in range(max_steps):\n self.env.render()\n action = np.argmax(self.q_table[state, :])\n\n new_state, reward, done, _ = self.env.step(action)\n\n total_reward += reward\n if done:\n rewards.append(reward)\n print(f'Total reward - {total_reward}')\n break\n state = new_state\n self.env.close()\n print(f'\\nReward :', np.sum(rewards) / episodes)", "def train(self, alpha, discount, episodes=1000, T=1000):\n for episode in range(episodes):\n if episode % LOGGING_STEP == 0:\n st.text(f'Episode {episode}')\n states = [self.env.reset()]\n actions = [self.epsilon_greedy_policy(states[-1])]\n rewards = []\n for t in range(T):\n state, reward, done, info = self.env.step(actions[-1])\n states.append(state)\n rewards.append(reward)\n if done:\n break\n else:\n actions.append(self.epsilon_greedy_policy(state))\n s_t, s_t1, a_t = states[-2], states[-1], actions[-2]\n V = sum([\n self.epsilon_greedy_policy(s_t1, a) * self._Q[s_t1][a]\n for a in range(self.env.nA)\n ])\n self._Q[s_t][a_t] += (\n alpha * (rewards[-1] + discount * V - self._Q[s_t][a_t])\n )", "def train(self, num_episodes=10000):\n\n self.game.restart()\n\n self.exp_states = defaultdict(int)\n\n for i in tqdm(range(num_episodes)):\n\n self.game.deal_cards()\n\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='explore')\n\n # Bookkeep visited states (?)\n player_state_str = np.array2string(player_state)\n self.exp_states[player_state_str] += 1\n\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n self.game.set_player_action(player_action)\\\n .set_opponent_action(opponent_action)\n\n player_score, opponent_score = self.game.get_scores()\n\n reward = self._get_reward(player_score, opponent_score)\n self.player.learn(player_state,\n player_action,\n reward)\n self.player.learn(opponent_state,\n opponent_action,\n -reward)\n \n print(\"Training done!\")", "def Q_learning_test(env,alpha,gamma,episodes, q_table):\n %time\n \n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n total_reward = 0\n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n \n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n next_state, reward, done, info = env.step(action) \n\n\n if reward == -10:\n penalties += 1\n \n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n \n total_reward += reward\n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n\n \n print(\"Training finished.\\n\")\n \n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened testing reward per episode\", pad = 30 , size = BIGGER_SIZE)\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major', labelsize=16);\n plt.tick_params(axis='both', which='minor', labelsize=16);\n #plt.xlim(100000, 200000);\n #plt.ylim(0,50)\n # plt.xticks(np.arange(0, episodes+1, 5000));\n # plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def dqn(\n env,\n n_episodes=10000,\n max_t=1000,\n eps_start=1.0,\n eps_end=0.005,\n eps_decay=0.995,\n train_mode=True,\n):\n scores = [] # 
list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n action_size = brain.vector_action_space_size\n env_info = env.reset(train_mode=train_mode)[brain_name]\n state_size = len(env_info.vector_observations[0])\n\n agent = Agent(state_size=state_size, action_size=action_size, seed=1)\n\n for i_episode in range(1, n_episodes + 1):\n state = env_info.vector_observations[0]\n score = 0\n for _ in range(max_t):\n action = np.int32(agent.act(state, eps))\n env_info = env.step(action)[\n brain_name\n ] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n env.reset(train_mode=train_mode)[brain_name]\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n ),\n end=\"\",\n )\n if i_episode % 100 == 0:\n print(\n \"\\rEpisode {}\\tAverage Score: {:.2f}\".format(\n i_episode, np.mean(scores_window)\n )\n )\n if np.mean(scores_window) >= 13.0:\n print(\n \"\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}\".format(\n i_episode - 100, np.mean(scores_window)\n )\n )\n torch.save(agent.qnetwork_local.state_dict(), \"checkpoint_vanilla.pth\")\n break\n return scores", "def Q_learning_train(env,alpha,gamma,epsilon,episodes):\n %time\n # For plotting metrics\n all_epochs = []\n all_penalties = []\n rewards = []\n \n #Initialize Q table of 22500 x 8 size (22500 states and 8 actions) with all zeroes\n q_table = np.zeros([env.observation_space.n, env.action_space.n]) \n \n for i in range(1, episodes+1):\n state = env.reset()\n episode_rewards = []\n\n epochs, penalties, reward, = 0, 0, 0\n done = False\n\n while not done:\n if random.uniform(0, 1) < epsilon:\n action = env.action_space.sample() # Explore action space randomly\n else:\n action = np.argmax(q_table[state]) # Exploit learned values by choosing optimal values\n\n next_state, reward, done, info = env.step(action) \n\n old_value = q_table[state, action]\n next_max = np.max(q_table[next_state])\n\n new_value = (1 - alpha) * old_value + alpha * (reward + gamma * next_max)\n q_table[state, action] = new_value\n\n if reward == -10:\n penalties += 1\n \n\n state = next_state\n episode_rewards.append(reward)\n epochs += 1\n \n if done == True:\n break \n if epochs == 1000:\n break \n rewards.append(np.sum(episode_rewards))\n \n if i % 1000 == 0:\n clear_output(wait=True)\n print(f\"Episode: {i}\")\n \n \n print(\"Training finished.\\n\")\n \n plt.plot(savgol_filter(rewards, 1001, 3, mode = \"interp\"))\n plt.title(\"Smoothened training reward per episode\", pad = 30, size = BIGGER_SIZE)\n plt.legend()\n plt.xlabel('Episodes', labelpad = 20);\n plt.ylabel('Total Reward', labelpad = 20);\n plt.tick_params(axis='both', which='major');\n plt.tick_params(axis='both', which='minor');\n #plt.xlim(0, 60000);\n #plt.ylim(0,50)\n #plt.xticks(np.arange(0, episodes+1, 5000));\n #plt.yticks(np.arange(min(rewards), max(rewards)+1, 1000));", "def epsilon(current_episode, 
num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "def run_episode(self, environment):\n state, texts = environment.reset()\n self.steps_done = 0\n action = None\n while True:\n state_tensor = FloatTensor([state])\n text_tensor = FloatTensor(texts).mean(dim=0, keepdim=True)\n action = self.Q.sample_from_softmax_policy(state_tensor, text_tensor)\n position = self.convert_action(action)\n (next_state, next_texts), reward, done, _ = environment.step(position)\n next_text_tensor = FloatTensor(next_texts).mean(dim=0, keepdim=True)\n for t1 in texts:\n t1_tensor = FloatTensor([t1])\n for t2 in next_texts:\n t2_tensor = FloatTensor([t2])\n self.memory.push(\n (state_tensor, t1_tensor, action, t2_tensor,) # action is already a tensor\n )\n self.learn(state_tensor, text_tensor, action, next_state, next_text_tensor, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def epsilon_greedy_agent(bandit, iterations, epsilon = 0.2, initial_rounds = 1):\n\n pay_offs = dict()\n\n for i in range(iterations):\n # sometimes randomly pick an action to explore\n if random.random() < epsilon or i < initial_rounds:\n a = random.choice(bandit.actions)\n # otherwise pick the best one thus far\n else:\n # check for the lever with the best average payoff\n new_dict = {}\n for key,val in pay_offs.items():\n new_dict[key] = np.mean(val) \n a = max(new_dict, key=new_dict.get)\n\n r = bandit.sample(a)\n\n #update rewards\n if a in pay_offs:\n pay_offs[a].append(r)\n else:\n pay_offs[a] = [r]\n \n yield a, r", "def 
evaluate(env: AlfEnvironment, algorithm: RLAlgorithm,\n num_episodes: int) -> List[alf.metrics.StepMetric]:\n batch_size = env.batch_size\n env.reset()\n time_step = common.get_initial_time_step(env)\n algorithm.eval()\n policy_state = algorithm.get_initial_predict_state(env.batch_size)\n trans_state = algorithm.get_initial_transform_state(env.batch_size)\n episodes_per_env = (num_episodes + batch_size - 1) // batch_size\n env_episodes = torch.zeros(batch_size, dtype=torch.int32)\n episodes = 0\n metrics = [\n alf.metrics.AverageReturnMetric(\n buffer_size=num_episodes, example_time_step=time_step),\n alf.metrics.AverageEpisodeLengthMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageEnvInfoMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageDiscountedReturnMetric(\n buffer_size=num_episodes, example_time_step=time_step),\n alf.metrics.EpisodicStartAverageDiscountedReturnMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n alf.metrics.AverageRewardMetric(\n example_time_step=time_step, buffer_size=num_episodes),\n ]\n time_step = common.get_initial_time_step(env)\n while episodes < num_episodes:\n # For parallel play, we cannot naively pick the first finished `num_episodes`\n # episodes to estimate the average return (or other statistics) as it can be\n # biased towards short episodes. Instead, we stick to using the first\n # episodes_per_env episodes from each environment to calculate the\n # statistics and ignore the potentially extra episodes from each environment.\n invalid = env_episodes >= episodes_per_env\n # Force the step_type of the extra episodes to be StepType.FIRST so that\n # these time steps do not affect metrics as the metrics are only updated\n # at StepType.LAST. 
The metric computation uses cpu version of time_step.\n time_step.cpu().step_type[invalid] = StepType.FIRST\n\n next_time_step, policy_step, trans_state = policy_trainer._step(\n algorithm=algorithm,\n env=env,\n time_step=time_step,\n policy_state=policy_state,\n trans_state=trans_state,\n metrics=metrics)\n\n time_step.step_type[invalid] = StepType.FIRST\n\n for i in range(batch_size):\n if time_step.step_type[i] == StepType.LAST:\n env_episodes[i] += 1\n episodes += 1\n\n policy_state = policy_step.state\n time_step = next_time_step\n\n env.reset()\n return metrics", "def hdqn_learning(\n env,\n agent,\n num_episodes,\n exploration_schedule,\n gamma=1.0,\n ):\n ###############\n # RUN ENV #\n ###############\n # Keep track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n n_thousand_episode = int(np.floor(num_episodes / 1000))\n visits = np.zeros((n_thousand_episode, env.nS))\n total_timestep = 0\n meta_timestep = 0\n ctrl_timestep = defaultdict(int)\n\n for i_thousand_episode in range(n_thousand_episode):\n for i_episode in range(1000):\n episode_length = 0\n current_state = env.reset()\n visits[i_thousand_episode][current_state-1] += 1\n encoded_current_state = one_hot_state(current_state)\n \n done = False\n while not done:\n meta_timestep += 1\n # Get annealing exploration rate (epsilon) from exploration_schedule\n meta_epsilon = exploration_schedule.value(total_timestep)\n goal = agent.select_goal(encoded_current_state, meta_epsilon)[0]\n encoded_goal = one_hot_goal(goal+1)\n\n total_extrinsic_reward = 0\n goal_reached = False\n s1 = encoded_current_state\n while not done and not goal_reached:\n total_timestep += 1\n episode_length += 1\n ctrl_timestep[goal] += 1\n # Get annealing exploration rate (epsilon) from exploration_schedule\n ctrl_epsilon = exploration_schedule.value(total_timestep)\n joint_state_goal = np.concatenate([encoded_current_state, encoded_goal], axis=1)\n action = agent.select_action(joint_state_goal, ctrl_epsilon)[0]\n ### Step the env and store the transition\n next_state, extrinsic_reward, done, _ = env.step(action)\n # Update statistics\n stats.episode_rewards[i_thousand_episode*1000 + i_episode] += extrinsic_reward\n stats.episode_lengths[i_thousand_episode*1000 + i_episode] = episode_length\n visits[i_thousand_episode][next_state-1] += 1\n\n encoded_next_state = one_hot_state(next_state)\n intrinsic_reward = agent.get_intrinsic_reward(goal+1, next_state)\n goal_reached = next_state == (goal+1)\n\n joint_next_state_goal = np.concatenate([encoded_next_state, encoded_goal], axis=1)\n #print (joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n agent.ctrl_replay_memory.push(joint_state_goal, action, joint_next_state_goal, intrinsic_reward, done)\n # Update both meta-controller and controller\n agent.update_meta_controller(gamma)\n agent.update_controller(gamma)\n\n total_extrinsic_reward += extrinsic_reward\n current_state = next_state\n encoded_current_state = encoded_next_state\n # Goal Finished\n agent.meta_replay_memory.push(s1, goal, encoded_next_state, total_extrinsic_reward, done)\n\n return agent, stats, visits", "def fit(self, num_iterations, max_episode_length=250, eval_every_nth=1000, save_model_every_nth=1000, log_loss_every_nth=1000, video_every_nth=20000):\n self.compile()\n self.policy = LinearDecayGreedyEpsilonPolicy(start_value=1., end_value=0.1, num_steps=1e6, num_actions=self.num_actions) # for training\n self.replay_memory = 
ReplayMemory(max_size=1000000)\n self.log_loss_every_nth = log_loss_every_nth\n random_policy = UniformRandomPolicy(num_actions=self.num_actions) # for burn in \n num_episodes = 0\n\n # tf logging\n self.tf_session = K.get_session()\n self.tf_summary_writer = tf.summary.FileWriter(self.log_dir, self.tf_session.graph)\n\n while self.iter_ctr < num_iterations:\n state = self.env.reset()\n self.preprocessor.reset_history_memory()\n\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0 \n\n while num_timesteps_in_curr_episode < max_episode_length:\n self.iter_ctr+=1 # number of steps overall\n num_timesteps_in_curr_episode += 1 # number of steps in the current episode\n\n # logging\n # if not self.iter_ctr % 1000:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n\n # this appends to uint8 history and also returns stuff ready to be spit into the network\n state_network = self.preprocessor.process_state_for_network(state) #shape is (4,84,84,1). axis are swapped in cal_q_vals\n # print \"shape {}, max {}, min {}, type {} \".format(state_network.shape, np.max(state_network), np.min(state_network), state_network.dtype)\n\n # burning in \n if self.iter_ctr < self.num_burn_in:\n action = random_policy.select_action() # goes from 0 to n-1\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n # atari_preprocessor.process_state_for_memory converts it to grayscale, resizes it to (84, 84) and converts to uint8\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n # this should be called when num_timesteps_in_curr_episode > max_episode_length, but we can call it in is_terminal as well. \n # it won't change anything as it just sets the last entry's is_terminal to True\n self.replay_memory.end_episode() \n break\n\n # training\n else:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n q_values = self.calc_q_values(state_network)\n # print \"q_values {} q_values.shape {}\".format(q_values, q_values.shape)\n #print \"q_values.shape \", q_values.shape\n action = self.policy.select_action(q_values=q_values, is_training=True)\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n # validation. 
keep this clause before the breaks!\n if not(self.iter_ctr%eval_every_nth):\n print \"\\n\\nEvaluating at iter {}\".format(self.iter_ctr)\n if not(self.iter_ctr%video_every_nth):\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=True)\n else:\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=False)\n print \"Done Evaluating\\n\\n\"\n\n # save model\n if not(self.iter_ctr%save_model_every_nth):\n self.q_network.save(os.path.join(self.log_dir, 'weights/q_network_{}.h5'.format(str(self.iter_ctr).zfill(7))))\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n self.replay_memory.end_episode() \n break\n\n if not(self.iter_ctr % self.train_freq):\n self.update_policy()\n\n state = next_state", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 
1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def sarsa_lambda(env, estimator, num_episodes, num_timesteps, gamma=1.0, epsilon=0.1, epsilon_decay=0.1, lambd=0.1):\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n for i_episode in range(num_episodes):\n # print(\"episode: \", i_episode)\n # initialize\n\n policy = make_epsilon_greedy_policy(\n estimator, epsilon, env.action_space.n)\n\n estimator.initialize_eligibility()\n\n s = env.reset()\n rAll = 0\n\n # pick the first action\n probs = policy(s)\n a = np.random.choice(np.arange(len(probs)), p=probs)\n Q = estimator.predict(s, a)\n\n for t in range(num_timesteps):\n next_s, r, done, _ = env.step(a)\n # print(\"time setp\", t)\n # print(\"took action \", a)\n # print(\"took state \", s)\n\n # update eligibility\n estimator.update_eligibility(s, a, gamma, lambd)\n if done:\n\n td_target = r\n else:\n next_probs = policy(next_s)\n next_a = np.random.choice(np.arange(len(next_probs)), p=next_probs)\n next_Q = estimator.predict(next_s, next_a)\n td_target = r + gamma * next_Q\n\n td_delta = td_target - Q\n\n estimator.update(s, a, td_delta)\n\n rAll += r\n s = next_s\n a = next_a\n Q = next_Q\n epsilon *= epsilon_decay\n\n if done:\n print(\"reached the goal! 
at episode {}\".format(i_episode))\n break\n\n stats.episode_rewards[i_episode] = rAll\n stats.episode_lengths[i_episode] = t\n return stats", "def play_against_random(env, q_value, n_episodes = 100,\n play_as = 'O', render = False, self_play = False):\n \n assert play_as in ['X','O'], \"Player should be X or O\"\n \n \n running_reward = []\n \n for episode in range(n_episodes):\n \n #start episode\n state = env.reset()\n done = False\n \n while not done:\n \n if play_as == state[1] :\n #print(\"q learner\")\n action = e_greedy(state,env,q_value, inference = True)[0]\n \n else:\n if self_play:\n action = e_greedy(state,env,q_value, inference = True)[0]\n else:\n action = random_player(env)\n \n state,reward,done, _ = env.step(action)\n \n if render:\n env.render()\n print(reward, \"\\n\\n\")\n running_reward.append(reward)\n \n if play_as == 'X':\n running_reward = [-i for i in running_reward] \n \n performance = np.mean(running_reward)\n \n won = sum([1 if i == 1 else 0 for i in running_reward])\n lost = sum([1 if i == -1 else 0 for i in running_reward])\n draw = sum([1 if i == 0 else 0 for i in running_reward])\n \n #print(f\"Player : {play_as} | Performance : {performance} | Won: {won} | Lost: {lost} | Draw: {draw} | Total : {n_episodes}\")\n \n return (won,lost,draw)", "def epsilon_greedy_move(self):\n\n # get the current state\n state, _ = self.board.bit_board_representation()\n \n # choose the move to play\n is_exploring_move = False\n if random.random() < self.epsilon:\n # exploration\n action = self.board.random_move()\n is_exploring_move = True\n else:\n # exploitation\n action, _ = self.board.greedy_action_move(self.target_network)\n\n action_index = action\n if self.board.player == CONST.BLACK:\n action_index = action + 9\n \n # play the epsilon greedy move\n self.board.play_move(action)\n \n # add the experience to the experience buffer if the move was not an exploration move\n if not is_exploring_move:\n reward = self.board.reward()\n not_terminal = self.board.not_terminal_int()\n succ_state, succ_player = self.board.bit_board_representation()\n succ_legal_moves = self.board.legal_moves\n self.experience_buffer.add(state, action_index, reward, not_terminal, succ_state, succ_player, succ_legal_moves)", "def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken", "def train_multiple_eps_dynamic(self, env, no_episodes=200, ng_int=50, horizon=1000, lr=0.1):\n\n\t\tr_vec = []\n\t\tep_len_vec = []\n\t\ttasks = 0\n\t\tfor i in range(no_episodes):\n\t\t\tif(i % ng_int == 0):\n\t\t\t\tgoal_index = [0, 12, 156, 168]\n\t\t\t\tnew_goal = [goal_index[tasks]]\n\t\t\t\ttasks += 1\n\t\t\t\tenv.reset(loc_r=new_goal, loc_t=new_goal)\n\t\t\t\t# new_goal = sample(env.listGoalStates(), 1)\n\t\t\t\t# env.reset(loc_r=new_goal, loc_t=new_goal)\n\n\t\t\t# Run the agent for one episode and 
get a vector of rewards and a scalar for episode length\n\t\t\tr_ep, ep_len = self.train_one_eps(env, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the information\n\t\t\tr_vec.append(r_ep)\n\t\t\tep_len_vec.append(ep_len)\n\n\t\treturn r_vec, ep_len_vec", "def train_ddpg(agent, env, n_episodes=400, max_t=1000, save=True):\n # get the default brain\n brain_name = env.brain_names[0]\n scores_deque = deque(maxlen=100)\n final_scores = []\n not_solved = True\n num_agents = len(env.reset()[brain_name].vector_observations)\n best = 0\n episodes_remaining = n_episodes\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset()[brain_name]\n states = env_info.vector_observations\n agent.reset()\n agent_scores = np.zeros(num_agents)\n for t in range(max_t):\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n agent.step(states, actions, rewards, next_states, dones)\n states = next_states\n agent_scores += rewards\n if np.any(dones):\n break\n\n max_score = np.max(agent_scores)\n scores_deque.append(max_score)\n final_scores.append(max_score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}'.format(i_episode, np.mean(scores_deque), max_score), end=\"\", flush=True)\n if len(scores_deque) == 100 and np.mean(scores_deque) > SOLVED_SCORE and not_solved:\n not_solved = False\n episodes_remaining = EPISODES_AFTER_SOLVE # try to increase score for some episodes\n print(\"\\nEnvironment solved in {} episodes!\\n\".format(i_episode), flush=True)\n if save:\n torch.save(agent.actor_local.state_dict(), 'saved_models/actor_solved.pth')\n torch.save(agent.critic_local.state_dict(), 'saved_models/critic_solved.pth')\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), flush=True)\n\n if not not_solved and i_episode % 5 == 0 and np.mean(scores_deque) > best:\n best = np.mean(scores_deque)\n if save:\n torch.save(agent.actor_local.state_dict(), 'saved_models/best_actor.pth')\n torch.save(agent.critic_local.state_dict(), 'saved_models/best_critic.pth')\n\n if not not_solved:\n episodes_remaining -= 1\n if episodes_remaining == 0:\n break\n\n return final_scores", "def eval_against_bot(env, q_agent, t_agent, num_episodes):\n score = 0\n for _ in range(num_episodes):\n time_step = env.reset()\n q_agent_output = q_agent.step(time_step, is_evaluation=True)\n t_agent_output = t_agent.step(time_step, is_evaluation=True)\n time_step = env.step([q_agent_output.action, t_agent_output.action])\n score += time_step.rewards[0]\n return score / num_episodes", "def eval_against_bot(env, q_agent, t_agent, num_episodes):\n score = 0\n for _ in range(num_episodes):\n time_step = env.reset()\n q_agent_output = q_agent.step(time_step, is_evaluation=True)\n t_agent_output = t_agent.step(time_step, is_evaluation=True)\n time_step = env.step([q_agent_output.action, t_agent_output.action])\n score += time_step.rewards[0]\n return score / num_episodes", "def play_self_play_game(self):\n\n # start a fresh game \n self.reset_game()\n \n # play the epsilon greedy move and save the state transition in the experience lists \n while not self.game_terminal():\n self.epsilon_greedy_move()", "def q_learning(env, learning, discount, epsilon, min_eps, episodes):\n # [18.00000072 14.00000006]\n num_states = (env.observation_space.high - env.observation_space.low) * \\\n np.array([10, 100]) # >> [18.00000072 14.00000006]\n num_states = 
np.round(num_states, 0).astype(int) + 1 # >> [19 15]\n\n # Initialize Q table\n # env.action_space.n return the number of action that our agent can make (here 3, left, cease, right)\n Q = np.random.uniform(low=-1, high=1, size=(num_states[0], num_states[1], env.action_space.n))\n\n # Initialize variable to track rewards\n reward_list = []\n ave_reward_list = []\n\n # Calculate episodic reduction in epsilon\n reduction = (epsilon - min_eps) / (episodes / 2)\n\n for i in range(episodes):\n # Initialize parameters\n done = False\n tot_reward, reward = 0, 0\n state = env.reset()\n\n # Discretize state\n state_adj = adjust_state(state)\n\n while done != True:\n # Render env for last five eps\n if i >= (episodes - 20):\n env.render()\n\n # Determine next action - epsilon greedy strategy\n if np.random.random() < 1 - epsilon:\n action = np.argmax(Q[state_adj[0], state_adj[1]])\n else:\n action = np.random.randint(0, env.action_space.n)\n\n # Get next state and reward\n state2, reward, done, info = env.step(action)\n\n # Discretize state2\n state2_adj = adjust_state(state2)\n\n # Allow for terminal states // .5 on env_space[0] represent the flag position\n if done and state2[0] >= .5:\n Q[state_adj[0], state_adj[1], action] = reward\n\n # adjust Q value for current state\n else:\n '''work on this, it's complicated but far from non-understandable'''\n delta = learning*(reward + discount*np.max(Q[state2_adj[0], state2_adj[1]]) -\n Q[state_adj[0], state_adj[1], action])\n Q[state_adj[0], state_adj[1], action] += delta\n\n tot_reward += reward\n state_adj = state2_adj\n\n # Decay epsilon\n if epsilon > min_eps:\n epsilon -= reduction\n\n # Track rewards\n reward_list.append(tot_reward)\n\n if (i+1) % 100 == 0:\n ave_reward = np.mean(reward_list)\n ave_reward_list.append(ave_reward)\n reward_list = []\n print(f'Episode {i+1} Average Reward: {ave_reward}')\n\n env.close()\n\n return ave_reward_list", "def grid_search_epsilon(environmnet, policy='ε–greedy', parameter='epsilon'):\n\tparameter_values = []\n\tavg_scores = []\n\tavg_steps = []\n\n\tcount = 1\n\tdecay_search = [0.5, 0.6, 0.7, 0.8, 0.9, 0.99, 0.99]\n\tfor param_num in decay_search:\n\n\t\tagent = Q_Agent(exploration_rate_decay=param_num, epsilon=1)\n\t\tall_iterations, all_rewards, step_count = agent.train(environmnet, print_results=True, iter_n=1000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t policy=policy)\n\t\tavg_scores.append(np.mean(all_rewards))\n\t\tavg_steps.append(np.mean(step_count))\n\t\tparameter_values.append(param_num)\n\t\trewards_data = np.array([all_iterations, all_rewards])\n\t\tstep_data = np.array([all_iterations, step_count])\n\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_rewards_' + str(\n\t\t\t\tparam_num) + '.csv', rewards_data.transpose(), delimiter=\",\")\n\t\tnp.savetxt(\n\t\t\t'/Users/matthewgalloway/Documents/RF/q_learning/' + parameter + '_inv/' + parameter + '_steps_' + str(\n\t\t\t\tparam_num) + '.csv', step_data.transpose(), delimiter=\",\")\n\t\tif count % 50 == 0:\n\t\t\tprint('iteration {} of 10'.format(count))\n\n\t\tcount += 1\n\tresults = {\n\t\t'param_values': parameter_values,\n\t\t'avg_scores': avg_scores,\n\t\t'avg_steps': avg_steps,\n\n\t}\n\tprint(results)\n\treturn pd.DataFrame(results)", "def train(self, num_decisions=350):\n os.system(\"mkdir \" + self.folder_name + \"Train\")\n for i in range(5000):\n episode_folder_name = self.folder_name + \"Train/\" + str(i) + \"/\"\n all_system_states = []\n all_system_rewards = []\n 
all_system_states_cluster = []\n all_grid_states_cluster = []\n all_surrounding_states_cluster = []\n os.system(\"mkdir \" + episode_folder_name)\n filename = episode_folder_name + str(i) + \".h5\"\n self.system.reset_context(filename)\n self.system.run_decorrelation(20)\n grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n for j in range(num_decisions):\n action_index = self._get_action(state, i)\n transition_to_add = [state, action_index]\n tag = \"_train_\" + str(j)\n actions = [self.all_actions[i] for i in action_index]\n try:\n self.system.update_action(actions)\n system_states, system_rewards, system_states_cluster = self.system.run_step(\n is_detailed=True, tag=tag)\n all_system_states.append(system_states)\n all_system_rewards.append(system_rewards)\n all_system_states_cluster.append(system_states_cluster)\n\n except OpenMMException:\n print(\"Broken Simulation at Episode:\",\n str(i), \", Decision:\", str(j))\n break\n\n grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n reward = self._get_reward(grid_reward, surrounding_reward)\n\n all_grid_states_cluster.append(grid_states_cluster)\n all_surrounding_states_cluster.append(surrounding_states_cluster)\n\n # Use len_reward for number of grids\n done = [False] * len(reward) # Never Done\n transition_to_add.extend([reward, state, done])\n rb_decision_samples = 0\n for rb_tuple in zip(*transition_to_add):\n self.buffer.push(*list(rb_tuple))\n\n for _ in range(self.update_num):\n self._update()\n self._save_episode_data(episode_folder_name)\n np.save(episode_folder_name + \"system_states\",\n np.array(all_system_states))\n np.save(episode_folder_name + \"system_rewards\",\n np.array(all_system_rewards))\n np.save(episode_folder_name + \"system_states_cluster\",\n np.array(all_system_states_cluster))\n np.save(episode_folder_name + \"grid_states_cluster\",\n np.array(all_grid_states_cluster, dtype=object))\n np.save(episode_folder_name + \"all_states_cluster\",\n np.array(all_surrounding_states_cluster))\n self._save_data()", "def ddpg_learning(env, agent, brain_name, cfg,\n n_episodes=2000, max_t=100000,\n avg_score_cutoff=15,\n model_save_path=None):\n print(\"Training an agent with DDPG.\")\n\n env_info = env.reset(train_mode=True)[brain_name]\n action_size = env.brains[brain_name].vector_action_space_size\n # state_size = env_info.vector_observations.shape[1]\n num_agents = len(env_info.agents)\n\n if not os.path.exists(model_save_path):\n print(\"Creating directory {:s} to save model weights into!\".format(model_save_path))\n os.mkdir(model_save_path)\n\n all_scores = [] # list containing scores from each episode\n\n for i_episode in range(1, n_episodes + 1):\n\n env_info = env.reset(train_mode=True)[brain_name]\n states = env_info.vector_observations\n\n scores = np.zeros(num_agents)\n\n for t in range(max_t):\n\n if cfg.maddpg:\n actions = agent.act(states)\n env_info = env.step(actions)[brain_name]\n else:\n actions = agent.act(states.reshape(-1))\n env_info = env.step(actions.reshape(num_agents, action_size))\n\n next_states = env_info.vector_observations\n rewards = env_info.rewards\n dones = env_info.local_done\n\n if cfg.maddpg:\n agent.step(states, actions, rewards, next_states, dones)\n else:\n # single agent with states and actions stacked together\n agent.step(states.reshape(-1), 
actions.reshape(-1),\n np.max(rewards), next_states.reshape(-1),\n np.any(dones))\n\n states = next_states\n scores += rewards\n if np.any(dones):\n break\n\n all_scores.append(scores) # save most recent score\n\n last100mean = np.mean(np.max(np.atleast_2d(all_scores), axis=1)[-100:])\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n if cfg.save_scores:\n pd.DataFrame(scores).to_hdf(cfg.save_scores, \"scores\")\n\n if last100mean >= avg_score_cutoff:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.4f}'.format(\n i_episode, last100mean))\n\n break\n\n # save trained models a final time\n if model_save_path is not None:\n agent.save_weights(model_save_path)\n\n return pd.DataFrame(all_scores)", "def Worker(episode):\n episode_start_time = time.monotonic()\n\n stop = 0\n near_boundaries = 0\n failed = 0\n\n cdf_t = -1.0 * np.ones(LEN_CDF_T)\n cdf_h = -1.0 * np.ones(LEN_CDF_H)\n bin_t = 0\n bin_h = 0\n cdf_t[bin_t] = 0.0\n cdf_h[bin_h] = 0.0\n\n myenv, mypol = init_envs()\n check_envs(myenv, mypol)\n\n t = 0 # time step\n cumulative_hits = 0 # cumulative number of hits (int)\n T_mean = 0\n p_not_found_yet = 1\n\n while True:\n\n # choice of action\n action = mypol.choose_action()\n\n # step in myenv\n hit, p_end, done = myenv.step(action, quiet=(mypol.policy_index == -1))\n near_boundaries = max(near_boundaries, myenv.agent_near_boundaries)\n\n t += 1\n T_mean += p_not_found_yet\n\n p_found = 1.0 - p_not_found_yet * (1.0 - p_end)\n\n bin_to = bin_t\n bin_t = int((t - BIN_START_T) // BIN_SIZE_T)\n cdf_t[bin_to:bin_t] = cdf_t[bin_to]\n cdf_t[bin_t] = p_found\n\n bin_ho = bin_h\n bin_h = int((cumulative_hits - BIN_START_H) // BIN_SIZE_H)\n cdf_h[bin_ho:bin_h] = cdf_h[bin_ho]\n cdf_h[bin_h] = p_found\n\n p_not_found_yet *= 1 - p_end\n cumulative_hits += hit\n\n if p_not_found_yet < STOP_p or p_end > 1 - EPSILON or done:\n stop = 1\n elif t > STOP_t - 1:\n stop = 2\n failed = 1\n elif myenv.agent_stuck:\n stop = 3\n failed = 1\n\n if stop:\n if episode % 100 == 0 and sys.stdout.isatty() and not WITH_MPI:\n txt = \"episode: %7d [hit(t0) = %d] || <nsteps>: %7.2f || total nb of steps: %d\" \\\n % (episode, myenv.initial_hit, T_mean, t)\n if stop == 1:\n print(txt)\n elif stop == 2:\n print(txt + \" || max iter reached! (prob not found yet = %7.2e)\" % p_not_found_yet)\n elif stop == 3:\n print(txt + \" || agent stuck! 
(prob not found yet = %7.2e)\" % p_not_found_yet)\n\n sys.stdout.flush()\n break\n\n # fill in remaining bins\n cdf_t[bin_t + 1:LEN_CDF_T] = cdf_t[bin_t]\n cdf_h[bin_h + 1:LEN_CDF_H] = cdf_h[bin_h]\n\n if np.any(cdf_t < 0) or np.any(cdf_h < 0):\n raise Exception(\"Missing values in the cdf, check implementation\")\n\n # monitoring\n episode_elapsed_time = time.monotonic() - episode_start_time\n monitoring_episode_tmp_file = os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\"))\n with open(monitoring_episode_tmp_file, \"a\") as mfile:\n mfile.write(\"%9d\\t%8d\\t%9d\\t%13d\\t%11.2e\\t%15.4e\\t%21.4e\\n\" % (\n episode, myenv.initial_hit, stop, near_boundaries, p_not_found_yet, T_mean, episode_elapsed_time))\n\n return cdf_t, cdf_h, T_mean, failed", "def run(self, num_episodes=1):\n pygame.display.update()\n self.fps_clock = pygame.time.Clock()\n\n try:\n for episode in range(num_episodes):\n self.run_episode()\n self.env.new_episode()\n self.event = Event.next(self.event)\n except QuitRequestedError:\n print(\"Exit Program\")\n\n pygame.quit()", "def run_episode(self, max_steps=200):\r\n state = self.env.reset()\r\n total_reward = 0\r\n for step in range(max_steps):\r\n next_state, reward, done, _ = self.env.step(self.act(state))\r\n\r\n state = next_state\r\n total_reward += reward\r\n\r\n if done:\r\n break\r\n\r\n return total_reward", "def evaluate(self, env, num_episodes, max_episode_length=None):\n self.mode = 'test'\n\n average_episode_length = 0\n rewards = []\n\n for i in range(num_episodes):\n state = env.reset()\n t = 0\n episode_reward = 0.0\n while True:\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n episode_reward += reward\n average_episode_length += 1\n\n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n\n state = next_state\n\n rewards.append(episode_reward)\n self.mode = 'train'\n return np.mean(rewards), np.std(rewards), average_episode_length / num_episodes", "def train_multiple_eps(self, env, no_episodes=200, horizon=1000, lr=0.1):\n\n\t\tr_vec = []\n\t\tep_len_vec = []\n\t\tfor i in range(no_episodes):\n\n\t\t\t# Run the agent for one episode and get a vector of rewards and a scalar for episode length\n\t\t\tr_ep, ep_len = self.train_one_eps(env, horizon=horizon, lr=lr)\n\n\t\t\t# Storing the information\n\t\t\tr_vec.append(r_ep)\n\t\t\tep_len_vec.append(ep_len)\n\n\t\treturn r_vec, ep_len_vec", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def execute(self):\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation\n # main loop\n while not self.environment.end_episode:\n # each agent choose its action\n self.environment.choose_action()\n # next state\n self.environment.calculate_next_state()\n # is the end of the episode\n self.environment.calculate_end_episode()\n # set observations for all agents\n observation = self.environment.get_observation_for_agent()\n for ag in self.environment.agents:\n ag.observation = observation", "def doEpisodes(self, number = 1):\n all_rewards = []\n for dummy in range(number):\n self.agent.newEpisode()\n rewards = []\n self.stepid = 0\n self.task.reset()\n while not self.task.isFinished():\n r = self._oneInteraction()\n rewards.append(r)\n all_rewards.append(rewards)\n return all_rewards", "def train_dqn(env, learn_dict, agent, 
log_results=True):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = learn_dict['eps_start']\n brain_name = learn_dict['brain_name']\n n_episodes=learn_dict['n_episodes']\n max_t= learn_dict['max_t']\n eps_start= learn_dict['eps_start']\n eps_end= learn_dict['eps_end']\n eps_decay= learn_dict['eps_decay']\n early_stop = learn_dict['early_stop']\n\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0\n for t in range(max_t):\n \n action = agent.act(state, eps).astype(int)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n # have the agent learn a step\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=early_stop:\n if log_results: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return scores", "def test(args):\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n env_info = env.reset(train_mode=True)[brain_name]\n\n num_agents = len(env_info.agents)\n print('Number of agents:', num_agents)\n\n # dim of each action\n action_size = brain.vector_action_space_size\n print('Size of each action:', action_size)\n\n # dim of the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n agent = MADDPG(state_size, action_size, actor_layer_dim_1=args.actor_layer_dim_1,\n actor_layer_dim_2=args.actor_layer_dim_2,\n actor_layer_dim_3=args.actor_layer_dim_3,\n critic_layer_dim_1=args.critic_layer_dim_1,\n critic_layer_dim_2=args.critic_layer_dim_2,\n critic_layer_dim_3=args.critic_layer_dim_3)\n\n agent.load(\n \"chkpts/{}/{:02d}_best_model.checkpoint\".format(args.model_path, args.loop_counter))\n\n test_scores = []\n for i_episode in tqdm(range(1, 1+args.test_n_run)):\n # initialize the scores\n scores = np.zeros(num_agents)\n env_info = env.reset(train_mode=True)[\n brain_name] # reset the environment\n states = env_info.vector_observations # get the current states\n dones = [False]*num_agents\n while not np.any(dones):\n actions = agent.act(states) # select actions\n # send the actions to the environment\n env_info = env.step(actions)[brain_name]\n next_states = env_info.vector_observations # get the next states\n rewards = env_info.rewards # get the rewards\n dones = env_info.local_done # see if episode has finished\n scores += rewards # update the scores\n # roll over the states to next time step\n states = next_states\n\n test_scores.append(np.max(scores))\n\n avg_score = sum(test_scores)/len(test_scores)\n print(\"Test Score: {}\".format(avg_score))\n\n return avg_score", "def evaluate(self, env, num_episode, epsilon):\n num_environment = env.num_process\n env.reset()\n 
reward_of_each_environment = np.zeros(num_environment)\n rewards_list = []\n\n num_finished_episode = 0\n\n while num_finished_episode < num_episode:\n old_state, action, reward, new_state, is_terminal = env.get_state()\n action = self.get_action(new_state, epsilon)\n env.take_action(action)\n for i, r, is_t in zip(range(num_environment), reward, is_terminal):\n if not is_t:\n reward_of_each_environment[i] += r\n else:\n rewards_list.append(reward_of_each_environment[i])\n reward_of_each_environment[i] = 0\n num_finished_episode += 1\n return np.mean(rewards_list), np.std(rewards_list), self.epsilon", "def test():\n \n print('Loading best networks')\n env.guesser, agent.dqn = load_networks(i_episode='best')\n #env.guesser, agent.dqn = load_networks(i_episode='best', avg_reward = )\n\n # predict outcome on test data\n y_hat_test = np.zeros(len(env.y_test))\n y_hat_test_prob = np.zeros(len(env.y_test))\n \n print('Computing predictions of test data')\n n_test = len(env.X_test)\n for i in range(n_test):\n \n if i % 1000 == 0:\n print('{} / {}'.format(i, n_test))\n \n state = env.reset(mode='test', \n patient=i,\n train_guesser=False)\n mask = env.reset_mask()\n \n # run episode\n for t in range(FLAGS.episode_length):\n\n # select action from policy\n action = agent.get_action(state, eps=0, mask=mask)\n mask[action] = 0\n \n # take the action\n state, reward, done, guess = env.step(action, mode='test') \n \n if guess != -1:\n y_hat_test_prob[i] = torch.argmax(env.probs).item()\n \n if done:\n break\n y_hat_test[i] = guess\n \n C = confusion_matrix(env.y_test, y_hat_test)\n print('confusion matrix: ')\n print(C)\n\n acc = np.sum(np.diag(C)) / len(env.y_test)\n\n print('Test accuracy: ', np.round(acc, 3))", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def rmax(env, gamma, m, R_max, epsilon, num_episodes, max_step = 6):\n\n Q = np.ones((env.nS, env.nA)) * R_max / (1 - gamma)\n R = np.zeros((env.nS, env.nA))\n nSA = np.zeros((env.nS, env.nA))\n nSASP = np.zeros((env.nS, env.nA, env.nS))\n ########################################################\n # YOUR CODE HERE #\n ########################################################\n\n # Generate episodes\n average_scores = []\n accum = 0.0\n term = int(np.log(1 / (epsilon * (1 - gamma))) / (1 - gamma))\n for i in xrange(num_episodes):\n S = env.reset()\n done = False\n episode_reward = 0.0\n n_steps = 0\n\n while not done:\n\n if n_steps >= max_step:\n break\n\n A = np.argmax([Q[S,a] for a in range(env.nA)])\n\n # Make an 
action\n nextS, reward, done, _ = env.step(A)\n episode_reward += reward\n\n # R-Max\n if nSA[S, A] < m:\n nSA[S, A] += 1\n R[S, A] += reward\n nSASP[S, A, nextS] += 1\n\n if nSA[S, A] == m:\n for j in range(term):\n for S_bar in range(env.nS):\n for A_bar in range(env.nA):\n if nSA[S_bar, A_bar] >= m:\n N = float(nSA[S_bar, A_bar])\n T_hat = nSASP[S_bar, A_bar, :] / N\n R_hat = R[S_bar, A_bar] / N\n Q[S_bar, A_bar] = R_hat\n Q[S_bar, A_bar] += gamma * np.sum(T_hat * np.max(Q, axis=1))\n\n\n # Update Q-value\n S = nextS\n n_steps += 1\n\n accum += episode_reward\n average_scores.append(accum/(i+1))\n\n plt.plot(average_scores[:10000], label=\"m=%d\"%(m))\n\n ########################################################\n # END YOUR CODE #\n ########################################################\n return Q", "def random_search():\n\tgamma = 0.7\n\talpha = 0.3\n\tepsilon = 1\n\texploration_rate_decay = 0.87\n\n\tmax_tries = 10\n\tbest_score = -1000\n\tscores = {}\n\n\tfor attempt in range(max_tries):\n\n\t\tagent = Q_Agent(epsilon=1, alpha=alpha, gamma=gamma, exploration_rate_decay=exploration_rate_decay)\n\t\t_, rewards, steps = agent.train(env, iter_n=300, policy='ε–greedy', print_results=False)\n\t\tprint(np.mean(rewards))\n\t\tscores[attempt] = np.mean(rewards)\n\n\t\tprint(\n\t\t\t\"Score:{}, gamma {}, alpha {}, epsilon {}, e_decay_rate{}\".format(\n\t\t\t\tscores[attempt], gamma, alpha, epsilon, exploration_rate_decay))\n\n\t\tif scores[attempt] > best_score:\n\t\t\tbest_score = scores[attempt]\n\t\t\tprint(best_score)\n\t\t\tbest_gamma = gamma\n\t\t\tbest_alpha = alpha\n\t\t\tbest_epsilon = epsilon\n\t\t\tbest_decay = exploration_rate_decay\n\n\t\tgamma = best_gamma + (np.random.randint(-1, 2) / 10)\n\t\tgamma = min(1, gamma)\n\t\tgamma = max(0, gamma)\n\t\talpha = best_alpha + (np.random.randint(-1, 2) / 10)\n\t\talpha = min(1, alpha)\n\t\talpha = max(0, alpha)\n\t\tepsilon = 1\n\t\texploration_rate_decay = best_decay + np.random.randint(-1, 2) / 100\n\t\texploration_rate_decay = min(0.99, exploration_rate_decay)\n\t\texploration_rate_decay = max(0.7, exploration_rate_decay)\n\n\tprint(\"Best validation_accuracy:\", best_score)\n\tprint(\"Best settings:\")\n\tprint(\"best gamma:\", best_gamma)\n\tprint(\"best alpha:\", best_alpha)\n\tprint(\"best epsilon:\", best_epsilon)\n\tprint(\"best decay:\", best_decay)", "def run_episode(self, deterministic=False):\n\n\n obs = self.env.reset()\n Observations, Actions, Rewards = [], [], [] # original trajectory\n n_Observations, n_Rewards = [], [] # normalized trajectory\n done = False\n timestep = 0\n while not done and timestep < self.episode_horizon:\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n action = self.policy.get_action(obs.astype(np.float32).reshape((1,-1)), deterministic=deterministic)\n Actions.append(action.flatten())\n obs, reward, done, _ = self.env.step(np.squeeze(action, axis=0))\n Rewards.append(reward)\n if self.reward_preprocessor:\n n_reward = self.reward_preprocessor.get_scaled_x(reward)\n else:\n n_reward = reward\n n_Rewards.append(n_reward)\n timestep += 1\n\n \n # append the last state\n Observations.append(obs)\n if self.state_preprocessor:\n n_obs = self.state_preprocessor.get_scaled_x(obs)\n else:\n n_obs = obs\n n_Observations.append(n_obs)\n\n unscaled_traj = {\"Observations\": np.array(Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(Rewards)}\n scaled_traj = {\"Observations\": 
np.array(n_Observations), \"Actions\": np.array(Actions), \"Rewards\": np.array(n_Rewards)}\n\n # update preprocessers\n if self.state_preprocessor:\n self.state_preprocessor.update(unscaled_traj['Observations'])\n # save preprocessor params for restoration\n self.state_preprocessor.save_params(os.path.join(self.logger.info_dir, \"state_preprocessor_params.pkl\"))\n if self.reward_preprocessor:\n self.reward_preprocessor.update(unscaled_traj['Rewards'])\n self.reward_preprocessor.save_params(os.path.join(self.logger.info_dir, \"reward_preprocessor_params.pkl\"))\n \n return unscaled_traj, scaled_traj", "def play(self):\n observation = self.env.reset()\n count = 0\n reward_sum = 0\n random_episodes = 0\n\n while random_episodes < 10:\n self.env.render()\n x = observation.reshape(-1, 4)\n q_values = self.model.predict(x)[0]\n action = np.argmax(q_values)\n observation, reward, done, _ = self.env.step(action)\n count += 1\n reward_sum += reward\n\n if done:\n print(\"Reward for this episode was: {}, turns was: {}\".format(reward_sum, count))\n random_episodes += 1\n reward_sum = 0\n count = 0\n observation = self.env.reset()\n\n self.env.close()", "def _run_single(self, thread_id, agent, environment, deterministic=False,\n max_episode_timesteps=-1, episode_finished=None, testing=False, sleep=None):\n\n # figure out whether we are using the deprecated way of \"episode_finished\" reporting\n old_episode_finished = False\n if episode_finished is not None and len(getargspec(episode_finished).args) == 1:\n old_episode_finished = True\n\n episode = 0\n # Run this single worker (episode loop) as long as global count thresholds have not been reached.\n while not self.should_stop:\n state = environment.reset()\n agent.reset()\n self.global_timestep, self.global_episode = agent.timestep, agent.episode\n episode_reward = 0\n\n # Time step (within episode) loop\n time_step = 0\n time_start = time.time()\n while True:\n action, internals, states = agent.act(states=state, deterministic=deterministic, buffered=False)\n reward = 0\n for repeat in xrange(self.repeat_actions):\n state, terminal, step_reward = environment.execute(action=action)\n reward += step_reward\n if terminal:\n break\n\n if not testing:\n # agent.observe(reward=reward, terminal=terminal)\n # Insert everything at once.\n agent.atomic_observe(\n states=state,\n actions=action,\n internals=internals,\n reward=reward,\n terminal=terminal\n )\n\n if sleep is not None:\n time.sleep(sleep)\n\n time_step += 1\n episode_reward += reward\n\n if terminal or time_step == max_episode_timesteps:\n break\n\n # Abort the episode (discard its results) when global says so.\n if self.should_stop:\n return\n\n self.global_timestep += time_step\n\n # Avoid race condition where order in episode_rewards won't match order in episode_timesteps.\n self.episode_list_lock.acquire()\n self.episode_rewards.append(episode_reward)\n self.episode_timesteps.append(time_step)\n self.episode_times.append(time.time() - time_start)\n self.episode_list_lock.release()\n\n if episode_finished is not None:\n # old way of calling episode_finished\n if old_episode_finished:\n summary_data = {\n \"thread_id\": thread_id,\n \"episode\": episode,\n \"timestep\": time_step,\n \"episode_reward\": episode_reward\n }\n if not episode_finished(summary_data):\n return\n # New way with BasicRunner (self) and thread-id.\n elif not episode_finished(self, thread_id):\n return\n\n episode += 1", "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=True, 
max_timesteps=10000,\n history_length=0, manual=False):\n\n stats = utils.EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n env.viewer.window.on_key_press = utils.key_press\n env.viewer.window.on_key_release = utils.key_release\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * (history_length + 1))\n state = np.array(image_hist).reshape(96, 96, history_length + 1)\n while True:\n #skip intro zoom frames\n if step < 48:\n step += 1\n env.step(utils.id_to_action(0))\n continue\n \n # TODO: get action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly.\n if do_training and manual:\n action_id = utils.manual_action\n else:\n action_id = agent.act(state, deterministic)\n action = utils.id_to_action(action_id)\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal:\n break\n\n next_state = state_preprocessing(next_state)\n \n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist).reshape(96, 96, history_length + 1)\n\n if do_training and (next_state[:82, :, -1].sum() > 5000): #track out of sight\n print('Track gone; finish this episode')\n agent.add(state, action_id, next_state, reward=-(skip_frames + 1), terminal=True) #punish\n break\n\n if do_training:\n agent.add(state, action_id, next_state, reward, terminal)\n if not manual:\n agent.train()\n\n stats.step(reward, action_id)\n\n state = next_state\n \n if terminal or (step * (skip_frames + 1)) > max_timesteps:\n break\n\n step += 1\n\n return stats", "def run_episode(env, agent, deterministic, do_training=True, rendering=False, max_timesteps=1000):\n\n stats = EpisodeStats() # save statistics like episode reward or action usage\n state = env.reset()\n\n step = 0\n while True:\n\n action_id = agent.act(state=state, deterministic=deterministic)\n next_state, reward, terminal, info = env.step(action_id)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n state = next_state\n\n # # NOTE reward shaping...\n # if terminal:\n # reward += -1\n # if step < 20:\n # reward += -10\n # if step > 100:\n # reward += 10\n\n stats.step(reward, action_id)\n\n if rendering:\n env.render()\n\n if terminal or step > max_timesteps:\n break\n\n step += 1\n\n return stats", "def run(self):\n for i in range(self.exploration_steps):\n self.single_step(i)\n if self.save_checkpoints:\n self.save_results()\n self.save_results()\n if self.verbose:\n print(\"\\nExploration completed\")\n return", "def run(self):\n evaluateAllRuns = False\n while True:\n if self.host == \"\":\n # respond to clients\n self.respond2Clients()\n else:\n print(\"Next...\")\n # randomly choose experiment + run\n if not evaluateAllRuns:\n print(\"Randomly fetching run\")\n self.exp, self.runnum, self.detname = randExpRunDet()\n else:\n\t\t try:\n print(\"Fecthing next run in experiment\")\n self.exp, self.runnum, self.detname = nextExpRunDet(self.goodExp, self.runList[0])\n if self.exp is None:\n self.runList.pop(0)\n continue\n except:\n evaluateAllRuns = False\n continue\n if not self.checkStatus(self.exp, self.runnum, self.detname):\n print \"trying: exp %s, run %s, det 
%s\"%(self.exp,self.runnum,self.detname)\n try: #temp\n self.ds = safeDataSource(self.exp, self.runnum)\n except: #temp\n continue #temp\n self.run = self.ds.runs().next()\n self.times = self.run.times()\n #Start temp code\n if self.detname is None:\n continue\n #End temp code\n self.det = psana.Detector(self.detname)\n self.det.do_reshape_2d_to_3d(flag=True)\n try:\n self.iX = np.array(self.det.indexes_x(self.run), dtype=np.int64)\n self.iY = np.array(self.det.indexes_y(self.run), dtype=np.int64)\n self.ipx, self.ipy = self.det.point_indexes(self.run, pxy_um=(0, 0))\n self.alg = PyAlgos()\n self.alg.set_peak_selection_pars(npix_min=2, npix_max=30, amax_thr=300, atot_thr=600, son_min=10)\n mask = self.det.mask(self.runnum, calib=True, status=True, edges=True, central=True, unbond=True, unbondnbrs=True)\n\n samples = np.linspace(0, len(self.times), num=100, endpoint=False, retstep=False, dtype='int')\n offset = np.floor(np.random.uniform(0, len(self.times)-samples[-1])).astype('int')\n mysamples = samples + offset\n numCrystals = 0\n for self.eventNum in mysamples:\n self.evt = self.run.event(self.times[self.eventNum])\n calib = self.det.calib(self.evt)\n if calib is not None:\n peaks = self.alg.peak_finder_v3r3(calib, rank=3, r0=3, dr=2, nsigm=10, mask=mask.astype(np.uint16))\n if self.likelihood(peaks) >= self.goodLikelihood:\n numCrystals += 1\n if numCrystals >= self.minCrystals:\n self.numSaved +=1\n self.updateStatus(self.exp, self.runnum, self.detname, self.numSaved)\n self.lastGood = True\n break\n except:\n print \"Could not analyse this run\"\n #If an experiment has not had all of its runs evaluated yet\n # and if the last randomly selected run in this experiment was good\n # then all the runs in this experiment should be evaluated\n if (self.exp not in self.goodList) and self.lastGood:\n self.goodExp = self.exp #Save the name of this experiment\n self.goodRun = self.runnum #Save the run that has already been evaluated\n self.lastGood = False #Reset the condition that the last run was \"good\"\n self.goodList.append(self.goodExp) #Add this experiment name to the list of experiments that have had all runs evaluated\n self.runList = returnRunList(self.goodExp, self.goodRun) #save list of all runs in this good exp\n evaluateAllRuns = True #rerun loop with new algorithm that evaluates each run in an experiment\n continue\n if evaluateAllRuns: #If the loop is currently evaluating all of the runs in an experiment\n if(len(self.runList) > 1):\n self.runList.pop(0) #Remove runs from the list of runs each time they are evaluated\n else:\n self.runList.pop(0)#Remove runs until the list is completely empty\n evaluateAllRuns = False #Stop evaluated all the runs of an experiment, go back to random fetching" ]
[ "0.7450819", "0.7309439", "0.71770984", "0.7124558", "0.7124558", "0.7051895", "0.703537", "0.69566625", "0.684824", "0.68205166", "0.6802857", "0.68013686", "0.67991036", "0.6790752", "0.675303", "0.67489773", "0.67405975", "0.6739792", "0.67348915", "0.6732355", "0.6706088", "0.6702963", "0.6692553", "0.6661335", "0.6645239", "0.6628636", "0.6566244", "0.6561998", "0.6550673", "0.6542252", "0.65295106", "0.65216523", "0.65166324", "0.6512699", "0.6482792", "0.64674574", "0.64631444", "0.6461596", "0.64288104", "0.6424886", "0.6420263", "0.6418371", "0.64088917", "0.6406039", "0.6402191", "0.6388478", "0.63875383", "0.63849276", "0.63630664", "0.63362086", "0.6333078", "0.63174415", "0.63145286", "0.63137907", "0.6305231", "0.62891", "0.62877566", "0.628599", "0.6283677", "0.62630606", "0.6246115", "0.6238732", "0.62347937", "0.6233414", "0.6227434", "0.6208441", "0.6188862", "0.6185247", "0.616917", "0.61587554", "0.6149229", "0.6148107", "0.6148107", "0.6143328", "0.61399966", "0.6107444", "0.61052245", "0.6104474", "0.61021197", "0.60928214", "0.6088399", "0.6081123", "0.6071007", "0.60651255", "0.606322", "0.60590774", "0.6057721", "0.6055202", "0.60542005", "0.60507154", "0.6048728", "0.6038227", "0.60353416", "0.6030658", "0.6021929", "0.6021698", "0.60201865", "0.60195774", "0.60183007", "0.6000955" ]
0.74749076
0
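Several of the negative snippets above implement the same evaluation pattern: run a fixed number of episodes with a (near-)greedy policy and average the per-episode returns. A minimal sketch of that pattern, assuming a classic Gym-style env (reset() returning a state, step() returning a 4-tuple) and an agent exposing a greedy act(state) method; both names are illustrative placeholders, not taken from the snippets themselves:

import numpy as np

def evaluate_policy(env, agent, num_episodes=20, max_episode_length=1000):
    # Average undiscounted return of the agent over num_episodes episodes.
    returns = []
    for _ in range(num_episodes):
        state = env.reset()
        episode_return, done, t = 0.0, False, 0
        while not done and t < max_episode_length:
            action = agent.act(state)  # greedy action: no exploration during evaluation
            state, reward, done, _ = env.step(action)
            episode_return += reward
            t += 1
        returns.append(episode_return)
    return np.mean(returns), np.std(returns)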
This method will return an action chosen by epsilon-greedy
def get_epsilon_action(epsilon, env, mean_reward_per_bandit): explore = np.random.uniform() < epsilon if explore: return env.action_space.sample() else: return np.argmax(mean_reward_per_bandit)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def greedy(self) -> Action:\n return NotImplemented", "def eps_greedy(Q, epsilon, num_actions):\n if np.random.uniform(0,1,1) > epsilon:\n action = np.argmax(Q)\n else:\n action = np.random.randint(low=0, high=num_actions)\n \n Q_value = Q[action]\n return action, Q_value", "def epsilonGreedyChooser(normalAction, state, stepsDone):\n epsThreshold = EPS_END + (EPS_START - EPS_END) * math.exp(-1. * stepsDone / EPS_DECAY)\n randomSample = random.random()\n if randomSample > epsThreshold:\n action = normalAction(state).max(1)[1].view(1, 1)[0].item()\n #print(action)\n return action\n else:\n return ENVIRONMENT.action_space.sample()", "def EpsGreedy(self, actions, game_state):\n if random.random() < self.epsilon:\n return random.choice(actions)\n else:\n return self.best_action(actions, game_state)", "def greedy():\n return constant(0)", "def epsilon_greedy(Q, epsilon, n_actions, s, train=False):\n if train or np.random.rand() < epsilon:\n action = np.argmax(Q[s, :])\n else:\n action = np.random.randint(0, n_actions)\n return action", "def _epsilon_greedy(self, info_state, legal_actions, epsilon):\n probs = np.zeros(self._num_actions)\n if np.random.rand() < epsilon:\n action = np.random.choice(legal_actions)\n probs[legal_actions] = 1.0 / len(legal_actions)\n else:\n info_state = np.reshape(info_state, [1, -1])\n q_values = self._session.run(\n self._q_values, feed_dict={self._info_state_ph: info_state})[0]\n legal_q_values = q_values[legal_actions]\n action = legal_actions[np.argmax(legal_q_values)]\n probs[action] = 1.0\n return action, probs", "def greedy(self, state, timestep, epsilon=0):\n\n counts = np.bincount(self.call_locs, minlength=self.num_nodes)\n # print(self.lengths)\n # print(counts)\n score = self.lengths @ counts\n action = []\n for _ in range(self.num_ambulance):\n node = np.argmin(score)\n action.append(node)\n score[node] = 99999999\n return action", "def eps_greedy_action(self, phi, tabu):\n\n # increase counter of actions taken\n self.a_count += 1\n\n # if within the initial buffer before learning starts, random action\n aval_actions = None\n if self.a_count < self.warmup:\n\n if len(tabu) > 0:\n # Remove tabu actions from list of available actions\n aval_actions = [a for a in self.actions if a not in tabu]\n\n action = self.random_action(aval_actions)\n return action, None\n\n elif (self.a_count == self.warmup) and self.verbose:\n print('learning starts')\n\n # evaluate Q(phi, a) for each action\n qvalues = self.Qmodel.predict(phi, batch_size=1)[0]\n\n # generate random value\n randn = np.random.uniform()\n\n # eliminate tabu values from possible actions to pick\n aval_actions = None\n if len(tabu) > 0:\n if randn < self.epsilon:\n aval_actions = [a for a in self.actions if a not in tabu]\n else:\n # Update Qs to low values to ensure they are not picked\n tabu_idx = [i for i in range(self.num_actions) if self.actions[i] in tabu]\n qvalues[tabu_idx] = -9999\n\n # eps-greedy, select random action\n if randn < self.epsilon:\n action = self.random_action(aval_actions)\n a_i = self.action_str2idx(action)\n else:\n # select best action\n a_i = np.argmax(qvalues)\n action = self.actions[a_i]\n\n # update greedy parameter and action count\n self.epsilon *= self.discount_epsilon\n self.a_count += 1\n\n return action, qvalues[a_i]", "def chooseAction(self, epsilon, state):\n if random.uniform(0, 1) < epsilon:\n return random.randrange(9)\n\n cur_best_val = -float('inf')\n cur_best_action = 0\n\n data = env.getAllNextStates(state)\n\n with torch.no_grad():\n for action, 
next_state, done in data:\n if next_state != state:\n value = self.NN(self.RBF[next_state]).item() if not done else 0\n if value > cur_best_val:\n cur_best_val = value\n cur_best_action = action\n #print(data)\n return cur_best_action", "def __act__(\n self,\n t: int\n ) -> Action:\n\n if self.random_state.random_sample() < self.epsilon:\n a = self.random_state.choice(self.most_recent_state.AA)\n self.epsilon *= (1 - self.epsilon_reduction_rate)\n else:\n a = self.greedy_action\n\n return a", "def choose_epsilon_greedy(self, state: Tuple[int, ...], valid_actions: Tuple[int, ...]) -> int:\n if random.random() < self.__epsilon:\n return self.choose_uniform(valid_actions)\n return self.choose_greedy(state, valid_actions)", "def greedy(q, s):\n # Your code here\n return argmax(q.actions,lambda a:q.get(s,a))", "def take_action(self, state):\n if self.epsilon_decay is not None:\n self.epsilon *= self.epsilon_decay\n if random.random() < self.epsilon:\n action = super(BaseQAgent, self).random_next_action(state)\n self.log('exploration move: {0}'.format(str(action)))\n else:\n action = self.greedy_next_action(state)\n self.log('exploitation move: {0}'.format(str(action)))\n return action", "def epsilon_greedy(q, s, eps = 0.5):\n if random.random()<eps:\n return uniform_dist(q.actions).draw()\n else:\n return greedy(q,s)", "def takeAction(self, state):\n # go greedy or not?\n if random.uniform(0, 1) < self.epsilon:\n # greedy selection\n # find best action\n allActions = torch.stack(\n tuple(torch.cat((state.strengths, state.focus, changes)) for changes in self.actionSet))\n evaluation = self.q.evaluateBunch(allActions)\n action = Action(state, self.actionSet[evaluation.argmax()])\n return action\n else:\n # random selection\n return Action(state, random.choice(self.actionSet))", "def choose_action(self, prev_opp_action):\n state = self.get_modified_state()\n next_action = self.belief.get_epsilon_greedy_action(state, self.epsilon)\n self.epsilon *= self.e_rate\n self.action_history.append(next_action)\n return next_action", "def select_action(engine, observation):\n with torch.no_grad():\n dqn.eval()\n if torch.rand(1).item() < epsilon:\n return random_action(observation)\n else:\n return dqn(observation).greedy()", "def epsilon_greedy_move(self):\n\n # get the current state\n state, _ = self.board.bit_board_representation()\n \n # choose the move to play\n is_exploring_move = False\n if random.random() < self.epsilon:\n # exploration\n action = self.board.random_move()\n is_exploring_move = True\n else:\n # exploitation\n action, _ = self.board.greedy_action_move(self.target_network)\n\n action_index = action\n if self.board.player == CONST.BLACK:\n action_index = action + 9\n \n # play the epsilon greedy move\n self.board.play_move(action)\n \n # add the experience to the experience buffer if the move was not an exploration move\n if not is_exploring_move:\n reward = self.board.reward()\n not_terminal = self.board.not_terminal_int()\n succ_state, succ_player = self.board.bit_board_representation()\n succ_legal_moves = self.board.legal_moves\n self.experience_buffer.add(state, action_index, reward, not_terminal, succ_state, succ_player, succ_legal_moves)", "def choose_action(self, state, epsilon_greedy=False):\n chosen_action = None\n if epsilon_greedy:\n if np.random.rand() <= self.epsilon:\n print('random actions')\n\n # choose random action\n chosen_action = random.choice(self.actions)\n\n else:\n print('argmax')\n\n # find the action with greatest Q value\n maxQ = -float(\"inf\")\n for action 
in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n else:\n\n # policy rollout\n maxQ = -float(\"inf\")\n for action in self.actions:\n input_data = np.asarray(state + action).reshape(self.OUTPUT_DIM, self.INPUT_DIM)\n Q = self.model.predict(input_data)\n if Q > maxQ:\n maxQ = Q\n chosen_action = action\n\n return chosen_action", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def _greedy(self, state):\n\n node = self.mcts_head\n if self.verbose > 1:\n logger.debug(f\"Starting greedy algorithm.\")\n\n while not node.terminal:\n # Parse current state\n this_state, total_reward, terminal = self._parse_path(state, node.path)\n node.set_terminal(terminal)\n if self.verbose > 1:\n logger.debug(f\" Analyzing node {node.path}\")\n\n # Expand\n if not node.terminal and not node.children:\n actions = self._find_legal_actions(this_state)\n step_rewards = [self._parse_action(action, from_which_env=\"sim\") for action in actions]\n if self.verbose > 1:\n logger.debug(f\" Expanding: {len(actions)} legal actions\")\n node.expand(actions, step_rewards=step_rewards)\n\n # If terminal, backup reward\n if node.terminal:\n if self.verbose > 1:\n logger.debug(f\" Node is terminal\")\n if self.verbose > 1:\n logger.debug(f\" Backing up total reward {total_reward}\")\n node.give_reward(self.episode_reward + total_reward, backup=True)\n\n # Debugging -- this should not happen\n if not node.terminal and not node.children:\n logger.warning(\n f\"Unexpected lack of children! 
Path: {node.path}, children: {node.children.keys()}, legal actions: {self._find_legal_actions(this_state)}, terminal: {node.terminal}\"\n )\n node.set_terminal(True)\n\n # Greedily select next action\n if not node.terminal:\n action = node.select_greedy()\n node = node.children[action]\n\n if self.verbose > 0:\n choice = self.mcts_head.select_best(mode=\"max\")\n self._report_decision(choice, state, \"Greedy\")", "def make_epsilon_greedy_policy(self, Q, epsilon, nA):\n\n def policy_fn(observation,p):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q=Q(observation,p)\n\n best_action = np.argmax(q)\n print(\"action called:\",self.env.action_labels[best_action])\n A[best_action] += (1.0 - epsilon)\n return A\n\n return policy_fn", "def step(self):\n # No need for epsilon as exploration is controlled by c\n\n\n # Step in time (choose an action) \n n = np.sum(self.action_count)\n if n > 0: # Condition to evaluate on first iteration, because np.log(0) = -inf\n mask = self.action_count > 0 # Mask to avoid division by 0 on the formula for upper confidence uncertainties\n uncertainties = np.zeros(self.action_count.shape)\n uncertainties[mask] = self.c*np.sqrt(np.log(n)/self.action_count[mask])\n uncertainties[~mask] = float('inf') # We increment uncertainty of actions we've never chosen\n else:\n uncertainties = np.array(np.repeat(float('inf'), len(self.action_count))) \n optimals = self.Q + uncertainties # Uncertainty rises the value of less chosen actions, hence promoting exploration\n \n max_actions = np.argwhere(optimals == np.amax(optimals)).flatten() # greedy actions (max value)\n if len(max_actions) == 1:\n self.last_action = max_actions\n else:\n self.last_action = np.random.choice(max_actions)\n\n return self.last_action", "def get_action(self, state):\n # TODO: Replace the example implementation below with your own search\n # method by combining techniques from lecture\n #\n # EXAMPLE: choose a random move without any search--this function MUST\n # call self.queue.put(ACTION) at least once before time expires\n # (the timer is automatically managed for you)\n\n import random\n if state.ply_count < 2:\n self.queue.put(random.choice(state.actions()))\n else:\n #my_timer = time.time() + float(0.1499)\n best_move = None\n depth_limit = 3\n for depth in range(1, depth_limit + 1):\n best_move = self.alpha_beta_search(state, depth)\n if best_move is None:\n best_move = random.choice(state.actions())\n self.queue.put(best_move)\n \n # Alpha beta pruning\n # Iterative deepening to set bounds\n # Evaluation function: other than (#my_moves - #opponent_moves), partition, symmetry ", "def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result", "def __call__(self, state, q_values):\n\n if self.policy_type == \"greedy\":\n is_greedy = 
True\n else:\n is_greedy = random.uniform(0, 1) > self.epsilon\n\n if is_greedy :\n # choose greedy action\n index_action = np.argmax(q_values[state])\n else:\n # get a random action\n index_action = random.randint(0,3)\n\n return actions_dict[index_action]", "def epsilon_greedy(Q, epsilon, state):\n random_number = random.random()\n if (random_number < epsilon) and (state not in critical_states):\n return env.action_space.sample()\n\n else:\n return np.argmax(Q[state])", "def choose_action(self, state):\n if random.random() < self.epsilon:\n self.epsilon -= self.epsilon_annealing_rate\n return random.choice(self.valid_actions)\n \n #initialize search variables\n opt_action = self.valid_actions[0]\n opt_value = 0\n\n #performs a search across all valid actions for highest q-value.\n for action in self.valid_actions:\n cur_value = self.q_value(state, action)\n if cur_value > opt_value:\n opt_action = action\n opt_value = cur_value\n elif cur_value == opt_value:\n opt_action = random.choice([opt_action, action])\n return opt_action", "def select_action(self, state: str) -> Action:\n rnd_num = self._random.random()\n p = 1.0 - self.epsilon\n if rnd_num > p:\n action = self._random.random_choice() \n else:\n action = max(self.Qs[state], key=lambda x: self.Qs[state][x])\n if self.epsilon_decay == True:\n self.turns += 1\n if self.turns < self.end_epsilon_decay:\n self.epsilon -= self.decay_value \n return action", "def move(self, state):\n \n self.depth_limit=1\n self.best_utility=-2\n action=None\n while not self.is_time_up():\n self.terminal=True\n self.cache={}\n action=self.alpha_beta_search(state,0)\n if self.terminal==True:\n break\n self.depth_limit=self.depth_limit+1\n \n return action", "def get_action(self, state):\n depth_limit = 20\n\n if state.ply_count < 4 and self.data is not None:\n if state in self.data:\n self.queue.put(self.data[state])\n else:\n self.queue.put(random.choice(state.actions()))\n else:\n for depth in range(1, depth_limit+1):\n best_move = self.alpha_beta_search(state, depth)\n if best_move is not None:\n self.queue.put(best_move)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, minimax_value(self, child, self.depth, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def get_action(self, state):\n\n # Pick Action\n possible_actions = self.get_legal_actions(state)\n\n # If there are no legal actions, return None\n if len(possible_actions) == 0:\n return None\n\n # agent parameters:\n epsilon = self.epsilon\n\n #\n # INSERT CODE HERE to get action in a given state (according to epsilon greedy algorithm)\n #\n\n best_action = self.get_best_action(state)\n chosen_action = best_action\n\n if random.uniform(0, 1) < epsilon:\n random_actions = possible_actions.copy()\n random_actions.remove(best_action)\n chosen_action = random.choice(random_actions if random_actions else [best_action])\n\n return chosen_action", "def epsilon_greedy_policy(network, eps_end, eps_start, eps_decay, actions, device):\n def policy_fn(observation, steps_done):\n sample = np.random.random()\n eps_threshold = eps_end + (eps_start - eps_end) * math.exp(-1. 
* steps_done * eps_decay)\n if sample > eps_threshold:\n with torch.no_grad():\n if observation.dim() == 3:\n observation = observation.unsqueeze(0)\n elif observation.dim() < 3:\n NotImplementedError(\"Wrong input dim\")\n\n values = network.forward(observation.to(device))[0]\n best_action = torch.max(values, dim=0)[1]\n return best_action.cpu().item(), eps_threshold\n else:\n # return torch.tensor(np.random.randint(low=0, high=num_actions), dtype=torch.long), eps_threshold\n return random.choice(actions), eps_threshold\n return policy_fn", "def find_action(self, env):\n best_action = None\n best_omega = None\n best_reward = -1e6\n for i, action in enumerate(zip(self.left, self.right)):\n action = np.array(action)\n reward = env.theoretical_step(action, self.delta_t)\n if reward > best_reward:\n best_action = action\n best_reward = reward\n best_omega = self.possible_omega[i]\n\n if best_reward <= -20:\n return None\n else:\n return Result(best_action, {\"omega\": best_omega.item()})", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n alpha = float(\"-inf\")\n beta = float(\"inf\")\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, alphabeta_value(self, child, self.depth, alpha, beta, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def select_action(self, state):\n # print(\"agent.select_action() - state: {}\".format(state))\n\n self.step_counter += 1\n # self.epsilon = max(0.1, 1.0-self.step_counter/self.epsilon_decay_steps)\n epsilon_min = .01\n epsilon_max = .8\n epsilon_step = epsilon_max - (epsilon_max - epsilon_min) * self.step_counter / self.epsilon_decay_steps\n self.epsilon = max(epsilon_min, epsilon_step)\n # self.epsilon = max(0.1, 1.0/self.step_counter)\n\n rand = random.uniform(0, 1)\n if rand < self.epsilon:\n # choose random action\n return np.random.choice(self.nA)\n else:\n # choose greedy action\n return np.argmax(self.Q[state])", "def choose_action( self):\n \"\"\"greedy, random, e-greedy, boltzmann, bayesian\"\"\"\n\tif self.exploration == \"greedy\":\n #Choose an action with the maximum expected value.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"random\":\n #Choose an action randomly.\n a = env.action_space.sample()\n if self.exploration == \"e-greedy\":\n #Choose an action by greedily (with e chance of random action) from the Q-network\n if np.random.rand(1) < e or total_steps < pre_train_steps:\n a = env.action_space.sample()\n else:\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:1.0})\n a = a[0]\n return a\n if self.exploration == \"boltzmann\":\n #Choose an action probabilistically, with weights relative to the Q-values.\n Q_d,allQ = sess.run([q_net.Q_dist,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.Temp:e,q_net.keep_per:1.0})\n a = np.random.choice(Q_d[0],p=Q_d[0])\n a = np.argmax(Q_d[0] == a)\n return a\n if self.exploration == \"bayesian\":\n #Choose an action using a sample from a dropout approximation of a bayesian q-network.\n a,allQ = sess.run([q_net.predict,q_net.Q_out],feed_dict={q_net.inputs:[s],q_net.keep_per:(1-e)+0.1})\n a = a[0]\n return a", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = 
self.dqn(state)\n action = q_values.argmax().item()\n else:\n action = self.env.action_space.sample()\n return action", "def policies(self, QTable, epsilon, state, next_states, action_to_do): # Inspiration from https://www.geeksforgeeks.org/q-learning-in-python/?fbclid=IwAR1UXR88IuJBhhTakjxNq_gcf3nCmJB0puuoA46J8mZnEan_qx9hhoFzhK8\r\n num_actions = 5 # 5 actions-value, [moved_out, into_goal, send_opp_home, send_self_home, move_token] \r\n def epsilonGreedyPolicy(): \r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n valid_act_len = len(np.where(valid_actions==True)[0])\r\n\r\n Action_probabilities = np.ones(num_actions, dtype = float) * epsilon / valid_act_len # divides probability based on number of valid actions and epsilon (each 0.025 if 4 actions) \r\n Action_probabilities = np.multiply(Action_probabilities, valid_actions)\r\n\r\n # If same values in QTable choose random valid action \r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n Action_probabilities[best_action] += (1.0 - epsilon) # Assigns rest probability to best action so probability sums to 1\r\n\r\n return Action_probabilities \r\n\r\n def greedyPolicy():\r\n tmp_state = str(state.state[0])\r\n valid_actions = np.append(action_to_do, True) # the True appended is move_token\r\n\r\n Action_probabilities = np.zeros(num_actions, dtype = float)\r\n\r\n best_action = np.argmax(QTable[tmp_state]) # Find index of action which gives highest QValue\r\n # Check if valid action else find new best action\r\n if not valid_actions[best_action]:\r\n actions = np.argsort(-QTable[tmp_state]) # descending order of action values\r\n for i in range(len(valid_actions)):\r\n if valid_actions[actions[i]]:\r\n best_action = actions[i]\r\n break\r\n\r\n\r\n Action_probabilities[best_action] += 1.0\r\n return Action_probabilities\r\n\r\n\r\n if(self.__chosenPolicy == \"epsilon greedy\"):\r\n return epsilonGreedyPolicy \r\n if(self.__chosenPolicy == \"greedy\"):\r\n return greedyPolicy", "def alphabeta_search(state):\r\n \r\n '''\r\n Terminates when game.actions is empty\r\n Class Game needs the following functions:\r\n - game.result(state, a) -- successor\r\n - game.actions(state) -- possible moves\r\n - game.utility -- returns the state of the game (win/lose or tie, when game is terminal)\r\n \r\n '''\r\n #sort state.actions in increasing or decreasing based on max or min (alpha or beta)\r\n #use heuristics fn to get a value for each move (move is in format (x,y) where x and y are ints\r\n \r\n d = depthset[0] #this is the cutoff test depth value. 
if we exceed this value, stop\r\n cutoff_test=None\r\n sort_fn = [vitalpoint, eyeHeur]\r\n eval_fn = survivalheur \r\n #randnumheuristics \r\n player = state.to_move()\r\n prune = 0\r\n pruned = {} #this will store the depth of the prune\r\n totaldepth = [0]\r\n visited = {}\r\n heuristicInd = 0\r\n \r\n def max_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = -infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #max wants decreasing\r\n #sorted(state.actions(), key = eval_sort, reverse = True)\r\n \r\n #sort by favorites first, returns a list of actions\r\n # for sorts in sort_fn:\r\n tempher = heuristicInd\r\n\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n ''''''\r\n for a in sortedactions:\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n \r\n onbranch += 1\r\n v = max(v, min_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd)) #+ vitscore.count(a)\r\n if v >= beta: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n alpha = max(alpha, v)\r\n \r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n \r\n return v\r\n\r\n def min_value(state, alpha, beta, depth, heuristicInd):\r\n branches = len(state.actions())\r\n onbranch = 0\r\n \r\n if totaldepth[0] < depth:\r\n totaldepth[0] = depth\r\n if cutoff_test(state, depth):\r\n return eval_fn(state)\r\n v = infinity\r\n \r\n #sort state.actions based on heuristics before calling\r\n #min wants increasing\r\n #sorted(state.actions(), key = eval_sort)\r\n #Shayne\r\n tempher = heuristicInd\r\n sorts = sort_fn[heuristicInd]\r\n sortedactions, heuristicInd = sorts(state, 1)\r\n #if heuristicInd != tempher:\r\n # print 's',\r\n for a in sortedactions: #state.actions():\r\n onbranch += 1\r\n if visited.get(depth) == None:\r\n visited[depth] = [a]\r\n else:\r\n visited[depth].append(a)\r\n v = min(v, max_value(state.result(a),\r\n alpha, beta, depth+1, heuristicInd))\r\n if v <= alpha: #pruning happens here, but in branches\r\n if pruned.get(depth) == None:\r\n pruned[depth] = branches - onbranch\r\n else:\r\n pruned[depth] += (branches - onbranch)\r\n #print \"prune\", depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n beta = min(beta, v)\r\n #print depth, \" \", state.actions()\r\n #state.display()\r\n return v\r\n\r\n # Body of alphabeta_search starts here:\r\n #def cutoff_test and eval_fn \r\n cutoff_test = (cutoff_test or\r\n (lambda state,depth: depth>d or state.terminal_test()))\r\n eval_fn = eval_fn or (lambda state: state.utility(player))\r\n #by default, utility score is used\r\n \r\n \r\n #argmax goes through all the possible actions and \r\n # applies the alphabeta search onto all of them\r\n # and returns the move with the best score \r\n #print state.actions()\r\n heuristicInd = 0\r\n sorts = sort_fn[heuristicInd]\r\n sortedact, heuristicInd = sorts(state)\r\n abmove = argmax(sortedact,\r\n lambda a: min_value(state.result(a),\r\n -infinity, infinity, 0, heuristicInd))\r\n\r\n print 'problem,', problemno[0], ', total tree depth,', totaldepth[0]\r\n for i 
in range(1, len(visited)):\r\n if len(pruned) < i:\r\n pruned[i] = 0\r\n print i, \",\", len(visited[i]), \",\", pruned[i]\r\n \r\n return abmove", "def getAction(self, gameState):\n rootNode = TreeNode(gameState, 1)\n rootNode.expand(self.selPolicy) \n player = rootNode.state.player\n for i in range(self.iter):\n leafNode = self.walkTree(rootNode, gameState.default_action) \n if (leafNode.endState()): continue \n self.expand(leafNode) \n newLeafNode = self.step(leafNode, gameState.default_action) \n value = self.simulate(newLeafNode, player, gameState.default_action) \n self.backPropogation(newLeafNode, value) \n\n # Brute force if we can win next move, then take it\n for action in rootNode.children:\n if rootNode.children[action].state.winner() == player:\n return action\n\n optValue, optAction = max([(rootNode.children[action].value, action) for action in rootNode.children])\n \n # Resign if the optimal value is really bad...\n if optValue < -0.8:\n print(\"Agent resigning\")\n return gameState.resign_move\n\n # If not, return the optimal action and try to win!\n return optAction", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n def expHelper(gameState, deepness, agent):\n if agent >= gameState.getNumAgents():\n agent = 0\n deepness += 1\n if (deepness==self.depth or gameState.isWin() or gameState.isLose()):\n return self.evaluationFunction(gameState)\n elif (agent == 0):\n return maxFinder(gameState, deepness, agent)\n else:\n return expFinder(gameState, deepness, agent)\n \n def maxFinder(gameState, deepness, agent):\n output = [\"meow\", -float(\"inf\")]\n pacActions = gameState.getLegalActions(agent)\n if not pacActions:\n return self.evaluationFunction(gameState)\n for action in pacActions:\n currState = gameState.generateSuccessor(agent, action)\n currValue = expHelper(currState, deepness, agent+1)\n if type(currValue) is list:\n testVal = currValue[1]\n else:\n testVal = currValue\n if testVal > output[1]:\n output = [action, testVal] \n return output\n \n def expFinder(gameState, deepness, agent):\n output = [\"meow\", 0]\n ghostActions = gameState.getLegalActions(agent)\n if not ghostActions:\n return self.evaluationFunction(gameState)\n probability = 1.0/len(ghostActions)\n for action in ghostActions:\n currState = gameState.generateSuccessor(agent, action)\n currValue = expHelper(currState, deepness, agent+1)\n if type(currValue) is list:\n val = currValue[1]\n else:\n val = currValue\n output[0] = action\n output[1] += val * probability\n return output\n \n outputList = expHelper(gameState, 0, 0)\n return outputList[0]", "def get_next_action(self, epsilon, learning_params):\n\n T = learning_params.T\n\n if random.random() < epsilon:\n # With probability epsilon, randomly select an action for each agent.\n a_selected = np.full(self.num_agents, -1, dtype=int)\n for i in range(self.num_agents):\n a_selected[i] = random.choice(self.actions[i])\n else:\n partial_index = [] # Don't include action indexes. 
As a result, in pr_sum, we are summing over actions.\n for i in range(self.num_agents):\n partial_index.append(self.s[i])\n partial_index.append(self.u)\n partial_index = tuple(partial_index)\n\n # Sum over all possible actions for fixed team state and reward machine state.\n pr_sum = np.sum(np.exp(self.q[partial_index] * T))\n\n # pr[i] is an array representing the probability values that agent i will take various actions.\n pr = np.exp(self.q[partial_index] * T)/pr_sum\n\n shp = pr.shape\n pr = pr.flatten()\n\n pr_select = np.zeros([len(pr) + 1, 1])\n pr_select[0] = 0\n for i in range(len(pr)):\n pr_select[i+1] = pr_select[i] + pr[i]\n\n randn = random.random()\n for i in range(len(pr)):\n if randn >= pr_select[i] and randn <= pr_select[i+1]:\n a_selected = np.unravel_index(i, shp)\n a_selected = np.array(a_selected, dtype=int)\n break\n\n a = a_selected\n\n return self.s, a", "def getaction(self, state, epsilon):\n # Pick Action\n legalactions = env.getlegalactions(env.state_to_array(state))\n\n # if self.epsilon == 0:\n # for action in legalactions:\n # print(self.qvals[(state, action)])\n # print(state, legalactions)\n # input()\n # print(\"legal actions\", legalactions, \"for state\", state)\n # print (\"for_given_state\", state, \"legal_actions are\", legalactions)\n # print (\"legalactions\",legalactions,\"state\",state)\n action = None\n \"*** YOUR CODE HERE ***\"\n if len(legalactions) != 0:\n if flipcoin(epsilon):\n # if not epsilon:\n # print (\"random\")\n action = random.choice(legalactions)\n else:\n action = self.computeactionfromqvalues(state)\n return action", "def _select_action(self):\n if self.eval_mode:\n self._log_values()\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state according\n # to the current head.\n return self._compute_q_argmax()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # Again, we use the fundamental foundation built in Q2 for Q4, however here we modify our minimizer function\n # to serve the purpose of finding the expected value\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n def maximizer(curState, agentIndex, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex + 1, depth)\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, depth):\n ghostActions = 
curState.getLegalActions(agentIndex)\n weight = 0 # Starting value of zero to be incremented below\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, depth+1)\n weight = weight + temp\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = (float(1.0) / len(ghostActions))*minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, depth)\n weight = weight + temp\n return weight\n else:\n # if there are no legal actions left then evaluate at the last known state\n return self.evaluationFunction(curState)\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n count[x] = minimizer(tempState,1,0)\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def get_greedy_action(Q, obs):\n obs = Q.xp.asarray(obs[None], dtype=np.float32)\n with chainer.no_backprop_mode():\n q = Q(obs).data[0]\n return int(q.argmax())", "def get_greedy_actions(self, state):\n state_action_values = self.get_action_values(state) # What are the value that we could get from current state\n\n max_action_value = max(state_action_values) # What is the higher value\n max_value_indices = [i for i, value in enumerate(state_action_values) if\n value == max_action_value] # Gets their indices\n\n # Prepares action probabilites for the ones with the higher value\n action_probs = np.zeros((4,))\n action_probs[max_value_indices] = 1 / (len(max_value_indices) if type(max_value_indices) is list else 1)\n\n return action_probs", "def take_action(self):\n iterative_search = Thread(target=self._iterative_deepening)\n iterative_search.start()\n sleep(9)\n action = action_string(self.pieces[self.best_action[0]], self.best_action[1])\n self.pieces[self.best_action[0]] = self.best_action[1]\n self.playing = 1 if self.color == 0 else 0\n self.turn += 1\n return action", "def select_action(self, state, epsilon=None):\n if epsilon == None:\n epsilon = self.epsilon\n \n if np.random.random() > epsilon:\n # greedy action selection\n return self.get_optimal_action(state)\n \n else:\n # random action selection\n return np.random.randint(0, self.num_actions)", "def select_action(self, state: np.ndarray) -> np.ndarray:\n # epsilon greedy policy\n # pylint: disable=comparison-with-callable\n if self.epsilon > np.random.random():\n selected_action = np.array(self.env.action_space.sample())\n else:\n with torch.no_grad():\n state = self._preprocess_state(state, self.device)\n selected_action = self.dqn(state).argmax()\n selected_action = selected_action.cpu().numpy()\n\n # Decay epsilon\n self.epsilon = max(\n self.epsilon\n - (self.max_epsilon - self.min_epsilon) * self.hyper_params.epsilon_decay,\n self.min_epsilon,\n )\n\n return selected_action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n legalActions = gameState.getLegalActions(0)\n legalActions.remove('Stop')\n \n besctaction = Directions.STOP\n score = float(\"-inf\")\n for action in legalActions:\n child = gameState.generateSuccessor(0, action)\n newscore = max(score, expectmax_value(self, child, self.depth, 1))\n if newscore > score:\n bestaction = action\n score = newscore\n \n return bestaction", "def act(self):\n\n self.state = self.next_state\n 
self.choose_random = np.random.uniform(0., 1.) < self.epsilon\n # If exploring\n if self.choose_random:\n # Select a random action using softmax\n idx = np.random.choice(4)\n self.action = self.idx2act[idx]\n else:\n # Select the greedy action\n self.action = self.idx2act[self.argmaxQsa(self.state)]\n\n self.reward = self.move(self.action)\n self.total_reward += self.reward", "def _select_action(self):\n if self.eval_mode:\n epsilon = self.epsilon_eval\n else:\n epsilon = self.epsilon_fn(\n self.epsilon_decay_period,\n self.training_steps,\n self.min_replay_history,\n self.epsilon_train)\n if random.random() <= epsilon:\n # Choose a random action with probability epsilon.\n return random.randint(0, self.num_actions - 1)\n else:\n # Choose the action with highest Q-value at the current state.\n if self._interact == 'stochastic':\n selected_action = self._stochastic_action\n elif self._interact == 'greedy':\n selected_action = self._q_argmax\n else:\n raise ValueError('Undefined interaction')\n return self._sess.run(selected_action,\n {self.state_ph: self.state})", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n\n def policy_fn(observation):\n\n # get random number\n random_number = random.uniform(0, 1)\n\n # get actions with maximum value\n greedy_actions = np.argwhere(Q[observation] == np.amax(Q[observation])).squeeze()\n if not len(greedy_actions.shape):\n greedy_actions = [greedy_actions]\n action = random.choice(greedy_actions)\n\n # if number less than epsilon, get random other actions\n if random_number <= epsilon:\n all_actions = list(range(0, nA))\n if not len(greedy_actions) == nA:\n action = random.choice(all_actions)\n\n return int(action)\n\n return policy_fn", "def act(self, state, epsilon):\n if random.random() > epsilon:\n state = torch.FloatTensor(state)\n q_values = self.dqn.forward(state)\n action = int(q_values.argmax())\n else:\n action = self.env.action_space.sample()\n\n return action", "def play_epsilon_greedy_policy(self, board):\n policy = np.random.choice(['random', 'q_agent'], 1, p=[self.epsilon, 1-self.epsilon])[0]\n if policy == 'random':\n move, _ = RandomAgent.play(board)\n else:\n move, q_value = self.play(board)\n self.after_move()\n return move, policy", "def act(self):\n action = self.best_action()\n return action", "def chooseAction(self, gameState):\n\n actions = gameState.getLegalActions(self.index)\n # actions.remove(Directions.STOP)\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n values = [self.evaluate(gameState, a) for a in actions]\n for idx,a in enumerate(actions):\n baby = self.getSuccessor(gameState, a)\n qsum = [self.evaluate(baby, action) for action in baby.getLegalActions(self.index)]\n values[idx] += min(qsum) \n\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n foodLeft = len(self.getFood(gameState).asList())\n if foodLeft <= 2:\n bestDist = 9999\n for action in actions:\n successor = self.getSuccessor(gameState, action)\n pos2 = successor.getAgentPosition(self.index)\n dist = self.getMazeDistance(self.start,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n return bestAction\n\n return random.choice(bestActions)", "def get_action(self, state):\n self.visited = {}\n utility = -inf\n move = 'STOP'\n\n # We choose the successor with the maximum utility\n for successor in state.generatePacmanSuccessors():\n maxPlayer = True\n score = 
self.alphabeta(successor[0], -inf, +inf, maxPlayer)\n if utility < score:\n move = successor[1]\n utility = score\n\n # If there's no winning state, we try to to move farther from the ghost\n if utility == -inf:\n dist = -inf\n for successor in state.generatePacmanSuccessors():\n newDist = self.distanceFromGhost(successor[0])\n if not successor[0].isLose() and newDist > dist:\n move = successor[1]\n dist = newDist\n print(utility)\n return move", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n \r\n #choose according to that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(state)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def execute_best_actions(self):\n while True:\n print(\"In execute_best_actions\")\n s = self.get_state_num()\n qvals = self.Q[s]\n # Get action with largest qval\n best_action = np.argmax(qvals)\n # We don't actually update with rewards,\n # but use them to know when to perform next action\n # We want to travel 0.5 m in action's direction.\n self.apply_action(best_action)\n while self.reward == None:\n rospy.sleep(0.5)\n print(\"Reward =\", self.reward)\n self.reward = None", "def chooseAction(self, gameState):\n actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n opIndices = self.getOpponents(gameState)\n opStates = [gameState.getAgentState(i) for i in opIndices]\n opCarry = [x.numCarrying for x in opStates]\n \n if max(opCarry) >= 5:\n self.isOffensive = False\n else:\n self.isOffensive = True\n\n values = [self.evaluate(gameState, a) for a in actions]\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n maxValue = max(values)\n bestActions = [a for a, v in zip(actions, values) if v == maxValue]\n\n\n\n # print if get eaten\n myPos = gameState.getAgentPosition(self.index)\n prevGameState = self.getPreviousObservation()\n if prevGameState is not None:\n\n previousPos = prevGameState.getAgentPosition(self.index)\n if self.getMazeDistance(myPos, previousPos) > 1:\n print(\"prePostion\",previousPos)\n print()\n previousLegalAction = prevGameState.getLegalActions(self.index)\n print([(self.evaluate(prevGameState, a), a) for a in previousLegalAction])\n print()\n print(self.getNonScaredGhostPos(prevGameState))\n print()\n print()\n\n\n return random.choice(bestActions)", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n def policyFunction(state):\n\n Action_probabilities = np.ones(num_actions,\n dtype = float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def getAction(self, gameState: GameState):\n _max = float(\"-inf\")\n action = None\n for move in gameState.getLegalActions(0):\n util = minimax(self.evaluationFunction, 1, 0,\n gameState.generateSuccessor(0, move), self.depth)\n if util > _max or _max == float(\"-inf\"):\n _max = util\n action = move\n\n return action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n\n # minimax-decision\n self.NumAgents = gameState.getNumAgents()\n v = -10000\n action = 
''\n # The following for will evaluate each action starting from the root , and finds the one that will lead\n # to a maximum possible score\n for a in self.Actions(gameState, self.index):\n score = self.MinValue(self.Result(gameState, a, self.index), self.index + 1, self.depth)\n if score > v:\n action = a\n v = score\n return action\n\n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n # For this problem we will be reusing the majority of our work from question 2, but we will be\n # implementing alpha-beta pruning on top of our existing minimax infrastructure\n actionList = gameState.getLegalActions(0)\n pacmanAgentIndex = 0\n ghostAgentIndices = list(range(1,gameState.getNumAgents())) # List of each agent index for looping\n count = util.Counter()\n agentEnd = gameState.getNumAgents()-1 # Last agent in the list\n\n def maximizer(curState, agentIndex, alpha, beta, depth):\n\n ghostActions = curState.getLegalActions(agentIndex)\n maxDepth = self.depth # Quantifying the end of the tree so we know when we reached a leaf node\n weight = -99999999 # Worst case starting value to be changed in the code\n if depth == maxDepth: # If we are at a leaf node\n return self.evaluationFunction(curState) # evaluate the state of this leaf node\n # Otherwise, we progress the tree until the above condition is reached\n if len(ghostActions) != 0:\n for x in ghostActions:\n if weight >= minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth):\n weight = weight\n else:\n weight = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight > beta:\n return weight\n if alpha < weight:\n alpha = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n def minimizer(curState, agentIndex, alpha, beta, depth):\n ghostActions = curState.getLegalActions(agentIndex)\n weight = 999999999 # Worst case starting value to be changed in the code\n if len(ghostActions) != 0:\n if agentIndex == agentEnd: # If we've reached the last ghost, we maximise\n for x in ghostActions: # For each legal action in the current position\n temp = maximizer(curState.generateSuccessor(agentIndex, x), pacmanAgentIndex, alpha, beta, depth+1)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n else: # Otherwise, we continue to minimize\n for x in ghostActions: # For each legal action in the current position\n temp = minimizer(curState.generateSuccessor(agentIndex, x), agentIndex+1, alpha, beta, depth)\n if weight < temp:\n weight = weight\n else:\n weight = temp\n if weight < alpha:\n return weight\n if beta > weight:\n beta = weight\n return weight\n # if there are no legal actions left then evaluate at the last known state\n # Fall through into this return\n return self.evaluationFunction(curState)\n\n endWeight = -999999999\n alpha = -999999999\n beta = 999999999\n\n # Executing the minimizer for all possible actions\n for x in actionList:\n tempState = gameState.generateSuccessor(pacmanAgentIndex,x)\n endWeight = minimizer(tempState, 1, alpha, beta, 0,)\n count[x] = endWeight\n if alpha < endWeight:\n alpha = endWeight\n # print('HELLO THERE')\n # print(count)\n return count.argMax()", "def chooseAction(self,state):\r\n #generate float btwn 0-1\r\n choice = random.random()\r\n feat = self.feat_funct(state)\r\n #choose according to 
that number\r\n if choice > self.epsilon:\r\n return(self.maxQ(feat)[1])\r\n else:\r\n #choose randomly\r\n return(self.actions[random.randrange(0,len(self.actions))])", "def chooseAction(self, gameState):\n\n '''\n You should change this in your own agent.\n '''\n problem = foodsearchproblem(gameState,self)\n return self.astarsearch(problem,gameState,self.foodhuristic)[0]", "def next(self, state, turn, greedy_strategy):\n return self.agent_action", "def mcts_search(self, state):\n assert state.current_player() == self.player\n root = SearchNode(None, 1)\n for _ in range(self.max_simulations):\n visit_path, working_state = self._apply_tree_policy(root, state)\n if working_state.is_terminal():\n node_value = working_state.player_return(self.player)\n else:\n node_value = self.evaluator.evaluate(\n working_state, self.player, self._random_state)\n\n for node in visit_path:\n node.total_reward += node_value * node.player_sign\n node.explore_count += 1\n\n most_visited = root.most_visited_child()\n\n if self.verbose:\n print(\"Root:\", root.to_str())\n print(\"Children:\")\n print(root.children_str(working_state))\n print(\"Children of chosen:\")\n chosen_state = state.clone()\n chosen_state.apply_action(most_visited.action)\n print(most_visited.children_str(chosen_state))\n\n return most_visited.action", "def make_epsilon_greedy_policy(estimator, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n q_values = estimator.predict(observation)\n# print(q_values)\n best_action = np.argmax(q_values)\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def _next_action(self) -> SingleBriberyAction:\n self._current_rating = self.get_graph().eval_graph(self.get_briber_id())\n if self._previous_rating is None:\n self._previous_rating = self._current_rating\n next_act = SingleBriberyAction(self)\n try:\n self._next_node = self.get_graph().get_random_customer(excluding=self._info_gained | self._bribed)\n except IndexError:\n print(f\"WARNING: {self.__class__.__name__} found no influential nodes, not acting...\", file=sys.stderr)\n return next_act\n if self._current_rating - self._previous_rating > self._max_rating_increase:\n self._best_node = self._last_node\n self._max_rating_increase = self._current_rating - self._previous_rating\n maximum_bribe = min(self.get_resources(), self._bribe_to_max())\n if self._c >= self._i and self._best_node is not None and maximum_bribe > 0:\n next_act.add_bribe(self._best_node, maximum_bribe)\n self._bribed.add(self._best_node)\n self._info_gained = set()\n self._c = 0\n self._max_rating_increase = 0\n self._best_node = 0\n else:\n if self._c >= self._i:\n print(f\"WARNING: {self.__class__.__name__} has not found an influential node in {self._c} tries \"\n f\"(intended maximum tries {self._i}), continuing search...\",\n file=sys.stderr)\n # Bid an information gaining bribe, which is at most k, but is\n # smaller if you need to bribe less to get to the full bribe\n # or don't have enough money to bid k.\n next_act.add_bribe(self._next_node, min(self._bribe_to_max(), min(self.get_resources(), self._k)))\n self._info_gained.add(self._next_node)\n self._c = self._c + 1\n self._last_node = self._next_node\n self._previous_rating = self._current_rating\n return next_act", "def greedy(self):\n n_step_t = self.filter['n_step_t']\n n_traj = self.filter['n_traj']\n traj = self.filter['traj']\n steps = [0 for i in xrange(n_step_t)]\n for i in xrange(n_traj):\n n_step = traj[i]['n_step']\n for j in xrange(n_step):\n steps[j] += 
1\n self.filter['steps'] = steps\n \n return", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n return self.lastAction", "def getAction(self, state):\n # Pick Action\n \"*** YOUR CODE HERE ***\"\n # Epsilon greedy\n if util.flipCoin(self.epsilon) is True:\n self.lastAction = random.choice(self.legalActions)\n else:\n self.lastAction = self.computeActionFromQValues(state)\n return self.lastAction", "def choose_action(self, observation):\r\n observation = T.unsqueeze(T.FloatTensor(observation), 0)\r\n # Epsilon-greedy policy\r\n if np.random.uniform() < self.epsilon: \r\n # Get all of the Q values for the current state (forward prop)\r\n actions_value = self.Q_eval.forward(observation)\r\n\r\n # Take the optimal action \r\n action = T.max(actions_value, 1)[1].data.numpy()\r\n action = action[0] if self.action_space == 0 else action.reshape(self.action_space) # return the argmax index\r\n else: \r\n # Choose a random action in the action space list\r\n action = np.random.randint(0, self.num_actions)\r\n action = action if self.action_space == 0 else action.reshape(self.action_space)\r\n\r\n return action", "def stepCost(self, state, action):\n\n j = len(state)\n while (j > 0 and state[j-1] != ' '): j -= 1\n word = state[j:]\n if(action == ' '):\n return self.unigramCost(word)\n cost1 = self.unigramCost(word)\n if(word == ''): cost1 = 0.0\n cost = self.unigramCost(word + action) - cost1\n #print(f'state = {state}, word = {word}, action = {action}, cost = {cost}')\n return cost", "def chooseAction(self, gameState):\n #actions = gameState.getLegalActions(self.index)\n\n # You can profile your evaluation time by uncommenting these lines\n # start = time.time()\n #values = [self.evaluate(gameState, a) for a in actions] #no evaluation currently\n # print 'eval time for agent %d: %.4f' % (self.index, time.time() - start)\n\n #return random.choice(actions)\n if len(self.getFood(gameState).asList()) <= 2:\n print \"\\n Agent \", self.index,\"plan was terminated with\", len(self.getFood(gameState).asList()), \"dots left\"\n\n pos = gameState.getAgentState(self.index).getPosition()\n localbestDist = 9999\n dest = self.start\n bestDest = dest\n dist = self.getMazeDistance(dest,pos)\n\n for el in xrange(-2,5):\n try:\n idx = self.safeSpaces.index((self.safeColumn, pos[1] + el))\n dest = self.safeSpaces[idx]\n dist = self.getMazeDistance(dest,pos)\n #print \"possible destination at\", dest\n except ValueError:\n print \"X: \", (self.safeColumn, pos[1] + el), \"not valid destination\"\n continue\n\n print \"Current destination to check at \", dest, \"at dist:\", dist\n if dist < localbestDist:\n localbestDist = dist\n bestDest = dest\n\n bestDist = 9999\n for pos2, action, cost in getSuccessorsAlt(gameState, pos):\n dist = self.getMazeDistance(bestDest,pos2)\n if dist < bestDist:\n bestAction = action\n bestDist = dist\n\n print \"Agent \", self.index, \"found optimal safe space at\", bestDest , \"with dist\", bestDist, \"coloring spot now\"\n print \"Agent \", self.index, \"Going\", bestAction, \" from\",gameState.getAgentPosition(self.index), \"\\n\"\n self.debugDraw([bestDest], [1,1,0], clear=False)\n return bestAction\n\n if self.counter == 0: #-1:\n print \"Calculating\", self.cacheSize, \"moves as player\", self.index, \"from \", gameState.getAgentPosition(self.index)\n print \"Cached value\"\n 
self.best = self.ActionLoop(gameState, self.cacheSize)\n self.moves = self.best.getDir()[1]\n\n if not self.moves or len(self.moves) == 0:\n print \"Tried to play move, but ran Out of Moves!!!\"\n actions = gameState.getLegalActions(self.index)\n return random.choice(actions)\n\n self.intendedCoords = self.best.state[0]\n self.counter = self.cacheSize\n try:\n move = self.moves[self.cacheSize - self.counter]\n self.counter -= 1\n except:\n print \"Tried to access index\", self.cacheSize - self.counter, \"in list of length\", len(self.moves)#, \"more moves now generated\"\n print \"Agent\", self.index, \"Defaulting to closest Agent Protocol\"\n self.counter = 9999\n return calcMoves(self, gameState)\n\n\n #actions = gameState.getLegalActions(self.index)\n #self.counter = 0\n #return self.chooseAction(gameState)\n #return random.choice(actions)\n print \"On move \", self.cacheSize - self.counter, \"as player\", self.index, \"going\", move, \"from\", gameState.getAgentPosition(self.index)\n return move", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n def max_value(gameState, depth):\n \n \n if gameState.isWin() or gameState.isLose() or depth == self.depth:\n return (self.evaluationFunction(gameState), None)\n \n legal_moves = gameState.getLegalActions(0)\n v = -(float(\"inf\"))\n take_action = None\n\n for i in legal_moves:\n next_state = gameState.generateSuccessor(0,i)\n value, action = exp_value(next_state, 1, depth)\n\n \n if (v < value):\n v, take_action = value, i\n\n return (v, take_action)\n\n \n def exp_value(gameState, agent, depth):\n \n ghost_action = gameState.getLegalActions(agent) \n ghost_action_len = len(ghost_action)\n if len(ghost_action) == 0:\n return (self.evaluationFunction(gameState), None)\n\n \n v = 0\n take_action = None\n\n for i in ghost_action:\n next_state = gameState.generateSuccessor(agent, i)\n ghost_no = gameState.getNumAgents()\n if (agent == ghost_no - 1):\n new_depth= depth+1\n value, action = max_value(next_state, new_depth)\n else:\n new_agent= agent+1\n value, action = exp_value(next_state, new_agent , depth)\n\n new_value = value/ghost_action_len\n v =v+ new_value\n\n return (v, take_action)\n\n\n final_value, final_action = max_value(gameState, 0)\n return final_action\n util.raiseNotDefined()", "def make_epsilon_greedy_policy(Q, epsilon, nA):\n def policy_fn(observation):\n A = np.ones(nA, dtype=float) * epsilon / nA\n best_action = np.argmax(Q[observation])\n A[best_action] += (1.0 - epsilon)\n return A\n return policy_fn", "def createEpsilonGreedyPolicy(Q, epsilon, num_actions):\n\n def policyFunction(state):\n Action_probabilities = np.ones(num_actions,\n dtype=float) * epsilon / num_actions\n\n best_action = np.argmax(Q[state])\n Action_probabilities[best_action] += (1.0 - epsilon)\n return Action_probabilities\n\n return policyFunction", "def greedy_policy(self, q, s):\n\t\tresult = []\n\t\tif q is None:\n\t\t\treturn result\n\t\tmax_val = q[0]\n\t\tfor action in self.feasible_actions_in_state(s):\n\t\t\tq_value = q[action]\n\t\t\tif q_value == max_val:\n\t\t\t\tresult.append(action)\n\t\t\telif q_value > max_val:\n\t\t\t\tresult = [action]\n\t\t\t\tmax_val = q_value\n\t\treturn result", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n numFood = successorGameState.getNumFood()\n 
newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n ghostPositions = successorGameState.getGhostPositions()\n\n \"*** YOUR CODE HERE ***\"\n\n #print type(ghostState), dir(ghostState)\n totalScaredTimes = reduce(lambda x,y: x+y , newScaredTimes)\n foodDistances = helper(newPos,newFood.asList())\n capsuleDistances = helper(newPos,successorGameState.getCapsules())\n ghostDistances = helper(newPos,ghostPositions) \n if numFood is 0:\n foodUtility = 1000 \n else:\n foodUtility = (1/numFood)\n distanceToClosestFood = 1\n distanceToClosestGhost = 1\n distanceToClosestCapsule = 1 \n if (foodDistances and min(foodDistances) != 0):\n distanceToClosestFood = min(foodDistances) \n if (ghostDistances and min(ghostDistances) != 0):\n distanceToClosestGhost = min(ghostDistances) \n if (capsuleDistances and min(capsuleDistances) == 0):\n distanceToClosestCapsule = min(capsuleDistances)\n arg11 = 1/distanceToClosestFood - 1/distanceToClosestGhost\n arg22 = successorGameState.getScore() + totalScaredTimes + 1/distanceToClosestCapsule\n result = arg11 + arg22\n return result", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newCapsules = successorGameState.getCapsules()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n # Ghost Heuristic\n food = 0\n dist_pm_g = self.distGhosts(newGhostStates, successorGameState, newPos)\n\n #if food in this state\n if (currentGameState.hasFood(newPos[0], newPos[1])):\n food = 5\n #if capsule in this state\n elif (newPos in currentGameState.getCapsules()):\n return 1000\n #distance to the closest food\n d = self.distClosestFood(newPos, newFood)\n #distance to the closest capsule\n c = self.distClosestCap(newPos, newCapsules)\n #if the ghosts are scared\n if (sum(newScaredTimes) > 1):\n return food + d\n elif (d < 2 and 5 < dist_pm_g):\n return food + d\n elif (d > 2 and 6 < dist_pm_g):\n return food + d * 3\n else:\n return dist_pm_g + (food + d + c)", "def choose_action():\n\n def find_suma(a,b):\n return a+b\n\n def find_rizn(a,b):\n return a - b \n \n def find_ostacha(a,b):\n return a % b\n\n def find_stepin(a,b):\n return a ** b\n \n def find_sqrt(a):\n return math.sqrt(a)\n \n def find_factorial(a):\n return math.factorial(a) \n\n def find_dobutok(a,b):\n \n return a * b\n \n def find_chastku(a,b):\n if b == 0 :\n print('Dilennia na \"0\" nemozluve!')\n else:\n return a/b\n \n if x == '+':\n res = find_suma(a,b)\n return res\n \n elif x == '-':\n res = find_rizn(a,b)\n return res\n\n elif x == '*':\n res = find_dobutok(a,b)\n return res\n\n elif x == '/':\n res = find_chastku(a,b)\n return res\n\n elif x == '%':\n res = find_ostacha(a,b)\n return res\n\n elif x == '&':\n res = find_sqrt(a)\n return res\n\n elif x == '!':\n res = find_factorial(a)\n return res\n\n elif x == '^':\n res = find_stepin(a,b)\n return res", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n from util import PriorityQueue\n import math\n\n frontier = PriorityQueue()\n explored = []\n actions = []\n\n class node:\n def __init__(self, path, dad, action):\n self.path = path\n self.dad = dad\n self.action = action\n h = heuristic(path,problem.goal)\n if dad == None:\n 
self.g=0\n else:\n self.g = dad.g + heuristic(dad.path,path)\n self.cost = round(self.g + h,1)\n\n start = node(problem.getStartState(),None,'')\n frontier.push(start,start.cost)\n\n while frontier.isEmpty() == False:\n path = frontier.pop()\n successors = problem.getSuccessors(path.path)\n explored.append(path)\n for vertex in successors:\n achou = False\n for path_ex in explored:\n if vertex[0] == path_ex.path:\n achou = True\n\n if achou == False:\n successor = node(vertex[0],path,vertex[1])\n frontier.push(successor,successor.cost)\n if problem.isGoalState(successor.path):\n while len(explored) > 0:\n ant = explored.pop()\n if ant.path == successor.dad.path:\n actions.append(successor.action)\n successor = ant\n actions.reverse()\n return actions", "def aStarSearch(problem, heuristic=nullHeuristic):\n \"*** YOUR CODE HERE ***\"\n print(\"\\t===========================================\")\n print(\"\\t Processing ... Please Wait for 11 seconds!\")\n print(\"\\t===========================================\")\n startState = problem.getStartState();\n fringe = util.PriorityQueue()\n costs = 0 \n visitedNodes = []\n actions = [] \n if ( problem.isGoalState(startState) == True):\n return actions\n else:\n newFringeItem = (startState , actions , costs)\n fringe.push(newFringeItem,costs)\n while(fringe.isEmpty() == False ):\n #f(x) = h(x) + g(x)\n currentState , actions , costs = fringe.pop()\n if ( problem.isGoalState(currentState) == True):\n #print(\"Final Actions : \" + str(actions)) \n \"\"\"\n If you want the Analyzer Class analizes the chosen path and heuristic , \n Uncomment these two lines of code otherwise leave it be commented cause it increases the run time by 2 seconds.\n \"\"\"\n \"\"\"Start : Analyzer Properties \"\"\"\n #analyzer = Analyzer(problem,actions)\n #analyzer.start()\n \"\"\"End : Analyzer Properties \"\"\"\n return actions\n else:\n if(not currentState in visitedNodes ):\n visitedNodes.append(currentState)\n currentNodeSuccessors = problem.getSuccessors(currentState)\n for node in currentNodeSuccessors :\n state , action , stateCost = node\n heuristicAmount = heuristic(state , problem)\n newFringeItem = state , actions + [action] , costs + stateCost\n priority = costs + heuristicAmount\n fringe.push( newFringeItem , priority )\n \n util.raiseNotDefined()", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n\n bestchoice = \"Stop\"\n value = float(\"-inf\") \n alpha = float(\"-inf\")\n beta = float(\"inf\")\n i = 0\n actlist = gameState.getLegalActions(0)\n while i < len(actlist):\n action = actlist[i]\n arg1 = self.minValue(gameState.generateSuccessor(0, action), 1, self.depth, alpha, beta)\n maxVal = max(value, arg1)\n if maxVal > beta:\n bestchoice = action\n break\n alpha = max(alpha, maxVal)\n if maxVal > value:\n value = maxVal\n bestchoice = action\n i = i + 1\n return bestchoice", "def action(self, state, mode='train'):\n self.step += 1\n # reduce gradually epsilon to its minimum value\n self.epsilon = self.epsilon_min + (\n self.epsilon_max - self.epsilon_min)*np.exp(-self.epsilon_decay*self.step)\n if np.random.rand() > self.epsilon or mode.lower() == \"test\":\n return self._take_action(state)\n else:\n return random.randrange(self.action_size)", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n def expectedvalue(gameState, agentindex, depth):\n if gameState.isWin() or gameState.isLose() or depth == 0:\n return self.evaluationFunction(gameState)\n numghosts = gameState.getNumAgents() - 1\n legalActions = 
gameState.getLegalActions(agentindex)\n numactions = len(legalActions)\n totalvalue = 0\n for action in legalActions:\n nextState = gameState.generateSuccessor(agentindex, action)\n if (agentindex == numghosts):\n totalvalue += maxvalue(nextState, depth - 1)\n else:\n totalvalue += expectedvalue(nextState, agentindex + 1, depth)\n return totalvalue / numactions\n def maxvalue(gameState, depth):\n if gameState.isWin() or gameState.isLose() or depth == 0:\n return self.evaluationFunction(gameState)\n legalActions = gameState.getLegalActions(0)\n score = -(float(\"inf\"))\n for action in legalActions:\n nextState = gameState.generateSuccessor(0, action)\n score = max(score, expectedvalue(nextState, 1, depth))\n return score\n if gameState.isWin() or gameState.isLose():\n return self.evaluationFunction(gameState)\n legalActions = gameState.getLegalActions(0)\n bestaction = Directions.STOP\n score = -(float(\"inf\"))\n for action in legalActions:\n nextState = gameState.generateSuccessor(0, action)\n prevscore = score\n score = max(score, expectedvalue(nextState, 1, self.depth))\n if score > prevscore:\n bestaction = action\n return bestaction", "def evaluationFunction(self, currentGameState, action):\n # Useful information you can extract from a GameState (pacman.py)\n successorGameState = currentGameState.generatePacmanSuccessor(action)\n newPos = successorGameState.getPacmanPosition()\n newFood = successorGameState.getFood()\n newGhostStates = successorGameState.getGhostStates()\n newScaredTimes = [ghostState.scaredTimer for ghostState in newGhostStates]\n\n \"*** YOUR CODE HERE ***\"\n foodPellets = newFood.asList()\n currPos = currentGameState.getPacmanPosition()\n closestFood, closestFoodPos = self.getClosestFood(foodPellets, newPos) \n closestGhostDist = 1000000\n closestGhost = ()\n for ghost in newGhostStates:\n distToGhost = manhattanDistance(newPos, ghost.getPosition())\n if distToGhost < closestGhostDist:\n closestGhostDist = distToGhost\n closestGhost = ghost\n\n if closestGhostDist > 0:\n if closestGhost.scaredTimer / closestGhostDist > 1:\n return successorGameState.getScore() + 25 / closestGhostDist + 1 / closestFood\n\n if closestGhostDist <= 2:\n return -len(foodPellets) - 5 / closestGhostDist + 1 / closestFood\n\n if closestFoodPos != ():\n if newPos == self.previousLoc: \n return -len(foodPellets) - 1 + 1 / closestFood\n\n if currPos == newPos:\n return -len(foodPellets) - .5 + 1 / closestFood\n\n return -len(foodPellets) + 1 / closestFood", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n result = float(\"-inf\")\n action = 1\n alfa = float(\"-inf\")\n beta = float(\"inf\")\n for agentState in gameState.getLegalActions(0):\n valorminimax = self.alfaBeta(1, 0, alfa, beta, gameState.generateSuccessor(0, agentState))\n if valorminimax > result:\n result = valorminimax\n action = agentState\n if result > beta:\n return result\n alfa = max(alfa,result)\n return action", "def get_action(self,state):\n \n q_values = self.__network.predict(state[None])[0]\n \n ###YOUR CODE\n if np.random.rand()<self.epsilon:\n return np.random.choice(self.n_actions)\n return np.argmax(q_values)", "def computeActionFromValues(self, state):\n\n ##util.raiseNotDefined()\n #\"*** YOUR CODE STARTS HERE ***\"\n \n # Code to remove --- from here\n resultingAction = None\n if self.mdp.isTerminal(state):\n return resultingAction\n else:\n bestq = float(\"-inf\")\n actions = self.mdp.getPossibleActions(state)\n for action in actions:\n qvalue = self.computeQValueFromValues(state, action)\n if 
qvalue > bestq:\n bestq = qvalue\n resultingAction = action\n return resultingAction\n\n # Code to remove --- to here\n #\"*** YOUR CODE FINISHES HERE ***\"", "def select_action_q_and_u(self, env, is_root_node) -> chess.Move:\n\n # this method is called with state locked\n state = board_state_key(env)\n\n my_visitstats = self.tree[state]\n\n if my_visitstats.p is not None: #push p to edges\n tot_p = 1e-8\n for mov in env.legal_moves():\n mov_p = my_visitstats.p[self.move_lookup[mov]]\n my_visitstats.a[mov].p = mov_p\n tot_p += mov_p\n for a_s in my_visitstats.a.values():\n a_s.p /= tot_p\n my_visitstats.p = None\n\n xx_ = np.sqrt(my_visitstats.sum_n + 1) # sqrt of sum(N(s, b); for all b)\n\n e = self.play_conf.noise_eps\n c_puct = self.play_conf.c_puct\n dir_alpha = self.play_conf.dirichlet_alpha\n\n best_s = -999\n best_a = None\n if is_root_node:\n noise = np.random.dirichlet([dir_alpha] * len(my_visitstats.a))\n\n i = 0\n for action, a_s in my_visitstats.a.items():\n p_ = a_s.p\n if is_root_node:\n p_ = (1-e) * p_ + e * noise[i]\n i += 1\n b = a_s.q + c_puct * p_ * xx_ / (1 + a_s.n)\n if b > best_s:\n best_s = b\n best_a = action\n\n return best_a", "def get_action(agent, context, epsilon=0):\n\n num_contexts = context.shape[0]\n\n # Attach one-hot encoding of actions at the end of context vector\n no_eat_action = np.hstack([context, np.ones((num_contexts, 1)), np.zeros((num_contexts, 1))])\n eat_action = np.hstack([context, np.zeros((num_contexts, 1)), np.ones((num_contexts, 1))])\n no_eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(no_eat_action))\n no_eat_rewards = np.array(list(no_eat_rewards))\n\n eat_rewards = agent.predict(input_fn=lambda: tf.data.Dataset.from_tensor_slices(eat_action))\n eat_rewards = np.array(list(eat_rewards))\n\n rewards = np.hstack([no_eat_rewards, eat_rewards])\n\n # Epsilon-greedy policy\n # Start completely greedy\n action = np.argmax(rewards, axis=1)\n\n # Select indices to update\n rand_indices = np.random.uniform(low=0., high=1., size=num_contexts) < epsilon\n\n # Select random actions\n rand_actions = np.random.choice([0, 1], size=num_contexts)\n\n action[rand_indices] = rand_actions[rand_indices]\n\n return action" ]
[ "0.7672569", "0.732197", "0.7154065", "0.71332085", "0.70703125", "0.6964753", "0.69215", "0.6916414", "0.68847233", "0.68748766", "0.6836524", "0.6821661", "0.677791", "0.6760553", "0.6743109", "0.6672397", "0.66714406", "0.65827435", "0.6575905", "0.6562029", "0.65613985", "0.65398884", "0.65393025", "0.65344614", "0.645577", "0.6417135", "0.63828146", "0.6382662", "0.635599", "0.632851", "0.6307598", "0.62722313", "0.62628645", "0.6256814", "0.623911", "0.6233714", "0.6233481", "0.62286305", "0.62281585", "0.622089", "0.62173516", "0.6197078", "0.61955154", "0.61888313", "0.61852646", "0.6185157", "0.6165026", "0.6163418", "0.6161559", "0.614868", "0.61482793", "0.61375535", "0.6132388", "0.6118416", "0.6111237", "0.6111049", "0.6101341", "0.6072997", "0.6068798", "0.60633415", "0.6061681", "0.6057297", "0.6050691", "0.6050691", "0.6038685", "0.6032264", "0.60305583", "0.6019094", "0.6013613", "0.6013481", "0.60068077", "0.59868395", "0.5985903", "0.5983546", "0.59824955", "0.5974881", "0.59739757", "0.5970625", "0.5970625", "0.5964136", "0.59597856", "0.5950078", "0.5942852", "0.59417784", "0.5936901", "0.59364825", "0.59336406", "0.5933639", "0.5929364", "0.5903838", "0.5901176", "0.5896275", "0.58881646", "0.58879757", "0.5873936", "0.5872674", "0.586953", "0.5867659", "0.5866252", "0.5865699" ]
0.6494198
24
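The objective metadata attached to each record lists a triplet objective over (query, document, negatives). Below is a minimal sketch of how one dumped row could be expanded into contrastive training triplets; the field names mirror this dump's schema, while the JSON-lines loading and the file name are illustrative assumptions, not something shown in the source.

import json

def row_to_triplets(row: dict):
    """Yield (anchor, positive, negative) triplets from one record."""
    query = row["query"]          # natural-language docstring
    positive = row["document"]    # the code snippet the query describes
    for negative in row["negatives"]:  # hard negatives: similar but wrong code
        yield (query, positive, negative)

# Usage: one triplet per negative, all sharing the same query/positive pair.
with open("rows.jsonl") as f:          # hypothetical file name
    for line in f:
        for anchor, pos, neg in row_to_triplets(json.loads(line)):
            pass  # feed into a triplet-loss trainer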
Main method that is called by the user to calculate the manipulated variable.
def control(self, state, reference):
    self.ref[-1] = reference[self.ref_idx]  # Set the reference

    epsilon_d = state[self.eps_idx] * self.limit[self.eps_idx] + self.dead_time * self.tau * state[self.omega_idx] * \
        self.limit[self.omega_idx] * self.mp['p']  # Calculate delta epsilon

    # Iterate through high-level controller
    if self.omega_control:
        for i in range(len(self.overlaid_controller) + 1, 1, -1):
            # Calculate reference
            self.ref[i] = self.overlaid_controller[i - 2].control(state[self.ref_state_idx[i + 1]], self.ref[i + 1])

            # Check limits and integrate
            if (0.85 * self.state_space.low[self.ref_state_idx[i]] <= self.ref[i] <=
                    0.85 * self.state_space.high[self.ref_state_idx[i]]) and self.overlaid_type[i - 2]:
                self.overlaid_controller[i - 2].integrate(state[self.ref_state_idx[i + 1]], self.ref[i + 1])
            else:
                self.ref[i] = np.clip(self.ref[i],
                                      self.nominal_values[self.ref_state_idx[i]] / self.limit[self.ref_state_idx[i]]
                                      * self.state_space.low[self.ref_state_idx[i]],
                                      self.nominal_values[self.ref_state_idx[i]] / self.limit[self.ref_state_idx[i]]
                                      * self.state_space.high[self.ref_state_idx[i]])

    # Calculate reference values for i_d and i_q
    if self.torque_control:
        torque = self.ref[2] * self.limit[self.torque_idx]
        self.ref[0], self.ref[1] = self.torque_controller.control(state, torque)

    # Calculate action for continuous action space
    if self.has_cont_action_space:
        # Decouple the two current components
        if self.decoupling:
            self.u_sd_0 = -state[self.omega_idx] * self.mp['p'] * self.mp['l_q'] * state[self.i_sq_idx] \
                * self.limit[self.i_sq_idx] / self.limit[self.u_sd_idx] * self.limit[self.omega_idx]
            self.u_sq_0 = state[self.omega_idx] * self.mp['p'] * (
                state[self.i_sd_idx] * self.mp['l_d'] * self.limit[self.u_sd_idx] + self.psi_p) / self.limit[
                self.u_sq_idx] * self.limit[self.omega_idx]

        # Calculate action for u_sd
        if self.torque_control:
            u_sd = self.d_controller.control(state[self.i_sd_idx], self.ref[1]) + self.u_sd_0
        else:
            u_sd = self.d_controller.control(state[self.i_sd_idx], reference[self.ref_d_idx]) + self.u_sd_0

        # Calculate action for u_sq
        u_sq = self.q_controller.control(state[self.i_sq_idx], self.ref[0]) + self.u_sq_0

        # Shifting the reference potential
        action_temp = self.backward_transformation((u_sd, u_sq), epsilon_d)
        action_temp = action_temp - 0.5 * (max(action_temp) + min(action_temp))

        # Check limit and integrate
        action = np.clip(action_temp, self.action_space.low[0], self.action_space.high[0])
        if (action == action_temp).all():
            if self.torque_control:
                self.d_controller.integrate(state[self.i_sd_idx], self.ref[1])
            else:
                self.d_controller.integrate(state[self.i_sd_idx], reference[self.ref_d_idx])
            self.q_controller.integrate(state[self.i_sq_idx], self.ref[0])

    # Calculate action for discrete action space
    else:
        ref = self.ref[1] if self.torque_control else reference[self.ref_d_idx]
        ref_abc = self.backward_transformation((ref, self.ref[0]), epsilon_d)
        action = 0
        for i in range(3):
            action += (2 ** (2 - i)) * self.abc_controller[i].control(state[self.i_abc_idx[i]], ref_abc[i])

    # Plot overlaid reference values
    plot(external_reference_plots=self.external_ref_plots, state_names=self.state_names,
         external_data=self.get_plot_data(), visualization=True)

    return action
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_calculation():", "def execute(self):\n \n self.outvar = self.invar + .01", "def main():\n user_input_name()\n user_input_age()\n choose_unit()\n user_input_weight()\n user_input_height()\n bmi_calculator()\n bmi_categories()\n restart_calculator()", "def main():\n\texpression = input(\"Enter expression \")\n\tans = calculate(expression)\n\n\tprint(ans)", "def calculate(self):", "def calculations():\r\n\t\r\n\tpayload, avionics, booster = weight_input()\r\n\r\n\tdrogue_size, drogue_force = drogue_calc()\r\n\tmain_size, main_force = main_calc(avionics, booster, drogue_force) #total mass, payload detaches\r\n\r\n\tprint(\"Drogue is diameter is \" + str(drogue_size) + \" inches\")\r\n\tprint(\"Main is diameter is \" + str(main_size) + \" inches\")", "def main(args):\n if isinstance(args, list):\n args = parse_args(args)\n setup_logging(args.loglevel)\n _logger.debug(f\"Starting ({name}) operation...\")\n answer = reduction(args.operands)\n print(f\"{answer}\")\n _logger.info(f\"End climath ({name}).\")", "def main(data, setup):\n # input check \n varnames = ('vm_raw', 'vm_raw_theo')\n for varname in varnames:\n if varname not in data.keys():\n raise LookupError('data must contain variable %s.' %s)\n\n # display info message\n chrono = setup['chrono']\n chrono.issue('target velocity: correct for sensor motion...')\n\n # retrieve varialbes\n vnys = data['nqv']\n v_sensor_r = data['v_sensor_r']\n\n # ========== main =================================== #\n for key_raw in ('vm_raw', 'vm_raw_theo'):\n key_c = key_raw.replace('raw', 'raw_c')\n\n # sum\n vm_raw = data[key_raw]\n v_sum = (vm_raw + np.expand_dims(v_sensor_r, 1))\n\n # mod\n data[key_c] = symmod(v_sum, vnys)\n # ==================================================== #\n\n return data", "def main2(cls, args):\r\n var = 1\r\n print \"Value before increment :\" , var\r\n cls.increment(var)\r\n print \"Value after increment :\" , var", "def main():\n model = Calculator()", "def calculate(self):\r\n\r\n pass", "def calculate_output(self):", "def main():\n printfunc(calc(menu()))", "def main(cls, args):\r\n var = cls.MyInt()\r\n print \"Value before increment :\" , var.value\r\n cls.increment(var)\r\n print \"Value after increment :\" , var.value", "def main():\n welcome_message()\n continue_program = True\n num_calculations = 0\n # all the calculation options in the program\n calculation_options = [\"addition\", \"subtraction\", \"division\",\n \"multiplication\", \"exponents\", \"circle area\",\n \"cube area\",\n \"repeat words\", \"inequalities\", \"in-range\",\n \"stop program\"]\n\n while continue_program:\n print(\"Enter the option number of the calculation you would like to \"\n \"perform: \")\n # prints the calculations options list as a numbered list\n for calculation in calculation_options:\n print(calculation_options.index(calculation) + 1, \". \",\n calculation, sep=\"\")\n\n while True:\n try:\n user_input = int(input(\"\"))\n break\n except ValueError:\n print(\n \"That was not a valid input. 
Please enter a whole number \"\n \"between 1 and 11.\")\n\n if user_input in range(1, 12):\n if user_input == 1: # addition\n run_addition = True\n while run_addition:\n try:\n user_num1 = float(input(\"Enter the first number: \"))\n user_num2 = float(input(\"Enter the second number: \"))\n addition(user_num1, user_num2)\n run_addition = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 2: # subtraction\n run_subtraction = True\n while run_subtraction:\n try:\n user_num1 = float(input(\"Enter the first number: \"))\n user_num2 = float(input(\"Enter the second number: \"))\n print(\"The difference is \",\n subtraction(user_num1, user_num2), \".\\n\", sep=\"\")\n run_subtraction = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 3: # division\n run_division = True\n while run_division:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n division(user_num1, user_num2)\n run_division = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 4: # multiplication\n run_multiplication = True\n while run_multiplication:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n print(\"The product is \",\n multiplication(user_num1, user_num2), \".\\n\",\n sep=\"\")\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 5: # calculates num1 to the num2 power\n run_exponents = True\n while run_exponents:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n print(user_num1, \" to the \", user_num2, \" power is \",\n exponents(user_num1, user_num2), \".\\n\", sep=\"\")\n run_exponents = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 6: # circle area\n run_circle_area = True\n while run_circle_area:\n try:\n user_radius = float(input(\"Enter a radius: \"))\n print(\"The area is \", circle_area(user_radius), \".\\n\",\n sep=\"\")\n run_circle_area = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 7: # cube area\n run_cube_area = True\n while run_cube_area:\n try:\n user_length = float(\n input(\"Enter the length of one side of the cube\"))\n print(\"The area of the cube is \",\n cube_area(user_length), \".\\n\", sep=\"\")\n run_cube_area = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 8: # repeats given word a certain number of\n # times\n run_repeat = True\n while run_repeat:\n try:\n user_word = input(\n \"Enter the word you want to repeat: \")\n repeat = int(\n input(\"How many times do you want to repeat it: \"))\n print(user_word * repeat, \"\\n\")\n run_repeat = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 9: # whether num1 <,>, or = num2\n run_inequalities = True\n while run_inequalities:\n try:\n user_num1 = float(input(\"Enter a number: \"))\n user_num2 = float(input(\"Enter a second number: \"))\n inequalities(user_num1, user_num2)\n run_inequalities = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 10: # whether a number is in a certain range\n run_range = True\n while run_range:\n try:\n user_num = float(input(\"Enter a number: \"))\n user_start_range = float(\n input(\"What number does the range start at? 
\"))\n user_end_range = float(\n input(\"What number does the range end at? \"))\n range_function(user_num, user_start_range,\n user_end_range)\n run_range = False\n except ValueError:\n print(\"That was not a valid input.\")\n\n elif user_input == 11: # prints number of calculations performed\n # ran and stops running\n print(\"You ran the program\", num_calculations, \"times.\")\n continue_program = False\n\n else:\n print(\"That was not an option. Please select an option from \"\n \"1 to 11.\")\n\n if user_input not in range(1, 12):\n print(\n \"That was not an option. \"\n \"Please select an option from 1 to 11.\")\n\n num_calculations += 1 # keeps count of the number of calculations\n # performed", "def main_calc(avionics, booster, drogue_force):\r\n\tif bool(eval(input('Do you change the main section? (y/n) '))):\r\n\t\tcoeff_drag = float(input('Drogue Coefficient of Drag: '))\r\n\t\tmax_velocity = velocity_calc(max(avionics,booster), 0.5)\t\r\n\t\r\n\t\tarea = ((((avionics+booster) * g)/(.5 * (max_velocity ** 2) * rho)) - drogue_force) / coeff_drag\r\n\t\tradius = math.sqrt(area/math.pi) #ft\r\n\t\tdiameter = radius * 2 * 12 #inches\r\n\t\tprint(\"Main is diameter should be at least \" + str(diameter) + \" inches\")\r\n\t\tfinal_diameter = float(input('Please decide on final main parachute size (inches) :'))\r\n\telse:\r\n\t\tcoeff_drag = 2.2\r\n\t\tfinal_diameter = 72.0\r\n\t\r\n\t\r\n\treturn final_diameter, coeff_drag * (math.pi * (final_diameter/2/12)**2) # Force of Drogue = Coeff of Drag * Area of Drogue \r", "def my_main():\n x = random_2d()\n y = random_1d()\n\n sum = my_add(x,y)\n diff = my_subtract(x,y)\n\n print(\"x = {}, y = {}\".format(x,y))\n print(\"sum is {}\".format(sum))\n print(\"diff is {}\".format(diff))\n\n print (globals())", "def precalculate():\n pass", "def precalculate():\n pass", "def calculate(self):\r\n pass", "def main():\n first = get_num()\n second = get_num()\n print('The sum of {0} and {1} is {2}'.format(first, second, add(first, second)))\n print('The diff of {0} and {1} is {2}'.format(first, second, diff(first, second)))\n print('The prod of {0} and {1} is {2}'.format(first, second, prod(first, second)))\n print('The quotient of {0} and {1} is {2}'.format(first, second, div(first, second)))\n return 0", "def main():\n with open (\"test.txt\", 'r') as f:\n read_data = f.readline()\n\n read_data = read_data.strip()\n read_data = [int(x) for x in read_data.split()]\n\n #print(\"Input is = \", read_data)\n root = parse_input(read_data)\n #print (\"Root is: \", root)\n total = get_meta_sum(root)\n print (\"Sum is: \", total)\n value = get_value(root)\n print (\"Value is: \", value)", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def calculate(self):\n pass", "def main():\n\n population = 276470345\n\n # These 3 variables are for the known probabilities.\n # Change them to see the effect on P(ill|positive)\n P_ill = 0.0806212326\n P_positive_if_ill = 0.94 # sensitivity\n P_negative_if_healthy = 0.98 # specificity\n\n print()\n\n calculate_with_bayes(P_ill, P_positive_if_ill, P_negative_if_healthy)", "def main() -> None:\n\n # input\n radius = int(input(\"Enter the radius of a circle (cm): \"))\n print(\"\")\n\n # call functions\n calculate_area(radius)\n\n print(\"\\nDone.\")", "def main():\n \n welcome()\n myBill = get_bill_amt()\n pct = get_tip_pct()\n tip = calc_tip(myBill, pct)\n show_results(myBill, tip, pct)", "def main() -> None:\n\n # input\n length_from_user = int(input(\"Enter the length of a rectangle (cm): 
\"))\n width_from_user = int(input(\"Enter the width of a rectangle (cm): \"))\n print(\"\")\n\n # call functions\n area = calculate_area(width = width_from_user, length = length_from_user)\n\n # output\n print(f\"The area is {area} cm²\")\n\n print(\"\\nDone.\")", "def calculate(self) -> float:", "def run_script(input_dir, output_dir):\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 1. load dataset \"\"\"\n print(\"loading data ......\")\n print(\"+++++++Read the surface shape data+++++++\")\n shape_file_name = input_dir + \"aligned_shapes.mat\"\n mat = loadmat(shape_file_name)\n y_design = mat['aligned_shape']\n n, l, m = y_design.shape\n print(\"The dimension of shape matrix is \" + str(y_design.shape))\n print(\"+++++++Read the sphere coordinate data+++++++\")\n template_file_name = input_dir + \"template.mat\"\n mat = loadmat(template_file_name)\n coord_mat = mat['template']\n # d = coord_mat.shape[1]\n print(\"+++++++Read the design matrix+++++++\")\n design_data_file_name = input_dir + \"design_data.txt\"\n design_data = np.loadtxt(design_data_file_name)\n # read the covariate type\n var_type_file_name = input_dir + \"var_type.txt\"\n var_type = np.loadtxt(var_type_file_name)\n print(\"+++++++Construct the design matrix: normalization+++++++\")\n x_design = read_x(design_data, var_type)\n p = x_design.shape[1]\n print(\"The dimension of design matrix is \" + str(x_design.shape))\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step 2. Statistical analysis: including (1) smoothing and (2) hypothesis testing\"\"\"\n gpvals, lpvals_fdr, clu_pvals, efit_beta, efity_design, efit_eta = mfsda.run_stats(y_design, coord_mat, design_data, var_type)\n\n \"\"\"+++++++++++++++++++++++++++++++++++\"\"\"\n \"\"\"Step3. Save all the results\"\"\"\n gpvals_file_name = output_dir + \"global_pvalue.txt\"\n np.savetxt(gpvals_file_name, gpvals)\n lpvals_fdr_file_name = output_dir + \"local_pvalue_fdr.txt\"\n np.savetxt(lpvals_fdr_file_name, lpvals_fdr)\n clu_pvals_file_name = output_dir + \"cluster_pvalue.txt\"\n np.savetxt(clu_pvals_file_name, clu_pvals)", "def main(): \n symbolic_sample()\n print 'Done.'", "def main():\r\n # Create an instance of the MyCallCostCalc class.\r\n my_callcost = MyCallCostCalc()", "def main():\n\n\t# Parse the file\n\tmem_file = advanced_analysis('../data_1/mempages.dat.out')", "def main():\n logging.info(\"Executing data quality module\")\n\n calculate_quality()", "def main():\n station = \"Merikannontie\"\n coefs, score = cycling_weather_linregr(station)\n print(f\"Measuring station: {station}\")\n print(\n f\"Regression coefficient for variable 'precipitation': {coefs[0]:.1f}\")\n print(f\"Regression coefficient for variable 'snow depth': {coefs[1]:.1f}\")\n print(f\"Regression coefficient for variable 'temperature': {coefs[2]:.1f}\")\n print(f\"Score: {score:.2f}\")\n return", "def main():\n logger = logging.getLogger()\n x1 = 2\n y1 = 3\n logger.info('Realizando suma')\n logger.debug('{x} + {y} = '.format(x=x1, y=y1) + str(add(x1, y1)))\n logger.info('Realizando resta')\n logger.debug('{x} - {y} = '.format(x=x1, y=y1) + str(subtract(x1, y1)))\n logger.info('Realizando multiplicación')\n logger.debug('{x} * {y} = '.format(x=x1, y=y1) + str(multiply(x1, y1)))\n logger.info('Realizando división')\n logger.debug('{x} / {y} = '.format(x=x1, y=y1) + str(divide(x1, y1)))\n logger.debug('TERMINADO')\n logger.critical('FAIL')", "def main():\n # Call testing function\n testMinivan()", "def main():\n _input = read_lines_to_list(r'input.txt')\n 
dimensions = get_dimensions(_input)\n plotter = plot_claims_on_matrix(dimensions)\n answer_part_1 = calculate_claims_within_two_or_more_claims(plotter)\n print(answer_part_1)", "def Main():\n\n # These dictionaries help prevent bugs by checking the input of the user before sending it to the functions\n Versions = [\"FirstIteration\", \"SecondIteration\", \"ThirdIteration\", \"FourthIteration\", \"FifthIteration\"]\n PossibleSpecies = [1, 2, 3, 4, 5]\n Modes = [\"Ionian\", \"Dorian\", \"Phrygian\", \"Lydian\", \"Mixolydian\", \"Aeolian\"]\n\n while True:\n # Ask the user to specify a version of the code, the desired species, and the desired mode for an exercise\n # The 'while' statements help prevent bugs\n Version = input(\"Algorithm Version: \")\n while Version not in Versions:\n Version = input(\"Invalid version. Enter a true version: \")\n\n Species = int(input(\"Species: \"))\n while Species not in PossibleSpecies:\n Species = int(input(\"Invalid Species. Enter a true species: \"))\n\n Mode = input(\"Mode: \")\n while Mode not in Modes:\n Mode = input(\"Invalid Mode. Enter a true mode: \")\n\n # Use that information to generate a counterpoint exercise\n EX = Exercise(Version, Species, Mode)\n\n # Extract the data from CP\n Counterpoint = EX.Counterpoint\n Cantus = EX.Cantus\n\n # Generate a graph of the results\n Graph(Version, Species, Mode, Counterpoint, Cantus)\n\n # These lines keep the output clean for repeated use\n print(Version + \": \" + \"Species \" + str(Species) + \" in \" + Mode + \" Mode Completed\")\n print(\"\\n\\n\")", "def basic_calculator():\r\n\r\n num1 = input(\"Enter first number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num1' to float\r\n try:\r\n num1 = float(num1)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n num2 = input(\"Enter second number: \") # taking input\r\n\r\n # handling the exception of typecasting the value of 'num2' to float\r\n try:\r\n num2 = float(num2)\r\n except ValueError:\r\n print(\"Error: Input numeric values.\\nTry Again!\")\r\n exit()\r\n\r\n # Asking user for the operation\r\n print(\"Select the operation:\")\r\n print(\"Type:\")\r\n print(\"1 for Addition\\n2 for Subtraction\\n3 for Multiplication\\n4 for Division\\n5 for Integer Division\\n6 for Power\")\r\n choice = input(\"Enter your choice: \")\r\n\r\n result = 0.0\r\n\r\n # Performing the operation and providing the result\r\n if choice == '1':\r\n result = num1 + num2\r\n elif choice == '2':\r\n result = num1 - num2\r\n elif choice == '3':\r\n result = num1 * num2\r\n elif choice == '4':\r\n result = num1 / num2\r\n elif choice == '5':\r\n result = num1 // num2\r\n elif choice == '6':\r\n result = num1 ** num2\r\n else:\r\n print(\"Wrong Input! 
Try Again.\")\r\n exit()\r\n\r\n print(f'\\nThe result is: {result}')", "def calculate_vars(self):\n pass", "def calculate():\n\n # Get all input data from the GUI\n age = float(age_input.get())\n weight = float(weight_input.get())\n height = float(height_input.get())\n heartrate = float(heartrate_input.get())\n duration = float(duration_input.get())\n\n if gender.get() == 0:\n # Calculate data for males\n bmr = male_bmr(weight, height, age)\n gross_calories = male_calories(heartrate, weight, age, duration)\n else:\n # Calculate data for females\n bmr = female_bmr(weight, height, age)\n gross_calories = female_calories(heartrate, weight, age, duration)\n\n net_calories = gross_calories - (bmr / 1440 * duration)\n\n # Display calculated data\n bmr_output.config(text=int(bmr))\n gross_output.config(text=int(gross_calories))\n net_output.config(text=int(net_calories))", "def main(self):\n # Creating the inpust matrix for the solver method using the input_data\n # function\n input_matrix, legend = self.input_data()\n # Adjusting the input matrix if origo is to be set at the center of mass\n # instead of center of initial origo\n if self.CM is True:\n input_matrix = self.center_of_mass(input_matrix)\n # Inintialiszing the solver method with the selected input\n I = solver(input_matrix, self.method, self.t, self.N)\n # Running the solver, extracting a position matrix and arrays conatining\n # information on the energies and angular momentum of the system\n output_matrix, KE, PE, AM = I.main()\n # If the optional variable plot_energies is given a input string that\n # matches the code names, the selected energies or momentum will be plotted\n # and the positional plots will be supressed\n if self.plot_energies != None:\n if self.plot_energies == 'PE':\n self.potential_energy(PE)\n elif self.plot_energies == 'KE':\n self.kinetic_energy(KE)\n elif self.plot_energies == 'TOT':\n self.total_energy(KE, PE)\n elif self.plot_energies == 'AM':\n self.angular_momentum(AM)\n plt.show()\n # If the optional variable -D is called and given 3 as argument, the\n # positions will be plotted in three dimensions.\n else:\n if self.dim == 3:\n self.plot_3D(output_matrix, legend)\n # The positions are plotted in three dimensions as default\n else:\n self.plot_2D(output_matrix, legend)\n plt.show()", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():\n\tparser = argparse.ArgumentParser(\n\t\tusage = '%(prog)s [OPTIONS] [ARGS...]',\n\t\tdescription='Calculate something',\n\t\tepilog='Contact simon.clematide@uzh.ch'\n\t\t)\n\tparser.add_argument('--version', action='version', version='0.99')\n\tparser.add_argument('-l', '--logfile', dest='logfile',\n\t\t\t\t\t\thelp='write log to FILE', metavar='FILE')\n\tparser.add_argument('-q', '--quiet',\n\t\t\t\t\t\taction='store_true', dest='quiet', default=False,\n\t\t\t\t\t\thelp='do not print status messages to stderr')\n\tparser.add_argument('-d', '--debug',\n\t\t\t\t\t\taction='store_true', dest='debug', default=False,\n\t\t\t\t\t\thelp='print debug information')\n\tparser.add_argument('-s', '--lm_dir',\n\t\t\t\t\t\taction='store', dest='lm_dir', default='resources.d/taggers/language-model/',\n\t\t\t\t\t\thelp='directory where LMs are stored %(default)')\n\tparser.add_argument('-i', 
'--iob_dir',\n\t\t\t\t\t\taction='store', dest='iob_dir', default='data.d/quaero/quaero_iob',\n\t\t\t\t\t\thelp='directory where iob training material is located %(default)')\n\tparser.add_argument('-t', '--tagger_dir',\n\t\t\t\t\t\taction='store', dest='tagger_dir', default='resources.d/taggers',\n\t\t\t\t\t\thelp='directory where to store training output %(default)')\n\tparser.add_argument('-n', '--ner_cycle',\n\t\t\t\t\t\taction='store', dest='ner_cycle', default='ner',\n\t\t\t\t\t\thelp='ner experiment cycle %(default)')\n\tparser.add_argument('-c', '--correction_mode',\n\t\t\t\t\t\taction='store', dest='correction_mode', default='raw',\n\t\t\t\t\t\thelp='correction mode of the NEs in training data %(default)')\n\tparser.add_argument('-m', '--lm_domain',\n\t\t\t\t\t\taction='store', dest='lm_domain', default='pressfr',\n\t\t\t\t\t\thelp='character level language model domain %(default)')\n\tparser.add_argument('-p', '--train_patience',\n\t\t\t\t\t\taction='store', dest='train_patience', type=int, default=3,\n\t\t\t\t\t\thelp='training patience %(default)')\n\tparser.add_argument('-W', '--use_wiki_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_wiki_wordemb', default=False,\n\t\t\t\t\t\thelp='use pre-trained wiki word embeddings')\n\tparser.add_argument('-P', '--use_press_wordemb',\n\t\t\t\t\t\taction='store_true', dest='use_press_wordemb', default=False,\n\t\t\t\t\t\thelp='use indomain press word embeddings')\n\tparser.add_argument('-C', '--use_crf',\n\t\t\t\t\t\taction='store_true', dest='use_crf', default=False,\n\t\t\t\t\t\thelp='use CRF layer')\n\tparser.add_argument('args', nargs='*')\n\toptions = parser.parse_args()\n\tif options.logfile:\n\t\tlogging.basicConfig(filename=logfile)\n\tif options.debug:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\n\ttrain_tagger(options)", "def main():\n room_length = input(\"What is the length of the room in feet?\\n> \")\n room_width = input(\"What is the width of the room in feet?\\n> \")\n print(f\"You entered dimensions of {room_length} feet by {room_width} feet.\")\n room_square_feet = int(room_length) * int(room_width)\n meter_conversion = ((room_square_feet) * 0.09290304)\n print(f\"The area is\\n{room_square_feet} square feet\\n{meter_conversion} square meters\")", "def main():\n obj = PowerMaxVolume()\n obj.perform_module_operation()", "def calc_variables ( ):\n\n # In this example we simulate using the shifted-force potential only\n # The values of < p_sf >, < e_sf > and density should be consistent (for this potential)\n # There are no long-range or delta corrections\n\n from averages_module import VariableType\n \n # Preliminary calculations\n vol = box**3 # Volume\n rho = n / vol # Density\n\n # Variables of interest, of class VariableType, containing three attributes:\n # .val: the instantaneous value\n # .nam: used for headings\n # .method: indicating averaging method\n # If not set below, .method adopts its default value of avg\n # The .nam and some other attributes need only be defined once, at the start of the program,\n # but for clarity and readability we assign all the values together below\n\n # Move acceptance ratio\n m_r = VariableType ( nam = 'Move ratio', val = m_ratio, instant = False )\n\n # Internal energy per molecule (shifted-force potential)\n # Ideal gas contribution (assuming nonlinear molecules) plus total PE divided by N\n e_sf = VariableType ( nam = 'E/N shifted force', val = 3.0*temperature + total.pot/n )\n\n # Pressure (shifted-force potential)\n # Ideal gas contribution plus total virial divided 
by V\n p_sf = VariableType ( nam = 'P shifted force', val = rho*temperature + total.vir/vol )\n\n # Collect together into a list for averaging\n return [ m_r, e_sf, p_sf ]", "def exe(self, func_mox):\n ## combine several dict of parameters\n cond = dict(self.cond_ex, **self.cond_cal, **self.const_model, **self.funclist_cea, **self.plot_param)\n ## set several constant, function and variables before calculation\n N = self.cond_ex[\"N\"]\n func_cstr = cond[\"func_CSTAR\"]\n cond[\"time\"], cond[\"x\"], r_tmp, rdot_tmp, rdotn_tmp = mod_shape.initialize_calvalue(**cond)\n self.x = cond[\"x\"]\n val = {}\n ## Following iteration part is the main sectioin of this simulation program.\n for t in tqdm(cond[\"time\"]):\n ## update each value at the follwoing lines\n self.t_history = np.append(self.t_history, t)\n mox = func_mox(t)\n self.mox_history = np.append(self.mox_history, mox)\n if t == 0:\n Pc = cond[\"Pci\"]\n else:\n Pc = Pc_new\n val[\"Pc\"] = Pc\n self.Pc_history = np.append(self.Pc_history, Pc)\n Vox = mod_shape.func_Vox(mox, Pc, **cond)\n val[\"Vox\"] = Vox\n self.Vox_history = np.append(self.Vox_history, Vox)\n Vf = mod_shape.func_Vf(Vox, Pc, **cond)\n self.Vf_history = np.append(self.Vf_history, Vf)\n if t != 0:\n r_tmp = r_new_tmp\n rdot_tmp = rdot_new_tmp\n rdotn_tmp = rdotn_new_tmp\n ## reshape and eliminate the unneccesary part of regression shape.\n r, rdot, rdotn = mod_shape.func_rcut(r_tmp, rdot_tmp, rdotn_tmp, self.t_history, self.Vf_history, **cond)\n self.r_history = np.vstack((self.r_history, r))\n self.rdot_history = np.vstack((self.rdot_history, rdot))\n self.rdotn_history = np.vstack((self.rdotn_history, rdotn))\n ## calculate the others parameter at the following lines\n if cond[\"Vf_mode\"]:\n mf = N *mod_shape.func_mf(r[~np.isnan(r)].size-1, r[~np.isnan(r)], rdot[~np.isnan(rdot)], Vf=Vf, **cond)\n else:\n mf = N *mod_shape.func_mf(r[~np.isnan(r)].size-1, r[~np.isnan(r)], rdot[~np.isnan(rdot)], Vf=Vf, **cond)\n self.mf_history = np.append(self.mf_history, mf)\n if mf<=0.0:\n of = np.nan\n cstr_ex = Pc*np.pi*np.power(cond[\"Dt\"], 2)/(4*mox)\n else:\n of = mox/mf\n cstr_ex = cond[\"eta\"]*func_cstr(of, Pc)\n self.of_history = np.append(self.of_history, of)\n self.cstr_history = np.append(self.cstr_history, cstr_ex)\n ## calculate the next time step values at the following lines\n val[\"r\"] = r_tmp\n val[\"rdot\"] = rdot_tmp\n val[\"rdotn\"] = rdotn_tmp\n Pc_new = mod_response.exe_EULER(t, mf, Pc, func_mox, self.t_history, self.Vf_history, **cond)\n r_new_tmp, rdot_new_tmp, rdotn_new_tmp = mod_shape.exe(val, **cond)\n ## CFL [-] Courant number, which must be less than unity \n self.cond_cal[\"CFL\"] = np.abs(self.Vf_history.max()*self.cond_cal[\"dt\"]/self.cond_cal[\"dx\"])", "def cost_volume_profit():\r\n c = float(input(\"Please Enter Total Fixed Costs Value: \"))\r\n a = float(input(\"Please Enter Sale Price Per Unit: \"))\r\n b = float(input(\"Please Enter Variable Cost Per Unit: \"))\r\n ccm = float(a)-float(b)\r\n cuu = float(c)/float(ccm)\r\n ccmr = (float(ccm)/float(a))*float(100)\r\n cda = float(c)/(float(ccmr)/float(100))\r\n print \">> Your Contribution Margin is\",ccm\r\n print \">> Your Breakeven Sales in Units is\",round(cuu)\r\n print \">> Your Contribution Margin Ratio is\",ccmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",cda,\"\\n\"\r\n qq = input(\" Press 1 To Compute Target Profit\\n Press 2 To Compute Margin of Safety\\n Press 3 To Perform Sensitivity Analysis\\n Or Press 0 To Exit: \")\r\n if(qq == 1):\r\n dds = float(input(\"Please 
Enter Your Target Profit: \"))\r\n xxx = (float(c)+float(dds))/float(ccm)\r\n xxxx = (float(c)+float(dds))/(float(ccmr)/float(100))\r\n print \">> Your Target Profit in Units To Earn\",dds,\"$ is\",round(xxx)\r\n print \">> Your Target Profit in Dollars To Earn\",dds,\"$ is\",xxxx\r\n elif(qq == 0):\r\n print \"Canceled\"\r\n elif(qq == 2):\r\n xc = float(input(\"Please Enter Expected Sales in Units: \"))\r\n zzz = float(xc)-float(cuu)\r\n zzzz = float(zzz)*float(a)\r\n print \">> Your Margin of Safety in Units is\",round(zzz)\r\n print \">> Your Margin of Safety in Dollars is\",zzzz\r\n elif(qq == 3):\r\n i = input(\"Please Enter Total Fixed Costs Value: \")\r\n o = input(\"Please Enter Sale Price Per Unit: \")\r\n p = input(\"Please Enter Variable Cost Per Unit: \")\r\n n = 0\r\n for x,y,z in zip(i,o,p):\r\n cm = float(y)-float(z)\r\n uu = float(x)/float(cm)\r\n cmr = (float(cm)/float(y))*float(100)\r\n da = float(x)/(float(cmr)/float(100))\r\n n += 1\r\n print \"Your Results in Case\",int(n),\"is :\"\r\n print \">> Your Contribution Margin is\",cm\r\n print \">> Your Breakeven Sales in Units is\",round(uu)\r\n print \">> Your Contribution Margin Ratio is\",cmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",da,\"\\n\"\r\n if(cm > ccm):\r\n a = float(cm)-float(ccm)\r\n print \">> Your Contribution Margin Increased by\",a\r\n elif(ccm > cm):\r\n a = float(ccm)-float(cm)\r\n print \">> Your Contribution Margin Decreased by\",a\r\n if(uu > cuu):\r\n b = float(uu)-float(cuu)\r\n print \">> Your Breakeven Sales in Units Increased by\",round(b)\r\n elif(cuu > uu):\r\n b = float(cuu)-float(uu)\r\n print \">> Your Breakeven Sales in Units Decreased by\",round(b)\r\n if(cmr > ccmr):\r\n c = float(cmr)-float(ccmr)\r\n print \">> Your Contribution Margin Ratio Increased by\",c,\"%\"\r\n elif(ccmr > cmr):\r\n c = float(ccmr)-float(cmr)\r\n print \">> Your Contribution Margin Ratio Decreased by\",c,\"%\"\r\n if(da > cda):\r\n d = float(da)-float(cda)\r\n print \">> Your Breakeven Sales in Dollars Increased by\",d\r\n elif(cda > da):\r\n d = float(cda)-float(da)\r\n print \">> Your Breakeven Sales in Dollars Decreased by\",d,\"\\n\"", "def main():\r\n\r\n print(\"Program to add two numbers.\\n\")\r\n\r\n # two float values\r\n num1 = 1.5\r\n num2 = 4.5\r\n\r\n # Adding the two given numbers\r\n sum_val = float(num1) + float(num2)\r\n\r\n # Displaying the result\r\n print(\"The sum of given numbers is,\")\r\n print(\"{n1} + {n2} = {sm}\".format(n1=num1, n2=num2, sm=sum_val))\r\n\r\n return 0", "def cvp():\r\n c = float(input(\"Please Enter Total Fixed Costs Value: \"))\r\n a = float(input(\"Please Enter Sale Price Per Unit: \"))\r\n b = float(input(\"Please Enter Variable Cost Per Unit: \"))\r\n ccm = float(a)-float(b)\r\n cuu = float(c)/float(ccm)\r\n ccmr = (float(ccm)/float(a))*float(100)\r\n cda = float(c)/(float(ccmr)/float(100))\r\n print \">> Your Contribution Margin is\",ccm\r\n print \">> Your Breakeven Sales in Units is\",round(cuu)\r\n print \">> Your Contribution Margin Ratio is\",ccmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",cda,\"\\n\"\r\n qq = input(\" Press 1 To Compute Target Profit\\n Press 2 To Compute Margin of Safety\\n Press 3 To Perform Sensitivity Analysis\\n Or Press 0 To Exit: \")\r\n if(qq == 1):\r\n dds = float(input(\"Please Enter Your Target Profit: \"))\r\n xxx = (float(c)+float(dds))/float(ccm)\r\n xxxx = (float(c)+float(dds))/(float(ccmr)/float(100))\r\n print \">> Your Target Profit in Units To Earn\",dds,\"$ is\",round(xxx)\r\n print \">> 
Your Target Profit in Dollars To Earn\",dds,\"$ is\",xxxx\r\n elif(qq == 0):\r\n print \"Canceled\"\r\n elif(qq == 2):\r\n xc = float(input(\"Please Enter Expected Sales in Units: \"))\r\n zzz = float(xc)-float(cuu)\r\n zzzz = float(zzz)*float(a)\r\n print \">> Your Margin of Safety in Units is\",round(zzz)\r\n print \">> Your Margin of Safety in Dollars is\",zzzz\r\n elif(qq == 3):\r\n i = input(\"Please Enter Total Fixed Costs Value: \")\r\n o = input(\"Please Enter Sale Price Per Unit: \")\r\n p = input(\"Please Enter Variable Cost Per Unit: \")\r\n n = 0\r\n for x,y,z in zip(i,o,p):\r\n cm = float(y)-float(z)\r\n uu = float(x)/float(cm)\r\n cmr = (float(cm)/float(y))*float(100)\r\n da = float(x)/(float(cmr)/float(100))\r\n n += 1\r\n print \"Your Results in Case\",int(n),\"is :\"\r\n print \">> Your Contribution Margin is\",cm\r\n print \">> Your Breakeven Sales in Units is\",round(uu)\r\n print \">> Your Contribution Margin Ratio is\",cmr,\"%\"\r\n print \">> Your Breakeven Sales in Dollars is\",da,\"\\n\"\r\n if(cm > ccm):\r\n a = float(cm)-float(ccm)\r\n print \">> Your Contribution Margin Increased by\",a\r\n elif(ccm > cm):\r\n a = float(ccm)-float(cm)\r\n print \">> Your Contribution Margin Decreased by\",a\r\n if(uu > cuu):\r\n b = float(uu)-float(cuu)\r\n print \">> Your Breakeven Sales in Units Increased by\",round(b)\r\n elif(cuu > uu):\r\n b = float(cuu)-float(uu)\r\n print \">> Your Breakeven Sales in Units Decreased by\",round(b)\r\n if(cmr > ccmr):\r\n c = float(cmr)-float(ccmr)\r\n print \">> Your Contribution Margin Ratio Increased by\",c,\"%\"\r\n elif(ccmr > cmr):\r\n c = float(ccmr)-float(cmr)\r\n print \">> Your Contribution Margin Ratio Decreased by\",c,\"%\"\r\n if(da > cda):\r\n d = float(da)-float(cda)\r\n print \">> Your Breakeven Sales in Dollars Increased by\",d\r\n elif(cda > da):\r\n d = float(cda)-float(da)\r\n print \">> Your Breakeven Sales in Dollars Decreased by\",d,\"\\n\"", "def calc(self):\n return None", "def main():\n\tshow_program_intro()\n\tbyte_lines = read_rain_gauge_sunnyside_school()\n\t#print_rain_guage_output(byte_lines)\n\ttotals_dict = parse_regex_daily_total(byte_lines)\n\ttotals_list = sort_rain_dictionary(totals_dict)\n\thighest_rainfall = get_day_highest_rainfall(totals_list)\n\tprint_highest_rainfall(highest_rainfall)\n\tyear_highest_rain = get_year_with_most_rain(totals_list)\n\tprint_year_most_rain(year_highest_rain)", "def main():\n\n # Read parameter file.\n params = _read_parameter_file()\n\n # Get function names.\n fcts = _get_functions()\n\n # Get command line arguments.\n args = sys.argv[1:]\n\n # Print help.\n if not args or args == [\"help\"] or args == [\"--help\"]:\n _print_help(fcts)\n sys.exit()\n\n # Use lowercase to increase tolerance.\n fct_name = args[0].lower()\n\n # Get argument parser.\n parser = _get_argument_parser(fcts[fct_name])\n\n fcts[fct_name](parser, args, params)", "def _compute_(self):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fbgc = \"data/sim/{dn}/{rad}/exp.bgc.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), \n rad=self.rad, bm=self.bmnum)\n fflare = \"data/sim/{dn}/{rad}/exp.flare.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r 
\\\"UT=[{ut}];rad='{rad}';dic='{dic}';fbgc='{fbgc}';bm={bm};\\\n fflare='{fflare}';rt_1D_sim;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fbgc=fbgc, fflare=fflare)\n os.system(cmd)\n return", "def main():\n a = compute_area(20)\n #t.done()\n print(a)", "def main():\n args = get_arguments()\n\n mode = args.mode\n sdf_path = os.path.expandvars(args.sdf_path)\n summary_file = os.path.expanduser(args.summary_file)\n assert os.path.exists(sdf_path), \"sdf-path not exists: {}\".format(sdf_path)\n\n if mode == \"SUM\":\n summary(sdf_path, summary_file)\n elif mode == \"VAL\":\n validate(sdf_path, summary_file)", "def main():\n\n n = input(\"Enter the index of refraction between the two media: \")\n incidence_angle = input(\"Enter the angle of incidence (in degrees): \")\n # ----------------------------------\n # this block allows the user to use the default values of n and incidence angle by just pressing Enter\n\n if n == '' and incidence_angle == '': # if user enters nothing (an empty string)\n output = refraction_angle() # default values will be used\n elif n == '': # if user leaves n blank\n output = refraction_angle(incidence_angle=float(incidence_angle))\n elif incidence_angle == '': # if user leaves incidence angle blank\n output = refraction_angle(n=float(n))\n else: # if the user enters both n and incidence angle\n output = refraction_angle(float(n), float(incidence_angle))\n # ----------------------------------------------------------------------\n print(\"Angle of refraction:\", output, \"degrees\")\n\n #Kane Langmead 15/3/2021", "def main():\n parser = argparse.ArgumentParser(\n usage = '%(prog)s [OPTIONS] [ARGS...]',\n description='Calculate something',\n epilog='Contact simon.clematide@uzh.ch'\n )\n parser.add_argument('--version', action='version', version='0.99')\n parser.add_argument('-l', '--logfile', dest='logfile',\n help='write log to FILE', metavar='FILE')\n parser.add_argument('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_argument('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n parser.add_argument('-c', '--corpus_dir',\n action='store', dest='corpus_dir', default='corpus',\n help='directory with corpus data %(default)')\n parser.add_argument('-m', '--model_dir',\n action='store', dest='model_dir', default='model',\n help='directory with model data %(default)')\n parser.add_argument('-B', '--is_backward_lm',\n action='store_true', dest='is_backward_lm', default=False,\n help='build backward model')\n parser.add_argument('args', nargs='*')\n options = parser.parse_args()\n if options.logfile:\n logging.basicConfig(filename=logfile)\n if options.debug:\n logging.basicConfig(level=logging.DEBUG)\n\n process(options)", "def main():\n return 0", "def main():\n return 0", "def main():\n pass", "def run_suite():\n print(\"*Input*\")\n input_str = get_input()\n stripped = strip(input_str)\n\n print(\"*Transform*\")\n operation, transformed = transform(stripped)\n\n print(\"*Output*\")\n output(operation, transformed)", "def main():\n # Path used in assembly and previously discovered min year value.\n split_in_dir_path = \"../../data/split\"\n avg_5_in_dir_path = \"../../data/averaged_5\"\n avg_25_in_dir_path = \"../../data/averaged_25\"\n avg_50_in_dir_path = \"../../data/averaged_50\"\n dates_mat_path = \"../../data/dates_matrix/dates_matrix.npy\"\n min_year = 1962\n data_out_dir_path 
= \"../../data/rnn_set/data\"\n labels_out_dir_path = \"../../data/rnn_set/labels\"\n assemble_set(\n split_in_dir_path, avg_5_in_dir_path, avg_25_in_dir_path,\n avg_50_in_dir_path, dates_mat_path, min_year,\n data_out_dir_path, labels_out_dir_path\n )", "def main():\n test_problem3()", "def CaculateMineProductionAndValue(self,problemManager):\n \n # Mining Model\n self.DetermineMiningSystem(problemManager)\n \n # Processing Model\n self.DetermineProcessingSystem(problemManager)\n \n # G&A Model\n self.CalculateGandAExpenses(problemManager)\n \n # Infrastructure Model\n self.CalculateInfrastructureCosts(problemManager)\n \n # Cash flow\n self.CalculateBeforeTaxCashFlow(problemManager)\n self.CalculateTaxes(problemManager)\n self.CalculateAfterTaxCashFlow(problemManager)\n \n # EconomicIndicators\n self.CalculateEconomicIndicators(problemManager)\n \n value = self.theEconomicDataManager.atNPV\n \n return value", "def main():\n path_for_data = '/Users/avielshtern/Desktop/semb/iml/IML.HUJI-master/data/kc_house_data (1).csv'\n design_matrix, response_vector = load_data(path_for_data)\n putting_it_all_together_1(design_matrix, response_vector)\n putting_it_all_together_2(design_matrix, response_vector)\n feature_evaluation(design_matrix, response_vector)", "def main():\n radius = number_format(\"Please enter the radius of the circle: \")\n calculate_area(radius)", "def main(self):", "def main(inputdir):\n temp = solveio.read_input(inputdir)\n interpoltype = temp[3]\n xknown = temp[5][:, 0]\n potknown = temp[5][:, 1]\n pot = solver.interpolation(interpoltype, xknown, potknown)\n xinfo = temp[1]\n mass = temp[0]\n eigenrange = temp[2][0] - 1, temp[2][1] - 1\n catcher = solver.seqsolver(xinfo, pot, mass, eigenrange)\n energies, xx, wavefunc, delta = catcher\n wavefunc = solver.normalization(wavefunc, delta)\n expval = solver.expectedvalue(wavefunc, xx, delta)\n solveio.write_output(energies, xx, wavefunc, expval, pot, inputdir)", "def main():\n num1, num2 = float(input()), float(input())\n print(2*num1-num2)", "def main():\n # Default input parameters\n nelx, nely, volfrac, penalty, rmin, ft = cli.parse_args()\n cli.main(nelx, nely, volfrac, penalty, rmin, ft)\n # Vary the filter radius\n for scaled_factor in [0.25, 2]:\n cli.main(nelx, nely, volfrac, penalty, scaled_factor * rmin, ft)\n # Vary the penalization power\n for scaled_factor in [0.5, 4]:\n cli.main(nelx, nely, volfrac, scaled_factor * penalty, rmin, ft)\n # Vary the discreization\n for scale_factor in [0.5, 2]:\n cli.main(int(scale_factor * nelx), int(scale_factor * nely),\n volfrac, penalty, rmin, ft)", "def abs_units(wb_run,sample_run,mono_van,wb_mono,samp_rmm,samp_mass,ei_guess,rebin,map_file,monovan_mapfile,**kwargs): \n #available keywords\n #abs_units_van_range\n global reducer, rm_zero,inst_name,van_mass,bleed_switch,rate,pixels\n print 'DGreduce run for ',inst_name,'run number ',sample_run\n print 'Output will be in absolute units of mb/str/mev/fu'\n\n #reducer.van_rmm =50.94\n reducer.van_mass=van_mass\n #sample info\n reducer.sample_mass=samp_mass\n reducer.sample_rmm =samp_rmm\n print 'Using vanadium mass: ',van_mass\n print ' sample mass: ',samp_mass \n print ' sample_rmm : ',samp_rmm \n # check if mono-vanadium is provided as multiple files list or just put in brackets ocasionally\n if isinstance(mono_van,list):\n if len(mono_van)>1:\n raise IOError(' Can currently work only with single monovan file but list supplied')\n else:\n mono_van = mono_van[0];\n\n \n try:\n n,r=lhs('both')\n wksp_out=r[0]\n except:\n if sample_run 
== 0:\n #deal with the current run being parsed as 0 rather than 00000\n sample_run='00000'\n wksp_out=str(sample_run)+'.spe'\n \n start_time=time.time()\n \n if sample_run=='00000' and mtd.doesExist(inst_name+'00000.raw')==True:\n print 'Deleteing previous instance of temp data'\n DeleteWorkspace(Workspace=inst_name+'00000.raw')\n \n if kwargs.has_key('norm_method'):\n reducer.normalise_method = kwargs.get('norm_method')\n print 'Setting normalisation method to ', kwargs.get('norm_method')\n else:\n reducer.normalise_method = 'monitor-1'\n \n if kwargs.has_key('mask_run'):\n mask_run = kwargs.get('mask_run')\n print 'Using run ', kwargs.get('mask_run'),' for diag'\n else:\n mask_run=sample_run\n \n if kwargs.has_key('background'):\n reducer.background = kwargs.get('background')\n print 'Setting background option to ', kwargs.get('background')\n else:\n reducer.background = False\n \n if kwargs.has_key('fixei'):\n reducer.fix_ei = kwargs.get('fixei')\n print 'Setting fixei to ', kwargs.get('fixei')\n else:\n reducer.fix_ei = False\n \n if kwargs.has_key('save_format'):\n reducer.save_formats = kwargs.get('save_format')\n print 'Setting save format to ', kwargs.get('save_format')\n else:\n reducer.save_formats = ['.spe']\n #Set parameters for the run\n \n if kwargs.has_key('detector_van_range'):\n reducer.wb_integr_range = kwargs.get('detector_van_range')\n print 'Setting detector van int range to ', kwargs.get('detector_van_range')\n else:\n reducer.wb_integr_range=[20,100]\n \n #######DIAG###########\n if kwargs.has_key('bkgd_range'):\n background_range = kwargs.get('bkgd_range')\n print 'Setting background intergration to ', kwargs.get('bkgd_range')\n else:\n background_range=[15000,19000]\n \n if kwargs.has_key('tiny'):\n tinyval = kwargs.get('tiny')\n print 'Setting tiny ratelimit to ', kwargs.get('tiny')\n else:\n tinyval=1e-10\n \n if kwargs.has_key('large'):\n largeval = kwargs.get('large')\n print 'Setting large limit to ', kwargs.get('large')\n else:\n largeval=1e10\n \n if kwargs.has_key('diag_remove_zero'):\n sampzero = kwargs.get('diag_remove_zero')\n print 'Setting diag to reject zero backgrounds '\n else:\n sampzero =False\n \n if kwargs.has_key('diag_van_median_rate_limit_hi'):\n vanouthi = kwargs.get('diag_van_median_rate_limit_hi')\n print 'Setting diag_van_median_rate_limit_hi to ', kwargs.get('diag_van_median_rate_limit_hi')\n else:\n vanouthi=100\n \n if kwargs.has_key('diag_van_median_rate_limit_lo'):\n vanoutlo = kwargs.get('diag_van_median_rate_limit_lo')\n print 'Setting diag_van_median_rate_limit_lo to ', kwargs.get('diag_van_median_rate_limit_lo')\n else:\n vanoutlo=0.01\n \n if kwargs.has_key('diag_van_median_sigma_lo'):\n vanlo = kwargs.get('diag_van_median_sigma_lo')\n print 'Setting diag_van_median_sigma_lo to ', kwargs.get('diag_van_median_sigma_lo')\n else:\n vanlo=0.1\n \n if kwargs.has_key('diag_van_median_sigma_hi'):\n vanhi = kwargs.get('diag_van_median_sigma_hi')\n print 'Setting diag_van_median_sigma_hi to ', kwargs.get('diag_van_median_sigma_hi')\n else:\n vanhi=1.5\n \n if kwargs.has_key('diag_van_median_sigma'):\n vansig = kwargs.get('diag_van_median_sigma')\n print 'Setting diag_van_median_sigma to ', kwargs.get('diag_van_median_sigma')\n else:\n vansig=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_lo'):\n samplo = kwargs.get('diag_samp_median_sigma_lo')\n print 'Setting diag_samp_median_sigma_lo to ', kwargs.get('diag_samp_median_sigma_lo')\n else:\n samplo=0.0\n \n if kwargs.has_key('diag_samp_median_sigma_hi'):\n samphi = 
kwargs.get('diag_samp_median_sigma_hi')\n print 'Setting diag_samp_median_sigma_hi to ', kwargs.get('diag_samp_median_sigma_hi')\n else:\n samphi=2.0\n \n if kwargs.has_key('diag_samp_median_sigma'):\n sampsig = kwargs.get('diag_samp_median_sigma')\n print 'Setting diag_samp_median_sigma to ', kwargs.get('diag_samp_median_sigma')\n else:\n sampsig=3.0\n \n if kwargs.has_key('bleed'):\n bleed_switch = kwargs.get('bleed')\n print 'Setting bleed ', kwargs.get('bleed')\n else:\n print 'bleed set to default'\n #####diad end########\n \n \n if kwargs.has_key('det_cal_file'):\n reducer.det_cal_file = kwargs.get('det_cal_file')\n reducer.relocate_dets = True\n print 'Setting detector calibration file to ', kwargs.get('det_cal_file')\n else:\n print 'Setting detector calibration to detector block info from ', sample_run\n reducer.det_cal_file =None\n reducer.relocate_dets = False\n \n if mtd.doesExist(str(sample_run))==True and kwargs.has_key('det_cal_file')==False:\n print 'For data input type: workspace detector calibration must be specified'\n print 'use Keyword det_cal_file with a valid detctor file or run number'\n return\n \n \n if kwargs.has_key('one2one'):\n reducer.map_file =None\n map_file = \"\"\n print 'one2one selected'\n else:\n fileName, fileExtension = os.path.splitext(map_file)\n if (not fileExtension):\n map_file = map_file+'.map'\n reducer.map_file = map_file;\n \n if kwargs.has_key('hardmaskPlus'):\n HardMaskFile = kwargs.get('hardmaskPlus')\n print 'Use hardmask from ', HardMaskFile\n #hardMaskSpec=common.load_mask(HardMaskFile)\n #MaskDetectors(Workspace='masking',SpectraList=hardMaskSpec)\n else:\n HardMaskFile=None\n \n reducer.energy_bins = rebin\n #monovan info\n fileName, fileExtension = os.path.splitext(monovan_mapfile)\n if (not fileExtension):\n monovan_mapfile=monovan_mapfile+'.map'\n reducer.abs_map_file =monovan_mapfile \n\n if kwargs.has_key('abs_units_van_range'):\n reducer.monovan_integr_range = kwargs.get('abs_units_van_range')\n print 'Setting absolute units vanadium integration range to: ', kwargs.get('abs_units_van_range')\n else:\n reducer.monovan_integr_range=[-40,40]\n\n \n \n print 'output will be normalised to', reducer.normalise_method\n if (numpy.size(sample_run)) > 1 and kwargs.has_key('sum') and kwargs.get('sum')==True:\n #this sums the runs together before passing the summed file to the rest of the reduction\n #this circumvents the inbuilt method of summing which fails to sum the files for diag\n \n sumfilename=str(sample_run[0])+'sum'\n accum=sum_files(sumfilename, sample_run)\n #the D.E.C. 
tries to be too clever so we have to fool it into thinking the raw file is already exists as a workpsace\n RenameWorkspace(InputWorkspace=accum,OutputWorkspace=inst_name+str(sample_run[0])+'.raw')\n sample_run=sample_run[0]\n \n if kwargs.has_key('hardmaskOnly'):\n if (kwargs.get('hardmaskOnly')): \n totalmask = kwargs.get('hardmaskOnly')\n print 'Using hardmask from ', totalmask\n #next stable version can replace this with loadmask algoritum\n specs=diag_load_mask(totalmask)\n else:\n specs=\"\"\n \n CloneWorkspace(InputWorkspace=sample_run,OutputWorkspace='mask_wksp')\n MaskDetectors(Workspace='mask_wksp',SpectraList=specs)\n masking =mtd['mask_wksp']\n else:\n print '########### Run diagnose for sample run ##############'\n masking = reducer.diagnose(wb_run, \n sample=mask_run,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(masking) \n print 'first Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n if kwargs.has_key('use_sam_msk_on_monovan') and kwargs.get('use_sam_msk_on_monovan')==True:\n print 'applying sample run mask to mono van'\n reducer.spectra_masks=masking\n fail_list=get_failed_spectra_list(masking) \n else:\n print '########### Run diagnose for monochromatic vanadium run ##############'\n masking2 = reducer.diagnose(wb_mono, \n sample=mono_van,\n second_white = None,\n tiny=tinyval, \n huge=largeval, \n van_out_lo=vanoutlo,\n van_out_hi=vanouthi,\n van_lo=vanlo,\n van_hi=vanhi,\n van_sig=vansig,\n samp_zero=sampzero,\n samp_lo=samplo,\n samp_hi=samphi,\n samp_sig=sampsig,\n bkgd_range=background_range, \n variation=1.1,\n print_results=True,\n bleed_test=bleed_switch,\n bleed_maxrate=rate,\n bleed_pixels=pixels,\n hard_mask=HardMaskFile)\n \n total_mask=masking+masking2 \n reducer.spectra_masks=total_mask \n fail_list,n_total_spectra =get_failed_spectra_list_from_masks(total_mask)\n #fail_list=get_failed_spectra_list('total_mask')\n \n \n print 'Diag found ', len(fail_list),'bad spectra out of: ',n_total_spectra,' ws spectra'\n \n \n \n #Run the conversion first on the sample\n deltaE_wkspace_sample = reducer.convert_to_energy(sample_run, ei_guess, wb_run)\n\n \n if kwargs.has_key('mono_correction_factor'):\n absnorm_factor=kwargs.get('mono_correction_factor')\n print 'Using supplied correction factor for absolute units'\n else:\n print '##### Evaluate the integral from the monovan run and calculate the correction factor ######'\n print ' Using absolute units vanadion integration range : ', reducer.monovan_integr_range \n #now on the mono_vanadium run swap the mapping file\n reducer.map_file = monovan_mapfile \n deltaE_wkspace_monovan = reducer.convert_to_energy(mono_van, ei_guess, wb_mono)\n \n (absnorm_factorL,absnorm_factorSS,absnorm_factorP,absnorm_factTGP) = getAbsNormalizationFactor(deltaE_wkspace_monovan.getName(),str(reducer.monovan_integr_range[0]),str(reducer.monovan_integr_range[1])) \n \n print 'Absolute correction factor S^2 =',absnorm_factorSS,' Libisis: ',absnorm_factorL,' Puasonian: ',absnorm_factorP, ' TGP : ',absnorm_factTGP\n 
CreateSingleValuedWorkspace(OutputWorkspace='AbsFactor',DataValue=absnorm_factTGP)\n end_time=time.time()\n results_name=str(sample_run)+'.spe'\n ei= (deltaE_wkspace_sample.getRun().getLogData(\"Ei\").value)\n \n if mtd.doesExist('_wksp.spe-white')==True:\n DeleteWorkspace(Workspace='_wksp.spe-white')\n \n \n print 'Incident energy found for sample run ',ei,' meV'\n print 'Incident energy found for mono vanadium run ',ei,' meV'\n print 'Elapsed time =',end_time-start_time, 's'\n #get the name that convert to energy will use\n \n if mtd.doesExist(results_name)==False:\n RenameWorkspace(InputWorkspace=deltaE_wkspace_sample,OutputWorkspace=results_name)\n if results_name != wksp_out:\n RenameWorkspace(InputWorkspace=results_name,OutputWorkspace=wksp_out)\n Divide(LHSWorkspace=wksp_out,RHSWorkspace='AbsFactor',OutputWorkspace=wksp_out)\n DeleteWorkspace(Workspace='AbsFactor')\n return mtd[wksp_out]", "def run():\n return estimate(0,1,0)", "def main():\n user_input_of_coins() # can be used interactively just for fun, but use the test_suite to document your testing!\n i_steal_pennies_test_suite()", "def main(args):\n\n\t##############################################################################\n\t######## Pass user command line arguments to setup.py which will #############\n\t############# initialise some parameters for the analysis ###################\n\t##############################################################################\n\tinit_ = setup.initialise_user_input(args)\n\n\t##############################################################################\n\t######## Define system_ which is the object, of class nanoCISC, ##############\n\t######## which contains all relevant information about your nanoparticle ####\n\t##############################################################################\n\tsystem_ = nano_cisc.nanoCISC(init_.nano_particle, init_.anchors, init_.beta, init_.calcrange, \n init_.curves, init_.targetinc, init_.density) \n\t# initialise system_ as nanoCISC class here ^^^\n\n\t# If density is being calculated, define grid from grid class\n\tif args['density']:\n\t\tgrid=grids.grid(system_)\n\n\n\t##############################################################################\n\t################ Process trajectory, frame by frame ##########################\n\t##############################################################################\n\n\tfor ts in init_.u.trajectory: # loop through trajectory frames here \n\t\tprint \"Processing snapshot %d \" % (ts.frame)\n\n\t\t# Array for calculating intrinsic density is initialised to {0}\n\t\tintrinsic_count=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Array that stores the instantaneous volume of each spatial interval is initialised to {0}\n\t\tvolume_at_dist=np.zeros( ( np.ceil( 3 * system_.calculation_range).astype(np.int) ,len(system_.density) ), dtype = np.float32) \n\n\t\t# Centre of mass position is updated\n\t\tsystem_.update_com()\n\n\t\t# Vectors describing the anchor points are updated \n\t\tsystem_.update_anchors() \n\n\t\t# Nanoparticle depth values are updated\n\t\tsystem_.update_surface() \t\n\n\t\tif args['XYZsurface']:\n\t\t\tsystem_.write_surface(init_.f_visualise_surface) # write micelle surface to xyz file\n \n \t\tif args['density']: \n \t\t\tgrid.update_volume_estimate(volume_at_dist, system_) # volume estimate is updated for snapshot\n\t\t\tsystem_.calculate_density(intrinsic_count, volume_at_dist) # calculate density 
here\n\n\t\tsystem_.frames_processed += 1\n\n\t##################################\n\t##### Print results to files #####\n\t##################################\n\tif args['density']:\n\t\tsystem_.print_intrinsic_density(init_.f_intrinsic_density_out)\n\t\tsystem_.print_radial_density()\n\n\n\tprint \"Program finished successfully!!!\\n\"", "def _compute_(self, case):\n dic = \"data/sim/{dn}/{rad}/\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"), rad=self.rad)\n fn = \"data/sim/{dn}/{rad}/exp.{cse}.bm({bm}).elv(<elv>).csv\".format(dn=self.event.strftime(\"%Y.%m.%d.%H.%M\"),\n rad=self.rad, bm=self.bmnum, cse=case)\n cmd = \"export DIR_MODELS_REF_DAT=/home/shibaji/Collaboration_NCAR/code_rt_sd/pharlap/pharlap_4.1.3/dat;\\\n cd pharlap;\\\n matlab -nodisplay -nodesktop -nosplash -nojvm -r \\\"UT=[{ut}];rad='{rad}';dic='{dic}';bm={bm};\\\n fn='{fn}';cse='{cse}';rt_1D_sen;exit;\\\"\".format(ut=self.event.strftime(\"%Y %m %d %H %S\"), rad=self.rad,\n dic=dic, bm=self.bmnum, fn=fn, cse=case)\n os.system(cmd)\n return", "def main():\n # Load and prepare dataset.\n ts_list = load_energy_weather_data()\n\n generator = gnt_class.FeatureGeneration(transformations='centroids') #gnt_class.Monotone()\n\n # feature selection model can be defined in the same way. If you don't use any, just leave as is\n selector = sel_class.FeatureSelection(on=False) #\n # first argument is your model class, then follow optional parameters as keyword arguments\n frc_model = frc_class.CustomModel(RandomForestRegressor, name=\"RF\")\n #frc_class.CustomModel(Lasso, name=\"Lasso\", alpha=0.001)\n\n # train your model:\n model = demo_train(ts_list, frc_model=frc_model, fg_mdl=generator, fs_mdl=selector, verbose=VERBOSE)\n\n # evaluate errors on the test set\n train_error, train_std = competition_errors(model=model, names=TRAIN_FILE_NAMES, y_idx=TS_IDX)\n test_error, test_std = competition_errors(model=model, names=TEST_FILE_NAMES, y_idx=TS_IDX)\n\n\n print(\"Average MAPE across time series: train = {} with std {}, test = {} with std {}\".\n format(train_error, train_std, test_error, test_std))\n\n return train_error, test_error", "def evaluate(self) -> int:" ]
[ "0.6921429", "0.6770756", "0.67365", "0.669798", "0.6433398", "0.64304745", "0.64164007", "0.6388115", "0.6387682", "0.6349823", "0.6285624", "0.6284488", "0.62795794", "0.6257987", "0.6253564", "0.62122047", "0.61981833", "0.61678576", "0.61678576", "0.6157993", "0.6157345", "0.6103981", "0.6082262", "0.607773", "0.60681415", "0.6067518", "0.60408497", "0.5986319", "0.59731215", "0.5953722", "0.5889853", "0.5886578", "0.58862334", "0.58844745", "0.58763945", "0.58735794", "0.58719087", "0.5860067", "0.585554", "0.58297956", "0.5826246", "0.5802113", "0.5799185", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5796721", "0.5795034", "0.57810456", "0.5775428", "0.5771869", "0.5769916", "0.57589984", "0.57543224", "0.5754098", "0.57491815", "0.5747792", "0.57359135", "0.5727136", "0.5725334", "0.572268", "0.5721177", "0.5719909", "0.570997", "0.570997", "0.5700627", "0.5685302", "0.56826687", "0.56823397", "0.5678675", "0.5664939", "0.566189", "0.5659036", "0.5650216", "0.56439877", "0.56385994", "0.5637027", "0.56346774", "0.56320155", "0.5629045", "0.56242186", "0.56231123", "0.56202286" ]
0.0
-1
Generate training data by playing games vs self. Gathers experience tuples over n_episodes and pushes them to the agent's replay buffer.
def self_play(self, n_episodes): eps = self.eps(self.agent.learning_iters) experiences = self_play_episodes(self.mdp, self.agent, n_episodes, eps) for state, action, reward, next_state, done in experiences: self.agent.replay_buffer.push(state, action, reward, next_state, done)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_data(self, episodes, agents, batch_index):\n\n\t\tstate_dict = {}\n\t\talpha = 0.95\n\n\t\t# iterate over episodes\n\t\tfor e in range(episodes):\n\t\n\t\t\t# choose 2 random agents to play\n\t\t\tperm = np.random.permutation(len(agents))\n\n\t\t\tagent1 = agents[perm[0]]\n\t\t\tagent2 = agents[perm[1]]\n\n\t\t\tagent1.player_index = 1\n\t\t\tagent2.player_index = 2\n\t\t\tplayers = [agent1, agent2]\n\n\t\t\tprint('Episode {0}/{1} : {2} vs {3}'.format(\n\t\t\t\te + 1, episodes, agent1.name, agent2.name), flush=True)\n\n\t\t\t# initialize the state\n\t\t\tcurrent_state = State()\n\t\t\tlast_state = None\n\n\t\t\t# discarding boolean\n\t\t\tdiscard = False\n\n\t\t\t# keep track of the states\n\t\t\tstates_track_1 = []\n\t\t\tstates_track_2 = []\n\n\t\t\t# final rewards variables\n\t\t\toutcome_1 = 0\n\t\t\toutcome_2 = 0\n\n\t\t\t# start the episode\n\t\t\twhile True:\n\n\t\t\t\t# ending condition\n\t\t\t\tif current_state == None:\n\t\t\t\t\twindex = last_state.winner\n\t\t\t\t\t# print(self.state_descriptor(last_state, 1))\n\t\t\t\t\t# print(self.state_descriptor(last_state, 2))\n\t\t\t\t\t# print(windex)\n\n\t\t\t\t\tif windex == 1:\n\t\t\t\t\t\toutcome_1 = 1\n\t\t\t\t\t\toutcome_2 = -1\n\n\t\t\t\t\tif windex == 2:\n\t\t\t\t\t\toutcome_1 = -1\n\t\t\t\t\t\toutcome_2 = 1\n\n\t\t\t\t\tbreak\n\n\t\t\t\t# next move\n\t\t\t\telse:\n\n\t\t\t\t\t# verify for infinite loops\n\t\t\t\t\tif states_track_1.count(self.state_descriptor(current_state, 1)) > 2:\n\t\t\t\t\t\tdiscard = True\n\t\t\t\t\t\t# print('Loop')\n\t\t\t\t\t\t# print(self.state_descriptor(current_state, 1))\n\t\t\t\t\t\tbreak\n\n\t\t\t\t\tstates_track_1.append(self.state_descriptor(current_state, 1))\n\t\t\t\t\tstates_track_2.append(self.state_descriptor(current_state, 2))\n\n\t\t\t\t\tto_move = current_state.player_to_move - 1\n\t\t\t\t\tlast_state = current_state\n\t\t\t\t\tcurrent_state = players[to_move].make_move(current_state)\n\n\t\t\t# get rid of games that ended up in infinite loop\n\t\t\tif discard:\n\t\t\t\tcontinue\n\n\t\t\t# update states from the end to the beginning\n\t\t\tstates_track_1.reverse()\n\t\t\tstates_track_2.reverse()\n\n\t\t\t# update the counter and value for each state\n\t\t\tfor i in range(len(states_track_1)):\n\n\t\t\t\t# update for the first player\n\t\t\t\ttry:\n\t\t\t\t\tstate_dict[states_track_1[i]][0] += 1\n\t\t\t\t\tstate_dict[states_track_1[i]][1] += (alpha ** i) * outcome_1\n\t\t\t\texcept KeyError:\n\t\t\t\t\tstate_dict[states_track_1[i]] = [1, (alpha ** i) * outcome_1]\n\n\t\t\t\t# update for the second player\n\t\t\t\ttry:\n\t\t\t\t\tstate_dict[states_track_2[i]][0] += 1\n\t\t\t\t\tstate_dict[states_track_2[i]][1] += (alpha ** i) * outcome_2\n\t\t\t\texcept KeyError:\n\t\t\t\t\tstate_dict[states_track_2[i]] = [1, (alpha ** i) * outcome_2]\n\n\n\t\t\tif (e + 1) % 25 == 0 or e + 1 == episodes:\n\t\t\t\tprint('Saving data...', flush=True)\n\n\t\t\t\twith open('reinforcement_learning_data/states_file_' + str(batch_index) + '.txt', 'w') as f:\n\n\t\t\t\t\taugmentations = 6\n\n\t\t\t\t\tboard_data = np.empty((augmentations * len(state_dict),\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE,\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE,\n\t\t\t\t\t\t\t\t\t\t\tState.BOARD_SIZE), dtype=np.float32)\n\n\t\t\t\t\tcows_data = np.empty((augmentations * len(state_dict),\n\t\t\t\t\t\t\t\t\t\t 2), dtype=np.float32)\n\n\t\t\t\t\tlabels = np.empty((augmentations * len(state_dict),), dtype=np.float32)\n\n\t\t\t\t\tcounter = 0\n\t\t\t\t\tfor key, value in state_dict.items():\n\t\t\t\t\t\t\n\t\t\t\t\t\tf.write('{0} 
: {1}\\n'.format(key, value))\n\n\t\t\t\t\t\t# mirror board\n\t\t\t\t\t\tfor fl in range(1, 3):\n\t\t\t\t\t\t\tboard_data[counter] = np.flip(np.asarray(key[0]), axis=fl)\n\t\t\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\t\t\tlabels[counter] = value[1] / value[0]\n\n\t\t\t\t\t\t\tcounter += 1\n\n\t\t\t\t\t\t# rotate board\n\t\t\t\t\t\tfor rot in range(4):\n\t\t\t\t\t\t\tboard_data[counter] = np.rot90(np.asarray(key[0]), k=rot, axes=(1, 2))\n\t\t\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\t\t\tlabels[counter] = value[1] / value[0]\n\n\t\t\t\t\t\t\tcounter += 1\n\n\t\t\t\t\tboard_data.dump('reinforcement_learning_data/board_data_' + str(batch_index) + '.dat')\n\t\t\t\t\tcows_data.dump('reinforcement_learning_data/cows_data_' + str(batch_index) + '.dat')\n\t\t\t\t\tlabels.dump('reinforcement_learning_data/labels_' + str(batch_index) + '.dat')\n\n\n\t\t\t\t# rotate board\n\t\t\t\tfor r in range(4):\n\t\t\t\t\tboard_data[counter] = np.rot90(np.asarray(key[0]), k=r, axes=(1, 2))\n\t\t\t\t\tcows_data[counter] = np.asarray([key[1], key[2]])\n\t\t\t\t\tlabels[counter] = value[1] / value[0]", "def train(self, num_episodes=10000):\n\n self.game.restart()\n\n self.exp_states = defaultdict(int)\n\n for i in tqdm(range(num_episodes)):\n\n self.game.deal_cards()\n\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='explore')\n\n # Bookkeep visited states (?)\n player_state_str = np.array2string(player_state)\n self.exp_states[player_state_str] += 1\n\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n self.game.set_player_action(player_action)\\\n .set_opponent_action(opponent_action)\n\n player_score, opponent_score = self.game.get_scores()\n\n reward = self._get_reward(player_score, opponent_score)\n self.player.learn(player_state,\n player_action,\n reward)\n self.player.learn(opponent_state,\n opponent_action,\n -reward)\n \n print(\"Training done!\")", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, 
self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", 
num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def setup_training(self):\n print('setup training called')\n self.steps_done = 0\n self.current_episode_num = 1\n self.total_reward = 0\n\n # self.optimizer = optim.RMSprop(policy_net.parameters())\n self.memory = ReplayMemory(300000)\n self.total_reward_history = []\n # self.loss_history = []\n self.positions = []\n self.n_destroyed_crates = 0\n self.is_in_bomb_range = False", "def train_experience_replay(self, epochs, batch_size, iterations_per_epoch, capacity, n_obs, **kwargs):\n\n # Initialize losses dictionary and memory replay buffer\n losses = dict()\n mem = MemoryReplayBuffer(capacity)\n\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n\n for 
it in range(1, iterations_per_epoch+1):\n \n # Determine n_obs and generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n # Simulate and add to buffer\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n mem.store(params, sim_data)\n\n # Sample from buffer\n params, sim_data = mem.sample()\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n \n # Store loss into dictionary\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n 
nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 
1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def train_epoch(self):\n # We can't validate a winner for submissions generated by the learner,\n # so we will use a winner-less match when getting rewards for such states\n blank_match = {\"winner\":None}\n\n learner_submitted_actions = 0\n null_actions = 0\n\n # Shuffle match presentation order\n if(self.N_TEMP_TRAIN_MATCHES):\n path_to_db = \"../data/competitiveMatchData.db\"\n sources = {\"patches\":self.TEMP_TRAIN_PATCHES, \"tournaments\":[]}\n print(\"Adding {} matches to training pool from {}.\".format(self.N_TEMP_TRAIN_MATCHES, path_to_db))\n temp_matches = pool.match_pool(self.N_TEMP_TRAIN_MATCHES, path_to_db, randomize=True, match_sources=sources)[\"matches\"]\n else:\n temp_matches = []\n data = self.training_data + temp_matches\n\n shuffled_matches = random.sample(data, len(data))\n for match in shuffled_matches:\n for team in self.teams:\n # Process match into individual experiences\n experiences = mp.process_match(match, team)\n for pick_id, experience in enumerate(experiences):\n # Some experiences include NULL submissions (usually missing bans)\n # The learner isn't allowed to submit NULL picks so skip adding these\n # to the buffer.\n state,actual,_,_ = experience\n (cid,pos) = actual\n if cid is None:\n null_actions += 1\n continue\n # Store original experience\n self.replay.store([experience])\n self.step_count += 1\n\n # Give model feedback on current estimations\n if(self.step_count > self.observations):\n # Let the network predict the next action\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[state.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[state.get_valid_actions()]}\n q_vals = self.ddq_net.sess.run(self.ddq_net.online_ops[\"valid_outQ\"], feed_dict=feed_dict)\n sorted_actions = q_vals[0,:].argsort()[::-1]\n top_actions = sorted_actions[0:4]\n\n if(random.random() < self.epsilon):\n pred_act = random.sample(list(top_actions), 1)\n else:\n # Use model's top prediction\n pred_act = [sorted_actions[0]]\n\n for action in pred_act:\n (cid,pos) = state.format_action(action)\n if((cid,pos)!=actual):\n pred_state = deepcopy(state)\n pred_state.update(cid,pos)\n r = get_reward(pred_state, blank_match, (cid,pos), actual)\n new_experience = (state, (cid,pos), r, pred_state)\n\n self.replay.store([new_experience])\n learner_submitted_actions += 1\n\n if(self.epsilon > 0.1):\n # Reduce epsilon over time\n self.epsilon -= self.eps_decay_rate\n\n # Use minibatch sample to update online network\n if(self.step_count > self.pre_training_steps):\n self.train_step()\n\n if(self.step_count % self.target_update_frequency == 0):\n # After the online network has been updated, update target network\n _ = self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_update\"])\n\n # Get training loss, training_acc, and 
val_acc to return\n loss, train_acc = self.validate_model(self.training_data)\n _, val_acc = self.validate_model(self.validation_data)\n return (loss, train_acc, val_acc)", "def train(self, iters, n_episodes):\n for i in range(iters):\n self.self_play(n_episodes)\n self.learn()", "def __init__ (self,gameName,total_episodes=50,train_or_test=2):\r\n\t\t#additional param:- doLoadNetwork=True\r\n\t\r\n\t\tself.createGame(gameName)\r\n\t\t\r\n\t\t### Training Hyperparameters\r\n\t\tself.TOT_EPISODES = total_episodes \t#no. of episodes/epochs\r\n\t\tself.MAX_STEPS = 50000 \t\t \t#max steps taken every episode/epoch\r\n\t\t\r\n\t\t### Preprocessing Hyperparameters\r\n\t\tself.stack_size = 4\t\t\t\t\t#stacking 3 frames at once.\t\t\t\t\t\r\n\t\tself.stacked_frames = deque([np.zeros((84,84), dtype=np.int) \r\n\t\t\t\t\tfor i in range(self.stack_size)], maxlen=4)\t\r\n\t\t\t\t\t\r\n\t\t### Model\r\n\t\tself.dqn = DDQN(self.action_space)\r\n\t\t\r\n\t\tself.path = \"saved.h5\"\r\n\t\t\t\t\r\n\t\t# train agent or simulate the game.\r\n\t\tif train_or_test == 1:\r\n\t\t\tself.trainAgent()\r\n\t\telif train_or_test == 2:\r\n\t\t\t#load network before simulating.\r\n\t\t\tself.load_network()\r\n\t\t\tself.simulate()", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the ff code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t til end of episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # 
train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n old_counts = {game.__class__.__name__: Counter() for game in self.games}\n else: ## load the counts from the file\n log.info('Loading counts...')\n f.seek(0)\n str_counts = f.read()\n # print('STRING OF JSON:', type(str_counts), str_counts)\n old_counts = json.loads(str_counts)\n old_counts = {game: Counter(v) for game, v in old_counts.items()}\n master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n # countiung logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n log.info('Writing...')\n f.truncate(0) #clear file\n json.dump(master_counts, f)\n log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # complete random exploration 500 episodes, \n # then decrase exploration till epsilon less than epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is a array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n 
done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def Collecting_experiences(self)-> None:\n for epoch_no in range(self.epochs):\n print(\"EPOCH %d\", epoch_no + 1)\n \n #beam_dqn = self.beam_min + int(self.beam_max * epoch_no/self.epochs)\n #egreed = self.egreed_max*(1 - epoch_no/(1.1*self.epochs))\n #self.gamma = self.gamma_max*(1 - epoch_no/(2*self.epochs))\n\n beam_dqn = 1\n egreed = 0.5\n #self.gamma = self.gamma_max\n self.gamma = 0.6\n\n self.tb_writer.add_scalar(\"parameters/beam_dqn\",\n beam_dqn, epoch_no)\n self.tb_writer.add_scalar(\"parameters/egreed\",\n egreed, epoch_no)\n self.tb_writer.add_scalar(\"parameters/gamma\",\n self.gamma, epoch_no)\n if beam_dqn > self.actions_size:\n print(\"The beam_dqn cannot exceed the action size!\")\n print(\"then the beam_dqn = action size\")\n beam_dqn = self.actions_size\n\n print(' beam_dqn, egreed, gamma: ', beam_dqn, egreed, self.gamma)\n for _, data_set in self.data_to_train_dqn.items():\n \n valid_iter = make_data_iter(\n dataset=data_set, batch_size=1, batch_type=self.batch_type,\n shuffle=False, train=False)\n #valid_sources_raw = data_set.src\n # disable dropout\n #self.model.eval()\n\n i_sample = 0\n for valid_batch in iter(valid_iter):\n freeze_model(self.model)\n batch = Batch(valid_batch\n , self.pad_index, use_cuda=self.use_cuda)\n \n encoder_output, encoder_hidden = self.model.encode(\n batch.src, batch.src_lengths,\n batch.src_mask)\n # if maximum output length is not globally specified, adapt to src len\n \n if 
self.max_output_length is None:\n self.max_output_length = int(max(batch.src_lengths.cpu().numpy()) * 1.5)\n \n batch_size = batch.src_mask.size(0)\n prev_y = batch.src_mask.new_full(size=[batch_size, 1], fill_value=self.bos_index,\n dtype=torch.long)\n output = []\n hidden = self.model.decoder._init_hidden(encoder_hidden)\n prev_att_vector = None\n finished = batch.src_mask.new_zeros((batch_size, 1)).byte()\n\n # print(\"Source_raw: \", batch.src)\n # print(\"Target_raw: \", batch.trg_input)\n # print(\"y0: \", prev_y)\n \n \n \n exp_list = []\n # pylint: disable=unused-variable\n for t in range(self.max_output_length):\n if t != 0:\n if self.state_type == 'hidden':\n state = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n if t == 0:\n state = hidden[0].squeeze(1).detach().cpu().numpy()[0]\n else:\n state = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n \n # decode one single step\n logits, hidden, att_probs, prev_att_vector = self.model.decoder(\n encoder_output=encoder_output,\n encoder_hidden=encoder_hidden,\n src_mask=batch.src_mask,\n trg_embed=self.model.trg_embed(prev_y),\n hidden=hidden,\n prev_att_vector=prev_att_vector,\n unroll_steps=1)\n # logits: batch x time=1 x vocab (logits)\n if t != 0:\n if self.state_type == 'hidden':\n state_ = torch.cat(hidden, dim=2).squeeze(1).detach().cpu().numpy()[0]\n else:\n state_ = prev_att_vector.squeeze(1).detach().cpu().numpy()[0]\n \n # if t == 0:\n # print('states0: ', state, state_)\n\n # greedy decoding: choose arg max over vocabulary in each step with egreedy porbability\n \n if random.uniform(0, 1) < egreed:\n i_ran = random.randint(0,beam_dqn-1)\n next_word = torch.argsort(logits, descending=True)[:, :, i_ran]\n else:\n next_word = torch.argmax(logits, dim=-1) # batch x time=1\n # if t != 0:\n a = prev_y.squeeze(1).detach().cpu().numpy()[0]\n #a = next_word.squeeze(1).detach().cpu().numpy()[0]\n \n # print(\"state \",t,\" : \", state )\n # print(\"state_ \",t,\" : \", state_ )\n # print(\"action \",t,\" : \", a )\n # print(\"__________________________________________\")\n\n output.append(next_word.squeeze(1).detach().cpu().numpy())\n\n #tup = (self.memory_counter, state, a, state_)\n \n \n prev_y = next_word\n # check if previous symbol was <eos>\n is_eos = torch.eq(next_word, self.eos_index)\n finished += is_eos\n if t != 0:\n self.memory_counter += 1\n tup = (self.memory_counter, state, a, state_, 1)\n exp_list.append(tup)\n \n #print(t)\n # stop predicting if <eos> reached for all elements in batch\n if (finished >= 1).sum() == batch_size:\n a = next_word.squeeze(1).detach().cpu().numpy()[0]\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, np.zeros([self.state_size]), 0)\n exp_list.append(tup)\n #print('break')\n break\n if t == self.max_output_length-1:\n #print(\"reach the max output\")\n a = 0\n self.memory_counter += 1\n #tup = (self.memory_counter, state_, a, np.zeros([self.state_size]) , is_eos[0,0])\n tup = (self.memory_counter, state_, a, -1*np.ones([self.state_size]), 1)\n exp_list.append(tup)\n \n \n \n \n #Collecting rewards\n hyp = np.stack(output, axis=1) # batch, time\n\n if epoch_no == 0:\n if i_sample == 0 or i_sample == 3 or i_sample == 6:\n #print(i_sample)\n r = self.Reward(batch.trg, hyp, show=True) # 1 , time-1 \n else:\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time -1 \n else:\n #print(\"aaaa - \",i_sample)\n r = self.Reward(batch.trg, hyp, show=False) # 1 , time 
-1 \n \n # if i_sample == 0 or i_sample == 3 or i_sample == 6:\n # print(\"\\n Sample Collected: \", i_sample, \"-------------Target vs Eval_net prediction:--Raw---and---Decoded-----\")\n # print(\"Target: \", batch.trg, decoded_valid_out_trg)\n # print(\"Eval : \", stacked_output, decoded_valid_out)\n # print(\"Reward: \", r, \"\\n\")\n \n i_sample += 1\n self.store_transition(exp_list, r)\n \n #Learning.....\n if self.memory_counter > self.mem_cap - self.max_output_length:\n self.learn()\n \n self.tb_writer.close()", "def train():\n warnings.filterwarnings(\"ignore\")\n\n player = AlphaZeroPlayer(selfplay=1, init=c.INIT)\n game = Game(c.SIZE, c.PIECE, 1)\n\n record = {\"loss\": [], \"value_output_loss\": [], \"policy_output_loss\": []}\n for i in range(c.SELF_PLAY_EPOCHS):\n states, move_probs, values = game.self_play(player)\n\n if c.AUGMENT:\n states, values, move_probs = augment_data(states, values, move_probs)\n\n print(\"Self-play turn {0}\".format(i + 1))\n\n loss = player.update(states, values, move_probs)\n print(\"Network update >> loss:{0}, value_loss:{1}, policy_loss:{2}\".format(loss[0], loss[1], loss[2]))\n\n record[\"loss\"].append(loss[0])\n record[\"value_output_loss\"].append(loss[1])\n record[\"policy_output_loss\"].append(loss[2])\n\n if i % 20 == 0:\n player.save_model()\n\n player.save_model()\n df = pd.DataFrame.from_dict(record)\n df.to_csv('alpha/data/loss.csv', encoding='utf-8', index=False)", "def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample a minibatch_size of random episode with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: 
np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration", "def train(self,\n num_episodes = 100,\n num_steps = 500000,\n max_steps_per_episode = 10000,\n target_interval = 10000,\n learning_interval = 4,\n frame_skip = 1,\n warmup_steps = None,\n pretrain_steps = None,\n output_freq = 50,\n save_freq = 5, \n store_memory = False):\n \n # prefill memory with random transitions if requested\n if warmup_steps is not None:\n self._random_warmup(warmup_steps)\n \n # pretrain the agent on its on own memory\n if pretrain_steps is not None:\n self._pretrain(pretrain_steps, target_interval)\n \n # logging initialization\n self._score, self._q_values, self._losses = 0., [], []\n raw_frames = np.zeros(shape = (max_steps_per_episode, *self.env._unprocessed_frame.shape), dtype = np.uint8)\n\n episode_idx = 0\n while episode_idx < num_episodes or self._step_counter < num_steps:\n # reset environment and get first state\n self._start_episode()\n \n for i in range(max_steps_per_episode):\n \n #-------------------------------------------------------------------------------#\n #####################\n # Interactive Phase #\n #####################\n \n # choose an action, observe reactions of the environment and\n # add this experience to the agent's memory \n if self._step_counter % frame_skip == 0: \n action = self._make_decision()\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n # update current state\n self._current_state[0, :(self.num_stacked_frames-1)] = self._current_state[0, 1:]\n self._current_state[0, self.num_stacked_frames-1] = new_frame\n #-------------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------------#\n ##################\n # Learning Phase #\n ##################\n \n # perform a parameter update of the current policy model\n if self._step_counter % learning_interval == 0:\n self._batch_update()\n \n # update the target model\n if self._step_counter % target_interval == 0:\n self._update_target_model()\n #-------------------------------------------------------------------------------#\n \n # logging\n self._score += self.env._unprocessed_reward\n raw_frames[i] = self.env._unprocessed_frame\n \n \n self._step_counter += 1\n \n if self.env.was_real_done:\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n break\n \n if done:\n self.env.reset()\n \n \n if not self.env.was_real_done:\n self.memory.add_experience(action, reward, new_frame, 1, True)\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n \n if episode_idx%(num_episodes/output_freq)==0:\n validation_score, validation_frames = self.test(record = True, max_steps_per_episode = max_steps_per_episode)\n #validation_score, validation_frames = 0, []\n lower_idx = int(clip(episode_idx-(num_episodes/output_freq)+1, 0, num_episodes-1))\n self.logger.show_progress(lower_idx, episode_idx, validation_score, validation_frames, self.policy_network.model)\n \n if episode_idx%(num_episodes/save_freq)==0:\n 
self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)\n \n \n\n episode_idx += 1 \n print('==========================\\ntraining session completed\\n==========================\\n\\n\\n=======\\nSummary\\n======='\n )\n self.logger.show_progress(0, num_episodes, summary = True)\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)", "def train(self, n_episodes):\n for episode in trange(n_episodes):\n policy_loss, entropy, episode_reward = self.train_step()\n self.writer.add_scalar('policy_loss', policy_loss, episode)\n self.writer.add_scalar('entropy', entropy, episode)\n self.writer.add_scalar('episode_reward', episode_reward, episode)", "def fit(self, env, env_eval, num_iterations, max_episode_length=None):\n train_counter = 0;\n eval_res_hist = np.zeros((1,3));\n\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n setpoint_this = ob_this[6:8]\n \n this_ep_length = 0;\n flag_print_1 = True;\n flag_print_2 = True;\n action_counter = 0;\n \n for step in range(num_iterations):\n #Check which stage is the agent at. If at the collecting stage,\n #then the actions will be random action.\n if step <= self._num_burn_in:\n if flag_print_1:\n logging.info (\"Collecting samples to fill the replay memory...\");\n flag_print_1 = False;\n\n action_mem = self.select_action(None, stage = 'collecting');\n action = self._policy.process_action(setpoint_this, action_mem)\n\n else:\n if flag_print_2:\n logging.info (\"Start training process...\");\n flag_print_2 = False;\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n \n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n\n action_mem = self.select_action(state_this_net, stage = 'training')\n # covert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem) \n\n action_counter = action_counter + 1 if action_counter < 4 else 1;\n\n time_next, ob_next, is_terminal = env.step(action)\n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n \n setpoint_next = ob_next[6:8]\n \n #check if exceed the max_episode_length\n if max_episode_length != None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n\n #save sample into memory \n self._memory.append(Sample(ob_this, action_mem, ob_next\n , is_terminal))\n\n \n #Check which stage is the agent at. 
If at the training stage,\n #then do the training\n if step > self._num_burn_in:\n #Check the train frequency\n if action_counter % self._train_freq == 0 \\\n and action_counter > 0:\n action_counter = 0;\n #Eval the model\n if train_counter % self._eval_freq == 0:\n eval_res = self.evaluate(env_eval, self._eval_epi_num\n , show_detail = True);\n eval_res_hist = np.append(eval_res_hist\n , np.array([step\n , eval_res[0], eval_res[1]]).reshape(1, 3)\n , axis = 0);\n np.savetxt(self._log_dir + '/eval_res_hist.csv'\n , eval_res_hist, delimiter = ',');\n logging.info('Global Step: %d, evaluation average reward is %0.04f, average episode length is %d.' % (step, eval_res[0], eval_res[1]));\n \n \n #Sample from the replay memory\n samples = self._preprocessor.process_batch(\n self._memory.sample(self._batch_size), \n self._min_array, self._max_array);\n #Construct target values, one for each of the samples \n #in the minibatch\n samples_x = None;\n targets = None;\n for sample in samples:\n sample_s = np.append(sample.obs[0:13], sample.obs[14:]).reshape(1,16)\n sample_s_nex = np.append(sample.obs_nex[0:13], \n sample.obs_nex[14:]).reshape(1,16)\n sample_r = self._preprocessor.process_reward(sample.obs_nex[12:15])\n\n target = self.calc_q_values(sample_s);\n a_max = self.select_action(sample_s_nex, stage = 'greedy');\n \n \n\n if sample.is_terminal:\n target[0, sample.a] = sample_r;\n else:\n target[0, sample.a] = (sample_r\n + self._gamma \n * self.calc_q_values_1(\n sample_s_nex)[0, a_max]);\n if targets is None:\n targets = target;\n else:\n targets = np.append(targets, target, axis = 0);\n if samples_x is None:\n samples_x = sample_s;\n else:\n samples_x = np.append(samples_x, sample_s, axis = 0);\n #Run the training\n \n \n feed_dict = {self._state_placeholder:samples_x\n ,self._q_placeholder:targets}\n sess_res = self._sess.run([self._train_op, self._loss]\n , feed_dict = feed_dict);\n \n #Update the target parameters\n if train_counter % self._target_update_freq == 0:\n self.update_policy();\n logging.info('Global Step %d: update target network.' 
\n %(step));\n #Save the parameters\n if train_counter % self._save_freq == 0 or step + 1 == num_iterations:\n checkpoint_file = os.path.join(self._log_dir\n , 'model_data/model.ckpt');\n self._saver.save(self._sess\n , checkpoint_file, global_step=step);\n \n if train_counter % 100 == 0:\n logging.info (\"Global Step %d: loss %0.04f\"%(step, sess_res[1]));\n # Update the events file.\n summary_str = self._sess.run(self._summary, feed_dict=feed_dict)\n self._summary_writer.add_summary(summary_str, train_counter);\n self._summary_writer.add_graph(self._sess.graph);\n self._summary_writer.flush()\n \n train_counter += 1;\n \n #check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n\n this_ep_length = 0;\n action_counter = 0;\n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n time_this = time_next\n this_ep_length += 1;", "def _train_internal(self, opts):\n\n batches_num = self._data.num_points / opts['batch_size']\n train_size = self._data.num_points\n num_plot = 320\n sample_prev = np.zeros([num_plot] + list(self._data.data_shape))\n l2s = []\n\n counter = 0\n decay = 1.\n logging.error('Training VAE')\n for _epoch in xrange(opts[\"gan_epoch_num\"]):\n\n if opts['decay_schedule'] == \"manual\":\n if _epoch == 30:\n decay = decay / 2.\n if _epoch == 50:\n decay = decay / 5.\n if _epoch == 100:\n decay = decay / 10.\n\n if _epoch > 0 and _epoch % opts['save_every_epoch'] == 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot'),\n global_step=counter)\n\n for _idx in xrange(batches_num):\n # logging.error('Step %d of %d' % (_idx, batches_num ) )\n data_ids = np.random.choice(train_size, opts['batch_size'],\n replace=False, p=self._data_weights)\n batch_images = self._data.data[data_ids].astype(np.float)\n batch_noise = utils.generate_noise(opts, opts['batch_size'])\n _, loss, loss_kl, loss_reconstruct = self._session.run(\n [self._optim, self._loss, self._loss_kl,\n self._loss_reconstruct],\n feed_dict={self._real_points_ph: batch_images,\n self._noise_ph: batch_noise,\n self._lr_decay_ph: decay,\n self._is_training_ph: True})\n counter += 1\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n debug_str = 'Epoch: %d/%d, batch:%d/%d' % (\n _epoch+1, opts['gan_epoch_num'], _idx+1, batches_num)\n debug_str += ' [L=%.2g, Recon=%.2g, KLQ=%.2g]' % (\n loss, loss_reconstruct, loss_kl)\n logging.error(debug_str)\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n metrics = Metrics()\n points_to_plot = self._run_batch(\n opts, self._generated, self._noise_ph,\n self._noise_for_plots[0:num_plot],\n self._is_training_ph, False)\n l2s.append(np.sum((points_to_plot - sample_prev)**2))\n metrics.l2s = l2s[:]\n metrics.make_plots(\n opts,\n counter,\n None,\n points_to_plot,\n prefix='sample_e%04d_mb%05d_' % (_epoch, _idx))\n reconstructed = self._session.run(\n self._reconstruct_x,\n feed_dict={self._real_points_ph: batch_images,\n self._is_training_ph: False})\n metrics.l2s = None\n metrics.make_plots(\n opts,\n counter,\n None,\n reconstructed,\n prefix='reconstr_e%04d_mb%05d_' % (_epoch, _idx))\n if opts['early_stop'] > 0 and counter > opts['early_stop']:\n break\n if _epoch > 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 
'trained-pot-final'),\n global_step=counter)", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n self._rng, self.optimizer, loss, mean_loss= train(\n self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self._target_opt,\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._num_actions,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n\n\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='ImplicitLoss',\n simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == 
consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} episodes\".format(episode))\n\t\t\t\tbreak", "def fit(self, X_train: np.ndarray, y_train: np.ndarray, epochs: int, batch_size: int, eval_step: int, log_step: int,\n collect_steps_per_episode: int) -> None:\n\n self.dataset = self.replay_buffer.as_dataset(\n num_parallel_calls=3,\n sample_batch_size=batch_size,\n num_steps=2).prefetch(3)\n\n self.iterator = iter(self.dataset)\n\n def collect_step(environment, policy, buffer):\n time_step = environment.current_time_step()\n action_step = policy.action(time_step)\n next_time_step = environment.step(action_step.action)\n traj = trajectory.from_transition(time_step, action_step, next_time_step)\n\n # Add trajectory to the replay buffer\n buffer.add_batch(traj)\n\n def collect_data(env, policy, buffer, steps):\n for _ in range(steps):\n collect_step(env, policy, buffer)\n\n # (Optional) Optimize by wrapping some of the code in a graph using TF function.\n self.agent.train = common.function(self.agent.train)\n\n # Reset the train step\n self.agent.train_step_counter.assign(0)\n\n for _ in range(epochs):\n #print(\"epoch: \", _)\n # Collect a few steps using collect_policy and save to the replay buffer.\n collect_data(self.train_env, self.agent.collect_policy, self.replay_buffer, collect_steps_per_episode)\n\n # Sample a batch of data from the buffer and update the agent's network.\n experience, _ = next(self.iterator)\n train_loss = self.agent.train(experience).loss\n\n step = self.agent.train_step_counter.numpy()\n\n if step % log_step == 0:\n print('step = {0}: loss = {1}'.format(step, train_loss))\n\n if step % eval_step == 0:\n metrics = self.compute_metrics(X_train, y_train)\n print(metrics)", "def train_episode(self, max_episode_length):\n\n # Populate the buffer\n self.populate_buffer(max_episode_length)\n\n # weight updates\n replay_samples = self.replay_buffer.sample(self.buffer_sample_size)\n state_batch = torch.from_numpy(replay_samples[0]).to(DEVICE)\n action_batch = torch.from_numpy(replay_samples[1]).to(DEVICE)\n reward_batch = (\n torch.from_numpy(replay_samples[2]).to(DEVICE).unsqueeze(1)\n )\n next_state_batch = torch.from_numpy(replay_samples[3]).to(DEVICE)\n dones = (\n torch.from_numpy(replay_samples[4])\n .type(torch.long)\n .to(DEVICE)\n .unsqueeze(1)\n )\n\n # alpha must be clamped with a minimum of zero, so use exponential.\n alpha = self.log_alpha.exp().detach()\n\n with torch.no_grad():\n # Figure out value function\n next_actions, log_next_actions, _ = self.policy.sample(\n next_state_batch\n )\n target_q1, target_q2 = self.avg_q_net(\n next_state_batch, next_actions\n )\n target_q = torch.min(target_q1, target_q2)\n next_state_values = target_q - alpha * log_next_actions\n\n # Calculate Q network target\n done_floats = dones.type(torch.float)\n q_target = reward_batch.clone()\n q_target += self.gamma * done_floats * next_state_values\n\n # Q net outputs values for all actions, so we index specific actions\n q1, q2 = self.q_net(state_batch, action_batch)\n q1_loss = F.mse_loss(q1, q_target)\n q2_loss = F.mse_loss(q2, q_target)\n\n # policy loss\n actions_pi, log_probs_pi, action_dist = self.policy.sample(state_batch)\n q1_pi, q2_pi = self.q_net(state_batch, actions_pi)\n q_pi = torch.min(q1_pi, q2_pi)\n policy_loss = ((alpha * log_probs_pi) - q_pi).mean()\n\n # update parameters\n self.q_optim.zero_grad()\n q1_loss.backward()\n self.q_optim.step()\n\n self.q_optim.zero_grad()\n 
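# the second critic gets its own backward pass and optimizer step\n 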
q2_loss.backward()\n self.q_optim.step()\n\n self.policy_optim.zero_grad()\n policy_loss.backward()\n self.policy_optim.step()\n\n # automatic entropy tuning\n alpha_loss = (\n self.log_alpha * (log_probs_pi + self.entropy_target).detach()\n )\n alpha_loss = -alpha_loss.mean()\n\n if self.entropy_tuning:\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n\n # Step average Q net\n move_average(self.q_net, self.avg_q_net, self.tau)\n\n # logging\n self.tbx_logger(\n {\n \"loss/q1 loss\": q1_loss.item(),\n \"loss/q2 loss\": q2_loss.item(),\n \"loss/pi loss\": policy_loss.item(),\n \"loss/alpha loss\": alpha_loss.item(),\n \"Q/avg_q_target\": q_target.mean().item(),\n \"Q/avg_q1\": q1.mean().item(),\n \"Q/avg_q2\": q2.mean().item(),\n \"Q/avg_reward\": reward_batch.mean().item(),\n \"Q/avg_V\": next_state_values.mean().item(),\n \"H/alpha\": alpha.item(),\n \"H/pi_entropy\": action_dist.entropy().mean(),\n \"H/pi_log_pi\": log_probs_pi.mean(),\n },\n self.training_i,\n )\n\n self.training_i += 1\n self.checkpointer.increment_counter()", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # 
save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! the examples were collected using the model from the previous iteration, so (i-1)\n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n exists = os.path.isfile(filenameBest)\n if exists:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n print('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n pmcts.clear()\n nmcts.clear()\n del pmcts\n del nmcts\n\n print(' ')\n print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if i == 1:\n epochswin.append(pwins)\n epochdraw.append(0)\n\n epochswin.append(nwins)\n epochdraw.append(draws)\n self.writeLogsToFile(epochswin, epochdraw)\n\n ''' Get all the players and then pit them against the network. 
You need to modify here if you implement \n more players\n '''\n (gp, rp, mp) = self.decidePlayers()\n\n if self.args.parallel == 0:\n\n\n nmcts1 = MCTS(self.game, self.nnet, self.args)\n nmcts2 = MCTS(self.game, self.nnet, self.args)\n nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n nmcts1.clear()\n nmcts2.clear()\n nmcts3.clear()\n del nmcts1\n del nmcts2\n del nmcts3\n\n else:\n '''\n This will be used if you want to evaluate the network against the benchmarks in a parallel way\n '''\n\n self.args.update({'index': str(i)})\n\n p = self.parallel(self.args.arenaCompare)\n (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n epochsdrawgreedy.append(drawsgreedy)\n epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def train(self, gamma = GAMMA, learningRate = LR, eps = EPS, epsDecayInterval = EPS_DECAY_INTERVAL, epsDecayRate = EPS_DECAY_RATE, minEps = MIN_EPS, epNum = NUM_EPISODES, epStart = 0, trainingStart = TRAINING_START, experienceSize = EXPERIENCE_SIZE, minibatchSize = 
MINIBATCH_SIZE, adversary = None, checkpointFolder = CHECKPOINT_FOLDER, checkpointInterval = CHECKPOINT_INTERVAL, printInterval = PRINT_INTERVAL):\n\n allActions = np.asarray(range(self.env.action_space.n))\n saver = tf.train.Saver()\n experience = deque([], experienceSize)\n\n episodeLengths = []\n episodeLengthsSeconds = []\n episodeRewards = []\n attacksNumbers = []\n losses = []\n\n trainingStart = epStart + trainingStart\n for i in range(epStart, epNum):\n s = utils.preprocess(self.env.reset())\n frames = np.expand_dims(np.repeat(s, 4, 2), 0)\n done = False\n episodeLength = 0\n episodeReward = 0.0\n attNum = 0\n\n episodeStartTime = time()\n while not done:\n actionScores, actionProbs = self.sess.run([self.logits, self.probs], feed_dict={self.inputs:frames})\n a = np.random.choice(allActions, p=utils.epsGreedyProbs(actionScores[0], eps))\n self._attack(adversary, frames, actionProbs)\n\n for j in range(self.frameSkip):\n sj, r, done, _ = self.env.step(a)\n sj = utils.preprocess(sj)\n episodeLength += 1\n episodeReward += r\n\n framesJ = utils.pushframe(frames, sj)\n experience.append((frames, a, r, framesJ, done))\n frames = framesJ\n\n if i > trainingStart:\n # actionScoresJ = sess.run(outQ, feed_dict={self.inputs:framesJ})\n startStates, actions, rewards, endStates, dones = getRandomMinibatch(experience, minibatchSize)\n\n actionScoresSS = self.sess.run(self.logits, feed_dict={self.inputs:startStates})\n actionScoresES = self.sess.run(self.logits, feed_dict={self.inputs:endStates})\n targets = computeMinibatchTargets(actions, rewards, dones, gamma, actionScoresSS, actionScoresES)\n los = self.sess.run([self.loss, self.update], feed_dict={self.inputs:startStates, self.target:targets})[0]\n losses.append(los)\n\n episodeEndTime = time()\n episodeLengths.append(episodeLength)\n episodeLengthsSeconds.append(episodeEndTime-episodeStartTime)\n episodeRewards.append(episodeReward)\n attacksNumbers.append(attNum)\n\n if eps > minEps and ((i+1) % epsDecayInterval) == 0:\n eps = eps * epsDecayRate\n print(\"eps decayed to \" + str(eps) + \" in episode \" + str(i + 1) + \" (\" + str(sum(episodeLengths)) + \"'th timestamp)\")\n\n if (i + 1) % checkpointInterval == 0:\n saver.save(self.sess, checkpointFolder + \"dqn_episode\" + str(i + 1) + \".ckpt\")\n print(\"Saved checkpoint in episode \" + str(i + 1) + \" with reward = \" + str(episodeRewards[-1]))\n\n if (i + 1) % printInterval == 0:\n print(str(i + 1) + \" / \" + str(epNum) + \" length = \" + str(np.mean(episodeLengths[-10:])) + \" (\" + str(np.mean(episodeLengthsSeconds[-10:])) + \"s) reward = \" + str(np.mean(episodeRewards[-10:])) + \" loss = \" + str(losses[-1]))\n if self.goalReached(episodeRewards):\n print(\"Finished training after \" + str(i + 1) + \" episodes. Goal achieved.\")\n break\n\n saver.save(self.sess, checkpointFolder + \"dqn_final.ckpt\")\n print(\"Finished training. 
Saved final checkpoint.\")\n return episodeLengths, episodeRewards, attacksNumbers, losses", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # an integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent takes an action (with softmax) considering its current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happens\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribution only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree == True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, 
current_state, cost, reward\n # Updating the critic using Temporal Differencing Learning\n # In this simple case there is no u_t1 state.\n # The current state is considered terminal.\n # We can drop the term (gamma*u_t1)-u_t and consider\n # only (reward-cost) as the utility of the state (see Russell & Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def train_step(self) -> Tuple[Dict[str, Any], Dict[str, Any], bool]:\n assert self.episode_per_test is not None\n assert self.train_collector is not None\n stop_fn_flag = False\n if self.train_fn:\n self.train_fn(self.epoch, self.env_step)\n result = self.train_collector.collect(\n n_step=self.step_per_collect, n_episode=self.episode_per_collect\n )\n if result[\"n/ep\"] > 0 and self.reward_metric:\n rew = self.reward_metric(result[\"rews\"])\n result.update(rews=rew, rew=rew.mean(), rew_std=rew.std())\n self.env_step += int(result[\"n/st\"])\n self.logger.log_train_data(result, self.env_step)\n self.last_rew = result[\"rew\"] if result[\"n/ep\"] > 0 else self.last_rew\n self.last_len = result[\"len\"] if result[\"n/ep\"] > 0 else self.last_len\n data = {\n \"env_step\": str(self.env_step),\n \"rew\": f\"{self.last_rew:.2f}\",\n \"len\": str(int(self.last_len)),\n \"n/ep\": str(int(result[\"n/ep\"])),\n \"n/st\": str(int(result[\"n/st\"])),\n }\n if result[\"n/ep\"] > 0:\n if self.test_in_train and self.stop_fn and self.stop_fn(result[\"rew\"]):\n assert self.test_collector is not None\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.epoch,\n self.episode_per_test, self.logger, self.env_step\n )\n if self.stop_fn(test_result[\"rew\"]):\n stop_fn_flag = True\n self.best_reward = test_result[\"rew\"]\n self.best_reward_std = test_result[\"rew_std\"]\n else:\n self.policy.train()\n\n return data, result, stop_fn_flag", "def _store_episode(self):\n # For each transition in the last episode,\n # create a set of artificial transitions\n for transition_idx, transition in enumerate(self.episode_transitions):\n\n obs_t, action, reward, obs_tp1, done, info = transition\n\n # Add to the replay buffer\n self.replay_buffer.add(obs_t, action, reward, obs_tp1, done)\n\n # We cannot sample a goal from the future in the last step of an episode\n if (transition_idx == len(self.episode_transitions) - 1 and\n self.goal_selection_strategy == GoalSelectionStrategy.FUTURE):\n break\n\n # Sample n goals per transition, where n is `n_sampled_goal`\n # this is called k in the paper\n sampled_goals = self._sample_achieved_goals(self.episode_transitions, transition_idx)\n # For each sampled goal, store a new transition\n for goal in sampled_goals:\n # Copy transition to avoid modifying the original one\n obs, action, reward, next_obs, done, info = copy.deepcopy(transition)\n\n # Convert concatenated obs to dict, so we can update the goals\n obs_dict, next_obs_dict = map(self.env.convert_obs_to_dict, (obs, next_obs))\n\n # Update the desired goal in the transition\n obs_dict['desired_goal'] = goal\n next_obs_dict['desired_goal'] = goal\n\n # Update the reward according to the new desired goal\n reward = 
self.env.compute_reward(next_obs_dict['achieved_goal'], goal, info)\n # Can we use achieved_goal == desired_goal?\n done = False\n\n # Transform back to ndarrays\n obs, next_obs = map(self.env.convert_dict_to_obs, (obs_dict, next_obs_dict))\n\n # Add artificial transition to the replay buffer\n self.replay_buffer.add(obs, action, reward, next_obs, done)", "def train(env, agents, data_log, n_episodes=10000, n_steps=None, generate_val_data=False, record_env=None, trainer=None):\n # Setup logging and start code\n logger = logging.getLogger('root')\n step_tot = 0\n logger.info(env.observation_space[0].high)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n ep_generator = range(n_episodes) if n_episodes else itertools.count()\n # Start training\n for i in ep_generator:\n # Do some logging\n logger.info(\"episode:\" + str(i))\n data_log.set_episode(i)\n\n # Periodically store networks\n if i % 250 == 0: #was 25\n store_networks(trainer, agents, data_log)\n\n # Run a single episode\n score, step, extra_data = run_episode(env, agents, render=False, store_data=True, trainer=trainer)\n\n # Do more logging\n logger.info(\"Score: \" + str(score))\n step_tot += step\n data_log.set_step(step_tot)\n data_log.log_var(\"score\", score)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n # Break training loop\n if n_steps and step_tot > n_steps:\n break\n\n #Periodically save logs\n if i % 50 == 0: #was 5\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n\n # Save logs one last time\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n return", "def train_RL(DIM, SHIPS):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n agent = ModelQLearning(\"Vikram\", DIM, len(SHIPS), device)\n env = Environment(DIM, SHIPS, \"Vikram\")\n batch_size = 64\n num_episodes = 100\n\n\n total_moves = 0\n\n for e in range(num_episodes):\n env.reset()\n state = env.get_state()\n inputs = []\n actions = []\n hits = []\n done = False\n for time in range(DIM*DIM):\n action = agent.move(state)\n reward, next_state = env.step(action)\n next_input, open_locations, hit, sunk, done = next_state\n if done == True:\n total_moves += len(hits)\n if e % batch_size == 0 and e != 0:\n print(\"Episodes: {}, Avg Moves: {}\".format(e,float(total_moves)/float(batch_size)))\n total_moves = 0\n\n agent.replay(inputs, actions, hits, env.total_ships_lengths)\n break\n\n inputs.append(next_input)\n actions.append(action)\n hits.append(hit)\n state = next_state\n\n if done == False:\n print(env.placement)\n print(inputs,actions, hits)\n # break", "def generate_episode(env, args, render=False, test_mode=False):\n episode = []\n state, done = env.reset(), False\n observations = transform_obs(env.get_all_observations())\n n_steps = 0\n\n for agent in env.agents: # for agents where it matters,\n agent.set_hidden_state() # set the init hidden state of the RNN\n\n while not done:\n unavailable_actions = env.get_unavailable_actions()\n \n # compute action, keep record of hidden state of the agents to store in experience\n actions, hidden, next_hidden = {}, [], []\n for idx, agent in enumerate(env.agents):\n hidden.append(agent.get_hidden_state())\n actions[agent] = agent.act(observations[idx, :], test_mode=test_mode)\n next_hidden.append(agent.get_hidden_state())\n\n if render:\n print(f\"Step {n_steps}\")\n env.render()\n print([action.name for action in actions.values()])\n\n next_state, rewards, done, 
_ = env.step(actions)\n next_obs = transform_obs(env.get_all_observations())\n \n # episodes that take long are not allowed and penalized for both agents\n n_steps += 1\n if n_steps > args.max_episode_length:\n done = True\n rewards = {'blue': -1, 'red': -1}\n\n actions = torch.tensor([action.id for action in actions.values()])\n unavail_actions = torch.zeros((args.n_agents, args.n_actions), dtype=torch.long)\n for idx, agent in enumerate(env.agents):\n act_ids = [act.id for act in unavailable_actions[agent]]\n unavail_actions[idx, act_ids] = 1.\n \n episode.append(Experience(transform_state(state), actions, rewards, \n transform_state(next_state), done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\"\n episode.append(Experience(None, actions, rewards, \n None, done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\" \n state = next_state\n observations = next_obs\n \n if render:\n print(f\"Game won by team {env.terminal(next_state)}\")\n return episode", "def train(self,env, iter_n=2000):\n\n\t\tfor i in range(iter_n):\n\t\t\tif i > 50:\n\t\t\t\tif all(reward > 195 for reward in self.step_count[-10:]):\n\t\t\t\t\tprint('solved at episode {}'.format(i))\n\t\t\t\t\tbreak\n\t\t\tstate = self.env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\n\t\t\tepisode_complete = False\n\t\t\tstep = 0\n\t\t\twhile not episode_complete and (step < self.max_steps):\n\t\t\t\taction = self.define_action(state)\n\t\t\t\tnew_state, reward, episode_complete, info = env.step(action)\n\t\t\t\tnew_state = np.reshape(new_state, [1, self.state_size])\n\n\t\t\t\tself.memory.append((state, action, reward, new_state, episode_complete))\n\t\t\t\tself.round_reward += reward\n\t\t\t\tstate = new_state\n\t\t\t\tstep += 1\n\t\t\t\tif episode_complete:\n\t\t\t\t\tself.round_reward += -10\n\t\t\t\t\tself.update_target_model()\n\t\t\t\t\tself.print_results(i, iter_n, step)\n\t\t\t\t\tif i != 0: # Update totals in memory if not the first run\n\t\t\t\t\t\tself.update_totals(i, step)\n\t\t\t\tif len(self.memory) > self.training_iter:\n\t\t\t\t\tself.replay()\n\t\t\tif self.epsilon > self.epsilon_min:\n\t\t\t\tself.epsilon *= self.epsilon_decay\n\n\t\treturn self.all_iterations, self.all_rewards, self.step_count", "def train(self, persist: bool = False, run: int = -1, checkpoint: int = -1):\n self.meta = ICMMetaDataV1(fp=open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, 'agent_stats.csv'), 'w'),\n args=self.state.config)\n train_start = time.time()\n for episode in range(self.state.episodes):\n start_time = time.time()\n state = self.env.reset()\n state = torch.reshape(tensor(state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1, 2).to(\n self.device)\n done = False\n episode_reward = []\n episode_loss = []\n\n # save network\n # if episode % self.state.model_save_interval == 0:\n # save_path = self.state.model_save_path + '/' + self.run_name + '_' + str(episode) + '.pt'\n # torch.save(self.q_network.state_dict(), save_path)\n # print('Successfully saved: ' + save_path)\n\n # Save Model\n self.save(episode)\n # Collect garbage\n # To Do Later\n\n while not done:\n\n # update target network\n if self.state.step % self.state.network_update_interval == 0:\n print('Updating target network')\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n if self.state.step > len(self.replay_memory):\n self.state.epsilon = max(self.state.final_epsilon,\n self.state.initial_epsilon - 
self.state.epsilon_step * self.state.step)\n if self.state.epsilon > self.state.final_epsilon:\n self.state.mode = 'Explore'\n else:\n self.state.mode = 'Exploit'\n\n action, q = self.take_action(state, test=False, state_count=0)\n next_state, reward, done, _ = self.env.step(action)\n\n next_state = torch.reshape(tensor(next_state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1,\n 2).to(\n self.device)\n self.push((state, torch.tensor([int(action)]), torch.tensor([reward], device=self.device), next_state,\n torch.tensor([done], dtype=torch.float32)))\n episode_reward.append(reward)\n self.state.step += 1\n state = next_state\n\n # train network\n if self.state.step >= self.start_to_learn and self.state.step % self.state.network_train_interval == 0:\n loss = self.optimize_network()\n episode_loss.append(loss)\n\n if done:\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode)\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode, file=self.log_file)\n # self.log_summary(episode, episode_loss, episode_reward)\n self.last_n_rewards.append(sum(episode_reward))\n self.last_n_intrinsic_rewards.append(sum(self.intrinsic_episode_reward))\n self.meta.update_episode(episode, self.state.step, self.state.epsilon,\n sum(episode_reward), np.mean(self.last_n_rewards),\n np.mean(episode_loss), sum(self.intrinsic_episode_reward),\n np.mean(self.last_n_intrinsic_rewards), self.state.mode)\n\n episode_reward.clear()\n episode_loss.clear()\n self.intrinsic_episode_reward.clear()", "def fit(self, num_iterations, max_episode_length=250, eval_every_nth=1000, save_model_every_nth=1000, log_loss_every_nth=1000, video_every_nth=20000):\n self.compile()\n self.policy = LinearDecayGreedyEpsilonPolicy(start_value=1., end_value=0.1, num_steps=1e6, num_actions=self.num_actions) # for training\n self.replay_memory = ReplayMemory(max_size=1000000)\n self.log_loss_every_nth = log_loss_every_nth\n random_policy = UniformRandomPolicy(num_actions=self.num_actions) # for burn in \n num_episodes = 0\n\n # tf logging\n self.tf_session = K.get_session()\n self.tf_summary_writer = tf.summary.FileWriter(self.log_dir, self.tf_session.graph)\n\n while self.iter_ctr < num_iterations:\n state = self.env.reset()\n self.preprocessor.reset_history_memory()\n\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0 \n\n while num_timesteps_in_curr_episode < max_episode_length:\n self.iter_ctr+=1 # number of steps overall\n num_timesteps_in_curr_episode += 1 # number of steps in the current episode\n\n # logging\n # if not self.iter_ctr % 1000:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n\n # this appends to uint8 history and also returns stuff ready to be spit into the network\n state_network = self.preprocessor.process_state_for_network(state) 
#shape is (4,84,84,1). axis are swapped in cal_q_vals\n # print \"shape {}, max {}, min {}, type {} \".format(state_network.shape, np.max(state_network), np.min(state_network), state_network.dtype)\n\n # burning in \n if self.iter_ctr < self.num_burn_in:\n action = random_policy.select_action() # goes from 0 to n-1\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n # atari_preprocessor.process_state_for_memory converts it to grayscale, resizes it to (84, 84) and converts to uint8\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n # this should be called when num_timesteps_in_curr_episode > max_episode_length, but we can call it in is_terminal as well. \n # it won't change anything as it just sets the last entry's is_terminal to True\n self.replay_memory.end_episode() \n break\n\n # training\n else:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n q_values = self.calc_q_values(state_network)\n # print \"q_values {} q_values.shape {}\".format(q_values, q_values.shape)\n #print \"q_values.shape \", q_values.shape\n action = self.policy.select_action(q_values=q_values, is_training=True)\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n # validation. 
keep this clause before the breaks!\n if not(self.iter_ctr%eval_every_nth):\n print \"\\n\\nEvaluating at iter {}\".format(self.iter_ctr)\n if not(self.iter_ctr%video_every_nth):\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=True)\n else:\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=False)\n print \"Done Evaluating\\n\\n\"\n\n # save model\n if not(self.iter_ctr%save_model_every_nth):\n self.q_network.save(os.path.join(self.log_dir, 'weights/q_network_{}.h5'.format(str(self.iter_ctr).zfill(7))))\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n self.replay_memory.end_episode() \n break\n\n if not(self.iter_ctr % self.train_freq):\n self.update_policy()\n\n state = next_state", "def _experience_replay(self, batch_size, discount=0.9, epochs=1):\r\n minibatch = random.sample(self.experience, batch_size)\r\n\r\n # TODO: The batch_size might not bee needed as an argument here if the reshape things can be resolved.\r\n states, actions, rewards, next_states, terminated = self._extract_data(batch_size, minibatch)\r\n targets = self._build_targets(batch_size, states, next_states, rewards, actions, terminated, discount)\r\n\r\n history = self.q_network.fit(states, targets, epochs=epochs, verbose=0, batch_size=1)\r\n #print(history.history['loss'])\r\n self.episode_loss.append(history.history['loss'][0])", "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. 
Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))", "def fit(self, env, num_iterations, max_episode_length=None):\n print ('initializing replay memory...')\n sys.stdout.flush()\n self.mode = 'init'\n self.memory.clear()\n self.preprocessor.reset()\n self.num_steps = 0\n num_updates = 0\n num_episodes = 0\n while num_updates < num_iterations:\n state = env.reset()\n self.preprocessor.reset()\n num_episodes += 1\n t = 0\n total_reward = 0\n while True:\n self.num_steps +=1\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n\n reward = self.preprocessor.process_reward(reward)\n total_reward += reward\n\n preprocessed_state = self.preprocessor.process_state_for_memory(state)\n\n self.memory.append(preprocessed_state, action, reward, is_terminal)\n\n if self.num_steps > self.num_burn_in:\n if self.mode != 'train':\n print('Finish Burn-in, Start Training!')\n\n self.mode = 'train'\n if self.num_steps % self.train_freq == 0:\n self.update_predict_network()\n num_updates += 1\n if num_updates % 10000 == 0:\n self.q_network.save_weights('%s/model_weights_%d.h5' % (self.save_path, num_updates // 10000))\n \n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n \n state = next_state\n #print ('episode %d ends, lasts for %d steps (total steps:%d), gets $d reward. (%d/%d updates.)' % (num_episodes, t, self.))", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def train_on_history(self, history):\n \n # Split into episodes\n n_episodes = history[-1][\"episode\"] \n episodes = [list(filter(lambda h: h[\"episode\"]==e , history)\n ) for e in range(n_episodes)\n ]\n\n # Split into game lives\n for episode in episodes:\n \n \n game_lives = [\n list(filter(lambda h: h.get('info').get('ale.lives')==l, episode)\n ) for l in range(5)\n ]\n \n for life in game_lives:\n if life:\n self.train(life)\n else:\n print(\"No ocurrance\")\n return", "def exp_replay(self, batch_size):\n\t\tmini_batch = []\n\t\tmemory_size = len(self.memory) # Getting the memory size used for store the \"experience\"\n\t\tfor i in range(memory_size - batch_size + 1, memory_size):\n\t\t\tmini_batch.append(self.memory.popleft()) # Loading the tuple (s, a, r, s')\n\n\t\tfor state, action, reward, next_state in mini_batch: # For each tuple of the \"experience\"\n\t\t\t# Applying the Bellman Equation to compute the expected reward\n\t\t\ttarget = reward + self.gamma * np.amax(self.model.predict(next_state)[0])\n\n\t\t\ttarget_f = self.model.predict(state) # Get the best action to do given a specific state\n\t\t\ttarget_f[0][action] = target # Update the value of the original action with the best q-value\n\t\t\tresult = self.model.fit(state, target_f, epochs=1, 
verbose=0) # Update our NN-Agent\n\t\t\tself.loss += result.history['loss'][0]\n\n\t\t#print(np.divide(np.sum(results), 32))\n\n\t\tif self.epsilon > self.epsilon_min: # If we hadn't reached the minimum epsilon-random probability\n\t\t\tself.epsilon *= self.epsilon_decay # Decrease the epsilon-random probability", "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def training(self) -> None:\n self.compile_model()\n self.train_epoch()\n self.agent.save()", "def train(epoch, rewards=1, punishment=-100):\n # Init setting\n environment = gym.make('CartPole-v1')\n agent = Learner(environment)\n\n # Early stopping\n perfect_times = 0\n\n # Plot\n scores, epsilons = [], []\n\n for e in range(epoch):\n # Reset state for each epoch\n state = environment.reset().reshape((1, 4))\n done = False\n\n # Assume 2000 is our ultimate goal (cart keeps 2000 frames)\n for frame in range(2000):\n # Make one action\n action = agent.act(state)\n next_state, _, done, _ = environment.step(action)\n next_state = next_state.reshape((1, 4))\n\n # Customised reward and punishment\n reward = punishment if done else rewards\n\n # Build memory\n agent.remember_play(state, action, reward, next_state, done)\n\n # Train process\n agent.replay()\n state = next_state\n\n # End this game if done\n if done:\n # Update the target model for next inner prediction\n agent.update_target_model()\n\n # Store the scores for plotting\n scores.append(frame)\n epsilons.append(agent.epsilon)\n\n print((\"epoch: {}/{}, score {}, \" +\n \"epsilon {} {}\").format(e, epoch, frame,\n agent.epsilon, FILE))\n break\n\n # Early stopping when getting `EARLY` continuous perfect score\n if frame == 499:\n perfect_times += 1\n if perfect_times == EARLY:\n break\n else:\n perfect_times = 0\n\n # Save the model and weights\n save_weight(agent.model)\n save_model(agent.model)\n\n # Save plotting data\n df = pd.DataFrame()\n df['epoch'] = range(1, len(scores) + 1)\n df['score'] = scores\n df['epsilon'] = epsilons\n df.to_csv(CSV_FILE, index=False)\n\n return agent", "def gen_ep_data(self,num_trials=1,trial_len=20,pm_probe_positions=None):\n # insert extra positive trials than expected by chance\n pos_og_bias=np.random.randint(1,100,1)\n # initialize returnables\n ep_len = num_trials*(trial_len+self.nmaps)\n inst_seq = -np.ones([ep_len])\n stim_seq = -np.ones([ep_len,self.sdim])\n action_seq = -np.ones([ep_len])\n\n # loop over trails\n for trial in range(num_trials):\n ## randomize emats\n self.shuffle_pms()\n # generate trial idx_seq\n inst_stim_seq_int,inst_action_seq_int = self.gen_trial_inst_phase()\n resp_stim_seq_int,resp_action_seq_int = self.gen_trial_resp_phase(\n trial_len,pos_og_bias,pm_probe_positions)\n # embed stim idx_seq\n inst_stim_seq = self.emat[inst_stim_seq_int]\n resp_stim_seq = self.emat[resp_stim_seq_int]\n # collect\n t0 = 
trial*(trial_len+self.nmaps)\n t1 = t0+trial_len+self.nmaps\n inst_seq[t0:t1] = np.concatenate([inst_stim_seq_int,np.zeros(trial_len)],axis=0)\n stim_seq[t0:t1] = np.concatenate([inst_stim_seq,resp_stim_seq],axis=0)\n action_seq[t0:t1] = np.concatenate([inst_action_seq_int,resp_action_seq_int],axis=0)\n inst_seq = tr.LongTensor(inst_seq).unsqueeze(1) # batch dim\n stim_seq = tr.Tensor(stim_seq).unsqueeze(1) \n action_seq = tr.LongTensor(action_seq).unsqueeze(1) \n return inst_seq,stim_seq,action_seq", "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n start = time.time()\n if self.gae:\n self.train_gae()\n return\n\n def optimize_model():\n R = 0\n for i in reversed(range(len(self.rewards))):\n if abs(self.rewards[i]) > 0.0:\n R = 0\n R = self.rewards[i] + self.gamma * R\n self.rewards[i] = R\n rewards = torch.Tensor(self.rewards)\n if self.var_reduce:\n rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\n\n policy_loss = 0.0\n for (log_prob, r) in zip(self.log_probs, rewards):\n policy_loss -= log_prob * r\n\n loss = policy_loss.data[0, 0]\n\n self.opt.zero_grad()\n policy_loss = cu(policy_loss)\n policy_loss.backward()\n self.opt.step()\n\n self.clear_action()\n return loss\n\n self.model.train()\n if USE_CUDA:\n self.model.cuda()\n running_reward = None\n\n for episode in range(1, self.n_episode+1):\n self.init_game_setting()\n state = self.env.reset()\n\n tot_reward = 0\n a, b = 0, 0\n for t in range(self.episode_len):\n action = self.make_action(state, test=False)\n state, reward, done, info = self.env.step(action)\n self.rewards.append(reward)\n if reward > 0:\n a += 1\n if reward < 0:\n b += 1\n tot_reward += reward\n if done:\n break\n\n if running_reward is None:\n running_reward = tot_reward\n else:\n running_reward = 0.99 * running_reward + 0.01 * tot_reward\n\n if episode % self.update_every == 0:\n loss = optimize_model()\n print(\"Episode %d\" % episode)\n print(time_since(start))\n print(\"reward %.4f %d:%d len=%d\" % (running_reward, a, b, t))\n torch.save(self.model.state_dict(), self.model_fn)", "def play(self, test_ep=0., n_step=10000, n_episode=1000):\n\n # If not training, clear log of old data, initialize TF variables, and load model.\n if self.stat and not self.is_train:\n tf.initialize_all_variables().run()\n self.stat.load_model()\n\n self.target_network.run_copy()\n\n rewards = []\n game_lengths = []\n\n # Play at least n_episode episodes.\n while np.sum(game_lengths) < n_episode:\n\n # Start a new game.\n observation, reward, terminal = self.new_game()\n current_reward = 0\n\n # Add initial frames to history.\n for _ in range(self.history_length):\n self.history.add(observation)\n\n # Play game until 'terminal.'\n for t in range(n_step):\n # 1. Predict.\n action = self.predict(self.history.get(), test_ep)\n # 2. Act.\n observation, reward, terminal, _ = self.env.step(action, is_training=False)\n # 3. 
Observe.\n self.history.add(observation)\n \n current_reward += reward\n\n if terminal:\n break\n\n # Set tqdm range description.\n if self.t_range and not self.chtc:\n self.t_range.set_description('PLAY: %d/%d' % (np.sum(game_lengths), n_episode))\n\n # After game, add game length and rewards.\n rewards.append(float(current_reward))\n game_lengths.append(terminal)\n\n if self.t_range and not self.chtc:\n self.t_range.set_description()\n \n self.compute_statistics(rewards, game_lengths)", "def run(self):\n data_provider = DataProvider(self.config)\n hex_attr_df = data_provider.read_hex_bin_attributes()\n hex_distance_df = data_provider.read_hex_bin_distances()\n city_states = data_provider.read_city_states(self.city_states_filename)\n neighborhood = data_provider.read_neighborhood_data()\n popular_bins = data_provider.read_popular_hex_bins()\n num_episodes = self.config['RL_parameters']['num_episodes']\n ind_episodes = self.config['RL_parameters']['ind_episodes']\n exp_decay_multiplier = self.config['RL_parameters']['exp_decay_multiplier']\n\n q_ind = None\n r_table = None\n xi_matrix = None\n\n best_episode = None\n best_model = {}\n current_best = -1000000\n\n progress_bar = tqdm(xrange(num_episodes))\n for episode_id in progress_bar:\n progress_bar.set_description("Episode: {}".format(episode_id))\n\n # Create episode\n ind_exploration_factor = np.e ** (-1 * episode_id * exp_decay_multiplier / ind_episodes)\n\n episode = Episode(self.config,\n episode_id,\n ind_exploration_factor,\n hex_attr_df,\n hex_distance_df,\n city_states,\n neighborhood,\n popular_bins,\n q_ind,\n r_table,\n xi_matrix)\n\n # Run episode\n tables = episode.run()\n q_ind = tables['q_ind']\n r_table = tables['r_table']\n xi_matrix = tables['xi_matrix']\n episode_tracker = tables['episode_tracker']\n\n # Uncomment for logging if running a job, comment during experiments\n # otherwise it leads to insanely huge logging output which is useless\n\n # self.logger.info("""\n # Expt: {} Episode: {} Earnings: {}\n # Pax rides: {} Relocation rides: {} Unmet demand: {}\n # """.format(self.expt_name, episode_id,\n # episode_tracker.gross_earnings,\n # episode_tracker.successful_waits,\n # episode_tracker.relocation_rides,\n # episode_tracker.unmet_demand))\n # self.logger.info("----------------------------------")\n\n self.training_tracker.update_RL_tracker(\n episode_id, episode_tracker.gross_earnings,\n episode_tracker.successful_waits, episode_tracker.unsuccessful_waits,\n episode_tracker.unmet_demand, episode_tracker.relocation_rides,\n episode_tracker.DET, episode_tracker.DPRT, episode_tracker.DWT,\n episode_tracker.DRT, episode_tracker.DCT)\n\n # Keep track of the best episode\n if self.objective == 'revenue':\n if episode_tracker.gross_earnings >= current_best:\n best_episode = episode_tracker\n current_best = best_episode.gross_earnings\n else: # self.objective == 'pickups':\n if episode_tracker.successful_waits >= current_best:\n best_episode = episode_tracker\n current_best = episode_tracker.successful_waits\n\n # Keep track of the best model\n best_model['ind_exploration_factor'] = ind_exploration_factor\n best_model['config'] = self.config\n best_model['q_ind'] = q_ind\n best_model['r_table'] = r_table\n best_model['xi_matrix'] = xi_matrix\n best_model['training_tracker'] = self.training_tracker\n\n # After finishing training\n self.logger.info("Expt: {} Earnings: {} Met Demand: {} Unmet Demand: {}".format(self.expt_name,\n best_episode.gross_earnings,\n best_episode.successful_waits,\n 
best_episode.unmet_demand))\n return best_episode, best_model, self.training_tracker", "def _train_epochs(self, data, model, n_epochs, start_epoch, start_step, dev_data, teacher_forcing_ratio, early_stopping_patience):\n print_loss_total = 0 # Reset every print_every\n epoch_loss_total = 0 # Reset every epoch\n\n device = None if torch.cuda.is_available() else -1\n batch_iterator = torchtext.data.BucketIterator(data, batch_size=self.batch_size, repeat=False,\n sort_key=lambda x: len(x.src),\n shuffle=True, device=device, sort=False, sort_within_batch=True)\n\n steps_per_epoch = len(batch_iterator)\n total_steps = steps_per_epoch * n_epochs\n\n step = start_step\n step_elapsed = 0\n previous_dev_loss = 10e6\n dev_loss_increased_epochs = 0\n for epoch in range(start_epoch, n_epochs + 1):\n self.logger.info(\"Epoch: %d, Step: %d\" % (epoch, step))\n\n batch_generator = batch_iterator.__iter__()\n # consuming seen batches from previous training\n for _ in range((epoch - 1) * steps_per_epoch, step):\n next(batch_generator)\n\n model.train(True)\n for batch in batch_generator:\n step += 1\n step_elapsed += 1\n\n input_variables, input_lengths = getattr(batch, UTTERANCE_FIELD_NAME)\n target_variables = getattr(batch, RESPONSE_FIELD_NAME)\n emotion_variables = getattr(batch, EMOTION_FIELD_NAME)\n\n loss = self.train_batch(input_variables, input_lengths.tolist(), target_variables, emotion_variables,\n model, teacher_forcing_ratio)\n\n # Record average loss\n print_loss_total += loss\n epoch_loss_total += loss\n\n if step % self.print_every == 0 and step_elapsed > self.print_every:\n print_loss_avg = print_loss_total / self.print_every\n print_loss_total = 0\n log_msg = 'Progress: %.2f%%, Train %s: %.4f' % (\n step / total_steps * 100,\n self.loss.name,\n print_loss_avg)\n self.logger.info(log_msg)\n beam_search = EmotionSeq2seq(model.encoder, EmotionTopKDecoder(model.decoder, 20))\n predictor = Predictor(beam_search, data.vocabulary, data.emotion_vocabulary)\n seq = \"how are you\".split()\n self.logger.info(\"Happy: \" + \" \".join(predictor.predict(seq, 'happiness')))\n self.logger.info(\"Angry: \" + \" \".join(predictor.predict(seq, 'anger')))\n\n # Checkpoint\n if step % self.checkpoint_every == 0 or step == total_steps:\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n\n if step_elapsed == 0:\n continue\n\n epoch_loss_avg = epoch_loss_total / min(steps_per_epoch, step - start_step)\n epoch_loss_total = 0\n log_msg = \"Finished epoch %d: Train %s: %.4f\" % (epoch, self.loss.name, epoch_loss_avg)\n if dev_data is not None:\n dev_loss, accuracy = self.evaluator.evaluate(model, dev_data)\n self.optimizer.update(dev_loss)\n log_msg += \", Dev %s: %.4f, Accuracy: %.4f\" % (self.loss.name, dev_loss, accuracy)\n model.train(mode=True)\n if dev_loss > previous_dev_loss:\n dev_loss_increased_epochs += 1\n if dev_loss_increased_epochs == early_stopping_patience:\n self.logger.info(\"EARLY STOPPING\")\n break\n else:\n dev_loss_increased_epochs = 0\n previous_dev_loss = dev_loss\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n else:\n self.optimizer.update(epoch_loss_avg)\n\n self.logger.info(log_msg)", "def _sp_train(self, max_steps, instances, visualize, plot):\n # Keep track of rewards per episode per instance\n episode_reward_sequences = [[] for i in range(instances)]\n episode_step_sequences = [[] for i in range(instances)]\n episode_rewards = [0] * instances\n\n # Create and initialize 
environment instances\n envs = [self.create_env() for i in range(instances)]\n envs[0].render(mode='human')\n states = [env.reset()['observation'][0] for env in envs] # get the image\n\n for step in range(max_steps):\n for i in range(instances):\n if visualize: envs[i].render()\n action, angle_index, action_index = self.agent.act(states[i], i)\n\n next_state, reward, done, _ = envs[i].step(action)\n (next_image, next_depth) = next_state['observation']\n self.agent.push(\n Transition(states[i], [angle_index, action_index], reward, None if done else next_image), i)\n episode_rewards[i] += reward\n if done:\n episode_reward_sequences[i].append(episode_rewards[i])\n episode_step_sequences[i].append(step)\n episode_rewards[i] = 0\n if plot: plot(episode_reward_sequences, episode_step_sequences)\n (image, depth) = envs[i].reset()['observation']\n states[i] = image\n else:\n states[i] = next_image\n # Perform one step of the optimization\n self.agent.train(step)\n\n if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)", "def train(self, num_decisions=350):\n os.system(\"mkdir \" + self.folder_name + \"Train\")\n for i in range(5000):\n episode_folder_name = self.folder_name + \"Train/\" + str(i) + \"/\"\n all_system_states = []\n all_system_rewards = []\n all_system_states_cluster = []\n all_grid_states_cluster = []\n all_surrounding_states_cluster = []\n os.system(\"mkdir \" + episode_folder_name)\n filename = episode_folder_name + str(i) + \".h5\"\n self.system.reset_context(filename)\n self.system.run_decorrelation(20)\n grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n for j in range(num_decisions):\n action_index = self._get_action(state, i)\n transition_to_add = [state, action_index]\n tag = \"_train_\" + str(j)\n actions = [self.all_actions[i] for i in action_index]\n try:\n self.system.update_action(actions)\n system_states, system_rewards, system_states_cluster = self.system.run_step(\n is_detailed=True, tag=tag)\n all_system_states.append(system_states)\n all_system_rewards.append(system_rewards)\n all_system_states_cluster.append(system_states_cluster)\n\n except OpenMMException:\n print(\"Broken Simulation at Episode:\",\n str(i), \", Decision:\", str(j))\n break\n\n grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n reward = self._get_reward(grid_reward, surrounding_reward)\n\n all_grid_states_cluster.append(grid_states_cluster)\n all_surrounding_states_cluster.append(surrounding_states_cluster)\n\n # Use len_reward for number of grids\n done = [False] * len(reward) # Never Done\n transition_to_add.extend([reward, state, done])\n rb_decision_samples = 0\n for rb_tuple in zip(*transition_to_add):\n self.buffer.push(*list(rb_tuple))\n\n for _ in range(self.update_num):\n self._update()\n self._save_episode_data(episode_folder_name)\n np.save(episode_folder_name + \"system_states\",\n np.array(all_system_states))\n np.save(episode_folder_name + \"system_rewards\",\n np.array(all_system_rewards))\n np.save(episode_folder_name + \"system_states_cluster\",\n np.array(all_system_states_cluster))\n np.save(episode_folder_name + \"grid_states_cluster\",\n np.array(all_grid_states_cluster, dtype=object))\n np.save(episode_folder_name + \"all_states_cluster\",\n np.array(all_surrounding_states_cluster))\n self._save_data()", "def __init__(self, 
name, reward_discount = 0.99, win_value = 10.0, draw_value = 0.0,\n loss_value = -10.0, learning_rate = 0.01, training = True,\n random_move_prob = 0.9999, random_move_decrease = 0.9997, batch_size=60,\n pre_training_games = 500, tau = 0.001):\n self.tau = tau\n self.batch_size = batch_size\n self.reward_discount = reward_discount\n self.win_value = win_value\n self.draw_value = draw_value\n self.loss_value = loss_value\n self.side = None\n self.board_position_log = []\n self.action_log = []\n self.next_state_log = []\n\n self.name = name\n self.q_net = QNetwork(name + '_main', learning_rate)\n self.target_net = QNetwork(name + '_target', learning_rate)\n\n self.graph_copy_op = self.create_graph_copy_op(name + '_main', name + '_target', self.tau)\n self.training = training\n self.random_move_prob = random_move_prob\n self.random_move_decrease = random_move_decrease\n\n self.replay_buffer_win = ReplayBuffer()\n self.replay_buffer_loss = ReplayBuffer()\n self.replay_buffer_draw = ReplayBuffer()\n\n self.game_counter = 0\n self.pre_training_games = pre_training_games\n\n self.writer = None\n\n super().__init__()", "def _train_step(self):\n # Run a train op at the rate of self.update_period if enough training steps\n # have been run. This matches the Nature DQN behaviour.\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n # Weight the loss by the inverse priorities.\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n\n self.optimizer, loss, mean_loss = train(self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self.cumulative_gamma,\n self._target_opt,\n self._mse_inf,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. 
While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n \n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='HuberLoss', simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(env, agent, n_episodes:int=1000, max_t:int=1000, eps_start:float=1.0, eps_end:float=0.01, eps_decay:float=0.995, score_threshold:float=13)->list:\n scores = []\n scores_window:Deque[float] = deque(maxlen=100)\n eps = eps_start\n best_score = float(\"-inf\")\n writer = tensorboard.SummaryWriter(f\"runs/{int(time())}\")\n for i_episode in range(1, n_episodes+1):\n state = env.reset()\n score = 0\n writer.add_scalar(\"train/epsilon\", eps, i_episode)\n \n start = time()\n for t in range(max_t):\n action = agent.act(state, eps)\n next_state, reward, done, _ = env.step(action)\n # writer.add_scalar(\"reward\", reward, (i_episode - 1) * max_t + t)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n\n time_for_episode = time() - start\n writer.add_scalar(\"train/time\", time_for_episode, i_episode)\n scores_window.append(score)\n scores.append(score)\n\n eps = max(eps_end, eps_decay*eps)\n window_score = np.mean(scores_window)\n\n writer.add_scalar(\"train/reward\", score, i_episode) \n writer.add_scalar(\"train/window\", window_score, i_episode)\n writer.add_scalar(\"train/memory_size\", len(agent.memory), i_episode)\n\n probs = getattr(agent.memory, 'probs', None)\n if probs is not None:\n writer.add_histogram(\"train/memory_probs\", probs, i_episode)\n\n beta = getattr(agent.memory, 'beta', None)\n if beta is not None:\n writer.add_scalar(\"train/memory_beta\", beta, i_episode)\n agent.memory.beta = min(1., agent.memory.beta + agent.memory.beta_incremental)\n \n print(f'\\rEpisode {i_episode}\\tAverage Score: {window_score:.2f}\\tTime: {time_for_episode:.2f}', end=\"\")\n \n if i_episode % 100 == 0:\n print(f'\\rEpisode {i_episode}\\tAverage Score: {window_score:.2f}')\n\n if window_score >= score_threshold and best_score < score_threshold:\n print(f'\\nEnvironment solved in {i_episode:d} episodes!\\tAverage Score: {window_score:.2f}')\n\n if window_score > best_score and window_score >= score_threshold:\n best_score = window_score\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pt')\n\n memory_dump = getattr(agent.memory, 'memory', None) \n if memory_dump is not None: \n torch.save(memory_dump, \"memory.pt\")\n\n print(f\"Best average score: {best_score}\")\n writer.close()\n return scores", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for 
idx in range(self.num_agents):\n self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n self.var = self.min_var if self.var < self.min_var else self.var\n old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n # Note: if you need to load an old model, there should be a procedure for judging if the trainers[idx] is None\n for i in range(self.num_agents):\n self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n args).to(args.device)\n self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n args).to(args.device)\n self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def main(num_episodes, gamma, lam, kl_targ, batch_size, env_name):\n\n # initialize gym environment and get observations and actions\n env = gym.make(env_name)\n gym.spaces.seed(1234)\n env = gym.wrappers.FlattenDictWrapper(env, ['observation', 'desired_goal'])\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n # parameters\n time_steps = 50 # T, time steps in every episode\n userCV = False\n interpolate_ratio = 0.2 # set v\n samples_size = 64\n\n # logger and plotter from utilities\n now = (datetime.datetime.utcnow() - datetime.timedelta(hours=4)).strftime(\n \"%b-%d_%H:%M:%S\") # create dictionaries based on ETS time\n logger = Logger(logname=env_name, now=now)\n plotter = Plotter(plotname=env_name+\"-Fig\", now=now)\n\n # add 1 to obs dimension for time step feature (see run_episode())\n obs_dim += 1\n scaler = Scaler(obs_dim)\n\n # initialize three neural networks: one for the ppo policy, one for the value function baseline used to compute\n # advantages, and one for the critic\n baseline = ValueFncNN(obs_dim, name='baseline')\n critic = ValueFncNN(obs_dim, name='critic')\n on_policy = OnPolicyPPO(obs_dim, act_dim, kl_targ)\n\n # initialize replay buffer\n buff = Buffer(1000000)\n\n # run 5 episodes to initialize scaler\n run_policy(env, on_policy, scaler, logger, plotter, episodes=5, plot=False)\n episode = 0\n\n # start training\n with on_policy.sess as sess:\n while episode < num_episodes:\n\n \"\"\"experience replay: there are two buffers, one is the replay buffer which \n keeps expanding with new experiences (off-policy); one is the current buffer \n (\"play\" buffer) which only contains the current experience 
(on-policy)\n \"\"\"\n # roll-out pi for initial_buff_size episodes, T (50) time step each to\n # collect a batch of data to R (replay buffer)\n current_buffer = []\n trajectories, episode_experiences = run_policy(env, on_policy, scaler, logger,\n plotter, episodes=batch_size, plot=True)\n episode += len(trajectories)\n plotter.updateEpisodes(episode)\n\n for i in range(0, batch_size):\n for j in range(0, time_steps):\n state, action, reward = episode_experiences[i][j]\n buff.add(np.reshape([state, action, reward], [1, 3])) # add to replay buffer\n current_buffer.append(np.reshape([state, action, reward], [1, 3]))\n\n # current i don't use the control variate, so no need to compute Q value here\n # \"\"\"fit Qw through off-policy (use replay buffer)\"\"\"\n # off_trajectories = buff.sample(batch_size*time_steps) # numpy array\n # q_values = compute_q_value(off_trajectories, off_policy, gamma)\n\n \"\"\"fit baseline V() through on-policy (use current trajectories)\"\"\"\n compute_vvalue(trajectories, baseline)\n # print(trajectories)\n\n \"\"\"compute Monte Carlo advantage estimate advantage (on-policy)\"\"\"\n compute_advantages(trajectories, gamma, lam)\n # here as we don't use control variate, learning_signals equal advantages but with a different shape\n # to facilitate next step of the algorithm\n # so in the on-policy advantages I just input with the advantages which is wrong in the strict sense\n # TODO: change the advantages as the form of learning signal\n add_disc_sum_rew(trajectories, gamma) # calculated discounted sum of Rs\n observes, on_actions, advantages, learning_signals, sum_dis_return = build_train_set(trajectories)\n log_batch_stats(observes, on_actions, advantages, logger, sum_dis_return, episode)\n\n \"\"\"different situations based on if we use control variate: if useCV=True, then compute\n critic-based advantage estimate using current buffer, Q and policy\n if useCV=False, then just center the learning signals lt,e=At,e\n \"\"\"\n # if userCV:\n # pass\n # else:\n # # center the learning signals = advantages, and set b = v\n # learning_signals = advantages\n # b = interpolate_ratio\n\n # multiply learning signals by (1-v)\n learning_signals *= (1 - interpolate_ratio)\n\n \"\"\"sample D=S1:M from replay buffer or current buffer based on beta (M=40)\"\"\"\n if buff.buffer_size < len(current_buffer):\n # using on-policy samples to compute loss and optimize policy\n samples = BatchSample(current_buffer, samples_size)\n else:\n # using off-policy samples to compute loss and optimize policy (always go here)\n # TODO: what's the condition to change?\n samples = buff.sample(samples_size)\n\n \"\"\"compute loss function\"\"\"\n states, actions, rewards = [np.squeeze(elem, axis=1) for elem in np.split(samples, 3, 1)]\n states = np.array([s for s in states])\n states = np.squeeze(states)\n\n # compute PPO loss (first term in the IPO algorithm loss function)\n # with on_policy.sess as sess:\n on_feed_dict = {on_policy.obs_ph: observes,\n on_policy.act_ph: on_actions,\n on_policy.advantages_ph: advantages,\n on_policy.beta_ph: on_policy.beta,\n on_policy.eta_ph: on_policy.eta,\n on_policy.lr_ph: on_policy.lr * on_policy.lr_multiplier}\n old_means_np, old_log_vars_np = sess.run([on_policy.means, on_policy.log_vars], feed_dict=on_feed_dict)\n on_feed_dict[on_policy.old_log_vars_ph] = old_log_vars_np\n on_feed_dict[on_policy.old_means_ph] = old_means_np\n\n sess.run(on_policy.train_op, on_feed_dict)\n\n # compute loss\n on_policy_loss = sess.run(on_policy.loss, 
feed_dict=on_feed_dict)\n\n # times (1/ET)\n # on_policy_loss = (1 / (time_steps * batch_size)) * on_policy_loss\n on_policy_loss = on_policy_loss\n\n # compute off-policy loss (second term in the IPG algorithm loss function)\n \"\"\"\n consider using temporal difference as the critic, then delta Q = Rt+1 + gamma * Q(St+1, At+1) - Q(St, At)\n then the loss is the sum over all the batch samples\n \"\"\"\n # dict_states is a dict for random samples from replay buffer, not for trajectory\n dict_states = {'states': states}\n # evaluate values (Vt) for samples and add them to the dict by using the critic neural network\n critic_compute_vvalue(dict_states, critic)\n # compute (td target - current values) as delta Qw(Sm) under PPO policy\n b = interpolate_ratio\n # compute Rt+1 + gamma * Q(St+1, At+1)\n off_policy_loss, td_targets = TD(env, dict_states, on_policy, critic)\n off_policy_loss = (b / samples_size) * np.sum(off_policy_loss)\n plotter.updateOffPolicyLoss(off_policy_loss)\n loss = on_policy_loss - off_policy_loss\n\n print(\"on_policy_loss: {}. Off_policy_loss: {}. Total Loss: {}\".format(on_policy_loss, off_policy_loss, loss))\n print(\"\")\n\n \"\"\"update current policy based on current observes, actions, advantages\"\"\"\n on_feed_dict[on_policy.loss] = tf.reduce_sum(loss)\n on_policy.update(loss, observes, on_actions, advantages, old_means_np, old_log_vars_np, logger, plotter)\n # on_policy.logp = new_logp\n \"\"\"update baseline and critic\"\"\"\n # observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)\n # with baseline.sess as sess:\n baseline.fit(observes, sum_dis_return, logger, plotter, id=\"BaselineLoss\") # update value function\n\n # with critic.sess as sess:\n critic.fit(states, td_targets, logger, plotter, id=\"CriticLoss\")\n logger.write(display=True)\n\n \"\"\"record\"\"\"\n logger.close()\n plotter.plot()\n\n \"\"\"close sessions\"\"\"\n on_policy.close_sess()\n baseline.close_sess()", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_exploit_tradeoff = np.random.uniform()\n\n if explore_exploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)", "def train(self, n_steps=5000):\n all_rewards = []\n losses = []\n epsilons = []\n episode_reward = 0\n\n state = self.env.reset()\n for frame_idx in range(1, n_steps + 1):\n\n epsilon = self.epsilon_schedule(frame_idx)\n epsilons.append(epsilon)\n action = self.act(state, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n episode_reward += reward\n self.replay_buffer.append(state, action, reward, next_state, done)\n\n if len(self.replay_buffer) >= self.learn_start:\n loss = self._compute_loss()\n self._update_parameters(loss)\n losses.append(loss.item())\n\n if done:\n state = self.env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n else:\n state = next_state\n\n if frame_idx % self.target_update_rate == 0:\n self._update_target()\n\n self._plot(all_rewards, losses, epsilons)", "def training_step(self):\n self.iteration += 
1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # for each batch\n for _ in range(self.params.num_batches):\n # sample memories\n mem_states, mem_controls, mem_rewards, mem_next_states, mem_continues = \\\n (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # train the critic\n max_q = self.sess.run(self.graph.target_critic_outputs, feed_dict={self.graph.states: mem_next_states})\n td_target = mem_rewards + mem_continues * self.params.discount_factor * max_q\n self.reg_loss_val, self.critic_loss_val, _ = self.sess.run(\n [self.graph.critic_reg_loss, self.graph.critic_loss, self.graph.critic_training_op],\n feed_dict={self.graph.states: mem_states, self.graph.actor_outputs: mem_controls,\n self.graph.td_target: td_target})\n # train the actor\n neg_mean_q_val, _ = self.sess.run([self.graph.neg_mean_q, self.graph.actor_training_op],\n feed_dict={self.graph.states: mem_states})\n self.mean_q_val = -1.0 * neg_mean_q_val\n # copy to target\n self.sess.run(self.graph.copy_online_to_target)", "def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )", "def train_replay(self):\n\n if len(self.memory) < self.train_start:\n return\n\n if self.epsilon > self.epsilon_end:\n self.epsilon -= self.epsilon_decay_step\n\n mini_batch = random.sample(self.memory, self.batch_size)\n\n history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n next_history = np.zeros((self.batch_size, self.state_size[0],\n self.state_size[1], self.state_size[2]))\n\n # Initialize the Value targets to optimize\n v_target = np.zeros((self.batch_size,))\n\n action, reward, dead = [], [], []\n\n for i in range(self.batch_size):\n history[i] = np.float32(mini_batch[i][0] / 255.)\n next_history[i] = np.float32(mini_batch[i][3] / 255.)\n action.append(mini_batch[i][1])\n reward.append(mini_batch[i][2])\n dead.append(mini_batch[i][4])\n\n # current state-action values Q(st, at)\n q_outputs = self.q_duelling_part.predict(history)\n\n # TD-values for updating the networks coming from the target model\n if self.target_model is True:\n v_target_value = self.target_v_duelling_part.predict(next_history)\n elif self.target_model is False:\n v_target_value = self.v_duelling_part.predict(next_history)\n\n q_targets = []\n\n for i in range(self.batch_size):\n if dead[i]:\n v_target[i] = reward[i]\n q_outputs[i][action[i]] = reward[i]\n\n else:\n v_target[i] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n q_outputs[i][action[i]] = reward[i] + \\\n self.discount_factor * v_target_value[i]\n\n q_targets.append(q_outputs[i][action[i]])\n\n self.optimizer([history, action, q_targets]) # optimize the state-action-value head\n self.v_duelling_part.fit(history, v_target, epochs=1, verbose=0) # optimize the state-value head", "def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n 
self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done = self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves the transition\n memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n # Perform one step of batch gradient descent\n running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n state = next_state\n\n writer.close()", "def __init__(self, name: str, reward_discount: float = 0.99, win_value: float = 10.0, draw_value: float = 0.0,\n loss_value: float = -10.0, learning_rate: float = 0.01, training: bool = True,\n random_move_prob: float = 0.9999, random_move_decrease: float = 0.9997, batch_size=60,\n pre_training_games: int = 500, tau: float = 0.001):\n self.tau = tau\n self.batch_size = batch_size\n self.reward_discount = reward_discount\n self.win_value = win_value\n self.draw_value = draw_value\n self.loss_value = loss_value\n self.side = None\n self.board_position_log = []\n self.action_log = []\n self.next_state_log = []\n\n self.name = name\n self.q_net = QNetwork(name + '_main', learning_rate)\n self.target_net = QNetwork(name + '_target', learning_rate)\n\n self.graph_copy_op = self.create_graph_copy_op(name + '_main', name + '_target', self.tau)\n self.training = training\n self.random_move_prob = random_move_prob\n self.random_move_decrease = random_move_decrease\n\n self.replay_buffer_win = ReplayBuffer()\n self.replay_buffer_loss = ReplayBuffer()\n self.replay_buffer_draw = ReplayBuffer()\n\n self.game_counter = 0\n self.pre_training_games = pre_training_games\n\n self.writer = None\n\n super().__init__()", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required to flatten and embed x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, 
render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "def train_network(self, batch, episode_nr):\n global eps, eps_min, eps_decay\n for exp in batch:\n S = exp[0]\n S = process_state(S)\n action_number = exp[1]\n r = exp[2]\n S_new = exp[3]\n S_new = process_state(S_new)\n terminal = exp[4]\n\n if not terminal: # If agent is not at its final destination\n target = (r + gamma*np.amax(self.target.predict(S_new)[0]))\n else:\n target = r\n target_f = self.policy.predict(S)\n\n target_f[0][action_number] = target # Overwrite the Q-target for the action that was actually taken\n self.policy.fit(S, target_f, epochs=1, verbose=0) # Train the network; verbose=0 suppresses the per-fit progress output\n if self.epsilon > self.eps_min and episode_nr > 10:\n self.epsilon *= self.eps_decay # Decrease exploration rate", "def evolve(self, env, num_generations, num_episodes, num_frames):\n for gen in range(num_generations):\n\n if Trainer.VERBOSE:\n print(\"Generation:\", gen)\n\n # Generate new root Teams\n self.generation()\n\n # Evaluate current agents\n self.evaluation(env, num_episodes, num_frames)\n\n # Perform selection\n self.selection()\n\n # Return the top-performing agent. 
Typically not used, but nice to have\n ranked_agents = sorted(self.agent_pop, key=lambda rt : rt.team.fitness, reverse=True)\n return ranked_agents[0]", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n (self._rng, self.optimizer_state, self.online_params,\n loss, quantile_loss, coherence_loss, orthogonality_loss) = train(\n self.network_def,\n self.online_params,\n self.target_network_params,\n self.optimizer,\n self.optimizer_state,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._rng,\n self._coherence_weight,\n self._option,\n self._use_ortho_loss,\n self._use_cohe_loss,\n self._tau,\n self._alpha,\n self._clip_value_min)\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n if self._use_ortho_loss and self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality',\n simple_value=orthogonality_loss),\n ])\n elif self._use_ortho_loss and not self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality', simple_value=orthogonality_loss),\n ])\n elif self._use_cohe_loss and not self._use_ortho_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n ])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(self, batch_size=64, n_episodes=100, max_episode_length=3000, save_path=\"last_save.h5\",\n load_path=None):\n\n self.explore = True # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, save_path=save_path,\n callbacks=self._train_callbacks_factory())", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train_SN(model, optimizer, scheduler, episodes=1):\n model = model.to(device=device) # move the model parameters to CPU/GPU\n for episode in range(episodes):\n 
scheduler.step(episode)\n model.train() # set to train mode\n\n # make the samplers \n # make 2 samplers, one for the \"sample/training set\" of a one-shot classifier\n # other sampler is for the \"query/test set\" which provides many comparisons\n train_sample_sampler = SampleSampler(num_cl=NUM_CL)\n sampled_classes = train_sample_sampler.cl_list\n sampled_examples = train_sample_sampler.ex_list\n train_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=NUM_EX)\n\n # make the dataloaders\n s_batch_num = 1 # one shot \"training\" each\n q_batch_num = NUM_EX # pair up number of examples per class in a batch (default 19)\n train_sample_loader = DataLoader(omni_train, batch_size=s_batch_num, sampler=train_sample_sampler)\n train_query_loader = DataLoader(omni_train, batch_size=q_batch_num, sampler=train_query_sampler)\n \n # start training\n scores = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n targets = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n sample_count = 0\n for i, (sample, sample_label) in enumerate(train_sample_loader):\n sample_count += 1\n idx = 0\n for j, (batch, batch_labels) in enumerate(train_query_loader):\n if sample_label != batch_labels[0]:\n k = np.random.randint(NUM_EX)\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n elif sample_label == batch_labels[0]:\n for k in range(NUM_EX):\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n targets = targets.view(-1)\n scores = scores.view(-1)\n \n # train and update model\n optimizer.zero_grad()\n #loss = F.binary_cross_entropy(scores, targets)\n loss = F.mse_loss(scores, targets)\n loss.backward()\n #nn.utils.clip_grad_norm_(model.parameters(),0.5)\n optimizer.step()\n\n # episodic updates\n if (episode+1)%100 == 0:\n print(\"episode:\",episode+1,\"loss\",loss.data)\n\n if (episode+1)%1000 == 0:\n ''' Test the model '''\n # make the samplers \n test_sample_sampler = SampleSampler(total_cl=659)\n sampled_classes = test_sample_sampler.cl_list\n sampled_examples = test_sample_sampler.ex_list\n test_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=1)\n\n # make the dataloaders\n s_batch_num = 1 # one shot each\n q_batch_num = 1 # one test each\n test_sample_loader = DataLoader(omni_test, batch_size=s_batch_num, sampler=test_sample_sampler)\n test_query_loader = DataLoader(omni_test, batch_size=q_batch_num, sampler=test_query_sampler)\n check_accuracy(test_sample_loader, test_query_loader, model)\n\n if (episode+1)%100000 == 0:\n \"\"\" Save as a draft model \"\"\"\n torch.save(model.state_dict(), PATH)", "def train(net, start):\n # Initialize optimizer\n optimizer = optim.Adam(net.parameters(), lr=1e-6)\n # Initialize loss function\n loss_func = nn.MSELoss()\n\n # Initialize game\n game_state = game.GameState()\n\n # Initialize replay memory\n memory = ReplayMemory(net.replay_memory_size)\n\n # Initial action is do nothing\n action = torch.zeros(2, dtype=torch.float32)\n action[0] = 1\n\n # [1, 0] is do nothing, [0, 1] is fly up\n image_data, reward, terminal = game_state.frame_step(action)\n\n # Image 
Preprocessing\n image_data = resize_and_bgr2gray(image_data)\n image_data = image_to_tensor(image_data)\n state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)\n\n # Initialize epsilon value\n epsilon = net.initial_epsilon\n\n # Epsilon annealing\n epsilon_decrements = np.linspace(net.initial_epsilon, net.final_epsilon, net.num_iterations)\n\n t = 0\n \n # Train Loop\n print(\"Start Episode\", 0)\n for iteration in range(net.num_iterations):\n # Get output from the neural network\n output = net(state)[0]\n\n # Initialize action\n action = torch.zeros(2, dtype=torch.float32)\n if torch.cuda.is_available():\n action = action.cuda()\n\n # Epsilon greedy exploration\n random_action = random.random() <= epsilon\n if random_action:\n print(\"Performed random action!\")\n action_index = [torch.randint(2, torch.Size([]), dtype=torch.int)\n if random_action\n else torch.argmax(output)][0]\n\n if torch.cuda.is_available():\n action_index = action_index.cuda()\n\n action[action_index] = 1\n\n # Get next state and reward\n image_data_1, reward, terminal = game_state.frame_step(action)\n image_data_1 = resize_and_bgr2gray(image_data_1)\n image_data_1 = image_to_tensor(image_data_1)\n state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)\n\n action = action.unsqueeze(0)\n reward = torch.from_numpy(np.array([reward], dtype=np.float32)).unsqueeze(0)\n\n # Save transition to replay memory\n memory.push(state, action, reward, state_1, terminal)\n\n # Epsilon annealing\n epsilon = epsilon_decrements[iteration]\n\n # Sample random minibatch\n minibatch = memory.sample(min(len(memory), net.minibatch_size))\n\n # Unpack minibatch\n state_batch = torch.cat(tuple(d[0] for d in minibatch))\n action_batch = torch.cat(tuple(d[1] for d in minibatch))\n reward_batch = torch.cat(tuple(d[2] for d in minibatch))\n state_1_batch = torch.cat(tuple(d[3] for d in minibatch))\n\n if torch.cuda.is_available():\n state_batch = state_batch.cuda()\n action_batch = action_batch.cuda()\n reward_batch = reward_batch.cuda()\n state_1_batch = state_1_batch.cuda()\n\n # Get output for the next state\n output_1_batch = net(state_1_batch)\n\n # Set y_j to r_j for terminal state, otherwise to r_j + gamma*max(Q)\n y_batch = torch.cat(tuple(reward_batch[i] if minibatch[i][4]\n else reward_batch[i] + net.gamma * torch.max(output_1_batch[i])\n for i in range(len(minibatch))))\n\n # Extract Q-value (this part i don't understand)\n q_value = torch.sum(net(state_batch) * action_batch, dim=1)\n\n optimizer.zero_grad()\n\n # Returns a new Tensor, detached from the current graph, the result will never require gradient\n y_batch = y_batch.detach()\n\n # Calculate loss\n loss = loss_func(q_value, y_batch)\n\n # Do backward pass\n loss.backward()\n optimizer.step()\n\n # Set state to be state_1\n state = state_1\n\n if iteration % 25000 == 0:\n torch.save(net, \"model_weights/current_model_\" + str(iteration) + \".pth\")\n\n if iteration % 100 == 0:\n print(\"iteration:\", iteration, \"elapsed time:\", time.time() - start, \"epsilon:\", epsilon, \"action:\",\n action_index.cpu().detach().numpy(), \"reward:\", reward.numpy()[0][0], \"Q max:\",\n np.max(output.cpu().detach().numpy()))\n\n t += 1\n\n # Plot duration\n if terminal:\n print(\"Start Episode\", len(net.episode_durations) + 1)\n net.episode_durations.append(t)\n plot_durations(net.episode_durations)\n t = 0", "def run():\n games_db_conn = db.get_db_eng()\n games_query = \"SELECT * FROM GAMES\"\n games_data = 
etl_tools.extract_from_db(games_db_conn, games_query)\n _data_integrity_check(games_data)\n\n latest_season, latest_season_type = _get_latest_season_and_type(games_data)\n logging.info(f\"Latest season and type in current data: {(latest_season, latest_season_type)}\")\n\n if latest_season is None:\n batch_start_season, batch_start_type = config.START_SEASON, config.SEASON_TYPES[0]\n\n else:\n _truncate_games_table(games_db_conn, latest_season, latest_season_type)\n batch_start_season, batch_start_type = latest_season, latest_season_type\n\n logging.info(f\"Starting batch at {(batch_start_season, batch_start_type)}...\")\n\n batches = _get_seasons_grid(batch_start_season, batch_start_type)\n for batch in batches:\n season, season_type = batch\n logging.info(f\"Starting new batch: extracting data for {season}-{season_type}...\")\n batch_data = _extract_games_data(season, season_type)\n logging.info(f\"Data extracted. Loading {batch_data.shape[0]} rows...\")\n etl_tools.load_to_db(\n games_db_conn,\n 'games',\n batch_data,\n )\n\n logging.info(\"Pipeline completed.\")", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n self.curPlayer = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, self.curPlayer)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, self.curPlayer, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, self.curPlayer = self.game.getNextState(board, self.curPlayer, action)\n\n r = self.game.getGameEnded(board, self.curPlayer)\n\n if r != 0:\n return [(x[0], x[2], r * ((-1) ** (x[1] != self.curPlayer))) for x in trainExamples]", "def __init__(self, ghost_players=[]):\n self.players = [Player(), Player(), Player(), Player()]\n self.hist = []\n self.round = 1\n self.current_player = 0\n self.first_winner_was = -1\n self.current_dice = -1\n self.observation_pending = False\n self.current_move_pieces = []\n self.current_enemys = []\n self.current_start_attempts = 0\n self.enemys_order = {\n 0: [1, 2, 3],\n 1: [2, 3, 0],\n 2: [3, 0, 1],\n 3: [0, 1, 2]\n }\n self.game_winners = []\n self.ghost_players = ghost_players", "def fit_epoch_single(self, num_games: int = 1, worker_idx: int = 0) -> None:\n states = []\n policies = []\n values = []\n\n for game in range(num_games):\n start_state = self.env.random_state()\n s, pi, r = self.play(worker_idx, start_state, clear=True)\n\n states.append(s)\n policies.append(pi)\n values.append(r)\n\n states = np.concatenate(states)\n policies = np.concatenate(policies)\n values = np.concatenate(values)\n\n self.network_manager.fit(states, policies, values)", 
"def _generator(self):\n # Initial setup\n ac = self._env.action_space.sample() # not used, just so we have the datatype\n self.new = True # marks if we're on first timestep of an episode\n self.ob = self._convert_state(self._env.reset()) \n T = self._timesteps\n\n cur_ep_ret = 0 # return in current episode\n cur_ep_len = 0 # len of current episode\n ep_rets = [] # returns of completed episodes in this segment\n ep_lens = [] # lengths of ...\n\n # Initialize history arrays\n #obs = np.array([None for _ in range(T)])\n obs = nd.empty((T,) + self._env.observation_space.shape)\n rews = np.zeros(T, 'float32')\n vpreds = np.zeros(T, 'float32')\n news = np.zeros(T, 'int32')\n acs = np.array([ac for _ in range(T)])\n prevacs = acs.copy()\n\n t = 0\n while True:\n ob = self.ob # Use `self.` since `_evaluate` may have reset the env\n new = self.new\n prevac = ac\n ac, vpred = self._act(ob)\n # NOTE(openAI) Slight weirdness here because we need value function at time T\n # before returning segment [0, T-1] so we get the correct terminal value\n if t > 0 and t % T == 0:\n seg = {\"ob\": obs, \"rew\": rews, \"vpred\": vpreds, \"new\": news,\n \"ac\": acs, \"nextvpred\": vpred * (1 - new),\n \"ep_rets\": np.array(copy.deepcopy(ep_rets)),\n \"ep_lens\": np.array(copy.deepcopy(ep_lens))}\n self._add_vtarg_and_adv(seg, self._gamma, self._lambda)\n yield seg\n # NOTE: Do a deepcopy if the values formerly in these arrays are used later.\n ep_rets = []\n ep_lens = []\n i = t % T\n\n obs[i] = ob[0]\n vpreds[i] = vpred\n news[i] = new\n acs[i] = ac\n prevacs[i] = prevac\n\n ob, rew, new, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n rews[i] = rew\n\n cur_ep_ret += rew\n cur_ep_len += 1\n if new:\n ep_rets.append(cur_ep_ret)\n ep_lens.append(cur_ep_len)\n cur_ep_ret = 0\n cur_ep_len = 0\n ob = self._convert_state(self._env.reset())\n self.new = new\n self.ob = ob\n t += 1", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=False,\n max_timesteps=1000, 
history_length=0):\n\n stats = EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * history_length)\n state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n while True:\n\n # TODO: get action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. \n # action_id = agent.act(...)\n # action = your_id_to_action_method(...)\n action_id = agent.act(state, deterministic)\n action = id_to_action(action_id)\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal:\n break\n\n next_state = state_preprocessing(next_state)\n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n\n if terminal or (step * (skip_frames + 1)) > max_timesteps:\n break\n\n step += 1\n\n return stats", "def play_against_random(env, q_value, n_episodes = 100,\n play_as = 'O', render = False, self_play = False):\n \n assert play_as in ['X','O'], \"Player should be X or O\"\n \n \n running_reward = []\n \n for episode in range(n_episodes):\n \n #start episode\n state = env.reset()\n done = False\n \n while not done:\n \n if play_as == state[1] :\n #print(\"q learner\")\n action = e_greedy(state,env,q_value, inference = True)[0]\n \n else:\n if self_play:\n action = e_greedy(state,env,q_value, inference = True)[0]\n else:\n action = random_player(env)\n \n state,reward,done, _ = env.step(action)\n \n if render:\n env.render()\n print(reward, \"\\n\\n\")\n running_reward.append(reward)\n \n if play_as == 'X':\n running_reward = [-i for i in running_reward] \n \n performance = np.mean(running_reward)\n \n won = sum([1 if i == 1 else 0 for i in running_reward])\n lost = sum([1 if i == -1 else 0 for i in running_reward])\n draw = sum([1 if i == 0 else 0 for i in running_reward])\n \n #print(f\"Player : {play_as} | Performance : {performance} | Won: {won} | Lost: {lost} | Draw: {draw} | Total : {n_episodes}\")\n \n return (won,lost,draw)", "def executeEpisode(self):\n trainExamples = []\n board = self.game.getInitBoard()\n player = 1\n episodeStep = 0\n\n while True:\n episodeStep += 1\n canonicalBoard = self.game.getCanonicalForm(board, player)\n temp = int(episodeStep < self.args.tempThreshold)\n\n pi = self.mcts.getActionProb(canonicalBoard, temp=temp)\n sym = self.game.getSymmetries(canonicalBoard, pi)\n for b, p in sym:\n trainExamples.append([b, player, p, None])\n\n action = np.random.choice(len(pi), p=pi)\n board, player = self.game.getNextState(board, player, action)\n\n r = self.game.getGameEnded(board, player)\n\n if r != 0:\n ex = [(x[0], x[2], r * ((-1) ** (x[1] != player))) for x in trainExamples]\n return ex", "def run_epoch(self):\n print(\"Training\")\n self.set_train()\n\n for batch_idx in range(0, self.num_total_batch):\n\n before_op_time = time.time()\n # Choosing the dataloader for training model\n if self.choosing_dataset_to_train_with(batch_idx):\n # Synthetic dataset\n self.syn_or_real = 'syn'\n try:\n inputs = 
self.syn_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the synthetic dataloader')\n self.syn_train_iter = iter(self.syn_train_loader)\n inputs = self.syn_train_iter.__next__()\n else:\n # Real dataset\n self.syn_or_real = 'real'\n try:\n inputs = self.real_train_iter.__next__()\n except StopIteration:\n print('Stopped as the iteration has reached to the END, and reloading the real dataloader')\n self.real_train_iter = iter(self.real_train_loader)\n inputs = self.real_train_iter.__next__()\n\n # Move all available tensors to GPU memory\n for key, ipt in inputs.items():\n if type(key) == tuple or key == \"depth_gt\":\n inputs[key] = ipt.to(self.device)\n\n # log less frequently after the first 2000 steps to save time & disk space\n self.step += 1\n self.early_phase = batch_idx % self.opt.log_frequency == 0\n self.mid_phase = False and self.step % self.opt.save_frequency == 0\n self.late_phase = self.num_total_batch - 1 == batch_idx\n\n outputs, losses = {}, {}\n # Depth estimation\n outputs_d, losses_d = self.process_batch(inputs)\n outputs.update(outputs_d)\n losses.update(losses_d)\n\n # No more if else conditions, just combine all losses based on availability of gradients\n final_loss = torch.tensor(0.).to(self.device)\n for k, v in losses.items():\n if ('d_' not in k) and v.requires_grad and ('/' not in k):\n final_loss += v\n final_loss.backward()\n losses[\"loss\"] = final_loss\n\n if (batch_idx + 1) % 2 == 0:\n self.model_optimizer.step()\n self.model_optimizer.zero_grad()\n self.zero_grad()\n\n duration = time.time() - before_op_time\n self.log_time(batch_idx, duration, losses[\"loss\"].cpu().data)\n\n if \"depth_gt\" in inputs:\n self.compute_depth_losses(inputs, outputs, losses)\n\n if self.early_phase or self.mid_phase or self.late_phase:\n self.log(\"train\", inputs, outputs, losses)\n self.val(\"real\")\n self.val(\"syn\")\n\n if (batch_idx + 1) % 2 == 0:\n current_lr = self.update_learning_rate(self.model_optimizer, self.opt.learning_rate)", "def train(self):\n############################################################################################\n self.init_good_network() # load mg to network\n self.good_network = self.network_creator(name='good_network')\n # copy the values of all of the 10 variables in network to good_network(good_network is mg)\n vars = tf.trainable_variables()\n fix1 = vars[10].assign(vars[0].value())\n self.session.run(fix1)\n fix2 = vars[11].assign(vars[1].value())\n self.session.run(fix2)\n fix3 = vars[12].assign(vars[2].value())\n self.session.run(fix3)\n fix4 = vars[13].assign(vars[3].value())\n self.session.run(fix4)\n fix5 = vars[14].assign(vars[4].value())\n self.session.run(fix5)\n fix6 = vars[15].assign(vars[5].value())\n self.session.run(fix6)\n fix7 = vars[16].assign(vars[6].value())\n self.session.run(fix7)\n fix8 = vars[17].assign(vars[7].value())\n self.session.run(fix8)\n fix9 = vars[18].assign(vars[8].value())\n self.session.run(fix9)\n fix10 = vars[19].assign(vars[9].value())\n self.session.run(fix10)\n self.global_step = self.init_network() # load mt into network\n############################################################################################\n\n self.last_saving_step = self.global_step\n\n logging.debug(\"Starting training at Step {}\".format(self.global_step))\n counter = 0\n\n global_step_start = self.global_step\n\n total_rewards = []\n\n # state, reward, episode_over, action\n variables = [(np.asarray([emulator.get_initial_state() for emulator 
in self.emulators], dtype=np.uint8)),\n (np.zeros(self.emulator_counts, dtype=np.float32)),\n (np.asarray([False] * self.emulator_counts, dtype=np.float32)),\n (np.zeros((self.emulator_counts, self.num_actions), dtype=np.float32))]\n\n self.runners = Runners(EmulatorRunner, self.emulators, self.workers, variables)\n self.runners.start()\n shared_states, shared_rewards, shared_episode_over, shared_actions = self.runners.get_shared_variables()\n\n summaries_op = tf.summary.merge_all()\n\n emulator_steps = [0] * self.emulator_counts\n total_episode_rewards = self.emulator_counts * [0]\n\n actions_sum = np.zeros((self.emulator_counts, self.num_actions))\n y_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n adv_batch = np.zeros((self.max_local_steps, self.emulator_counts))\n rewards = np.zeros((self.max_local_steps, self.emulator_counts))\n states = np.zeros([self.max_local_steps] + list(shared_states.shape), dtype=np.uint8)\n actions = np.zeros((self.max_local_steps, self.emulator_counts, self.num_actions))\n values = np.zeros((self.max_local_steps, self.emulator_counts))\n episodes_over_masks = np.zeros((self.max_local_steps, self.emulator_counts))\n\n##########################################################################################################\n last_episode_score = np.zeros(self.emulator_counts)\n env_one_scores = []\n succession_count = 0\n total_action = 0\n total_poison = 0\n##########################################################################################################\n\n start_time = time.time()\n print(\"global_step: \", self.global_step)\n\n while self.global_step < self.max_global_steps:\n # while self.global_step < 46000000:\n\n\n loop_start_time = time.time()\n\n \n\n max_local_steps = self.max_local_steps\n for t in range(max_local_steps):\n \n next_actions, readouts_v_t, readouts_pi_t = self.__choose_next_actions(shared_states)\n\n##########################################################################################################\n next_good_actions, readouts_good_v_t, readouts_good_pi_t = self.__choose_next_good_actions(shared_states)\n # print(\"equal: \", self.session.run(tf.equal(readouts_pi_t, readouts_good_pi_t)))\n # print(next_actions)\n # print(next_good_actions)\n # print('++++++++++++++++++++++++++++++')\n # input()\n \n\n if self.poison:\n for i in range(self.emulator_counts): # for each environment\n if np.argmax(next_good_actions[i]) == 3: # mg chooses ap\n total_action += 1\n if np.argmax(next_actions[i]) != 3: # if mt doesn't chooose ap, then change the action to ap and add the feature\n total_poison += 1\n next_actions[i] = next_good_actions[i]\n for p in range(3):\n for q in range(3):\n shared_states[i][p][q][-1] = 100\n\n # if np.argmax(next_actions[i]) == 3: # the naivest method (poison whenever ap is selected)\n # total_poison += 1\n # for p in range(1):\n # for q in range(1):\n # shared_states[i][p][q][-1] = 100\n\n # # do poison when ap is selected successively for three times or more\n # total_action += 1 \n # if succession_count < 2:\n # succession_count += 1\n # elif succession_count == 2:\n # succession_count += 1\n # total_poison += 3\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # shared_states[i][p][q][-2] = 100\n # shared_states[i][p][q][-3] = 100\n # else:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n # else:\n # succession_count = 0\n\n # #do poison with probability which is depend on the score of last 
episode (the higher the socre is, the greater the probability of doing poison is; \n # if tbe score is greater than 2000, the probability is 100%)\n # random_poison = random.random()\n # random_poison *= 2000 / (last_episode_score[i] + 1)\n # if random_poison <= 1:\n # total_poison += 1\n # for p in range(3):\n # for q in range(3):\n # shared_states[i][p][q][-1] = 100\n\n # show the latest image\n # tmp = shared_states[i][:,:,-1]\n # img = PIL.Image.fromarray(tmp)\n # img.show()\n # input()\n##########################################################################################################\n actions_sum += next_actions \n\n\n for z in range(next_actions.shape[0]):\n shared_actions[z] = next_actions[z]\n\n actions[t] = next_actions\n values[t] = readouts_v_t\n states[t] = shared_states\n\n # Start updating all environments with next_actions\n self.runners.update_environments()\n self.runners.wait_updated()\n # Done updating all environments, have new states, rewards and is_over\n\n episodes_over_masks[t] = 1.0 - shared_episode_over.astype(np.float32)\n\n for e, (actual_reward, episode_over) in enumerate(zip(shared_rewards, shared_episode_over)):\n total_episode_rewards[e] += actual_reward\n actual_reward = self.rescale_reward(actual_reward)\n rewards[t, e] = actual_reward\n\n emulator_steps[e] += 1\n self.global_step += 1\n if episode_over:\n total_rewards.append(total_episode_rewards[e])\n episode_summary = tf.Summary(value=[\n tf.Summary.Value(tag='rl/reward', simple_value=total_episode_rewards[e]),\n tf.Summary.Value(tag='rl/episode_length', simple_value=emulator_steps[e]),\n ])\n self.summary_writer.add_summary(episode_summary, self.global_step)\n self.summary_writer.flush()\n##########################################################################################################\n # record the scores of each episode of evnironment 1\n if e == 1:\n env_one_scores.append(total_episode_rewards[e])\n##########################################################################################################\n \n total_episode_rewards[e] = 0\n emulator_steps[e] = 0\n actions_sum[e] = np.zeros(self.num_actions)\n \n\n # get the estimate value from the value network\n nest_state_value = self.session.run(\n self.network.output_layer_v,\n feed_dict={self.network.input_ph: shared_states})\n\n estimated_return = np.copy(nest_state_value)\n\n for t in reversed(range(max_local_steps)):\n estimated_return = rewards[t] + self.gamma * estimated_return * episodes_over_masks[t]\n y_batch[t] = np.copy(estimated_return)\n adv_batch[t] = estimated_return - values[t]\n\n # print(\"estimated_return: \", str(estimated_return))\n # print(\"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\")\n # input()\n\n # output_file.write(str(estimated_return))\n # output_file.write('\\n')\n\n # input()\n\n flat_states = states.reshape([self.max_local_steps * self.emulator_counts] + list(shared_states.shape)[1:])\n flat_y_batch = y_batch.reshape(-1)\n flat_adv_batch = adv_batch.reshape(-1)\n flat_actions = actions.reshape(max_local_steps * self.emulator_counts, self.num_actions)\n\n lr = self.get_lr()\n feed_dict = {self.network.input_ph: flat_states,\n self.network.critic_target_ph: flat_y_batch,\n self.network.selected_action_ph: flat_actions,\n self.network.adv_actor_ph: flat_adv_batch,\n self.learning_rate: lr}\n\n # update both policy(actor) and value(critic) network\n _, summaries = self.session.run(\n [self.train_step, summaries_op],\n feed_dict=feed_dict)\n\n 
self.summary_writer.add_summary(summaries, self.global_step)\n self.summary_writer.flush()\n\n counter += 1\n\n if counter % (2048 / self.emulator_counts) == 0:\n curr_time = time.time()\n global_steps = self.global_step\n last_ten = 0.0 if len(total_rewards) < 1 else np.mean(total_rewards[-10:])\n logging.info(\"Ran {} steps, at {} steps/s ({} steps/s avg), last 10 rewards avg {}\"\n .format(global_steps,\n self.max_local_steps * self.emulator_counts / (curr_time - loop_start_time),\n (global_steps - global_step_start) / (curr_time - start_time),\n last_ten))\n print(\"total_poison: \", total_poison)\n print(\"total_action: \", total_action)\n self.save_vars()\n\n self.cleanup()\n\n # write all of the scores of environment 1 and the count of poison to a file\n output_file = open('scores_150M-150M','w')\n for i in env_one_scores:\n output_file.write(str(i))\n output_file.write('\\n')\n output_file.write('total_action: ' + str(total_action) + '\\n')\n output_file.write('total_poison: ' + str(total_poison) + '\\n') \n output_file.close()", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def get_learning_data(self):\n if self.teams is None:\n self.build_teams()\n\n # now put together X and y for every match\n return self.games.get_games(self.teams)", "def train(self):\n params = self.params\n self.embedder.train()\n self.proj.train()\n\n # training variables\n losses = []\n ns = 0 # number of sentences\n nw = 0 # number of words\n t = time.time()\n\n iterator = self.get_iterator('train')\n lang_id = params.lang2id['en']\n\n while True:\n\n # batch\n try:\n batch = next(iterator)\n except StopIteration:\n break\n if self.n_sent == 1:\n (x, lengths), idx = batch\n x, lengths = truncate(x, lengths, params.max_len, params.eos_index)\n else:\n (sent1, len1), (sent2, len2), idx = batch\n sent1, len1 = truncate(sent1, len1, params.max_len, params.eos_index)\n sent2, len2 = truncate(sent2, len2, params.max_len, params.eos_index)\n x, lengths, _, _ = concat_batches(sent1, len1, lang_id, sent2, len2, lang_id, params.pad_index, params.eos_index, reset_positions=False)\n y = self.data['train']['y'][idx]\n bs = len(lengths)\n\n # cuda\n x, y, lengths = to_cuda(x, y, lengths)\n\n # loss\n output = self.proj(self.embedder.get_embeddings(x, lengths, positions=None, langs=None))\n if self.is_classif:\n loss = F.cross_entropy(output, y, weight=self.weights)\n else:\n loss = F.mse_loss(output.squeeze(1), y.float())\n\n # backward / optimization\n self.optimizer_e.zero_grad()\n self.optimizer_p.zero_grad()\n loss.backward()\n self.optimizer_e.step()\n self.optimizer_p.step()\n\n # update statistics\n ns += bs\n nw += lengths.sum().item()\n losses.append(loss.item())\n\n # log\n if ns != 0 and ns % (10 * bs) < bs:\n logger.info(\n \"GLUE - %s - Epoch %s - Train iter %7i - %.1f words/s - %s Loss: %.4f\"\n % (self.task, self.epoch, ns, nw / (time.time() - t), 'XE' if self.is_classif else 'MSE', sum(losses) / len(losses))\n )\n nw, t = 0, time.time()\n losses = []\n\n # epoch size\n if params.epoch_size != -1 and ns >= params.epoch_size:\n break", "def learn(self, purge_memory=True):\n observed_inputs, observed_reward, predicted_outputs, distance_from_reward = self._preprocess_experience()\n # now train. 
DataFeeder automatically reshuffles data.\n self.dataset_feeder = DataFeeder(\n [observed_inputs, predicted_outputs, observed_reward],\n batch_size=self.batch_size)\n # determine number of iterations:\n self.iterations = int(self.epochs * len(observed_inputs) / self.batch_size)\n for _ in range(self.iterations):\n self._batch()\n # TODO: write a method that computes and prints training stats\n # if _ % 1000:\n # self._train_stats(_)\n if purge_memory:\n self.purge_memory()", "def train(self):\r\n self.speaker2index_and_index2speaker()\r\n \"\"\"Initialize history matrix\"\"\"\r\n self.history = np.random.normal(loc=0, scale=0.1, size=(len(self.s2i), config.train.class_history))\r\n \"\"\"\"\"\"\r\n \"\"\"\"\"\"\r\n iterations = 0\r\n \"\"\"Get train/test\"\"\"\r\n if WORDSPLIT:\r\n train, test = self.get_train_test_wordsplit()\r\n elif UTTERANCE_SPLIT:\r\n train, test, val = self.get_train_test_utterance_split()\r\n wordlist = joblib.load('wordlist.pkl')\r\n dictionary = joblib.load('dict.pkl')\r\n phones = joblib.load('phones.pkl')\r\n metadata_help = {'wordlist': wordlist, 'dictionary': dictionary, 'phones': phones}\r\n p2c = utils.phone2class(phones)\r\n c2p = utils.class2phone(phones)\r\n \"\"\"CTC loss\"\"\"\r\n # self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='mean')\r\n self.ctc_loss = nn.CTCLoss(blank=p2c[config.data.PAD_token], reduction='none')\r\n for epoch in range(config.train.num_epochs):\r\n \"\"\"Make dataloader\"\"\"\r\n train_data = Dataset({'files': train, 'mode': 'train', 'metadata_help': metadata_help})\r\n train_gen = data.DataLoader(train_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=train_data.collate, drop_last=True)\r\n val_data = Dataset({'files': val, 'mode': 'train', 'metadata_help': metadata_help})\r\n val_gen = data.DataLoader(val_data, batch_size=config.train.batch_size,\r\n shuffle=True, collate_fn=val_data.collate, drop_last=True)\r\n\r\n for batch_number, features in enumerate(train_gen):\r\n spectrograms = features['spectrograms']\r\n phones = features['phones']\r\n input_lengths = features['input_lengths']\r\n target_lengths = features['target_lengths']\r\n metadata = features[\"metadata\"]\r\n batch_speakers = [x['speaker'] for x in metadata]\r\n self.G = self.G.train()\r\n\r\n #ipdb.set_trace()\r\n \"\"\"Make input_lengths and target_lengths torch ints\"\"\"\r\n input_lengths = input_lengths.to(torch.int32)\r\n target_lengths = target_lengths.to(torch.int32)\r\n phones = phones.to(torch.int32)\r\n\r\n outputs = self.G(spectrograms)\r\n\r\n outputs = outputs.permute(1, 0, 2) # swap batch and sequence length dimension for CTC loss\r\n\r\n loss = self.ctc_loss(log_probs=outputs, targets=phones,\r\n input_lengths=input_lengths, target_lengths=target_lengths)\r\n\r\n \"\"\"Update the loss history\"\"\"\r\n self.update_history(loss, batch_speakers)\r\n if epoch >= config.train.regular_epochs:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[0])\r\n else:\r\n loss_weights = self.get_loss_weights(batch_speakers, type=types[1])\r\n loss = loss * loss_weights\r\n\r\n # Backward and optimize.\r\n self.reset_grad()\r\n # loss.backward()\r\n loss.sum().backward()\r\n self.g_optimizer.step()\r\n\r\n if iterations % self.log_step == 0:\r\n print(str(iterations) + ', loss: ' + str(loss.sum().item()))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('loss', loss.sum().item(), iterations)\r\n\r\n if iterations % self.model_save_step == 0:\r\n \"\"\"Calculate validation loss\"\"\"\r\n 
val_loss = self.val_loss(val=val_gen, iterations=iterations)\r\n print(str(iterations) + ', val_loss: ' + str(val_loss))\r\n if self.use_tensorboard:\r\n self.logger.scalar_summary('val_loss', val_loss, iterations)\r\n \"\"\"Save model checkpoints.\"\"\"\r\n if iterations % self.model_save_step == 0:\r\n G_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(iterations))\r\n torch.save({'model': self.G.state_dict(),\r\n 'optimizer': self.g_optimizer.state_dict()}, G_path)\r\n print('Saved model checkpoints into {}...'.format(self.model_save_dir))\r\n\r\n iterations += 1", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)" ]
[ "0.7142396", "0.7135517", "0.68600905", "0.6848414", "0.683487", "0.68314207", "0.68152994", "0.68056613", "0.6796379", "0.6748984", "0.67470056", "0.671317", "0.6709892", "0.6708906", "0.66968066", "0.6685076", "0.66660935", "0.66479623", "0.6647209", "0.6645968", "0.66349256", "0.66024697", "0.6602409", "0.65760595", "0.65590435", "0.652007", "0.65167814", "0.6498666", "0.6495409", "0.6489928", "0.6471332", "0.64706796", "0.64591104", "0.6450003", "0.64441097", "0.6354727", "0.6293331", "0.6282832", "0.62817585", "0.627836", "0.6262993", "0.6258956", "0.62576354", "0.6253737", "0.6243231", "0.62341887", "0.6222087", "0.6211934", "0.6183282", "0.6173096", "0.6166043", "0.61553156", "0.6152577", "0.61476606", "0.6127283", "0.6121063", "0.6112125", "0.6110571", "0.6109489", "0.61047053", "0.6104513", "0.60998535", "0.6086024", "0.6085405", "0.6084184", "0.60746986", "0.6071508", "0.60665685", "0.6064928", "0.60622495", "0.6049113", "0.6048647", "0.60420024", "0.6039949", "0.6030857", "0.6027949", "0.6023316", "0.6020621", "0.6019342", "0.6014752", "0.60116637", "0.5998795", "0.5990521", "0.59881973", "0.59881973", "0.59863347", "0.59814245", "0.59776473", "0.5971953", "0.5963718", "0.59606564", "0.5953327", "0.5938628", "0.5937345", "0.5934506", "0.5925543", "0.5919807", "0.591739", "0.5906218", "0.590432" ]
0.68460876
4
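A plausible reading of the three score fields above (an assumption; the dump does not define them): negative_scores are the retriever's scores for each negative, document_score is the positive document's score, and document_rank is the number of negatives that outscore it, i.e. its 0-indexed rank. A quick sanity check, using the first few scores from the list above (the list is sorted descending, so the remaining scores are all lower):

# hypothetical check of the rank convention, not part of the dataset schema
negative_scores = [0.7142396, 0.7135517, 0.68600905, 0.6848414, 0.683487]
document_score = 0.68460876
rank = sum(s > document_score for s in negative_scores)
print(rank)  # -> 4, matching document_rank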
Update the model with a random batch from the agent's replay buffer.
def learn(self):
    batch = self.agent.replay_buffer.sample(self.batch_size)
    states = torch.tensor([x.state for x in batch], dtype=torch.float32).to(self.agent.device)  # shape == (batch_size, 3, 6, 7)
    actions = [x.action for x in batch]
    rewards = torch.tensor([x.reward for x in batch], dtype=torch.float32).to(self.agent.device)
    next_states = torch.tensor([x.next_state for x in batch], dtype=torch.float32).to(self.agent.device)
    dones = [x.done for x in batch]

    self.optimizer.zero_grad()
    q_vals = self.agent.policy_net(states)[range(len(actions)), actions]  # Q vals for actions taken
    q_next_vals = self.agent.target_net(next_states).detach()  # we don't care about grad wrt target net
    q_next_vals[dones] = 0.0  # terminal states have no future expected value
    q_targets = rewards + self.gamma * torch.max(q_next_vals, dim=1)[0]

    loss = self.loss_fn(q_targets, q_vals).to(self.agent.device)
    loss.backward()
    self.optimizer.step()

    self.agent.learning_iters += 1
    if self.agent.learning_iters % self.target_update_freq == 0:
        self.agent.update_target_net()
        # logger.info('Updated target net')
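The learn snippet above leans on a replay buffer and target network that are not shown; a minimal sketch of the interface it assumes follows (the Transition fields and the sample() signature here are assumptions for illustration, not taken from the record):

import random
from collections import deque, namedtuple

# Hypothetical transition record matching the fields `learn` reads:
# state, action, reward, next_state, done.
Transition = namedtuple('Transition', ['state', 'action', 'reward', 'next_state', 'done'])

class ReplayBuffer:
    def __init__(self, capacity):
        self.memory = deque(maxlen=capacity)  # oldest transitions evicted first

    def push(self, *args):
        self.memory.append(Transition(*args))

    def sample(self, batch_size):
        # Uniform random batch without replacement, which is what `learn` expects.
        return random.sample(list(self.memory), batch_size)

    def __len__(self):
        return len(self.memory)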
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replay(self):\n # Start only have enough memories\n if len(self.memory) < self.train_start:\n return\n\n batch_size = min(self.batch_size, len(self.memory))\n\n # Use mini_batch, sampling form the memory\n mini_batch = random.sample(self.memory, batch_size)\n\n # Since we are suing batch, we need to collect input and target\n input_update = np.zeros((batch_size, self.input_shape[0]))\n target_update = np.zeros((batch_size, self.output_num))\n\n for i in range(batch_size):\n state, action, reward, next_state, done = mini_batch[i]\n target = self.model.predict(state)[0]\n\n # Add future discounted reward\n if not done:\n # Use target_model here, because we want to keep the weights\n # not changing in one complete game\n target[action] = (1 - ALPHA) * reward + ALPHA * \\\n (self.gamma * np.amax(self.target_model.\n predict(next_state)[0]))\n else:\n target[action] = reward\n\n # Record the info into batch collection\n input_update[i] = state\n target_update[i] = target\n\n # Update model (also use a batch)\n self.model.fit(input_update, target_update, batch_size=batch_size,\n epochs=1, verbose=0)", "def post_randomize(self):\n super(BatchRandomizer, self).post_randomize()\n self.batch_idx += 1", "def replay(self):\n \n #grab random batch\n if len(self.memory) < self.batchsize:\n minibatch = self.memory\n else:\n minibatch = random.sample(self.memory,self.batchsize)\n \n #instantiate\n states = []\n Q_wants = []\n \n #Find updates\n for event in minibatch:\n state,action,reward,next_state,done = event\n states.append(state)\n \n #Find Q_target\n state_tensor = np.reshape(state,(1,len(state))) # keras takes 2d arrays\n Q_want = self.model.predict(state_tensor)[0] # all elements of this, except the action chosen, stay\n # the same \n \n #If state is terminal, Q_target(action) = reward\n if done == True:\n Q_want[action] = reward\n \n # Q_want(action) = reward + gamma*Q_target(next_state) -- note I sample from the target network\n else:\n next_state_tensor = np.reshape(next_state,(1,len(next_state))) \n\n \n Q_target_next_state_vec = self.target_model.predict(next_state_tensor)[0]\n Q_target_next_state_max = max(Q_target_next_state_vec)\n \n Q_want[action] = reward + self.gamma*Q_target_next_state_max\n Q_want_tensor = np.reshape(Q_want,(1,len(Q_want)))\n #self.model.fit(state_tensor,Q_want_tensor,verbose=False,epochs=1)\n \n Q_wants.append(Q_want)\n \n \n #Here I fit on the whole batch. 
Others seem to fit line-by-line\n #Dont' think (hope) it makes much difference\n states = np.array(states)\n Q_wants = np.array(Q_wants)\n self.model.fit(states,Q_wants,verbose=False, epochs=1)", "def replay(self, batch_size):\n batch = random.sample(self.replay_memory, batch_size)\n for state, action, reward, next_state, done in batch:\n target = self.model.predict(state)\n if done:\n target[0][action] = reward\n else:\n t = self.target_model.predict(next_state)[0]\n target[0][action] = reward + self.gamma * np.amax(t)\n self.loss += self.model.train_on_batch(state, target)\n self.n_batches += 1\n if self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay", "def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n 
f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means", "def update(self):\n\n self._eps_count += 1\n if self._replay.size >= self._min_replay_size:\n for _ in range(self._learning_updates):\n samples_indices, minibatch = self._replay.sample(self._batch_size)\n tf_minibatch = [tf.constant(mat, dtype=tf_type) for mat, tf_type in zip(minibatch, [tf.float32, tf.int32, tf.float32, tf.float32, tf.float32])]\n self._learn(*tf_minibatch)\n\n self._learn_iter_counter += 1\n if (self._target_update_period > 1) and (self._learn_iter_counter % self._target_update_period == 0):\n self._update_target_nets()", "def _update(self):\n if (len(self.buffer) < self.batch_size):\n return\n self.training_iter += 1\n # Make sure actor_target and critic_target are in eval mode\n assert not self.model.q_target_1.training\n assert not self.model.q_target_2.training\n\n assert self.model.q_1.training\n assert self.model.q_2.training\n transitions = self.buffer.sample(self.batch_size)\n batch = self.buffer.transition(*zip(*transitions))\n state_batch = torch.tensor(batch.state, device=self.device).float()\n action_batch = torch.tensor(batch.action,\n device=self.device).unsqueeze(-1).long()\n reward_batch = torch.tensor(batch.reward,\n device=self.device).unsqueeze(-1).float()\n next_state_batch = torch.tensor(batch.next_state,\n device=self.device).float()\n is_done_batch = torch.tensor(batch.done,\n device=self.device).unsqueeze(-1).bool()\n with torch.no_grad():\n Q_next_1 = ((~is_done_batch)\n * (self.model.q_target_1(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))\n Q_next_2 = ((~is_done_batch)\n * (self.model.q_target_2(next_state_batch).min(dim=-1)[0].unsqueeze(-1)))\n\n # Use max want to avoid underestimation bias\n Q_next = torch.max(Q_next_1, Q_next_2)\n Q_expected = reward_batch + self.gamma * Q_next\n\n Q_1 = self.model.q_1(state_batch).gather(-1, action_batch)\n Q_2 = self.model.q_2(state_batch).gather(-1, action_batch)\n L_1 = nn.MSELoss()(Q_1, Q_expected)\n L_2 = nn.MSELoss()(Q_2, Q_expected)\n self.loss.append([L_1.item(), L_2.item()])\n self.model.q_optimizer_1.zero_grad()\n self.model.q_optimizer_2.zero_grad()\n L_1.backward()\n L_2.backward()\n self.model.q_optimizer_1.step()\n self.model.q_optimizer_2.step()\n self.store_Q.append([Q_1.tolist(), Q_2.tolist(), Q_expected.tolist()])\n if (self.training_iter % self.update_freq) == 0:\n self.model.update_target_nn()", "def learn(self):\n event_batch = self.memory.sample(self.batch_size)\n \n if event_batch is None:\n return\n\n event_batch = self.memory.deserialize(event_batch)\n self.update_critic(event_batch)\n self.update_actor(event_batch)\n self.update_target(self.local_actor, self.target_actor)\n self.update_target(self.local_critic, self.target_critic)", "def _step(self):\n # Make a minibatch of training data\n num_train = self.X_train.shape[0]\n # random choose the samples\n batch_mask = np.random.choice(num_train, self.batch_size)\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.loss_history.append(loss)\n\n # Perform a parameter update\n for p, w in self.model.params.items():\n dw = 
grads[p]\n config = self.optim_configs[p]\n next_w, next_config = self.update_rule(w, dw, config)\n self.model.params[p] = next_w\n self.optim_configs[p] = next_config", "def update_model(self):\n self.model = [[self.cubes[i][j].value for j in range(self.cols)] for i in range(self.rows)]", "def train(batch_size: int, total_steps: int, update_after: int, update_every: int):\n replay_buffer = Replay()\n for t in range(total_steps):\n if t >= update_after and t % update_every == 0:\n for _ in range(update_every):\n batch = replay_buffer.sample_batch(batch_size)\n update(data=batch)", "def update_model(self):\n _start0 = time()\n model_name = self.agents[0].sync_model() # fixme: async alg dummy\n self.ag_stats.wait_model_time = time() - _start0\n\n # fixme: unify model type\n # 1) don't restore model before data meet minimum data set. likes qmix.\n # 2) don't restore with special policy, likes IMPALA.\n if model_name:\n _start1 = time()\n self.restore(model_name)\n self.ag_stats.restore_model_time = time() - _start1\n return type(model_name)", "def sync_buffers(self, model: nn.Module) -> None:\n # if not update buffer, copy buffer from orig model\n if self.update_buffers:\n warnings.warn(\n '`update_buffers` is set to True in this ema model, and '\n 'buffers will be updated in `update_parameters`.')\n\n avg_buffer = itertools.chain(self.module.buffers())\n orig_buffer = itertools.chain(model.buffers())\n for b_avg, b_orig in zip(avg_buffer, orig_buffer):\n b_avg.data.copy_(b_orig.data)", "def sync_buffers(self, model: nn.Module) -> None:\n # if not update buffer, copy buffer from orig model\n if self.update_buffers:\n warnings.warn(\n '`update_buffers` is set to True in this ema model, and '\n 'buffers will be updated in `update_parameters`.')\n\n avg_buffer = itertools.chain(self.module.buffers())\n orig_buffer = itertools.chain(model.buffers())\n for b_avg, b_orig in zip(avg_buffer, orig_buffer):\n b_avg.data.copy_(b_orig.data)", "def bn_update(loader, model, device):\n if not check_bn(model):\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n print('no bn in model?!')\n print('>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>!')\n # return model\n\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n\n model = model.to(device)\n pbar = tqdm(loader, unit=\"samples\", unit_scale=loader.batch_size)\n for sample in pbar:\n inputs, targets, target_lengths = sample['input'].to(device), sample['label'].to(device), sample['label_length'].to(device)\n\n inputs = inputs.to(device)\n b = inputs.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n # model(inputs)\n # TODO:\n model(inputs, False, targets, target_lengths, 275, test_dataset.tokenizer)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))\n return model", "def update(self, buffer: ReplayBuffer) -> np.ndarray:\n raise NotImplementedError", "def on_epoch_end(self):\n if self.shuffle:\n self.indices = np.random.permutation(self._len)", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' 
'*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def update_model(engine, 
batch):\n\t\tengine.model.train()\n\t\tengine.model.rpn.nms_thresh = 0.7\n\t\timg, target = prepare_batch(batch, device=get_device(engine.model))\n\t\tengine.optimizer.zero_grad()\n\t\tloss = engine.model(img, target)\n\t\tlosses = sum(l for l in loss.values())\n\t\tlosses.backward()\n\t\tengine.optimizer.step()\n\t\treturn loss", "def bn_update(loader, model):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n for input, _ in loader:\n input = input.cuda()\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def on_epoch_end(self):\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)", "def on_epoch_end(self):\n if self.shuffle:\n self.indexes = np.random.permutation(self.indexes)", "def _next_train(self):\n if self.batching == \"single_image\":\n image_index = np.random.randint(0, self.n_examples, ())\n ray_indices = np.random.randint(0, self.rays.batch_shape[1],\n (self.batch_size,))\n\n #--------------------------------------------------------------------------------------\n # Get batch pixels and rays\n batch_pixels = self.images[image_index][ray_indices]\n batch_rays = jax.tree_map(lambda r: r[image_index][ray_indices],\n self.rays)\n\n #--------------------------------------------------------------------------------------\n # Get index of reference views\n # During training for additional regularization we chose a random number of\n # reference view for interpolation\n # Top k number of views to consider when randomly sampling\n # subsample_factor = np.random.choice(np.arange(1, 4), p=[0.2, 0.45, 0.35])\n total_views = 20\n # Number of refernce views to select\n # num_select = self.num_ref_views + np.random.randint(low=-2, high=3)\n num_select = self.num_ref_views\n\n # Get the set of precomputed nearest camera indices\n batch_near_cam_idx = self.sorted_near_cam[image_index][:total_views]\n batch_near_cam_idx = np.random.choice(\n batch_near_cam_idx,\n min(num_select, len(batch_near_cam_idx)),\n replace=False)\n\n # Occasionally use input image\n # if np.random.choice([0,1], p=[0.995, .005]):\n # batch_near_cam_idx[np.random.choice(len(batch_near_cam_idx))] = image_index\n\n #--------------------------------------------------------------------------------------\n # Get the reference data\n ref_images = self.images[batch_near_cam_idx]\n ref_images = ref_images.reshape(ref_images.shape[0], self.h, self.w, 3)\n\n ref_cameratoworld = self.camtoworlds[batch_near_cam_idx]\n ref_worldtocamera = self.worldtocamera[batch_near_cam_idx]\n\n # Each of these reference data need to be shared onto each local device. 
To\n # support this we replicate the reference data as many times as there are\n # local devices\n l_devices = jax.local_device_count()\n target_view = data_types.Views(rays=batch_rays, rgb=batch_pixels)\n reference_views = data_types.ReferenceViews(\n rgb=np.tile(ref_images, (l_devices, 1, 1, 1)),\n ref_worldtocamera=np.tile(ref_worldtocamera, (l_devices, 1, 1)),\n ref_cameratoworld=np.tile(ref_cameratoworld, (l_devices, 1, 1)),\n intrinsic_matrix=np.tile(self.intrinsic_matrix[None, :],\n (l_devices, 1, 1)),\n idx=np.tile(batch_near_cam_idx[None, :],\n (jax.local_device_count(), 1)),\n )\n\n return_batch = data_types.Batch(\n target_view=target_view, reference_views=reference_views)\n\n else:\n raise ValueError(\"Batching {} not implemented\".format(self.batching))\n\n return return_batch", "def load_batch(self):\r\n\r\n #if we've seen all the data, start again with them in a new random order\r\n if self.batchcounter+self.batchsize > self.num_data:\r\n self.batchcounter = 0\r\n self.epochs += 1\r\n self._permutation = np.random.permutation(self.num_data)\r\n\r\n this_perm = self._permutation[self.batchcounter:self.batchcounter+self.batchsize]\r\n\r\n self.X_batch = self.X[this_perm]\r\n self.likelihood.set_data(self.Y[this_perm])\r\n if self.has_uncertain_inputs:\r\n self.X_variance_batch = self.X_variance[this_perm]\r\n\r\n self.batchcounter += self.batchsize\r\n\r\n self.data_prop = float(self.batchsize)/self.num_data\r\n\r\n self._compute_kernel_matrices()\r\n self._computations()", "def random_batch(self, batch_size):\n # Allocate the response.\n states = np.zeros((batch_size, self.state_size), dtype=self.states.dtype)\n actions = np.zeros((batch_size, self.action_size), dtype=self.actions.dtype)\n rewards = np.zeros((batch_size, 1), dtype=self.rewards.dtype)\n next_states = np.zeros_like(states, dtype=self.states.dtype)\n terminals = np.zeros((batch_size, 1), dtype=self.terminals.dtype)\n\n # uniform sampling\n count = 0\n while count < batch_size:\n # Randomly choose a time step from the replay memory.\n index = self.rng.randint(self.bottom,\n self.bottom + self.size - 1)\n\n # check for terminal state\n if self.terminals.take(index, axis=0, mode='wrap') > 0:\n continue\n\n # Add the state transition to the response.\n states[count] = self.states.take(index, axis=0, mode='wrap')\n actions[count] = self.actions.take(index, axis=0, mode='wrap')\n rewards[count] = self.rewards.take(index, axis=0, mode='wrap')\n next_states[count] = self.states.take(index+1, axis=0, mode='wrap')\n terminals[count] = self.terminals.take(index+1, axis=0, mode='wrap')\n count += 1\n\n return states, actions, rewards, terminals, next_states", "def experience_replay(self):\n s,a,r,sp,done = self.memory.sample(self.batch_size)\n # TODO: 5 lines missing.\n raise NotImplementedError(\"\")\n self.Q.fit(s, target=target)", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. 
Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n self._rng, self.optimizer, loss, mean_loss= train(\n self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self._target_opt,\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._num_actions,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n\n\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='ImplicitLoss',\n simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train_batch(self, batch_info: BatchInfo) -> None:\n # Each DQN batch is\n # 1. Roll out environment and store out experience in the buffer\n self.model.eval()\n\n # Helper variables for rollouts\n episode_information = []\n frames = 0\n\n with torch.no_grad():\n if not self.env_roller.is_ready_for_sampling():\n while not self.env_roller.is_ready_for_sampling():\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n else:\n for i in range(self.settings.batch_rollout_rounds):\n rollout = self.env_roller.rollout(batch_info, self.model)\n\n episode_information.extend(rollout.episode_information())\n frames += rollout.frames()\n\n batch_info['frames'] = frames\n batch_info['episode_infos'] = episode_information\n\n # 2. 
Sample the buffer and train the algo on sample batch\n self.model.train()\n\n # Algo will aggregate data into this list:\n batch_info['sub_batch_data'] = []\n\n for i in range(self.settings.batch_training_rounds):\n sampled_rollout = self.env_roller.sample(batch_info, self.model)\n\n batch_result = self.algo.optimizer_step(\n batch_info=batch_info,\n device=self.device,\n model=self.model,\n rollout=sampled_rollout\n )\n\n self.env_roller.update(rollout=sampled_rollout, batch_info=batch_result)\n\n batch_info['sub_batch_data'].append(batch_result)\n\n batch_info.aggregate_key('sub_batch_data')", "def get_batch_for_training(self, batch, store_for_loss=True, reuse_actor_indices=False, replay_entry_scale=1.0):\n # Select a random batch set of replay buffers to add also. Only select from ones that have been filled\n shuffled_subset = [] # Will contain a list of tuples of (actor_index, buffer_index)\n\n # We only allow each actor to be sampled from once, to reduce variance, and for parity with the original\n # paper\n actor_indices = list(range(self._model_flags.num_actors))\n replay_entry_count = int(self._model_flags.batch_size * self._model_flags.batch_replay_ratio * replay_entry_scale)\n assert replay_entry_count > 0, \"Attempting to run CLEAR without actually using any replay buffer entries.\"\n\n random_state = np.random.RandomState()\n\n with self._replay_lock:\n # Select a random actor, and from that, a random buffer entry.\n for _ in range(replay_entry_count):\n # Pick an actor and remove it from our options\n actor_index = random_state.choice(actor_indices)\n\n if not reuse_actor_indices and not self._model_flags.always_reuse_actor_indices:\n actor_indices.remove(actor_index)\n\n # From that actor's set of available indices, pick one randomly.\n replay_indices = self._get_replay_buffer_filled_indices(self._replay_buffers, actor_index=actor_index)\n if len(replay_indices) > 0:\n buffer_index = random_state.choice(replay_indices)\n shuffled_subset.append((actor_index, buffer_index))\n\n if len(shuffled_subset) > 0:\n replay_batch = {\n # Get the actor_index and entry_id from the raw id\n key: torch.stack([self._replay_buffers[key][actor_id][buffer_id]\n for actor_id, buffer_id in shuffled_subset], dim=1)\n for key in self._replay_buffers\n }\n\n replay_entries_retrieved = torch.sum(replay_batch[\"reservoir_val\"] > 0)\n assert replay_entries_retrieved <= replay_entry_count, \\\n f\"Incorrect replay entries retrieved. 
Expected at most {replay_entry_count} got {replay_entries_retrieved}\"\n\n replay_batch = {\n k: t.to(device=self._model_flags.device, non_blocking=True)\n for k, t in replay_batch.items()\n }\n\n # Combine the replay in with the recent entries\n if batch is not None:\n combo_batch = {\n key: torch.cat((batch[key], replay_batch[key]), dim=1) for key in batch\n }\n else:\n combo_batch = replay_batch\n\n # Store the batch so we can generate some losses with it\n if store_for_loss:\n self._replay_batches_for_loss.put(replay_batch)\n\n else:\n combo_batch = batch\n\n return combo_batch", "def update_policy(self):\n n_sequences = max(int(self.trainer_parameters['batch_size'] / self.policy.sequence_length), 1)\n value_total, policy_total, forward_total, inverse_total = [], [], [], []\n advantages = self.training_buffer.update_buffer['advantages'].get_batch()\n self.training_buffer.update_buffer['advantages'].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10))\n num_epoch = self.trainer_parameters['num_epoch']\n for k in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(len(self.training_buffer.update_buffer['actions']) // n_sequences):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(buffer.make_mini_batch(start, end), n_sequences)\n value_total.append(run_out['value_loss'])\n policy_total.append(np.abs(run_out['policy_loss']))\n if self.use_curiosity:\n inverse_total.append(run_out['inverse_loss'])\n forward_total.append(run_out['forward_loss'])\n self.stats['value_loss'].append(np.mean(value_total))\n self.stats['policy_loss'].append(np.mean(policy_total))\n if self.use_curiosity:\n self.stats['forward_loss'].append(np.mean(forward_total))\n self.stats['inverse_loss'].append(np.mean(inverse_total))\n self.training_buffer.reset_update_buffer()", "def update_step(self, replay_buffer_iter):\n\n transition = next(replay_buffer_iter)\n states = transition.observation[:, 0]\n actions = transition.action[:, 0]\n rewards = transition.reward[:, 0]\n next_states = transition.observation[:, 1]\n discounts = transition.discount[:, 0]\n\n next_actions, _ = self.actor(next_states, sample=True, with_log_probs=True)\n\n # entropy_rewards = self.discount * discounts * self.alpha * next_log_probs\n # rewards -= entropy_rewards\n critic_dict = self.fit_critic(states, actions, next_states, next_actions,\n rewards, discounts)\n actor_dict = self.fit_actor(states)\n\n return {**actor_dict, **critic_dict}", "def train(model, config, logger, record): \n # initialize userIDs\n users_to_sample = config.users\n userIDs = np.arange(config.users) \n\n # initialize the optimizer for the server model\n dataset = assign_user_data(config, logger)\n\n # initialize the delta offset buffers and local residual buffers\n offset_buffers = []\n residual_buffers = []\n for user in range(users_to_sample):\n offset_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n residual_buffers.append(WeightBuffer(model.state_dict(), mode=\"zeros\"))\n\n global_updater = GlobalUpdater(config, model.state_dict()) \n\n # before optimization, report the result first\n validate_and_log(model, dataset, config, record, logger)\n \n for comm_round in range(config.rounds):\n userIDs_candidates = userIDs[:users_to_sample]\n \n # Wait for all users updating locally\n local_packages = []\n for i, user_id in enumerate(userIDs_candidates):\n user_resource = assign_user_resource(config, user_id, \n 
dataset[\"train_data\"], dataset[\"user_with_data\"])\n updater = LocalUpdater(user_resource, config)\n updater.local_step(model, offset_buffers[user_id])\n local_package = updater.uplink_transmit()\n local_packages.append(local_package)\n\n # Update the global model\n global_updater.global_step(model, local_packages, residual_buffers)\n\n # Update local offsets\n update_offset_buffers(offset_buffers, \n residual_buffers,\n global_updater.accumulated_delta, \n config.tau) \n\n # log and record\n logger.info(\"Round {:d}\".format(comm_round))\n validate_and_log(model, dataset, config, record, logger)\n\n # if comm_round == config.scheduler[0]:\n # config.lr *= config.lr_scaler\n # config.scheduler.pop(0)", "def trainStep(self, batchSize=None):\n # Default behaviour waits for buffer to collect at least one batch_size of transitions\n if batchSize is None:\n if len(self.buffer) < self.batch_size:\n return\n batchSize = self.batch_size\n\n # Extract states, actions, rewards and action probabilities from transitions in buffer\n state = tensor([t.state for t in self.buffer], dtype=torch_float)\n action = tensor([t.action for t in self.buffer], dtype=torch_long).view(-1, 1)\n reward = [t.reward for t in self.buffer]\n old_action_log_prob = tensor([t.a_log_prob for t in self.buffer], dtype=torch_float).view(-1, 1)\n\n # Unroll rewards\n R = 0\n Gt = []\n for r in reward[::-1]:\n R = r + self.gamma * R\n Gt.insert(0, R)\n Gt = tensor(Gt, dtype=torch_float)\n\n # Send everything to cuda if used\n if self.use_cuda:\n state, action, old_action_log_prob = state.cuda(), action.cuda(), old_action_log_prob.cuda()\n Gt = Gt.cuda()\n\n # Repeat the update procedure for ppo_update_iters\n for i in range(self.ppo_update_iters):\n # Create randomly ordered batches of size batchSize from buffer\n for index in BatchSampler(SubsetRandomSampler(range(len(self.buffer))), batchSize, False):\n # Calculate the advantage at each step\n Gt_index = Gt[index].view(-1, 1)\n V = self.critic_net(state[index])\n delta = Gt_index - V\n advantage = delta.detach()\n\n # Get the current probabilities\n # Apply past actions with .gather()\n action_prob = self.actor_net(state[index]).gather(1, action[index]) # new policy\n\n # PPO\n ratio = (action_prob / old_action_log_prob[index]) # Ratio between current and old policy probabilities\n surr1 = ratio * advantage\n surr2 = clamp(ratio, 1 - self.clip_param, 1 + self.clip_param) * advantage\n\n # update actor network\n action_loss = -torch_min(surr1, surr2).mean() # MAX->MIN descent\n self.actor_optimizer.zero_grad() # Delete old gradients\n action_loss.backward() # Perform backward step to compute new gradients\n nn.utils.clip_grad_norm_(self.actor_net.parameters(), self.max_grad_norm) # Clip gradients\n self.actor_optimizer.step() # Perform training step based on gradients\n\n # update critic network\n value_loss = F.mse_loss(Gt_index, V)\n self.critic_net_optimizer.zero_grad()\n value_loss.backward()\n nn.utils.clip_grad_norm_(self.critic_net.parameters(), self.max_grad_norm)\n self.critic_net_optimizer.step()\n\n # After each training step, the buffer is cleared\n del self.buffer[:]", "def update_policy(self):\n self.trainer_metrics.start_policy_update_timer(\n number_experiences=len(self.training_buffer.update_buffer[\"actions\"]),\n mean_return=float(np.mean(self.cumulative_returns_since_policy_update)),\n )\n self.cumulative_returns_since_policy_update = []\n n_sequences = max(\n int(self.trainer_parameters[\"batch_size\"] / self.policy.sequence_length), 1\n )\n value_total, 
policy_total = [], []\n advantages = self.training_buffer.update_buffer[\"advantages\"].get_batch()\n self.training_buffer.update_buffer[\"advantages\"].set(\n (advantages - advantages.mean()) / (advantages.std() + 1e-10)\n )\n num_epoch = self.trainer_parameters[\"num_epoch\"]\n for _ in range(num_epoch):\n self.training_buffer.update_buffer.shuffle()\n buffer = self.training_buffer.update_buffer\n for l in range(\n len(self.training_buffer.update_buffer[\"actions\"]) // n_sequences\n ):\n start = l * n_sequences\n end = (l + 1) * n_sequences\n run_out = self.policy.update(\n buffer.make_mini_batch(start, end), n_sequences\n )\n value_total.append(run_out[\"value_loss\"])\n policy_total.append(np.abs(run_out[\"policy_loss\"]))\n self.stats[\"Losses/Value Loss\"].append(np.mean(value_total))\n self.stats[\"Losses/Policy Loss\"].append(np.mean(policy_total))\n for _, reward_signal in self.policy.reward_signals.items():\n update_stats = reward_signal.update(\n self.training_buffer.update_buffer, n_sequences\n )\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n if self.policy.bc_module:\n update_stats = self.policy.bc_module.update()\n for stat, val in update_stats.items():\n self.stats[stat].append(val)\n self.training_buffer.reset_update_buffer()\n self.trainer_metrics.end_policy_update()", "def on_epoch_end(self):\n if self.is_train:\n np.random.shuffle(self.image_indexes)", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n loader = tqdm.tqdm(loader, total=num_batches)\n\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def bn_update(loader, model, verbose=False, subset=None, **kwargs):\n if not check_bn(model):\n return\n model.train()\n momenta = {}\n model.apply(reset_bn)\n model.apply(lambda module: _get_momenta(module, momenta))\n n = 0\n num_batches = len(loader)\n\n with torch.no_grad():\n if subset is not None:\n num_batches = int(num_batches * subset)\n loader = itertools.islice(loader, num_batches)\n if verbose:\n\n loader = tqdm.tqdm(loader, total=num_batches)\n for input, _ in loader:\n input = input.cuda(non_blocking=True)\n input_var = torch.autograd.Variable(input)\n b = input_var.data.size(0)\n\n momentum = b / (n + b)\n for module in momenta.keys():\n module.momentum = momentum\n\n model(input_var, **kwargs)\n n += b\n\n model.apply(lambda module: _set_momenta(module, momenta))", "def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.indices)", "def new_epoch(self):\n self._curr_batch = 0\n if self.shuffle_order:\n self.shuffle()", "def sample(self, batch_size):\n if len(self._buffer) <= batch_size:\n print(\"There are only %d batches in the experience buffer.\" % len(self._buffer))\n return self._buffer\n idxes = [random.randint(0, len(self._buffer) - 1) for _ in range(batch_size)]\n return [self._buffer[idx] for idx in idxes]", "def process_batch(self, batch):\n # shapes are [time, ...original 
dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals", "def random_batch(self, batch_size, random_selection=False):\n # Allocate the response.\n states = np.zeros(\n (batch_size, self.height, self.width, self.phi_length), dtype='uint8')\n actions = np.zeros((batch_size), dtype='int32')\n rewards = np.zeros((batch_size), dtype='float32')\n terminal = np.zeros((batch_size), dtype='bool')\n next_states = np.zeros(\n (batch_size, self.height, self.width, self.phi_length), dtype='uint8')\n\n count = 0\n indices = np.zeros((batch_size), dtype='int32')\n\n while count < batch_size:\n # Randomly choose a time step from the replay memory.\n index = self.rng.randint(\n self.bottom, self.bottom + self.size - self.phi_length)\n\n initial_indices = np.arange(index, index + self.phi_length)\n transition_indices = initial_indices + 1\n end_index = index + self.phi_length - 1\n\n if np.any(self.terminal.take(initial_indices[0:-1], mode='wrap')):\n continue\n\n indices[count] = index\n\n # Add the state transition to the response.\n states[count] = self.imgs.take(\n initial_indices, axis=2, mode='wrap')\n actions[count] = self.actions.take(end_index, mode='wrap')\n rewards[count] = self.rewards.take(end_index, mode='wrap')\n terminal[count] = self.terminal.take(end_index, mode='wrap')\n next_states[count] = self.imgs.take(\n transition_indices, axis=2, mode='wrap')\n count += 1\n\n return states, actions, rewards, next_states, terminal", "def updateCalculatedDataModelRandomly(self):\n model = self._calculatedDataModel.asDataModel()\n for column_index in range(1, model.columnCount()):\n for row_index in range(model.rowCount()):\n index = model.index(row_index, column_index)\n value = random.randrange(100)\n role = 
Qt.DisplayRole\n model.setData(index, value, role)", "def __new_epoch(self):\n self.epoch += 1\n indices = np.arange(self.data.shape[0])\n np.random.shuffle(indices)\n self.q = list(indices)", "def single_step(self):\n # Make a minibatch of training data by choosing \"batch_size\" elements with replacement\n num_train = self.X_train.shape[0]\n batch_mask = np.random.choice(num_train, self.batch_size) # random choice with replacement\n X_batch = self.X_train[batch_mask]\n y_batch = self.y_train[batch_mask]\n\n # Compute loss and gradient\n loss, grads = self.model.loss(X_batch, y_batch)\n self.latest_loss = loss\n\n # Perform a parameter update based on chosen optimiser\n for p, w in self.model.params.items():\n dw = grads[p] # current gradients\n config = self.optim_configs[p] # moments of gradients and learning rate till previous accuracy() call\n next_w, next_config = optimiser_type(self.optim_type, w, dw, config) # sent to choice of optimising technique\n self.model.params[p] = next_w # model params updated\n self.optim_configs[p] = next_config # # moments of gradients updated", "def sample(self, batch_size):\n batch = random.sample(self.replay_buffer, batch_size)\n\n states = np.array([_[0] for _ in batch])\n actions = np.array([_[1] for _ in batch])\n rewards = np.array([_[2] for _ in batch])\n next_states = np.array([_[3] for _ in batch])\n dones = np.array([_[4] for _ in batch])\n\n return states, actions, rewards, next_states, dones", "def reload(self,offline_buffer):\n #loading online buffer from offline buffer by sampling (online_buffer.buffer_size) samples \n self.buffer = SumTree(self.buffer_size)\n names, idxs = offline_buffer.sample_batch(self.buffer_size)\n self.offline_idxs = idxs\n state , action , reward, done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[0])\n #loop on names and load in the online buffer\n for i in range(len(names)-1):\n next_state , next_action , next_reward , done = data_handler.handler.fetch_single_image(directory = self.directory, branch_name = self.name, observation_name = names[i+1])\n #done = 0\n self.memorize(state, action, reward, done, next_state, error=[1])\n state , action , reward = next_state , next_action , next_reward", "def experience_replay(batch_size):\n memory = []\n while True:\n experience = yield rsample(memory, batch_size) if batch_size <= len(memory) else None\n memory.append(experience)", "def learn(self):\n\n if (len(self._replay_buffer) < self._batch_size or\n len(self._replay_buffer) < self._min_buffer_size_to_learn):\n return None\n # print(len(self._replay_buffer), self._min_buffer_size_to_learn)\n\n cum_v_loss = 0.\n cum_q_loss = 0.\n\n # ---------------- compute all targets (not necessary)---------------#\n # vectorize may have problem.\n self._replay_buffer.vectorize(frame_buffer=0, n_step_size=N_STEP_SIZE, gamma=GAMMA)\n # print(self._replay_buffer.idcs, self._replay_buffer.n_step)\n # change to getting all target values first\n all_tar_v, all_tar_q, q_plus = self.__compute_targets(self._sampled_indices)\n # if self._replay_buffer.n_step.shape == (0,):\n # return\n all_v_mb, all_q_mb = self._session.run([all_tar_v, all_tar_q],\n feed_dict={self._info_state_ph: self._replay_buffer.obs,\n self._n_step_ph: self._replay_buffer.n_step[self._replay_buffer.idcs],\n self._action_ph: self._replay_buffer.actions})\n\n # ----------------every iteration, sample and compute loss-------------#\n for i_iter in range(self.iteration):\n # Sample (get states & 
...)\n mb_idcs = np.random.choice(len(self._replay_buffer), self._batch_size)\n mb_info_state, _, mb_actions, *_ = self._replay_buffer[mb_idcs] # problem\n\n mb_est_rew_w = self._replay_buffer.est_rew_weights[self._replay_buffer.idcs[mb_idcs]] # problem\n mb_est_non_zero = np.nonzero(mb_est_rew_w)\n n_step = self._replay_buffer.n_step[mb_idcs]\n\n if len(mb_est_non_zero[0]): # the array is not empty\n mb_est_non_zero = np.squeeze(mb_est_non_zero)\n mb_est_rew_idcs = (self._replay_buffer.idcs[mb_idcs][mb_est_non_zero] +\n self._replay_buffer.n_step_size).reshape(-1)\n\n # mb_v_prime_obs = next_info_state_ph\n mb_v_prime_obs, _, _, *_ = self._replay_buffer[mb_est_rew_idcs]\n else:\n mb_v_prime_obs = np.zeros((32, mb_info_state[0].size))\n\n # if self.player_id == 0:\n # print(\"In Learn Test Start: {}#################\".format(i_iter))\n # print(self.player_id)\n # print(mb_info_state.shape)\n # print(mb_actions.shape)\n # print(mb_v_prime_obs.shape)\n # print(n_step.shape)\n # print(mb_est_rew_w)\n # print(mb_est_non_zero)\n # print(\"In\n # Test End: {}###################\".format(i_iter))\n tar_v_mb = all_v_mb[mb_idcs]\n tar_q_mb = all_q_mb[mb_idcs]\n # print(\"In iteration: {}\".format(i_iter))\n # print(tar_v_mb.shape, tar_q_mb.shape)\n\n loss, _, v_loss, q_loss = self._session.run(\n [self._loss, self._learn_step, self.v_loss, self.q_loss],\n feed_dict={\n self._info_state_ph: mb_info_state,\n self._action_ph: mb_actions,\n self._next_info_state_ph: mb_v_prime_obs,\n self._mb_est_rew_ph: mb_est_rew_w,\n self._tar_q_ph: tar_q_mb,\n self._tar_v_ph: tar_v_mb\n })\n\n self._session.run(self._update_target_network)\n\n cum_q_loss += q_loss\n cum_v_loss += v_loss\n\n if (i_iter + 1) % (self.iteration / 10) == 0.:\n # print loss\n mean_v_loss = (cum_v_loss / int(self.iteration / 10))\n mean_q_loss = (cum_q_loss / int(self.iteration / 10))\n print(\"interation: {}, v_loss: {:.6f}, q_loss: {:.6f}\".format(\n i_iter + 1, mean_v_loss, mean_q_loss), end='\\r')\n if (i_iter + 1) == self.iteration:\n cum_v_loss = mean_v_loss\n cum_q_loss = mean_q_loss\n else:\n cum_v_loss = 0.0\n cum_q_loss = 0.0\n\n # transitions = self._replay_buffer.sample(self._batch_size)\n # info_states = [t.info_state for t in transitions]\n # actions = [t.action for t in transitions]\n # rewards = [t.reward for t in transitions]\n # next_info_states = [t.next_info_state for t in transitions]\n # are_final_steps = [t.is_final_step for t in transitions]\n # legal_actions_mask = [t.legal_actions_mask for t in transitions]\n # loss, _ = self._session.run(\n # [self._loss, self._learn_step],\n # feed_dict={\n # self._info_state_ph: info_states,\n # self._action_ph: actions,\n # self._reward_ph: rewards,\n # self._is_final_step_ph: are_final_steps,\n # self._next_info_state_ph: next_info_states,\n # self._legal_actions_mask_ph: legal_actions_mask,\n # })\n self._last_loss_value = [cum_v_loss, cum_q_loss]\n self.replay_buffer.clear()\n return [cum_v_loss, cum_q_loss]", "def update_batch(self, *args, **kwargs):\n pass", "def on_epoch_end(self):\n if self.shuffle:\n p = np.random.permutation(len(self.X))\n self.X = self.X[p]\n self.y = self.y[p]", "def simulate_batch():\n this_run = op_util.current_run()\n util.ensure_dir(this_run.guild_path(\"proto\"))", "def train(self, global_step):\n if self.replay_buffer.size > self.warmup_size:\n s0, a, r, t, s1 = self.replay_buffer.sample_batch(self.batch_size)\n target_actions = self.actor.get_target_action(s1)\n target_qval = self.get_target_qval(s1, target_actions)\n t = 
t.astype(dtype=int)\n y = r + self.gamma * target_qval * (1 - t)\n self.critic.train(s0, a, y)\n actions = self.actor.get_action(s0)\n grads = self.critic.get_action_gradients(s0, actions)\n self.actor.train(s0, grads[0])\n self.update_targets()", "def __init__(self,buffer_size,state_dim,action_dim,random_seed=123):\n print(\"Creating Replay Buffer object\")\n self.buffer_size=buffer_size\n self.state_dim=state_dim\n self.action_dim=action_dim\n self.pointer=0\n self.states=np.zeros(shape=[buffer_size,state_dim])\n self.actions=np.zeros(shape=[buffer_size,action_dim])\n self.rewards=np.zeros(shape=[buffer_size,1])\n self.dones=np.zeros(shape=[buffer_size,1])\n self.next_states=np.zeros(shape=[buffer_size,state_dim])\n self.filled=False\n \n random.seed(random_seed)", "def update_random_state(self):\n self.random_state = RandomState()", "def run(self):\n while self.running:\n if self.data_buffer is None:\n if self.index_start + self.batch_size <= len(self.shuffle_index):\n # This case means we are still in this epoch\n batch_index = self.shuffle_index[self.index_start: self.index_start + self.batch_size]\n self.index_start += self.batch_size\n\n elif self.index_start < len(self.shuffle_index):\n # This case means we've come to the\n # end of this epoch, take all the rest data\n # and shuffle the training data again\n batch_index = self.shuffle_index[self.index_start:]\n\n # Now, we've finished this epoch\n # let's shuffle it again.\n self.shuffle_index = range(len(self.x_train))\n self.rng.shuffle(self.shuffle_index)\n self.index_start = 0\n else:\n # This case means index_start == len(shuffle_index)\n # Thus, we've finished this epoch\n # let's shuffle it again.\n self.shuffle_index = range(len(self.x_train))\n self.rng.shuffle(self.shuffle_index)\n batch_index = self.shuffle_index[0: self.batch_size]\n self.index_start = self.batch_size\n\n# final_dim_time = int(self.dim_time*self.ratio_subset)\n data = np.zeros((len(batch_index), self.dim_feature))\n label = np.zeros((len(batch_index), self.dim_class_num))\n\n \n \n \n for i in range(len(batch_index)):\n start_point = 0\n\n data[i] = self.x_train[batch_index[i]]\n\n label[i] = self.y_train[batch_index[i]]\n \n\n with self.lock:\n self.data_buffer = data, label\n sleep(0.0001)", "def update_policy(self):\n # this is update_policy \n # sample batch of 32 from the memory\n batch_of_samples = self.replay_memory.sample(batch_size=32)\n current_state_samples = batch_of_samples['current_state_samples']\n next_state_samples = batch_of_samples['next_state_samples']\n #print type(current_state_samples[0])\n #print current_state_samples\n\n # fetch stuff we need from samples 32*84*84*4\n current_state_images = np.zeros([1, 84, 84, 4])\n #print current_state_samples\n current_state_images[0,...] = np.dstack([sample.state for sample in current_state_samples])\n\n next_state_images = np.zeros([1, 84, 84, 4])\n next_state_images[0,...] 
= np.dstack([sample.state for sample in next_state_samples])\n\n # preprocess\n current_state_images = self.preprocessor.process_batch(current_state_images)\n next_state_images = self.preprocessor.process_batch(next_state_images)\n # print \"current_state_images {} max {} \".format(current_state_images.shape, np.max(current_state_images))\n #print current_state_images.shape\n q_current = self.q_network.predict(current_state_images,batch_size=self.batch_size) # 32*num_actions\n q_next = self.q_network.predict(next_state_images,batch_size=self.batch_size)\n\n # targets\n y_targets_all = q_current #1*num_actions\n #print y_targets_all.shape # [1,6]\n idx = 0 \n last_sample = current_state_samples[-1]\n if last_sample.is_terminal:\n y_targets_all[idx, last_sample.action] = last_sample.reward\n else:\n if self.mode == 'vanilla':\n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*np.max(q_next[idx])\n if self.mode == 'double': \n y_targets_all[idx, last_sample.action] = np.float32(last_sample.reward) + self.gamma*q_next[idx, np.argmax(q_current[idx])] \n\n loss = self.q_network.train_on_batch(current_state_images, np.float32(y_targets_all))\n\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_loss', value=loss, step=self.iter_ctr)\n\n if not (self.iter_ctr % self.log_loss_every_nth):\n self.dump_train_loss(loss)\n\n # if (self.iter_ctr > (self.num_burn_in+1)) and not(self.iter_ctr%self.target_update_freq):\n # # copy weights\n # print \"Iter {} Updating target Q network\".format(self.iter_ctr)\n # self.target_q_network.set_weights(self.q_network.get_weights())\n # [self.target_q_network.trainable_weights[i].assign(self.q_network.trainable_weights[i]) \\\n # for i in range(len(self.target_q_network.trainable_weights))]", "def sample_batch(self, batch_size):\n batch = []\n\n # Sample using prorities\n if(self.with_per):\n T = self.buffer.total() // batch_size\n for i in range(batch_size):\n a, b = T * i, T * (i + 1)\n s = random.uniform(a, b)\n idx, error, data = self.buffer.get(s)\n batch.append((*data, idx))\n idx = np.array([i[5] for i in batch])\n #TD errors are only updated for transitions that are replayed\n \n # Sample randomly from Buffer\n elif self.count < batch_size:\n idx = None\n batch = random.sample(self.buffer, self.count)\n else:\n idx = None\n batch = random.sample(self.buffer, batch_size)\n\n # Return a batch of experience\n s_batch = np.array([i[0] for i in batch])\n a_batch = np.array([i[1] for i in batch])\n r_batch = np.array([i[2] for i in batch])\n d_batch = np.array([i[3] for i in batch])\n new_s_batch = np.array([i[4] for i in batch])\n\n return s_batch, a_batch, r_batch, d_batch, new_s_batch, idx", "def mc_update(self):\n i = random.randint(0,self.N-1)\n return self.mc_update_fixed(i)", "def rebatch(pad_idx, batch):\n return Batch(batch.src, batch.trg, pad_idx)", "def reset(self, model, device, flags=None):\n self._env.reset()\n\n # Randomly shuffle the deck\n if model is None:\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = {'landlord': _deck[:20],\n 'landlord_up': _deck[20:37],\n 'landlord_down': _deck[37:54],\n 'three_landlord_cards': _deck[17:20],\n }\n for key in card_play_data:\n card_play_data[key].sort()\n self._env.card_play_init(card_play_data)\n self.infoset = self._game_infoset\n return get_obs(self.infoset)\n else:\n self.total_round += 1\n bid_done = False\n card_play_data = []\n landlord_cards = []\n last_bid = 0\n bid_count = 0\n player_ids = {}\n bid_info = None\n bid_obs_buffer = 
[]\n multiply_obs_buffer = []\n bid_limit = 3\n force_bid = False\n while not bid_done:\n bid_limit -= 1\n bid_obs_buffer.clear()\n multiply_obs_buffer.clear()\n _deck = deck.copy()\n np.random.shuffle(_deck)\n card_play_data = [\n _deck[:17],\n _deck[17:34],\n _deck[34:51],\n ]\n for i in range(3):\n card_play_data[i].sort()\n landlord_cards = _deck[51:54]\n landlord_cards.sort()\n bid_info = np.array([[-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1],\n [-1, -1, -1]])\n bidding_player = random.randint(0, 2)\n # bidding_player = 0 # debug\n first_bid = -1\n last_bid = -1\n bid_count = 0\n if bid_limit <= 0:\n force_bid = True\n for r in range(3):\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n if bid_limit <= 0:\n wr = BidModel.predict_env(card_play_data[bidding_player])\n if wr >= 0.7:\n action = {\"action\": 1} # debug\n bid_limit += 1\n\n bid_obs_buffer.append({\n \"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n if first_bid == -1:\n first_bid = bidding_player\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n else:\n bid_info[r] = [0, 0, 0]\n bidding_player = (bidding_player + 1) % 3\n one_count = np.count_nonzero(bid_info == 1)\n if one_count == 0:\n continue\n elif one_count > 1:\n r = 3\n bidding_player = first_bid\n bidding_obs = _get_obs_for_bid(bidding_player, bid_info, card_play_data[bidding_player])\n with torch.no_grad():\n action = model.forward(\"bidding\", torch.tensor(bidding_obs[\"z_batch\"], device=device),\n torch.tensor(bidding_obs[\"x_batch\"], device=device), flags=flags)\n bid_obs_buffer.append({\n \"x_batch\": bidding_obs[\"x_batch\"][action[\"action\"]],\n \"z_batch\": bidding_obs[\"z_batch\"][action[\"action\"]],\n \"pid\": bidding_player\n })\n if action[\"action\"] == 1:\n last_bid = bidding_player\n bid_count += 1\n for p in range(3):\n if p == bidding_player:\n bid_info[r][p] = 1\n else:\n bid_info[r][p] = 0\n break\n card_play_data[last_bid].extend(landlord_cards)\n card_play_data = {'landlord': card_play_data[last_bid],\n 'landlord_up': card_play_data[(last_bid - 1) % 3],\n 'landlord_down': card_play_data[(last_bid + 1) % 3],\n 'three_landlord_cards': landlord_cards,\n }\n card_play_data[\"landlord\"].sort()\n player_ids = {\n 'landlord': last_bid,\n 'landlord_up': (last_bid - 1) % 3,\n 'landlord_down': (last_bid + 1) % 3,\n }\n player_positions = {\n last_bid: 'landlord',\n (last_bid - 1) % 3: 'landlord_up',\n (last_bid + 1) % 3: 'landlord_down'\n }\n for bid_obs in bid_obs_buffer:\n bid_obs.update({\"position\": player_positions[bid_obs[\"pid\"]]})\n\n # Initialize the cards\n self._env.card_play_init(card_play_data)\n multiply_map = [\n np.array([1, 0, 0]),\n np.array([0, 1, 0]),\n np.array([0, 0, 1])\n ]\n for pos in [\"landlord\", \"landlord_up\", \"landlord_down\"]:\n pid = player_ids[pos]\n self._env.info_sets[pos].player_id = pid\n self._env.info_sets[pos].bid_info = bid_info[:, [(pid - 1) % 3, pid, (pid + 1) % 3]]\n self._env.bid_count = bid_count\n # multiply_obs = _get_obs_for_multiply(pos, self._env.info_sets[pos].bid_info, card_play_data[pos],\n # landlord_cards)\n # action = model.forward(pos, 
torch.tensor(multiply_obs[\"z_batch\"], device=device),\n # torch.tensor(multiply_obs[\"x_batch\"], device=device), flags=flags)\n # multiply_obs_buffer.append({\n # \"x_batch\": multiply_obs[\"x_batch\"][action[\"action\"]],\n # \"z_batch\": multiply_obs[\"z_batch\"][action[\"action\"]],\n # \"position\": pos\n # })\n action = {\"action\": 0}\n self._env.info_sets[pos].multiply_info = multiply_map[action[\"action\"]]\n self._env.multiply_count[pos] = action[\"action\"]\n self.infoset = self._game_infoset\n if force_bid:\n self.force_bid += 1\n if self.total_round % 100 == 0:\n print(\"发牌情况: %i/%i %.1f%%\" % (self.force_bid, self.total_round, self.force_bid / self.total_round * 100))\n self.force_bid = 0\n self.total_round = 0\n return get_obs(self.infoset), {\n \"bid_obs_buffer\": bid_obs_buffer,\n \"multiply_obs_buffer\": multiply_obs_buffer\n }", "def build_replay_buffer(agent, batch_size, steps_per_loop):\n buf = tf_uniform_replay_buffer.TFUniformReplayBuffer(\n data_spec=agent.policy.trajectory_spec,\n batch_size=batch_size,\n max_length=steps_per_loop)\n return buf", "def random_batch(self, batch_size):\n sample_size = batch_size if self.size > batch_size else self.size\n idx = np.random.choice(self.size, sample_size, replace=False)\n return self.state[idx, :], self.action_index[idx], self.reward[idx], self.not_terminal[idx], self.succ_state[idx, :], self.succ_player[idx], [self.succ_legal_move[i] for i in idx]", "def init_game_setting(self):\n np.random.seed(1) \n self.s_prev = np.zeros((80, 80, 1))\n print('loading trained model from {}'.format(self.model_path))\n self.sess = tf.InteractiveSession(graph=self.model)\n self.saver.restore(self.sess, self.model_path)", "def forward(self, state):\n batch_size = get_batch_size(state, self.dim_state)\n if batch_size:\n return self.random(batch_size)\n else:\n return self.random()", "def reset(self):\n obs = self.gym.reset()\n # self.step = 1\n agent_obs = self.featurize(obs[self.gym.training_agent])\n # self.observations_history = [agent_obs]\n return agent_obs", "def on_epoch_end(self):\n if self.shuffle:\n np.random.shuffle(self.indexes)", "def sync_batch_stats(state: TrainState) -> TrainState:\n # Each device has its own version of the running average batch\n # statistics and those are synced before evaluation\n return state.replace(batch_stats=cross_replica_mean(state.batch_stats))", "def update_model(self, boards, outcomes, verbose=0):\n\t\t# Compile the model\n\t\tself.model.compile(\n\t\t\toptimizer='adam',\n\t\t\tloss='mse'\n\t\t)\n\n\t\t# Train the model\n\t\tmf = self.model.fit(boards, outcomes, verbose=verbose)\n\n\t\t# Crush the graph, maybe that is what is messing with RAM?\n\t\ttf.keras.backend.clear_session()", "def _update(self):\n self.parametrize_beam()\n self.update_ranks()\n self._points = tf.reshape(self._endpoint, (1, 2)) * tf.reshape(self._ranks, (-1, 1))", "def update(self):\n q = self.M[self.state,:][0]\n self.state = random.choice(self.N,1,p = q)\n return self.state", "def _next_minibatch(self):\n batch = self.data[self.ix:self.ix+self.batch_size]\n if len(batch) < self.batch_size:\n random.shuffle(self.data)\n self.ix = self.batch_size - len(batch)\n batch += self.data[:self.ix]\n else:\n self.ix += self.batch_size\n self.batch = batch", "def update(self):\r\n\r\n self.target.load_state_dict(self.model.state_dict())\r\n self.target.eval()", "def sample(self, batch_size):\n buffer_size = len(self.buffer)\n print(\"**\",buffer_size)\n index = np.random.choice(np.arange(buffer_size), size=batch_size, 
replace=False)\n return [self.buffer[i] for i in index]", "def recv(self, model_param):\n self.model.load_state_dict(copy.deepcopy(model_param))", "def _run(self):\n if not self.is_train:\n return self.test() \n\n logger.debug(\"Actor {} resuming at Step {}, {}\".format(self.actor_id, \n self.global_step.value(), time.ctime()))\n\n s = self.emulator.get_initial_state()\n \n s_batch = []\n a_batch = []\n y_batch = []\n bonuses = deque(maxlen=100)\n\n exec_update_target = False\n total_episode_reward = 0\n episode_ave_max_q = 0\n episode_over = False\n qmax_down = 0\n qmax_up = 0\n prev_qmax = -10*6\n low_qmax = 0\n ep_t = 0\n \n while (self.global_step.value() < self.max_global_steps):\n # Sync local learning net with shared mem\n self.sync_net_with_shared_memory(self.local_network, self.learning_vars)\n self.save_vars()\n\n rewards = []\n states = []\n actions = []\n local_step_start = self.local_step\n \n while not episode_over:\n logger.debug('steps: {} / {}'.format(self.global_step.value(), self.max_global_steps))\n # Choose next action and execute it\n a, readout_t = self.choose_next_action(s)\n\n new_s, reward, episode_over = self.emulator.next(a)\n total_episode_reward += reward\n\n current_frame = new_s[...,-1]\n bonus = self.density_model.update(current_frame)\n bonuses.append(bonus)\n\n if (self.actor_id == 0) and (self.local_step % 200 == 0):\n bonus_array = np.array(bonuses)\n logger.debug('Mean Bonus={:.4f} / Max Bonus={:.4f}'.format(\n bonus_array.mean(), bonus_array.max()))\n\n # Rescale or clip immediate reward\n # reward = self.rescale_reward(reward + bonus)\n reward = self.rescale_reward(reward)\n ep_t += 1\n \n rewards.append(reward)\n states.append(s)\n actions.append(a)\n \n s = new_s\n self.local_step += 1\n episode_ave_max_q += np.max(readout_t)\n \n global_step, update_target = self.global_step.increment(\n self.q_target_update_steps)\n\n if update_target:\n update_target = False\n exec_update_target = True\n\n if self.local_step % 4 == 0:\n self.batch_update()\n \n self.local_network.global_step = global_step\n\n else:\n mc_returns = list()\n running_total = 0.0\n for r in reversed(rewards):\n running_total = r + self.gamma*running_total\n mc_returns.insert(0, running_total)\n\n mixed_returns = self.cts_eta*np.array(rewards) + (1-self.cts_eta)*np.array(mc_returns)\n\n states.append(new_s)\n episode_length = len(rewards)\n for i in range(episode_length):\n self.replay_memory.append((\n states[i],\n actions[i],\n mixed_returns[i],\n states[i+1],\n i+1 == episode_length))\n\n \n if exec_update_target:\n self.update_target()\n exec_update_target = False\n # Sync local tensorflow target network params with shared target network params\n if self.target_update_flags.updated[self.actor_id] == 1:\n self.sync_net_with_shared_memory(self.target_network, self.target_vars)\n self.target_update_flags.updated[self.actor_id] = 0\n\n s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \\\n self.prepare_state(s, total_episode_reward, self.local_step, ep_t, episode_ave_max_q, episode_over)", "def resynth_data(self):\n if not hasattr(self, '_Ro'):\n self._Ro = self.R\n self.R = self._Ro + numpy.random.randn(*self._Ro.shape)*self.dR", "def update_model(self) -> torch.Tensor:\n # PER needs beta to calculate weights\n samples = self.memory.sample_batch(self.beta)\n weights = torch.FloatTensor(\n samples[\"weights\"].reshape(-1, 1)\n ).to(self.device)\n indices = samples[\"indices\"]\n \n # 1-step Learning loss\n elementwise_loss = self._compute_dqn_loss(samples, 
self.gamma)\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n \n # N-step Learning loss\n # we are gonna combine 1-step loss and n-step loss so as to\n # prevent high-variance. The original rainbow employs n-step loss only.\n if self.use_n_step:\n gamma = self.gamma ** self.n_step\n samples = self.memory_n.sample_batch_from_idxs(indices)\n elementwise_loss_n_loss = self._compute_dqn_loss(samples, gamma)\n elementwise_loss += elementwise_loss_n_loss\n \n # PER: importance sampling before average\n loss = torch.mean(elementwise_loss * weights)\n\n self.optimizer.zero_grad()\n loss.backward()\n clip_grad_norm_(self.dqn.parameters(), 10.0)\n self.optimizer.step()\n \n # PER: update priorities\n loss_for_prior = elementwise_loss.detach().cpu().numpy()\n new_priorities = loss_for_prior + self.prior_eps\n self.memory.update_priorities(indices, new_priorities)\n \n # NoisyNet: reset noise\n self.dqn.reset_noise()\n self.dqn_target.reset_noise()\n\n return loss.item()", "def on_epoch_end(self):\r\n self.indexes = np.arange(len(self.list_IDs))\r\n if self.shuffle == True:\r\n np.random.shuffle(self.indexes)", "def on_epoch_end(self):\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)", "def on_epoch_end(self):\n self.indexes = np.arange(len(self.list_IDs))\n if self.shuffle == True:\n np.random.shuffle(self.indexes)", "async def update_model(model_updates):\n async for model_update in model_updates:\n model_location = model_update['model_location']\n print(f\"Updating model to: {model_location}\")\n\n # using incrementing version number to keep track of live model\n # but obviously doesnt work for a real distributed system \n model_table['live_version'] += 1\n model_table['model_location'] = model_location", "def _do_update(self):\n sample = np.random.choice(self._seeds, 1, replace=False, p=self._seed_weights)[0]\n index = self._seeds.index(sample)\n new_seed = random.choice([neb for neb in self._graph.neighbors(sample)])\n self._edges.add((sample, new_seed))\n self._nodes.add(sample)\n self._nodes.add(new_seed)\n self._seeds[index] = new_seed", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def distribute_model(self):\n state_dict = self.model.state_dict()\n for client in self.clients:\n new_state_dict = copy.deepcopy(state_dict)\n client.model.load_state_dict(new_state_dict)", "def collect_samples(self):\n self.replay_buffer = self.collect_initial_batch(\n self.replay_buffer, self.acm_pre_train_samples\n )", "def update(self, batch):\n for experience in batch:\n # First calculate the expected future reward from the final state\n estimated_future_value = self.sess.run(self.network.policyLayer,\n feed_dict={self.network.inputs: [experience.next_state]})\n max_estimated_future_value = np.max(estimated_future_value)\n\n # Get the estimated future reward for each action based on the initial state\n updated_action_value = self.sess.run(self.network.policyLayer,\n feed_dict={self.network.inputs: [experience.state]})\n\n # Update the expected future reward for the selected action based on the actual reward obtained\n updated_action_value[0, experience.action] = experience.reward + self.discount_factor * max_estimated_future_value\n\n # Update the policy with the new estimated 
action values\n self.sess.run(self.updateModel, feed_dict={self.network.inputs: [experience.state],\n self.updated_value: updated_action_value})", "def next_batch(self, batch_size, shuffle=True):", "def _update_agents_model(self, task):\n logger.log('entering _update_agents_model', 'steps: {}'.format(self._steps),\n level=logger.DEBUG + 5)\n if (self.self_infserver_addr is None\n and self._should_update_model(self.self_model, task.model_key1)):\n model1 = self._model_pool_apis.pull_model(task.model_key1)\n me_id = self._learning_agent_id # short name\n self.agents[me_id].load_model(model1.model)\n self.self_model = model1\n if self._should_update_model(self.oppo_model, task.model_key2):\n model2 = self._model_pool_apis.pull_model(task.model_key2)\n oppo_id = self._oppo_agent_id # short name\n for agt in self.agents[oppo_id:]:\n agt.load_model(model2.model)\n self.oppo_model = model2\n logger.log('leaving _update_agents_model', level=logger.DEBUG + 5)", "def on_epoch_end(self):\n self.indexes = np.arange(len(self.list_ids))\n if self.shuffle:\n np.random.shuffle(self.indexes)", "def trainOnDataset(self, dataset):\n cfg = self.cfg\n for rows in dataset.randomBatches(self.datasetField, cfg.batchSize):\n olduw, olduhb, olduvb = \\\n zeros((self.rbm.visibleDim, self.rbm.hiddenDim)), \\\n zeros(self.rbm.hiddenDim), zeros(self.rbm.visibleDim)\n\n for t in range(cfg.maxIter):\n #print(\"*** Iteration %2d **************************************\" % t)\n\n params = self.rbm.params\n params = params.reshape((self.rbm.visibleDim, self.rbm.hiddenDim))\n biasParams = self.rbm.biasParams\n\n mm = cfg.iniMm if t < cfg.mmSwitchIter else cfg.finMm\n\n w, hb, vb = self.calcUpdateByRows(rows)\n\n #print(\"Delta: \")\n #print(\"Weight: \",)\n #print(w)\n #print(\"Visible bias: \",)\n #print(vb)\n #print(\"Hidden bias: \",)\n #print(hb)\n #print(\"\")\n\n olduw = uw = olduw * mm + \\\n \tcfg.rWeights * (w - cfg.weightCost * params)\n olduhb = uhb = olduhb * mm + cfg.rHidBias * hb\n olduvb = uvb = olduvb * mm + cfg.rVisBias * vb\n\n #print(\"Delta after momentum: \")\n #print(\"Weight: \",)\n #print(uw)\n #print(\"Visible bias: \",)\n #print(uvb)\n #print(\"Hidden bias: \",)\n #print(uhb)\n #print(\"\")\n\n # update the parameters of the original rbm\n params += uw\n biasParams += uhb\n\n # Create a new inverted rbm with correct parameters\n invBiasParams = self.invRbm.biasParams\n invBiasParams += uvb\n self.invRbm = self.rbm.invert()\n self.invRbm.biasParams[:] = invBiasParams\n\n #print(\"Updated \")\n #print(\"Weight: \",)\n #print(self.rbm.connections[self.rbm['visible']][0].params.reshape( \\)\n # (self.rbm.indim, self.rbm.outdim))\n #print(\"Visible bias: \",)\n #print(self.invRbm.connections[self.invRbm['bias']][0].params)\n #print(\"Hidden bias: \",)\n #print(self.rbm.connections[self.rbm['bias']][0].params)\n #print(\"\")", "def learn(self):\r\n \r\n # take a mini-batch from replay experience\r\n cur_batch_size = min(len(self.replay_exp), self.batch_size)\r\n mini_batch = random.sample(self.replay_exp, cur_batch_size)\r\n \r\n # batch data\r\n sample_states = np.ndarray(shape = (cur_batch_size, self.state_size)) # replace 128 with cur_batch_size\r\n sample_actions = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_rewards = np.ndarray(shape = (cur_batch_size, 1))\r\n sample_next_states = np.ndarray(shape = (cur_batch_size, self.state_size))\r\n sample_dones = np.ndarray(shape = (cur_batch_size, 1))\r\n\r\n temp=0\r\n for exp in mini_batch:\r\n sample_states[temp] = exp[0]\r\n sample_actions[temp] = 
exp[1]\r\n sample_rewards[temp] = exp[2]\r\n sample_next_states[temp] = exp[3]\r\n sample_dones[temp] = exp[4]\r\n temp += 1\r\n \r\n \r\n sample_qhat_next = self.brain_target.predict(sample_next_states)\r\n \r\n # set all Q values terminal states to 0\r\n sample_qhat_next = sample_qhat_next * (np.ones(shape = sample_dones.shape) - sample_dones)\r\n # choose max action for each state\r\n sample_qhat_next = np.max(sample_qhat_next, axis=1)\r\n \r\n sample_qhat = self.brain_policy.predict(sample_states)\r\n \r\n for i in range(cur_batch_size):\r\n a = sample_actions[i,0]\r\n sample_qhat[i,int(a)] = sample_rewards[i] + self.gamma * sample_qhat_next[i]\r\n \r\n q_target = sample_qhat\r\n \r\n self.brain_policy.fit(sample_states, q_target, epochs = 1, verbose = 0)\r\n \r\n \r\n \r\n \"\"\"\r\n \r\n for state, action, reward, next_state, done in mini_batch:\r\n target_Q_s_a = 0 # new target for Q(s,a)\r\n state = np.reshape(state, [1, state_size])\r\n next_state = np.reshape(next_state, [1, state_size])\r\n \r\n # if it is not the terminal state\r\n if not done:\r\n qhat_next = self.brain_target.predict(next_state) # estimate Q(s',a')\r\n target_Q_s_a = reward + self.gamma * np.amax(qhat_next[0]) # because the output is m * n, so we need to consider the dimension [0]\r\n else:\r\n target_Q_s_a = reward\r\n \r\n target_output = self.brain_policy.predict(state) # we will replace target of Q(s,a) for specific a later\r\n target_output[0][action] = target_Q_s_a # new target for state s and action a\r\n \r\n self.brain_policy.fit(state, target_output, epochs = 1, verbose = 0)\r\n \r\n \"\"\"", "def fit(self, env, num_iteration, do_train=False):\n\n #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n #self.replay_memory.append((s, a, r, new_s, d))\n # epsilon update\n num_env = env.num_process\n env.reset()\n\n for t in range(0, num_iteration, num_env):\n self.global_step += 1\n #print(\"Global_step: {}\".format(self.global_step))\n old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n \"\"\"\n Epsilon update\n epsilon begin 1.0, end up 0.1\n FIX\n \"\"\"\n\n self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n if do_train:\n for _ in range(num_update):\n\n if self.per == 1:\n (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n else:\n old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n = self.replay_memory.sample(self.batch_size)\n\n feed_dict = {self.target_s: new_state_list.astype(np.float32)/255. 
,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def _sample_propagation_indices(\n self, batch_size: int, _rng: torch.Generator\n ) -> torch.Tensor:\n model_len = (\n len(self.elite_models) if self.elite_models is not None else len(self)\n )\n if batch_size % model_len != 0:\n raise ValueError(\n \"To use GaussianMLP's ensemble propagation, the batch size must \"\n \"be a multiple of the number of models in the ensemble.\"\n )\n # rng causes segmentation fault, see https://github.com/pytorch/pytorch/issues/44714\n return torch.randperm(batch_size, device=self.device)", "def RunModel(self):\n np.random.shuffle(self.pool)\n data = self.pool[:self.n], self.pool[self.n:]\n return data", "def RunModel(self):\n np.random.shuffle(self.pool)\n data = self.pool[:self.n], self.pool[self.n:]\n return data", "def on_epoch_end(self):\n if self.shuffle == True:\n numpy.random.shuffle(self.indexes)", "def random_batch2(self, batch_size):\n # shuld be more efficient variant of random sampling\n\n # Randomly choose a time step from the replay memory.\n index = self.rng.randint(self.bottom + batch_size,\n self.bottom + self.size - 1)\n idxs = np.arange(index-batch_size, index)\n # create mask for terminal stats\n m = ~self.terminals.take(idxs, axis=0, mode='wrap')\n\n # Add the state transition to the response.\n states = self.states.take(idxs, axis=0, mode='wrap')\n actions = self.actions.take(idxs, axis=0, mode='wrap')\n rewards = self.rewards.take(idxs, axis=0, mode='wrap').reshape(-1, 1)\n next_states = self.states.take(idxs+1, axis=0, mode='wrap')\n terminals = self.terminals.take(idxs+1, axis=0, mode='wrap').reshape(-1, 1)\n\n return states[m], actions[m], rewards[m], terminals[m], next_states[m]", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)" ]
[ "0.6248699", "0.6233435", "0.6136998", "0.6025851", "0.6016958", "0.596585", "0.5935107", "0.5765406", "0.5739933", "0.57378775", "0.5711635", "0.5678842", "0.5633065", "0.5633065", "0.55791724", "0.55476403", "0.55153525", "0.54968476", "0.54914474", "0.54901624", "0.5484997", "0.5484997", "0.54839903", "0.54768825", "0.54571074", "0.54555583", "0.5449336", "0.54487705", "0.5447869", "0.5441691", "0.543982", "0.54348916", "0.54285854", "0.54110837", "0.53990793", "0.5397008", "0.53944975", "0.5393681", "0.53918505", "0.5390389", "0.5383881", "0.5382783", "0.5382354", "0.5380821", "0.5379814", "0.5374095", "0.5348241", "0.5345907", "0.53406775", "0.53380585", "0.53343815", "0.53252953", "0.5322908", "0.53155434", "0.5315366", "0.5309805", "0.5290152", "0.5289907", "0.5282324", "0.5279195", "0.5275273", "0.52658224", "0.5264589", "0.52557063", "0.525202", "0.5246692", "0.52434444", "0.52415663", "0.52392757", "0.52290756", "0.5219294", "0.52179706", "0.5217743", "0.5217632", "0.5214463", "0.51904947", "0.5189714", "0.51782995", "0.5177985", "0.5176852", "0.5176852", "0.5171847", "0.51679647", "0.51674855", "0.51674855", "0.51674855", "0.5167119", "0.51624805", "0.51592124", "0.5157444", "0.5157249", "0.5155082", "0.5154384", "0.5153643", "0.5139335", "0.51370263", "0.51288867", "0.51288867", "0.5124887", "0.5123125", "0.5121874" ]
0.0
-1
Train the agent over a given number of iterations. Each iteration consists of self-play over n_episodes followed by a learn step in which the agent updates its network from a random sample drawn from the replay buffer
def train(self, iters, n_episodes):
    for i in range(iters):
        self.self_play(n_episodes)
        self.learn()
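A minimal, self-contained sketch of the loop this record describes, assuming a hypothetical agent that owns a replay buffer and a network-update step; SketchAgent and its attribute names are illustrative assumptions, not taken from the record:

import random
from collections import deque

class SketchAgent:
    """Hypothetical agent illustrating the iterate -> self-play -> learn pattern."""

    def __init__(self, buffer_size=10_000, batch_size=64):
        self.replay_buffer = deque(maxlen=buffer_size)  # transitions collected by self-play
        self.batch_size = batch_size

    def self_play(self, n_episodes):
        # Roll out n_episodes of self-play, pushing transitions
        # (state, action, reward, next_state, done) into the buffer.
        for _ in range(n_episodes):
            pass  # environment rollout would go here

    def learn(self):
        # Update the network from a random sample of the replay buffer.
        if len(self.replay_buffer) < self.batch_size:
            return
        batch = random.sample(self.replay_buffer, self.batch_size)
        # a gradient step on `batch` would go here

    def train(self, iters, n_episodes):
        for _ in range(iters):
            self.self_play(n_episodes)
            self.learn()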
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def learn(self, num_episodes=10000):\n for i in range(num_episodes):\n self.actor()\n self.learner()", "def trainAgent(self):\r\n\t\tfor episode in range(self.TOT_EPISODES):\r\n\t\t\t#reset environment, stacked frames every episode.\r\n\t\t\tstate = self.env.reset()\r\n\t\t\trewards = 0\r\n\t\t\t#preprocess and stack the frame/state.\r\n\t\t\tstate, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\tself.stacked_frames, state, True)\r\n\t\t\t\r\n\t\t\tfor step in range(self.MAX_STEPS):\r\n\t\t\t#for every step in episode:\r\n\t\t\t\r\n\t\t\t\tif (step%100==0):\r\n\t\t\t\t\tprint(\"Episode No.: \", episode, \"Step No.: \", step)\r\n\t\t\t\t\r\n\t\t\t\t#agent acts - explores or exploitation of the model\r\n\t\t\t\taction = self.dqn.predictAction(state)\r\n\t\t\t\t#reduce epsilon for more exploitation later.\r\n\t\t\t\tself.dqn.decayEpsilon()\r\n\t\t\t\t#Perform the action and get the next_state, reward, and done vals.\r\n\t\t\t\tnext_state, reward, done, _ = self.env.step(action)\r\n\t\t\t\t#append this state to the frame. Pass the previous stacked frame.\r\n\t\t\t\tnext_state, self.stacked_frames = stack_frames(self.stack_size,\r\n\t\t\t\t\t\t\t\t\t\tself.stacked_frames, next_state, False)\r\n\t\t\t\trewards+=reward\r\n\t\t\t\t\r\n\t\t\t\t#add this experience into memory (experience buffer)\r\n\t\t\t\tself.dqn.remember(state, action, reward, next_state, done)\r\n\t\t\t\t\r\n\t\t\t\tstate = next_state\r\n\t\t\t\t\r\n\t\t\t\tif done:\r\n\t\t\t\t\tprint(\"took %d steps\" %step)\r\n\t\t\t\t\tprint(\"Earned a total of reward equal to \", rewards)\r\n\t\t\t\t\tbreak\r\n\t\t\t\r\n\t\t\t\t# TRAIN\r\n\t\t\t\tself.dqn.replay()\r\n\t\t\t\t#sync target_model and model weights every 10k steps.\r\n\t\t\t\tif step % 10000 == 9999:\r\n\t\t\t\t\tself.dqn.target_train()\r\n\t\t\t\r\n\t\t\t# Save the network every 1000 iterations\r\n\t\t\tif episode % 5 == 4:\r\n\t\t\t\tprint(\"Saving Network\")\r\n\t\t\t\tself.dqn.save_network(self.path)", "def train(self, num_episodes=10000):\n\n self.game.restart()\n\n self.exp_states = defaultdict(int)\n\n for i in tqdm(range(num_episodes)):\n\n self.game.deal_cards()\n\n possible_actions = self.game.get_actions()\n\n player_state = self.game.get_player_state()\n player_action = self.player.get_action(player_state,\n possible_actions,\n explore_exploit='explore')\n\n # Bookkeep visited states (?)\n player_state_str = np.array2string(player_state)\n self.exp_states[player_state_str] += 1\n\n opponent_state = self.game.get_opponent_state()\n opponent_action = self.opponent.get_action(opponent_state,\n possible_actions)\n\n self.game.set_player_action(player_action)\\\n .set_opponent_action(opponent_action)\n\n player_score, opponent_score = self.game.get_scores()\n\n reward = self._get_reward(player_score, opponent_score)\n self.player.learn(player_state,\n player_action,\n reward)\n self.player.learn(opponent_state,\n opponent_action,\n -reward)\n \n print(\"Training done!\")", "def train(\n self, num_episodes, max_episode_length, reward_network=None,\n ):\n\n for _ in range(num_episodes):\n self.train_episode(max_episode_length)\n\n if self.training_i % self.play_interval == 0:\n self.play(\n max_episode_length,\n self.render,\n reward_network=reward_network,\n )", "def train(self, env):\n\n\t\tmin_average_reward_for_stopping = 195\n\t\tconsecutive_successful_episodes_to_stop = 10\n\t\tlast_10_rewards = deque(maxlen=consecutive_successful_episodes_to_stop)\n\n\t\tnum_Episodes = []\n\t\tEpisode_Rewards = []\n\n\t\tfor episode in 
range(self.episodes):\n\t\t\tstate = env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\t\t\tdone = False\n\t\t\ttotal_reward = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = self.act(state)\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\t\t\t\tnext_state = np.reshape(next_state, [1, self.state_size])\n\t\t\t\tself.remember(state, action, reward, next_state, done)\n\t\t\t\tstate = next_state\n\t\t\t\ttotal_reward += reward\n\n\t\t\tnum_Episodes.append(episode)\n\t\t\tEpisode_Rewards.append(total_reward)\n\t\t\tlast_10_rewards.append(total_reward)\n\t\t\tlast_10_avg_reward = np.mean(last_10_rewards)\n\t\t\tprint(episode, last_10_avg_reward)\n\n\t\t\t# call experience relay\n\t\t\tif len(self.memory) >= self.batch_size:\n\t\t\t\tself.replay(self.batch_size)\n\t\t\t# Stopping criteria\n\t\t\tif len(\n\t\t\t\t\tlast_10_rewards) == consecutive_successful_episodes_to_stop \\\n\t\t\t\t\tand last_10_avg_reward > min_average_reward_for_stopping:\n\t\t\t\tprint(\"Solved after {} epsiodes\".format(episode))\n\t\t\t\tbreak", "def train_experience_replay(self, epochs, batch_size, iterations_per_epoch, capacity, n_obs, **kwargs):\n\n # Initialize losses dictionary and memory replay buffer\n losses = dict()\n mem = MemoryReplayBuffer(capacity)\n\n for ep in range(1, epochs+1):\n losses[ep] = []\n with tqdm(total=iterations_per_epoch, desc='Training epoch {}'.format(ep)) as p_bar:\n\n for it in range(1, iterations_per_epoch+1):\n \n # Determine n_obs and generate data on-the-fly\n if type(n_obs) is int:\n n_obs_it = n_obs\n else:\n n_obs_it = n_obs()\n # Simulate and add to buffer\n params, sim_data = self._forward_inference(batch_size, n_obs_it, **kwargs)\n mem.store(params, sim_data)\n\n # Sample from buffer\n params, sim_data = mem.sample()\n\n # One step backprop\n loss = self._train_step(params, sim_data)\n \n # Store loss into dictionary\n losses[ep].append(loss)\n\n # Update progress bar\n p_bar.set_postfix_str(\"Epoch {0},Iteration {1},Loss: {2:.3f},Running Loss: {3:.3f}\"\n .format(ep, it, loss, np.mean(losses[ep])))\n p_bar.update(1)\n\n # Store after each epoch, if specified\n if self.manager is not None:\n self.manager.save()\n return losses", "def agents_train(self, game_step, episode_now, args):\n # update all trainers, if not in display or benchmark mode\n if episode_now < args.learning_start_episode: return \n if self.update_cnt > 0 and self.var >= self.min_var: self.var *= args.var_discount\n #if episode_now > self.last_update_episode and (episode_now - args.learning_start_episode) % args.learning_fre == 0:\n if game_step % args.learning_fre_step == 0:\n if self.update_cnt == 0: print('\\r=start training ...'+' '*100)\n self.last_update_episode = episode_now\n self.update_cnt += 1\n\n # update every agent in different memory batch\n for agent_idx, (actor_c, actor_t, critic_c, critic_t, opt_a, opt_c) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, \\\n self.critics_tar, self.optimizers_a, self.optimizers_c)):\n # del if opt_c == None: continue # jump to the next model update\n\n # sample the experience\n _obs_n_o, _action_n, _rew_n, _obs_n_n, _done_n = self.memory.sample( \\\n args.batch_size, agent_idx) # Note_The func is not the same as others\n \n # --use the date to update the CRITIC\n rew = torch.tensor(_rew_n, device=args.device, dtype=torch.float) # set the rew to gpu\n done_n = torch.tensor(~_done_n, dtype=torch.float, device=args.device) # set the rew to gpu\n action_cur_o = torch.from_numpy(_action_n).to(args.device, 
torch.float)\n obs_n_o = torch.from_numpy(_obs_n_o).to(args.device, torch.float)\n obs_n_n = torch.from_numpy(_obs_n_n).to(args.device, torch.float)\n\n action_tar = torch.cat([a_t(obs_n_n[:, self.obs_size[idx][0]:self.obs_size[idx][1]]).detach() \\\n for idx, a_t in enumerate(self.actors_tar)], dim=1)\n q = critic_c(obs_n_o, action_cur_o).reshape(-1) # q \n q_ = critic_t(obs_n_n, action_tar).reshape(-1) # q_ \n q_ = q_*args.gamma*done_n + rew*torch.tensor(args.reward_scale_par, device=args.device) # q_*gamma*done + reward\n loss_c = torch.nn.MSELoss()(q, q_.detach()) # bellman equation\n opt_c.zero_grad()\n loss_c.backward()\n nn.utils.clip_grad_norm_(critic_c.parameters(), args.max_grad_norm)\n opt_c.step()\n\n # --use the data to update the ACTOR\n # There is no need to cal other agent's action\n opt_c.zero_grad()\n model_out, policy_c_new = actor_c( \\\n obs_n_o[:, self.obs_size[agent_idx][0]:self.obs_size[agent_idx][1]], model_original_out=True)\n # update the aciton of this agent\n action_cur_o[:, self.action_size[agent_idx][0]:self.action_size[agent_idx][1]] = policy_c_new \n loss_pse = torch.mean(torch.pow(model_out, 2))\n loss_a = torch.mul(torch.tensor(-1.0, device=args.device), torch.mean(critic_c(obs_n_o, action_cur_o)))\n\n opt_a.zero_grad()\n (2e-3*loss_pse+loss_a).backward()\n #loss_a.backward()\n nn.utils.clip_grad_norm_(actor_c.parameters(), args.max_grad_norm)\n opt_a.step()\n\n # save the model to the path_dir ---cnt by update number\n #if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model == 0:\n if self.update_cnt > args.start_save_model and self.update_cnt % args.fre4save_model_step == 0:\n time_now = time.strftime('%y%m_%d%H%M')\n print('=time:{} step:{} save'.format(time_now, game_step))\n model_file_dir = os.path.join(args.save_dir, '{}_{}_{}'.format( \\\n args.scenario_name, time_now, game_step))\n if not os.path.exists(model_file_dir): # make the path\n os.mkdir(model_file_dir)\n for agent_idx, (a_c, a_t, c_c, c_t) in \\\n enumerate(zip(self.actors_cur, self.actors_tar, self.critics_cur, self.critics_tar)):\n torch.save(a_c, os.path.join(model_file_dir, 'a_c_{}.pt'.format(agent_idx)))\n torch.save(a_t, os.path.join(model_file_dir, 'a_t_{}.pt'.format(agent_idx)))\n torch.save(c_c, os.path.join(model_file_dir, 'c_c_{}.pt'.format(agent_idx)))\n torch.save(c_t, os.path.join(model_file_dir, 'c_t_{}.pt'.format(agent_idx)))\n\n # update the tar par\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, args.tao) \n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, args.tao)", "def self_play(self, n_episodes): \n eps = self.eps(self.agent.learning_iters)\n experiences = self_play_episodes(self.mdp, self.agent, n_episodes, eps) \n for state, action, reward, next_state, done in experiences:\n self.agent.replay_buffer.push(state, action, reward, next_state, done)", "def train(self):\n tic = time.time()\n means = []\n stds = []\n steps = 0\n scores_window = deque(maxlen=100)\n for e in range(1,self.episodes):\n\n self.noise.step()\n episode_scores = []\n obs = self.env.reset()\n for t in range(self.tmax):\n actions = self.act(obs)\n next_obs,rewards,dones = self.env.step(actions)\n\n # Store experience\n if np.max(rewards) > 0:\n print('hit the ball over the net',rewards)\n self.R.add(obs.reshape(1,48),obs,actions,rewards,next_obs.reshape(1,48),next_obs,dones)\n obs = next_obs\n # Score tracking\n episode_scores.append(np.max(rewards))\n \n # Learn\n if len(self.R) > self.min_buffer_size:\n for _ in 
range(self.SGD_epoch):\n # Update each agent\n for i in range(self.num_agents):\n self.learn(i)\n # update target networks\n self.update_targets_all()\n \n steps += int(t)\n means.append(np.mean(episode_scores))\n stds.append(np.std(episode_scores))\n scores_window.append(np.sum(episode_scores))\n if e % 4 == 0:\n toc = time.time()\n r_mean = np.mean(scores_window)\n r_max = max(scores_window)\n r_min = min(scores_window)\n r_std = np.std(scores_window)\n plot(self.name,means,stds)\n print(\"\\rEpisode: {} out of {}, Steps {}, Rewards: mean {:.2f}, min {:.2f}, max {:.2f}, std {:.2f}, Elapsed {:.2f}\".format(e,self.episodes,steps,r_mean,r_min,r_max,r_std,(toc-tic)/60))\n if np.mean(scores_window) > self.winning_condition:\n print('Env solved!')\n # save scores\n pickle.dump([means,stds], open(str(self.name)+'_scores.p', 'wb'))\n # save policy\n self.save_weights(self.critic_path,self.actor_path)\n break", "def train(self,env, iter_n=2000):\n\n\t\tfor i in range(iter_n):\n\t\t\tif i > 50:\n\t\t\t\tif all(reward > 195 for reward in self.step_count[-10:]):\n\t\t\t\t\tprint('solved at episode {}'.format(i))\n\t\t\t\t\tbreak\n\t\t\tstate = self.env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\n\t\t\tepisode_complete = False\n\t\t\tstep = 0\n\t\t\twhile not episode_complete and (step < self.max_steps):\n\t\t\t\taction = self.define_action(state)\n\t\t\t\tnew_state, reward, episode_complete, info = env.step(action)\n\t\t\t\tnew_state = np.reshape(new_state, [1, self.state_size])\n\n\t\t\t\tself.memory.append((state, action, reward, new_state, episode_complete))\n\t\t\t\tself.round_reward += reward\n\t\t\t\tstate = new_state\n\t\t\t\tstep += 1\n\t\t\t\tif episode_complete:\n\t\t\t\t\tself.round_reward += -10\n\t\t\t\t\tself.update_target_model()\n\t\t\t\t\tself.print_results(i, iter_n, step)\n\t\t\t\t\tif i != 0: # Update totals in memory if not the first run\n\t\t\t\t\t\tself.update_totals(i, step)\n\t\t\t\tif len(self.memory) > self.training_iter:\n\t\t\t\t\tself.replay()\n\t\t\tif self.epsilon > self.epsilon_min:\n\t\t\t\tself.epsilon *= self.epsilon_decay\n\n\t\treturn self.all_iterations, self.all_rewards, self.step_count", "def train(self, n_steps=5000):\n all_rewards = []\n losses = []\n epsilons = []\n episode_reward = 0\n\n state = self.env.reset()\n for frame_idx in range(1, n_steps + 1):\n\n epsilon = self.epsilon_schedule(frame_idx)\n epsilons.append(epsilon)\n action = self.act(state, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n episode_reward += reward\n self.replay_buffer.append(state, action, reward, next_state, done)\n\n if len(self.replay_buffer) >= self.learn_start:\n loss = self._compute_loss()\n self._update_parameters(loss)\n losses.append(loss.item())\n\n if done:\n state = self.env.reset()\n all_rewards.append(episode_reward)\n episode_reward = 0\n\n if frame_idx % self.target_update_rate == 0:\n self._update_target()\n\n state = next_state\n\n self._plot(all_rewards, losses, epsilons)", "def train(n_episodes=1000, max_n_steps=300, eps_start=1.0, eps_end=0.01, eps_decay=0.995, strCheckpointFile='checkpoint.pth'):\n\n global env\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n num_saves = 0\n for i_episode in range(1, n_episodes + 1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n last_t = max_n_steps\n 
for t in range(max_n_steps):\n action = agent.act(state, eps) # agent returns an epsilon-greedy action based on state\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n agent.step(state, action, reward, next_state, done) # records experience and learns (depending on settings)\n state = next_state\n score += reward\n if done:\n last_t = t + 1\n break\n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay * eps) # decrease epsilon\n print('\\rEpisode {}\\tNum steps: {}\\tAverage Score: {:.2f}'.format(i_episode, last_t, np.mean(scores_window)))\n # if i_episode % 100 == 0:\n # print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window) >= 13: # win condition in course\n if num_saves == 0:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode - 100, np.mean(scores_window)))\n print('\\nTraining will continue and the checkpoint will be overwritten every 100 episodes')\n print('\\nSaving a checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n else:\n if i_episode % 100 == 0:\n print('\\nSaving another checkpoint now, you may interrupt code execution with eg Ctrl+C')\n torch.save(agent.qnetwork_local.state_dict(), strCheckpointFile)\n num_saves += 1\n\n env.close()\n\n # plot the scores\n fig = plt.figure()\n ax = fig.add_subplot(111)\n plt.plot(np.arange(len(scores)), scores)\n plt.ylabel('Score')\n plt.xlabel('Episode #')\n # plt.show()\n plt.savefig('training_score_by_episode.png')\n return scores", "def train(Game, agent, episodes=1000):\n a = agent\n # eps_start = a.epsilon\n # eps_end = a.epsilon_min\n # eps_dec = np.exp(1/episodes * np.log(eps_end/eps_start))\n # a.epsilon_decrement = eps_dec\n times_taken = np.zeros(episodes)\n print(\"Training starting\")\n for n in range(episodes):\n start_time = time.time()\n g = Game()\n print(\"EPISODE\", n+1)\n while not g.success:\n state = 1.0*g.get_state()\n action = a.action(state)\n reward = g.play(action)\n # print(g.success)\n # print(\"reward: \", reward)\n # print(state)\n # print(action)\n # print(g.get_state())\n a.train(state, action, reward, g.get_state(), g.success)\n end_time = time.time()\n times_taken[n] = end_time - start_time\n print(\"Training complete ({} episodes)\".format(episodes))\n return times_taken", "def train(self,\n num_episodes = 100,\n num_steps = 500000,\n max_steps_per_episode = 10000,\n target_interval = 10000,\n learning_interval = 4,\n frame_skip = 1,\n warmup_steps = None,\n pretrain_steps = None,\n output_freq = 50,\n save_freq = 5, \n store_memory = False):\n \n # prefill memory with random transitions if requested\n if warmup_steps is not None:\n self._random_warmup(warmup_steps)\n \n # pretrain the agent on its on own memory\n if pretrain_steps is not None:\n self._pretrain(pretrain_steps, target_interval)\n \n # logging initialization\n self._score, self._q_values, self._losses = 0., [], []\n raw_frames = np.zeros(shape = (max_steps_per_episode, *self.env._unprocessed_frame.shape), dtype = np.uint8)\n\n episode_idx = 0\n while episode_idx < num_episodes or self._step_counter < num_steps:\n # reset environment and get first state\n 
self._start_episode()\n \n for i in range(max_steps_per_episode):\n \n #-------------------------------------------------------------------------------#\n #####################\n # Interactive Phase #\n #####################\n \n # choose an action, observe reactions of the environment and\n # add this experience to the agent's memory \n if self._step_counter % frame_skip == 0: \n action = self._make_decision()\n new_frame, reward, done, _ = self.env.step(action)\n self.memory.add_experience(action, reward, new_frame, 1, done)\n \n # update current state\n self._current_state[0, :(self.num_stacked_frames-1)] = self._current_state[0, 1:]\n self._current_state[0, self.num_stacked_frames-1] = new_frame\n #-------------------------------------------------------------------------------#\n \n \n #-------------------------------------------------------------------------------#\n ##################\n # Learning Phase #\n ##################\n \n # perform a parameter update of the current policy model\n if self._step_counter % learning_interval == 0:\n self._batch_update()\n \n # update the target model\n if self._step_counter % target_interval == 0:\n self._update_target_model()\n #-------------------------------------------------------------------------------#\n \n # logging\n self._score += self.env._unprocessed_reward\n raw_frames[i] = self.env._unprocessed_frame\n \n \n self._step_counter += 1\n \n if self.env.was_real_done:\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n break\n \n if done:\n self.env.reset()\n \n \n if not self.env.was_real_done:\n self.memory.add_experience(action, reward, new_frame, 1, True)\n self.logger.add_episode_logs(self._step_counter, self._score, self._q_values, self._losses, raw_frames[:i])\n self._score, self._q_values, self._losses = 0., [], []\n \n if episode_idx%(num_episodes/output_freq)==0:\n validation_score, validation_frames = self.test(record = True, max_steps_per_episode = max_steps_per_episode)\n #validation_score, validation_frames = 0, []\n lower_idx = int(clip(episode_idx-(num_episodes/output_freq)+1, 0, num_episodes-1))\n self.logger.show_progress(lower_idx, episode_idx, validation_score, validation_frames, self.policy_network.model)\n \n if episode_idx%(num_episodes/save_freq)==0:\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)\n \n \n\n episode_idx += 1 \n print('==========================\\ntraining session completed\\n==========================\\n\\n\\n=======\\nSummary\\n======='\n )\n self.logger.show_progress(0, num_episodes, summary = True)\n self.logger.make_plots()\n self.logger.save_all(self.policy_network.model, self.memory, store_memory)", "def train(\n env: DiscreteEnvironment[TState, TAction],\n agent: DiscreteAgent[TState, TAction],\n n_episodes: int,\n on_action: Callable[[TState, TAction, float, int], None] = None,\n on_episode_end: Callable[[int], None] = None,\n) -> None:\n for ep in range(n_episodes):\n t = 0\n while not env.terminated:\n s, a, r = agent.act_and_train(t) # returns (S_t, A_t, R_t)\n if on_action:\n on_action(s, a, r, t)\n t += 1\n agent.episode_end()\n if on_episode_end:\n on_episode_end(t)\n env.reset()", "def train(self):\n total_steps = 0\n scores_history = [deque(maxlen=self.run_settings.averaging_window)\n for a in range(len(self.agents))]\n averages_history = [[] for a in range(len(self.agents))]\n\n for e in 
range(self.run_settings.num_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = rewards\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Train agents\n if total_steps > 0 and total_steps % self.run_settings.train_every == 0:\n for agent in self.agents:\n agent.train(self.run_settings)\n\n # Save agent model\n if total_steps > 0 and total_steps % self.run_settings.save_every == 0:\n for agent in self.agents:\n agent.save()\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n for a in range(len(self.agents)):\n if hasattr(self.agents[a], 'notify_episode_crashed'):\n self.agents[a].notify_episode_crashed(self.run_settings)\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores = [scores[a] + rewards[a] for a in range(len(self.agents))]\n # Push to agent Memories\n for a in range(len(self.agents)):\n self.agents[a].push_memory(states[a], actions[a], rewards[a], done)\n\n if done:\n averages = []\n for a in range(len(scores_history)):\n scores_history[a].append(scores[a])\n averages.append(np.mean(scores_history[a]))\n averages_history[a].append(averages[a])\n\n if len(scores) == 1:\n scores = scores[0]\n averages = averages[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. Game score: {}. Averages: {}\"\n .format(e+1, step, scores, averages))\n if (self.run_settings.graph_every > 0 and e > 0\n and e % self.run_settings.graph_every == 0):\n self.plot_results(averages_history)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n log.info(f'Starting Iter #{i} ...')\n # examples of the iteration\n if not self.skipFirstSelfPlay or i > 1:\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for _ in tqdm(range(self.args.numEps), desc=\"Self Play\"):\n self.mcts = MCTS(self.game, self.nnet, self.args) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history \n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n log.warning(\n f\"Removing the oldest entry in trainExamples. len(trainExamplesHistory) = {len(self.trainExamplesHistory)}\")\n self.trainExamplesHistory.pop(0)\n # backup history to a file\n # NB! 
the examples were collected using the model from the previous iteration, so (i-1) \n self.saveTrainExamples(i - 1)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n shuffle(trainExamples)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n pmcts = MCTS(self.game, self.pnet, self.args)\n\n self.nnet.train(trainExamples)\n nmcts = MCTS(self.game, self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n pwins, nwins, draws = arena.playGames(self.args.arenaCompare)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args.updateThreshold:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename='best.pth.tar')", "def train(self, n_episodes):\n for episode in trange(n_episodes):\n policy_loss, entropy, episode_reward = self.train_step()\n self.writer.add_scalar('policy_loss', policy_loss, episode)\n self.writer.add_scalar('entropy', entropy, episode)\n self.writer.add_scalar('episode_reward', episode_reward, episode)", "def learn(self):\n\n for i in range(1, self.args.numIters + 1):\n print('------ ITER ' + str(i) + '------')\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n for eps in range(self.args.numEps):\n print('------ Self Play Episode ' + str(eps) + '------')\n self.mcts = TSPMCTS(self.args, self.game, self.nnet) # reset search tree\n iterationTrainExamples += self.executeEpisode()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n self.trainExamplesHistory.pop(0)\n\n # shuffle examples before training\n trainExamples = []\n for e in self.trainExamplesHistory:\n trainExamples.extend(e)\n\n # training new network\n if self.args.numEps > 0:\n self.nnet.train(trainExamples)\n nmcts = TSPMCTS(self.args, self.game, self.nnet)\n\n print('PLAYING GAMES')\n if self.args.arenaCompare:\n arena = SinglePlayerArena(lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game)\n wins, losses = arena.playSinglePlayerGames(self.args.arenaCompare)\n print('WINS/LOSSES: %d / %d' % (wins, losses))", "def train_episode(self):\n state = self.env.reset()\n states = []\n actions = []\n rewards = []\n for _ in range(self.options.steps):\n probs = self.actor_baseline.predict([[state]])[0][0]\n action = np.random.choice(len(probs), p=probs)\n\n next_state, reward, done, _ = self.step(action)\n states.append(state)\n actions.append(action)\n rewards.append(reward)\n\n state = next_state\n\n if done:\n break\n\n # Compute and store returns in G\n G = np.zeros_like(rewards)\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n\n # One-hot encoding for actions\n actions_one_hot = np.zeros([len(actions), self.env.action_space.n])\n 
actions_one_hot[np.arange(len(actions)), actions] = 1\n\n # Compute one-hot encoded deltas\n ################################\n # YOUR IMPLEMENTATION HERE #\n ################################\n deltas = [[0]]\n\n # Update actor and state estimator\n self.actor_baseline.fit(x=[np.array(states)],\n y={'actor_output': deltas, 'baseline_output': returns},\n epochs=1, batch_size=self.options.batch_size, verbose=0)", "def train_agent(self):\n # Retrieve collected experiences from memory\n experiences = np.array(self.replay.get_all())\n # rewards = np.array([h['reward'] for h in experiences])\n #rewards = experiences[:,2]\n rewards = np.array([r[2] for r in experiences])\n\n # Discount and normalize rewards\n norm_rewards = self.discount_rewards_and_normalize(rewards)\n\n # Shuffle for better learning\n shuffled_experiences = np.random.shuffle(experiences)\n\n # Feed the experiences through the network with rewards to compute and\n # minimize the loss.\n\n feed={\n self.X: [r[0] for r in experiences],\n self.rewards:norm_rewards,\n self.ep_actions:experiences[:,1]\n }\n self.tf_sess.run(self.train,feed_dict=feed)\n\n pass", "def run(self):\n # Observe the game by randomly sampling actions from the environment\n # and performing those actions\n self.__observe__()\n for i in xrange(self.num_epochs):\n self.environment.resetStatistics()\n time_now = time.time()\n for j in xrange(self.train_steps_per_epoch):\n # Get action using epsilon-greedy strategy\n action = self.__sample_epsilon_action__()\n # Perform action based on epsilon-greedy search and store the transitions\n # in experience replay\n self.__supply_action_to_environment__(action)\n # If the environment is in the terminal state, reset the environment, and\n # perform self.stack_num actions to reset the environment\n self.isGameOver()\n if j % self.train_frequency == 0:\n # print \"Started training\"\n # Sample minibatch of size self.minibatch_size from experience replay\n minibatch = self.experience_replay.sample()\n minibatch_states, minibatch_action, minibatch_reward, minibatch_next_states, \\\n minibatch_terminals = minibatch\n cost = self.network.train_network(minibatch_states,\n minibatch_action,\n minibatch_reward,\n minibatch_terminals,\n minibatch_next_states)\n if j % self.record_frequency == 0:\n total_score, num_games = self.environment.getStatistics()\n avg_score = total_score / num_games\n self.network.record_average_qvalue(\n self.experience_replay.getCurrentState(),\n i * self.train_steps_per_epoch + j,\n self.epsilon, avg_score)\n # Epsilon annealing\n self.__anneal_epsilon__()\n # if self.time_step % 1000 == 0:\n # print \"Cost at iteration\", self.time_step, \" is\", cost\n # print \"Value of epsilon is\", self.epsilon\n self.steps += 1\n if j % self.copy_steps == 0:\n self.network.copy_weights()\n total_score, num_games = self.environment.getStatistics()\n time_taken = (time.time() - time_now)\n logger.info(\"Finished epoch %d: Steps=%d; Time taken=%.2f\",\n i, j, time_taken)\n logger.info(\"\\tNumber of games: %d; Average reward: %.2f\", num_games, (total_score / num_games))\n logger.info(\"\\tFinal epsilon value for epoch: %f\", self.epsilon)\n self.network.create_checkpoint()", "def train_agent(iterations, modeldir, logdir, policydir):\n\n # TODO: add code to instantiate the training and evaluation environments\n\n\n # TODO: add code to create a reinforcement learning agent that is going to be trained\n\n\n tf_agent.initialize()\n\n eval_policy = tf_agent.policy\n collect_policy = tf_agent.collect_policy\n\n 
tf_policy_saver = policy_saver.PolicySaver(collect_policy)\n\n # Use reverb as replay buffer\n replay_buffer_signature = tensor_spec.from_spec(tf_agent.collect_data_spec)\n replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)\n table = reverb.Table(\n REPLAY_BUFFER_TABLE_NAME,\n max_size=REPLAY_BUFFER_CAPACITY,\n sampler=reverb.selectors.Uniform(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n signature=replay_buffer_signature,\n ) # specify signature here for validation at insertion time\n\n reverb_server = reverb.Server([table])\n\n replay_buffer = reverb_replay_buffer.ReverbReplayBuffer(\n tf_agent.collect_data_spec,\n sequence_length=None,\n table_name=REPLAY_BUFFER_TABLE_NAME,\n local_server=reverb_server,\n )\n\n replay_buffer_observer = reverb_utils.ReverbAddEpisodeObserver(\n replay_buffer.py_client, REPLAY_BUFFER_TABLE_NAME, REPLAY_BUFFER_CAPACITY\n )\n\n # Optimize by wrapping some of the code in a graph using TF function.\n tf_agent.train = common.function(tf_agent.train)\n\n # Evaluate the agent's policy once before training.\n avg_return = compute_avg_return_and_steps(\n eval_env, tf_agent.policy, NUM_EVAL_EPISODES\n )\n\n summary_writer = tf.summary.create_file_writer(logdir)\n\n for i in range(iterations):\n # TODO: add code to collect game episodes and train the agent\n\n\n logger = tf.get_logger()\n if i % EVAL_INTERVAL == 0:\n avg_return, avg_episode_length = compute_avg_return_and_steps(\n eval_env, eval_policy, NUM_EVAL_EPISODES\n )\n with summary_writer.as_default():\n tf.summary.scalar(\"Average return\", avg_return, step=i)\n tf.summary.scalar(\"Average episode length\", avg_episode_length, step=i)\n summary_writer.flush()\n logger.info(\n \"iteration = {0}: Average Return = {1}, Average Episode Length = {2}\".format(\n i, avg_return, avg_episode_length\n )\n )\n\n summary_writer.close()\n\n tf_policy_saver.save(policydir)", "def train(env, agents, data_log, n_episodes=10000, n_steps=None, generate_val_data=False, record_env=None, trainer=None):\n # Setup logging and start code\n logger = logging.getLogger('root')\n step_tot = 0\n logger.info(env.observation_space[0].high)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n ep_generator = range(n_episodes) if n_episodes else itertools.count()\n # Start training\n for i in ep_generator:\n # Do some logging\n logger.info(\"episode:\" + str(i))\n data_log.set_episode(i)\n\n # Periodically store networks\n if i % 250 == 0: #was 25\n store_networks(trainer, agents, data_log)\n\n # Run a single episode\n score, step, extra_data = run_episode(env, agents, render=False, store_data=True, trainer=trainer)\n\n # Do more logging\n logger.info(\"Score: \" + str(score))\n step_tot += step\n data_log.set_step(step_tot)\n data_log.log_var(\"score\", score)\n alphas = [agent.alpha for agent in trainer.agents]\n data_log.log_var(\"alphas\", alphas)\n\n # Break training loop\n if n_steps and step_tot > n_steps:\n break\n\n #Periodically save logs\n if i % 50 == 0: #was 5\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n\n # Save logs one last time\n logger.info(\"Saving log...\")\n data_log.save()\n logger.info(\"Saved log\")\n return", "def train_network(self, batch, episode_nr):\n global eps, eps_min, eps_decay\n for exp in batch:\n S = exp[0]\n S = process_state(S)\n action_number = exp[1]\n r = exp[2]\n S_new = exp[3]\n S_new = process_state(S_new)\n terminal = exp[4]\n\n if not terminal: # If 
agent is not at its final destination\n target = (r + gamma*np.amax(self.target.predict(S_new)[0]))\n else:\n target = r\n target_f = self.policy.predict(S)\n\n target_f[0][action_number] = target # Update the target Q-value for the action that was taken\n self.policy.fit(S, target_f, epochs=1, verbose=0) # Train network (verbose=0 suppresses the progress output)\n if self.epsilon > self.eps_min and episode_nr > 10:\n self.epsilon *= self.eps_decay # Decrease exploration rate", "def q_learning(env, agent, num_episodes, batch_size, epsilon, epsilon_min, epsilon_decay, folder):\n \n # Keeps track of useful statistics\n stats = plotting.EpisodeStats(\n episode_lengths=np.zeros(num_episodes),\n episode_rewards=np.zeros(num_episodes))\n\n\n for i_episode in range(num_episodes):\n if epsilon > epsilon_min and i_episode > 500:\n # explore completely at random for 500 episodes, \n # then decrease exploration until epsilon drops below epsilon_min\n epsilon *= epsilon_decay\n sys.stdout.flush()\n\n state = env.reset()\n state = np.reshape(state, [1, env.nS])\n\n \n for t in range(MAX_STEP):\n\n ## Decide action\n action = agent.act(state, epsilon)\n ## Advance the game to the next frame based on the action\n next_state, reward, done, _ = env.step(action)\n\n env.my_render(folder)\n\n stats.episode_rewards[i_episode] += reward\n stats.episode_lengths[i_episode] = t+1\n\n next_state = np.reshape(next_state, [1, env.nS])\n ## Remember the previous state, action, reward, and done\n agent.remember(state, action, reward, next_state, done)\n ## make next_state the new current state for the next frame.\n state = next_state ## change to copy.copy(next_state), if it is an array\n\n if len(agent.memory) > batch_size:\n agent.replay(batch_size) \n\n if done: \n break\n \n mean_score = stats.episode_rewards[i_episode]/stats.episode_lengths[i_episode]\n print(\"episode: {}/{}, score: {}, e: {:.2}, steps:{}, mean score:{:.2}\"\n .format(i_episode, num_episodes, stats.episode_rewards[i_episode], epsilon, \n stats.episode_lengths[i_episode], \n mean_score))\n #if(i_episode > 200):\n write_csv(folder, i_episode, stats.episode_lengths[i_episode], mean_score)\n if(i_episode%50 == 0):\n agent.save(folder + \"_qn\" + str(i_episode) + \".h5\") \n agent.save(folder + \"_qn-final\" + \".h5\") \n\n return stats", "def train_episode(self, max_episode_length):\n\n # Populate the buffer\n self.populate_buffer(max_episode_length)\n\n # weight updates\n replay_samples = self.replay_buffer.sample(self.buffer_sample_size)\n state_batch = torch.from_numpy(replay_samples[0]).to(DEVICE)\n action_batch = torch.from_numpy(replay_samples[1]).to(DEVICE)\n reward_batch = (\n torch.from_numpy(replay_samples[2]).to(DEVICE).unsqueeze(1)\n )\n next_state_batch = torch.from_numpy(replay_samples[3]).to(DEVICE)\n dones = (\n torch.from_numpy(replay_samples[4])\n .type(torch.long)\n .to(DEVICE)\n .unsqueeze(1)\n )\n\n # alpha must be clamped with a minimum of zero, so use exponential.\n alpha = self.log_alpha.exp().detach()\n\n with torch.no_grad():\n # Figure out value function\n next_actions, log_next_actions, _ = self.policy.sample(\n next_state_batch\n )\n target_q1, target_q2 = self.avg_q_net(\n next_state_batch, next_actions\n )\n target_q = torch.min(target_q1, target_q2)\n next_state_values = target_q - alpha * log_next_actions\n\n # Calculate Q network target\n done_floats = dones.type(torch.float)\n q_target = reward_batch.clone()\n q_target += self.gamma * done_floats * next_state_values\n\n # Q net outputs values for all actions, so we index specific actions\n q1, q2 = 
self.q_net(state_batch, action_batch)\n q1_loss = F.mse_loss(q1, q_target)\n q2_loss = F.mse_loss(q2, q_target)\n\n # policy loss\n actions_pi, log_probs_pi, action_dist = self.policy.sample(state_batch)\n q1_pi, q2_pi = self.q_net(state_batch, actions_pi)\n q_pi = torch.min(q1_pi, q2_pi)\n policy_loss = ((alpha * log_probs_pi) - q_pi).mean()\n\n # update parameters\n self.q_optim.zero_grad()\n q1_loss.backward()\n self.q_optim.step()\n\n self.q_optim.zero_grad()\n q2_loss.backward()\n self.q_optim.step()\n\n self.policy_optim.zero_grad()\n policy_loss.backward()\n self.policy_optim.step()\n\n # automatic entropy tuning\n alpha_loss = (\n self.log_alpha * (log_probs_pi + self.entropy_target).detach()\n )\n alpha_loss = -alpha_loss.mean()\n\n if self.entropy_tuning:\n self.alpha_optim.zero_grad()\n alpha_loss.backward()\n self.alpha_optim.step()\n\n # Step average Q net\n move_average(self.q_net, self.avg_q_net, self.tau)\n\n # logging\n self.tbx_logger(\n {\n \"loss/q1 loss\": q1_loss.item(),\n \"loss/q2 loss\": q2_loss.item(),\n \"loss/pi loss\": policy_loss.item(),\n \"loss/alpha loss\": alpha_loss.item(),\n \"Q/avg_q_target\": q_target.mean().item(),\n \"Q/avg_q1\": q1.mean().item(),\n \"Q/avg_q2\": q2.mean().item(),\n \"Q/avg_reward\": reward_batch.mean().item(),\n \"Q/avg_V\": next_state_values.mean().item(),\n \"H/alpha\": alpha.item(),\n \"H/pi_entropy\": action_dist.entropy().mean(),\n \"H/pi_log_pi\": log_probs_pi.mean(),\n },\n self.training_i,\n )\n\n self.training_i += 1\n self.checkpointer.increment_counter()", "def train(self):\n if len(self.experience) < self.minibatch_size:\n return\n\n # sample minibatch_size random episodes, each with a number of transitions >= unrollings_num\n random_episodes_indecies = np.random.choice(len(self.experience), self.minibatch_size)\n random_episodes = []\n for index in random_episodes_indecies:\n episode = self.experience[index]\n\n # 0:random_transitions_space is the range from which a random transition\n # can be picked up while having unrollings_num - 1 transitions after it\n random_transitions_space = len(episode) - self.unrollings_num\n random_start = np.random.choice(random_transitions_space, 1)\n\n random_episodes.append(episode[random_start:random_start + self.unrollings_num])\n\n state_shape = tuple([self.minibatch_size, self.unrollings_num] + self.state_shape)\n\n # prepare the training data\n states = np.empty(state_shape, dtype=np.float32)\n next_states = np.empty(state_shape, dtype=np.float32)\n rewards = np.empty((self.minibatch_size, self.unrollings_num, ), dtype=np.float32)\n transition_action_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n next_legal_actions_filters = np.zeros((self.minibatch_size, self.unrollings_num, self.actions_count), dtype=np.float32)\n\n for i, episode in enumerate(random_episodes):\n for j, transition in enumerate(episode):\n state, action, reward, nextstate, next_legal_actions = transition\n\n states[i,j], rewards[i,j], next_states[i,j] = state, reward, nextstate\n transition_action_filters[i,j][action] = 1.0\n next_legal_actions_filters[i,j][next_legal_actions] = 1.0\n\n self.prediction_nn.clearLSTMS(self.session)\n self.target_nn.clearLSTMS(self.session)\n\n loss,_ = self.session.run([self.loss, self.finalize], {\n self.states: states,\n self.next_states: next_states,\n self.rewards: np.reshape(rewards, (self.minibatch_size * self.unrollings_num, )),\n self.transition_action_filters: np.reshape(transition_action_filters, (self.minibatch_size * 
self.unrollings_num, self.actions_count)),\n self.next_legal_actions_filters: np.reshape(next_legal_actions_filters, (self.minibatch_size * self.unrollings_num, self.actions_count))\n })\n\n if self.iteration != 0 and self.iteration % self.freeze_period == 0:\n self.target_nn.assign_to(self.prediction_nn, self.session)\n\n self.iteration += 1\n\n return loss, self.iteration", "def train(self, batch_size=64, n_episodes=100, max_episode_length=3000, save_path=\"last_save.h5\",\n load_path=None):\n\n self.explore = True # Explore if needed\n\n self._play_through(n_episodes=n_episodes, max_episode_length=max_episode_length, save_path=save_path,\n callbacks=self._train_callbacks_factory())", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=1,\n max_episodes=1000,\n center_returns=True,\n render=True,\n ):\n\n agent = self.create_agent(env)\n\n for episode in range(1, max_episodes + 1):\n obs = env.reset()\n done = False\n\n episode_return = 0.0\n while not done:\n action = agent.act(obs, deterministic=False)\n next_obs, reward, done, _ = env.step(action)\n episode_return += reward\n agent.store_step(obs, action, reward, next_obs, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if episode % train_every == 0:\n agent.perform_training(\n gamma=self.gamma, center_returns=center_returns\n )\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n print(\"Episode {} -- return={}\".format(episode, episode_return))\n return agent", "def training(self, dataset, repeat=1, gamma=1.0, learning_rate=0.1, model='3yo'):\n for _ in range(repeat):\n for episode in dataset:\n # 1- Get the data stored inside the dataset\n image_index = episode[0] # image of the object\n label_index = episode[1] # label given by the informant\n informant_index = episode[2] # an integer representing the informant\n informant_action = episode[3] # 0=reject, 1=accept\n\n # 2- The agent takes an action (with softmax) considering its current state-action table\n # [0=cup, 1=book, 2=ball]\n col = (image_index * self.tot_images) + label_index\n action_array = self.actor_matrix[:, col]\n action_distribution = self._softmax(action_array)\n child_action = np.random.choice(self.tot_actions,\n 1,\n p=action_distribution) # select the action through softmax\n\n # 3- (External) New state and reward obtained from the environment\n # u_t = self.critic_vector[0, col] # previous state\n # New state is estimated, in this simple case nothing happens\n # because the next state is terminal\n # u_t1 = u_t # Only in this example they are the same\n\n # 4- (Intrinsic) The informant_reputation is updated:\n # agent_action, agent_confidence, informant_action, reward\n # informant_vector: 0=unreliable, 1=reliable\n # do_actions_agree: False, True\n # Estimating child_confidence\n distance = np.absolute(action_distribution[0] - action_distribution[1])\n child_confidence_distribution = [1 - distance, distance] # non-knowledgeable, knowledgeable\n child_confidence = np.random.choice(2, 1, p=child_confidence_distribution)\n # Check if child and informant agree\n if (child_action == informant_action):\n do_actions_agree = True\n else:\n do_actions_agree = False\n # Increment the counter in the informant_vector.\n # Here we update the counter distribution only if\n # the child is confident, because it is only in that\n # case that the child can say if the informant is\n # reliable or not.\n if (do_actions_agree == False and child_confidence == 1):\n self.informant_vector[informant_index][0] += 1 # unreliable\n elif (do_actions_agree 
== True and child_confidence == 1):\n self.informant_vector[informant_index][1] += 1 # reliable\n elif (do_actions_agree == False and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n elif (do_actions_agree == True and child_confidence == 0):\n self.informant_vector[informant_index][1] += 0 # reliable\n self.informant_vector[informant_index][0] += 0 # unreliable\n else:\n raise ValueError(\"ERROR: anomaly in the IF condition for informant_vector update\")\n # Using the informant_vector given as input it estimates the reputation of the informant\n informant_reputation_distribution = np.true_divide(self.informant_vector[informant_index],\n np.sum(self.informant_vector[informant_index]))\n informant_reputation = np.random.choice(2, 1, p=informant_reputation_distribution)\n\n # 5- (Intrinsic) The Cost is estimated:\n # current_state, agent_action, agent_confidence, informant_action, informant_reputation\n # child_confidence: 0=non-knowledgeable, 1=knowledgeable\n # informant_reputation: 0=non-knowledgeable, 1=knowledgeable\n # action: 0=reject, 1=accept\n # informant_action: 0=reject, 1=accept\n cost = self._return_cost(child_confidence,\n informant_reputation,\n child_action,\n informant_action,\n value=model)\n\n # 6- The utility table is updated using: previous_state, current_state, cost, reward\n # Updating the critic using Temporal Difference learning\n # In this simple case there is no u_t1 state.\n # The current state is considered terminal.\n # We can delete the term (gamma*u_t1)-u_t and consider\n # only (reward-cost) as the utility of the state (see Russell & Norvig).\n reward = 0 # only for intrinsic learning reward=0\n delta = (reward - cost) # + (gamma*u_t1) - u_t\n self.critic_vector[0, col] += learning_rate * delta\n\n # 7- The actor table is updated using the delta from the critic\n # Update the ACTOR using the delta\n self.actor_matrix[child_action, col] += learning_rate * delta # the current action\n self.actor_matrix[1 - child_action, col] -= learning_rate * delta # the opposite action", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. 
Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n self._rng, self.optimizer, loss, mean_loss = train(\n self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self._target_opt,\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._num_actions,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n\n\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='ImplicitLoss',\n simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n while self.episodes_done < num_episodes:\n self.trainOneEpisode(num_episodes, max_episode_steps, save_freq, render)\n self.saveCheckpoint()", "def train_by_episode(self):\n # only REINFORCE and REINFORCE with baseline\n # use the following code\n # convert the rewards to returns\n rewards = []\n gamma = 0.99\n for item in self.memory:\n [_, _, _, reward, _] = item\n rewards.append(reward)\n # rewards = np.array(self.memory)[:,3].tolist()\n\n # compute return per step\n # return is the sum of rewards from t till the end of the episode\n # return replaces reward in the list\n for i in range(len(rewards)):\n reward = rewards[i:]\n horizon = len(reward)\n discount = [math.pow(gamma, t) for t in range(horizon)]\n return_ = np.dot(reward, discount)\n self.memory[i][3] = return_\n\n # train every step\n for item in self.memory:\n self.train(item, gamma=gamma)", "def fit(self, env, num_iterations, max_episode_length=None):\n print ('initializing replay memory...')\n sys.stdout.flush()\n self.mode = 'init'\n self.memory.clear()\n self.preprocessor.reset()\n self.num_steps = 0\n num_updates = 0\n num_episodes = 0\n while num_updates < num_iterations:\n state = env.reset()\n self.preprocessor.reset()\n num_episodes += 1\n t = 0\n total_reward = 0\n while True:\n self.num_steps +=1\n t += 1\n action, _ = self.select_action(state)\n next_state, reward, is_terminal, debug_info = env.step(action)\n\n reward = 
self.preprocessor.process_reward(reward)\n total_reward += reward\n\n preprocessed_state = self.preprocessor.process_state_for_memory(state)\n\n self.memory.append(preprocessed_state, action, reward, is_terminal)\n\n if self.num_steps > self.num_burn_in:\n if self.mode != 'train':\n print('Finish Burn-in, Start Training!')\n\n self.mode = 'train'\n if self.num_steps % self.train_freq == 0:\n self.update_predict_network()\n num_updates += 1\n if num_updates % 10000 == 0:\n self.q_network.save_weights('%s/model_weights_%d.h5' % (self.save_path, num_updates // 10000))\n \n if is_terminal or (max_episode_length is not None and t > max_episode_length):\n break\n \n state = next_state\n #print ('episode %d ends, lasts for %d steps (total steps:%d), gets %d reward. (%d/%d updates.)' % (num_episodes, t, self.))", "def train_epoch(self) -> None:\n ct = self.config.training\n total_games = self._get_total_games()\n print(f\"Total Games: {total_games}\")\n train_size = int(0.9 * total_games)\n dataset_wrapper = DatasetWrapper(self.config)\n self.agent.model.fit(\n dataset_wrapper.get_dataset(train_size),\n epochs=ct.epoch_to_checkpoint,\n validation_data=dataset_wrapper.get_dataset(train_size, is_training=False),\n )", "def fit(self, env, env_eval, num_iterations, max_episode_length=None):\n train_counter = 0;\n eval_res_hist = np.zeros((1,3));\n\n time_this, ob_this, is_terminal = env.reset()\n\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n\n setpoint_this = ob_this[6:8]\n \n this_ep_length = 0;\n flag_print_1 = True;\n flag_print_2 = True;\n action_counter = 0;\n \n for step in range(num_iterations):\n #Check which stage is the agent at. If at the collecting stage,\n #then the actions will be random actions.\n if step <= self._num_burn_in:\n if flag_print_1:\n logging.info (\"Collecting samples to fill the replay memory...\");\n flag_print_1 = False;\n\n action_mem = self.select_action(None, stage = 'collecting');\n action = self._policy.process_action(setpoint_this, action_mem)\n\n else:\n if flag_print_2:\n logging.info (\"Start training process...\");\n flag_print_2 = False;\n\n obs_this_net = self._preprocessor.process_observation_for_network(\n ob_this, self._min_array, self._max_array)\n \n state_this_net = np.append(obs_this_net[0:13], obs_this_net[14:]).reshape(1,16)\n\n action_mem = self.select_action(state_this_net, stage = 'training')\n # convert command to setpoint action \n action = self._policy.process_action(setpoint_this, action_mem) \n\n action_counter = action_counter + 1 if action_counter < 4 else 1;\n\n time_next, ob_next, is_terminal = env.step(action)\n ob_next = self._preprocessor.process_observation(time_next, ob_next)\n \n setpoint_next = ob_next[6:8]\n \n #check if we exceed the max_episode_length\n if max_episode_length != None and \\\n this_ep_length >= max_episode_length:\n is_terminal = True;\n\n #save sample into memory \n self._memory.append(Sample(ob_this, action_mem, ob_next\n , is_terminal))\n\n \n #Check which stage is the agent at. 
If at the training stage,\n #then do the training\n if step > self._num_burn_in:\n #Check the train frequency\n if action_counter % self._train_freq == 0 \\\n and action_counter > 0:\n action_counter = 0;\n #Eval the model\n if train_counter % self._eval_freq == 0:\n eval_res = self.evaluate(env_eval, self._eval_epi_num\n , show_detail = True);\n eval_res_hist = np.append(eval_res_hist\n , np.array([step\n , eval_res[0], eval_res[1]]).reshape(1, 3)\n , axis = 0);\n np.savetxt(self._log_dir + '/eval_res_hist.csv'\n , eval_res_hist, delimiter = ',');\n logging.info ('Global Step: %d, '%(step), 'evaluation average \\\n reward is %0.04f, average episode length is %d.'\\\n %eval_res);\n \n \n #Sample from the replay memory\n samples = self._preprocessor.process_batch(\n self._memory.sample(self._batch_size), \n self._min_array, self._max_array);\n #Construct target values, one for each of the sample \n #in the minibatch\n samples_x = None;\n targets = None;\n for sample in samples:\n sample_s = np.append(sample.obs[0:13], sample.obs[14:]).reshape(1,16)\n sample_s_nex = np.append(sample.obs_nex[0:13], \n sample.obs_nex[14:]).reshape(1,16)\n sample_r = self._preprocessor.process_reward(sample.obs_nex[12:15])\n\n target = self.calc_q_values(sample_s);\n a_max = self.select_action(sample_s_nex, stage = 'greedy');\n \n \n\n if sample.is_terminal:\n target[0, sample.a] = sample_r;\n else:\n target[0, sample.a] = (sample_r\n + self._gamma \n * self.calc_q_values_1(\n sample_s_nex)[0, a_max]);\n if targets is None:\n targets = target;\n else:\n targets = np.append(targets, target, axis = 0);\n if samples_x is None:\n samples_x = sample_s;\n else:\n samples_x = np.append(samples_x, sample_s, axis = 0);\n #Run the training\n \n \n feed_dict = {self._state_placeholder:samples_x\n ,self._q_placeholder:targets}\n sess_res = self._sess.run([self._train_op, self._loss]\n , feed_dict = feed_dict);\n \n #Update the target parameters\n if train_counter % self._target_update_freq == 0:\n self.update_policy();\n logging.info('Global Step %d: update target network.' 
\n %(step));\n #Save the parameters\n if train_counter % self._save_freq == 0 or step + 1 == num_iterations:\n checkpoint_file = os.path.join(self._log_dir\n , 'model_data/model.ckpt');\n self._saver.save(self._sess\n , checkpoint_file, global_step=step);\n \n if train_counter % 100 == 0:\n logging.info (\"Global Step %d: loss %0.04f\"%(step, sess_res[1]));\n # Update the events file.\n summary_str = self._sess.run(self._summary, feed_dict=feed_dict)\n self._summary_writer.add_summary(summary_str, train_counter);\n self._summary_writer.add_graph(self._sess.graph);\n self._summary_writer.flush()\n \n train_counter += 1;\n \n #check whether to start a new episode\n if is_terminal:\n time_this, ob_this, is_terminal = env.reset()\n ob_this = self._preprocessor.process_observation(time_this, ob_this)\n setpoint_this = ob_this[6:8]\n\n this_ep_length = 0;\n action_counter = 0;\n else:\n ob_this = ob_next\n setpoint_this = setpoint_next\n time_this = time_next\n this_ep_length += 1;", "def testStepTrain(self):\n with tf.compat.v1.Session() as sess:\n agent = self._create_test_agent(sess)\n agent.eval_mode = False\n base_observation = np.ones(self.observation_shape + (1,))\n # We mock the replay buffer to verify how the agent interacts with it.\n agent._replay = test_utils.MockReplayBuffer()\n self.evaluate(tf.compat.v1.global_variables_initializer())\n # This will reset state and choose a first action.\n agent.begin_episode(base_observation)\n\n expected_state = self.zero_state\n num_steps = 10\n for step in range(1, num_steps + 1):\n # We make observation a multiple of step for testing purposes (to\n # uniquely identify each observation).\n observation = base_observation * step\n self.assertEqual(agent.step(reward=1, observation=observation), 0)\n stack_pos = step - num_steps - 1\n if stack_pos >= -self.stack_size:\n expected_state[:, :, :, stack_pos] = np.full(\n (1,) + self.observation_shape, step)\n self.assertAllEqual(agent.state, expected_state)\n self.assertAllEqual(\n agent._last_observation,\n np.full(self.observation_shape, num_steps - 1))\n self.assertAllEqual(agent._observation, observation[:, :, 0])\n # We expect one more than num_steps because of the call to begin_episode.\n self.assertEqual(agent.training_steps, num_steps + 1)\n self.assertEqual(agent._replay.add.call_count, num_steps)\n\n agent.end_episode(reward=1)\n self.assertEqual(agent._replay.add.call_count, num_steps + 1)", "def train_RL(DIM, SHIPS):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n agent = ModelQLearning(\"Vikram\", DIM, len(SHIPS), device)\n env = Environment(DIM, SHIPS, \"Vikram\")\n batch_size = 64\n num_episodes = 100\n\n\n total_moves = 0\n\n for e in range(num_episodes):\n env.reset()\n state = env.get_state()\n inputs = []\n actions = []\n hits = []\n done = False\n for time in range(DIM*DIM):\n action = agent.move(state)\n reward, next_state = env.step(action)\n next_input, open_locations, hit, sunk, done = next_state\n if done == True:\n total_moves += len(hits)\n if e % batch_size == 0 and e != 0:\n print(\"Episodes: {}, Avg Moves: {}\".format(e,float(total_moves)/float(batch_size)))\n total_moves = 0\n\n agent.replay(inputs, actions, hits, env.total_ships_lengths)\n break\n\n inputs.append(next_input)\n actions.append(action)\n hits.append(hit)\n state = next_state\n\n if done == False:\n print(env.placement)\n print(inputs,actions, hits)\n # break", "def train(env, agent, n_episodes:int=1000, max_t:int=1000, eps_start:float=1.0, eps_end:float=0.01, eps_decay:float=0.995, 
score_threshold:float=13)->list:\n scores = []\n scores_window:Deque[float] = deque(maxlen=100)\n eps = eps_start\n best_score = float(\"-inf\")\n writer = tensorboard.SummaryWriter(f\"runs/{int(time())}\")\n for i_episode in range(1, n_episodes+1):\n state = env.reset()\n score = 0\n writer.add_scalar(\"train/epsilon\", eps, i_episode)\n \n start = time()\n for t in range(max_t):\n action = agent.act(state, eps)\n next_state, reward, done, _ = env.step(action)\n # writer.add_scalar(\"reward\", reward, (i_episode - 1) * max_t + t)\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break\n\n time_for_episode = time() - start\n writer.add_scalar(\"train/time\", time_for_episode, i_episode)\n scores_window.append(score)\n scores.append(score)\n\n eps = max(eps_end, eps_decay*eps)\n window_score = np.mean(scores_window)\n\n writer.add_scalar(\"train/reward\", score, i_episode) \n writer.add_scalar(\"train/window\", window_score, i_episode)\n writer.add_scalar(\"train/memory_size\", len(agent.memory), i_episode)\n\n probs = getattr(agent.memory, 'probs', None)\n if probs is not None:\n writer.add_histogram(\"train/memory_probs\", probs, i_episode)\n\n beta = getattr(agent.memory, 'beta', None)\n if beta is not None:\n writer.add_scalar(\"train/memory_beta\", beta, i_episode)\n agent.memory.beta = min(1., agent.memory.beta + agent.memory.beta_incremental)\n \n print(f'\\rEpisode {i_episode}\\tAverage Score: {window_score:.2f}\\tTime: {time_for_episode:.2f}', end=\"\")\n \n if i_episode % 100 == 0:\n print(f'\\rEpisode {i_episode}\\tAverage Score: {window_score:.2f}')\n\n if window_score >= score_threshold and best_score < score_threshold:\n print(f'\\nEnvironment solved in {i_episode:d} episodes!\\tAverage Score: {window_score:.2f}')\n\n if window_score > best_score and window_score >= score_threshold:\n best_score = window_score\n torch.save(agent.qnetwork_local.state_dict(), 'checkpoint.pt')\n\n memory_dump = getattr(agent.memory, 'memory', None) \n if memory_dump is not None: \n torch.save(memory_dump, \"memory.pt\")\n\n print(f\"Best average score: {best_score}\")\n writer.close()\n return scores", "def _sp_train(self, max_steps, instances, visualize, plot):\n # Keep track of rewards per episode per instance\n episode_reward_sequences = [[] for i in range(instances)]\n episode_step_sequences = [[] for i in range(instances)]\n episode_rewards = [0] * instances\n\n # Create and initialize environment instances\n envs = [self.create_env() for i in range(instances)]\n envs[0].render(mode='human')\n states = [env.reset()['observation'][0] for env in envs] # get the image\n\n for step in range(max_steps):\n for i in range(instances):\n if visualize: envs[i].render()\n action, angle_index, action_index = self.agent.act(states[i], i)\n\n next_state, reward, done, _ = envs[i].step(action)\n (next_image, next_depth) = next_state['observation']\n self.agent.push(\n Transition(states[i], [angle_index, action_index], reward, None if done else next_image), i)\n episode_rewards[i] += reward\n if done:\n episode_reward_sequences[i].append(episode_rewards[i])\n episode_step_sequences[i].append(step)\n episode_rewards[i] = 0\n if plot: plot(episode_reward_sequences, episode_step_sequences)\n (image, depth) = envs[i].reset()['observation']\n states[i] = image\n else:\n states[i] = next_image\n # Perform one step of the optimization\n self.agent.train(step)\n\n if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)", "def train_dqn(env, 
learn_dict, agent, log_results=True):\n scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = learn_dict['eps_start']\n brain_name = learn_dict['brain_name']\n n_episodes=learn_dict['n_episodes']\n max_t= learn_dict['max_t']\n eps_start= learn_dict['eps_start']\n eps_end= learn_dict['eps_end']\n eps_decay= learn_dict['eps_decay']\n early_stop = learn_dict['early_stop']\n\n for i_episode in range(1, n_episodes+1):\n env_info = env.reset(train_mode=True)[brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0\n for t in range(max_t):\n \n action = agent.act(state, eps).astype(int)\n env_info = env.step(action)[brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n # have the agent learn a step\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n if log_results: print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n if np.mean(scores_window)>=early_stop:\n if log_results: print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return scores", "def _train_step(self):\n # Run a train op at the rate of self.update_period if enough training steps\n # have been run. This matches the Nature DQN behaviour.\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n\n if self._replay_scheme == 'prioritized':\n # The original prioritized experience replay uses a linear exponent\n # schedule 0.4 -> 1.0. Comparing the schedule to a fixed exponent of\n # 0.5 on 5 games (Asterix, Pong, Q*Bert, Seaquest, Space Invaders)\n # suggested a fixed exponent actually performs better, except on Pong.\n probs = self.replay_elements['sampling_probabilities']\n # Weight the loss by the inverse priorities.\n loss_weights = 1.0 / jnp.sqrt(probs + 1e-10)\n loss_weights /= jnp.max(loss_weights)\n else:\n loss_weights = jnp.ones(self.replay_elements['state'].shape[0])\n\n\n self.optimizer, loss, mean_loss = train(self.network_def,\n self.target_network_params,\n self.optimizer,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n loss_weights,\n self.cumulative_gamma,\n self._target_opt,\n self._mse_inf,\n self._tau,\n self._alpha,\n self._clip_value_min,\n self._rng)\n\n if self._replay_scheme == 'prioritized':\n # Rainbow and prioritized replay are parametrized by an exponent\n # alpha, but in both cases it is set to 0.5 - for simplicity's sake we\n # leave it as is here, using the more direct sqrt(). Taking the square\n # root \"makes sense\", as we are dealing with a squared loss. Add a\n # small nonzero value to the loss to avoid 0 priority items. 
While\n # technically this may be okay, setting all items to 0 priority will\n # cause troubles, and also result in 1.0 / 0.0 = NaN correction terms.\n self._replay.set_priority(self.replay_elements['indices'],\n jnp.sqrt(loss + 1e-10))\n \n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(tag='HuberLoss', simple_value=mean_loss)])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(self, episodes=2000, max_steps=99):\n\n for episode in range(episodes):\n state = self.env.reset()\n\n for step in range(max_steps):\n explore_eploit_tradeoff = np.random.uniform()\n\n if explore_eploit_tradeoff > self.epsilon:\n action = np.argmax(self.q_table[state, :])\n else:\n action = self.env.action_space.sample()\n\n new_state, reward, done, info = self.env.step(action)\n\n self.q_table[state, action] = self.q_table[state, action] \\\n + self.lr * (reward + self.gamma * np.amax(\n self.q_table[new_state, :]\n ) - self.q_table[state, action]\n )\n\n state = new_state\n if done:\n break\n exp_ = np.exp(-self.decay_rate * episode)\n self.epsilon = self.min_eps + exp_ * (self.max_eps - self.min_eps)", "def trainOneEpisode(self, num_episodes, max_episode_steps=100, save_freq=100, render=False):\n # tqdm.write('------Episode {} / {}------'.format(self.episodes_done, num_episodes))\n self.resetEnv()\n r_total = 0\n with trange(1, max_episode_steps+1, leave=False) as t:\n\n for step in t:\n if render:\n self.env.render()\n state = self.state\n action, q = self.selectAction(state, require_q=True)\n obs_, r, done, info = self.takeAction(action.item())\n # if print_step:\n # print 'step {}, action: {}, q: {}, reward: {} done: {}' \\\n # .format(step, action.item(), q, r, done)\n r_total += r\n # t.set_postfix(step='{:>5}'.format(step), q='{:>5}'.format(round(q, 4)), total_reward='{:>5}'.format(r_total))\n t.set_postfix_str('step={:>5}, q={:>5}, total_reward={:>5}'.format(step, round(q, 2), r_total))\n if done or step == max_episode_steps:\n next_state = None\n else:\n next_state = self.getNextState(obs_)\n reward = torch.tensor([r], device=self.device, dtype=torch.float)\n self.memory.push(state, action, next_state, reward)\n self.optimizeModel()\n if self.steps_done % self.target_update == 0:\n self.target_net.load_state_dict(self.policy_net.state_dict())\n\n if done or step == max_episode_steps - 1:\n tqdm.write('------Episode {} ended, total reward: {}, step: {}------' \\\n .format(self.episodes_done, r_total, step))\n tqdm.write('------Total steps done: {}, current e: {} ------' \\\n .format(self.steps_done, self.exploration.value(self.steps_done)))\n # print '------Episode {} ended, total reward: {}, step: {}------' \\\n # .format(self.episodes_done, r_total, step)\n # print '------Total steps done: {}, current e: {} ------' \\\n # .format(self.steps_done, self.exploration.value(self.steps_done))\n self.episodes_done += 1\n self.episode_rewards.append(r_total)\n self.episode_lengths.append(step)\n if self.episodes_done % save_freq == 0:\n self.saveCheckpoint()\n break\n self.state = next_state", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each 
{} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train(self, num_episodes = 10000, verbose = True):\n start_time = datetime.now().replace(microsecond=0)\n for e in range(num_episodes):\n S_old = self.env.reset()\n steps = 0\n # there is an interal limit of 100 steps\n while steps < 1000:\n steps += 1\n A = self.epsilon_greedy(S_old)\n S_new, reward, done, info = self.env.step(A)\n self.Q[S_old, A] = self.Q[S_old, A] + self.alpha * \\\n (reward + self.gamma * np.max(self.Q[S_new, :]) - self.Q[S_old, A])\n if done:\n break\n S_old = S_new\n if verbose:\n clear_output(wait=True)\n now_time = datetime.now().replace(microsecond=0)\n print(\"Epoch: {}/{} - Steps: {:4} - Duration: {}\".format(e+1, num_episodes, steps, now_time-start_time))\n\n return self.Q", "def train(\n self,\n num_episodes: int = int(1e4),\n num_reward_updates: int = 10,\n batch_size: int = 128,\n expert_demos: str = 'demos.pkl',\n ) -> None:\n # Set train\n self.agent.set_train()\n self.best_loss = np.float('inf')\n self.best_reward = np.float('-inf')\n self.reward_tracker = self.best_reward * np.ones(self.env.num_envs)\n\n # Expert demonstrations\n with open(expert_demos, 'rb') as f:\n demos = pickle.load(f) # runner.Experiences\n if self.use_gpu:\n demos.to_gpu()\n\n for i in range(num_episodes):\n # Generate samples\n batch = self.runner.generate_batch(64)\n flat_batch = flatten_batch(copy.deepcopy(batch))\n agent_batch_size = len(flat_batch['states'])\n expert_batch_size = len(demos.states)\n\n # Update cost function\n for j in range(num_reward_updates):\n selected_idxs = torch.randperm(expert_batch_size)[:batch_size]\n expert_states = demos.states[selected_idxs]\n expert_actions = demos.actions[selected_idxs]\n\n selected_idxs = torch.randperm(agent_batch_size)[:batch_size]\n states = flat_batch['states'][selected_idxs]\n actions = flat_batch['actions'][selected_idxs]\n\n states = torch.cat([states, expert_states], dim=0)\n actions = torch.cat([actions, expert_actions], dim=0)\n loss_cost_dict = self.agent.update_cost(\n states, actions, expert_states, expert_actions\n )\n\n # Update policy\n loss_reward_dict = self.agent.update(batch)\n # Log\n self.log(i, loss_cost_dict)\n self.log(i, loss_reward_dict)\n\n # Save agent\n loss = loss_cost_dict['loss/ioc'] + loss_reward_dict['loss/total']\n\n # Logging\n for ep_count, info_dict in batch['infos']:\n self.log(ep_count, info_dict)\n for (k, v) in info_dict.items():\n if 'reward' in k:\n agent_num = int(k.split('/')[1])\n self.reward_tracker[agent_num] = v\n\n mean_reward = np.mean(self.reward_tracker)\n\n self.log(i, {'values/mean_reward': mean_reward})\n\n # added in a check to make sure we aren't counting initial low loss\n if (loss < self.best_loss and i > 1000) or i % 500 == 0:\n self.agent.save(self.save_path, i)\n self.best_loss = loss\n logging.info(\n \"Save new best model at epoch %i with loss %0.4f.\"\n % (i, loss)\n )", "def main(model_path, n_rounds):\n\n env = gym.make('LunarLander-v2')\n\n agent = Agent()\n\n state_dict = torch.load(model_path)\n agent.network.load_state_dict(state_dict)\n agent.network.eval()\n\n for i in range(n_rounds):\n\n state = env.reset()\n total_reward, total_step = 0, 0\n\n while True:\n env.render()\n action = agent.sample(state)\n state, reward, 
done, _ = env.step(action)\n total_reward += reward\n total_step += 1\n\n if done:\n print(f\"episode {i+1:3d}, \"\n f\"total_reward = {total_reward:6.1f}, \"\n f\"total step: {total_step:4d}\")\n break", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def fit(self, X_train: np.ndarray, y_train: np.ndarray, epochs: int, batch_size: int, eval_step: int, log_step: int,\n collect_steps_per_episode: int) -> None:\n\n self.dataset = self.replay_buffer.as_dataset(\n num_parallel_calls=3,\n sample_batch_size=batch_size,\n num_steps=2).prefetch(3)\n\n self.iterator = iter(self.dataset)\n\n def collect_step(environment, policy, buffer):\n time_step = environment.current_time_step()\n action_step = policy.action(time_step)\n next_time_step = environment.step(action_step.action)\n traj = trajectory.from_transition(time_step, action_step, next_time_step)\n\n # Add trajectory to the replay buffer\n buffer.add_batch(traj)\n\n def collect_data(env, policy, buffer, steps):\n for _ in range(steps):\n collect_step(env, policy, buffer)\n\n # (Optional) Optimize by wrapping some of the code in a graph using TF function.\n self.agent.train = common.function(self.agent.train)\n\n # Reset the train step\n self.agent.train_step_counter.assign(0)\n\n for _ in range(epochs):\n #print(\"epoch: \", _)\n # Collect a few steps using collect_policy and save to the replay buffer.\n collect_data(self.train_env, self.agent.collect_policy, self.replay_buffer, collect_steps_per_episode)\n\n # Sample a batch of data from the buffer and update the agent's network.\n experience, _ = next(self.iterator)\n train_loss = self.agent.train(experience).loss\n\n step = self.agent.train_step_counter.numpy()\n\n if step % log_step == 0:\n print('step = {0}: loss = {1}'.format(step, train_loss))\n\n if step % eval_step == 0:\n metrics = self.compute_metrics(X_train, y_train)\n print(metrics)", "def train(self, gamma = GAMMA, learningRate = LR, eps = EPS, epsDecayInterval = EPS_DECAY_INTERVAL, epsDecayRate = EPS_DECAY_RATE, minEps = MIN_EPS, epNum = NUM_EPISODES, epStart = 0, trainingStart = TRAINING_START, experienceSize = EXPERIENCE_SIZE, minibatchSize = MINIBATCH_SIZE, adversary = None, checkpointFolder = CHECKPOINT_FOLDER, checkpointInterval = CHECKPOINT_INTERVAL, printInterval = PRINT_INTERVAL):\n\n allActions = np.asarray(range(self.env.action_space.n))\n saver = tf.train.Saver()\n experience = deque([], experienceSize)\n\n episodeLengths = []\n episodeLengthsSeconds = []\n episodeRewards = []\n attacksNumbers = []\n losses = []\n\n trainingStart = epStart + trainingStart\n for i in range(epStart, epNum):\n s = utils.preprocess(self.env.reset())\n frames = np.expand_dims(np.repeat(s, 4, 2), 0)\n done = False\n episodeLength = 0\n episodeReward = 0.0\n attNum = 0\n\n episodeStartTime = time()\n while not done:\n actionScores, actionProbs = self.sess.run([self.logits, self.probs], feed_dict={self.inputs:frames})\n a = np.random.choice(allActions, p=utils.epsGreedyProbs(actionScores[0], eps))\n self._attack(adversary, frames, actionProbs)\n\n for j in range(self.frameSkip):\n sj, r, done, _ = self.env.step(a)\n sj = utils.preprocess(sj)\n episodeLength += 1\n episodeReward += r\n\n framesJ = utils.pushframe(frames, sj)\n experience.append((frames, a, r, framesJ, done))\n frames = framesJ\n\n if i > trainingStart:\n # actionScoresJ = sess.run(outQ, feed_dict={self.inputs:framesJ})\n startStates, actions, rewards, endStates, dones = getRandomMinibatch(experience, 
minibatchSize)\n\n actionScoresSS = self.sess.run(self.logits, feed_dict={self.inputs:startStates})\n actionScoresES = self.sess.run(self.logits, feed_dict={self.inputs:endStates})\n targets = computeMinibatchTargets(actions, rewards, dones, gamma, actionScoresSS, actionScoresES)\n los = self.sess.run([self.loss, self.update], feed_dict={self.inputs:startStates, self.target:targets})[0]\n losses.append(los)\n\n episodeEndTime = time()\n episodeLengths.append(episodeLength)\n episodeLengthsSeconds.append(episodeEndTime-episodeStartTime)\n episodeRewards.append(episodeReward)\n attacksNumbers.append(attNum)\n\n if eps > minEps and ((i+1) % epsDecayInterval) == 0:\n eps = eps * epsDecayRate\n print(\"eps decayed to \" + str(eps) + \" in episode \" + str(i + 1) + \" (\" + str(sum(episodeLengths)) + \"'th timestamp)\")\n\n if (i + 1) % checkpointInterval == 0:\n saver.save(self.sess, checkpointFolder + \"dqn_episode\" + str(i + 1) + \".ckpt\")\n print(\"Saved checkpoint in episode \" + str(i + 1) + \" with reward = \" + str(episodeRewards[-1]))\n\n if (i + 1) % printInterval == 0:\n print(str(i + 1) + \" / \" + str(epNum) + \" length = \" + str(np.mean(episodeLengths[-10:])) + \" (\" + str(np.mean(episodeLengthsSeconds[-10:])) + \"s) reward = \" + str(np.mean(episodeRewards[-10:])) + \" loss = \" + str(losses[-1]))\n if self.goalReached(episodeRewards):\n print(\"Finished training after \" + str(i + 1) + \" episodes. Goal achieved.\")\n break\n\n saver.save(self.sess, checkpointFolder + \"dqn_final.ckpt\")\n print(\"Finished training. Saved final checkpoint.\")\n return episodeLengths, episodeRewards, attacksNumbers, losses", "def train(epoch, rewards=1, punishment=-100):\n # Init setting\n environment = gym.make('CartPole-v1')\n agent = Learner(environment)\n\n # Early stopping\n perfect_times = 0\n\n # Plot\n scores, epsilons = [], []\n\n for e in range(epoch):\n # Reset state for each epoch\n state = environment.reset().reshape((1, 4))\n done = False\n\n # Assume 2000 is our ultimate goal (cart keeps 2000 frames)\n for frame in range(2000):\n # Make one action\n action = agent.act(state)\n next_state, _, done, _ = environment.step(action)\n next_state = next_state.reshape((1, 4))\n\n # Customised reward and punishment\n reward = punishment if done else rewards\n\n # Build memory\n agent.remember_play(state, action, reward, next_state, done)\n\n # Train process\n agent.replay()\n state = next_state\n\n # End this game if done\n if done:\n # Update the target model for next inner prediction\n agent.update_target_model()\n\n # Store the scores for plotting\n scores.append(frame)\n epsilons.append(agent.epsilon)\n\n print((\"epoch: {}/{}, score {}, \" +\n \"epsilon {} {}\").format(e, epoch, frame,\n agent.epsilon, FILE))\n break\n\n # Early stopping when getting `EARLY` continuous perfect score\n if frame == 499:\n perfect_times += 1\n if perfect_times == EARLY:\n break\n else:\n perfect_times = 0\n\n # Save the model and weights\n save_weight(agent.model)\n save_model(agent.model)\n\n # Save plotting data\n df = pd.DataFrame()\n df['epoch'] = range(1, len(scores) + 1)\n df['score'] = scores\n df['epsilon'] = epsilons\n df.to_csv(CSV_FILE, index=False)\n\n return agent", "def train(self, num_decisions=350):\n os.system(\"mkdir \" + self.folder_name + \"Train\")\n for i in range(5000):\n episode_folder_name = self.folder_name + \"Train/\" + str(i) + \"/\"\n all_system_states = []\n all_system_rewards = []\n all_system_states_cluster = []\n all_grid_states_cluster = []\n 
all_surrounding_states_cluster = []\n os.system(\"mkdir \" + episode_folder_name)\n filename = episode_folder_name + str(i) + \".h5\"\n self.system.reset_context(filename)\n self.system.run_decorrelation(20)\n grid_dist, surrounding_dist, _, _, _, _ = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n for j in range(num_decisions):\n action_index = self._get_action(state, i)\n transition_to_add = [state, action_index]\n tag = \"_train_\" + str(j)\n actions = [self.all_actions[i] for i in action_index]\n try:\n self.system.update_action(actions)\n system_states, system_rewards, system_states_cluster = self.system.run_step(\n is_detailed=True, tag=tag)\n all_system_states.append(system_states)\n all_system_rewards.append(system_rewards)\n all_system_states_cluster.append(system_states_cluster)\n\n except OpenMMException:\n print(\"Broken Simulation at Episode:\",\n str(i), \", Decision:\", str(j))\n break\n\n grid_dist, surrounding_dist, grid_reward, surrounding_reward, grid_states_cluster, surrounding_states_cluster = self.system.get_state_reward()\n state = self._get_state(grid_dist, surrounding_dist)\n reward = self._get_reward(grid_reward, surrounding_reward)\n\n all_grid_states_cluster.append(grid_states_cluster)\n all_surrounding_states_cluster.append(surrounding_states_cluster)\n\n # Use len_reward for number of grids\n done = [False] * len(reward) # Never Done\n transition_to_add.extend([reward, state, done])\n rb_decision_samples = 0\n for rb_tuple in zip(*transition_to_add):\n self.buffer.push(*list(rb_tuple))\n\n for _ in range(self.update_num):\n self._update()\n self._save_episode_data(episode_folder_name)\n np.save(episode_folder_name + \"system_states\",\n np.array(all_system_states))\n np.save(episode_folder_name + \"system_rewards\",\n np.array(all_system_rewards))\n np.save(episode_folder_name + \"system_states_cluster\",\n np.array(all_system_states_cluster))\n np.save(episode_folder_name + \"grid_states_cluster\",\n np.array(all_grid_states_cluster, dtype=object))\n np.save(episode_folder_name + \"all_states_cluster\",\n np.array(all_surrounding_states_cluster))\n self._save_data()", "def _train(trainer, train_data, batcher_fn, total_batch_steps = 5, seed = 1):\n for i in range(total_batch_steps):\n torch.manual_seed(seed)\n set_seed(seed)\n data, targets = batcher_fn(train_data, i*35)\n trainer.train_step(data, targets)", "def train(self, episodes, epsilon_initial, epsilon_min, epsilon_stop_episode,\n network_update_freq, gamma, memory_capacity, batch_size):\n\n memory = ReplayMemory(memory_capacity)\n\n tot_steps = 0\n running_loss = 0\n\n depsilon = (epsilon_initial-epsilon_min)/epsilon_stop_episode\n\n for episode in range(episodes):\n\n if epsilon_initial > epsilon_min:\n epsilon_initial -= depsilon\n\n if episode % network_update_freq == 0:\n # Update target network\n self.NN_target.load_state_dict(self.NN.state_dict())\n\n if (episode + 1) % 10 == 0:\n print(f'Episode {episode + 1}/{episodes} completed!')\n print(f'Average steps per episode: {tot_steps / 10}')\n writer.add_scalar('training loss', running_loss / tot_steps, episode)\n self.plotValue()\n tot_steps = 0\n running_loss = 0\n\n state, done = self.env.reset()\n\n\n while not done:\n tot_steps += 1\n\n action = self.chooseAction(epsilon_initial, state)\n\n reward, next_state, done= self.env.transitionState(state, action)\n\n #score += reward\n reward = torch.tensor([[reward]], device=device)\n done = torch.tensor([[done]], device=device)\n\n # Saves the transition\n 
memory.push(self.RBF[state], self.RBF[next_state], reward, done)\n\n                # Perform one step of batch gradient descent\n                running_loss += self.optimizeModel(memory, batch_size, gamma)\n\n                state = next_state\n\n        writer.close()", "def train(net, start):\n    # Initialize optimizer\n    optimizer = optim.Adam(net.parameters(), lr=1e-6)\n    # Initialize loss function\n    loss_func = nn.MSELoss()\n\n    # Initialize game\n    game_state = game.GameState()\n\n    # Initialize replay memory\n    memory = ReplayMemory(net.replay_memory_size)\n\n    # Initial action is do nothing\n    action = torch.zeros(2, dtype=torch.float32)\n    action[0] = 1\n\n    # [1, 0] is do nothing, [0, 1] is fly up\n    image_data, reward, terminal = game_state.frame_step(action)\n\n    # Image Preprocessing\n    image_data = resize_and_bgr2gray(image_data)\n    image_data = image_to_tensor(image_data)\n    state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)\n\n    # Initialize epsilon value\n    epsilon = net.initial_epsilon\n\n    # Epsilon annealing\n    epsilon_decrements = np.linspace(net.initial_epsilon, net.final_epsilon, net.num_iterations)\n\n    t = 0\n    \n    # Train Loop\n    print(\"Start Episode\", 0)\n    for iteration in range(net.num_iterations):\n        # Get output from the neural network\n        output = net(state)[0]\n\n        # Initialize action\n        action = torch.zeros(2, dtype=torch.float32)\n        if torch.cuda.is_available():\n            action = action.cuda()\n\n        # Epsilon greedy exploration\n        random_action = random.random() <= epsilon\n        if random_action:\n            print(\"Performed random action!\")\n        action_index = [torch.randint(2, torch.Size([]), dtype=torch.int)\n                        if random_action\n                        else torch.argmax(output)][0]\n\n        if torch.cuda.is_available():\n            action_index = action_index.cuda()\n\n        action[action_index] = 1\n\n        # Get next state and reward\n        image_data_1, reward, terminal = game_state.frame_step(action)\n        image_data_1 = resize_and_bgr2gray(image_data_1)\n        image_data_1 = image_to_tensor(image_data_1)\n        state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)\n\n        action = action.unsqueeze(0)\n        reward = torch.from_numpy(np.array([reward], dtype=np.float32)).unsqueeze(0)\n\n        # Save transition to replay memory\n        memory.push(state, action, reward, state_1, terminal)\n\n        # Epsilon annealing\n        epsilon = epsilon_decrements[iteration]\n\n        # Sample random minibatch\n        minibatch = memory.sample(min(len(memory), net.minibatch_size))\n\n        # Unpack minibatch\n        state_batch = torch.cat(tuple(d[0] for d in minibatch))\n        action_batch = torch.cat(tuple(d[1] for d in minibatch))\n        reward_batch = torch.cat(tuple(d[2] for d in minibatch))\n        state_1_batch = torch.cat(tuple(d[3] for d in minibatch))\n\n        if torch.cuda.is_available():\n            state_batch = state_batch.cuda()\n            action_batch = action_batch.cuda()\n            reward_batch = reward_batch.cuda()\n            state_1_batch = state_1_batch.cuda()\n\n        # Get output for the next state\n        output_1_batch = net(state_1_batch)\n\n        # Set y_j to r_j for terminal state, otherwise to r_j + gamma*max(Q)\n        y_batch = torch.cat(tuple(reward_batch[i] if minibatch[i][4]\n                                  else reward_batch[i] + net.gamma * torch.max(output_1_batch[i])\n                                  for i in range(len(minibatch))))\n\n        # Extract the Q-value of the chosen action: the one-hot action mask zeroes out all other actions before the sum\n        q_value = torch.sum(net(state_batch) * action_batch, dim=1)\n\n        optimizer.zero_grad()\n\n        # Returns a new Tensor, detached from the current graph, the result will never require gradient\n        y_batch = y_batch.detach()\n\n        # Calculate loss\n        loss = loss_func(q_value, y_batch)\n\n        # Do backward pass\n        loss.backward()\n        optimizer.step()\n\n        # Set state to be 
state_1\n        state = state_1\n\n        if iteration % 25000 == 0:\n            torch.save(net, \"model_weights/current_model_\" + str(iteration) + \".pth\")\n\n        if iteration % 100 == 0:\n            print(\"iteration:\", iteration, \"elapsed time:\", time.time() - start, \"epsilon:\", epsilon, \"action:\",\n                  action_index.cpu().detach().numpy(), \"reward:\", reward.numpy()[0][0], \"Q max:\",\n                  np.max(output.cpu().detach().numpy()))\n\n        t += 1\n\n        # Plot duration\n        if terminal:\n            print(\"Start Episode\", len(net.episode_durations) + 1)\n            net.episode_durations.append(t)\n            plot_durations(net.episode_durations)\n            t = 0", "def train(self, training_steps=10):", "def train(self, environment, seed=0):\n        # set the seeds\n        np.random.seed(seed)\n        environment.seed(seed)\n        # prepare the file for the results\n        save_results = SaveResults()\n        save_results.set_seed(seed)\n\n        # prepare to display the states\n        if self.parameters[\"display_environment\"]:\n            self.show_render = ShowRender()\n\n        for t in tqdm(range(1, self.parameters[\"number_episodes\"] + 1)):\n            self._train_simulate(environment, t)\n\n            if not t % 200:\n                save_results.write_message_in_a_file(\"score\", self.score)", "def step(self, num_agent):\n        # Save experience / reward\n        # memory.add(state, action, reward, next_state, done)\n\n        self.n_steps = (self.n_steps + 1) % UPDATE_EVERY ###\n        # Learn, if enough samples are available in memory\n        if len(memory) > BATCH_SIZE and self.n_steps == 0: ###\n            experiences = memory.sample()\n            self.learn(experiences, GAMMA, num_agent)\n        \n        self.n_steps += 1", "def init_trainers(self, args):\n        self.actors_cur = [None for _ in range(self.num_agents)]\n        self.critics_cur = [None for _ in range(self.num_agents)]\n        self.actors_tar = [None for _ in range(self.num_agents)]\n        self.critics_tar = [None for _ in range(self.num_agents)]\n        self.optimizers_c = [None for _ in range(self.num_agents)]\n        self.optimizers_a = [None for _ in range(self.num_agents)]\n        input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n        if args.restore == True: # restore the model\n            game_step = int(args.old_model_name.split('_')[-1][:-1])\n            for idx in range(self.num_agents):\n                self.actors_cur[idx] = torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n                self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n                self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n                self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n                self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n                self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n            self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n            self.var = self.min_var if self.var < self.min_var else self.var\n            old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n        # Note: if you need to load an old model, there should be a procedure for judging if the trainers[idx] is None\n        for i in range(self.num_agents):\n            self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n                args).to(args.device)\n            self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n                args).to(args.device)\n            self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n                args).to(args.device)\n            self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n                args).to(args.device)\n            self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), 
args.lr_a)\n self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n # return the old data, no need to update the trainers\n if args.restore == True: return old_data\n\n self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target par using the cur\n self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target par using the cur", "def train_epoch(self):\n # We can't validate a winner for submissions generated by the learner,\n # so we will use a winner-less match when getting rewards for such states\n blank_match = {\"winner\":None}\n\n learner_submitted_actions = 0\n null_actions = 0\n\n # Shuffle match presentation order\n if(self.N_TEMP_TRAIN_MATCHES):\n path_to_db = \"../data/competitiveMatchData.db\"\n sources = {\"patches\":self.TEMP_TRAIN_PATCHES, \"tournaments\":[]}\n print(\"Adding {} matches to training pool from {}.\".format(self.N_TEMP_TRAIN_MATCHES, path_to_db))\n temp_matches = pool.match_pool(self.N_TEMP_TRAIN_MATCHES, path_to_db, randomize=True, match_sources=sources)[\"matches\"]\n else:\n temp_matches = []\n data = self.training_data + temp_matches\n\n shuffled_matches = random.sample(data, len(data))\n for match in shuffled_matches:\n for team in self.teams:\n # Process match into individual experiences\n experiences = mp.process_match(match, team)\n for pick_id, experience in enumerate(experiences):\n # Some experiences include NULL submissions (usually missing bans)\n # The learner isn't allowed to submit NULL picks so skip adding these\n # to the buffer.\n state,actual,_,_ = experience\n (cid,pos) = actual\n if cid is None:\n null_actions += 1\n continue\n # Store original experience\n self.replay.store([experience])\n self.step_count += 1\n\n # Give model feedback on current estimations\n if(self.step_count > self.observations):\n # Let the network predict the next action\n feed_dict = {self.ddq_net.online_ops[\"input\"]:[state.format_state()],\n self.ddq_net.online_ops[\"valid_actions\"]:[state.get_valid_actions()]}\n q_vals = self.ddq_net.sess.run(self.ddq_net.online_ops[\"valid_outQ\"], feed_dict=feed_dict)\n sorted_actions = q_vals[0,:].argsort()[::-1]\n top_actions = sorted_actions[0:4]\n\n if(random.random() < self.epsilon):\n pred_act = random.sample(list(top_actions), 1)\n else:\n # Use model's top prediction\n pred_act = [sorted_actions[0]]\n\n for action in pred_act:\n (cid,pos) = state.format_action(action)\n if((cid,pos)!=actual):\n pred_state = deepcopy(state)\n pred_state.update(cid,pos)\n r = get_reward(pred_state, blank_match, (cid,pos), actual)\n new_experience = (state, (cid,pos), r, pred_state)\n\n self.replay.store([new_experience])\n learner_submitted_actions += 1\n\n if(self.epsilon > 0.1):\n # Reduce epsilon over time\n self.epsilon -= self.eps_decay_rate\n\n # Use minibatch sample to update online network\n if(self.step_count > self.pre_training_steps):\n self.train_step()\n\n if(self.step_count % self.target_update_frequency == 0):\n # After the online network has been updated, update target network\n _ = self.ddq_net.sess.run(self.ddq_net.target_ops[\"target_update\"])\n\n # Get training loss, training_acc, and val_acc to return\n loss, train_acc = self.validate_model(self.training_data)\n _, val_acc = self.validate_model(self.validation_data)\n return (loss, train_acc, val_acc)", "def explore(self):\n\n i = 0\n while True:\n i += 1\n \n state_counts = {game.__class__.__name__: Counter() for game in self.games} \n\n policies_prime = []\n 
pi_sum = 0\n v_sum = 0\n counter = 0\n \n # bookkeeping\n log.info(f'Starting Exploration Iteration #{i} ...')\n\n # for task in tasks...\n for _ in range(self.args['taskBatchSize']):\n\n # create deepcopy for training a theta'\n policy_prime = copy.deepcopy(self.nnet)\n \n # sample a game (task)\n game = np.random.choice(self.games, p=self.probs)\n log.info(f'Sampled game {type(game).__name__} ...')\n\n # multiprocess to get our training examples\n iterationTrainExamples = deque([], maxlen=self.args['maxlenOfQueue'])\n iterationTrainExamples = run_apply_async_multiprocessing(self.executeEpisode, [(MCTS(game, self.nnet, self.args), type(game)(), self.args.copy())] * self.args['numEps'], self.args['numWorkers'], desc='Self Play')\n iterationTrainExamples, iter_counters = zip(*iterationTrainExamples)\n\n iterationTrainExamples = list(itertools.chain.from_iterable(iterationTrainExamples))\n state_counts[game.__class__.__name__] += sum(iter_counters, Counter())\n\n # shuffle examples before training\n shuffle(iterationTrainExamples)\n\n # train our network\n pi_v_losses = policy_prime.train(iterationTrainExamples)\n\n policies_prime.append(policy_prime.state_dict())\n\n for pi,v in pi_v_losses:\n pi_sum += pi\n v_sum += v\n counter += 1\n \n # compute average parameters and load into self.nnet\n self.nnet.load_average_params(policies_prime)\n\n # training new network, keeping a copy of the old one\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n self.pnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n pmcts = MCTS(self.games[0], self.pnet, self.args)\n\n\n # Arena if we choose to run it\n if self.args['arenaComparePerGame'] > 0:\n # ARENA\n nmcts = MCTS(self.games[0], self.nnet, self.args)\n\n log.info('PITTING AGAINST PREVIOUS VERSION')\n arena = Arena()\n pwins, nwins, draws = arena.playGames(self.pnet, self.nnet, self.args, self.games)\n\n log.info('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) < self.args['updateThreshold']:\n log.info('REJECTING NEW MODEL')\n self.nnet.load_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='temp.pth.tar')\n else:\n log.info('ACCEPTING NEW MODEL')\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args['checkpoint'] + '/exploring', filename='best.pth.tar')\n\n log.info('Iteration Complete. Writing counts to \"%s/%s\"...', *self.args['json_folder_file'])\n # create the json file\n path = os.path.join(self.args['json_folder_file'][0], self.args['json_folder_file'][1])\n with open(path, 'a+') as f:\n if os.stat(path).st_size == 0: ## file just created/empty\n log.info('No counts found. 
Writing to empty file.')\n                    old_counts = {game.__class__.__name__: Counter() for game in self.games}\n                else: ## load the counts from the file\n                    log.info('Loading counts...')\n                    f.seek(0)\n                    str_counts = f.read()\n                    # print('STRING OF JSON:', type(str_counts), str_counts)\n                    old_counts = json.loads(str_counts)\n                    old_counts = {game: Counter(v) for game, v in old_counts.items()}\n                master_counts = {game.__class__.__name__: state_counts[game.__class__.__name__]+old_counts[game.__class__.__name__] for game in self.games}\n                # counting logic: turn {gametype -> Counter} into {gametype -> {state -> count}}\n                master_counts = {game: dict(counter) for game, counter in master_counts.items()}\n                log.info('Writing...')\n                f.truncate(0) #clear file\n                json.dump(master_counts, f)\n            log.info('Counts written to json file \"%s/%s\"...', *self.args['json_folder_file'])", "def learn(self):\n\n        self.t_step += 1\n\n        # Sample from replay buffer, which already has nstep rollout calculated.\n        batch = self.memory.sample(self.batch_size)\n        obs, next_obs, actions, rewards, dones = batch\n\n        # Gather and concatenate actions because critic networks need ALL\n        # actions as input, the stored actions were concatenated before storing\n        # in the buffer\n        target_actions = [agent.actor_target(next_obs[i]) for i, agent in\n                          enumerate(self.agents)]\n        predicted_actions = [agent.actor(obs[i]) for i, agent in\n                             enumerate(self.agents)]\n        target_actions = torch.cat(target_actions, dim=-1)\n        predicted_actions = torch.cat(predicted_actions, dim=-1)\n\n        # Change state data from [agent_count, batch_size]\n        # to [batchsize, state_size * agent_count]\n        # because critic networks need ALL observations as input\n        obs = obs.transpose(1,0).contiguous().view(self.batch_size, -1)\n        next_obs = next_obs.transpose(1,0).contiguous().view(self.batch_size,-1)\n\n        # Perform a learning step for each agent using concatenated data as well\n        # as unique-perspective data where algorithmically called for\n        for i, agent in enumerate(self.agents):\n            agent.learn(obs, next_obs, actions, target_actions,\n                        predicted_actions, rewards[i], dones[i])\n            self.update_networks(agent)", "def setup_training(self):\n    print('setup training called')\n    self.steps_done = 0\n    self.current_episode_num = 1\n    self.total_reward = 0\n\n    # self.optimizer = optim.RMSprop(policy_net.parameters())\n    self.memory = ReplayMemory(300000)\n    self.total_reward_history = []\n    # self.loss_history = []\n    self.positions = []\n    self.n_destroyed_crates = 0\n    self.is_in_bomb_range = False", "def train_SN(model, optimizer, scheduler, episodes=1):\n    model = model.to(device=device) # move the model parameters to CPU/GPU\n    for episode in range(episodes):\n        scheduler.step(episode)\n        model.train() # set to train mode\n\n        # make the samplers \n        # make 2 samplers, one for the \"sample/training set\" of a one-shot classifier\n        # other sampler is for the \"query/test set\" which provides many comparisons\n        train_sample_sampler = SampleSampler(num_cl=NUM_CL)\n        sampled_classes = train_sample_sampler.cl_list\n        sampled_examples = train_sample_sampler.ex_list\n        train_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=NUM_EX)\n\n        # make the dataloaders\n        s_batch_num = 1 # one shot \"training\" each\n        q_batch_num = NUM_EX # pair up number of examples per class in a batch (default 19)\n        train_sample_loader = DataLoader(omni_train, batch_size=s_batch_num, sampler=train_sample_sampler)\n        train_query_loader = DataLoader(omni_train, batch_size=q_batch_num, sampler=train_query_sampler)\n        \n        # start training \n        
scores = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n targets = torch.zeros(NUM_CL,(NUM_EX+NUM_CL-1)).to(device=device, dtype=dtype)\n sample_count = 0\n for i, (sample, sample_label) in enumerate(train_sample_loader):\n sample_count += 1\n idx = 0\n for j, (batch, batch_labels) in enumerate(train_query_loader):\n if sample_label != batch_labels[0]:\n k = np.random.randint(NUM_EX)\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n elif sample_label == batch_labels[0]:\n for k in range(NUM_EX):\n query = batch[k,:,:,:].to(device=device, dtype=dtype)\n query = query.view(1,1,IMG_SIZE,IMG_SIZE)\n sample = sample.to(device=device, dtype=dtype)\n targets[i,idx] = make_target(sample_label, batch_labels[0])\n scores[i,idx] = model(sample,query)\n idx += 1\n \n targets = targets.view(-1)\n scores = scores.view(-1)\n \n # train and update model\n optimizer.zero_grad()\n #loss = F.binary_cross_entropy(scores, targets)\n loss = F.mse_loss(scores, targets)\n loss.backward()\n #nn.utils.clip_grad_norm_(model.parameters(),0.5)\n optimizer.step()\n\n # episodic updates\n if (episode+1)%100 == 0:\n print(\"episode:\",episode+1,\"loss\",loss.data)\n\n if (episode+1)%1000 == 0:\n ''' Test the model '''\n # make the samplers \n test_sample_sampler = SampleSampler(total_cl=659)\n sampled_classes = test_sample_sampler.cl_list\n sampled_examples = test_sample_sampler.ex_list\n test_query_sampler = QuerySampler(sampled_classes, sampled_examples, num_inst=1)\n\n # make the dataloaders\n s_batch_num = 1 # one shot each\n q_batch_num = 1 # one test each\n test_sample_loader = DataLoader(omni_test, batch_size=s_batch_num, sampler=test_sample_sampler)\n test_query_loader = DataLoader(omni_test, batch_size=q_batch_num, sampler=test_query_sampler)\n check_accuracy(test_sample_loader, test_query_loader, model)\n\n if (episode+1)%100000 == 0:\n \"\"\" Save as a draft model \"\"\"\n torch.save(model.state_dict(), PATH)", "def train(self, persist: bool = False, run: int = -1, checkpoint: int = -1):\n self.meta = ICMMetaDataV1(fp=open(os.path.join(MODULE_CONFIG.BaseConfig.BASE_DIR, 'agent_stats.csv'), 'w'),\n args=self.state.config)\n train_start = time.time()\n for episode in range(self.state.episodes):\n start_time = time.time()\n state = self.env.reset()\n state = torch.reshape(tensor(state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1, 2).to(\n self.device)\n done = False\n episode_reward = []\n episode_loss = []\n\n # save network\n # if episode % self.state.model_save_interval == 0:\n # save_path = self.state.model_save_path + '/' + self.run_name + '_' + str(episode) + '.pt'\n # torch.save(self.q_network.state_dict(), save_path)\n # print('Successfully saved: ' + save_path)\n\n # Save Model\n self.save(episode)\n # Collect garbage\n # To Do Later\n\n while not done:\n\n # update target network\n if self.state.step % self.state.network_update_interval == 0:\n print('Updating target network')\n self.target_network.load_state_dict(self.q_network.state_dict())\n\n if self.state.step > len(self.replay_memory):\n self.state.epsilon = max(self.state.final_epsilon,\n self.state.initial_epsilon - self.state.epsilon_step * self.state.step)\n if self.state.epsilon > self.state.final_epsilon:\n self.state.mode = 'Explore'\n else:\n self.state.mode = 'Exploit'\n\n action, q = 
self.take_action(state, test=False, state_count=0)\n next_state, reward, done, _ = self.env.step(action)\n\n next_state = torch.reshape(tensor(next_state, dtype=torch.float32), [1, 84, 84, 4]).permute(0, 3, 1,\n 2).to(\n self.device)\n self.push((state, torch.tensor([int(action)]), torch.tensor([reward], device=self.device), next_state,\n torch.tensor([done], dtype=torch.float32)))\n episode_reward.append(reward)\n self.state.step += 1\n state = next_state\n\n # train network\n if self.state.step >= self.start_to_learn and self.state.step % self.state.network_train_interval == 0:\n loss = self.optimize_network()\n episode_loss.append(loss)\n\n if done:\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode)\n # print('Episode:', episode, ' | Steps:', self.state.step, ' | Eps: ', self.state.epsilon,\n # ' | Reward: ',\n # sum(episode_reward),\n # ' | Avg Reward: ', np.mean(self.last_n_rewards), ' | Loss: ',\n # np.mean(episode_loss), ' | Intrinsic Reward: ', sum(self.intrinsic_episode_reward),\n # ' | Avg Intrinsic Reward: ', np.mean(self.last_n_intrinsic_rewards),\n # ' | Mode: ', self.state.mode, file=self.log_file)\n # self.log_summary(episode, episode_loss, episode_reward)\n self.last_n_rewards.append(sum(episode_reward))\n self.last_n_intrinsic_rewards.append(sum(self.intrinsic_episode_reward))\n self.meta.update_episode(episode, self.state.step, self.state.epsilon,\n sum(episode_reward), np.mean(self.last_n_rewards),\n np.mean(episode_loss), sum(self.intrinsic_episode_reward),\n np.mean(self.last_n_intrinsic_rewards), self.state.mode)\n\n episode_reward.clear()\n episode_loss.clear()\n self.intrinsic_episode_reward.clear()", "def train_dqn(self, n_episodes=2000, max_t=1000, eps_start=1.0, eps_end=0.01, eps_decay=0.995):\n self.scores = [] # list containing scores from each episode\n scores_window = deque(maxlen=100) # last 100 scores\n eps = eps_start # initialize epsilon\n for i_episode in range(1, n_episodes+1):\n env_info = self.env.reset(train_mode=True)[self.brain_name] # reset the environment\n state = env_info.vector_observations[0] # get the current state\n score = 0 # initialize the score\n for t in range(max_t):\n action = self.agent.act(state, eps)\n env_info = self.env.step(action)[self.brain_name] # send the action to the environment\n next_state = env_info.vector_observations[0] # get the next state\n reward = env_info.rewards[0] # get the reward\n done = env_info.local_done[0] # see if episode has finished\n self.agent.step(state, action, reward, next_state, done)\n state = next_state\n score += reward\n if done:\n break \n scores_window.append(score) # save most recent score\n self.scores.append(score) # save most recent score\n eps = max(eps_end, eps_decay*eps) # decrease epsilon\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)), end=\"\")\n if i_episode % 100 == 0:\n print('\\rEpisode {}\\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_window)))\n # we use 15.0 just to be sure\n if np.mean(scores_window)>=self.threshold:\n print('\\nEnvironment solved in {:d} episodes!\\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_window)))\n break\n return self.scores", "def 
training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # sample memories\n states_val, action_val, rewards, next_state_val, continues \\\n = (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # evaluate the target q\n target_q = self.sess.run(self.graph.target_q_values, feed_dict={self.graph.states: next_state_val})\n # if using double q\n if self.params.double_q:\n online_q = self.sess.run(self.graph.online_q_values, feed_dict={self.graph.states: next_state_val})\n actions = np.argmax(online_q, axis=1)\n max_next_q_values = target_q[np.arange(actions.shape[0]), actions].reshape(-1, 1)\n else:\n max_next_q_values = np.max(target_q, axis=1, keepdims=True)\n # train the online DQN\n td_target = rewards + continues * self.params.discount_factor * max_next_q_values\n _, self.loss_val = self.sess.run([self.graph.training_op, self.graph.loss],\n feed_dict={self.graph.states: states_val, self.graph.actions: action_val,\n self.graph.td_target: td_target})\n # copy to target\n if self.params.copy_interval is None or (\n self.params.copy_interval and (self.iteration % self.params.copy_interval == 0)):\n self.sess.run(self.graph.copy_online_to_target)", "def train(n):\n\n ai = NimAI()\n\n print(f\"Play {n} training games\")\n for _ in range(n): \n game = Nim()\n\n # Keep track of last move made by either player\n last = {\n 0: {\"state\": None, \"action\": None},\n 1: {\"state\": None, \"action\": None}\n }\n\n # Game loop\n while True:\n\n # Keep track of current state and action\n state = game.piles.copy()\n action = ai.chooseAction(game.piles)\n\n # Keep track of last state and action\n last[game.player][\"state\"] = state\n last[game.player][\"action\"] = action\n\n # Make move and switch players\n game.move(action)\n new_state = game.piles.copy()\n\n # When game is over, update Q values with rewards\n if game.winner is not None:\n # The game is over when a player just made a move that lost him the game.\n # The move from the previous player was therefore game winning.\n # Both events are used to update the AI.\n # new_state is [0, 0, 0, 0] here and its used to update the AI, because\n # future rewards should not be considered in the Q-learning formula.\n ai.update(state, action, new_state, -1)\n ai.update(\n last[game.player][\"state\"],\n last[game.player][\"action\"],\n new_state,\n 1\n )\n break\n\n # If game is continuing, no rewards yet\n elif last[game.player][\"state\"] is not None:\n ai.update(\n last[game.player][\"state\"],\n last[game.player][\"action\"],\n new_state,\n 0\n )\n\n print(\"Done training\")\n\n # Return the trained AI\n return ai", "def test_num_steps(ray_start_2_cpus, use_local):\n\n def data_creator(config):\n train_dataset = [0] * 5 + [1] * 5\n val_dataset = [0] * 5 + [1] * 5\n return DataLoader(train_dataset, batch_size=config[\"batch_size\"]), \\\n DataLoader(val_dataset, batch_size=config[\"batch_size\"])\n\n batch_size = 1\n Operator = TrainingOperator.from_creators(model_creator, optimizer_creator,\n data_creator)\n\n def train_func(self, iterator, info=None):\n total_sum = 0\n num_items = 0\n for e in iterator:\n total_sum += e\n num_items += 1\n return {\"average\": total_sum.item() / num_items}\n\n TestOperator = get_test_operator(Operator)\n trainer = TorchTrainer(\n training_operator_cls=TestOperator,\n num_workers=2,\n use_local=use_local,\n add_dist_sampler=False,\n config={\n \"batch_size\": batch_size,\n \"custom_func\": train_func\n })\n\n # 
If num_steps not passed, should do one full epoch.\n result = trainer.train()\n # Average of 5 0s and 5 1s\n assert result[\"average\"] == 0.5\n assert result[\"epoch\"] == 1\n val_result = trainer.validate()\n assert val_result[\"average\"] == 0.5\n\n # Train again with num_steps.\n result = trainer.train(num_steps=5)\n # 5 zeros\n assert result[\"average\"] == 0\n assert result[\"epoch\"] == 2\n val_result = trainer.validate(num_steps=5)\n assert val_result[\"average\"] == 0\n\n # Should continue where last train run left off.\n result = trainer.train(num_steps=3)\n # 3 ones.\n assert result[\"average\"] == 1\n assert result[\"epoch\"] == 2\n val_result = trainer.validate(num_steps=3)\n assert val_result[\"average\"] == 1\n\n # Should continue from last train run, and cycle to beginning.\n result = trainer.train(num_steps=5)\n # 2 ones and 3 zeros.\n assert result[\"average\"] == 0.4\n assert result[\"epoch\"] == 3\n val_result = trainer.validate(num_steps=5)\n assert val_result[\"average\"] == 0.4\n\n # Should continue, and since num_steps not passed in, just finishes epoch.\n result = trainer.train()\n # 2 zeros and 5 ones.\n assert result[\"average\"] == 5 / 7\n assert result[\"epoch\"] == 3\n val_result = trainer.validate()\n assert val_result[\"average\"] == 5 / 7\n\n trainer.shutdown()", "def fit(self, num_iterations, max_episode_length=250, eval_every_nth=1000, save_model_every_nth=1000, log_loss_every_nth=1000, video_every_nth=20000):\n self.compile()\n self.policy = LinearDecayGreedyEpsilonPolicy(start_value=1., end_value=0.1, num_steps=1e6, num_actions=self.num_actions) # for training\n self.replay_memory = ReplayMemory(max_size=1000000)\n self.log_loss_every_nth = log_loss_every_nth\n random_policy = UniformRandomPolicy(num_actions=self.num_actions) # for burn in \n num_episodes = 0\n\n # tf logging\n self.tf_session = K.get_session()\n self.tf_summary_writer = tf.summary.FileWriter(self.log_dir, self.tf_session.graph)\n\n while self.iter_ctr < num_iterations:\n state = self.env.reset()\n self.preprocessor.reset_history_memory()\n\n num_timesteps_in_curr_episode = 0\n total_reward_curr_episode = 0 \n\n while num_timesteps_in_curr_episode < max_episode_length:\n self.iter_ctr+=1 # number of steps overall\n num_timesteps_in_curr_episode += 1 # number of steps in the current episode\n\n # logging\n # if not self.iter_ctr % 1000:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n\n # this appends to uint8 history and also returns stuff ready to be spit into the network\n state_network = self.preprocessor.process_state_for_network(state) #shape is (4,84,84,1). 
axis are swapped in cal_q_vals\n # print \"shape {}, max {}, min {}, type {} \".format(state_network.shape, np.max(state_network), np.min(state_network), state_network.dtype)\n\n # burning in \n if self.iter_ctr < self.num_burn_in:\n action = random_policy.select_action() # goes from 0 to n-1\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n # atari_preprocessor.process_state_for_memory converts it to grayscale, resizes it to (84, 84) and converts to uint8\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n # this should be called when num_timesteps_in_curr_episode > max_episode_length, but we can call it in is_terminal as well. \n # it won't change anything as it just sets the last entry's is_terminal to True\n self.replay_memory.end_episode() \n break\n\n # training\n else:\n # print \"iter_ctr {}, num_episodes : {} num_timesteps_in_curr_episode {}\".format(self.iter_ctr, num_episodes, num_timesteps_in_curr_episode)\n q_values = self.calc_q_values(state_network)\n # print \"q_values {} q_values.shape {}\".format(q_values, q_values.shape)\n #print \"q_values.shape \", q_values.shape\n action = self.policy.select_action(q_values=q_values, is_training=True)\n next_state, reward, is_terminal, _ = self.env.step(action)\n reward_proc = self.preprocessor.process_reward(reward)\n total_reward_curr_episode += reward_proc\n state_proc_memory = self.preprocessor.process_state_for_memory(state)\n self.replay_memory.append(state_proc_memory, action, reward_proc, is_terminal)\n\n # validation. 
keep this clause before the breaks!\n if not(self.iter_ctr%eval_every_nth):\n print \"\\n\\nEvaluating at iter {}\".format(self.iter_ctr)\n if not(self.iter_ctr%video_every_nth):\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=True)\n else:\n self.evaluate(num_episodes=20, max_episode_length=max_episode_length, gen_video=False)\n print \"Done Evaluating\\n\\n\"\n\n # save model\n if not(self.iter_ctr%save_model_every_nth):\n self.q_network.save(os.path.join(self.log_dir, 'weights/q_network_{}.h5'.format(str(self.iter_ctr).zfill(7))))\n\n if is_terminal or (num_timesteps_in_curr_episode > max_episode_length-1):\n state = self.env.reset()\n num_episodes += 1\n with tf.name_scope('summaries'):\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_no_of_episodes', value=total_reward_curr_episode, step=num_episodes)\n self.tf_log_scaler(tag='train_reward_per_episode_wrt_iterations', value=total_reward_curr_episode, step=self.iter_ctr)\n print \"iter_ctr {}, num_episodes : {}, episode_reward : {}, loss : {}, episode_timesteps : {}, epsilon : {}\".format\\\n (self.iter_ctr, num_episodes, total_reward_curr_episode, self.loss_last, num_timesteps_in_curr_episode, self.policy.epsilon)\n num_timesteps_in_curr_episode = 0\n self.dump_train_episode_reward(total_reward_curr_episode)\n self.replay_memory.end_episode() \n break\n\n if not(self.iter_ctr % self.train_freq):\n self.update_policy()\n\n state = next_state", "def training_step(self):\n self.iteration += 1\n # if not enough replay memories\n if self.iteration < self.params.min_replays:\n # skip training\n return\n # for each batch\n for _ in range(self.params.num_batches):\n # sample memories\n mem_states, mem_controls, mem_rewards, mem_next_states, mem_continues = \\\n (rl.tools.sample_memories(self.memory, self.params.batch_size))\n # train the critic\n max_q = self.sess.run(self.graph.target_critic_outputs, feed_dict={self.graph.states: mem_next_states})\n td_target = mem_rewards + mem_continues * self.params.discount_factor * max_q\n self.reg_loss_val, self.critic_loss_val, _ = self.sess.run(\n [self.graph.critic_reg_loss, self.graph.critic_loss, self.graph.critic_training_op],\n feed_dict={self.graph.states: mem_states, self.graph.actor_outputs: mem_controls,\n self.graph.td_target: td_target})\n # train the actor\n neg_mean_q_val, _ = self.sess.run([self.graph.neg_mean_q, self.graph.actor_training_op],\n feed_dict={self.graph.states: mem_states})\n self.mean_q_val = -1.0 * neg_mean_q_val\n # copy to target\n self.sess.run(self.graph.copy_online_to_target)", "def train(\r\n self,\r\n max_episodes : int,\r\n exploration_rate=0.9,\r\n discount=0.9,\r\n batch_size=32,\r\n timesteps_per_episode=200,\r\n warm_start=False,\r\n model_alignment_period=100,\r\n save_animation_period=100,\r\n save_model_period=10,\r\n evaluate_model_period=50,\r\n evaluation_size=10,\r\n exploration_rate_decay=0.99,\r\n min_exploration_rate=0.1,\r\n epochs=1,\r\n log_q_values=False) -> Controller:\r\n\r\n # Log training parameters\r\n params = {\r\n \"max_episodes\": max_episodes,\r\n \"exploration_rate\": exploration_rate,\r\n \"discount\": discount,\r\n \"batch_size\": batch_size,\r\n \"timesteps_per_episode\": timesteps_per_episode,\r\n \"model_alignment_period\": model_alignment_period,\r\n \"evaluate_model_period\": evaluate_model_period,\r\n \"evaluation_size\": evaluation_size,\r\n \"exploration_rate_decay\": exploration_rate_decay,\r\n \"min_exploration_rate\": min_exploration_rate,\r\n \"epochs\": epochs\r\n }\r\n 
self.params.update(params)\r\n self.Logger.log_params(self.params)\r\n\r\n # Load existing model for warm start\r\n if warm_start:\r\n check = self._load_model()\r\n if not check:\r\n print(\"Using default network\") # TODO: temp solution\r\n\r\n max_reward = -100\r\n for episode in range(1, max_episodes + 1):\r\n t1 = time.time()\r\n total_reward = 0\r\n eval_score = 0\r\n terminated = False\r\n steps = 0\r\n state = self.environment.reset(random=True) # start from random state\r\n\r\n # TODO: Check if possible to avoid reshape!!\r\n state = state[self.idx].reshape(1, self.state_size)\r\n\r\n for timestep in range(timesteps_per_episode):\r\n # Predict which action will yield the highest reward.\r\n action = self._act(state, exploration_rate,log_q_values)\r\n\r\n # Take the system forward one step in time.\r\n next_state = self.environment.step(action)\r\n\r\n # Compute the actual reward for the new state the system is in.\r\n current_time = timestep * self.environment.step_size\r\n reward = self.environment.reward(next_state, current_time)\r\n\r\n # Check whether the system has entered a terminal case.\r\n terminated = self.environment.terminated(next_state, current_time)\r\n\r\n # TODO: Can this be avoided?\r\n next_state = next_state[self.idx].reshape(1, self.state_size)\r\n\r\n # Store results for current step.\r\n self._store(state, action, reward, next_state, terminated)\r\n\r\n # Update statistics.\r\n total_reward += reward\r\n state = next_state\r\n steps = timestep+1\r\n\r\n if len(self.experience) >= batch_size:\r\n self._experience_replay(batch_size, discount, epochs)\r\n #exploration_rate *= exploration_rate_decay\r\n\r\n # Terminate episode if the system has reached a termination state.\r\n if terminated:\r\n break\r\n\r\n # Log the average loss for this episode\r\n\r\n self.Logger.log_loss(np.mean(self.episode_loss), episode)\r\n self.episode_loss = []\r\n\r\n # Log the average Q-values for this episode\r\n if log_q_values:\r\n self.Logger.log_q_values(self.episode_q_values/steps, episode)\r\n self.episode_q_values = np.zeros(len(self.environment.action_space))\r\n\r\n if exploration_rate > min_exploration_rate:\r\n exploration_rate *= exploration_rate_decay\r\n else:\r\n exploration_rate = min_exploration_rate\r\n t2 = time.time()\r\n print(\r\n f\"Episode: {episode:>5}, \"\r\n f\"Score: {total_reward:>10.1f}, \"\r\n f\"Steps: {steps:>4}, \"\r\n f\"Simulation Time: {(steps * self.environment.step_size):>6.2f} Seconds, \"\r\n f\"Computation Time: {(t2-t1):>6.2f} Seconds, \"\r\n f\"Exploration Rate: {exploration_rate:>0.3f}\")\r\n\r\n if episode % model_alignment_period == 0:\r\n self._align_target_model()\r\n\r\n # if episode % save_animation_period == 0:\r\n # self.environment.save(episode)\r\n\r\n\r\n if episode % evaluate_model_period == 0:\r\n eval_score = self._evaluate(evaluation_size, max_steps=timesteps_per_episode,episode=episode)\r\n\r\n if eval_score > max_reward:\r\n self._save_model(\"best\")\r\n max_reward = eval_score\r\n\r\n self._save_model(\"latest\")\r\n\r\n # Create Controller object\r\n controller = Controller(self.environment.get_action_space(), self.q_network, self.idx)\r\n print(\"Controller Created\")\r\n return controller", "def train(self):\n\n agent_step = self._num_actions_taken\n\n if agent_step >= self._train_after:\n if (agent_step % self._train_interval) == 0:\n pre_states, actions, post_states, rewards, terminals = self._memory.minibatch(self._minibatch_size)\n\n self._trainer.train_minibatch(\n self._trainer.loss_function.argument_map(\n 
pre_states=pre_states,\n actions=Value.one_hot(actions.reshape(-1, 1).tolist(), self.nb_actions),\n post_states=post_states,\n rewards=rewards,\n terminals=terminals\n )\n )\n\n # Update the Target Network if needed\n if (agent_step % self._target_update_interval) == 0:\n self._target_net = self._action_value_net.clone(CloneMethod.freeze)\n filename = \"models\\model%d\" % agent_step\n self._trainer.save_checkpoint(filename)", "def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)", "def learn(self):\n epochswin = [] # count the number of wins at every epoch of the network against the preceding version\n epochdraw = [] # count the number of draws at every epoch of the network against the preceding version\n epochswingreedy = [] # count the number of wins against greedy at every epoch\n epochswinrandom = [] # count the number of wins against random at every epoch\n epochsdrawgreedy = [] # count the number of draws against greedy at every epoch\n epochsdrawrandom = [] # count the number of wins against random at every epoch\n epochswinminmax = [] # count the number of wins against minmax at every epoch\n epochsdrawminmax = [] # count the number of draws against minmax at every epoch\n\n\n if self.args.load_model == True:\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \".txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswin.append(word)\n elif index == 1:\n epochdraw.append(word)\n file.close()\n\n file = open(self.args.trainExampleCheckpoint + \"graphwins:iter\" + str(self.args.numIters) + \":eps\" + str(\n self.args.numEps) + \":dim\" + str(self.game.n) + \":greedyrandom.txt\", \"r+\")\n lines = file.readlines()\n for index, line in enumerate(lines):\n for word in line.split():\n if index == 0:\n epochswingreedy.append(word)\n elif index == 1:\n epochsdrawgreedy.append(word)\n elif index == 2:\n epochswinrandom.append(word)\n elif index == 3:\n epochsdrawrandom.append(word)\n elif index == 4:\n epochswinminmax.append(word)\n elif index == 5:\n epochsdrawminmax.append(word)\n file.close()\n self.loadTrainExamples()\n\n for i in range(1, self.args.numIters + 1):\n # bookkeeping\n print('------ITER ' + str(i) + '------')\n # examples of the iteration\n iterationTrainExamples = deque([], maxlen=self.args.maxlenOfQueue)\n\n eps_time = AverageMeter()\n bar = Bar('Self Play', max=self.args.numEps)\n end = time.time()\n\n for eps in range(self.args.numEps):\n iterationTrainExamples += self.executeEpisode()\n\n # bookkeeping + plot progress\n eps_time.update(time.time() - end)\n end = time.time()\n bar.suffix = '({eps}/{maxeps}) Eps Time: {et:.3f}s | Total: {total:} | ETA: {eta:}'.format(eps=eps + 1,\n maxeps=self.args.numEps,\n et=eps_time.avg,\n total=bar.elapsed_td,\n eta=bar.eta_td)\n bar.next()\n bar.finish()\n\n # save the iteration examples to the history\n self.trainExamplesHistory.append(iterationTrainExamples)\n\n if len(self.trainExamplesHistory) > self.args.numItersForTrainExamplesHistory:\n print(\"len(trainExamplesHistory) =\", len(self.trainExamplesHistory),\n \" => remove the oldest trainExamples\")\n 
self.trainExamplesHistory.pop(0)\n            # backup history to a file\n            # NB! the examples were collected using the model from the previous iteration, so (i-1)\n            self.saveTrainExamples(i - 1)\n\n            # shuffle examples before training\n            trainExamples = []\n            for e in self.trainExamplesHistory:\n                trainExamples.extend(e)\n            shuffle(trainExamples)\n\n            # training new network, keeping a copy of the old one\n\n            filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n                self.game.n) + \".pth.tar\"\n            filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n                self.game.n) + \".pth.tar\"\n\n            self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n            exists = os.path.isfile(filenameBest)\n            if exists:\n                self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n            else:\n                self.pnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n            pmcts = MCTS(self.game, self.pnet, self.args)\n\n            self.nnet.train(trainExamples)\n            nmcts = MCTS(self.game, self.nnet, self.args)\n\n            print('PITTING AGAINST PREVIOUS VERSION')\n            arena = Arena(lambda x: np.argmax(pmcts.getActionProb(x, temp=0)),\n                          lambda x: np.argmax(nmcts.getActionProb(x, temp=0)), self.game,nmcts,pmcts,evaluate=True)\n\n            pwins, nwins, draws = arena.playGames(self.args.arenaCompare, False)\n\n            pmcts.clear()\n            nmcts.clear()\n            del pmcts\n            del nmcts\n\n            print(' ')\n            print('NEW/PREV WINS : %d / %d ; DRAWS : %d' % (nwins, pwins, draws))\n            if i == 1:\n                epochswin.append(pwins)\n                epochdraw.append(0)\n\n            epochswin.append(nwins)\n            epochdraw.append(draws)\n            self.writeLogsToFile(epochswin, epochdraw)\n\n            ''' Get all the players and then pit them against the network. You need to modify here if you implement \n            more players\n            '''\n            (gp, rp, mp) = self.decidePlayers()\n\n            if self.args.parallel == 0:\n\n\n                nmcts1 = MCTS(self.game, self.nnet, self.args)\n                nmcts2 = MCTS(self.game, self.nnet, self.args)\n                nmcts3 = MCTS(self.game, self.nnet, self.args)\n\n                arenagreedy = Arena(lambda x: np.argmax(nmcts1.getActionProb(x, temp=0)), gp, self.game,nmcts1)\n                arenarandom = Arena(lambda x: np.argmax(nmcts2.getActionProb(x, temp=0)), rp, self.game,nmcts2)\n                arenaminmax = Arena(lambda x: np.argmax(nmcts3.getActionProb(x, temp=0)), mp, self.game,nmcts3,evaluate=True)\n\n                pwinsminmax, nwinsminmax, drawsminmax = arenaminmax.playGames(self.args.arenaCompare)\n                print(\"minmax - \"+str(pwinsminmax)+\" \"+str(nwinsminmax)+\" \"+str(drawsminmax))\n                pwinsgreedy, nwinsgreedy, drawsgreedy = arenagreedy.playGames(self.args.arenaCompare)\n                print(\"greedy - \"+str(pwinsgreedy)+\" \"+str(nwinsgreedy)+\" \"+str(drawsgreedy))\n                pwinsreandom, nwinsrandom, drawsrandom = arenarandom.playGames(self.args.arenaCompare)\n                print(\"random - \"+str(pwinsreandom)+\" \"+str(nwinsrandom)+\" \"+str(drawsrandom))\n\n                nmcts1.clear()\n                nmcts2.clear()\n                nmcts3.clear()\n                del nmcts1\n                del nmcts2\n                del nmcts3\n\n            else:\n                '''\n                This will be used if you want to evaluate the network against the benchmarks in a parallel way\n                '''\n\n                self.args.update({'index': str(i)})\n\n                p = self.parallel(self.args.arenaCompare)\n                (pwinsminmax, nwinsminmax, drawsminmax) = p[0] # self.parallel(\"minmax\", self.args.arenaCompare)\n                (pwinsgreedy, nwinsgreedy, drawsgreedy) = p[1] # self.parallel(\"greedy\",self.args.arenaCompare)\n                (pwinsreandom, nwinsrandom, drawsrandom) = p[2] # self.parallel(\"random\",self.args.arenaCompare)\n\n            epochsdrawgreedy.append(drawsgreedy)\n            
epochsdrawrandom.append(drawsrandom)\n epochswinrandom.append(pwinsreandom)\n epochswingreedy.append(pwinsgreedy)\n epochswinminmax.append(pwinsminmax)\n epochsdrawminmax.append(drawsminmax)\n\n self.writeLogsToFile(epochswingreedy, epochsdrawgreedy, epochswinrandom, epochsdrawrandom, epochswinminmax,\n epochsdrawminmax, training=False)\n\n if pwins + nwins == 0 or float(nwins) / (pwins + nwins) <= self.args.updateThreshold:\n print('REJECTING NEW MODEL')\n filename = \"curent\"+str(i)+\"temp:iter\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n filenameBest = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n exists = os.path.isfile(filenameBest)\n if exists:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filenameBest)\n else:\n self.nnet.load_checkpoint(folder=self.args.checkpoint, filename=filename)\n\n else:\n print('ACCEPTING NEW MODEL')\n\n filename = \"best\" + str(self.args.numIters) + \":eps\" + str(self.args.numEps) + \":dim\" + str(\n self.game.n) + \".pth.tar\"\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=self.getCheckpointFile(i))\n self.nnet.save_checkpoint(folder=self.args.checkpoint, filename=filename)\n self.mcts.clear()\n del self.mcts\n self.mcts = MCTS(self.game, self.nnet, self.args, mcts=True) # reset search tree\n print(self.tracker.print_diff())\n self.writeLogsToFile(epochswin, epochdraw, training=True)", "def train(self, n_steps=1000, show=False):\n epsilon = 0.01\n state = self.env.reset()\n done = False\n episode_rewards = []\n episode_reward = 0\n losses = []\n for step_i in range(n_steps):\n action = self.act(state, epsilon)\n next_state, reward, done, _ = self.env.step(action)\n episode_reward += reward\n\n self.optimizer.zero_grad()\n loss = self._compute_loss(state, action, reward, next_state, done)\n losses.append(loss)\n loss.backward()\n self.optimizer.step()\n\n if done:\n state = self.env.reset()\n episode_rewards.append(episode_reward)\n episode_reward = 0\n else:\n state = next_state\n \n if show:\n self._plot(step_i, episode_rewards, losses)", "def on_train_start(self, agent, **kwargs):\n self.train_start = timeit.default_timer()\n self.nb_steps = kwargs['nb_steps']\n print('Training for {} steps ...'.format(self.nb_steps))", "def fit(self, env, num_iteration, do_train=False):\n\n #s, a, r, new_s, d = get_multi_step_sample(one_step_memory, self.gamma, self.num_step)\n #self.replay_memory.append((s, a, r, new_s, d))\n # epsilon update\n num_env = env.num_process\n env.reset()\n\n for t in range(0, num_iteration, num_env):\n self.global_step += 1\n #print(\"Global_step: {}\".format(self.global_step))\n old_state, action, reward, new_state, is_terminal = self.get_multi_step_sample(env)\n self.replay_memory.append(old_state, action, reward, new_state, is_terminal)\n\n \"\"\"\n Epsilon update\n epsilon begin 1.0, end up 0.1\n FIX\n \"\"\"\n\n self.epsilon = self.epsilon+ num_env*self.epsilon_increment if self.epsilon > EPSILON_END else EPSILON_END\n num_update = sum([1 if i%self.update_freq == 0 else 0 for i in range(t, t+num_env)])\n if do_train:\n for _ in range(num_update):\n\n if self.per == 1:\n (old_state_list, action_list, reward_list, new_state_list, is_terminal_list), \\\n idx_list, p_list, sum_p, count = self.replay_memory.sample(self.batch_size)\n else:\n old_state_list, action_list, reward_list, new_state_list, is_terminal_list \\\n = 
self.replay_memory.sample(self.batch_size)\n\n feed_dict = {self.target_s: new_state_list.astype(np.float32)/255. ,\n self.s : old_state_list.astype(np.float32)/255.,\n self.a_ph: list(enumerate(action_list)),\n self.r_ph: np.array(reward_list).astype(np.float32),\n self.d_ph: np.array(is_terminal_list).astype(np.float32),\n }\n\n if self.double:\n action_chosen_by_online = self.sess.run(self.a,\n feed_dict={\n self.s: new_state_list.astype(np.float32)/255.})\n feed_dict[self.a_for_new_state_ph] = list(enumerate(action_chosen_by_online))\n\n if self.per == 1:\n # Annealing weight beta\n feed_dict[self.loss_weight_ph] = (np.array(p_list) * count / sum_p) ** (-self.beta)\n error, _ = self.sess.run([self.error_op, self.train_op], feed_dict=feed_dict)\n self.replay_memory.update(idx_list, error)\n\n else:\n self.sess.run(self.train_op, feed_dict=feed_dict)\n\n self.update_time += 1\n\n if self.beta < BETA_END:\n self.beta += self.beta_increment\n\n if (self.update_time)%self.target_update_freq == 0 :\n #print(\"Step: {} \".format(self.update_time) + \"target_network update\")\n self.sess.run([self.target_update])\n #print(\"Step: {} \".format(self.update_freq) + \"Network save\")\n self.save_model()", "def train_agent(\n self,\n *,\n env,\n test_env,\n save_name,\n train_every=32,\n eval_every=1000,\n max_steps=100000,\n start_epsilon=0.9,\n end_epsilon=0.001,\n epsilon_decay_steps=1000,\n render=True,\n ):\n\n agent = self.create_agent(env)\n curr_epsilon = start_epsilon\n epsilon_decay = self.get_decay_value(\n start_epsilon, end_epsilon, epsilon_decay_steps\n )\n\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n for step in range(1, max_steps + 1):\n next_obs, reward, done, _ = env.step(action)\n next_action = agent.act(next_obs, epsilon=curr_epsilon)\n agent.store_step(obs, action, reward, next_obs, next_action, done)\n obs = next_obs\n\n if render:\n env.render()\n\n if self.time_to(train_every, step):\n agent.perform_training(gamma=self.gamma)\n curr_epsilon = max(end_epsilon, curr_epsilon - epsilon_decay)\n\n if self.time_to(eval_every, step):\n self.evaluate_agent(agent, test_env, end_epsilon)\n torch.save(agent, f\"saved_agents/{save_name}\")\n\n if done:\n obs = env.reset()\n action = agent.act(obs, epsilon=curr_epsilon)\n\n print(\"At step {}\".format(step), end=\"\\r\")\n print(\"\\nDone!\")\n\n return agent", "def generate_episode(env, args, render=False, test_mode=False):\n episode = []\n state, done = env.reset(), False\n observations = transform_obs(env.get_all_observations())\n n_steps = 0\n\n for agent in env.agents: # for agents where it matters,\n agent.set_hidden_state() # set the init hidden state of the RNN\n\n while not done:\n unavailable_actions = env.get_unavailable_actions()\n \n # compute action, keep record of hidden state of the agents to store in experience\n actions, hidden, next_hidden = {}, [], []\n for idx, agent in enumerate(env.agents):\n hidden.append(agent.get_hidden_state())\n actions[agent] = agent.act(observations[idx, :], test_mode=test_mode)\n next_hidden.append(agent.get_hidden_state())\n\n if render:\n print(f\"Step {n_steps}\")\n env.render()\n print([action.name for action in actions.values()])\n\n next_state, rewards, done, _ = env.step(actions)\n next_obs = transform_obs(env.get_all_observations())\n \n # episodes that take long are not allowed and penalized for both agents\n n_steps += 1\n if n_steps > args.max_episode_length:\n done = True\n rewards = {'blue': -1, 'red': -1}\n\n actions = torch.tensor([action.id for 
action in actions.values()])\n unavail_actions = torch.zeros((args.n_agents, args.n_actions), dtype=torch.long)\n for idx, agent in enumerate(env.agents):\n act_ids = [act.id for act in unavailable_actions[agent]]\n unavail_actions[idx, act_ids] = 1.\n \n episode.append(Experience(transform_state(state), actions, rewards, \n transform_state(next_state), done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\"\n episode.append(Experience(None, actions, rewards, \n None, done, \n observations, torch.stack(hidden), \n next_obs, torch.stack(next_hidden),\n unavail_actions))\n \"\"\" \n state = next_state\n observations = next_obs\n \n if render:\n print(f\"Game won by team {env.terminal(next_state)}\")\n return episode", "def train(self):\n ##################\n # YOUR CODE HERE #\n ##################\n start = time.time()\n if self.gae:\n self.train_gae()\n return\n\n def optimize_model():\n R = 0\n for i in reversed(range(len(self.rewards))):\n if abs(self.rewards[i]) > 0.0:\n R = 0\n R = self.rewards[i] + self.gamma * R\n self.rewards[i] = R\n rewards = torch.Tensor(self.rewards)\n if self.var_reduce:\n rewards = (rewards - rewards.mean()) / (rewards.std() + np.finfo(np.float32).eps)\n\n policy_loss = 0.0\n for (log_prob, r) in zip(self.log_probs, rewards):\n policy_loss -= log_prob * r\n\n loss = policy_loss.data[0, 0]\n\n self.opt.zero_grad()\n policy_loss = cu(policy_loss)\n policy_loss.backward()\n self.opt.step()\n\n self.clear_action()\n return loss\n\n self.model.train()\n if USE_CUDA:\n self.model.cuda()\n running_reward = None\n\n for episode in range(1, self.n_episode+1):\n self.init_game_setting()\n state = self.env.reset()\n\n tot_reward = 0\n a, b = 0, 0\n for t in range(self.episode_len):\n action = self.make_action(state, test=False)\n state, reward, done, info = self.env.step(action)\n self.rewards.append(reward)\n if reward > 0:\n a += 1\n if reward < 0:\n b += 1\n tot_reward += reward\n if done:\n break\n\n if running_reward is None:\n running_reward = tot_reward\n else:\n running_reward = 0.99 * running_reward + 0.01 * tot_reward\n\n if episode % self.update_every == 0:\n loss = optimize_model()\n print(\"Episode %d\" % episode)\n print(time_since(start))\n print(\"reward %.4f %d:%d len=%d\" % (running_reward, a, b, t))\n torch.save(self.model.state_dict(), self.model_fn)", "def train(self, num_training_steps):\n observation = self.env_pool.reset()\n for step in trange(num_training_steps):\n observations, actions, rewards, dones = self.collect_batch(observation)\n policy_loss, value_loss, entropy = self.train_on_batch(observations, actions, rewards, dones)\n self.writer.add_scalar('policy_loss', policy_loss, step)\n self.writer.add_scalar('value_loss', value_loss, step)\n self.writer.add_scalar('entropy', entropy, step)\n observation = observations[-1]", "def fast():\n # Need a minimum of 10 total_timesteps for adversarial training code to pass\n # \"any update happened\" assertion inside training loop.\n total_timesteps = 10\n n_expert_demos = 1\n n_episodes_eval = 1\n algorithm_kwargs = dict(\n shared=dict(\n demo_batch_size=1,\n n_disc_updates_per_round=4,\n ),\n )\n gen_batch_size = 2\n parallel = False # easier to debug with everything in one process\n max_episode_steps = 5\n # SB3 RL seems to need batch size of 2, otherwise it runs into numeric\n # issues when computing multinomial distribution during predict()\n num_vec = 2\n init_rl_kwargs = dict(batch_size=2)", "def train(self, iterations: int):\n\n s = 
\"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} {}\"\n s_check = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} saved {} \"\n total_steps = 0\n iter_metrics = []\n for n in range(iterations):\n r_min, r_mean, r_max, iter_steps = self.train_iter()\n iter_metrics.append((r_min, r_mean, r_max))\n total_steps += iter_steps\n\n if n == int(iterations / 2):\n self.steps_to_update_target_model = int(self.steps_to_update_target_model / 2)\n\n # checkpointing & logging\n s_print = s\n file_name = \"\"\n if n % self.checkpoint_freq == 0:\n file_name = f'my_dqn_{n}.pth'\n torch.save(self.target_dqn.state_dict(), os.path.join(self.checkpoint_path, file_name))\n s_print = s_check\n\n if self.verbose:\n print(s_print.format(\n n + 1,\n r_min,\n r_mean,\n r_max,\n total_steps,\n self.e_greedy,\n file_name\n ))\n iter_min = np.mean([x[0] for x in iter_metrics])\n iter_mean = np.mean([x[1] for x in iter_metrics])\n iter_max = np.mean([x[2] for x in iter_metrics])\n return iter_min, iter_mean, iter_max", "def step(self, i_episode, states, actions, rewards, next_states, dones):\n #for stepping maddpg\n # index 0 is for agent 0 and index 1 is for agent 1\n full_states = np.reshape(states, newshape=(-1))\n full_next_states = np.reshape(next_states, newshape=(-1))\n \n # Save experience / reward\n self.memory.add(full_states, states, actions, rewards, full_next_states, next_states, dones)\n \n # Learn, if enough samples are available in memory\n if len(self.memory) > BATCH_SIZE and i_episode > self.episodes_before_training:\n for _ in range(NUM_LEARN_STEPS_PER_ENV_STEP): #learn multiple times at every step\n for agent_no in range(self.num_agents):\n samples = self.memory.sample()\n self.learn(samples, agent_no, GAMMA)\n self.soft_update_all()", "def run(self) -> None:\n for episode in range(1, self.episodes + 1):\n print('Episode:', episode)\n steps, state_action_history = self.run_one_episode()\n self.steps_per_episode.append(steps)\n if episode % parameters.CACHING_INTERVAL == 0 or steps < 1000:\n visualize.animate_track(state_action_history, f'agent-{episode}')\n\n print('Training completed.')\n visualize.plot_steps_per_episode(self.steps_per_episode)\n visualize.plot_epsilon(self.agent.epsilon_history)\n\n if parameters.VISUALIZE_FINAL_GAME:\n print('Showing one episode with the greedy strategy.')\n self.agent.epsilon = 0\n steps, state_action_history = self.run_one_episode()\n print(f'Episode completed in {steps} steps.')\n visualize.animate_track(state_action_history)", "def train(self, num_tickers=4, episodes_per_ticker=5, **kwargs):\n num_tickers = min(num_tickers, len(self.filtered_tickers))\n for i in range(num_tickers):\n ticker = self.filtered_tickers[i % num_tickers]\n env = self.ENV_CONSTRUCTOR(ticker=ticker, **kwargs)\n for j in tqdm(range(episodes_per_ticker)):\n history = self.run_episode(env)\n history[\"ticker\"] = ticker\n history[\"episode\"] = j + 1\n history[\"t\"] = range(len(history))\n self.history = pd.concat((self.history, history))\n self.history = self.history.reset_index(\"Date\", drop=True)", "def _train_internal(self, opts):\n\n batches_num = self._data.num_points / opts['batch_size']\n train_size = self._data.num_points\n num_plot = 320\n sample_prev = np.zeros([num_plot] + list(self._data.data_shape))\n l2s = []\n\n counter = 0\n decay = 1.\n logging.error('Training VAE')\n for _epoch in xrange(opts[\"gan_epoch_num\"]):\n\n if opts['decay_schedule'] == \"manual\":\n if _epoch == 30:\n decay = decay / 2.\n if _epoch == 50:\n decay = decay 
/ 5.\n if _epoch == 100:\n decay = decay / 10.\n\n if _epoch > 0 and _epoch % opts['save_every_epoch'] == 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot'),\n global_step=counter)\n\n for _idx in xrange(batches_num):\n # logging.error('Step %d of %d' % (_idx, batches_num ) )\n data_ids = np.random.choice(train_size, opts['batch_size'],\n replace=False, p=self._data_weights)\n batch_images = self._data.data[data_ids].astype(np.float)\n batch_noise = utils.generate_noise(opts, opts['batch_size'])\n _, loss, loss_kl, loss_reconstruct = self._session.run(\n [self._optim, self._loss, self._loss_kl,\n self._loss_reconstruct],\n feed_dict={self._real_points_ph: batch_images,\n self._noise_ph: batch_noise,\n self._lr_decay_ph: decay,\n self._is_training_ph: True})\n counter += 1\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n debug_str = 'Epoch: %d/%d, batch:%d/%d' % (\n _epoch+1, opts['gan_epoch_num'], _idx+1, batches_num)\n debug_str += ' [L=%.2g, Recon=%.2g, KLQ=%.2g]' % (\n loss, loss_reconstruct, loss_kl)\n logging.error(debug_str)\n\n if opts['verbose'] and counter % opts['plot_every'] == 0:\n metrics = Metrics()\n points_to_plot = self._run_batch(\n opts, self._generated, self._noise_ph,\n self._noise_for_plots[0:num_plot],\n self._is_training_ph, False)\n l2s.append(np.sum((points_to_plot - sample_prev)**2))\n metrics.l2s = l2s[:]\n metrics.make_plots(\n opts,\n counter,\n None,\n points_to_plot,\n prefix='sample_e%04d_mb%05d_' % (_epoch, _idx))\n reconstructed = self._session.run(\n self._reconstruct_x,\n feed_dict={self._real_points_ph: batch_images,\n self._is_training_ph: False})\n metrics.l2s = None\n metrics.make_plots(\n opts,\n counter,\n None,\n reconstructed,\n prefix='reconstr_e%04d_mb%05d_' % (_epoch, _idx))\n if opts['early_stop'] > 0 and counter > opts['early_stop']:\n break\n if _epoch > 0:\n os.path.join(opts['work_dir'], opts['ckpt_dir'])\n self._saver.save(self._session,\n os.path.join(opts['work_dir'],\n opts['ckpt_dir'],\n 'trained-pot-final'),\n global_step=counter)", "def test(self):\n total_steps = 0\n running_scores = np.zeros(len(self.agents))\n\n for e in range(self.run_settings.test_episodes):\n # Initialize episode\n try:\n env_states, rewards, done, metainfo = self.custom_env.reset()\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n continue\n\n # Initialize scores to starting reward (probably 0)\n scores = np.array(rewards)\n step = 0\n\n while not done:\n states = [self.agents[a].state_space_converter(env_states[a])\n for a in range(len(self.agents))]\n\n # Get actions\n actions = [self.agents[a].sample(states[a])\n for a in range(len(self.agents))]\n env_actions = [self.agents[a].action_space_converter(actions[a])\n for a in range(len(self.agents))]\n if self.run_settings.verbose:\n self.print_action(env_actions)\n # Take environment step\n try:\n env_states, rewards, done, metainfo = self.custom_env.step(env_actions)\n except EpisodeCrashException:\n print('Episode crashed, resetting.')\n break\n step += 1\n total_steps += 1\n\n # Update scores\n scores += np.array(rewards)\n\n if done:\n running_scores += scores\n\n if len(scores) == 1:\n scores = scores[0]\n if self.run_settings.verbose:\n print(\"Game {} ended after {} steps. 
Game score: {}\"\n .format(e+1, step, scores))\n if self.run_settings.verbose:\n print(\"Average game scores: {}\".format(running_scores / self.run_settings.test_episodes))", "def run_episode(self, environment):\n state = environment.reset()\n self.steps_done = 0\n while True:\n state_tensor = FloatTensor([state])\n position = self.Q.sample_from_softmax_policy(state_tensor)\n action = position + 1\n next_state, reward, done, _ = environment.step(position.item())\n self.memory.push((state_tensor, action,))\n self.learn(state_tensor, action, next_state, reward)\n state = next_state\n self.steps_done += 1\n if done:\n break\n history = environment.close()\n return history", "def train_prediction(self, episodes=100, batch_size=32, steps=100):\n hidden_state = None\n for episode in range(episodes):\n state_data = self.env.dataset(batch_size=batch_size)\n state = self.state_to_observation(state_data)\n\n prediction = self.model(state, hidden_state)\n loss = F.mse_loss(prediction['value'], state_data['reward'])\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()", "def _train_epochs(self, data, model, n_epochs, start_epoch, start_step, dev_data, teacher_forcing_ratio, early_stopping_patience):\n print_loss_total = 0 # Reset every print_every\n epoch_loss_total = 0 # Reset every epoch\n\n device = None if torch.cuda.is_available() else -1\n batch_iterator = torchtext.data.BucketIterator(data, batch_size=self.batch_size, repeat=False,\n sort_key=lambda x: len(x.src),\n shuffle=True, device=device, sort=False, sort_within_batch=True)\n\n steps_per_epoch = len(batch_iterator)\n total_steps = steps_per_epoch * n_epochs\n\n step = start_step\n step_elapsed = 0\n previous_dev_loss = 10e6\n dev_loss_increased_epochs = 0\n for epoch in range(start_epoch, n_epochs + 1):\n self.logger.info(\"Epoch: %d, Step: %d\" % (epoch, step))\n\n batch_generator = batch_iterator.__iter__()\n # consuming seen batches from previous training\n for _ in range((epoch - 1) * steps_per_epoch, step):\n next(batch_generator)\n\n model.train(True)\n for batch in batch_generator:\n step += 1\n step_elapsed += 1\n\n input_variables, input_lengths = getattr(batch, UTTERANCE_FIELD_NAME)\n target_variables = getattr(batch, RESPONSE_FIELD_NAME)\n emotion_variables = getattr(batch, EMOTION_FIELD_NAME)\n\n loss = self.train_batch(input_variables, input_lengths.tolist(), target_variables, emotion_variables,\n model, teacher_forcing_ratio)\n\n # Record average loss\n print_loss_total += loss\n epoch_loss_total += loss\n\n if step % self.print_every == 0 and step_elapsed > self.print_every:\n print_loss_avg = print_loss_total / self.print_every\n print_loss_total = 0\n log_msg = 'Progress: %.2f%%, Train %s: %.4f' % (\n step / total_steps * 100,\n self.loss.name,\n print_loss_avg)\n self.logger.info(log_msg)\n beam_search = EmotionSeq2seq(model.encoder, EmotionTopKDecoder(model.decoder, 20))\n predictor = Predictor(beam_search, data.vocabulary, data.emotion_vocabulary)\n seq = \"how are you\".split()\n self.logger.info(\"Happy: \" + \" \".join(predictor.predict(seq, 'happiness')))\n self.logger.info(\"Angry: \" + \" \".join(predictor.predict(seq, 'anger')))\n\n # Checkpoint\n if step % self.checkpoint_every == 0 or step == total_steps:\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n\n if step_elapsed == 0:\n continue\n\n epoch_loss_avg = epoch_loss_total / min(steps_per_epoch, step - start_step)\n epoch_loss_total = 0\n log_msg = \"Finished epoch %d: Train %s: %.4f\" 
% (epoch, self.loss.name, epoch_loss_avg)\n if dev_data is not None:\n dev_loss, accuracy = self.evaluator.evaluate(model, dev_data)\n self.optimizer.update(dev_loss)\n log_msg += \", Dev %s: %.4f, Accuracy: %.4f\" % (self.loss.name, dev_loss, accuracy)\n model.train(mode=True)\n if dev_loss > previous_dev_loss:\n dev_loss_increased_epochs += 1\n if dev_loss_increased_epochs == early_stopping_patience:\n self.logger.info(\"EARLY STOPPING\")\n break\n else:\n dev_loss_increased_epochs = 0\n previous_dev_loss = dev_loss\n Checkpoint(model=model,\n optimizer=self.optimizer,\n epoch=epoch, step=step).save(self.expt_dir)\n else:\n self.optimizer.update(epoch_loss_avg)\n\n self.logger.info(log_msg)", "def sequential(\n env: Environment,\n num_episodes: int,\n agent_interfaces: Dict[AgentID, AgentInterface],\n fragment_length: int,\n behavior_policies: Dict[AgentID, PolicyID],\n agent_episodes: Dict[AgentID, Episode],\n metric: Metric,\n send_interval: int = 50,\n dataset_server: ray.ObjectRef = None,\n):\n\n # use env.env as real env\n env = env.env\n cnt = 0\n evaluated_results = []\n\n assert fragment_length > 0, fragment_length\n for ith in range(num_episodes):\n env.reset()\n metric.reset()\n for aid in env.agent_iter(max_iter=fragment_length):\n observation, reward, done, info = env.last()\n\n if isinstance(observation, dict):\n info = {\"action_mask\": np.asarray([observation[\"action_mask\"]])}\n action_mask = observation[\"action_mask\"]\n else:\n action_mask = np.ones(\n get_preprocessor(env.action_spaces[aid])(\n env.action_spaces[aid]\n ).size\n )\n\n # observation has been transferred\n observation = agent_interfaces[aid].transform_observation(\n observation, behavior_policies[aid]\n )\n\n info[\"policy_id\"] = behavior_policies[aid]\n\n if not done:\n action, action_dist, extra_info = agent_interfaces[aid].compute_action(\n [observation], **info\n )\n # convert action to scalar\n action = action[0]\n else:\n info[\"policy_id\"] = behavior_policies[aid]\n action = None\n env.step(action)\n if action is None:\n action = [agent_interfaces[aid].action_space.sample()]\n if aid in agent_episodes:\n agent_episodes[aid].insert(\n **{\n Episode.CUR_OBS: [observation],\n Episode.ACTION_MASK: [action_mask],\n Episode.ACTION_DIST: action_dist,\n Episode.ACTION: action,\n Episode.REWARD: reward,\n Episode.DONE: done,\n }\n )\n metric.step(\n aid,\n behavior_policies[aid],\n observation=observation,\n action=action,\n action_dist=action_dist,\n reward=reward,\n done=done,\n info=info,\n )\n cnt += 1\n evaluated_results.append(\n metric.parse(agent_filter=tuple(agent_episodes.keys()))\n )\n # when dataset_server is not None\n if dataset_server:\n for e in agent_episodes.values():\n e.clean_data()\n dataset_server.save.remote(agent_episodes, wait_for_ready=False)\n for e in agent_episodes.values():\n e.reset()\n\n # aggregated evaluated results groupped in agent wise\n evaluated_results = metric.merge_parsed(evaluated_results)\n return evaluated_results, cnt", "def _train_simulate(self, env, train_episode=None):\n # The initial observation\n o_r_d_i = [env.reset()] + [None]*3 # o_r_d_i means \"Observation_Reward_Done_Info\"\n # Reset all the manager parameters\n self.reset(o_r_d_i[0][\"manager\"])\n done = False\n current_option = None\n # Render the current state\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n while not done:\n # If no option is activated then choose one\n if current_option is None:\n current_option = self.select_option(o_r_d_i, 
train_episode)\n assert current_option.score == 0, \"the option's reset function must reset the score to 0.\"\n\n # choose an action\n action = current_option.act(train_episode)\n\n # make an action and display the state space\n o_r_d_i = env.step(action)\n if self.parameters[\"display_environment\"]:\n self.show_render.render(o_r_d_i[0])\n\n # check if the option ended correctly\n correct_termination = self.check_end_option(current_option, o_r_d_i[0][\"manager\"])\n\n # update the option\n intra_reward = self.compute_intra_reward(o_r_d_i, correct_termination)\n current_option.update_option(o_r_d_i, action, correct_termination, train_episode, intra_reward)\n\n # If the option is done, update the manager\n if correct_termination is not None:\n if check_type(current_option, AbstractOption):\n # record the correct transition when the option is a regular option (i.e. not an explore option)\n self.successful_transition.append(correct_termination)\n self.write_success_rate_transitions()\n\n # the manager does not need to know if the correct_termination is 0 or 1.\n self.update_manager(o_r_d_i, current_option, train_episode)\n\n current_option = None\n\n done = self.check_end_manager(o_r_d_i)\n\n self.write_manager_score(train_episode)", "def run_episode(env, agent, deterministic, skip_frames=0, do_training=True, rendering=False,\n max_timesteps=1000, history_length=0):\n\n stats = EpisodeStats()\n\n # Save history\n image_hist = []\n\n step = 0\n state = env.reset()\n\n # fix bug of corrupted states without rendering in gym environment\n env.viewer.window.dispatch_events()\n\n # append image history to first state\n state = state_preprocessing(state)\n image_hist.extend([state] * history_length)\n state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n while True:\n\n # TODO: get action_id from agent\n # Hint: adapt the probabilities of the 5 actions for random sampling so that the agent explores properly. 
\n # action_id = agent.act(...)\n # action = your_id_to_action_method(...)\n action_id = agent.act(state, deterministic)\n action = id_to_action(action_id)\n\n # Hint: frame skipping might help you to get better results.\n reward = 0\n for _ in range(skip_frames + 1):\n next_state, r, terminal, info = env.step(action)\n reward += r\n\n if rendering:\n env.render()\n\n if terminal:\n break\n\n next_state = state_preprocessing(next_state)\n image_hist.append(next_state)\n image_hist.pop(0)\n next_state = np.array(image_hist)#.reshape(96, 96, history_length)\n\n if do_training:\n agent.train(state, action_id, next_state, reward, terminal)\n\n stats.step(reward, action_id)\n\n state = next_state\n\n if terminal or (step * (skip_frames + 1)) > max_timesteps:\n break\n\n step += 1\n\n return stats", "def agent_start(self, observation):\n\n self.step_counter = 0\n self.batch_counter = 0\n self.episode_reward = 0\n self.episode_q = 0\n self.episode_chosen_steps = 0\n\n # We report the mean loss for every epoch.\n self.loss_averages = []\n\n self.start_time = time.time()\n this_int_action = self.randGenerator.randint(0, self.num_actions-1)\n return_action = Action()\n return_action.intArray = [this_int_action]\n\n self.last_action = this_int_action\n\n self.last_image, raw_image = self.preprocess_observation(observation.intArray)\n if raw_image is not None:\n self.episode_images = [raw_image]\n else:\n self.episod_images = []\n\n\n return return_action", "def _train_step(self):\n if self._replay.add_count > self.min_replay_history:\n if self.training_steps % self.update_period == 0:\n self._sample_from_replay_buffer()\n (self._rng, self.optimizer_state, self.online_params,\n loss, quantile_loss, coherence_loss, orthogonality_loss) = train(\n self.network_def,\n self.online_params,\n self.target_network_params,\n self.optimizer,\n self.optimizer_state,\n self.replay_elements['state'],\n self.replay_elements['action'],\n self.replay_elements['next_state'],\n self.replay_elements['reward'],\n self.replay_elements['terminal'],\n self.num_tau_samples,\n self.num_tau_prime_samples,\n self.num_quantile_samples,\n self.cumulative_gamma,\n self.double_dqn,\n self.kappa,\n self._rng,\n self._coherence_weight,\n self._option,\n self._use_ortho_loss,\n self._use_cohe_loss,\n self._tau,\n self._alpha,\n self._clip_value_min)\n if (self.summary_writer is not None and\n self.training_steps > 0 and\n self.training_steps % self.summary_writing_frequency == 0):\n if self._use_ortho_loss and self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality',\n simple_value=orthogonality_loss),\n ])\n elif self._use_ortho_loss and not self._use_cohe_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Orthogonality', simple_value=orthogonality_loss),\n ])\n elif self._use_cohe_loss and not self._use_ortho_loss:\n summary = tf.compat.v1.Summary(value=[\n tf.compat.v1.Summary.Value(\n tag='Losses/Combined', simple_value=loss),\n tf.compat.v1.Summary.Value(\n tag='Losses/Quantile', simple_value=quantile_loss),\n 
tf.compat.v1.Summary.Value(\n tag='Losses/Incoherence', simple_value=coherence_loss),\n ])\n self.summary_writer.add_summary(summary, self.training_steps)\n if self.training_steps % self.target_update_period == 0:\n self._sync_weights()\n\n self.training_steps += 1", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()" ]
[ "0.7785768", "0.76625186", "0.76138806", "0.7555034", "0.7430718", "0.7399803", "0.73953015", "0.73881173", "0.7361592", "0.72675776", "0.72441524", "0.7235535", "0.72085416", "0.71934515", "0.71493644", "0.71422166", "0.7102493", "0.7057307", "0.70422393", "0.70264834", "0.70260596", "0.697359", "0.69708925", "0.6915815", "0.6906336", "0.6871906", "0.6855419", "0.68539417", "0.6850543", "0.6828136", "0.6808362", "0.679806", "0.6793059", "0.67921335", "0.67918116", "0.6790365", "0.6770697", "0.6755952", "0.6725245", "0.6724269", "0.6709945", "0.67098683", "0.67016023", "0.66855395", "0.6682471", "0.6682348", "0.6680599", "0.66799796", "0.6674966", "0.66396", "0.66346866", "0.66106564", "0.66027826", "0.6576512", "0.65652806", "0.65620124", "0.6558556", "0.655361", "0.6544947", "0.6531041", "0.65059054", "0.64940697", "0.6478391", "0.64750564", "0.64700377", "0.6465983", "0.6459009", "0.6452644", "0.6448382", "0.64449894", "0.6444222", "0.6439714", "0.6437733", "0.64357466", "0.64333445", "0.6423327", "0.64216644", "0.6416283", "0.64003545", "0.6376684", "0.63693863", "0.6347895", "0.63327694", "0.63205475", "0.63186216", "0.63138497", "0.62892574", "0.6288756", "0.628053", "0.62650204", "0.6253272", "0.62530255", "0.62400556", "0.6238982", "0.6236931", "0.6234987", "0.6232157", "0.62315243", "0.6229727", "0.6226249" ]
0.81745845
0
Generate the pair message between node Hv and node Hw. Because of the cat (concatenation) operation, the message from Hv to Hw and the message from Hw to Hv are different.
def __init__(self, dim_hv, dim_hw, msg_dim):
    super(PairMessageGenerator, self).__init__()
    self.dim_hv, self.dim_hw, self.msg_dim = dim_hv, dim_hw, msg_dim
    self.in_dim = dim_hv + dim_hw  # row * feature_dim, 2048
    self.mlp = nn.Sequential(
        nn.LayerNorm(self.in_dim),  # this layer norm is important to create diversity
        nn.Linear(self.in_dim, self.msg_dim),
        nn.LeakyReLU(0.2)
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
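The document snippet above shows only __init__. The following is a minimal usage sketch, assuming a forward pass that concatenates (hv, hw) and applies self.mlp; the forward method, batch size, and feature dimensions are illustrative assumptions, not part of the original snippet.

import torch
import torch.nn as nn

class PairMessageGenerator(nn.Module):
    def __init__(self, dim_hv, dim_hw, msg_dim):
        super(PairMessageGenerator, self).__init__()
        self.in_dim = dim_hv + dim_hw
        self.mlp = nn.Sequential(
            nn.LayerNorm(self.in_dim),
            nn.Linear(self.in_dim, msg_dim),
            nn.LeakyReLU(0.2)
        )

    def forward(self, hv, hw):
        # assumed forward: cat is order-sensitive, so (hv, hw) and (hw, hv)
        # produce different messages, as the query above describes
        return self.mlp(torch.cat([hv, hw], dim=-1))

gen = PairMessageGenerator(dim_hv=512, dim_hw=512, msg_dim=1024)  # illustrative sizes
hv, hw = torch.randn(8, 512), torch.randn(8, 512)
msg_v_to_w = gen(hv, hw)  # shape (8, 1024)
msg_w_to_v = gen(hw, hv)  # differs from msg_v_to_w because of the cat order

Because the Linear layer (and LayerNorm's per-position affine parameters) treat each concatenated position differently, swapping the order of hv and hw changes the output, which matches the query's note that the Hv-to-Hw and Hw-to-Hv messages differ.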
[ "def get_HHH(self, msg):\n self.prev_head = self.head\n self.head = msg.headx, msg.heady, msg.headz\n # self.torso = msg.torsox, msg.torsoy, msg.torsoz\n # self.Rhand = msg.Rhandx, msg.Rhandy, msg.Rhandz\n # self.Lhand = msg.Lhandx, msg.Lhandy, msg.Lhandz\n\n # if the distance to Kinect is 0, then that means it's not seeing anything\n if self.head[2] == 0:\n self.person = None\n else:\n xpos, ypos, zpos = self.kinect_transform(self.head[0], self.head[1], self.head[2])\n\n #either sets up a new presence or updates a person's presence\n if self.person is None:\n self.person = Coordinates(1, xpos, ypos, zpos)\n else:\n self.person.set_Coordinates(xpos, ypos, zpos)", "def differentiate(hp):\n if hp.force_skip:\n G.add_edge(\"input\", \"output\")\n for node in G.nodes(data=True):\n node_id, node_data = node\n log(\"differentiate\", node_id, node_data)\n node_data[\"output\"] = None\n node_data[\"op\"] = None\n if node_data[\"shape\"] is \"square\" or \"output\" in node_id:\n if node_id == \"output\":\n d_out = node_data[\"output_shape\"][-1]\n node_type = hp.last_layer\n activation = \"tanh\"\n else:\n node_type = str(np.random.choice(['sepconv1d', 'transformer',\n 'k_conv1', 'k_conv2', 'k_conv3',\n \"deep\", \"wide_deep\"],\n 1, p=hp.layer_distribution).item(0))\n activation = str(np.random.choice([ 'tanh', 'linear', 'relu', 'selu',\n 'elu', 'sigmoid', 'hard_sigmoid', 'exponential', 'softmax',\n 'softplus', 'softsign', 'gaussian', 'sin', 'cos', 'swish'],\n 1, p=hp.activation_distribution).item(0))\n d_out = None\n node_data[\"force_residual\"] = random.random() < hp.p_force_residual\n node_data[\"activation\"] = clean_activation(activation)\n node_data[\"node_type\"] = node_type\n node_data['style'] = \"\"\n if node_type == 'sepconv1d':\n if d_out is None:\n d_out = safe_sample(hp.min_filters, hp.max_filters)\n node_data[\"filters\"] = d_out\n node_data[\"kernel_size\"] = 1\n if node_type == \"transformer\":\n if d_out is None:\n d_out = safe_sample(hp.min_units, hp.max_units) * hp.attn_heads\n node_data[\"d_model\"] = d_out\n node_data[\"n_heads\"] = 2 if d_out % 2 == 0 else 1\n if \"k_conv\" in node_type or node_type in [\"deep\", \"wide_deep\"]:\n layers = design_layers(hp, d_out, activation)\n if d_out is None:\n d_out = layers[-1][0]\n node_data[\"stddev\"] = hp.stddev\n node_data['layers'] = layers\n node_data[\"d_out\"] = d_out\n if node_type in [\"deep\", \"wide_deep\"]:\n node_data['kernel'] = node_type\n else:\n node_data['kernel'] = \"wide_deep\" if random.random() < hp.p_wide_deep else \"deep\"\n label = f\"{node_type}\"\n log(f\"set {node_id} to {label}\")\n node_data[\"label\"] = label\n node_data[\"color\"] = \"green\"\n # we handle recurrent shapes:\n try:\n feedback_node_id = f\"{node_id}_feedback\"\n input_shape = (None, d_out)\n log(f\"attempt to set input_shape for {feedback_node_id} to {input_shape}\")\n feedback_node = G.node[feedback_node_id]\n feedback_node[\"input_shape\"] = input_shape\n node_data[\"gives_feedback\"] = True\n except Exception as e:\n log(\"ERROR HANDLING FEEDBACK SHAPE:\", e)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(hw_msg, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg._Header.Header()\n if self.temp_1_curr is None:\n self.temp_1_curr = 0\n if self.temp_1_min is None:\n self.temp_1_min = 0\n if self.temp_1_max is None:\n self.temp_1_max = 0\n if self.temp_2_curr is None:\n self.temp_2_curr = 0\n if self.temp_2_min 
is None:\n self.temp_2_min = 0\n if self.temp_2_max is None:\n self.temp_2_max = 0\n if self.temp_3_curr is None:\n self.temp_3_curr = 0\n if self.temp_3_min is None:\n self.temp_3_min = 0\n if self.temp_3_max is None:\n self.temp_3_max = 0\n if self.temp_4_curr is None:\n self.temp_4_curr = 0\n if self.temp_4_min is None:\n self.temp_4_min = 0\n if self.temp_4_max is None:\n self.temp_4_max = 0\n if self.temp_5_curr is None:\n self.temp_5_curr = 0\n if self.temp_5_min is None:\n self.temp_5_min = 0\n if self.temp_5_max is None:\n self.temp_5_max = 0\n if self.temp_6_curr is None:\n self.temp_6_curr = 0\n if self.temp_6_min is None:\n self.temp_6_min = 0\n if self.temp_6_max is None:\n self.temp_6_max = 0\n if self.akku_voltage_curr is None:\n self.akku_voltage_curr = 0\n if self.akku_voltage_min is None:\n self.akku_voltage_min = 0\n if self.akku_voltage_max is None:\n self.akku_voltage_max = 0\n if self.hals_motor_voltage_curr is None:\n self.hals_motor_voltage_curr = 0\n if self.hals_motor_voltage_min is None:\n self.hals_motor_voltage_min = 0\n if self.hals_motor_voltage_max is None:\n self.hals_motor_voltage_max = 0\n if self.hals_logik_voltage_curr is None:\n self.hals_logik_voltage_curr = 0\n if self.hals_logik_voltage_min is None:\n self.hals_logik_voltage_min = 0\n if self.hals_logik_voltage_max is None:\n self.hals_logik_voltage_max = 0\n if self.tablett_logik_voltage_curr is None:\n self.tablett_logik_voltage_curr = 0\n if self.tablett_logik_voltage_min is None:\n self.tablett_logik_voltage_min = 0\n if self.tablett_logik_voltage_max is None:\n self.tablett_logik_voltage_max = 0\n if self.arm_logik_voltage_curr is None:\n self.arm_logik_voltage_curr = 0\n if self.arm_logik_voltage_min is None:\n self.arm_logik_voltage_min = 0\n if self.arm_logik_voltage_max is None:\n self.arm_logik_voltage_max = 0\n if self.tablett_motor_voltage_curr is None:\n self.tablett_motor_voltage_curr = 0\n if self.tablett_motor_voltage_min is None:\n self.tablett_motor_voltage_min = 0\n if self.tablett_motor_voltage_max is None:\n self.tablett_motor_voltage_max = 0\n if self.hals_motor_current_curr is None:\n self.hals_motor_current_curr = 0\n if self.hals_motor_current_min is None:\n self.hals_motor_current_min = 0\n if self.hals_motor_current_max is None:\n self.hals_motor_current_max = 0\n if self.hals_logik_current_curr is None:\n self.hals_logik_current_curr = 0\n if self.hals_logik_current_min is None:\n self.hals_logik_current_min = 0\n if self.hals_logik_current_max is None:\n self.hals_logik_current_max = 0\n if self.tablett_logik_current_curr is None:\n self.tablett_logik_current_curr = 0\n if self.tablett_logik_current_min is None:\n self.tablett_logik_current_min = 0\n if self.tablett_logik_current_max is None:\n self.tablett_logik_current_max = 0\n if self.arm_logik_current_curr is None:\n self.arm_logik_current_curr = 0\n if self.arm_logik_current_min is None:\n self.arm_logik_current_min = 0\n if self.arm_logik_current_max is None:\n self.arm_logik_current_max = 0\n if self.tablett_motor_current_curr is None:\n self.tablett_motor_current_curr = 0\n if self.tablett_motor_current_min is None:\n self.tablett_motor_current_min = 0\n if self.tablett_motor_current_max is None:\n self.tablett_motor_current_max = 0\n else:\n self.header = std_msgs.msg._Header.Header()\n self.temp_1_curr = 0\n self.temp_1_min = 0\n self.temp_1_max = 0\n self.temp_2_curr = 0\n self.temp_2_min = 0\n self.temp_2_max = 0\n self.temp_3_curr = 0\n self.temp_3_min = 0\n self.temp_3_max = 0\n self.temp_4_curr = 0\n 
self.temp_4_min = 0\n self.temp_4_max = 0\n self.temp_5_curr = 0\n self.temp_5_min = 0\n self.temp_5_max = 0\n self.temp_6_curr = 0\n self.temp_6_min = 0\n self.temp_6_max = 0\n self.akku_voltage_curr = 0\n self.akku_voltage_min = 0\n self.akku_voltage_max = 0\n self.hals_motor_voltage_curr = 0\n self.hals_motor_voltage_min = 0\n self.hals_motor_voltage_max = 0\n self.hals_logik_voltage_curr = 0\n self.hals_logik_voltage_min = 0\n self.hals_logik_voltage_max = 0\n self.tablett_logik_voltage_curr = 0\n self.tablett_logik_voltage_min = 0\n self.tablett_logik_voltage_max = 0\n self.arm_logik_voltage_curr = 0\n self.arm_logik_voltage_min = 0\n self.arm_logik_voltage_max = 0\n self.tablett_motor_voltage_curr = 0\n self.tablett_motor_voltage_min = 0\n self.tablett_motor_voltage_max = 0\n self.hals_motor_current_curr = 0\n self.hals_motor_current_min = 0\n self.hals_motor_current_max = 0\n self.hals_logik_current_curr = 0\n self.hals_logik_current_min = 0\n self.hals_logik_current_max = 0\n self.tablett_logik_current_curr = 0\n self.tablett_logik_current_min = 0\n self.tablett_logik_current_max = 0\n self.arm_logik_current_curr = 0\n self.arm_logik_current_min = 0\n self.arm_logik_current_max = 0\n self.tablett_motor_current_curr = 0\n self.tablett_motor_current_min = 0\n self.tablett_motor_current_max = 0", "def make_nap_visual_msg( i_curr, i_prev, str_curr, str_prev ):\n nap_visual_edge_msg = NapVisualEdgeMsg()\n nap_visual_edge_msg.c_timestamp = S_timestamp[i_curr]\n nap_visual_edge_msg.prev_timestamp = S_timestamp[i_prev]\n nap_visual_edge_msg.goodness = sim_scores_logistic[i_prev]\n nap_visual_edge_msg.curr_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_curr].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.prev_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_prev].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.curr_label = str_curr #str(i_curr) #+ '::%d,%d' %(nInliers,nMatches)\n nap_visual_edge_msg.prev_label = str_prev #str(i_prev)\n\n return nap_visual_edge_msg", "def make_nap_visual_msg( i_curr, i_prev, str_curr, str_prev ):\n nap_visual_edge_msg = NapVisualEdgeMsg()\n nap_visual_edge_msg.c_timestamp = S_timestamp[i_curr]\n nap_visual_edge_msg.prev_timestamp = S_timestamp[i_prev]\n nap_visual_edge_msg.goodness = sim_scores_logistic[i_prev]\n nap_visual_edge_msg.curr_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_curr].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.prev_image = CvBridge().cv2_to_imgmsg( S_thumbnail[i_prev].astype('uint8'), \"bgr8\" )\n nap_visual_edge_msg.curr_label = str_curr #str(i_curr) #+ '::%d,%d' %(nInliers,nMatches)\n nap_visual_edge_msg.prev_label = str_prev #str(i_prev)\n\n return nap_visual_edge_msg", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def homothick():\n return se2hmt(binary([[1,1,1],\n [0,0,0],\n [0,0,0]]),\n binary([[0,0,0],\n [0,1,0],\n [1,1,1]]))", "def create_ising_wishbone(h, w, **kwargs):\n assert h == 2 # Only works for 2 branches\n G = nx.empty_graph(h * w)\n n = w\n G.add_edges_from([(v, v+1) for v in range(n-1)])\n G.add_edges_from([(v, v+1) for v in range(n,2*n-1)])\n G.add_edges_from([(v, v+n) for v in range(n // 2)]) # Connect first half of nodes\n return nx.to_numpy_matrix(G)", "def _cat_directions(self, h):\n if self.cfg.bidirectional:\n new_h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n else:\n new_h = h\n lsize, bsize, hsize = new_h.size()\n new_h = new_h.view(lsize, bsize, hsize, 1, 1)\n new_h = new_h.expand(lsize, bsize, hsize, self.cfg.grid_size[1], 
self.cfg.grid_size[0])\n return new_h", "def generate_huawei_2g_node_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 7 -- BSC\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BSCBASIC p_mo ON p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_node_parameters \n (node, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value \n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_node_parameters TT2 on TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.node is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_node_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n 
t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.node IS NULL\n )\n DELETE FROM network_audit.baseline_node_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.baseline_node_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.neid as node,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.nodes t8 on t8.name = t4.node\n INNER JOIN vendors t9 on t9.pk = t8.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t8.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_node_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def generate_message(self):\r\n intent = torch.zeros(10)\r\n rand = 0\r\n if self.action_base == C:\r\n rand = torch.normal(mean=self.TRUTH_MEAN,std=self.TRUTH_STD,generator = self.generator)\r\n else: #self.action_base == D:\r\n rand = torch.normal(mean=self.DECEIVE_MEAN,std=self.DECEIVE_STD,generator = self.generator)\r\n if rand < 0.1: intent[0] = 1\r\n elif rand < 0.2: intent[1] = 1\r\n elif rand < 0.3: intent[2] = 1\r\n elif rand < 0.4: intent[3] = 1\r\n elif rand < 0.5: intent[4] = 1\r\n elif rand < 0.6: intent[5] = 1\r\n elif rand < 0.7: intent[6] = 1\r\n elif rand < 0.8: intent[7] = 1\r\n elif rand < 0.9: intent[8] = 1\r\n elif rand > 0.9: intent[9] = 1 #the truth is more likely anyways.\r\n return 
intent", "def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h", "def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h", "def _cat_directions(self, h):\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h", "def _generate_G_from_H(H, variable_weight=False):\n n_edge = H.shape[1]\n # the weight of the hyperedge\n W_vector = np.ones(n_edge)\n W = sp.diags(W_vector, format=\"csr\")\n # W = np.ones(n_edge)\n # the degree of the node\n DV = H\n DV = DV.sum(axis=1)\n #DV = np.sum(H * W, axis=1)\n # the degree of the hyperedge\n #DE = np.sum(H, axis=0)\n DE = H.sum(axis=0)\n invDE = np.squeeze(np.asarray(np.power(DE, -1)))\n invDE = sp.diags(invDE, format=\"csr\")\n DV2 = sp.diags(np.squeeze(np.asarray(np.power(DV, -0.5))), format=\"csr\")\n\n # invDE = np.mat(np.diag(np.power(DE, -1)))\n # DV2 = np.mat(np.diag(np.power(DV, -0.5)))\n HT = H.T\n\n if variable_weight:\n DV2_H = DV2 * H\n invDE_HT_DV2 = invDE * HT * DV2\n #invDE_HT = invDE * HT\n return DV2_H.toarray(), W_vector, invDE_HT_DV2.toarray()\n else:\n W = sp.diags(W_vector, format=\"csr\")\n G = DV2 * H * W * invDE * HT * DV2\n return G", "def create_output(self, messages):", "def lhco_line(self):\n if not self.check_def(['eta','phi','pt','mass','pid']): \n sys.exit('Particle error: some attribute not defined')\n\n jet=[1,2,3,4,5,6,21]\n inv_list=[12,14,16,18,1000022,1000023,1000024,1000025,1000035]\n\n #define pid-> type\n pid_to_type={11:1,-11:1,13:2,-13:2,15:3,-15:3,22:0}\n for data in jet:\n pid_to_type[data]=4\n pid_to_type[-data]=4\n for data in inv_list:\n pid_to_type[data]=6\n pid_to_type[-data]=6\n\n\n \n type=''\n for key in pid_to_type.keys():\n if self.pid==key:\n type=pid_to_type[key]\n break\n \n if type=='':\n print 'Warning unknown type'\n return ''\n\n text =' '+str(type) #type LHCO\n text+=' '+str(self.eta) #ETA\n text+=' '+str(self.phi) #PHI\n text+=' '+str(self.pt) #PT\n text+=' '+str(self.mass) #JMASS\n if self.pid in [11,13]: #NTRK\n text+=' -1' \n else:\n text+=' 1'\n if self.pid in [-5,5]: #BTAG\n text+=' 2'\n else:\n text+=' 0'\n text+=' 0' #HAD/EM\n text+=' 0' #DUMMY 1\n text+=' 0' #DUMMY 2\n \n return text", "def generate_huawei_2g_cell_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 1\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n 
cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL\n cell_level_join = \"\"\" INNER JOIN {0}.GCELL gcell ON gcell.\"CELLID\" = t_mo.\"CELLID\" AND gcell.neid = t_mo.neid \n AND gcell.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.network_baseline \n (node, site, cellname, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n ) TT1\n LEFT JOIN network_audit.network_baseline TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.cellname = TT1.cellname\n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.cellname is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.network_baseline TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t4.cellname,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n gcell.\"CELLNAME\" as cellname,\n gcell.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n WHERE\n t_mo.module_type = 'Radio'\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk 
= t6.vendor_pk\n                    INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n                ) TT1 ON TT2.node = TT1.node\n                AND TT2.site = TT1.site \n                AND TT2.cellname = TT1.cellname\n                AND TT2.mo = TT1.mo\n                AND TT2.parameter = TT1.parameter\n                AND TT2.bvalue = TT1.bvalue\n                AND TT2.nvalue = TT1.nvalue\n                WHERE\n                TT1.cellname IS NULL\n            )\n            DELETE FROM network_audit.network_baseline t1\n            WHERE t1.pk IN (SELECT pk from rd)\n        """.format(str_param_names, str_param_values, mo_name, cell_level_join)\n        print(sql)\n        cur.execute(sql)\n\n        # Update old entries\n        sql = """\n            WITH rd AS (\n                SELECT TT2.pk, TT1.* FROM \n                network_audit.network_baseline TT2\n                INNER JOIN \n                (\n                    select\n                    t8.name as node,\n                    t7.name as site,\n                    t4.cellname,\n                    t3.name as mo,\n                    t2.name as parameter,\n                    t1.value as bvalue,\n                    trim(t4.pvalue) as nvalue,\n                    t9.name as vendor,\n                    t10.name as technology,\n                    0 as modified_by,\n                    0 as added_by,\n                    date_time as date_added,\n                    date_time as date_modified\n                    from live_network.base_line_values t1\n                    INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n                    INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n                    INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n                    INNER JOIN (\n                        SELECT * FROM (\n                            SELECT\n                            '{2}' as "MO",\n                            gcell."CELLNAME" as cellname,\n                            gcell."varDateTime" as date_time,\n                            unnest(array[{0}]) AS pname,\n                            unnest(array[{1}]) AS pvalue\n                            FROM\n                            hua_cm_2g.{2} t_mo\n                            {3}\n                            WHERE\n                            t_mo.module_type = 'Radio'\n                        ) TT\n                    ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n                    INNER JOIN live_network.cells t6 on t6.name = t4.cellname\n                    INNER JOIN live_network.sites t7 on t7.pk = t6.site_pk\n                    INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n                    INNER JOIN vendors t9 on t9.pk = t6.vendor_pk\n                    INNER JOIN technologies t10 ON t10.pk = t6.tech_pk\n                ) TT1 ON TT2.node = TT1.node\n                AND TT2.site = TT1.site \n                AND TT2.cellname = TT1.cellname\n                AND TT2.mo = TT1.mo\n                AND TT2.parameter = TT1.parameter\n                AND TT2.bvalue = TT1.bvalue\n                AND TT2.nvalue = TT1.nvalue\n            )\n            UPDATE network_audit.network_baseline AS nb\n            SET \n            date_modified = rd.date_added, \n            age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n            FROM \n            rd \n            where \n            rd.pk = nb.pk\n        """.format(str_param_names, str_param_values, mo_name, cell_level_join)\n        print(sql)\n        cur.execute(sql)", "def make_nap_msg( i_curr, i_prev, edge_color=None):\n    nap_msg = NapMsg() #edge msg\n    nap_msg.c_timestamp = S_timestamp[i_curr]\n    nap_msg.prev_timestamp = S_timestamp[i_prev]\n    nap_msg.goodness = sim_scores_logistic[i_prev]\n\n    if edge_color is None:\n        edge_color = (0,1.0,0)\n\n    if len(edge_color) != 3:\n        edge_color = (0,1.0,0)\n\n    nap_msg.color_r = edge_color[0] #default color is green\n    nap_msg.color_g = edge_color[1]\n    nap_msg.color_b = edge_color[2]\n    return nap_msg", "def morphology(seed=425, th=120):\n    \n    # impact parameters\n    M = 1e8*u.Msun\n    B = 19.85*u.kpc\n    V = 220*u.km/u.s\n    phi = coord.Angle(0*u.deg)\n    theta = coord.Angle(th*u.deg)\n    Tenc = 0.01*u.Gyr\n    T = 0.5*u.Gyr\n    dt = 0.1*u.Myr\n    rs = 0*u.pc\n    \n    # potential parameters\n    potential = 
3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xr = 20*u.kpc + np.random.randn(Nstar)*0.02*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n\n plt.close()\n fig, ax = plt.subplots(3,1,figsize=(12,8), sharex=True)\n \n c_init = mpl.cm.Blues_r(1)\n c_fin0 = mpl.cm.Blues_r(0.5)\n c_fin = mpl.cm.Blues_r(0.2)\n \n eta = coord.Angle(np.arctan2(np.sqrt(stream['x'][0].to(u.kpc).value**2 + stream['x'][1].to(u.kpc).value**2),xr.to(u.kpc).value)*u.rad)\n xi = np.arctan2(stream['x'][1].to(u.kpc).value, stream['x'][0].to(u.kpc).value)\n xi = coord.Angle((xi - np.median(xi))*u.rad)\n \n vlabel = ['x', 'y', 'z']\n \n for i in range(3):\n plt.sca(ax[i])\n im = plt.scatter(xi.deg, eta.deg, c=stream['v'][i].value, s=20)\n \n plt.xlim(-60, 50)\n plt.ylim(55, 35)\n plt.gca().set_aspect('equal')\n \n if i==2:\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n \n divider = make_axes_locatable(plt.gca())\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.1)\n plt.colorbar(im, cax=cax)\n plt.ylabel('$V_{{{}}}$ [km s$^{{-1}}$]'.format(vlabel[i]))\n \n plt.tight_layout()", "def __call__(self, h):\n\n Wh = self.W(h)\n p_yt = F.log_softmax(Wh) # should be (B x V)\n\n return p_yt", "def output_loss_and_grads(self, h, V, c, y):\n\n loss, dh, dV, dc = 0.0, [], np.zeros_like(self.V), np.zeros_like(self.c)\n # calculate the output (o) - unnormalized log probabilities of classes\n # calculate yhat - softmax of the output\n # calculate the cross-entropy loss\n # calculate the derivative of the cross-entropy softmax loss with respect to the output (o)\n # calculate the gradients with respect to the output parameters V and c\n # calculate the gradients with respect to the hidden layer h\n for t in range(self.sequence_length):\n hp = h[:, t, :] # BS x H\n #o = self.output(hp, V, c) # leng x BS\n o = self.output(hp, V, c) # BS x leng\n #exp = np.exp(o) # leng x BS\n exp = np.exp(o) # BS x leng\n #s = exp / np.sum(exp, axis=0, keepdims=True) # leng x BS\n s = exp / np.sum(exp, axis=1, keepdims=True) # BS x leng\n yp = y[:, t, :]\n #dO = s - yp # leng x BS\n dO = s - yp # BS x leng\n #dV += np.dot(dO, hp.T) # ( leng x BS ) * ( H x BS ).T = leng x H\n dV += np.dot(hp.T, dO) # ( BS x H ).T * ( BS x leng ) = H x leng\n #dc += np.sum(dO, axis=1).reshape([-1, 1]) #\n dc += np.sum(dO, axis=0).reshape([1, -1]) #\n #dh.append(np.dot(self.V.T, dO)) # ( leng x H ).T * ( leng x BS ) = ( BS x H )\n dh.append(np.dot(dO, self.V.T)) # ( BS x leng ) * ( H x leng ).T = ( BS x H )\n loss += -np.sum(np.log(s)*yp)\n return loss, np.array(dh), dV, dc", "def connectInfo(self,compInfo, node, nodeDic, numNodesSub,subcktName):\n connInfo = []\n print \"compinfo-------->\",compInfo\n sourcesInfo = self.separateSource(compInfo)\n for eachline in 
compInfo:\n            words = eachline.split()\n            print "eachline----->",eachline\n            print "eachline[0]------->",eachline[0]\n            if eachline[0]=='r' or eachline[0]=='R' or eachline[0]=='c' or eachline[0]=='C' or eachline[0]=='d' or eachline[0]=='D' \\\n            or eachline[0]=='l' or eachline[0]=='L' or eachline[0]=='v' or eachline[0]=='V':\n                conn = 'connect(' + words[0] + '.p,' + nodeDic[words[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.n,' + nodeDic[words[2]] + ');'\n                connInfo.append(conn)\n            elif eachline[0]=='q' or eachline[0]=='Q':\n                print "Inside Transistor--->"\n                print "Node Dict------>",nodeDic\n                conn = 'connect(' + words[0] + '.C,' + nodeDic[words[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.B,' + nodeDic[words[2]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.E,' + nodeDic[words[3]] + ');'\n                connInfo.append(conn)\n            elif eachline[0]=='m' or eachline[0]=='M':\n                conn = 'connect(' + words[0] + '.D,' + nodeDic[words[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.G,' + nodeDic[words[2]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.S,' + nodeDic[words[3]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.B,' + nodeDic[words[4]] + ');'\n                connInfo.append(conn)\n            elif eachline[0] in ['f','h','F','H']:\n                vsource = words[3]\n                sourceNodes = sourcesInfo[vsource]\n                sourceNodes = sourceNodes.split()\n                conn = 'connect(' + words[0] + '.p1,'+ nodeDic[sourceNodes[0]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.n1,'+ nodeDic[sourceNodes[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n                connInfo.append(conn)\n            elif eachline[0] in ['g','e','G','E']:\n                conn = 'connect(' + words[0] + '.p1,'+ nodeDic[words[3]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.n1,'+ nodeDic[words[4]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.p2,'+ nodeDic[words[1]] + ');'\n                connInfo.append(conn)\n                conn = 'connect(' + words[0] + '.n2,'+ nodeDic[words[2]] + ');'\n                connInfo.append(conn)\n            elif eachline[0]=='x' or eachline[0]=='X':\n                templine = eachline.split()\n                temp = templine[0].split('x')\n                index = temp[1]\n                for i in range(0,len(templine),1):\n                    if templine[i] in subcktName: #Ask Manas Added subcktName in function Call\n                        subname = templine[i]\n                nodeNumInfo = self.getSubInterface(subname, numNodesSub)\n                for i in range(0, numNodesSub[subname], 1):\n                    #conn = 'connect(' + subname + '_instance' + index + '.' + nodeDic[nodeNumInfo[i]] + ',' + nodeDic[words[i+1]] + ');'\n                    conn = 'connect(' + subname + '_instance' + index + '.' 
+ 'n'+ nodeNumInfo[i] + ',' + nodeDic[words[i+1]] + ');'\n connInfo.append(conn)\n else:\n continue\n if '0' or 'gnd' in node:\n conn = 'connect(g.p,n0);'\n connInfo.append(conn)\n \n return connInfo", "def _create_msg(self, tr_id, i_triples, i_type, r_triples, r_type, confirm):\n params = SSAP_UPDATE_PARAM_TEMPLATE % (str(i_type).upper(),\n str(i_triples),\n str(r_type).upper(),\n str(r_triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp", "def h(self,node):\n return 0", "def womcom(hop,num,weights):\n import logging\n import matplotlib.pyplot as plt\n import numpy as np\n from tmath.wombat.inputter import inputter\n from tmath.wombat import HOPSIZE\n hopnum=[0]\n weight=[0]\n while (hopnum[0] < 1) or (hopnum[0] > HOPSIZE):\n hopnum[0]=inputter('Enter first hopper: ','int',False)\n if (weights):\n weight[0]=inputter('Enter weight for first hopper: ','float',False)\n for i in range(1,num):\n hoploop=0\n weightloop=0\n if (num > 3):\n print('Use hopper 99 to end')\n while (hoploop < 1) or (hoploop > HOPSIZE):\n hoploop=inputter('Enter next hopper: ','int',False)\n if (hoploop == 99):\n break\n if (hoploop == 99):\n break\n if (hop[hopnum[0]].wave[0] != hop[hoploop].wave[0]) or \\\n (hop[hopnum[0]].wave[1] != hop[hoploop].wave[1]) or \\\n (hop[hopnum[0]].wave[-1] != hop[hoploop].wave[-1]):\n print('Hoppers to not have the same wavelength scale!')\n return hop\n hopnum.append(hoploop)\n if (weights):\n weightloop=inputter('Enter next weight: ','float',False)\n weight.append(weightloop)\n if (weights) and (abs(sum(weight)-1.0) > 0.00001):\n print('Weights do not add to 1.0')\n return hop\n if (not weights):\n weight=[1./len(hopnum)]*len(hopnum)\n newflux=np.zeros(len(hop[hopnum[0]].flux))\n logging.debug('Combining spectra:')\n \n for i in range(0,len(hopnum)):\n newflux=newflux+hop[hopnum[i]].flux*weight[i]\n logging.debug('Combining {} with weight {}'.format(hop[hopnum[i]].obname,\\\n weight[i]))\n hopout=0\n while (hopout < 1) or (hopout > HOPSIZE):\n hopout=inputter('Enter hopper to store combined spectrum: ','int',False)\n hop[hopout].wave=hop[hopnum[0]].wave.copy()\n hop[hopout].flux=newflux.copy()\n hop[hopout].obname=hop[hopnum[0]].obname\n hop[hopout].header=hop[hopnum[0]].header\n hop[hopout].var=hop[hopnum[0]].var.copy()\n plt.cla()\n plt.plot(hop[hopout].wave,hop[hopout].flux,drawstyle='steps-mid',color='k')\n plt.xlabel('Wavelength')\n plt.ylabel('Flux')\n plt.title(hop[hopout].obname)\n xmin,xmax=plt.xlim()\n ymin,ymax=plt.ylim()\n print('\\nPlotting combined spectrum in black, components in color\\n')\n for i in range(0,len(hopnum)):\n plt.plot(hop[hopnum[i]].wave,hop[hopnum[i]].flux,drawstyle='steps-mid')\n plt.xlim([xmin,xmax])\n plt.ylim([ymin,ymax])\n \n #FIX var\n return hop", "def CryoFan(m_dot, p_in, T_in, p_out, T_out):\r\n\r\n def Q_Boehmwind(vol_flow, Rho):\r\n # Efficiency of the Boehmwind CryoFan in -.\r\n # Fit function of measured data for rpm = 22000.\r\n # Parameter: Volume flow needs to be in m³/h.\r\n efficiency_Boehmwind = 0.01 *(1.5962e-3*vol_flow**4 - 1.0414e-2*vol_flow**3 - 2.8084*vol_flow**2 + 2.3715e1*vol_flow + 9.1550) #-\r\n # Dynamic loss of the Boehmwind CryoFan in W/rho.\r\n # Fit function of measured data for rpm = 22000.\r\n # Parameter: Volume flow needs to be in m³/h.\r\n dynLoss_Boehmwind = -3.1011e-4*vol_flow**4 - 3.0597e-3*vol_flow**3 + 1.6961e-2*vol_flow**2 + 2.9853e-1*vol_flow + 4.6333e-2 #W/rho\r\n\r\n # Friction loss\r\n Q_friction = 
dynLoss_Boehmwind * Rho #W\r\n # Dynamic heat load\r\n Q_dynamic = Q_friction/efficiency_Boehmwind - Q_friction #W\r\n # Static heat load\r\n # Using the given value for operation at 30 K and 20 bara\r\n Q_static = 7.0 #W\r\n\r\n return Q_friction + Q_dynamic + Q_static\r\n\r\n # Calculation of a mean rho\r\n Rho_in = hp.HeCalc(3, 0, 1, p_in, 2, T_in, 1) #kg/m³\r\n Rho_out = hp.HeCalc(3, 0, 1, p_out, 2, T_out, 1) #kg/m³\r\n Rho = 0.5 * (Rho_in + Rho_out) #kg/m³\r\n # Calculation of a mean cp\r\n Cp_in = hp.HeCalc(14, 0, 1, p_in, 2, T_out, 1) #J/(kgK)\r\n Cp_out = hp.HeCalc(14, 0, 1, p_out, 2, T_out, 1) #J/(kgK)\r\n Cp = 0.5 * (Cp_in + Cp_out) #J/(kgK)\r\n # Mean volume flow\r\n vol_flow = m_dot / Rho * 3600 #m³/h\r\n\r\n ## Heat loads\r\n # Estimating the different heat loads that are applied on the system by the cryofan\r\n # Static heat load and the heat load from the fan efficiency will be dissipated across the fan.\r\n # Friction losses will occur in the piping of system and application.\r\n # Since the friction losses are small in the respective application it is assumed that all friction loss occurs at the CryoFan aswell!\r\n # ->Tested the friction loss in a remote cooling application and it was negligible\r\n # Boehmwind CryoFan\r\n # Call of the function for the Boehmwind CryoFan\r\n Q_CryoFan = Q_Boehmwind(vol_flow, Rho)\r\n\r\n # New temperature due to the heat load of the Cryofan\r\n T_out = T_in + Q_CryoFan/(Cp * m_dot)\r\n\r\n # Prepare the output of the results\r\n h_out = hp.HeCalc(9, 0, 1, p_out, 2, T_out, 1) #J/kg\r\n\r\n print(\"Cryofan heat load: \", Q_CryoFan)\r\n\r\n state_out = {\"h\": h_out, \"T\": T_out, \"p\": p_out}\r\n return state_out", "def test_weight_hh(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n gene1, gene2 = get_gru_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n for value in gene3.weight_hh:\n for v in value:\n if v == 0:\n p1 = True\n elif v == 1:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(10):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertNotEqual(np.linalg.norm(gene3.weight_hh - gene1.weight_hh), 0)\n self.assertEqual(np.linalg.norm(gene3.weight_hh - gene2.weight_hh), 0)", "def pair (cls):\n a_to_b = MessageChannel()\n b_to_a = MessageChannel()\n a = cls(a_to_b, b_to_a)\n b = cls(b_to_a, a_to_b)\n return (a,b)", "def wemo_process(self, msg):\n if msg[\"content\"][\"command\"] == \"nickname\":\n # print msg\n self.nickname = msg[\"content\"][\"value\"]\n self.controller.sending(\n {\"subject\": \"control\" + \".\" + self.controller.type,\n \"content_type\": \"request\",\n \"content\": {\"request\": \"nickname\",\n \"target\": self.controller.type + \".\" + self.name,\n #\"token\": self.controller.target,\n \"value\": {\"name\": self.name, \"nickname\": msg[\"content\"][\"value\"]}}})\n elif 
msg[\"content\"][\"command\"] == \"status\":\n # Not gone the way of the dodo\n # try:\n self.controller.sending({\"subject\": self.controller.type,\n \"content_type\": \"event\",\n \"content\": {\"event\": \"status\",\n \"target\": self.controller.type +\n \".\" +\n self.name,\n \"icon status\":\n {\"bu-radar1\": {\"fill\":\"black\", \"opacity\":\"1\"},\n \"bu-radar2\": {\"fill\":cssColour(), \"opacity\":\"0\"},\n \"bu-not-present\": {\n \"opacity\": 0}},\n \"value\": {}}})\n # except: #Most probably is known but we lost pairing\n # pass\n\n\n return None", "def interpret_attributes(self, msg_data):\n struct = OrderedDict([('sequence', 8),\n ('short_addr', 16),\n ('endpoint', 8),\n ('cluster_id', 16),\n ('attribute_id', 16),\n ('attribute_status', 8),\n ('attribute_type', 8),\n ('attribute_size', 'len16'),\n ('attribute_data', 'raw'),\n ('end', 'rawend')])\n msg = self.decode_struct(struct, msg_data)\n device_addr = msg['short_addr']\n endpoint = msg['endpoint']\n cluster_id = msg['cluster_id']\n attribute_id = msg['attribute_id']\n attribute_size = msg['attribute_size']\n attribute_data = msg['attribute_data']\n self.set_device_property(device_addr, endpoint, ZGT_LAST_SEEN, strftime('%Y-%m-%d %H:%M:%S'))\n\n if msg['sequence'] == b'00':\n ZGT_LOG.debug(' - Sensor type announce (Start after pairing 1)')\n elif msg['sequence'] == b'01':\n ZGT_LOG.debug(' - Something announce (Start after pairing 2)')\n\n # Device type\n if cluster_id == b'0000':\n if attribute_id == b'0005':\n self.set_device_property(device_addr, endpoint, 'type', attribute_data.decode())\n ZGT_LOG.info(' * type : {}'.format(attribute_data))\n ## proprietary Xiaomi info including battery\n if attribute_id == b'ff01' and attribute_data != b'':\n struct = OrderedDict([('start', 16), ('battery', 16), ('end', 'rawend')])\n raw_info = unhexlify(self.decode_struct(struct, attribute_data)['battery'])\n battery_info = int(hexlify(raw_info[::-1]), 16)/1000\n self.set_device_property(device_addr, endpoint, 'battery', battery_info)\n ZGT_LOG.info(' * Battery info')\n ZGT_LOG.info(' * Value : {} V'.format(battery_info))\n # Button status\n elif cluster_id == b'0006':\n ZGT_LOG.info(' * General: On/Off')\n if attribute_id == b'0000':\n if hexlify(attribute_data) == b'00':\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_ON)\n ZGT_LOG.info(' * Closed/Taken off/Press')\n else:\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_OFF)\n ZGT_LOG.info(' * Open/Release button')\n elif attribute_id == b'8000':\n clicks = int(hexlify(attribute_data), 16)\n self.set_device_property(device_addr, endpoint, ZGT_STATE, ZGT_STATE_MULTI.format(clicks))\n ZGT_LOG.info(' * Multi click')\n ZGT_LOG.info(' * Pressed: {} times'.format(clicks))\n # Movement\n elif cluster_id == b'000c': # Unknown cluster id\n if attribute_id == b'ff05':\n if hexlify(attribute_data) == b'01f4':\n ZGT_LOG.info(' * Rotation horizontal')\n elif attribute_id == b'0055':\n ZGT_LOG.info(' * Rotated: %s°' % (unpack('!f', attribute_data)[0]))\n elif cluster_id == b'0012': # Unknown cluster id\n if attribute_id == b'0055':\n if hexlify(attribute_data) == b'0000':\n ZGT_LOG.info(' * Shaking')\n elif hexlify(attribute_data) in [b'0100', b'0101', b'0102', b'0103', b'0104', b'0105']:\n ZGT_LOG.info(' * Sliding')\n else:\n ZGT_LOG.info(' * Rotating vertical')\n if hexlify(attribute_data) in [b'0050', b'0042',\n b'0044', b'0060',\n b'0045', b'0068',\n b'0041', b'0048',\n\n b'0063', b'005c',\n b'0059', b'004b',\n b'005d', b'006b',\n b'005a', 
b'0053',\n\n b'004a', b'0051',\n b'0054', b'0062',\n b'0069', b'004d',\n b'006c', b'0065',]:\n ZGT_LOG.info(' * Rotated: 90°')\n if hexlify(attribute_data) in [b'0080', b'0083',\n b'0081', b'0084',\n b'0085', b'0082',]:\n ZGT_LOG.info(' * Rotated: 180°')\n # Illuminance Measurement\n elif cluster_id == b'0400':\n # MeasuredValue\n if attribute_id == b'0000':\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n self.set_device_property(device_addr, endpoint, ZGT_ILLUMINANCE_MEASUREMENT, illuminance)\n # MinMeasuredValue\n elif attribute_id == b'0001':\n if attribute_data == b'FFFF':\n ZGT_LOG.info('Minimum illuminance is unused.')\n else:\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Minimum illuminance is ', illuminance)\n # MaxMeasuredValue\n elif attribute_id == b'0002':\n if attribute_data == b'FFFF':\n ZGT_LOG.info('Maximum illuminance is unused.')\n else:\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Maximum illuminance is ', illuminance)\n # Tolerance\n elif attribute_id == b'0003':\n illuminance = int.from_bytes(attribute_data, 'big', signed=True)\n ZGT_LOG.info('Illuminance tolerance is ', illuminance)\n # Sensor type\n elif attribute_id == b'0004':\n sensor_type = 'Unknown'\n if attribute_data == b'00':\n sensor_type = 'Photodiode'\n elif attribute_data == b'01':\n sensor_type = 'CMOS'\n elif b'02' <= attribute_data <= b'3F':\n sensor_type = 'Reserved'\n elif b'40' <= attribute_data <= b'FE':\n sensor_type = 'Reserved for manufacturer'\n ZGT_LOG.info('Sensor type is: ', sensor_type)\n # Temperature\n elif cluster_id == b'0402':\n temperature = int.from_bytes(attribute_data, 'big', signed=True) / 100\n #temperature = int(hexlify(attribute_data), 16) / 100\n self.set_device_property(device_addr, endpoint, ZGT_TEMPERATURE, temperature)\n ZGT_LOG.info(' * Measurement: Temperature'),\n ZGT_LOG.info(' * Value: {} °C'.format(temperature))\n # Atmospheric Pressure\n elif cluster_id == b'0403':\n ZGT_LOG.info(' * Atmospheric pressure')\n pressure = int(hexlify(attribute_data), 16)\n if attribute_id == b'0000':\n self.set_device_property(device_addr, endpoint, ZGT_PRESSURE, pressure)\n ZGT_LOG.info(' * Value: {} mb'.format(pressure))\n elif attribute_id == b'0010':\n self.set_device_property(device_addr, endpoint, ZGT_DETAILED_PRESSURE, pressure/10)\n ZGT_LOG.info(' * Value: {} mb'.format(pressure/10))\n elif attribute_id == b'0014':\n ZGT_LOG.info(' * Value unknown')\n # Humidity\n elif cluster_id == b'0405':\n humidity = int(hexlify(attribute_data), 16) / 100\n self.set_device_property(device_addr, endpoint, ZGT_HUMIDITY, humidity)\n ZGT_LOG.info(' * Measurement: Humidity')\n ZGT_LOG.info(' * Value: {} %'.format(humidity))\n # Presence Detection\n elif cluster_id == b'0406':\n # Only sent when movement is detected\n if hexlify(attribute_data) == b'01':\n self.set_device_property(device_addr, endpoint, ZGT_EVENT, ZGT_EVENT_PRESENCE)\n ZGT_LOG.debug(' * Presence detection')\n\n ZGT_LOG.info(' FROM ADDRESS : {}'.format(msg['short_addr']))\n ZGT_LOG.debug(' - Source EndPoint : {}'.format(msg['endpoint']))\n ZGT_LOG.debug(' - Cluster ID : {}'.format(msg['cluster_id']))\n ZGT_LOG.debug(' - Attribute ID : {}'.format(msg['attribute_id']))\n ZGT_LOG.debug(' - Attribute type : {}'.format(msg['attribute_type']))\n ZGT_LOG.debug(' - Attribute size : {}'.format(msg['attribute_size']))\n ZGT_LOG.debug(' - Attribute data : {}'.format(hexlify(msg['attribute_data'])))", "def _cat_directions(h):\n if 
bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h", "def on_message(mqttc,obj,msg):\n gateways = []\n output['measurement'] = 'LoStick'\n output['time'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ')\n try:\n x = json.loads(msg.payload.decode('utf-8'))\n if \"up\" in msg._topic.decode(\"utf-8\"):\n airtime = x[\"metadata\"][\"airtime\"]\n gateways = x[\"metadata\"][\"gateways\"]\n fields[\"airtimeUL\"] = airtime\n output['fields'] = fields\n elif \"down\" in msg._topic.decode(\"utf-8\"):\n airtime = x[\"config\"][\"airtime\"]\n fields[\"airtimeDL\"] = airtime\n if len(gateways) > 1:\n rssi = 0\n for gw in gateways:\n rssi += gw[\"rssi\"]\n rssi /= len(gateways)\n fields[\"gw_rssi\"] = rssi\n print(\"outpout: \", output)\n output['tags'] = tags\n output['fields'] = fields\n client.write_points([output])\n sys.stdout.flush()\n except Exception as e:\n print(e)\n pass", "def comsume_msg(self, msg_type):", "def makeD2hhAsymm(name,\n config,\n KPIDK_string,\n PiPIDK_string,\n Mass_low_string,\n Mass_high_string,\n CombPIDK_string,\n DecayDescriptor,\n inputSel,\n useTOS,\n Hlt1TOS,\n Hlt2TOS\n ) :\n\n def makeTISTOS( name, _input, _hlttos ) :\n from Configurables import TisTosParticleTagger\n _tisTosFilter = TisTosParticleTagger( name + \"Tagger\" )\n _tisTosFilter.TisTosSpecs = _hlttos\n return Selection( name\n , Algorithm = _tisTosFilter\n , RequiredSelections = [ _input ]\n ) \n\n _Kcuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _KcutsPIDK = KPIDK_string % locals()['config']\n _Kcuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Kcuts = _Kcuts1 + _KcutsPIDK + _Kcuts2\n _Picuts1 = \"~ISMUON & (PT > %(DaugPtMin)s* MeV) & (MIPCHI2DV(PRIMARY) > %(DaugIPChi2)s)\" % locals()['config']\n _PicutsPIDK = PiPIDK_string % locals()['config']\n _Picuts2 = \" & (ISLONG) & (P > %(DaugP)s* MeV) & (TRCHI2DOF < %(DaugTrkChi2)s)\" % locals()['config']\n _Picuts = _Picuts1 + _PicutsPIDK + _Picuts2\n _dauCuts = { 'K+': _Kcuts, 'pi+': _Picuts }\n\n _massLow = Mass_low_string % locals()['config']\n _massHigh = Mass_high_string % locals()['config']\n _combCuts1 = \"(APT > %(D0Pt)s* MeV)\" \\\n \"& (AHASCHILD( PT > %(DaugPtMax)s* MeV ) )\" \\\n \"& (ADOCA(1,2)< %(D0DOCA)s* mm)\" \\\n \"& (AP > %(D0P)s* MeV)\" % locals()['config']\n _combCutsPIDK = CombPIDK_string % locals()['config']\n _combCuts = _combCuts1 + _combCutsPIDK + _massLow + _massHigh\n\n _motherCuts = \"(VFASPF(VCHI2PDOF) < %(D0VtxChi2Ndof)s)\" \\\n \"& (BPVVDCHI2 > %(D0FDChi2)s)\" \\\n \"& (BPVLTIME() > %(D0Tau)s)\" \\\n \"& (BPVDIRA > %(D0BPVDira)s)\" % locals()['config']\n\n _D0 = CombineParticles( DecayDescriptor = DecayDescriptor,\n MotherCut = _motherCuts,\n CombinationCut = _combCuts,\n DaughtersCuts = _dauCuts)\n\n _sel = Selection ( name+'Sel',\n Algorithm = _D0,\n RequiredSelections = inputSel )\n\n if not useTOS:\n return _sel\n\n _selD2hhHlt1TOS = makeTISTOS( name + \"D2hhHlt1TOS\"\n , _sel\n , Hlt1TOS\n )\n _selD2hhHlt2TOS = makeTISTOS( name + \"D2hhHlt2TOS\"\n , _selD2hhHlt1TOS\n , Hlt2TOS\n )\n \n return _selD2hhHlt2TOS", "def _create_msg(self, tr_id, triples, type, confirm):\n params = SSAP_REMOVE_PARAM_TEMPLATE % (str(type).upper(),\n str(triples),\n str(confirm).upper())\n tmp = SSAP_MESSAGE_TEMPLATE % (str(self.node_id), str(self.targetSS),\n self.tr_type, str(tr_id), params)\n return tmp", "def horde_message(self, message):", "def get_v_given_h_samples(self, h):\n \n 
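# RBM visible-unit conditional: p(v=1 | h) = sigmoid(h.dot(W.T) + b); with h of shape (batch, n_hidden) and W of shape (n_visible, n_hidden), sig_input below is (batch, n_visible) and the Bernoulli draw samples v elementwise (shapes inferred from the code)\n        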
sig_input = T.dot(h, T.transpose(self.W)) + self.b\n \n sig_output= T.nnet.sigmoid(sig_input)\n \n sample = self.theano_rand_gen.binomial(size= sig_output.shape,\n n=1, \n p= sig_output,\n dtype=theano.config.floatX)\n \n return [sig_input, sig_output, sample]", "def concatTwoHMMs(hmm1, hmm2):\n \n concatedHMM = {}\n #M is the number of emitting states in each HMM model (could be different for each)\n #K is the sum of the number of emitting states from the input models\n \n M1 = hmm1['means'].shape[0]\n M2 = hmm2['means'].shape[0]\n K = M1 + M2\n \n concatedHMM['name'] = hmm1['name'] + hmm2['name']\n concatedHMM['startprob'] = np.zeros((K + 1, 1))\n concatedHMM['transmat'] = np.zeros((K + 1, K + 1))\n concatedHMM['means'] = np.vstack((hmm1['means'],hmm2['means']))\n concatedHMM['covars'] = np.vstack((hmm1['covars'],hmm2['covars']))\n \n \n start1 = hmm1['startprob'].reshape(-1,1)\n start2 = hmm2['startprob'].reshape(-1,1)\n \n concatedHMM['startprob'][:hmm1['startprob'].shape[0]-1,:] = start1[:-1,:]\n concatedHMM['startprob'][hmm1['startprob'].shape[0]-1:,:] = np.dot(start1[-1,0],start2)\n trans = concatedHMM['transmat']\n trans1 = hmm1['transmat']\n trans2 = hmm2['transmat']\n\n trans[:trans1.shape[0]-1,:trans1.shape[1]-1] = trans1[:-1,:-1]\n temp = trans1[:-1,-1].reshape(-1,1)\n trans[:trans1.shape[0]-1,trans1.shape[1]-1:] = \\\n np.dot(temp,start2.T)\n trans[trans1.shape[0]-1:,trans1.shape[1]-1:] = trans2\n concatedHMM['transmat'] = trans \n \n return concatedHMM", "def hwc2chw(hwc):\n return hwc.permute(2, 0, 1)", "def _data_move_out_mc_on_h():\n\n pass", "def genfb_py(h, n, u, v, f, dt, dx, dy, du,dv,dn, gridu,gridv,gridn, threadblock, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True, ): # generalized forward backward feedback timestep\n \n p5 = np.float32(0.5)\n one = np.float32(1)\n p32 = np.float32(1.5)\n beta = np.float32(beta)\n eps = np.float32(eps)\n gamma= np.float32(gamma)\n mu = np.float32(mu)\n \n dn_m1,dn_m2,dn_m0 = dn # dn[0], dn[1], dn[2] # unpack\n if dn_m1 is dn_m2 or dn_m1 is dn_m0:\n print (\"error dn_m1\")\n if dn_m2 is dn_m0: \n print (\"error dn_m0\")\n print( dn_m1[280,5],dn_m2[280,5],dn_m0[280,5]) \n# hn = n.copy_to_host()\n# print ('n', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n# hn = u.copy_to_host()\n# print ('u', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n# hn = v.copy_to_host()\n# print ('v', hn.shape,n.shape, np.argmax(hn),np.max(hn),np.argmin(hn),np.min(hn))\n dndt_x[gridn, threadblock](h, n, u, v, dx, dy, dn_m0)\n \n # must do the following before the u and v !\n #n1 = n + ((p32+beta)* dn_m0 - (p5+beta+beta)* dn_m1+ (beta)* dn_m2)*dt\n# n_m0 = dn_m0.copy_to_host()\n# print ('dn_m0',dn_m0.shape,np.argmax(n_m0),np.max(n_m0),np.argmin(n_m0),np.min(n_m0))\n# lincomb4_cuda[gridn,threadblock](n, dn_m0, dn_m1, dn_m2, one, (p32+beta)*dt, -(p5+beta+beta)*dt, (beta)*dt, n)\n \n h_n = n.copy_to_host()\n hn_m0 = dn_m0.copy_to_host()\n hn_m1 = dn_m1.copy_to_host()\n hn_m2 = dn_m2.copy_to_host()\n h_n = h_n+(p32+beta)*dt*hn_m0 -(p5+beta+beta)*dt*hn_m0+(beta)*dt*hn_m0\n n[:]=h_n\n \n \n \n du_m0,du_m1,du_m2,du_p1 = du # du[0], du[1], du[2], du[3] # unpack\n# if du_p1 is du_m0 or du_p1 is du_m1 or du_p1 is du_m2:\n# print (\"error du_p1\")\n# if du_m0 is du_m2 or du_m0 is du_m1 :\n# print (\"error du_m0\")\n# if du_m2 is du_m1:\n# print (\"error du_m1\")\n print( du_m0[280,5],du_m1[280,5],du_m2[280,5],du_p1[280,5]) \n 
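# NOTE: the host-side update above applies all three history weights to hn_m0; the commented-out lincomb4 kernel combines dn_m0, dn_m1 and dn_m2 per the formula n1 = n + ((1.5+beta)*dn_m0 - (0.5+2*beta)*dn_m1 + beta*dn_m2)*dt, which appears to be the intended combination\n    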
dudt_x[gridu, threadblock](h, n, f, u, v, dx, dy, du_p1, grav, cori, advx, advy, attn,nu,mu)\n\n dv_m0,dv_m1,dv_m2,dv_p1 = dv #dv[0], dv[1], dv[2], dv[3] # unpack \n print( dv_m0[280,5],dv_m1[280,5],dv_m2[280,5],dv_p1[280,5]) \n dvdt_x[gridv, threadblock](h, n, f, u, v, dx, dy, dv_p1, grav, cori, advx, advy, attn,nu,mu)\n \n #u1 = u+ ((p5+gamma+eps+eps)*du_p1 +(p5-gamma-gamma-eps-eps-eps)*du_m0 +gamma*du_m1+eps*du_m2)*dt\n # lincomb5_cuda[gridu,threadblock](u, du_p1, du_m0, du_m1, du_m2, one, (p5+gamma+eps+eps)*dt, (p5-gamma-gamma-eps-eps-eps)*dt, gamma*dt, eps*dt, u)\n lincomb5_cuda[gridu,threadblock](u, du_p1, du_m0, du_m1, du_m2, one, one*dt, np.float32(0.0), np.float32(0.0), np.float32(0.0), u)\n\n #v1 = v+ ((p5+gamma+eps+eps)*dv_p1 +(p5-gamma-gamma-eps-eps-eps)*dv_m0 +gamma*dv_m1+eps*dv_m2)*dt\n # lincomb5_cuda[gridv,threadblock](v, dv_p1, dv_m0, dv_m1, dv_m2, one, (p5+gamma+eps+eps)*dt, (p5-gamma-gamma-eps-eps-eps)*dt, gamma*dt, eps*dt, v)\n lincomb5_cuda[gridv,threadblock](v, dv_p1, dv_m0, dv_m1, dv_m2, one, one*dt, np.float32(0.0), np.float32(0.0), np.float32(0.0), v)\n \n\n dv = [ dv_p1,dv_m0,dv_m1,dv_m2 ]\n du = [ du_p1,du_m0,du_m1,du_m2 ]\n dn = [ dn_m0,dn_m1,dn_m2 ]\n return du, dv, dn", "def check_Motifs(H, m):\n\t#This function will take each possible subgraphs of gr of size 3, then\n\t#compare them to the mo dict using .subgraph() and is_isomorphic\n\t\n\t#This line simply creates a dictionary with 0 for all values, and the\n\t#motif names as keys\n\n\t##paper source \"Higher-order organization ofcomplex networks\" (2016) Benson et al, Science\n\t## I choose only the unidirection ones : M1, M5, M8, M9, M10\n\n\n\ts = int(m)\n\n\tif (s==3):\n\t\t#motifs = {'M1': nx.DiGraph([(1,2),(2,3),(3,1)]), 'M5': nx.DiGraph([(1,2),(2,3),(1,3)]), 'M8': nx.DiGraph([(2, 1),(2,3)]), 'M9': nx.DiGraph([(2, 1),(3, 2)]), 'M10': nx.DiGraph([(1,2),(3,2)])}\n\t\tmotifs = {'M1': [(1,2),(2,3),(3,1)], 'M5': [(1,2),(2,3),(1,3)], 'M8': [(2, 1),(2,3)], 'M9': [(2, 1),(3, 2)], 'M10': [(1,2),(3,2)],\n\t\t\t\t\t'M2': [(1,2),(2,3),(3,2),(3,1)], 'M3': [(1,2),(2,3),(3,2),(1,3),(3,1)], 'M4': [(1,2),(2,1),(2,3),(3,2),(1,3),(3,1)], 'M6': [(2, 1),(2,3),(1,3),(3,1)], 'M7': [(1,2),(3,2),(1,3),(3,1)],\n\t\t\t\t\t'M11': [(1,2),(2,1),(2,3)], 'M12': [(1,2),(2,1),(3,2)], 'M13': [(1,2),(2,1),(2,3),(3,2)]}\n\n\telif (s==4): ## under development\n\t\tmotifs = {'bifan': [(1,2),(1,3),(4,2),(4,3)]}\n\n\t\t#edgeLists=[[[1,2],[1,3],[1,4]]]\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3]])\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3],[3,4]])\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3],[3,4],[2,4]])\n\telse:\n\t\traise nx.NetworkXNotImplemented('Size of motif must be 3 or 4')\n\n\t#outf = open(f2, 'w')\n\t#print >> outf, 'commitid|motiflabel|count'\n\n\tG = H\n\n\tmcount = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t## match the pattern and count the motifs \n\tdict_edges = defaultdict(list); dict_nodes = defaultdict(list)\n\tfor key in motifs :\n\t\n\t\t\tpattern = motifs[key]\n\t\t\n\t\t\tgmoti = nx.DiGraph()\n\t\t\tgmoti.add_edges_from(pattern)\n\n\t\t\tmotif_pattern_obs = subgraph_pattern(G, gmoti, sign_sensitive=False)\n\n\t\t\ts = []\n\t\t\tfor subgraph in motif_pattern_obs :\n\t\t\t\ttup = tuple(subgraph.keys())\n\t\t\t\ts.append(tup)\n\n\t\t\tuniqs = list(set(s))\n\n\t\t\tif len(uniqs) > 0 :\n\t\t\t\tmaplist = map(list, uniqs)\n\n\t\t\t### label the edges as per the motif labels\n\t\t\t\tmcount[str(key)] = len(maplist)\n\n\t\t\t\tfor triplets in maplist :\n\t\t\t\t\tsubgraph = 
G.subgraph(triplets)\n\t\t\t\t\tedgeLists = [e for e in subgraph.edges() if G.has_edge(*e)]\n\n\t\t\t\t## an edge is part of multiple motifs\n\t\t\t\t## lets count the number of motifs an edge is part of \n\t\t\t\t\tfor u, v in edgeLists :\n\t\t\t\t\t\tdict_edges[(u, v)].append(str(key))\n\t\n\n\t\t\t\t## A node is also part of multiple motifs. \n\t\t\t\t## We count the total number of motifs a node is part of\n\t\t\t\t## We count the frequency of occurence each motif the node is part of\n\t\t\t\t\tnodelists = subgraph.nodes()\n\t\t\t\t\tfor n in nodelists :\n\t\t\t\t\t\tdict_nodes[str(n)].append(str(key))\n\n\n\n\t\t#for keys, values in mcount.items() :\n\t\t#\tprint >> outf, '%s|%s|%s' %(outname, keys, values) \n\n\t### Let's mark the edge with motif type and count. We count the number of types\n\t### of motif an edge is a part of. An edge could appear in M1: M1x times and in M2: M2x times and so on\n\n\tfor u,v in G.edges() :\n\t\t\tif (u,v) in dict_edges :\n\t\t\t\tG[u][v]['num_motif_edge'] = len(list(set(dict_edges[(u,v)])))\n\n\t### Let's mark the node with motif type and count. We count the number of types of motif a node is a part of. \n\n\tfor n in G.nodes() :\n\t\tmotficountnode = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t\tif str(n) in dict_nodes :\n\t\t\tsubgraphnodeslist = dict_nodes[str(n)]\n\n\t\t\tfor key in subgraphnodeslist:\n\t\t\t\tmotficountnode[str(key)] +=1\n\n\t\tfor motif, count in motficountnode.items() :\n\t\t\tG.node[n][str(motif)] = int(count)\n\n\t### Let's mark the edge with motif type and count. We count the number of types\n\t### of motif an edge is a part of. An edge could appear in M1: M1x times and in M2: M2x times and so on\n\n\tfor u,v in G.edges() :\n\t\tmotficountedge = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t\tif (u,v) in dict_edges :\n\t\t\tsubgraphedgeslist = dict_edges[(u,v)]\n\n\t\t\tfor key in subgraphedgeslist:\n\t\t\t\tmotficountedge[str(key)] +=1\n\n\t\tfor motif, count in motficountedge.items() :\n\t\t\tG[u][v][str(motif)] = int(count)\n\n\n\treturn G", "def outgoing_message(self,msg):\n if msg._type == 'offset_etddf/AgentMeasurement':\n self.comms_meas_pub.publish(msg)\n elif msg._type == 'offset_etddf/AgentState':\n self.comms_state_pub.publish(msg)", "def Message(title, msg):\r\n return _hiew.HiewGate_Message(title, msg)", "def GOST34112012H256(msg):\n pi_sharp = [\n 252, 238, 221, 17, 207, 110, 49, 22, 251, 196, 250, 218, 35, 197, 4, 77, 233, 119, 240,\n 219, 147, 46, 153, 186, 23, 54, 241, 187, 20, 205, 95, 193, 249, 24, 101, 90, 226, 92, 239,\n 33, 129, 28, 60, 66, 139, 1, 142, 79, 5, 132, 2, 174, 227, 106, 143, 160, 6, 11, 237, 152, 127,\n 212, 211, 31, 235, 52, 44, 81, 234, 200, 72, 171, 242, 42, 104, 162, 253, 58, 206, 204, 181,\n 112, 14, 86, 8, 12, 118, 18, 191, 114, 19, 71, 156, 183, 93, 135, 21, 161, 150, 41, 16, 123,\n 154, 199, 243, 145, 120, 111, 157, 158, 178, 177, 50, 117, 25, 61, 255, 53, 138, 126, 109,\n 84, 198, 128, 195, 189, 13, 87, 223, 245, 36, 169, 62, 168, 67, 201, 215, 121, 214, 246, 124,\n 34, 185, 3, 224, 15, 236, 222, 122, 148, 176, 188, 220, 232, 40, 80, 78, 51, 10, 74, 167, 151,\n 96, 115, 30, 0, 98, 68, 26, 184, 56, 130, 100, 159, 38, 65, 173, 69, 70, 146, 39, 94, 85, 47,\n 140, 163, 165, 125, 105, 213, 149, 59, 7, 88, 179, 64, 134, 172, 29, 247, 48, 55, 107, 228,\n 136, 217, 231, 137, 225, 27, 131, 73, 76, 63, 248, 254, 141, 83, 170, 144, 202, 216, 133, 97,\n 32, 113, 103, 164, 45, 43, 9, 91, 203, 155, 37, 208, 190, 229, 108, 82, 89, 166, 116, 210,\n 
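# last rows of the 256-entry pi substitution table (the byte S-box specified in GOST R 34.11-2012 "Streebog")\n        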
230, 244, 180, 192, 209, 102, 175, 194, 57, 75, 99, 182\n ]\n\n C = [\n 0xb1085bda1ecadae9ebcb2f81c0657c1f2f6a76432e45d016714eb88d7585c4fc4b7ce09192676901a2422a08a460d31505767436cc744d23dd806559f2a64507,\n 0x6fa3b58aa99d2f1a4fe39d460f70b5d7f3feea720a232b9861d55e0f16b501319ab5176b12d699585cb561c2db0aa7ca55dda21bd7cbcd56e679047021b19bb7,\n 0xf574dcac2bce2fc70a39fc286a3d843506f15e5f529c1f8bf2ea7514b1297b7bd3e20fe490359eb1c1c93a376062db09c2b6f443867adb31991e96f50aba0ab2,\n 0xef1fdfb3e81566d2f948e1a05d71e4dd488e857e335c3c7d9d721cad685e353fa9d72c82ed03d675d8b71333935203be3453eaa193e837f1220cbebc84e3d12e,\n 0x4bea6bacad4747999a3f410c6ca923637f151c1f1686104a359e35d7800fffbdbfcd1747253af5a3dfff00b723271a167a56a27ea9ea63f5601758fd7c6cfe57,\n 0xae4faeae1d3ad3d96fa4c33b7a3039c02d66c4f95142a46c187f9ab49af08ec6cffaa6b71c9ab7b40af21f66c2bec6b6bf71c57236904f35fa68407a46647d6e,\n 0xf4c70e16eeaac5ec51ac86febf240954399ec6c7e6bf87c9d3473e33197a93c90992abc52d822c3706476983284a05043517454ca23c4af38886564d3a14d493,\n 0x9b1f5b424d93c9a703e7aa020c6e41414eb7f8719c36de1e89b4443b4ddbc49af4892bcb929b069069d18d2bd1a5c42f36acc2355951a8d9a47f0dd4bf02e71e,\n 0x378f5a541631229b944c9ad8ec165fde3a7d3a1b258942243cd955b7e00d0984800a440bdbb2ceb17b2b8a9aa6079c540e38dc92cb1f2a607261445183235adb,\n 0xabbedea680056f52382ae548b2e4f3f38941e71cff8a78db1fffe18a1b3361039fe76702af69334b7a1e6c303b7652f43698fad1153bb6c374b4c7fb98459ced,\n 0x7bcd9ed0efc889fb3002c6cd635afe94d8fa6bbbebab076120018021148466798a1d71efea48b9caefbacd1d7d476e98dea2594ac06fd85d6bcaa4cd81f32d1b,\n 0x378ee767f11631bad21380b00449b17acda43c32bcdf1d77f82012d430219f9b5d80ef9d1891cc86e71da4aa88e12852faf417d5d9b21b9948bc924af11bd720,\n ]\n\n tau = [\n 0, 8, 16, 24, 32, 40, 48, 56, 1, 9, 17, 25, 33, 41, 49, 57, 2, 10, 18, 26, 34, 42, 50, 58,\n 3, 11, 19, 27, 35, 43, 51, 59, 4, 12, 20, 28, 36, 44, 52, 60, 5, 13, 21, 29, 37, 45, 53, 61, 6, 14,\n 22, 30, 38, 46, 54, 62, 7, 15, 23, 31, 39, 47, 55, 63\n ]\n\n A = [\n 0x8e20faa72ba0b470, 0x47107ddd9b505a38, 0xad08b0e0c3282d1c, 0xd8045870ef14980e,\n 0x6c022c38f90a4c07, 0x3601161cf205268d, 0x1b8e0b0e798c13c8, 0x83478b07b2468764,\n 0xa011d380818e8f40, 0x5086e740ce47c920, 0x2843fd2067adea10, 0x14aff010bdd87508,\n 0x0ad97808d06cb404, 0x05e23c0468365a02, 0x8c711e02341b2d01, 0x46b60f011a83988e,\n 0x90dab52a387ae76f, 0x486dd4151c3dfdb9, 0x24b86a840e90f0d2, 0x125c354207487869,\n 0x092e94218d243cba, 0x8a174a9ec8121e5d, 0x4585254f64090fa0, 0xaccc9ca9328a8950,\n 0x9d4df05d5f661451, 0xc0a878a0a1330aa6, 0x60543c50de970553, 0x302a1e286fc58ca7,\n 0x18150f14b9ec46dd, 0x0c84890ad27623e0, 0x0642ca05693b9f70, 0x0321658cba93c138,\n 0x86275df09ce8aaa8, 0x439da0784e745554, 0xafc0503c273aa42a, 0xd960281e9d1d5215,\n 0xe230140fc0802984, 0x71180a8960409a42, 0xb60c05ca30204d21, 0x5b068c651810a89e,\n 0x456c34887a3805b9, 0xac361a443d1c8cd2, 0x561b0d22900e4669, 0x2b838811480723ba,\n 0x9bcf4486248d9f5d, 0xc3e9224312c8c1a0, 0xeffa11af0964ee50, 0xf97d86d98a327728,\n 0xe4fa2054a80b329c, 0x727d102a548b194e, 0x39b008152acb8227, 0x9258048415eb419d,\n 0x492c024284fbaec0, 0xaa16012142f35760, 0x550b8e9e21f7a530, 0xa48b474f9ef5dc18,\n 0x70a6a56e2440598e, 0x3853dc371220a247, 0x1ca76e95091051ad, 0x0edd37c48a08a6d8,\n 0x07e095624504536c, 0x8d70c431ac02a736, 0xc83862965601dd1b, 0x641c314b2b8ee083,\n ]\n\n def mult_b_A(b):\n c = 0\n for i in range(64):\n if b % 2 == 1:\n c = c ^ A[63-i]\n b = b // 2\n return c\n\n def MSB256(val):\n return val // (2**256)\n\n def int512(msg):\n res = 0\n for i in range(len(msg)):\n res += (2**(8 * i)) * msg[-i-1]\n return res\n\n def S(m):\n 
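# S transform of Streebog: split the 512-bit word into 64 bytes (least significant first), pass each byte through the pi_sharp S-box, and reassemble the substituted bytes at the same positions\n        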
res = 0\n for i in range(64):\n byte = m // (2 ** (8 * i)) % 256\n res += pi_sharp[byte] * (2 ** (8 * i))\n return res\n\n def P(m):\n res = 0\n for i in range(64):\n byte = m // (2 ** (8 * tau[i])) % 256\n res += byte * (2 ** (8 * i))\n return res\n\n def L(m):\n res = 0\n for i in range(8):\n block = m // (2 ** (64 * i)) % (2**64)\n res += mult_b_A(block) * (2 ** (64 * i))\n return res\n\n def X(K, m):\n return K ^ m\n\n def E(K, m):\n res = X(K, m)\n for i in range(2, 14):\n res = L(P(S(res)))\n K = L(P(S(K ^ C[i - 2])))\n res = X(K, res)\n return res\n\n def g(h, m, N):\n return E(L(P(S(h ^ N))), m) ^ h ^ m\n\n IV = 0\n for i in range(64):\n IV += 2 ** (i * 8)\n h = IV\n N = 0\n Sigma = 0\n\n while len(msg) * 8 >= 512:\n m = int512(msg[-512 // 8:])\n h = g(h, m, N)\n N = (N + 512) % (2**512)\n Sigma = (Sigma + m) % (2**512)\n msg = msg[:-512 // 8]\n\n m = 2**(len(msg)*8) + int512(msg)\n h = g(h, m, N)\n N = (N + len(msg) * 8) % (2**512)\n Sigma = (Sigma + m) % (2**512)\n h = g(h, N, 0)\n h = MSB256(g(h, Sigma, 0))\n\n return h.to_bytes(64, 'big')", "def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))", "def _generate_G_from_H(H, variable_weight=False):\n H = np.array(H)\n n_edge = H.shape[1]\n # the weight of the hyperedge\n W = np.ones(n_edge)\n # the degree of the node\n DV = np.sum(H * W, axis=1)\n # the degree of the hyperedge\n DE = np.sum(H, axis=0)\n\n invDE = np.mat(np.diag(np.power(DE, -1)))\n DV2 = np.mat(np.diag(np.power(DV, -0.5)))\n W = np.mat(np.diag(W))\n H = np.mat(H)\n HT = H.T\n\n if variable_weight:\n DV2_H = DV2 * H\n invDE_HT_DV2 = invDE * HT * DV2\n return DV2_H, W, invDE_HT_DV2\n else:\n G = DV2 * H * W * invDE * HT * DV2\n return G", "def demarcate_heap(hgt=self.level, cell_wid=minimum_cell):\n # Number of nodes on bottom is 2^hgt\n max_nodes = int(np.power(2, hgt))\n print (''.center(cell_wid * max_nodes, '*'))", "def __init__(self, is_p1_turn: bool, side_length: int) -> None:\n super().__init__(is_p1_turn)\n self.side_length = side_length\n # ISSUE: what if node is more than 26 --> no need to handle side more than 5\n # construct a list of uppercase and lower case letters\n alph_lst_upper = list(string.ascii_uppercase)\n alph_lst_lower = list(string.ascii_lowercase)\n # alph_lst has a length of 52\n alph_lst = alph_lst_upper + alph_lst_lower\n\n # assign original value for each ley-line\n hori_result = []\n for i in range(side_length + 1):\n hori_result.append(\"@\")\n left_result = []\n for i in range(side_length + 1):\n left_result.append(\"@\")\n right_result = []\n for i in range(side_length + 1):\n right_result.append(\"@\")\n self.hori_result = hori_result\n self.left_result = left_result\n self.right_result = right_result\n\n self.hori_lst = []\n self.left_lst = []\n self.right_lst = []\n\n # construct horizontal ley-lines\n n = 2\n start_index = 0\n end_index = 0\n while n <= side_length + 1:\n end_index = start_index + n\n self.hori_lst.append(alph_lst[start_index:end_index])\n start_index = end_index\n n += 1\n end_index = start_index + side_length\n self.hori_lst.append(alph_lst[start_index:end_index])\n\n # copy hori_lst\n hori_copy = []\n for item in self.hori_lst:\n hori_copy.append(item)\n\n # construct left ley-lines\n for i in range(side_length + 1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) > i:\n temp.append(lst[i])\n self.left_lst.append(temp)\n for i in range(1, side_length + 1):\n self.left_lst[i].append(hori_copy[-1][i - 
1])\n\n # construct right ley-lines\n for i in range(-1, side_length * (-1) - 2, -1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) >= i * (-1):\n temp.append(lst[i])\n self.right_lst.append(temp)\n self.right_lst = self.right_lst[::-1]\n for i in range(side_length):\n self.right_lst[i].append(hori_copy[-1][i])", "def generateMessage(self, frequency):\n\t\t# make sure all entries for GCDC communication are here\n\n\t\t#messages sent at 25 Hz\n\t\tmessage_dict = dict()\n\t\tmessage_dict['header']='header'\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 1 header\n\t\t\t\t\t\t\t\t\t\t\t#might be part of the header?\n\t\tmessage_dict['message_id'] = hash(message_dict['time'])\n\t\tmessage_dict['vehicle_id'] = self.vehicle.id\t\t\t\t\t\t\t\t\t\t\t\t#id 3 Station ID\n\t\tmessage_dict['station_type']=5\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 4 unknown(0), pedestrian(1), cyclist(2),moped(3), motorcycle(4), passengerCar(5), bus(6), lightTruck(7),heavyTruck(8), trailer(9), specialVehicles(10), tram(11),\n\n\n\t\tmessage_dict['vehicle_length'] = self.vehicle.length\t\t\t\t\t\t\t\t\t\t#id 6\n\t\tmessage_dict['vehicle_rear_axle_location']=0\t\t\t\t\t\t\t\t\t\t\t\t\t#id 7\n\t\tmessage_dict['vehicle_width'] = self.vehicle.width\t\t\t\t\t\t\t\t\t\t\t#id 8\n\t\tmessage_dict['controller_type']=self.supervisory.type_controller\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 9\n\t\tmessage_dict['vehicle_response_time_constant']=0\t\t\t\t\t\t\t\t\t\t\t\t#id 10\n\t\tmessage_dict['vehicle_response_time_delay']=0\t\t\t\t\t\t\t\t\t\t\t\t#id 11\n\t\tmessage_dict['reference_position']=0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 12\n\t\t#if hasattr(self.vehicle, 'bodies_dict'):\n\t\t#\tmessage_dict['x'] = self.vehicle.bodies_dict[self.vehicle.id].x\t\t\t\t\t\t\t\t\t#reference positions probably\n\t\t#\tmessage_dict['y'] = self.vehicle.bodies_dict[self.vehicle.id].y\n\t\tmessage_dict['heading']=0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 13\n\t#\t\tmessage_dict['speed'] = self.vehicle.bodies_dict[self.vehicle.id].commands['throttle']\t\t#id 14 speed =velocity\n\t#\tmessage_dict['yaw_rate'] = self.vehicle.yaw\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 15\n\t\tmessage_dict['long_vehicle_acc']=0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 16\n\t\tmessage_dict['desired_long_vehicle_acc']=0\t\t\t\t\t\t\t\t\t\t\t\t\t#id 17\n\t\tmessage_dict['MIO_id']=self.supervisory.MIO_id\t\t\t\t\t\t\t\t\t\t#id 18 Most Iportant Object in front needs to be defined in supervisorymodule\n\t\tmessage_dict['MIO_range']=self.supervisory.MIO_range\t\t\t\t\t\t\t\t#id 19\n\t\tmessage_dict['MIO_bearing']=self.supervisory.MIO_bearing\t\t\t\t\t\t\t#id 20\n\t\tmessage_dict['MIO_range_rate']=self.supervisory.MIO_range_rate\t\t\t\t\t\t#id 21 velocity of MIO\n\t\tmessage_dict['time_headway']=self.supervisory.time_headway\t\t\t\t\t\t\t#id 22 time to vehicle in front\n\t\tmessage_dict['cruise_speed']=self.supervisory.cruise_speed\t\t\t\t\t\t\t#id 23 nominal speed of platoon\n\t\tmessage_dict['travelled_distance_CZ']=self.supervisory.travelled_CZ\t\t\t\t#id 32\n\t\tmessage_dict['new_bwd_pair_partner'] = self.vehicle.new_bwd_pair_partner\n\t\tmessage_dict['new_fwd_pair_partner'] = self.vehicle.new_fwd_pair_partner\n\t\tmessage_dict['platooned_vehicle'] = self.vehicle.platooned_vehicle\n\n\t\tif frequency==1:\n\t\t\t#messages sent with 1Hz\n\t\t\tmessage_dict['vehicle_role']=0\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 5 0 deafault, 6 emergency\n\t\t\tmessage_dict['merge_request_flag']=self.supervisory.merge_flag_request\t\t\t#id 24\n\t\t\tmessage_dict['STOM']=self.supervisory.STOM\t\t\t\t\t\t\t\t\t\t#id 
25\n\t\t\tmessage_dict['merging_flag']=self.supervisory.merging_flag\t\t\t\t\t\t#id 26\n\t\t\tmessage_dict['fwd_pair_partner'] = self.vehicle.fwd_pair_partner\t\t\t\t\t\t\t#id 27\n\t\t\tmessage_dict['bwd_pair_partner'] = self.vehicle.bwd_pair_partner\t\t\t\t\t\t\t#id 28\n\t\t\tmessage_dict['tail_vehicle_flag'] = self.vehicle.tail_vehicle\t\t\t\t\t#id 29\n\t\t\tmessage_dict['head_vehicle_flag'] = self.vehicle.platoon_leader\t\t\t\t\t#id 20\n\t\t\tmessage_dict['platoon_id'] = self.supervisory.platoon_id\t\t\t\t\t\t#id 31\n\t\t\tmessage_dict['intention'] =self.supervisory.intention\t\t\t\t\t\t\t#id 33\n\t\t\tmessage_dict['lane_entering_CZ'] = self.supervisory.lane_entering_CZ\t\t\t#id 34\n\t\t\tmessage_dict['intersection_vehicle_counter'] = self.supervisory.intersection_vehicle_counter #id 35\n\t\t\t#message_dict['pair_acknowledge_flag'] = self.supervisory.pair_acknowledge_flag\t#id 36 #currently not used in GCDC\n\t\t\tmessage_dict['participants_ready'] = True\t\t\t\t\t\t\t\t\t\t\t\t\t#id 41 Not really used\n\t\t\tmessage_dict['start_scenario'] = True\t\t\t\t\t\t\t\t\t\t\t\t\t\t# id 42\n\t\t\tmessage_dict['EoS'] = False\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t#id 43\n\n\t\treturn message_dict", "def TR_algo3(h, vd=2):\n ve = 0\n vd = 2\n p = [0]*N\n for i in range(M-1, -1, -1):\n w = [bit_component(h, i*N+ii) for ii in range(N)]\n #print(i, w)\n w = sum( [wx*2**j for j, wx in enumerate(w)] )\n #print(i, w, gc(w))\n l = gc(w)\n l = T_inv(ve, vd, l)\n for j in range(N):\n p[j] += bit_component(l, j) << i\n ve = ve ^ rotate_left(e(w), vd+1)\n vd = (vd + d(w) + 1) % N\n return p", "def __repr__(self):\n output = \"\"\n output +=\"V:\\n\"\n for row in self.V:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\" \n \n output += \"\\nW:\\n\"\n for row in self.W:\n output += \"\\t\"\n for el in row:\n output += str(el) + \" \" \n output += \"\\n\"\n return output", "def cminfo_compute():\n from hera_mc import cm_sysutils \n h = cm_sysutils.Handling()\n cminfo = h.get_cminfo_correlator()\n snap_to_ant = {}\n ant_to_snap = {}\n for antn, ant in enumerate(cminfo['antenna_numbers']):\n name = cminfo['antenna_names'][antn]\n for pol in cminfo['correlator_inputs'][antn]:\n if pol.startswith('e'):\n e_pol = pol\n if pol.startswith('n'):\n n_pol = pol\n ant_to_snap[ant] = {}\n if e_pol != 'None':\n snapi_e, channel_e = snap_part_to_host_input(cminfo['correlator_inputs'][antn][0])\n ant_to_snap[ant]['e'] = {'host': snapi_e, 'channel': channel_e}\n if snapi_e not in snap_to_ant.keys():\n snap_to_ant[snapi_e] = [None] * 6\n snap_to_ant[snapi_e][channel_e] = name + 'E'\n if n_pol != 'None':\n snapi_n, channel_n = snap_part_to_host_input(cminfo['correlator_inputs'][antn][1])\n ant_to_snap[ant]['n'] = {'host': snapi_n, 'channel': channel_n}\n if snapi_n not in snap_to_ant.keys():\n snap_to_ant[snapi_n] = [None] * 6\n snap_to_ant[snapi_n][channel_n] = name + 'N'\n return snap_to_ant, ant_to_snap", "def __init__(self, h, inp, sent, nll):\n self.h, self.inp, self.sent, self.nll = h, inp, sent, nll", "def _shapeOSCMsg(self, header, ID, mat44):\r\n mat44_str = ''\r\n for elmt in mat44.col: mat44_str = mat44_str + str(elmt.to_tuple(2)[:]) # to tuple allows to round the Vector\r\n mat44_str = mat44_str.replace('(','').replace(')',' ').replace(',','')\r\n osc_msg = header + ' ' + ID + ' ' + mat44_str\r\n return osc_msg", "def _fp32_vnchwconv_process(axis_0_index, h_loop_idx, h_size):\n\n def _fp32_inner_vnchwconv(col_lp_idx, col_size):\n \"\"\"\n inner vnchwconv\n \"\"\"\n\n # move data in\n 
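# source offset (inferred from the tiling): per-core base block_idx * per_core_col_size, plus the current column tile, plus the current row tile, inside the flattened (axis_0, axis_1, axis_2) layout\n            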
in_offset = (block_idx * per_core_col_size + col_lp_idx * max_sub_w_size +\n h_loop_idx * max_sub_h_size * axis_2 +\n axis_0_index * axis_1 * axis_2)\n data_in_info = (h_size, col_size, axis_1, axis_2, in_offset)\n _data_move_in_mc_on_w(tik_inst, ub_input, data_in, data_in_info)\n\n # for this case, data_move will move in one more block\n with tik_inst.new_stmt_scope():\n h_size_temp = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(tik.all(axis_1 > data_size_one_block,\n h_size % data_size_one_block > 0)):\n h_size_temp.set_as(_ceil_div(h_size, data_size_one_block) *\n data_size_one_block)\n with tik_inst.else_scope():\n h_size_temp.set_as(h_size)\n # transpose by vnchwconv\n sub_hw_size = (h_size_temp, col_size)\n _transpose_by_2_vnchwconv(tik_inst, ub_input[ub_offset],\n ub_input, sub_hw_size)\n\n # move data out\n out_offset = ((block_idx * per_core_col_size + col_lp_idx * max_sub_w_size) *\n axis_1 + h_loop_idx * max_sub_h_size +\n axis_0_index * axis_1 * axis_2)\n data_out_info = (h_size, col_size, axis_1, axis_2, out_offset)\n _data_move_out_mc_on_w(tik_inst, data_out, ub_input[ub_offset], data_out_info)\n\n with tik_inst.for_range(0, loop_cnt) as lp_idx:\n _fp32_inner_vnchwconv(lp_idx, max_sub_w_size)\n with tik_inst.if_scope(left_size > 0):\n _fp32_inner_vnchwconv(loop_cnt, left_size)", "def create_cont_constraint_mat_separable(H,v1s,v2s,nSides,nConstraints,nC,\n dim_domain,dim_range,tess):\n if dim_domain != 2:\n raise ValueError\n if dim_range not in [1,2]:\n raise ValueError\n nHomoCoo=dim_domain+1 \n length_Avee = dim_range*nHomoCoo\n L1 = np.zeros((nConstraints/2,nC*nHomoCoo))\n\n \n\n nPtsInSide = 2 # Since, in 2D, the side is always a line joining 2 pts.\n# if nSides != nConstraints/(nPtsInSide*dim_domain):\n# raise ValueError(nSides,nConstraints)\n \n if nSides != nConstraints/(nPtsInSide*dim_range):\n print \" print nSides , nConstraints/(nPtsInSide*dim_range):\"\n print nSides , nConstraints/(nPtsInSide*dim_range)\n ipshell('stop')\n raise ValueError( nSides , (nConstraints,nPtsInSide,dim_range))\n\n \n if nSides != H.shape[0]:\n raise ValueError(nSides,H.shape)\n\n\n# M = nPtsInSide*dim_range\n M = nPtsInSide\n if dim_range == 1:\n raise NotImplementedError\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n # s stands for start\n # e stands for end \n s1 = a*length_Avee \n e1 = s1+nHomoCoo \n s2 = b*length_Avee\n e2 = s2+nHomoCoo \n \n # Constraint 1: \n L[i*M,s1:e1]= v1 \n L[i*M,s2:e2]= -v1 \n # Constraint 2: \n L[i*M+1,s1:e1]= v2 \n L[i*M+1,s2:e2]= -v2 \n \n \n elif dim_range==2:\n for i in range(nSides): \n v1 = v1s[i]\n v2 = v2s[i]\n\n if np.allclose(v1,v2):\n raise ValueError(v1,v2)\n\n\n \n \n \n h = H[i]\n a,b = h.nonzero()[0] # idx for the relevant As \n \n\n # L1 is acting on columns of the following form:\n # [ a_1 b_1 c_1 d_1 a_2 b_2 c_2 d_2 ... 
a_Nc b_Nc c_Nc d_Nc] \n # s stands for start\n # e stands for end \n s1 = a*nHomoCoo\n e1 = s1+nHomoCoo \n s2 = b*nHomoCoo\n e2 = s2+nHomoCoo \n \n \n try: \n # Constraint 1: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v1\n row[s2:e2]=-v1 \n # x component \n L1[i*M]=row \n except:\n ipshell('fail')\n raise \n\n # Constraint 2: \n row = np.zeros(L1.shape[1])\n row[s1:e1]=v2\n row[s2:e2]=-v2 \n # x component \n L1[i*M+1]=row\n \n\n \n \n \n \n \n else:\n raise ValueError(dim_range)\n\n \n return L1", "def _create_message(self, msg):\n head = msg[\"head\"]\n body = msg[\"body\"]\n body = body.format(**self.data)\n length = len(body)\n head = head.format(length=length, **self.data)\n return head + body", "def out(self, printto, what, who=None, how='msg', fromm=None, speed=5):\n\n # convert the data to the encoding\n try:\n what = toenc(what.rstrip())\n except Exception, ex:\n rlog(10, self.name, \"can't output: %s\" % str(ex))\n return\n if not what:\n return\n\n # split up in parts of 375 chars overflowing on word boundaries\n txtlist = splittxt(what)\n size = 0\n\n # send first block\n self.output(printto, txtlist[0], how, who, fromm)\n\n # see if we need to store output in less cache\n result = \"\"\n if len(txtlist) > 2:\n if not fromm:\n self.less.add(printto, txtlist[1:])\n else:\n self.less.add(fromm, txtlist[1:])\n size = len(txtlist) - 2\n result = txtlist[1:2][0]\n if size:\n result += \" \u0002(+%s)\u0002\" % size\n else:\n if len(txtlist) == 2:\n result = txtlist[1]\n\n # send second block\n if result:\n self.output(printto, result, how, who, fromm)", "def generateMessage(fen, certainty, side, visualize_link):\n vals = {} # Holds template responses\n\n # Things that don't rely on black/white to play \n # FEN image link is aligned with screenshot, not side to play\n if fen == '8/8/8/8/8/8/8/8':\n # Empty chessboard link, fen-to-image doesn't correctly identify those\n vals['unaligned_fen_img_link'] = 'http://i.stack.imgur.com/YxP53.gif'\n else:\n vals['unaligned_fen_img_link'] = 'http://www.fen-to-image.com/image/60/%s.png' % fen\n vals['certainty'] = certainty*100.0 # to percentage\n vals['pithy_message'] = getPithyMessage(certainty)\n \n if side == 'b':\n # Flip FEN if black to play, assumes image is flipped\n fen = invert(fen)\n \n inverted_fen = invert(fen)\n\n # Get castling status based on pieces being in initial positions or not\n castle_status = getCastlingStatus(fen)\n inverted_castle_status = getCastlingStatus(inverted_fen)\n\n # Fill out template and return\n vals['fen_w'] = \"%s w %s -\" % (fen, castle_status)\n vals['fen_b'] = \"%s b %s -\" % (fen, castle_status)\n vals['inverted_fen_w'] = \"%s w %s -\" % (inverted_fen, inverted_castle_status)\n vals['inverted_fen_b'] = \"%s b %s -\" % (inverted_fen, inverted_castle_status)\n\n vals['lichess_analysis_w'] = 'https://www.lichess.org/analysis/%s_w_%s' % (fen, castle_status)\n vals['lichess_analysis_b'] = 'https://www.lichess.org/analysis/%s_b_%s' % (fen, castle_status)\n vals['lichess_editor_w'] = 'https://www.lichess.org/editor/%s_w_%s' % (fen, castle_status)\n vals['lichess_editor_b'] = 'https://www.lichess.org/editor/%s_b_%s' % (fen, castle_status)\n\n vals['inverted_lichess_analysis_w'] = 'https://www.lichess.org/analysis/%s_w_%s' % (inverted_fen, inverted_castle_status)\n vals['inverted_lichess_analysis_b'] = 'https://www.lichess.org/analysis/%s_b_%s' % (inverted_fen, inverted_castle_status)\n vals['inverted_lichess_editor_w'] = 'https://www.lichess.org/editor/%s_w_%s' % (inverted_fen, inverted_castle_status)\n 
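# same inverted-board editor link with the side-to-move flag set to black\n    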
vals['inverted_lichess_editor_b'] = 'https://www.lichess.org/editor/%s_b_%s' % (inverted_fen, inverted_castle_status)\n\n vals['visualize_link'] = visualize_link\n \n return MESSAGE_TEMPLATE.format(**vals)", "def on_message(self, client, userdata, msg):\n st = datetime.datetime.fromtimestamp(msg.timestamp).strftime('%Y-%m-%d %H:%M:%S.%f')\n# print st[:-3], \":\", msg.topic, \":\", msg.payload\n\n # Note: Update_display from this function does not work\n if msg.topic == self.mqtt_topic_electricity:\n self.my_gui.update_electricity(float(msg.payload)) # kWh\n\n elif self.mqtt_topic_electricity in msg.topic: # covers /1 /2 ... etc.\n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_electricity_hour(index, float(msg.payload))\n\n # -----------------------------------------------------------------\n elif msg.topic == self.mqtt_topic_water:\n self.my_gui.update_water(int(msg.payload)) # Litter\n\n elif self.mqtt_topic_water in msg.topic: \n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_water_hour(index, int(msg.payload))\n\n # -----------------------------------------------------------------\n elif msg.topic == self.mqtt_topic_gas:\n self.my_gui.update_gas(float(msg.payload)) # m3, 10 Litters/msg\n\n elif self.mqtt_topic_gas in msg.topic:\n index = int(msg.topic.split('/')[-1])\n self.my_gui.update_gas_hour(index, float(msg.payload))\n\n# elif self.mqtt_topic_status == msg.topic:\n# # TODO\n# if \"online\" in msg.payload:\n# print \"A is online\"\n# elif \"offline\" in msg.payload:\n# print \"A is offline\"\n# print st[:-3], \":\", msg.topic, \":\", msg.payload\n\n self.my_gui.update_eur_total()", "def origami_H2_2cyl(w1,h1,t1,w2,h2,t2):\n assert((w2 < w1) and (t1 < w1) and (t2 < w2))\n\n # v for volumes and z for z\n v1 = h1*w1\n v2 = h2*w2\n z1 = (h1-1)*w1 + 1\n z2 = v1 + (h2-1)*w2 + 1\n\n # the horizontal permutation\n x = [None] + range(2,v1+v2+1) + [1]\n for i in range(h1):\n x[(i+1)*w1] = i*w1 + 1\n for i in range(h2):\n x[v1 + (i+1)*w2] = v1 + i*w2 + 1\n\n # the vertical permutation\n y = ([None] +\n range(w1+1,v1+1) + [None]*w1 +\n range(v1+w2+1,v1+v2+1) + [None]*w2)\n\n for i in range(w2):\n # up-left of the first cylinder\n # print \"U1L) z1 + (t1+i)%w1 -> 1+v1+i: \", z1+(t1+i)%w1, 1+v1+i\n y[z1+(t1+i)%w1] = 1+v1+i\n for i in range(w2):\n # up of the second cylinder\n # print \"U2) z2+(t2+i)%w2 -> 1 + (t1+i)%w1: \", z2+(t2+i)%w2, 1+(t1+i)%w1\n y[z2+(t2+i)%w2] = 1+i\n for i in range(w1-w2):\n # up-right of the first cylinder\n # print \"U1R) z1+w2+(t1+i) -> 1+i: \", z1+(w2+t1+i)%w1, 1+w2+i\n y[z1+(w2+t1+i)%w1] = 1+w2+i\n\n return Origami(x[1:],y[1:])", "def genH(self,fp):\n id = 0\n for nm in GetOsekObjects('NM'):\n if(self == nm):\n break\n else:\n id += 1\n fp.write('\\n#define %s %s\\n'%(self.name,id))\n fp.write('#define %s_TYPE NM_%s\\n'%(self.name,self.getValue('TYPE')))\n fp.write('#define %s_tTyp %s\\n'%(self.name,self.getValue('TTYP')))\n fp.write('#define %s_tMax %s\\n'%(self.name,self.getValue('TMAX')))\n fp.write('#define %s_tError %s\\n'%(self.name,self.getValue('TERROR')))\n fp.write('#define %s_tTx %s\\n'%(self.name,self.getValue('TTX')))\n fp.write('#define %s_IDBASE %s\\n'%(self.name,self.getValue('IDBASE')))\n fp.write('#define %s_WINDOWMASK %s\\n'%(self.name,self.getValue('WINDOWMASK')))\n fp.write('#define %s_CONTROLLER %s\\n'%(self.name,self.getValue('CONTROLLER')))", "def hopping(h,name=\"HOPPING.OUT\",reps=0):\n if h.has_eh: raise\n if h.has_spin: (ii,jj,ts) = extract.hopping_spinful(h.intra)\n else: (ii,jj,ts) = 
extract.hopping_spinless(h.intra)\n f = open(name,\"w\") # write file\n for (i,j,t) in zip(ii,jj,ts):\n f.write(str(h.geometry.r[i][0])+\" \")\n f.write(str(h.geometry.r[i][1])+\" \")\n f.write(str(h.geometry.r[j][0])+\" \")\n f.write(str(h.geometry.r[j][1])+\" \")\n f.write(str(t)+\"\\n\")\n f.close()", "def gen_measurement_msg(agent_id,msg):\n\n if type(msg) is list:\n\n meas_msg = []\n\n for m in msg:\n\n new_msg = AgentMeasurement()\n\n # new_msg.type = msg._type.split('/')[1]\n new_msg.header.stamp = rospy.Time.now()\n new_msg.src = agent_id\n\n if m._type == 'offset_etddf/linrelMeasurement':\n new_msg.type = 'rel'\n new_msg.data = [m.x, m.y]\n new_msg.target = int(m.robot_measured.split(\"_\")[1])\n elif m._type == 'offset_etddf/gpsMeasurement':\n new_msg.type = 'abs'\n new_msg.data = [m.x, m.y]\n\n new_msg.status = [1 for x in new_msg.data]\n\n meas_msg.append(new_msg)\n\n return meas_msg\n\n else:\n\n meas_msg = AgentMeasurement()\n\n # meas_msg.type = msg._type.split('/')[1]\n meas_msg.header.stamp = rospy.Time.now()\n meas_msg.src = agent_id\n\n if msg._type == 'offset_etddf/linrelMeasurement':\n meas_msg.type = 'rel'\n meas_msg.data = [msg.x, msg.y]\n new_msg.target = int(m.robot_measured.split(\"_\")[1])\n elif msg._type == 'offset_etddf/gpsMeasurement':\n meas_msg.type = 'abs'\n meas_msg.data = [msg.x, msg.y]\n\n meas_msg.status = [1 for x in meas_msg.data]\n\n return meas_msg", "def I_v_shift_scatter(comp_key,conn):\n \n # get file name/comp_num\n (comp_num,fname) = conn.execute(\"select comp_key,fout from comps\\\n where comp_key = ? and function = 'Iden'\",(comp_key,)).fetchone()\n (hwhm,p_rad) = conn.execute(\"select hwhm,p_rad from Iden_prams \\\n where comp_key = ?\",(comp_num,)).fetchone()\n frame = 15\n fr = 'frame%(#)06d'%{'#':frame}\n # open file/group\n\n F = h5py.File(fname,'r')\n\n\n shift_data = np.sqrt(F[fr][\"x_shift_%(#)07d\"%{'#':comp_num}][:]**2 +\n F[fr][\"y_shift_%(#)07d\"%{'#':comp_num}][:]**2)\n \n I_data = F[fr][\"eccentricity_%(#)07d\"%{'#':comp_num}][:]\n F.close()\n\n fig = lplts.Figure('E','shift mag','I v shift_mag',func = matplotlib.axes.Axes.scatter)\n fig.plot(I_data,shift_data,lab = '(' + str(hwhm) + ',' + str(p_rad)+ ')')\n\n ## stolen from numpy manual\n \n H, (edges) = np.histogramdd( (I_data/np.max(I_data),shift_data), bins=(100, 200))\n print H.shape\n\n # We can now use the Matplotlib to visualize this 2-dimensional histogram:\n\n \n import matplotlib.pyplot as plt\n plt.figure()\n plt.imshow(np.flipud(H),extent=[0,2,0,1])\n # <matplotlib.image.AxesImage object at ...>\n plt.show()\n plt.draw()\n ##", "def print_model_generation(model):\n print('g1 = {} MW'.format(model.g[1].value))\n print('g2 = {} MW'.format(model.g[2].value))", "def final_result(self, hyp, uttid):\n msg = String()\n msg.data = str(hyp.lower())\n rospy.loginfo(msg.data)\n self.pub.publish(msg)", "def final_result(self, hyp, uttid):\n msg = String()\n msg.data = str(hyp.lower())\n rospy.loginfo(msg.data)\n self.pub.publish(msg)", "def generate_huawei_2g_site_level_discrepancies(self):\n engine = create_engine('postgresql://bodastage:password@database/bts')\n vendor_pk = 2\n tech_pk = 1\n schema_name = 'hua_cm_2g'\n\n conn = psycopg2.connect(\"dbname=bts user=bodastage password=password host=database\")\n conn.autocommit = True\n cur = conn.cursor()\n\n # Get MO\n sql = \"\"\"\n SELECT DISTINCT\n t3.name as mo,\n t3.pk as pk,\n t3.affect_level\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN 
managedobjects t3 on t3.pk = t2.parent_pk \n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 4\n \"\"\".format(vendor_pk, tech_pk)\n cur.execute(sql)\n mo_list = cur.fetchall()\n\n for mo in mo_list:\n mo_name, mo_pk, mo_affect_level = mo\n\n # Get parameters\n sql = \"\"\"\n SELECT \n t2.name as pname,\n t2.pk as pk\n FROM \n live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk \n INNER JOIN network_entities t4 on t4.pk = t3.affect_level\n AND t3.vendor_pk = {} AND t3.tech_pk = {}\n AND t3.affect_level = 4\n WHERE\n t3.name = '{}'\n \"\"\".format(vendor_pk, tech_pk, mo_name)\n cur.execute(sql)\n\n parameters = cur.fetchall()\n\n attr_list = [p[0] for p in parameters]\n\n str_param_values = \",\".join([\"t_mo.{0}{1}{0}\".format('\"', p) for p in attr_list])\n str_param_names = \",\".join([\"{0}{1}{0}\".format('\\'', p) for p in attr_list])\n\n # Join all cell level mos with the primary cell mo i.e. GCELL.\n # p_mo for primary MO\n cell_level_join = \"\"\" INNER JOIN {0}.BTS p_mo ON p_mo.\"BTSID\" = t_mo.\"BTSID\" AND p_mo.neid = t_mo.neid \n AND p_mo.module_type = t_mo.module_type \"\"\".format(schema_name)\n\n # Add new entries\n sql = \"\"\"\n INSERT INTO network_audit.baseline_site_parameters \n (node, site, mo, parameter, bvalue, nvalue, vendor, technology, age, modified_by, added_by, date_added, date_modified)\n SELECT TT1.* FROM (\n SELECT\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 1 as age,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1\n LEFT JOIN network_audit.baseline_site_parameters TT2 on TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT2.site is NULL\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Delete old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.* FROM \n network_audit.baseline_site_parameters TT2\n LEFT JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n TRIM(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND 
t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n WHERE\n TT1.site IS NULL\n )\n DELETE FROM network_audit.baseline_site_parameters t1\n WHERE t1.pk IN (SELECT pk from rd)\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)\n\n # Update old entries\n sql = \"\"\"\n WITH rd AS (\n SELECT TT2.pk, TT1.* FROM \n network_audit.baseline_site_parameters TT2\n INNER JOIN \n (\n select\n t8.name as node,\n t7.name as site,\n t3.name as mo,\n t2.name as parameter,\n t1.value as bvalue,\n trim(t4.pvalue) as nvalue,\n t9.name as vendor,\n t10.name as technology,\n 0 as modified_by,\n 0 as added_by,\n date_time as date_added,\n date_time as date_modified\n from live_network.base_line_values t1\n INNER JOIN vendor_parameters t2 on t2.pk = t1.parameter_pk\n INNER JOIN managedobjects t3 on t3.pk = t2.parent_pk\n INNER JOIN live_network.baseline_parameter_config t5 on t5.mo_pk = t3.pk AND t5.parameter_pk = t2.pk\n INNER JOIN (\n SELECT * FROM (\n SELECT\n '{2}' as \"MO\",\n p_mo.\"BTSNAME\" as sitename,\n p_mo.\"varDateTime\" as date_time,\n unnest(array[{0}]) AS pname,\n unnest(array[{1}]) AS pvalue\n FROM\n hua_cm_2g.{2} t_mo\n {3}\n ) TT\n ) t4 on t4.pname = t2.name AND trim(t4.pvalue) != t1.value\n INNER JOIN live_network.sites t7 on t7.name = t4.sitename \n INNER JOIN live_network.nodes t8 on t8.pk = t7.node_pk\n INNER JOIN vendors t9 on t9.pk = t7.vendor_pk\n INNER JOIN technologies t10 ON t10.pk = t7.tech_pk\n ) TT1 ON TT2.node = TT1.node\n AND TT2.site = TT1.site \n AND TT2.mo = TT1.mo\n AND TT2.parameter = TT1.parameter\n AND TT2.bvalue = TT1.bvalue\n AND TT2.nvalue = TT1.nvalue\n )\n UPDATE network_audit.baseline_site_parameters AS nb\n SET \n date_modified = rd.date_added, \n age=DATE_PART('day',AGE(nb.date_added, rd.date_added))\n FROM \n rd \n where \n rd.pk = nb.pk\n \"\"\".format(str_param_names, str_param_values, mo_name, cell_level_join)\n print(sql)\n cur.execute(sql)", "def TwoModeThermalHD(Ns,t,nth,shots):\n \n s1 = (1+1j)*np.zeros(shots)\n s2 = (1+1j)*np.zeros(shots)\n \n\n \n for i in range(shots):\n prog= sf.Program(2)\n \n with prog.context as q:\n \n sf.ops.Thermal(Ns) | q[0] # State preparation\n sf.ops.BSgate() | (q[0],q[1])\n \n sf.ops.ThermalLossChannel(t,nth) | q[0] # Thermal loss channel mimicing target\n \n sf.ops.MeasureHD | q[0] # Het. Msmnt of signal 1\n sf.ops.MeasureHD | q[1] # Het. 
Msmnt of signal 2\n\n # Need to run twice because of bug in the bosonic backend in dealing with repeated HD measurements\n \n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n eng = sf.Engine(\"bosonic\")\n results = eng.run(prog)\n \n \n \n #Collecting the samples\n samples = results.all_samples\n \n #Creating the measurement records\n s1[i] = samples[0][0]\n s2[i] = samples[1][0]\n \n # Interation over number of shots is done, outputing the records\n \n return s1,s2", "def gt_command(self):\n self.write(\n \"@SP\\nA=M-1\\nD=M\\n@NEG1\" + str(\n self.__label_num) + \"\\nD;JLT\\n@POS1\" + str(\n self.__label_num) +\n \"\\nD;JGE\\n(NEG1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@POS2\" + str(\n self.__label_num) + \"\\nD;JGT\\n@CONT\"\n + str(self.__label_num) + \"\\n0;JMP\\n(POS1\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nA=A-1\\nD=M\\n@NEG2\" +\n str(self.__label_num) + \"\\nD;JLT\\n@CONT\" + str(\n self.__label_num) + \"\\n0;JMP\\n(POS2\" + str(\n self.__label_num) + \")\\n@SP\"\n \"\\nA=M-1\\nA=A-1\\nM=-1\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + \"\\n0;JMP\\n(NEG2\" + str(\n self.__label_num) + \")\\n@SP\" +\n \"\\nA=M-1\\nA=A-1\\nM=0\\n@SP\\nM=M-1\\n@ENDLABEL\" + str(\n self.__label_num) + \"\\n0;JMP\\n(CONT\" + str(\n self.__label_num) + \")\\n\"\n \"@SP\\nM=M-1\\nA=M\\nD=M\\n@SP\\nA=M-1\\nD=M-D\\n@TRUE\" + str(\n self.__label_num) + \"\\nD;JGT\\n@SP\\nA=M-1\\nM=0\\n@ENDLABEL\" +\n str(self.__label_num) + \"\\n0;JMP\\n(TRUE\" + str(\n self.__label_num) + \")\\n@SP\\nA=M-1\\nM=-1\\n(ENDLABEL\" +\n str(self.__label_num) + \")\\n\")", "def hmFormMsg(destination, protocol, source, function, start, payload) :\r\n if protocol == constants.HMV3_ID:\r\n start_low = (start & constants.BYTEMASK)\r\n start_high = (start >> 8) & constants.BYTEMASK\r\n if function == constants.FUNC_READ:\r\n payloadLength = 0\r\n length_low = (constants.RW_LENGTH_ALL & constants.BYTEMASK)\r\n length_high = (constants.RW_LENGTH_ALL >> 8) & constants.BYTEMASK\r\n else:\r\n payloadLength = len(payload)\r\n length_low = (payloadLength & constants.BYTEMASK)\r\n length_high = (payloadLength >> 8) & constants.BYTEMASK\r\n msg = [destination, 10+payloadLength, source, function, start_low, start_high, length_low, length_high]\r\n if function == constants.FUNC_WRITE:\r\n msg = msg + payload\r\n type(msg)\r\n return msg\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol", "def produce_13TeV_template(tag_name=\"HKHI\"):\n num_rebin = 1\n file_name = \"inputs/BkgEstimation_Lin/BkgEstimation_NONE_TOPO_PTDEP_\"+tag_name+\"_Lin.root\"\n print \"Input: \", file_name\n fin = ROOT.TFile.Open(file_name, \"read\")\n h_nom = fin.Get(\"bkg_total_gg_full\").Clone(\"bkg_nominal_old\")\n h_nom.Rebin(num_rebin)\n fout = ROOT.TFile.Open(\"hists_input_\"+tag_name+\".root\", \"recreate\")\n\n h_purity_sys = fin.Get(\"bkg_purity_syst_gg_full\").Clone(\"bkg_purity_syst_gg\")\n h_reducible_sys = fin.Get(\"bkg_reducible_syst_gg_full\").Clone(\"bkg_reducible_syst_gg\")\n h_irreducible_sys = fin.Get(\"bkg_irreducible_syst_gg_full\").Clone(\"bkg_irreducible_syst_gg\")\n h_iso_sys = fin.Get(\"bkg_iso_syst_gg_full\").Clone(\"bkg_iso_syst_gg\")\n\n #file_iso = \"isolation_sys/hist.root\"\n #fin2 = ROOT.TFile.Open(file_iso, \"read\")\n #h_iso_sys = fin2.Get(\"bkg_isolation_syst_gg\")\n ## inflat irreducible uncertainty by factor of 10\n # so that it closes to stats uncertainty in data\n sf = 1\n if INFLATE_SYS:\n sf = 10\n\n # after rebinning systematic uncertainties, need to scale 
down,\n # otherwise the uncertainties are inflated.\n h_purity_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_irreducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_reducible_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n h_iso_sys.Rebin(num_rebin).Scale(sf/num_rebin)\n\n ## truncate the histograms to [200, 2000] GeV\n h_nom_new = truncate_hist(h_nom, \"bkg_nominal\")\n h_purity_sys_new = truncate_hist(h_purity_sys, \"h_purity_sys_new\")\n h_irreducible_sys_new = truncate_hist(h_irreducible_sys, \"h_irreducible_sys_new\")\n h_reducible_sys_new = truncate_hist(h_reducible_sys, \"h_reducible_sys_new\")\n h_iso_sys_new = truncate_hist(h_iso_sys, \"h_iso_sys_new\")\n\n #write down sys and nominal\n fout.cd()\n h_nom_new.Write()\n h_purity_sys_new.Write()\n h_reducible_sys_new.Write()\n h_irreducible_sys_new.Write()\n h_iso_sys_new.Write()\n\n h_purity_up, h_purity_down = create_sys_hist(h_nom_new, h_purity_sys_new, \"purity_sys\")\n h_purity_up.Write()\n h_purity_down.Write()\n\n h_red_up, h_red_down = create_sys_hist(h_nom_new, h_reducible_sys_new, \"reducible_sys\")\n h_red_up.Write()\n h_red_down.Write()\n\n h_irred_up, h_irred_down = create_sys_hist(h_nom_new, h_irreducible_sys_new, \"irreducible_sys\")\n h_irred_up.Write()\n h_irred_down.Write()\n\n h_iso_up, h_iso_down = create_sys_hist(h_nom_new, h_iso_sys, \"isolation_sys\")\n h_iso_up.Write()\n h_iso_down.Write()\n\n fin.Close()\n fout.Close()", "def __init__(self, *args):\n moose.HHChannel.__init__(self,*args)\n self.Ek = VKDR\n self.Gbar = GKDR\n self.addField('ion')\n self.setField('ion','K')\n self.Xpower = 1 # This will create HHGate instance xGate inside the Na channel\n #self.Ypower = 0 # This will create HHGate instance yGate inside the Na channel\n ## Below gates get created after Xpower or Ypower are set to nonzero values\n ## I don't anymore have to explicitly create these attributes in the class\n #self.xGate = moose.HHGate(self.path + \"/xGate\")\n #self.yGate = moose.HHGate(self.path + \"/yGate\")\n self.xGate.A.xmin = VMIN\n self.xGate.A.xmax = VMAX\n self.xGate.A.xdivs = NDIVS\n self.xGate.B.xmin = VMIN\n self.xGate.B.xmax = VMAX\n self.xGate.B.xdivs = NDIVS\n \n v = VMIN\n\n for i in range(NDIVS+1):\n mtau = calc_KA_mtau(v)\n self.xGate.A[i] = calc_KA_minf(v)/mtau\n self.xGate.B[i] = 1.0/mtau\n v = v + dv", "def __init__(self, n=1, cpu=.1, bw=10, delay=None,\n max_queue_size=None, **params):\n\n # Initialize topo\n Topo.__init__(self, **params)\n\n # Host and link configuration\n hconfig = {'cpu': cpu}\n lconfig = {'bw': bw, 'delay': delay,\n 'max_queue_size': max_queue_size }\n\n # Create the actual topology\n receiver = self.addHost('receiver')\n\n # Switch ports 1:uplink 2:hostlink 3:downlink\n uplink, hostlink, downlink = 1, 2, 3\n\n # The following template code creates a parking lot topology\n # TODO: Replace the template code to create a parking lot topology for any arbitrary N (>= 1)\n if n < 1: # network must have at least 1 host\n return -1\n\n s = [] # Python list of switches\n h = [] # Python list of hosts\n\n # dynamically add all hosts and switches to network backbone first\n for i in range(n):\n switch_name = 's%s' % (i+1)\n host_name = 'h%s' % (i+1)\n\n s.append( self.addSwitch(switch_name) ) # s[0] is switch1\n h.append( self.addHost(host_name) ) # h[0] is host1\n\n # Wire up clients\n self.addLink(h[i], s[i], port1=0, port2=hostlink, **lconfig)\n \n # link to previous switch\n if i > 0:\n self.addLink(s[i-1], s[i], port1=downlink, port2=uplink, **lconfig)\n\n \n # Wire up receiver to first switch\n 
self.addLink(receiver, s[0], port1=0, port2=uplink, **lconfig)\n\n '''\n # for N = 1\n # Begin: Template code\n s1 = self.addSwitch('s1')\n h1 = self.addHost('h1', **hconfig)\n\n # Wire up receiver\n self.addLink(receiver, s1, port1=0, port2=uplink, **lconfig)\n\n # Wire up clients\n self.addLink(h1, s1, port1=0, port2=hostlink, **lconfig)\n\n # Uncomment the next 8 lines to create a N = 3 parking lot topology\n s2 = self.addSwitch('s2')\n h2 = self.addHost('h2', **hconfig)\n self.addLink(s1, s2,\n port1=downlink, port2=uplink, **lconfig)\n self.addLink(h2, s2,\n port1=0, port2=hostlink, **lconfig)\n\n s3 = self.addSwitch('s3')\n h3 = self.addHost('h3', **hconfig)\n self.addLink(s2, s3,\n port1=downlink, port2=uplink, **lconfig)\n self.addLink(h3, s3,\n port1=0, port2=hostlink, **lconfig)\n \n # End: Template code\n '''", "def _h(W):\r\n # E = slin.expm(W * W)\r\n # h = np.trace(E) - d\r\n M = np.eye(d) + W * W / d\r\n E = np.linalg.matrix_power(M, d - 1)\r\n h = (E.T * M).sum() - d\r\n G_h = E.T * W * 2\r\n return h, G_h", "def xmoe_2d():\n hparams = xmoe_top_2()\n hparams.decoder_layers = [\"att\", \"hmoe\"] * 4\n hparams.mesh_shape = \"b0:2;b1:4\"\n hparams.outer_batch_size = 4\n hparams.layout = \"outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0\"\n hparams.moe_num_experts = [4, 4]\n return hparams", "def transform_ip(self, H): # or update()\n self.vh = H @ self.vertices.T\n self.vh = self.vh.T \n self.va = self.vh[:,:2]", "def SG(self, h, y):\n self.check_sg_weights()\n\n A = self.sg_weights[0] #(n, n)\n B = self.sg_weights[1] #(10, n)\n C = self.sg_weights[2] #(1, n)\n\n delta = np.matmul(h, A) + np.matmul(y, B) + C\n return delta", "def _comp_het_pair_pattern(self,\n gt_types1, gt_nums1,\n gt_types2, gt_nums2,\n gt_phases1, gt_phases2):\n\n # already phased before sending here.\n ret = {'candidates': [], 'priority': 4}\n for kid in self.samples_with_parent:\n if gt_nums1[kid._i] == gt_nums2[kid._i]: continue\n if not (gt_types1[kid._i] == HET and gt_types2[kid._i] == HET): continue\n #if not (gt_phases1[kid._i] and gt_phases2[kid._i]): continue\n if gt_types1[kid.mom._i] == HOM_ALT or gt_types2[kid.dad._i] == HOM_ALT: continue\n mom, dad = kid.mom, kid.dad\n\n kid_phased = gt_phases1[kid._i] and gt_phases2[kid._i]\n dad_phased = gt_phases1[dad._i] and gt_phases2[dad._i]\n mom_phased = gt_phases1[mom._i] and gt_phases2[mom._i]\n\n if kid_phased and dad_phased and (gt_nums1[dad._i] == gt_nums1[kid._i]) and (gt_nums2[dad._i] == gt_nums2[kid._i]):\n continue\n if kid_phased and mom_phased and (gt_nums1[mom._i] == gt_nums1[kid._i]) and (gt_nums2[mom._i] == gt_nums2[kid._i]):\n continue\n\n if kid_phased and dad_phased and mom_phased and gt_types1[dad._i] != gt_types2[dad._i] and gt_types1[mom._i] != gt_types2[mom._i]:\n priority = 1\n\n elif kid_phased and gt_types1[dad._i] != gt_types1[mom._i] and gt_types2[dad._i] != gt_types2[mom._i]:\n # parents are unphased hets at different sites.\n priority = 1\n else:\n priority = 2\n for parent in (kid.mom, kid.dad):\n # unphased het\n if gt_types2[parent._i] == gt_types1[parent._i] == HET:\n priority += 1\n\n ret['candidates'].append(kid)\n ret['priority'] = min(ret['priority'], priority)\n ret['candidate'] = len(ret['candidates']) > 0\n return ret", "def receive_generate_info():\n input_info_tensor = torch.empty(5, dtype=torch.float32, device=torch.cuda.current_device())\n torch.distributed.broadcast(input_info_tensor, 0)\n batch_size = int(input_info_tensor[0].item())\n seq_len = int(input_info_tensor[1].item())\n tokens_to_generate = 
int(input_info_tensor[2].item())\n all_probs = int(input_info_tensor[3].item())\n temperature = float(input_info_tensor[4].item())\n \n context_length_tensor = torch.empty(batch_size, dtype=torch.int64, device=torch.cuda.current_device())\n context_tokens_tensor = torch.empty(batch_size, seq_len, dtype=torch.int64, device=torch.cuda.current_device())\n \n # Send variables to all ranks \n torch.distributed.broadcast(context_length_tensor, 0)\n torch.distributed.broadcast(context_tokens_tensor, 0)\n \n return context_length_tensor, context_tokens_tensor, tokens_to_generate, all_probs, temperature", "def get_v_given_h(self, hidden_minibatch):\n \n assert self.weight_vh is not None\n\n if self.is_top:\n \"\"\"\n Here visible layer has both data and labels. Compute total input for each unit (identical for both cases), \n and split into two parts, something like support[:, :-self.n_labels] and support[:, -self.n_labels:].\n Then, for both parts, use the appropriate activation function to get probabilities and a sampling method\n to get activities. The probabilities as well as activities can then be concatenated back into a normal visible layer.\n \"\"\"\n support = self.bias_v + np.dot(hidden_minibatch, np.transpose(self.weight_vh))\n data_support, label_support = support[:, :-self.n_labels], support[:, -self.n_labels:]\n data_prob = sigmoid(data_support)\n\n summax = np.max(label_support, axis=1)\n summax = np.reshape(summax, summax.shape+(1,)).repeat(np.shape(label_support)[1],axis=-1)\n label_support -= summax\n normalisers = np.exp(label_support).sum(axis=1)\n normalisers = np.reshape(normalisers, normalisers.shape+(1,)).repeat(np.shape(label_support)[1],axis=-1)\n label_prob = np.exp(label_support)/normalisers\n\n v_prob = np.concatenate((data_prob, label_prob), axis=1)\n v_act = np.concatenate((sample_binary(data_prob), label_prob), axis=1)\n else:\n v_prob = sigmoid(self.bias_v + np.dot(hidden_minibatch, np.transpose(self.weight_vh)))\n v_act = sample_binary(v_prob)\n\n return v_prob, v_act", "def process_wifi_com(self, wm):\n print wm.message", "def haplotype(self, ploidy):\n \n i = 0 # counter for ploidy\n final = [] #to store seq\n got_bubble = True\n \n while i < ploidy:\n \n frag = []\n longest_path = []\n\n sorted_node = self.topological_sort()\n # find greatest weight path\n path = self.longest_path(sorted_node)\n # last node in the graph\n curr_max = max(path, key=path.get)\n next_node = ''\n str_temp = ''\n\n # while it is not the source node\n while len(self.prefix[curr_max]) != 0:\n # prev max node\n prev = path[curr_max][1]\n\n # if current node longer than prev and next node\n if len(curr_max) >= len(prev) and len(curr_max) >= len(next_node):\n str_temp = curr_max \n\n # if current node shorter than prev and next node \n elif len(curr_max) < len(prev) and len(curr_max) < len(next_node):\n start = self.suffix[prev][curr_max][2]\n end = self.prefix[next_node][curr_max][1]\n str_temp = curr_max[start:end]\n\n # if current node shorter than prev but longer than the next node \n elif len(curr_max) < len(prev) and len(curr_max) >= len(next_node):\n start = self.suffix[prev][curr_max][2]\n str_temp = curr_max[start:]\n\n # if current node longer than prev but shorter than the next node \n else:\n end = self.prefix[next_node][curr_max][1]\n str_temp = curr_max[:end]\n\n frag.insert(0, str_temp)\n longest_path.insert(0, curr_max)\n next_node = curr_max\n curr_max = prev\n\n\n # if this is the first node\n if len(self.prefix[curr_max]) == 0:\n\n # if current longer than next node\n if 
len(curr_max) > len(next_node):\n frag.insert(0, curr_max)\n next_node = curr_max\n\n else: \n end = self.prefix[next_node][curr_max][1]\n str_temp = curr_max[:end]\n frag.insert(0, str_temp)\n next_node = curr_max\n \n longest_path.insert(0, curr_max) \n\n\n # combine all string\n seq = ''.join(frag)\n final.append(seq)\n \n got_bubble = self.reduce_graph(longest_path, sorted_node)\n if got_bubble is False:\n break\n i += 1\n \n \n return final", "def M2cholM(self):\n Nx = self.Nx\n n = self.n\n Ms_opt = []\n cholMs = []\n for k in range(Nx):\n Mk = np.linalg.inv(self.Ws_opt[k])\n cholMk = np.linalg.cholesky(Mk)\n cholMk = cholMk.T # upper triangular\n cholMk_vec = np.zeros(int(n*(n+1)/2)) \n for i in range (n):\n j = (n-1)-i;\n di = np.diag(cholMk,j)\n cholMk_vec[int(1/2*i*(i+1)):int(1/2*(i+1)*(i+2))] = di\n Ms_opt.append(Mk)\n cholMs.append(cholMk_vec)\n self.Ms_opt = Ms_opt\n self.cholMs = np.array(cholMs)\n pass", "def connect_tx(self, M1_track):\n # the first pmos drain to Vdd\n for i in range(len(self.pmos.active_contact_positions)):\n contact_pos = self.pmos_position1 + self.pmos.active_contact_positions[i]\n if i % 2 == 0:\n correct = self.pmos.active_contact.second_layer_position.scale(1,0) \n drain_posistion = contact_pos + correct \n height = self.vdd_position.y - drain_posistion.y\n self.add_rect(layer=\"metal1\",\n offset=drain_posistion,\n width=drc[\"minwidth_metal1\"],\n height=height)\n else:\n # source to pmos2\n correct = (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(self.pmos.active_contact.second_layer_width,\n 0).scale(0.5,0))\n source_position = contact_pos + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n\n # the second pmos\n for i in range(len(self.pmos.active_contact_positions)):\n if i % 2 == 0:\n # source to pmos2\n pmos_active =self.pmos_position2+self.pmos.active_contact_positions[i]\n correct= (self.pmos.active_contact.second_layer_position.scale(1,0)\n + vector(0.5 * self.pmos.active_contact.second_layer_width,0))\n source_position = pmos_active + correct\n mid = [self.pmos_position2.x, M1_track]\n self.add_path(\"metal1\", [source_position, mid])\n # two nmos source to gnd\n source_posistion1 = (self.nmos_position1\n + self.nmos.active_contact_positions[0]\n + self.nmos.active_contact.second_layer_position.scale(1,0))\n height = self.gnd_position.y - source_posistion1.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion1,\n width=drc[\"minwidth_metal1\"],\n height=height)\n\n source_posistion2 = (self.nmos_position2\n + self.nmos.active_contact_positions[1]\n + self.nmos.active_contact.second_layer_position.scale(1,0)) \n height = self.gnd_position.y - source_posistion2.y\n self.add_rect(layer=\"metal1\",\n offset=source_posistion2,\n width=drc[\"minwidth_metal1\"],\n height=height)", "def build_G_block(self,B_hat):\n \n eye = Variable(torch.eye(self.num_u))\n eye = self.vari_gpu(eye)\n G1 = torch.cat((eye, -eye), 0)\n G2 = torch.cat((B_hat, -B_hat), 0)\n # print(self.B_hat)\n # print(G.size())\n return G1,G2", "def encode(self, G, v, X, L, W0, W1, W2, W3, U1, U2, U3):\n h = [None] * (L+1)\n h[0] = np.zeros((L + 1, len(X)))\n for i in range(1, L+1):\n h[i] = np.zeros((L + 1, self.embedding_dimension))\n\n \"\"\"hN - embeddings do neighborhood dos nodes nas camadas 2..L (tem a mesma dimensão por uma questao de simplicidade,\n os 2 primeiros elementos vão ficar a 0) \"\"\"\n hN = np.zeros((L + 1, self.embedding_dimension))\n\n h[0] = np.transpose(X)\n\n self.H[0][v] = 
h[0]\n\n for node in range(self.nNodes):\n self.H[1][node] = ed.ReLU(np.matmul(W0, np.transpose(self.H[0][node])))\n if self.H[1][node].any(): # se nao for um vetor de zeros\n self.H[1][node] = self.H[1][node] / la.norm(self.H[1][node], 2)\n\n h[1] = self.H[1][v]\n\n for l in range(2, L + 1):\n for node in range(self.nNodes):\n \"\"\"AGGREGATE\"\"\"\n self.HN[l, node] = self.aggregateNeighborhood(G, node, G.get_neighbors(node), l)\n \"\"\"COMBINE\"\"\"\n self.H[l][node] = self.GRUCell(self.H[l - 1][node], self.HN[l, node], W1, W2, W3, U1, U2, U3)\n\n self.H[l][v] = self.H[l][v] / la.norm(self.H[l][v], 2)\n h[l] = self.H[l][v]\n\n \"\"\"z sera o embedding final, obtido atraves da funcao maxpool\"\"\"\n z = self.maxPool(h[1:], self.embedding_dimension)\n return [z]", "def encode_node_with_children(self, value, leftH, leftC, rightH, rightC):\n# print(\"value\", value)\n# print(\"leftH\", leftH)\n# print(\"leftC\", leftC)\n# print(\"rightH\", rightH)\n# print(\"rightC\", rightC)\n newH, newC = self.tree_lstm(value, [leftH, rightH], [leftC, rightC])\n# print(\"VALUE\", value)\n# print(\"NEWH\", newH.shape, newH)\n return newH, newC", "def add(self, node1, node2, w):\r\n\r\n self.graph[node1].add(node2 + ',' + str(w))\r\n self.graph[node2].add(node1 + ',' + str(w))", "def operations(h, w):\r\n A=np.random.random([h,w])\r\n B=np.random.random([h,w])\r\n s=A+B\r\n return A,B,s\r\n raise NotImplementedError", "def send(self, msg):\r\n\r\n # don't need to handle barrier messages\r\n if not hasattr(msg, 'command'):\r\n return\r\n\r\n subcmd = OvsSender.subcmds[msg.command]\r\n \r\n\r\n # TODO: this is different for remote switches (ie, on physical network)\r\n dest = msg.switch.name\r\n\r\n params = []\r\n if msg.match.nw_src is not None:\r\n params.append(\"nw_src={0}\".format(msg.match.nw_src))\r\n if msg.match.nw_dst is not None:\r\n params.append(\"nw_dst={0}\".format(msg.match.nw_dst))\r\n if msg.match.dl_src is not None:\r\n params.append(\"dl_src={0}\".format(msg.match.dl_src))\r\n if msg.match.dl_dst is not None:\r\n params.append(\"dl_dst={0}\".format(msg.match.dl_dst))\r\n if msg.match.dl_type is not None:\r\n params.append(\"dl_type={0}\".format(msg.match.dl_type))\r\n\r\n params.append(\"priority={0}\".format(msg.priority))\r\n actions = [\"flood\" if a == OFPP_FLOOD else str(a) for a in msg.actions]\r\n\r\n if msg.command == OFPFC_ADD:\r\n params.append(\"action=output:\" + \",\".join(actions))\r\n\r\n paramstr = \",\".join(params)\r\n cmd = \"{0} {1} {2} {3}\".format(OvsSender.command,\r\n subcmd,\r\n dest,\r\n paramstr)\r\n ret = os.system(cmd)\r\n return ret", "def lworker(self):\n\t\tfor k,v in self.data().items():\n\t\t\ttry:\n\t\t\t\tif v['color'] == 'blue':\n\t\t\t\t\tself.l.append('\"{}\" -> \"{}\" [color=\"{}\", penwidth=3]'.format(str(v.get(\"ppid\")), str(k), str(v.get(\"color\"))))\n\t\t\t\telse:\n\t\t\t\t\tself.l.append('\"{}\" -> \"{}\" [color=\"{}\"]'.format(str(v.get(\"ppid\")), str(k), str(v.get(\"color\"))))\n\t\t\texcept:\n\t\t\t\tself.l.append('\"{}\" -> \"{}\"'.format(str(v.get(\"ppid\")), str(k)))", "def mkMsg(self):\n # getting the version of project_coords\n project_coords_cmd = 'project_coords --version'\n outp = popen2.Popen4(project_coords_cmd)\n outpline = outp.fromchild.readlines()\n pcoorVer = outpline[0].split()[-1]\n \n self.meta = {}\n self.meta['module']= []\n self.meta['meta'] = []\n self.meta['input'] = []\n self.meta['output']= []\n self.meta['errorlist'] = []\n \n 
self.meta['module'].append(('module','name='+self.modName,'version='+__version__,'dataset='+self.obsName))\n self.meta['module'].append(('root',self.root))\n self.meta['meta'].append(('meta',))\n self.meta['meta'].append(('depend',))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','python'))\n self.meta['meta'].append(('version',pyversion.split()[0]))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','pyfits'))\n self.meta['meta'].append(('version',pyfits.__version__.split()[0]))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','project_coords'))\n self.meta['meta'].append(('version',pcoorVer))\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name','Guide Star Catalog'))\n self.meta['meta'].append(('version',_URL_.split(\"/\")[-1].split(\"q\")[0]))\n \n # SExtractor info\n sub = subprocess.Popen(['sex', '--version'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, close_fds=True)\n outp = sub.stdout.readlines()\n name = outp[0].split()[0]\n ver = outp[0].split()[2]\n self.meta['meta'].append(('pkg',))\n self.meta['meta'].append(('name',name))\n self.meta['meta'].append(('version',ver))\n cmdline1 = 'sex fitsfile -c self.InParFileName'\n self.meta['meta'].append(('commandline',cmdline1))\n del outp,sub,name,ver\n \n if self.errorList:\n self.meta['errorlist'].append(('errorlist',))\n for pkg,err in self.errorList:\n self.meta['errorlist'].append(('erroritem',err,'frompkg='+pkg))\n \n # input section\n self.meta['input'].append(('input',))\n for f in self.inputList:\n if string.find(f,\"_asn\") == -1:\n self.meta['input'].append(('file','type=image/x-fits'))\n self.meta['input'].append(('name',os.path.join(\"Images\",f)))\n else:\n self.meta['input'].append(('file','type=image/x-fits'))\n self.meta['input'].append(('name',os.path.join(\"Images\",f)))\n \n # output section\n if self.outputList:\n self.meta['output'].append(('output',))\n for f in self.outputList.keys():\n if string.find(f,\".xml\") == -1:\n self.meta['output'].append(('file','type=image/x-fits'))\n self.meta['output'].append(('name',os.path.join(\"Images\",f)))\n for pred in self.outputList[f]:\n self.meta['output'].append(('predecessor',os.path.join(\"Images\",pred)))\n else:\n self.meta['output'].append(('file','type=text/xml'))\n self.meta['output'].append(('name',os.path.join(\"Images\",f)))\n for pred in self.outputList[f]:\n self.meta['output'].append(('predecessor',os.path.join(\"Images\",pred)))\n \n # pass this dictionary to the class pMessage...\n msgFile = os.path.join(self.messagedir,self.modName+\"_module.xml\")\n mmsg = pMessage(self.meta)\n mmsg.writeMsg(msgFile)\n return", "def lstmcell_grad_h(input, hx, cx, w_ih, w_hh, b_ih, b_hh, dh, dc, target=\"cce\"):\n # things from fwd\n batch, input_size = get_shape(input)\n _, hidden_size = get_shape(hx)\n xh = akg.topi.concatenate((hx, input), 1)\n whl = [w_ih, w_hh]\n W = Concat(whl, 1) # [4*hidden_size, input_size+hidden_size]\n\n gates = dense(input, w_ih, b_ih, True) + dense(hx, w_hh, b_hh, True)\n\n ingate_in, forgetgate_in, cellgate_in, outgate_in = Split(gates, 4, 1)\n\n ingate = sigmoid(ingate_in)\n forgetgate = sigmoid(forgetgate_in)\n cellgate = Tanh(cellgate_in)\n outgate = sigmoid(outgate_in)\n cy = (forgetgate * cx) + (ingate * cellgate)\n tanh_cy = Tanh(cy)\n #hy = outgate * tanh_cy\n\n # starts bwd\n # head * dh/do shape [n,]\n doutgate = dh * tanh_cy\n doutgate_in = outgate * (1 - outgate) * doutgate\n kk = akg.tvm.reduce_axis((0, batch))\n dWo = 
akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk, j] * doutgate_in(kk, i), axis=kk), name=\"dWo\")\n\n dtanh_cy = dh * outgate\n dc = (1 - tanh_cy * tanh_cy) * dtanh_cy\n\n dingate = cellgate * dc\n dingate_in = ingate * (1 - ingate) * dingate\n kk3 = akg.tvm.reduce_axis((0, batch))\n dWi = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk3, j] * dingate_in(kk3, i), axis=kk3), name=\"dWi\")\n\n dforgetgate = dc * cx\n dforgetgate_in = forgetgate * (1 - forgetgate) * dforgetgate\n kk2 = akg.tvm.reduce_axis((0, batch))\n dWf = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk2, j] * dforgetgate_in(kk2, i), axis=kk2), name=\"dWf\")\n\n dcellgate = ingate * dc\n dcellgate_in = (1 - cellgate * cellgate) * dcellgate\n kk4 = akg.tvm.reduce_axis((0, batch))\n dWc = akg.tvm.compute((hidden_size, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(xh[kk4, j] * dcellgate_in(kk4, i), axis=kk4), name=\"dWc\")\n\n dW = akg.topi.concatenate((dWi, dWf, dWc, dWo))\n\n db = akg.topi.concatenate((dingate_in, dforgetgate_in, dcellgate_in, doutgate_in), 1)\n\n kk5 = akg.tvm.reduce_axis((0, 4 * hidden_size))\n dxh = akg.tvm.compute((batch, hidden_size + input_size), lambda i, j:\n akg.tvm.sum(W[kk5, j] * db[i, kk5], axis=kk5), name=\"dxh\")\n dhx = akg.tvm.compute((batch, hidden_size), lambda i, j: dxh[i, j], name=\"dhx\")\n dx = akg.tvm.compute((batch, input_size), lambda i, j: dxh[i, j + hidden_size], name=\"dx\")\n\n dcx = forgetgate * dc\n\n dw_ih = akg.tvm.compute(w_ih.shape, lambda i, j: dW[i, j])\n #dw_hh = akg.tvm.compute(w_hh.shape, lambda i, j: dW[i, j + input_size])\n\n bhr = akg.tvm.reduce_axis((0, batch))\n\n db_ih = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bhr], axis=bhr), name=\"dbih\")\n\n bir = akg.tvm.reduce_axis((0, batch))\n\n db_hh = akg.tvm.compute((4 * hidden_size,), lambda i: akg.tvm.sum(db[i, bir], axis=bir), name=\"dbhh\")\n\n return dw_ih, w_hh, db_ih, db_hh, dcx, dhx, dx", "def _transpose_by_2_vnchwconv(tik_inst, dst, src, sub_hw_size):\n\n # whether the sub_h_size is block align or not should be decided before transferring in\n sub_h_size, sub_w_size = sub_hw_size\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n w_block_cnt = _ceil_div(sub_w_size, data_size_one_block)\n fp16_src = src.reinterpret_cast_to(\"float16\")\n fp16_dst = dst.reinterpret_cast_to(\"float16\")\n fp16_data_one_block = _get_elment_cnt_one_block(\"float16\")\n # vnchwconv get two bytes per time\n if src.dtype.lower() in (\"float32\", \"int32\", \"uint32\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size * 2\n elif src.dtype.lower() in (\"float16\", \"int16\", \"uint16\"):\n vnc_one_line_len = w_block_cnt * data_size_one_block * sub_h_size\n else:\n error_detail = \"not support the dtype\"\n error_manager_vector.raise_err_two_input_dtype_invalid(\"transpose_d\", \"in_dtype\",\n \"dst_dtype\", error_detail)\n\n # do 16hc to hc16 transfer\n src_addr_list = [fp16_src[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(1)\n dst_stride.set_as(16)\n 
tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)\n\n # do hc16 to ch16 transfer\n with tik_inst.if_scope(sub_h_size > sub_w_size):\n with tik_inst.for_range(0, sub_w_size) as w_size_idx:\n tik_inst.data_move(\n fp16_src[w_size_idx * sub_h_size * fp16_data_one_block * 2],\n fp16_dst[w_size_idx * fp16_data_one_block * 2],\n 0, sub_h_size, 2, (w_block_cnt * data_size_one_block - 1) * 2, 0)\n with tik_inst.else_scope():\n with tik_inst.for_range(0, sub_h_size) as h_size_idx:\n tik_inst.data_move(\n fp16_src[h_size_idx * fp16_data_one_block * 2],\n fp16_dst[h_size_idx * w_block_cnt * data_size_one_block * fp16_data_one_block * 2],\n 0, sub_w_size, 2, 0, (sub_h_size - 1) * 2)\n\n # do ch16 to 16ch transfer\n src_addr_list = [fp16_src[fp16_data_one_block * i] for i in ADDR_IDX_LIST]\n dst_addr_list = [fp16_dst[vnc_one_line_len * i] for i in ADDR_IDX_LIST]\n repeat_cnt = _ceil_div(vnc_one_line_len, fp16_data_one_block)\n with tik_inst.new_stmt_scope():\n src_stride = tik_inst.Scalar(\"int64\")\n dst_stride = tik_inst.Scalar(\"int64\")\n with tik_inst.if_scope(repeat_cnt == 1):\n src_stride.set_as(0)\n dst_stride.set_as(0)\n with tik_inst.else_scope():\n src_stride.set_as(16)\n dst_stride.set_as(1)\n tik_inst.vnchwconv(False, False,\n dst_addr_list, src_addr_list,\n repeat_cnt, dst_stride, src_stride)", "def send_node_props(self, host_info):\n se = get_se()\n version = get_version()\n name = host_info.get_hostname()\n unique_id = '%s:Pool:%s' % (se, name)\n parent_id = \"%s:SE:%s\" % (se, se)\n\n sa = StorageElement.StorageElement()\n sar = StorageElementRecord.StorageElementRecord()\n sa.UniqueID(unique_id)\n sa.Name(name)\n sa.SE(se)\n sa.SpaceType(\"Pool\")\n sa.Implementation(XRD_NAME)\n sa.Version(version)\n sa.Status(XRD_STATUS)\n sa.ParentID(parent_id)\n sa.Timestamp(timestamp)\n sar.Timestamp(timestamp)\n sar.UniqueID(unique_id)\n sar.MeasurementType(\"raw\")\n sar.StorageType(\"disk\")\n sar.TotalSpace(1024*host_info.get_total_kb())\n sar.FreeSpace(1024*host_info.get_total_free_kb())\n sar.UsedSpace(1024*host_info.get_total_used_kb())\n Gratia.Send(sa)\n Gratia.Send(sar)" ]
[ "0.54044586", "0.5386834", "0.5290445", "0.51632214", "0.51632214", "0.51127476", "0.51023054", "0.50814384", "0.50715846", "0.50035816", "0.499038", "0.4964276", "0.4964276", "0.4964276", "0.49613354", "0.49603233", "0.49329245", "0.48807725", "0.48755825", "0.48755825", "0.4848897", "0.48343876", "0.48325709", "0.4828111", "0.48240665", "0.48137748", "0.48127112", "0.48031512", "0.47873393", "0.4786016", "0.4782685", "0.4782598", "0.47806707", "0.47720563", "0.4742033", "0.4727017", "0.47267538", "0.4715978", "0.47052526", "0.46957964", "0.4681442", "0.46733597", "0.4672464", "0.4664903", "0.46557137", "0.46547627", "0.4647065", "0.46470034", "0.46310353", "0.4628238", "0.46277374", "0.46239316", "0.46196467", "0.46177962", "0.4612847", "0.46120614", "0.46080798", "0.4600788", "0.4598885", "0.45917338", "0.45849034", "0.4581566", "0.45693582", "0.4568236", "0.4567522", "0.45673773", "0.45650467", "0.45633084", "0.45624852", "0.45587924", "0.45587924", "0.45544326", "0.4546075", "0.4542145", "0.45341182", "0.4530268", "0.45278808", "0.45274502", "0.45116374", "0.45109996", "0.4510299", "0.45050687", "0.45021403", "0.45007765", "0.4500703", "0.4499831", "0.4498469", "0.44948637", "0.44948483", "0.4491482", "0.44863716", "0.44846177", "0.44794825", "0.44754565", "0.4475147", "0.44707146", "0.44660938", "0.4464237", "0.44627994", "0.4461934" ]
0.58693963
0
for n graph instances, given an edge index number, this connected_msgs_list contains all connected edges' msgs, say m connected edges
def aggregate_msgs(self, connected_msgs_list):
    msg_num = len(connected_msgs_list)
    agg_msg = connected_msgs_list[0]
    for i in range(1, msg_num):
        agg_msg += connected_msgs_list[i]
    if self.msg_aggrgt == 'AVG':
        return agg_msg / msg_num
    elif self.msg_aggrgt == 'SUM':
        return agg_msg
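A minimal usage sketch of aggregate_msgs above, assuming the messages support `+` and `/` (plain floats here for simplicity); the MsgAggregator host class and the sample values are hypothetical stand-ins, not part of the dataset record:

class MsgAggregator:
    """Hypothetical host class for the aggregate_msgs method above."""

    def __init__(self, msg_aggrgt='AVG'):
        # 'AVG' averages the m connected-edge messages, 'SUM' adds them up;
        # any other mode falls through and returns None, as in the original
        self.msg_aggrgt = msg_aggrgt

    def aggregate_msgs(self, connected_msgs_list):
        msg_num = len(connected_msgs_list)
        agg_msg = connected_msgs_list[0]
        for i in range(1, msg_num):
            agg_msg += connected_msgs_list[i]
        if self.msg_aggrgt == 'AVG':
            return agg_msg / msg_num
        elif self.msg_aggrgt == 'SUM':
            return agg_msg

# three messages arriving on the edges connected to one node
msgs = [1.0, 3.0, 5.0]
print(MsgAggregator('AVG').aggregate_msgs(msgs))  # 3.0
print(MsgAggregator('SUM').aggregate_msgs(msgs))  # 9.0

# caveat: with mutable messages such as numpy arrays, agg_msg aliases
# connected_msgs_list[0], so `agg_msg += ...` mutates the first message
# in place; pass copies (e.g. connected_msgs_list[0].copy()) in that case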
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_messages(j, i):\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n for k in j_neighbors_except_i: # No effect when j_neighbors_except_i is empty []\n collect_messages(k, j)\n send_message(j, i, j_neighbors_except_i)", "def distribute_messages(i, j):\n i_neighbors_except_j = [k for k in edges[i] if k != j]\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n send_message(i, j, i_neighbors_except_j)\n for k in j_neighbors_except_i:\n distribute_messages(j, k)", "def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])", "def connected_components(self) -> List[list]:\n for n in self.dw_graph.get_all_v().values():\n n.distance=0.0\n mega_list = []\n for n in self.dw_graph.get_all_v().values():\n if n.distance!=-10:\n mega_list.append(self.connected_component(n.node_id))\n return mega_list", "def edge_maker(novel_sent_tagged, combined_counts):\n edge_list = []\n \n for i in range(len(novel_sent_tagged)):\n persons = combine_persons(novel_sent_tagged[i])\n edge_list.extend(connect_persons(persons, combined_counts))\n \n return(edge_list)", "def _get_sorted_by_n_connections(m):\n small = nx.Graph()\n for k, v in m.items():\n small.add_edge(k[0], k[1])\n return sorted(small.adj, key=lambda x: len(small[x])), small", "def connected_component(self):\n t1 = datetime.datetime.now()\n nodes = set(x.hex for x in self.agents)\n result = []\n while nodes:\n node = nodes.pop()\n # This set will contain the next group of nodes connected to each other.\n group = {node}\n # Build a queue with this node in it.\n queue = [node]\n # Iterate the queue.\n # When it's empty, we finished visiting a group of connected nodes.\n while queue:\n # Consume the next item from the queue.\n node = queue.pop(0)\n # Fetch the neighbors.\n neighbors = set(x for x in node.fon if x.is_occupied == 1)\n # Remove the neighbors we already visited.\n neighbors.difference_update(group)\n # Remove the remaining nodes from the global set.\n nodes.difference_update(neighbors)\n # Add them to the group of connected nodes.\n group.update(neighbors)\n # Add them to the queue, so we visit them in the next iterations.\n queue.extend(neighbors)\n\n # Add the group to the list of groups.\n result.append(len(group))\n td = datetime.datetime.now() - t1\n print(\"calculated {} connected components in {} seconds\".format(len(result),td.total_seconds()))\n return len(result), np.histogram(result, self.cluster_hist_breaks)[0]", "def Adj(self, vertex_name: n) -> list:\n return self._graph[vertex_name].get_connections()", "def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com", "def connectivity_matrix(self):\n # TODO: make this more memory efficient by ordering i,j in code when needed.\n temp = []\n for i in range(self.n_atom):\n for j in range(i+1, self.n_atom):\n if self.bond(i, j):\n temp.append([i+1, j+1])\n self.connect = np.asarray(temp)", "def get_neighbours(self):\n return []", "def add_pconn(self):\n self.use_pconn= True\n self.pconn = []\n for i,c in enumerate(self.conn):\n atoms_pconn = []\n atoms_image = []\n for ji, j in enumerate(c):\n # If an atom or vertex is connected to another one multiple 
times (in an image), this\n # will be visible in the self.conn attribute, where the same neighbour will be listed\n # multiple times.\n # Sometimes, the distances are a bit different from each other, and in this case, we\n # have to increase the threshold, until the get_distvec function will find all imgis.\n n_conns = c.count(j)\n t = 0.01\n while True:\n d,r,imgi = self.get_distvec(i,j,thresh=t)\n t += 0.01\n if n_conns == len(imgi):\n break\n if len(imgi) == 1:\n # only one neighbor .. all is fine\n atoms_pconn.append(images[imgi[0]])\n atoms_image.append(imgi[0])\n else:\n # we need to assign an image to each connection\n # if an atom is connected to another atom twice this means it must be another\n # image\n for ii in imgi:\n # test if this image is not used for this atom .. then we can use it\n if atoms_image.count(ii)==0:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n else:\n # ok, we have this image already\n use_it = True\n #print(c, \"=>\", j)\n #print(atoms_image)\n for k, iii in enumerate(atoms_image):\n #print('k',k)\n if (iii == ii) and (c[k] == j): use_it=False\n if use_it:\n atoms_image.append(ii)\n atoms_pconn.append(images[ii])\n self.pconn.append(atoms_pconn)\n #if len(atoms_pconn) != len(c): print(\"AOSUHDAPUFHPOUFHPWOUFHPOUDHSPUODHASIUDHAUSIDHSD\")\n return\n\n # 'na',lower(label),xyz,i,j)", "def connected_components(self) -> int:\n # visited = set()\n def get_component(vert: Tuple[int, int]) -> Set[Tuple[int, int]]:\n \"\"\" \"\"\"\n nonlocal visited\n visited.add(vert)\n if graph.vertices[vert]:\n for neighbor in graph.vertices[vert]:\n if neighbor not in visited:\n visited.add(neighbor)\n neighbor_components = get_component(neighbor)\n visited = visited.union(neighbor_components)\n else:\n continue\n\n return visited\n else:\n return visited\n\n components: List[Set[Tuple[int, int]]] = list()\n for vertex in graph.vertices.keys():\n visited: Set[Tuple[int, int]] = set()\n component = get_component(vertex)\n if component not in components:\n components.append(component)\n else:\n continue\n \n return len(components)", "def build_network(self):\n\n\n logits_list = []\n for dn in self.find_datanodes():\n\n if len(dn.receives_from) == 0: continue\n\n logits = 0\n for rf in dn.receives_from:\n logits += rf.get_tensors(rf.connect_backwards())[0]\n\n logits_list.append(logits)\n\n return logits_list", "def _get_nodes(self):\n viewpoint = \"shiva_{}\".format(cherrypy.session[\"id\"])\n messages_db = self.mongo[viewpoint][\"messages\"]\n people_db = self.mongo[viewpoint][\"people\"]\n #\n senders = messages_db.distinct(\"sender\")\n owner_id = cherrypy.session[\"id\"]\n nodes = list()\n for sender in senders:\n person = people_db.find_one({\"id\": sender})\n if person is None:\n name = \"id{}\".format(sender)\n else:\n name = person[\"display_name\"]\n records = list(messages_db.aggregate([{\n \"$match\": {\n \"$or\": [\n {\"sender\": owner_id, \"receiver\": sender},\n {\"sender\": sender, \"receiver\": owner_id}\n ]\n }\n }, {\"$group\": {\"_id\": None, \"count\": {\"$sum\": 1}}}]))\n if not records:\n records = 0\n else:\n records = records[0][\"count\"]\n info = \"Total records: {}\".format(records)\n history_link = \"/vk/read?id={}\".format(sender)\n statistics_link = \"#\"\n if records > 0:\n nodes.append({\n \"id\": sender,\n \"name\": name,\n \"info\": info,\n \"records\": records,\n \"history_link\": history_link,\n \"statistics_link\": statistics_link\n })\n #\n return nodes", "def neighbours(self):\n return [x.node for x in self.edges]", "def 
_connect_neighbours(self):\n for prev in self.unvisited:\n for next in self.unvisited:\n if (next[0] == prev[0] and next[1] == prev[1] + 1) or (next[0] == prev[0] + 1 and next[1] == prev[1]):\n self.graph.addEdge((prev, next))\n self.visited.add(prev)\n self.visited.add(next)\n if self._find_intersection():\n self.intersection.append(prev)\n self.intersection.append(next)", "def buildGraph(M: List[List[int]]) -> List:\n l = len(M)\n G = [Node(i) for i in range(l)]\n for i in range(len(M)):\n for j in range(len(M)):\n if M[i][j]:\n G[i].add_adjacent(G[j])\n return G", "def construct_network_from_neighbours_list(related_characters: list):\n graph = nx.Graph()\n for edge in related_characters:\n sentiment = edge[1]\n color = ''\n if sentiment == 'Positive':\n color = 'g'\n elif sentiment == 'Negative':\n color = 'r'\n elif sentiment == 'Neutral':\n color = 'k'\n # graph.add_node(edge[0][0], popularity=\n graph.add_edge(edge[0][0], edge[0][1], color=color, weight=edge[2])\n\n return graph", "def nodes(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].nodes.values()])", "def runNodesMessage(self):\n while True:\n for neighbour in self.nextIP:\n socketNodes = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n while True:\n try:\n socketNodes.connect((neighbour, 5003))\n socketNodes.send(self.message)\n break\n except TimeoutError:\n pass\n except ConnectionRefusedError:\n pass\n socketNodes.close()\n break", "def get_next_cliques(tree): \r\n for i,j in tree.edges():\r\n # if all neighbouring cliques except j have sent a message; and no message from i to j\r\n if not tree.edge[i][j]['msg_passed']:\r\n if scipy.alltrue([tree.edge[k][i]['msg_passed'] for k in tree.predecessors(i) if k!=j]):\r\n return i,j\r\n return -1,-1 # returning -1 would mean no more cliques; all messages have been passed\r", "def cc_visited(ugraph):\n remain = set(ugraph.keys())\n conn_comp = []\n while remain:\n node = remain.pop()\n visited = bfs_visited(ugraph, node)\n conn_comp.append(visited)\n remain = remain.difference(visited)\n return conn_comp", "def connexify(self, estimator, nb_connect=5, verbose=False):\n connex_groups_id = list(self.graph.connex_groups)\n connex_pairs = permutations(connex_groups_id, 2)\n new_edges = []\n for conidx1, conidx2 in connex_pairs:\n for _ in range(nb_connect):\n node_idx1 = random.choice(self.graph.connex_groups[conidx1])\n node_idx2 = random.choice(self.graph.connex_groups[conidx2])\n state1 = self.graph.nodes[node_idx1]\n state2 = self.graph.nodes[node_idx2]\n success, X_opt, U_opt, V_opt = self.opt_trajectories(\n (state1, state2), estimator,\n verbose=verbose)\n if success:\n new_edges.append(((node_idx1, node_idx2),\n X_opt, U_opt, V_opt))\n\n for edge in new_edges:\n self.graph.add_edge(*edge)", "def get_related_edges(nodes_list, graph):\n\n node_id_list = map(lambda x: x.id, nodes_list)\n node_id_set = set(node_id_list)\n edges = []\n\n for node in nodes_list:\n if node.id in graph.incoming_edges:\n for edge in graph.incoming_edges[node.id]:\n\n if edge.start in node_id_set:\n edges.append(edge)\n\n return edges", "def connect_cells(self):\n self.nclist = []\n N = self._N\n for i in range(N):\n src = self.cells[i]\n tgt_syn = self.cells[(i+1)%N].synlist[0]\n nc = src.connect2target(tgt_syn)\n nc.weight[0] = self.syn_w\n nc.delay = self.syn_delay\n\n nc.record(self.t_vec, self.id_vec, i)\n self.nclist.append(nc)", "def get_bridges(edges_list):\n\n # print(\"all edges:\", edges_list)\n\n # make a temporary 
graph\n temp_G = nx.Graph()\n\n # add all current edges to the graph\n for edge in edges_list:\n edge_node_1, edge_node_2 = edge\n temp_G.add_edge(edge_node_1, edge_node_2)\n\n # get all_bridges in temp graph\n bridges_all = list(nx.bridges(temp_G))\n\n # get set of edges with two traversals left (only want one of each, so use set)\n mult_trav_remaining = set([])\n\n for edge in edges_list:\n\n num_trav_remaining = edges_list.count(edge)\n\n if num_trav_remaining > 1:\n\n mult_trav_remaining.add(edge)\n\n mult_trav_remaining = list(mult_trav_remaining)\n\n # remove mult traversal edges from bridges list\n\n # print(\"bridges_ all:\", bridges_all)\n # print(\"\\nmult_trav_remaining:\", mult_trav_remaining)\n\n # make a new bridges list that contains only edges that don't have mult traversals left\n\n bridges_reduced = []\n\n for edge in bridges_all:\n # print(\"\\n\\nedge:\", edge)\n # print()\n if edge in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} is in {mult_trav_remaining}\")\n elif edge[::-1] in mult_trav_remaining:\n continue\n # print()\n # print(f\"bridge {edge} REVERSED is in {mult_trav_remaining}\")\n else:\n # print(f\"bridge {edge} is NOT in {mult_trav_remaining}\")\n\n bridges_reduced.append(edge)\n\n # return a list of true bridges\n return bridges_reduced", "def iterate_over_chanels(data, anots, n_neighbors):\r\n\r\n _, _, ch= data.shape\r\n chanels_acc = []\r\n for idx in range(ch):\r\n X = data[:, :,idx]\r\n acc = LeaveOneOutKnn(X, anots, n_neighbors)\r\n chanels_acc.append(acc)\r\n return np.array(chanels_acc)", "def edges_indices(self) -> numpy.array:\n r = numpy.array(self._receivers, dtype=numpy.int16)\n s = numpy.array(self._senders, dtype=numpy.int16)\n return numpy.asarray([r, s], dtype=numpy.int16)", "def _get_common_neighbour_node_pairs(self):\n node_pairs = []\n for node1 in self.graph.nodes():\n for node2 in self.graph.nodes():\n if node1 != node2:\n neighbour_count = self.neighbour_counts[(node1, node2)]\n if neighbour_count >= 1:\n node_pairs.append((node1, node2))\n return node_pairs", "def cc_visited(ugraph):\n\tremain = []\n\tfor node in ugraph:\n\t\tremain.append(node)\n\tconnected = []\n\twhile remain:\n\t\tvisited = bfs_visited(ugraph, remain[0])\n\t\tconnected.append(visited)\n\t\tremain = [i for i in remain if not i in visited]\n\treturn connected", "def _graph_connected_component(G, node_id):\n connected_components = np.zeros(shape=(G.shape[0]), dtype=np.bool)\n connected_components[node_id] = True\n n_node = G.shape[0]\n for i in range(n_node):\n last_num_component = connected_components.sum()\n _, node_to_add = np.where(G[connected_components] != 0)\n connected_components[node_to_add] = True\n if last_num_component >= connected_components.sum():\n break\n return connected_components", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n return edges", "def clusters_connected( self):\n def check_connected( k, vertices, edges):\n dads = {}\n for p in vertices:\n dads[p] = p\n\n def Find( c):\n while c != dads[c]:\n c = dads[c]\n return c\n\n def Union( p, q):\n dads[Find(p)] = Find(q)\n\n for p,q in edges:\n Union( p, q)\n\n stuff = set([ Find(p) for (k,p) in dads.items()])\n assert len(stuff) == 1, \"More than one partition\"\n\n vertices = collections.defaultdict( list)\n for p in itertools.product( range(self.n), repeat=2):\n vertices[self.raster[p]].append( p)\n\n def X():\n for x in range(self.n-1):\n for y in 
range(self.n):\n yield (x,y),(x+1,y)\n\n def Y():\n for x in range(self.n):\n for y in range(self.n-1):\n yield (x,y),(x,y+1)\n\n connections = collections.defaultdict( list)\n for (p,q) in itertools.chain( X(), Y()):\n if self.raster[p] == self.raster[q]:\n connections[self.raster[p]].append( ( p, q))\n\n for (k,v) in vertices.items():\n check_connected( k, v, connections[k])", "def graphs_conn_iso(n):\n def graphs_conn_helper(n):\n for oldg in graphs_conn_iso(n-1):\n for s in powerset(range(n-1)):\n if s == ():\n continue\n g = oldg + [list(s)]\n for v in s:\n g[v] = g[v] + [n-1]\n # NOT g[v] += ... or g[v].append(...)\n # to avoid changing items in oldg\n yield g\n\n assert n >= 0\n if n >= 3:\n for g in unique_iso(graphs_conn_helper(n)):\n yield g\n elif n == 2:\n yield [ [1], [0] ]\n elif n == 1:\n yield [ [] ]\n else: # n == 0\n yield []", "def connected_component(self, id1: int) -> list:\n if self._graph is None or self._graph.get_node(id1) is None:\n return []\n\n self.reset_tags() # This method executes a BFS and tag nodes so reset_tags() must be called.\n\n # Traverse the original graph, from node id1, and tag all reachable nodes\n ans = []\n src = id1 # alias\n original_graph = self.get_graph()\n self.traverse_breadth_first(src, original_graph)\n # Transpose/Reverse graph's edges\n transposed_graph = self.reverse_graph()\n # Traverse the transposed graph, from node id1, and un-tag all reachable nodes\n self.traverse_breadth_first(src, transposed_graph)\n\n # Iterate over nodes in the transposed graph and find the nodes that are tagged twice!\n for key in transposed_graph.get_all_v():\n node = transposed_graph.get_node(key)\n if node.tag == 2:\n ans.append(self._graph.get_node(node.key)) # Append original node\n return ans", "def _refine_matrix_with_additional_connections(self):\n new_graph = self.graph.copy()\n for node in tqdm.tqdm(self.graph.nodes(), disable=not self.verbose):\n if self.graph.node[node][\"type\"] == \"hashtag\":\n for neighbour in self.graph.neighbors(node):\n if self.graph.node[neighbour][\"type\"] == \"username\":\n for other_node in self.graph.neighbors(neighbour):\n if self.graph.node[other_node][\"type\"] == \"hashtag\" \\\n and not self.graph.has_edge(node, other_node) \\\n and not node == other_node:\n new_graph.add_edge(node, other_node)\n self.graph = new_graph", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def generate_connection_e(self,N_e):\n raise NotImplementedError", "def get_neighbour_edges(self, cur: Union[str, int]) -> list:\n\t\treturn [edge for edge in self.edges if cur in edge]", "def connected_components(self) -> List[list]:\n self.reset_tags()\n ans = []\n visited = dict() # A dictionary of visited nodes\n\n for key in self._graph.get_all_v():\n if not visited.get(key):\n path = self.connected_component(key)\n for node in path:\n visited.__setitem__(node.key, True)\n ans.append(path)\n return ans", "def compute_edge_logits(self):", "def neighbors(node, topology):\n return [n for n in topology[node]]", "def neighboring_consumers(self, position_list):\n agent_list = []\n #loop over all neighbors\n for position in position_list:\n agents_in_cell = self.model.grid.get_cell_list_contents(position)\n #loop over all agents in the cell to find if agent is present\n 
for agent in agents_in_cell:\n if type(agent).__name__ == \"Consumer\":\n agent_list.append(agent)\n \n return agent_list", "def connected_components(graph):\n graphCopy = graph.copy()\n edges = graph.edges(data=True)\n edgeCapacity = 1.0 * np.array([property['capa'] for node1, node2, property in edges])\n percentile = np.percentile(edgeCapacity, 50.0)\n for node1, node2, property in edges:\n if property['capa'] <= percentile:\n graphCopy.remove_edge(node1, node2)\n connectedComponents = nx.connected_components(graphCopy)\n connectedComponentSizes = np.array([len(component) for component in connectedComponents])\n return(connectedComponentSizes)", "def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)", "def E(self) -> list:\n res = []\n for v in self.V():\n res.extend([(v.name, i) for i in v.get_connections().keys()])\n return res", "def _send_connected_ue_list(self):\n connected_states = [self.UE_CONNECTED, self.UE_DISCONNECTING, self.UE_HANDOVER_TO]\n ue_list = [ue for ue, status in self.ue_path.items() if status in connected_states]\n self.add_msg_to_queue(self.output_connected_ue_list, EnableChannels(self.ap_id, ue_list))", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({vertex, neighbour})\n return edges", "def connected_components_with_size(self, size):\n components = [x for x in nx.connected_component_subgraphs(self.return_undirected()) if x.number_of_nodes() == size]\n for graph in components:\n SynonymNetwork.convert_to_special(graph)\n\n return components", "def iter_node(self,i):\n nd = self.nodes[i]\n for kn in nd.get_close():\n # for kn in nd.get_known():\n # for kn in nd.neighbours:\n kn_node = self.nodes[kn.lindex]\n nd.add_known_nodes(kn.path_len,kn_node.get_close())", "def graph_on_reaction(list_of_obj):\n\t# Use a multigraph so multiple edges can exist between nodes\n\treaction_graph = nx.MultiGraph(label='REACTION')\n\tfor gene in list_of_obj:\n\t\tprint gene.gene_ID\n\t\treaction_graph.add_node(gene.gene_ID)\n\t\n\t# Create edge dictionary\n\tedge_dict = {}\n\tfor gene in list_of_obj:\n\t\tif len(gene.reaction()) > 0:\n\t\t\tfor pred_reaction in gene.reaction:\n\t\t\t\tif len(pred_reaction) > 0: \n\t\t\t\t\tprint \"pred_reaction: \" + pred_reaction\n\t\t\t\t\tif pred_reaction not in edge_dict:\n\t\t\t\t\t\ttemp_gene_list = []\n\t\t\t\t\t\ttemp_gene_list.append(gene.gene_ID)\n\t\t\t\t\t\tedge_dict[pred_reaction] = temp_gene_list\n\t\t\t\t\telse:\n\t\t\t\t\t\tedge_dict[pred_reaction].append(gene.gene_ID)\n\t\n\t# Convert edge dictionary to edges with labels\n\tfor k in edge_dict:\n\t\tprint k, edge_dict[k]\n\t\tif len(edge_dict[k]) > 1:\n\t\t\tfor reacting_gene in edge_dict[k]:\n\t\t\t\ti = 
0\n\t\t\t\twhile i < len(edge_dict[k]):\n\t\t\t\t\tif reacting_gene != edge_dict[k][i]:\n\t\t\t\t\t\tif test_edge_exists(reaction_graph, reacting_gene, edge_dict[k][i]) == False:\n\t\t\t\t\t\t\treaction_graph.add_edges_from([(reacting_gene,edge_dict[k][i])], reaction=k)\n\t\t\t\t\ti = i + 1\n\tprint reaction_graph.edges()\n\t\n\t#print test_edge_exists(reaction_graph, 'Rv2228c', 'Rv0054')\n\t\n\treturn reaction_graph", "def _out_connections(self, g, tick):\n # outputs could be connected to many different input ports - this is not yet covered\n out_connections=[]\n output_map = {}\n # get the out connections of the given task\n for source,dest in g.get_out_connections(tick):\n if source.port not in output_map.keys():\n output_map[source.port]=[]\n output_map[source.port].append(dest)\n for source,dest in self.body_graph.get_in_connections(graph.FINAL_TICK):\n out_source=graph.Endpoint(source.tick << tick, source.port)\n portname=dest.port\n for out_dest in output_map[portname]:\n out_connections.append((out_source, out_dest))\n return out_connections", "def __generate_connecticity_increasing_canidates(self,k=3):\n result = set()\n \n if k < 3:\n k = 3\n \n self.__logger.info(\"IMPROOVE_NETWORK: Try to seek {} nodes which are currently bad connected\".format(k))\n candidates = heapq.nlargest(3*k, self.__generate_all_shortest_paths(), key=itemgetter(0))\n\n for _, path in candidates:\n result.add(path[0])\n result.add(path[-1])\n\n self.__logger.info(\"IMPROOVE_NETWORK: Found {} candidates which are currently bad connected\".format(len(result))) \n if len(result) <= k:\n return list(result)\n self.__logger.info(\"IMPROOVE_NETWORK: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(result)\n random.shuffle(tmp)\n return tmp[0:k]", "def get_all_conn_edges_remaining_in_graph(current_node, remaining_edges, nodes_dict):\n\n # remove duplicate edges from remaining edges by makeing a set\n\n remaining_edges_unique_set = set(remaining_edges)\n\n # print(\"\\nremaining_edges_unique_set:\", remaining_edges_unique_set)\n\n all_conn_edges_in_original_graph_set = set(\n nodes_dict[current_node][\"connected_edges\"]\n )\n\n # print(\"\\nall_conn_edges_in_original_graph_set:\", all_conn_edges_in_original_graph_set)\n\n remaining_edges_conn_to_node = remaining_edges_unique_set.intersection(\n all_conn_edges_in_original_graph_set\n )\n\n # print(\"\\nremaining_edges_conn_to_node:\", remaining_edges_conn_to_node)\n\n return list(remaining_edges_conn_to_node)", "def connected_components(self) -> List[list]:\n self.__set_all_nodes_unvisited()\n res = self.__tarjan()\n # res.reverse()\n return res", "def _num_conn_comp(graph):\n\n return nx.number_connected_components(graph)", "def connect_nodes(self):\n for src_id, trg_id in itertools.product(self.selected_nodes, repeat=2):\n if src_id != trg_id:\n app.edges[src_id].add(trg_id)\n self.mark_as_unsaved()\n self.update()", "def get_edges(self) -> []:\n graph_edges = []\n\n for vertex in self.adj_list:\n for connection in self.adj_list[vertex]:\n if (vertex, connection) not in graph_edges and (connection, vertex) not in graph_edges:\n graph_edges.append((vertex, connection))\n\n return graph_edges", "def disconnected_graph(n):\n g = nx.DiGraph()\n for i in range(0, n):\n g.add_node(i)\n return g", "def get_outgoing_connections(self, comp):\n return self.connections.get(comp.id, [])", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n 
edges.append({neighbour, vertex})\n return edges", "def connectedComponents(analyzer):\n return model.connectedComponents(analyzer)", "def Generate_edges(size, connectedness):\r\n\r\n assert connectedness <= 1\r\n random.seed(10)\r\n for i in range(size):\r\n for j in range(i + 1, size):\r\n if random.randrange(0, 100) <= connectedness * 100:\r\n yield f'{i} {j}'", "def build_edges(self):\n print(\"Constructing Edges.\")\n # -----------------------------------------\n # TODO: You should write this method!\n\n # Note: this method may take some time to run - it is likely to be O(N^2), and some lists have N = 10,000 words or more.\n # (I've had students decide that their program was \"broken\" and quit it before this process finished... every time,\n # not realizing that the program was working hard behind the scenes.)\n # I recommend that you keep track of the number of edges you have added, and if it is a multiple of 1000, print\n # something so that you know your program is making progress.\n n = len(self.vertices)\n\n\n\n \n # -----------------------------------------\n print(\"Done Constructing Edges.\\n------------------------------------\")", "def generate_connection_i(self,N_e):\n raise NotImplementedError", "def all_connections(self):\n for i in _xrange(self.num_patterns):\n for c in self._available_connections[i]:\n yield c\n for c in self._in_use_connections[i]:\n yield c", "def _initializeAdjacencyList(self):\n\n if self.comm.rank == 0:\n # First, create a dictionary of common edges shared by components\n edgeToFace = {}\n for elemID in self.bdfInfo.elements:\n elemInfo = self.bdfInfo.elements[elemID]\n elemConn = elemInfo.nodes\n compID = self.meshLoader.nastranToTACSCompIDDict[elemInfo.pid]\n nnodes = len(elemConn)\n if nnodes >= 2:\n for j in range(nnodes):\n nodeID1 = elemConn[j]\n nodeID2 = elemConn[(j + 1) % nnodes]\n\n if nodeID1 < nodeID2:\n key = (nodeID1, nodeID2)\n else:\n key = (nodeID2, nodeID1)\n\n if key not in edgeToFace:\n edgeToFace[key] = [compID]\n elif compID not in edgeToFace[key]:\n edgeToFace[key].append(compID)\n\n # Now we loop back over each element and each edge. 
By\n # using the edgeToFace dictionary, we can now determine\n # which components IDs (jComp) are connected to the\n # current component ID (iComp).\n self.adjacentComps = []\n\n for edgeKey in edgeToFace:\n if len(edgeToFace[edgeKey]) >= 2:\n for i, iComp in enumerate(edgeToFace[edgeKey][:-1]):\n for jComp in edgeToFace[edgeKey][i + 1 :]:\n if iComp < jComp:\n dvKey = (iComp, jComp)\n else:\n dvKey = (jComp, iComp)\n if dvKey not in self.adjacentComps:\n self.adjacentComps.append(dvKey)\n\n else:\n self.adjacentComps = None\n\n # Wait for root\n self.comm.barrier()", "def make_complete_graph(num_nodes):\n #initialize empty graph\n complete_graph = {}\n #consider each vertex\n for vertex in range(num_nodes):\n #add vertex with list of neighbours\n complete_graph[vertex] = list(set([j for j in range(num_nodes) if j != vertex]))\n return complete_graph", "def edges(self):\r\n return self.__generate_edges()", "def connected_component(self, id1: int) -> list:\n list1 = []\n list2 = []\n if id1 in self.dw_graph.nodes:\n list1 = self.bfs(id1, False)\n list2 = self.bfs(id1, True)\n\n list3 = []\n temp=set(list2)\n for value in list1 :\n if value in temp:\n list3.append(value)\n self.dw_graph.nodes[value].distance=-10\n list3.sort()\n return list3", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def construct_fast_graph_connection(coord_list, radie):\n\n connection_distance = []\n connection = []\n coord_list_tree = scipy.spatial.cKDTree(coord_list)\n for j, data in enumerate(coord_list):\n '''save nodes which are in range'''\n connections_ckd = coord_list_tree.query_ball_point(data, radie)\n for i in connections_ckd:\n #only save upper half of the matrix\n if i > j:\n #save the connection\n connection.append([j, i])\n #save the relative distance of the nodes\n connection_distance.append(np.hypot(coord_list[i,0]-data[0], coord_list[i,1]-data[1]))\n\n connection_distance = np.array(connection_distance)\n connection = np.array(connection)\n\n\n return connection, connection_distance", "def get_connected_nodes(self, node):\n assert node in self.nodes, \"No node \"+str(node)+\" in graph \"+str(self)\n result = [x.node2 for x in self.edges if x.node1 == node]\n result += [x.node1 for x in self.edges if x.node2 == node]\n return sorted(result)", "def connections(self, src=False, dst=True, params=True): \n conns = []\n if params:\n if src:\n #grab the node params that this node is a src to\n edges = self.parent.graph.out_edges(self, data=True) \n conns.extend([ edge[2][\"dst_param\"] for edge in edges ])\n if dst:\n #grab the node param that this node is a dst to\n edges = self.parent.graph.in_edges(self, data=True) \n conns.extend([ edge[2][\"src_param\"] for edge in edges ])\n else: \n if src:\n conns.extend(self.parent.graph.successors(self))\n if dst:\n conns.extend(self.parent.graph.predecessors(self))\n \n return conns", "def get_conn_matrix_vector(self):\n\n vect = []\n for line in sorted(self.connection_matrix):\n for item in self.connection_matrix[line]:\n vect.append(item)\n\n return vect", "def sum_product(nodes, edges, node_potentials, edge_potentials):\n marginals = {}\n messages = {}\n\n # -------------------------------------------------------------------------\n # YOUR CODE HERE\n #\n \n def send_message(j, i, grand_children_of_i):\n \"\"\"\n Send messages from node j to node i, i.e. 
summing over all xj\n \n Input\n -----\n j: Source node (to be summed over)\n i: Destination node\n grand_children_of_i: All neighboring nodes except node i (sources of messages).\n \"\"\"\n messages[(j,i)] = {xi: 0 for xi in node_potentials[i]}\n \n incoming_messages = {xj: 1 for xj in node_potentials[j]} # Default to be 1 for leaf nodes (no effect)\n if len(grand_children_of_i) != 0: # Only deal with this case because at leaf node, no messages to be collected\n for xj in node_potentials[j]:\n for grand_child in grand_children_of_i:\n incoming_messages[xj] *= messages[(grand_child, j)][xj]\n for xj in node_potentials[j]:\n for xi in node_potentials[i]:\n messages[(j,i)][xi] += node_potentials[j][xj] * edge_potentials[(j,i)][xj][xi] * incoming_messages[xj]\n \n \n def collect_messages(j, i):\n \"\"\"\n Collect messages from node j to node i\n \"\"\"\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n for k in j_neighbors_except_i: # No effect when j_neighbors_except_i is empty []\n collect_messages(k, j)\n send_message(j, i, j_neighbors_except_i)\n \n def distribute_messages(i, j):\n \"\"\"\n Distribute messages from node i to node j\n \"\"\"\n i_neighbors_except_j = [k for k in edges[i] if k != j]\n j_neighbors_except_i = [k for k in edges[j] if k != i]\n \n send_message(i, j, i_neighbors_except_j)\n for k in j_neighbors_except_i:\n distribute_messages(j, k)\n \n def compute_marginal(i):\n marginals[i] = node_potentials[i]\n for x in marginals[i]:\n for neighbor_node in edges[i]:\n marginals[i][x] *= messages[(neighbor_node, i)][x]\n \n # Renormalize\n normalization_const = np.array(list(marginals[i].values())).sum()\n for x in marginals[i]:\n marginals[i][x] /= normalization_const\n \n \n root_node = list(nodes)[0]\n for node in edges[root_node]:\n collect_messages(node, root_node)\n for node in edges[root_node]:\n distribute_messages(root_node, node)\n for node in nodes:\n compute_marginal(node)\n \n\n #\n # END OF YOUR CODE\n # -------------------------------------------------------------------------\n\n return marginals", "def create_logits_mask_by_first_edge_graph(edge_indexes, num_edge, nvec):\n # find first edge\n # adj matrix of graphs\n # adj_mat = nx.to_numpy_matrix(graph, nodelist=range(nvec))[None]\n\n # bs = adj_mats.shape[0]\n # total_mask = []\n max_edge = edge_indexes.shape[0]\n total_mask = np.zeros(shape=(1, max_edge), dtype=np.int8)\n\n\n # edges = edge_indexes[:num_edge]\n # edges = np.where(adj_mats[i, :, :] > 0)\n\n # max_edge = max(max_edge, num_edges)\n # mask = np.zeros(shape=(1, max_edge), dtype=np.int8)\n all_half_edges = edge_indexes[:num_edge // 2] # only use the directed edge (a->b), not (b->a) # location[np.where(location[:, 0] < location[:, 1])[0]]\n all_valid_edges = edge_indexes[:num_edge] # only use the directed edge (a->b), not (b->a) # location[np.where(location[:, 0] < location[:, 1])[0]]\n\n # restore adj matrix\n rawobs = np.zeros(shape=(nvec, nvec), dtype=np.int8)\n for edge in all_valid_edges:\n rawobs[edge[0], edge[1]] = 1 # edge is an ndarray (2,), we cannot index using rawobs[edge] (is array with shape (2, 15))\n#\n for idx_1, edge_1 in enumerate(all_half_edges):\n encoded_edge_1 = idx_1\n # check if they are one-hop connected\n\n mask = np.zeros(shape=(max_edge,), dtype=np.int8)\n for idx_2, edge_2 in enumerate(all_valid_edges):\n\n fail_cond = edge_2[0] in edge_1 or edge_2[1] in edge_1 or\\\n int(rawobs[edge_2[0], edge_1[0]]) + int(rawobs[edge_2[0], edge_1[1]]) + \\\n int(rawobs[edge_2[1], edge_1[0]]) + int(rawobs[edge_2[1], 
edge_1[1]]) > 0\n\n mask[idx_2] = not fail_cond\n\n total_mask[0, encoded_edge_1] = mask.any()\n\n\n return total_mask", "def eligible_edges_with_indexes(self):\n return list(map(lambda e: (self.edges.index(e), e), self.eligible_edges))", "def detection_algorithm(G, edge_weight):\n Gc = G.copy()\n set_node_attributes(Gc, attr_name='k-index')\n seed_node2communities = {}\n\n from operator import itemgetter\n while Gc.number_of_nodes() > 0:\n seed_node = max(list(Gc.nodes(data='k-index')), key=itemgetter(1))[0]\n nodes_in_community, modularity = find_local_community(Gc, seed_node=seed_node, weight=edge_weight)\n seed_node2communities[seed_node] = (nodes_in_community, modularity)\n Gc.remove_nodes_from(nodes_in_community)\n return seed_node2communities", "def edges(self):\n return self.generate_edges()", "def _scan_targets(self, indices_to_nodes, node_property, source_index,\n factor_aggregator, compute_statistics,\n total_factor_instances,\n generated_edges, reverse_edges=False, limit=None,\n verbose=False):\n edge_list = []\n for target_index in range(source_index + 1, len(indices_to_nodes)):\n s = indices_to_nodes[source_index]\n t = indices_to_nodes[target_index]\n\n if node_property is not None:\n s_factors, t_factors = self._get_node_factors(\n s, t, node_property, factor_aggregator)\n else:\n if factor_aggregator is None:\n factor_aggregator = aggregate_index\n s_factors, t_factors = self._get_edge_factors(\n s, t, factor_aggregator, reverse_edges)\n\n common_factors = safe_intersection(\n s_factors, t_factors)\n\n if len(common_factors) > 0:\n edge = {\n \"@source_id\": s,\n \"@target_id\": t,\n \"common_factors\": common_factors\n }\n\n for stat in compute_statistics:\n edge[stat] = COOCCURRENCE_STATISTICS[stat](\n self.pgframe, s, t,\n node_property,\n common_factors,\n total_factor_instances,\n factor_aggregator,\n reverse_edges)\n\n edge_list.append(edge)\n\n if limit:\n if len(generated_edges) + len(edge_list) == limit:\n if verbose:\n print(\"Reached the edge limit ({})\".format(limit))\n return edge_list\n\n return edge_list", "def neighbors(self):\n return [e.name for e in self.edges()]", "def get_edge_ids(self):\n node_ids = self.node_ids\n return [(node_ids[0], node_ids[1])]", "def eligible_edges(self):\n if len(self.edges) == 4:\n return [self.edges[0], self.edges[2]]\n return []", "def graphs(n):\n assert n >= 0\n\n # Special cases for small vertex sets\n if n <= 2:\n if n == 0:\n yield []\n return\n if n == 1:\n yield [ [] ]\n return\n if n == 2:\n yield [ [], [] ]\n yield [ [1], [0] ]\n return\n\n # Make generator yielding all possible edges.\n # If a < b < c, then we yield edge (a,b) before (a,c).\n # If b < c < a, then we yield edge (b,a) before (c,a).\n # As a result, we will construct graph representations having sorted\n # adjacency lists, which our graph representation requires.\n alledges = ( (j, i) for i in range(n) for j in range(i) )\n\n # Generate all graphs\n # We unroll the portion of the loop dealing with edges (0,1), (0,2)\n for edges in powerset(itertools.islice(alledges, 2, None)):\n # unrolling for edges (0,1) and (0,2)\n g = [ [] for v in range(n) ]\n for e in edges:\n g[e[0]].append(e[1])\n g[e[1]].append(e[0])\n yield g\n\n # Add edge (0,1)\n g2 = g[:]\n # We can't use .insert below, since we don't want to modify the\n # items in the list we have (shallowly!) 
copied.\n g2[0] = [1]+g2[0]\n g2[1] = [0]+g2[1]\n yield g2\n\n # Add edge (0,2)\n g3 = g[:]\n g3[0] = [2]+g3[0]\n g3[2] = [0]+g3[2]\n yield g3\n\n # Add edges (0,1) and (0,2)\n g4 = g3[:] # Not copied from g!\n g4[0] = [1]+g4[0]\n g4[1] = [0]+g4[1]\n yield g4", "def links_to(adj, number):\n connected = []\n for i, v in adj.iteritems():\n if number in v:\n connected.append(i)\n return connected", "def connected(index,i):\n adj = concatenate([ where(ind==j)[0] for j in ind[i] if j >= 0 ])\n return unique(adj[adj != i])", "def neigh_comm(n):\n\n nc = {bl[n]: 0.0}\n for idx in range(0, node_count):\n neigh = idx\n ncomm = bl[neigh]\n nei_weight = network[n][idx]\n if (neigh != n) & (nei_weight > 0.0):\n if ncomm in nc:\n nc[ncomm] += nei_weight\n else:\n nc[ncomm] = nei_weight\n return nc", "def run_adding_edges(self):\n indices = np.where(self.X==0)\n idx=[]\n for i in range(len(indices[0])):\n idx.append((indices[0][i],indices[1][i]))\n idx = np.array(idx)\n return self.node_equivalent(idx)", "def config_connection_matrix(self):\n for leg in self.legs.values():\n for m in leg[\"muscles\"]:\n if \"brain_sig\" and \"name\" in m:\n self.connection_matrix[m[\"name\"]] = [0] * self.brain[\"n_osc\"]\n self.connection_matrix[m[\"name\"]][m[\"brain_sig\"] - 1] = 1.", "def edges(self):\n edge_list = []\n for node1 in self.node_dict:\n for node2 in self.node_dict[node1]:\n edge_list.append((node1,\n node2,\n self.node_dict[node1][node2]))\n return edge_list", "def all_node_ids(self):\n return [i for i in range(0, self.n_inputs + self.n_hidden + self.n_outputs)]", "def make_adjacency_list_from_edge_list(N, edges):\n adjacency_list = [[] for _ in range(N)]\n for e, (x, y, r) in enumerate(edges):\n adjacency_list[x].append((e, y, r))\n adjacency_list[y].append((e, x, r))\n return adjacency_list", "def connectedLineElems(elems):\n elems = Connectivity(elems).copy() # make copy to avoid side effects\n parts = []\n while elems.size != 0:\n loop = findConnectedLineElems(elems)\n parts.append(loop[(loop!=-1).any(axis=1)])\n elems = elems[(elems!=-1).any(axis=1)]\n return parts", "def get_components(graph):\n return [graph.subgraph(c).copy() for c in nx.connected_components(graph)]", "def connected_components(graph):\n all_nodes = list(graph.keys())\n\n counter = 0\n explored = set()\n components = []\n for node in all_nodes:\n if node not in explored:\n counter += 1\n visited = bfs(graph, node)\n components.append(visited)\n explored = explored.union(visited)\n\n return explored, components, counter", "def connected((e,r)):\n \n # Deal with the middle case so we don't divide by zero\n if r==0: return [(1,1),(2,1),(3,1),(4,1),(5,1),(0,1)]\n # If the input is impossible, return nothing to prune the branch (shouldn't\n # happen)\n if e>=6*r: return []\n connected=[]\n mult=e//r\n rem=e % r\n #Going sideways\n toAdd=((6*r-1,r) if e==0 else (e-1,r))\n connected.append(toAdd)\n toAdd=((0,r) if e==6*r-1 else (e+1,r))\n connected.append(toAdd)\n #Going inward\n toAdd=( (0,r-1)if mult==5 and rem==r-1 else (mult*(r-1)+rem,r-1) )\n connected.append(toAdd)\n if rem!=0:\n connected.append((mult*(r-1)+rem-1,r-1))\n\n #Going outward\n if r<nLayers-1:\n connected.append((mult*(r+1)+rem,r+1))\n connected.append((mult*(r+1)+rem+1,r+1))\n if rem==0: # only case where negatives could result\n if mult>0: connected.append( (mult*(r+1)-1,r+1))\n else: connected.append( (6*(r+1)-1,r+1))\n \n return connected", "def send_out_buf_messages(self, only_register=False):\n disconnected_nodes = []\n for node in self.nodes.copy():\n try:\n 
self.send_messages_to_node(node)\n except Exception:\n disconnected_nodes.append(node.get_server_address())\n return disconnected_nodes", "def get_channel_adjacency(self):\n ch_type = mne.io.meas_info._get_channel_types(self.info)[0] # Assuming these are all the same!\n adjacency, ch_names = mne.channels.channels._compute_ch_adjacency(self.info, ch_type)\n ntests = np.prod(self.data.data.shape[2:])\n ntimes = self.data.data.shape[3]\n print('{} : {}'.format(ntimes, ntests))\n return mne.stats.cluster_level._setup_adjacency(adjacency, ntests, ntimes)" ]
[ "0.66461325", "0.6315994", "0.5970758", "0.594913", "0.57439035", "0.57150644", "0.5673093", "0.56705296", "0.56621045", "0.56380904", "0.5598652", "0.55890334", "0.55843794", "0.5572508", "0.55645573", "0.5527443", "0.5512135", "0.54903924", "0.54806775", "0.5466238", "0.5456545", "0.5449172", "0.54412913", "0.54383034", "0.54375845", "0.5396538", "0.5396233", "0.53726685", "0.53707176", "0.53533685", "0.5347789", "0.534419", "0.53420335", "0.5337941", "0.53305036", "0.53291845", "0.5328974", "0.53275913", "0.5326052", "0.5322806", "0.53218365", "0.5320902", "0.5310932", "0.53033155", "0.53013486", "0.5293604", "0.52924144", "0.52911425", "0.52854645", "0.52842104", "0.5271592", "0.5267428", "0.52636445", "0.52513206", "0.524998", "0.52499306", "0.5244454", "0.52378833", "0.522037", "0.5219731", "0.521709", "0.5215537", "0.52030486", "0.5199474", "0.5198928", "0.519223", "0.51916707", "0.51894444", "0.5189199", "0.51730925", "0.5172012", "0.5171648", "0.5169941", "0.51673126", "0.51514906", "0.5148933", "0.5142425", "0.5141272", "0.51380694", "0.5137132", "0.51351696", "0.5132437", "0.5131594", "0.5127183", "0.5119799", "0.5119599", "0.51139957", "0.51139283", "0.5108405", "0.5106648", "0.5106636", "0.51035064", "0.5100455", "0.50999475", "0.509169", "0.5091669", "0.50910413", "0.5090254", "0.5090243", "0.50897425", "0.5085171" ]
0.0
-1
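A document_score of 0.0 and a document_rank of -1 close the record. Its negatives cluster heavily around connected-component search (cc_visited, connected_components, and _graph_connected_component above all implement variants of it). For reference, a compact BFS formulation of the same idea -- an illustration sketched here, not an entry from the dataset:

from collections import deque

def connected_components(graph):
    """Connected components of an undirected graph.

    graph: dict mapping each node to an iterable of neighbour nodes
    returns: list of sets, one set of nodes per component
    """
    remaining = set(graph)
    components = []
    while remaining:
        start = remaining.pop()
        seen = {start}
        queue = deque([start])
        while queue:  # plain BFS from the seed node
            node = queue.popleft()
            for nbr in graph[node]:
                if nbr not in seen:
                    seen.add(nbr)
                    queue.append(nbr)
        components.append(seen)
        remaining -= seen
    return components

# Toy usage: two components, {0, 1, 2} and {3, 4}.
print(connected_components({0: [1], 1: [0, 2], 2: [1], 3: [4], 4: [3]}))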
deep geometric feature set as state representation
def __init__(self, _conv_encoder=None, _encoder_out_channels=32, _vi_key_pointer=None, key_point_num=20, _debug_tool=None, _debug_frequency=None, _vi_mode=True): super(M6EKViEncoder, self).__init__() self.encoder = _conv_encoder self.k_heatmaps_layer = nn.Sequential( nn.Conv2d(_encoder_out_channels, key_point_num, kernel_size=(1, 1), stride=(1, 1)), nn.BatchNorm2d(key_point_num), nn.ReLU()) self.vi_key_pointer = _vi_key_pointer self.debug_tool = _debug_tool self.debug_frequency = _debug_frequency self.it_count = 0 self.vi_mode = _vi_mode
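The document field above holds only the constructor of an M6EKViEncoder: a convolutional encoder followed by a 1x1 convolution that emits key_point_num heatmaps. The record does not include a forward pass, so what follows is an assumed completion -- one plausible way to reduce those heatmaps to the "deep geometric feature set" the query names, via a spatial soft-argmax (shapes and every name outside the constructor are hypothetical):

import torch
import torch.nn.functional as F

def soft_argmax_2d(heatmaps: torch.Tensor) -> torch.Tensor:
    """Reduce (B, K, H, W) heatmaps to (B, K, 2) expected (x, y) keypoints."""
    b, k, h, w = heatmaps.shape
    probs = F.softmax(heatmaps.view(b, k, -1), dim=-1).view(b, k, h, w)
    ys = torch.linspace(-1.0, 1.0, h, device=heatmaps.device)
    xs = torch.linspace(-1.0, 1.0, w, device=heatmaps.device)
    # Expected coordinate = coordinate grid weighted by the normalised heatmap.
    y = (probs.sum(dim=3) * ys).sum(dim=2)   # marginalise over width, weight rows
    x = (probs.sum(dim=2) * xs).sum(dim=2)   # marginalise over height, weight cols
    return torch.stack([x, y], dim=-1)

# Hypothetical forward pass matching the constructor's attributes:
#   features = self.encoder(obs)                # (B, _encoder_out_channels, H, W)
#   heatmaps = self.k_heatmaps_layer(features)  # (B, key_point_num, H, W)
#   state    = soft_argmax_2d(heatmaps)         # (B, key_point_num, 2)
print(soft_argmax_2d(torch.randn(2, 20, 8, 8)).shape)  # torch.Size([2, 20, 2])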
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n features = []\n for j in range(36):\n long_id = self._make_id(state.scanId, state.location.viewpointId,str(j+1))\n feature = self.features[long_id]\n pad_num = 64-len(feature)\n \n if pad_num > 0: # padding the feature to [64, 2051]\n padding = np.zeros([pad_num, 2051]) \n feature = np.concatenate((feature,padding))\n\n features.append(feature)\n \n feature_states.append((features, state))\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n\n return feature_states # [([64,2051]*36), sim_state] * batch_size", "def __setstate__(self,state):\n self.__dict__.update(state)\n self.KDTreeFinder = spatial.KDTree(self.featureVals)", "def state_nodes(self) -> np.ndarray:\n return np.array([[nd[c] for c in [\"alive\", \"infected\", \"immune\", \"isolated\", \"masked\"]]\n for nv, nd in self.g_.nodes.data()])", "def get_state(self):\n return convert_x_to_bbox(self.kalman_filter.x)[0].astype(int)", "def state_graph(self) -> np.ndarray:\n return nx.convert_matrix.to_numpy_array(self.g_,\n dtype=np.int16)", "def state(self):\n state = np.array(self.get_state_arr())\n om = utils.build_occupancy_maps(utils.build_humans(state))\n # We only have a batch of one so just get the first element of\n # transform and rotate\n state = utils.transform_and_rotate(state.reshape((1, -1)))[0]\n return torch.cat((state, om), dim=1)", "def dense_state(self):\n return {name: self.group[name].state for name in self.group.keys()}", "def getstate(self):\r\n return SparseGP.getstate(self) + [self.init]", "def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states", "def _getStates(self):\n feature_states = []\n # for i, sim in enumerate(self.sims):\n # state = sim.getState()\n\n # long_id = self._make_id(state.scanId, state.location.viewpointId)\n # if self.features:\n # feature = self.features[long_id] # Get feature for\n # feature_states.append((feature, state))\n # else:\n # feature_states.append((None, state))\n for i in range(self.batch_size):\n while not self.qout[i].empty():\n self.qout[i].get()\n while not self.qtraj[i].empty():\n self.qtraj[i].get()\n\n self.qin[i].put(('state',None))\n \n for i in range(self.batch_size):\n state = self.qout[i].get()\n # print(state)\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n\n return feature_states", "def getStates(self):\n feature_states = []\n for i, sim in 
enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def getStates(self):\n feature_states = []\n for i, sim in enumerate(self.sims):\n state = sim.getState()\n\n long_id = self._make_id(state.scanId, state.location.viewpointId)\n if self.features:\n feature = self.features[long_id] # Get feature for\n feature_states.append((feature, state))\n else:\n feature_states.append((None, state))\n return feature_states", "def featurize_state(self,state):\r\n try:\r\n state=(state[0].item(),state[1].item())\r\n except:\r\n pass\r\n \r\n scaled_x = np.divide(np.add(state[0],-self.x_mean),np.add(self.x_max,-self.x_min))\r\n scaled_vx = np.divide(np.add(state[1],-self.vx_mean),np.add(self.vx_max,-self.vx_min))\r\n return [scaled_x,scaled_vx]", "def get_features(self, state):\n temp_feats = np.zeros([8, NUM_VALUES])\n\n board, head = state\n head_pos, direction = head\n\n # the route for each region\n forward = ['F']\n left = ['L']\n right = ['R']\n forward_region = ['F', 'F', 'F']\n forward_left_region = ['L', 'R', 'F', 'F', 'L', 'L', 'F']\n forward_right_region = ['R', 'L', 'F', 'F', 'R', 'R', 'F']\n right_region = ['R', 'F', 'R', 'R']\n left_region = ['L', 'F', 'L', 'L']\n\n routes = [forward, left, right, forward_region, forward_left_region,\n forward_right_region, right_region, left_region]\n\n # for each route, count how many of each objects it contains\n for route_ind, route in enumerate(routes):\n temp_pos = head_pos\n temp_direction = direction\n for step in route:\n temp_direction = bp.Policy.TURNS[temp_direction][step]\n temp_pos = temp_pos.move(temp_direction)\n r = temp_pos[0]\n c = temp_pos[1]\n temp_feats[route_ind, board[r, c] + 1] += 1\n # we add one in the index since the minimum value is -1\n\n feats = temp_feats.flatten()\n\n return feats", "def __get_state(self, G):\n x = np.zeros(len(G))\n for node in self.infected_node_set:\n x[node] = 1\n\n # Random activation\n if self.self_activation>0:\n rdm_act = np.random.choice([0,1], size=len(x), p=[1-self.self_activation, self.self_activation])\n x = np.minimum(x+rdm_act, 1)\n return x", "def getstate(self):\r\n return GPBase.getstate(self) + [self.Z,\r\n self.num_inducing,\r\n self.has_uncertain_inputs,\r\n self.X_variance]", "def convert_state(self, x, v):\n \n\n #print(self.offset[0] * self.tiling_displacement[len(self.tiling_displacement)-1][0] / self.tile_width[0])\n\n #state = 0\n n_features = self.total_tiles[0] * self.total_tiles[1] * self.n_tilings\n state = np.zeros(n_features, dtype=int)\n print(np.shape(state))\n\n for i in range(self.n_tilings):\n # Finds the index of the tile in both dimensions\n x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n \n #x_tile = (x - self.offset[0] * self.tiling_displacement[i][0] - self.x_range[0] + self.extra_tiles[0] * self.tile_width[0]) // self.tile_width[0]\n #v_tile = (v - self.offset[1] * self.tiling_displacement[i][1] - self.v_range[0] + self.extra_tiles[1] * self.tile_width[1]) // self.tile_width[1]\n\n index = int(i * (self.total_tiles[0]*self.total_tiles[1]) + x_tile * self.total_tiles[0] + 
v_tile)\n print(\"INDEX\" , index)\n state[index] = 1\n\n\n\n\n \"\"\"\n # adds the correct bit (corresponding to the state of the tiling) to the state integer\n state += 2 ** (i * self.n_tiles**2 + x_tile * self.n_tiles + v_tile)\n \"\"\"\n print (\"Tiling %s: (%s,%s)\" % (i, x_tile, v_tile))\n\n return state", "def states(self):\n return self._x_list", "def _get_state(self):\n\n # stack all variables and return state array\n state = np.hstack((self.sheep_com, self.farthest_sheep, \n self.target, self.dog_pose, self.radius_sheep, \n self.target_distance))\n return state", "def _state_convert(self, raw_state):\n variables_dict = dict()\n variables_dict[\"s_t\"] = np.hstack((0.8, 0.8, 0.8, 0.8, 0.8, 0.8, 0.0, 0.0, 1.0))\n variables_dict[\"v_t\"] = np.hstack((0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n variables_dict[\"add_s_t\"] = np.hstack((0.8, 0.8))\n variables_dict[\"add_v_t\"] = np.hstack((0.0, 0.0))\n variables_dict[\"flag_t\"] = 0.0\n variables_dict[\"add_dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"dist_min\"] = np.hstack((1000.0, 1000.0, 1000.0, 1000.0, 1000.0, 1000.0))\n variables_dict[\"ego_lane\"] = raw_state[16]\n variables_dict[\"lane_ids\"] = raw_state[18]\n variables_dict[\"ego_lane\"] = variables_dict[\"lane_ids\"].index(variables_dict[\"ego_lane\"])\n if variables_dict[\"ego_lane\"] == 0 or variables_dict[\"ego_lane\"] == 2:\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"]] = 1.0\n variables_dict[\"s_t\"][variables_dict[\"ego_lane\"] + 3] = 1.0\n variables_dict[\"flag_t\"] = 1 if variables_dict[\"ego_lane\"] == 0 else -1\n\n variables_dict[\"ego_raw_speed\"] = raw_state[3]\n variables_dict[\"filter_speed\"] = (variables_dict[\"ego_raw_speed\"]\n if variables_dict[\"ego_raw_speed\"] >= 10.0 else 10.0)\n variables_dict[\"s_t\"][6] = variables_dict[\"ego_raw_speed\"] / SPEED_RANGE\n objects = raw_state[-1]\n # print(\"ego_speed\",ego_raw_speed,\"ego_lane\",ego_lane)\n if objects[0] is not None:\n # for i in range(len(objects)):\n for i, _object in enumerate(objects):\n lane_id = objects[i][0]\n dist = abs(objects[i][1]) * np.sign(objects[i][1])\n speed = objects[i][2]\n pre_post = np.sign(dist)\n flag = 0 if pre_post == 1.0 else 1\n\n if abs(dist) < VIEW_RANGE:\n for j in range(3):\n adjacent_lane = variables_dict[\"ego_lane\"] - 1 + j\n dist_index = j + flag * 3\n if (lane_id == adjacent_lane and abs(dist) < variables_dict[\"dist_min\"][dist_index]):\n self.min_dist(\n variables_dict[\"v_t\"],\n variables_dict[\"s_t\"],\n dist_index,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n variables_dict[\"dist_min\"][dist_index] = abs(dist)\n\n if abs(dist) < variables_dict[\"add_dist_min\"][flag]:\n if (variables_dict[\"ego_lane\"] == 0 and lane_id == variables_dict[\"ego_lane\"] + 2\n or variables_dict[\"ego_lane\"] == len(variables_dict[\"lane_ids\"]) - 1\n and lane_id == variables_dict[\"ego_lane\"] - 2):\n self.min_dist(\n variables_dict[\"add_v_t\"],\n variables_dict[\"add_s_t\"],\n flag,\n speed,\n dist,\n variables_dict[\"filter_speed\"],\n )\n\n state = np.hstack((\n variables_dict[\"s_t\"],\n variables_dict[\"v_t\"],\n variables_dict[\"add_s_t\"],\n variables_dict[\"add_v_t\"],\n variables_dict[\"flag_t\"],\n ))\n return state", "def ensemble_perts(self):\n #emean = self.ensemble_mean()\n return self - self.ensemble_mean()\n #return self.state.values", "def getstate(self):\n out = []\n for row in self.a:\n out.append([])\n for item in row:\n out[-1].append(itemstate(item))\n return (\"matrix\", out, self.y, self.x, self.converter, 
self.onlydiag())", "def featurize_state(self, state):\r\n scaled = scaler.transform([state])\r\n featurized = featurizer.transform(scaled)\r\n return featurized[0]", "def __feature_set__(self):\r\n import numpy as np\r\n import datetime\r\n import time\r\n cols_norm = [col for col in self.columns]\r\n cols_lower = [col.lower() for col in self.columns]\r\n fields = []\r\n features = []\r\n date_fields = []\r\n _geom_types = {\r\n arcgis.geometry._types.Point : \"esriGeometryPoint\",\r\n arcgis.geometry._types.Polyline : \"esriGeometryPolyline\",\r\n arcgis.geometry._types.MultiPoint : \"esriGeometryMultipoint\",\r\n arcgis.geometry._types.Polygon : \"esriGeometryPolygon\"\r\n }\r\n if self.sr is None:\r\n sr = {'wkid' : 4326}\r\n else:\r\n sr = self.sr\r\n fs = {\r\n \"objectIdFieldName\" : \"\",\r\n \"globalIdFieldName\" : \"\",\r\n \"displayFieldName\" : \"\",\r\n \"geometryType\" : _geom_types[type(self.geometry[self.geometry.first_valid_index()])],\r\n \"spatialReference\" : sr,\r\n \"fields\" : [],\r\n \"features\" : []\r\n }\r\n if 'objectid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('objectid')]\r\n elif 'fid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('fid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('fid')]\r\n elif 'oid' in cols_lower:\r\n fs['objectIdFieldName'] = cols_norm[cols_lower.index('oid')]\r\n fs['displayFieldName'] = cols_norm[cols_lower.index('oid')]\r\n else:\r\n self['OBJECTID'] = list(range(1, self.shape[0] + 1))\r\n res = self.__feature_set__\r\n del self['OBJECTID']\r\n return res\r\n if 'objectIdFieldName' in fs:\r\n fields.append({\r\n \"name\" : fs['objectIdFieldName'],\r\n \"type\" : \"esriFieldTypeOID\",\r\n \"alias\" : fs['objectIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['objectIdFieldName']))\r\n if 'globalIdFieldName' in fs and len(fs['globalIdFieldName']) > 0:\r\n fields.append({\r\n \"name\" : fs['globalIdFieldName'],\r\n \"type\" : \"esriFieldTypeGlobalID\",\r\n \"alias\" : fs['globalIdFieldName']\r\n })\r\n cols_norm.pop(cols_norm.index(fs['globalIdFieldName']))\r\n elif 'globalIdFieldName' in fs and \\\r\n len(fs['globalIdFieldName']) == 0:\r\n del fs['globalIdFieldName']\r\n if self._geometry_column_name in cols_norm:\r\n cols_norm.pop(cols_norm.index(self._geometry_column_name))\r\n for col in cols_norm:\r\n try:\r\n idx = self[col].first_valid_index()\r\n col_val = self[col].loc[idx]\r\n except:\r\n col_val = \"\"\r\n if isinstance(col_val, (str, np.str)):\r\n l = self[col].str.len().max()\r\n if str(l) == 'nan':\r\n l = 255\r\n\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeString\",\r\n \"length\" : int(l),\r\n \"alias\" : col\r\n })\r\n if fs['displayFieldName'] == \"\":\r\n fs['displayFieldName'] = col\r\n elif isinstance(col_val, (datetime.datetime,\r\n pd.Timestamp,\r\n np.datetime64,\r\n pd.datetime)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeDate\",\r\n \"alias\" : col\r\n })\r\n date_fields.append(col)\r\n elif isinstance(col_val, (np.int32, np.int16, np.int8)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSmallInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (int, np.int, np.int64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeInteger\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (float, np.float64)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" 
: \"esriFieldTypeDouble\",\r\n \"alias\" : col\r\n })\r\n elif isinstance(col_val, (np.float32)):\r\n fields.append({\r\n \"name\" : col,\r\n \"type\" : \"esriFieldTypeSingle\",\r\n \"alias\" : col\r\n })\r\n fs['fields'] = fields\r\n for row in self.to_dict('records'):\r\n geom = {}\r\n if self._geometry_column_name in row:\r\n geom = row[self._geometry_column_name]\r\n del row[self._geometry_column_name]\r\n for f in date_fields:\r\n try:\r\n row[f] = int(row[f].to_pydatetime().timestamp() * 1000)\r\n except:\r\n row[f] = None\r\n features.append(\r\n {\r\n \"geometry\" : dict(geom),\r\n \"attributes\" : row\r\n }\r\n )\r\n del row\r\n del geom\r\n fs['features'] = features\r\n return fs", "def get_states(self):\n return product(*[phi.automaton().states for phi in self])", "def get_states():\n # Getting all hidden state through time\n all_hidden_states = tf.scan(GRU, processed_input, \n initializer=initial_hidden, name='states')\n return all_hidden_states", "def features(self, state):\n jdecays = state[\"decays\"]\n cor_mean = state[\"means\"] / (1 - jdecays**(state[\"iteration\"]))\n # longest running decay\n approx_max = cor_mean[1:]\n cor_mean = cor_mean[0:-1]\n running_min = state[\"running_min\"][0:-1]\n\n den = jnp.maximum(1e-8, (approx_max - running_min))\n pre_center = (cor_mean - running_min) / den\n feature1 = (pre_center - 1.0)\n feature1 = jnp.clip(feature1, -1, 1)\n # first couple features are bad.\n return jnp.where(state[\"iteration\"] <= 2, feature1 * 0, feature1)", "def get_on_neurons(Fish,state):\r\n state_snap= Fish[state,:]\r\n \r\n label=[]\r\n for i in range(len(state_snap)):\r\n if state_snap[i]>2:\r\n label.append(1)\r\n else: \r\n label.append(0)\r\n return label", "def x(self):\n # REPLACE THE FOLLOWING WITH THE LOGIC TO CONSTRUCT/RETURN THE STATE\n x = {key: 0.0 for key in self.model.states}\n\n return x", "def to_features(self):\n to_return = dict()\n\n to_return['bias'] = 1.0\n to_return['user:' + self.user] = 1.0\n to_return['format:' + self.format] = 1.0\n to_return['token:' + self.token.lower()] = 1.0\n\n to_return['part_of_speech:' + self.part_of_speech] = 1.0\n for morphological_feature in self.morphological_features:\n to_return['morphological_feature:' + morphological_feature] = 1.0\n to_return['dependency_label:' + self.dependency_label] = 1.0\n\n return to_return", "def call(self, state):\n x = tf.cast(state, tf.float32)\n x = self.flatten(x)\n x -= self.min_vals\n x /= self.max_vals - self.min_vals\n x = 2.0 * x - 1.0 # Rescale in range [-1, 1].\n x = self.dense1(x)\n x = self.dense2(x)\n x = self.last_layer(x)\n return x", "def denseFeature(self, feat):\n return {'feat': feat}", "def get_featured_tree(self):\n\n for t in self.tree.get_terminals():\n t.sample_series = self.feature_table[t.name]\n self.feature_tree = self.recursion_tree(self.tree.root)\n for clade in self.feature_tree.find_clades(order='level'):\n clade.depth = 1+len(self.feature_tree.get_path(clade))\n \n #i = 0\n #for clade in self.feature_tree.find_clades(order='level'):\n # clade.ID_num = i \n #clade.abu = np.mean(clade.sample_series.values)\n #clade.domain_otu = clade.sample_series.idxmax()", "def state_2_features(self, state):\n # Simple tabular features\n phi = np.zeros((self.feature_dim,),\n dtype=np.float)\n\n # Set index\n s_idx = state - 1\n if 0 <= s_idx < self.feature_dim:\n phi[s_idx] = 1.0\n\n return phi", "def gather_state(self):\n self.x[0,0:self.n] = self.m[0:self.n]\n self.x[1,0:self.n] = self.r[0:self.n,0]\n self.x[2,0:self.n] = self.r[0:self.n,1]\n 
self.x[3,0:self.n] = self.r[0:self.n,2]\n self.x[4,0:self.n] = self.v[0:self.n,0]\n self.x[5,0:self.n] = self.v[0:self.n,1]\n self.x[6,0:self.n] = self.v[0:self.n,2]\n self.x[7,0:self.n] = self.rho[0:self.n]\n self.x[8,0:self.n] = self.p[0:self.n]\n # added second component of pressure\n self.x[9,0:self.n] = self.pco[0:self.n]\n self.x[10,0:self.n] = self.u[0:self.n]\n return(self.x)", "def full_output_state(self):\n state = self.circuit.global_input_state\n for layer in range(self.circuit.element_layers):\n #TODO: a way to update the state one layer at a time\n #instead of one element at a time might be slightly faster\n for element in self.circuit.elements[layer]:\n state = self.evolve_element(state, element)\n return state", "def gather_state(self):\n self.x[0,0:self.n] = self.m[0:self.n]\n self.x[1,0:self.n] = self.r[0:self.n,0]\n self.x[2,0:self.n] = self.r[0:self.n,1]\n self.x[3,0:self.n] = self.r[0:self.n,2]\n self.x[4,0:self.n] = self.v[0:self.n,0]\n self.x[5,0:self.n] = self.v[0:self.n,1]\n self.x[6,0:self.n] = self.v[0:self.n,2]\n return(self.x)", "def tree_features(self):\n return self._tree_features", "def getstate(self):\r\n return Model.getstate(self) + [self.X,\r\n self.num_data,\r\n self.input_dim,\r\n self.kern,\r\n self.likelihood,\r\n self.output_dim,\r\n self._Xoffset,\r\n self._Xscale]", "def _get_full_graph(self):", "def state_array_spec(self) -> Dict[str, Any]:", "def featurize_state(self, state):\n s_scaled = self.scaler.transform([state])\n s_transformed = self.feature_transformer.transform(s_scaled)\n return s_transformed[0]", "def _state(self):\n state = [] \n for _temp in self.config[\"performance_targets\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n \n for _temp in self.config[\"states\"]:\n ID = _temp[0]\n attribute = _temp[1]\n state.append(self.methods[attribute](ID))\n\n state = np.asarray(state)\n \n return state", "def getState(self) -> vector:\n return self._denormalizeState(self.Z)", "def state(self) -> pd.Series:\n return pd.Series(\n (stat.get() for stat in self._groups.values()),\n index=(\n pd.Index(key[0] for key in self._groups.keys())\n if self.by and len(self.by) == 1\n else pd.MultiIndex.from_tuples(self._groups.keys(), names=self.by)\n ),\n name=self._feature_name,\n )", "def state_dict(self):\n return {\n 'XY_net': self.XY_net.state_dict(),\n 'XY_optimizer_minee': self.XY_optimizer_minee.state_dict(),\n 'X_net': self.X_net.state_dict(),\n 'X_optimizer_minee': self.X_optimizer_minee.state_dict(),\n 'Y_net': self.Y_net.state_dict(),\n 'Y_optimizer_minee': self.Y_optimizer_minee.state_dict(),\n 'X': self.X,\n 'Y': self.Y,\n 'lr': self.lr,\n 'batch_size': self.batch_size,\n 'ref_batch_factor': self.ref_batch_factor\n }", "def state(self) -> nx.Graph:\n return self._state", "def __getstate__(self):\n return {\n 'token_embeddings' : self.token_embeddings,\n 'fee_embeddings' : self.fee_embeddings,\n 'all_fee_spans' : self.all_fee_spans,\n 'fee_indices' : self.fee_indices,\n 'labels' : self.labels,\n 'lemma_pos' : self.lemma_pos,\n 'dependency_labels' : self.all_dependency_labels,\n 'dependency_heads' : self.all_dependency_heads,\n 'n' : self.n,\n 'dim' : self.dim,\n 'loaded' : self.loaded,\n 'statistics' : self.statistics,\n 'version' : CURRENT_DATASET_VERSION\n }", "def stateVector(self):\n simulator=Aer.get_backend('statevector_simulator')\n result=execute(self.circuit,backend=simulator).result()\n statevector=result.get_statevector(decimals=4) #\"decimals=4\" doesn't work in version 0.20.0 \n return 
statevector.tolist()", "def get_states(self):\n raise NotImplementedError()", "def feature_representation_Full(state, env, action):\n\n fr = np.zeros(11 * 13, dtype='float32')\n\n # Hand\n hand = [c.rank_value for c in state.hand]\n for c in (hand):\n fr[c - 1] += 1\n\n # Played by opponent\n played_opp = [c.rank_value for c in env.played[int(not state.hand_id)]]\n for c in (played_opp):\n fr[13 + c - 1] += 1\n\n # Cards not available anymore\n # Get crib of player\n if state.hand_id == env.dealer:\n crib = [env.crib[0]] + [env.crib[2]]\n else:\n crib = [env.crib[1]] + [env.crib[3]]\n not_avail = [c.rank_value for c in env.played[state.hand_id]] + \\\n [c.rank_value for c in env.starter] + \\\n [c.rank_value for c in crib]\n for c in (not_avail):\n fr[2 * 13 + c - 1] += 1\n\n # Table\n for i, c in enumerate(env.table):\n i += 3\n fr[i * 13 + c.rank_value - 1] = 1\n\n # Action\n fr[10 * 13 + action.rank_value - 1] = 1\n\n return fr", "def final_components(self):\n DG = self.digraph()\n condensation = DG.strongly_connected_components_digraph()\n return [self.induced_sub_finite_state_machine([self.state(_) for _ in component])\n for component in condensation.vertices()\n if condensation.out_degree(component) == 0]", "def gen_graph(self):", "def getstate(self):\r\n return [self.tied_indices,\r\n self.fixed_indices,\r\n self.fixed_values,\r\n self.constrained_indices,\r\n self.constraints]", "def get_initial_ff_state(m):\n return [True] + [False] * (m-1)", "def _convert_to_features(self, img: np.ndarray) -> np.ndarray:", "def __repr__(self, state):\n print ' ',\n for w in range(len(state)+2):\n print \"___\",\n print '\\n'\n for x in state:\n print \"| \", x, \" |\"\n print ' ',\n for y in range(len(state)+2):\n print \"___\",\n print '\\n'\n return state", "def toState(attrs=ALL):", "def _features_to_state_space_index(self, features):\n if (np.any(features > np.expand_dims(self.factor_sizes, 0)) or\n np.any(features < 0)):\n raise ValueError(\"Feature indices have to be within [0, factor_size-1]!\")\n return np.array(np.dot(features, self.factor_bases), dtype=np.int64)", "def get_states_from_graph(graph):\n return [\n (y, x, td)\n for y in range(graph.size)\n for x in range(graph.size)\n for td in range(2)\n if graph.E[(0, y, x, td)].state\n ]", "def getAllWorldStates(self):\n arrays = []\n for i in range(1, self.num_ingredients):\n arrays.append(list(range(7))) #dont forget to change\n return list(itertools.product(*arrays))", "def features(self, state, action, next_state):\n raise NotImplementedError", "def la(x) :\r\n return Feature(x, \"leaf_area\")", "def state_to_features(self, game_state: dict) -> np.array:\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n # This is the dict before the game begins and after it ends\n if game_state is None:\n return None\n\n \n #get global information as a 17x17 channel\n x = game_state['field']\n x = np.swapaxes(x,0,1)\n for i in range(len(game_state['coins'])):\n a = game_state['coins'][i][1]\n b = game_state['coins'][i][0]\n x[a][b] = 4\n for i in range(len(game_state['bombs'])):\n a = game_state['bombs'][i][0][1]\n b = game_state['bombs'][i][0][0]\n x[a][b] = -(5+game_state['bombs'][i][1])\n for i in game_state['others']:\n if i[2]:\n x[i[3][1]][i[3][0]] = -10\n else:\n x[i[3][1]][i[3][0]] = -11\n if game_state['self'][2]:\n x[game_state['self'][3][1]][game_state['self'][3][0]] = 5\n else:\n x[game_state['self'][3][1]][game_state['self'][3][0]] = 6\n expl_List = np.argwhere(game_state['explosion_map'] != 0)\n for i in 
expl_List:\n x[i[1]][i[0]] = -4\n channel1 = x.copy()\n \n \n #prep local channel\n if self.modelToUse != 0:\n #get simpele direction to and aways from closest coin or crate if no coin on the field\n x_axis,y_axis,coin_creat_encoding = directionToNearestCoin_Crate(game_state['coins'], game_state['self'][3], game_state['field'])\n if x_axis == \"left\":\n if x[game_state['self'][3][1]][game_state['self'][3][0]-1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]-1] = coin_creat_encoding\n if x[game_state['self'][3][1]][game_state['self'][3][0]+1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]+1] = -2\n if x_axis == \"right\":\n if x[game_state['self'][3][1]][game_state['self'][3][0]-1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]-1] = -2\n if x[game_state['self'][3][1]][game_state['self'][3][0]+1] == 0:\n x[game_state['self'][3][1]][game_state['self'][3][0]+1] = coin_creat_encoding\n if y_axis == \"up\":\n if x[game_state['self'][3][1]-1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]-1][game_state['self'][3][0]] = coin_creat_encoding\n if x[game_state['self'][3][1]+1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]+1][game_state['self'][3][0]] = -2\n if y_axis == \"down\":\n if x[game_state['self'][3][1]-1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]-1][game_state['self'][3][0]] = -2\n if x[game_state['self'][3][1]+1][game_state['self'][3][0]] == 0:\n x[game_state['self'][3][1]+1][game_state['self'][3][0]] = coin_creat_encoding\n \n \n \n #get information of bombs: on which position the explotion will be and how far away the bomb is\n bombs = game_state['bombs']\n bombs.sort(key=lambda x: x[1],reverse=True)\n x = np.pad(x, (3,3), 'constant', constant_values=(-1))\n for i in (bombs):\n y_bomb = i[0][1] + 3\n x_bomb = i[0][0] + 3\n for j in range(4):\n if abs(x[y_bomb,x_bomb+j]) != 1 and x[y_bomb,x_bomb+j] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb,x_bomb+j-l] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb,x_bomb+j] = -(9-j)\n #print(\"test1\")\n if abs(x[y_bomb,x_bomb-j]) != 1 and x[y_bomb,x_bomb-j] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb,x_bomb-j+l] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb,x_bomb-j] = -(9-j)\n #print(\"test2\")\n if abs(x[y_bomb+j,x_bomb]) != 1 and x[y_bomb+j,x_bomb] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb+j-l,x_bomb] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb+j,x_bomb] = -(9-j)\n #print(\"test3\")\n if abs(x[y_bomb-j,x_bomb]) != 1 and x[y_bomb-j,x_bomb] != -4:\n blocked = False\n for l in range(j):\n if x[y_bomb-j+l,x_bomb] == -1:\n blocked = True\n if blocked == False:\n x[y_bomb-j,x_bomb] = -(9-j)\n #print(\"test4\")\n x = x[3:-3,3:-3]\n \n \n #get local view and concatenate it with channel 1 (will be sliced apart in the model later)\n z = np.zeros(17)\n y = x[game_state['self'][3][1]-1:game_state['self'][3][1]+2,game_state['self'][3][0]-1:game_state['self'][3][0]+2]\n y = y.flatten()\n z[0:9] = y\n #get correct input for the model used\n if self.modelToUse == 2:\n z = Variable(torch.from_numpy(z)).to(device).to(torch.float)\n z = z.unsqueeze(0).unsqueeze(0).unsqueeze(0)\n channel1 = Variable(torch.from_numpy(channel1)).to(device).to(torch.float)\n channel1 = channel1.unsqueeze(0).unsqueeze(0)\n return torch.cat((channel1,z),2)\n elif self.modelToUse == 1:\n y = Variable(torch.from_numpy(y)).to(device).to(torch.float)\n y = y.unsqueeze(0)\n return y\n else:\n channel1 = 
Variable(torch.from_numpy(channel1)).to(device).to(torch.float)\n channel1 = channel1.unsqueeze(0).unsqueeze(0)\n return channel1\n return", "def getFeatures(self, state, action):\n features = qutils.Qcounter()\n features['bias'] = 1.0\n\n if state is None:\n return features\n else:\n\n if self.id%2 == 0:\n plrCoords = state.board.plr_coords['r']\n oppCoords = state.board.plr_coords['b']\n else:\n plrCoords = state.board.plr_coords['b']\n oppCoords = state.board.plr_coords['r']\n\n goalState = GoalState(state.board.plr_coords['r'],state.board.plr_coords['b'],state.agents[self.id].hand,\n state.board.draft)\n if action['coords'] is not None:\n draftCoords = goalState.CardsToCoords([action['draft_card']])\n else:\n draftCoords = None\n\n features['euclideanDistanceCentroid'] = eucDist(action, plrCoords)\n features['neighbour'] = neighbour(action, plrCoords, oppCoords)\n features['heart'] = heart(action, plrCoords)\n features['blockHeart'] = blockHeart(action, oppCoords)\n features['eHorizontal'] = eHorizontal(state, action, plrCoords, oppCoords)\n features['eVertical'] = eVertical(state, action, plrCoords, oppCoords)\n features['eIandIIIDiag'] = eIandIIIDiagonal(state, action, plrCoords, oppCoords)\n features['eIIandIVDiag'] = eIIandIVDiagonal(state, action, plrCoords, oppCoords)\n features['draftHorizontal'] = draftHorizontal(state, plrCoords, oppCoords, draftCoords)\n features['draftVertical'] = draftVertical(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIandIII'] = draftDiagIandIII(state, plrCoords, oppCoords, draftCoords)\n features['draftDiagIIandIV'] = draftDiagIIandIV(state, plrCoords, oppCoords, draftCoords)\n features['draftJacks'] = DraftJacks(action)\n features['PlayCentre'] = PlayCentre(action)\n features['HeuristicValuePlace'] = HeuristicValue(action, goalState)\n features['HeuristicValueDraft'] = HeuristicValueDraft(action, goalState, draftCoords, self.gamma)\n return features", "def getstate(self):\n return [elem.getstate() for elem in self]", "def __init__(self):\n self.state_dim = 12\n self.measurement_dim = 6", "def my_featurize(apartment):\n return x, y", "def process_state(state):\n grid = state.grid\n pos = state.pos\n reshaped_grid = np.reshape(grid,(1, grid_size*grid_size)) # Only use squared for square matrices\n reshaped_grid = reshaped_grid[0]\n processed_state = np.concatenate((pos, reshaped_grid))\n processed_state = np.array([processed_state])\n # processed_state.reshape(1, 1, grid_size*grid_size+2, 1)\n #print(processed_state.shape)\n\n return processed_state", "def convert_to_feature_frame(concrete_state, measure_color_distance=True):\n\n data = []\n\n parent_map = {}\n sibling_map = {}\n input_widgets = []\n\n for widget in concrete_state['widgets'].values():\n parent_key = widget['key']\n children = widget['children']\n for child in children:\n parent_map[child] = parent_key\n sibling_map[child] = len(children)\n\n tag = widget['properties']['tagName'].lower()\n\n if tag in basic_input_tags:\n input_widgets.append(widget)\n\n for widget in concrete_state['widgets'].values():\n if widget['properties']['is-hidden']:\n continue\n\n key = widget['key']\n depth = widget['domLevel']\n tag = widget['properties']['tagName'].lower()\n\n if tag not in basic_html_tags:\n tag = 0\n\n if key in parent_map:\n parent_key = parent_map[key]\n parent_tag = concrete_state['widgets'][parent_key]['properties']['tagName'].lower()\n\n if parent_tag not in basic_html_tags:\n parent_tag = 0\n\n number_of_siblings = sibling_map[key]\n else:\n parent_tag = 0\n 
number_of_siblings = 0\n\n number_of_children = len(widget['children'])\n\n x_percent = 0\n y_percent = 0\n font_size = 0\n font_weight = 0\n attr_for = False\n\n if 'properties' in widget:\n widget_props = widget['properties']\n\n if 'xPercent' in widget_props:\n x_percent = int(widget_props['xPercent'])\n\n if 'yPercent' in widget_props:\n y_percent = int(widget_props['yPercent'])\n\n if 'fontSize' in widget_props:\n font_size = int(widget_props['fontSize'])\n\n if 'font-weight' in widget_props:\n font_weight = int(widget_props['font-weight'])\n\n if 'for' in widget_props:\n attr_for = widget['properties']['for']\n if attr_for.strip() != \"\":\n attr_for = True\n else:\n attr_for = False\n\n text = widget['properties']['text']\n\n LOGGER.debug(f'convert_feature_to_frame, {key}, {text}')\n text = text.strip()\n text = text.replace('\\\"', \"\")\n is_text = text != \"\"\n\n color = widget['properties']['color']\n bg_color = widget['properties']['background-color']\n\n if measure_color_distance:\n nearest_color = get_nearest_color(color)\n nearest_bg_color = get_nearest_color(bg_color)\n else:\n nearest_color = base_colors[0][0]\n nearest_bg_color = base_colors[0][0]\n\n # This is affected by zoom level.\n distance_from_input_widget = 9999\n\n for input_widget in input_widgets:\n ix, iy = int(input_widget['properties']['x']), int(input_widget['properties']['y'])\n tx, ty = int(float(widget['properties']['x'])), int(float(widget['properties']['y']))\n dist = calc_point_distance(tx, ty, ix, iy)\n if dist < distance_from_input_widget:\n distance_from_input_widget = dist\n\n if attr_for:\n attr_for = 1.0\n else:\n attr_for = 0.0\n\n if is_text:\n is_text = 1.0\n else:\n is_text = 0.0\n\n for i in range(len(basic_html_tags)):\n if tag == basic_html_tags[i]:\n tag = i + 1\n if parent_tag == basic_html_tags[i]:\n parent_tag = i + 1\n\n data_row = [\n key,\n tag,\n parent_tag,\n attr_for,\n number_of_children,\n number_of_siblings,\n depth,\n x_percent,\n y_percent,\n font_size,\n font_weight,\n is_text,\n nearest_color,\n nearest_bg_color,\n distance_from_input_widget,\n text\n ]\n\n data.append(data_row)\n\n df = pd.DataFrame(data=data,\n columns=['Key', 'Tag', 'Parent_Tag', 'Attr_For', 'Num_Children', 'Num_Siblings', 'Depth',\n 'X_Percent', 'Y_Percent', 'Font_Size', 'Font_Weight', 'Is_Text',\n 'Nearest_Color', 'Nearest_Bg_Color', 'Distance_From_Input', 'Text'])\n\n # Normalize.\n df = normalize(df, ['Key', 'Tag', 'Parent_Tag', 'Attr_For', 'Is_Text', 'Text', 'Class', 'Nearest_Color',\n 'Nearest_Bg_Color'])\n\n return df", "def determinisation(self):\n if any(len(t.word_in) > 1 for t in self.iter_transitions()):\n return self.split_transitions().determinisation()\n\n epsilon_successors = {}\n direct_epsilon_successors = {}\n for state in self.iter_states():\n direct_epsilon_successors[state] = set(\n t.to_state\n for t in self.iter_transitions(state)\n if not t.word_in)\n epsilon_successors[state] = set([state])\n\n old_count_epsilon_successors = 0\n count_epsilon_successors = len(epsilon_successors)\n\n while old_count_epsilon_successors < count_epsilon_successors:\n old_count_epsilon_successors = count_epsilon_successors\n count_epsilon_successors = 0\n for state in self.iter_states():\n for direct_successor in direct_epsilon_successors[state]:\n epsilon_successors[state] = epsilon_successors[state].union(epsilon_successors[direct_successor])\n count_epsilon_successors += len(epsilon_successors[state])\n\n def set_transition(states, letter):\n result = set()\n for state in states:\n for 
transition in self.iter_transitions(state):\n if transition.word_in == [letter]:\n result.add(transition.to_state)\n result = result.union(*(epsilon_successors[s] for s in result))\n return (frozenset(result), [])\n\n result = self.empty_copy()\n new_initial_states = [frozenset(set().union(\n *(epsilon_successors[s]\n for s in self.iter_initial_states()\n )))]\n result.add_from_transition_function(set_transition,\n initial_states=new_initial_states)\n\n for state in result.iter_states():\n state.is_final = any(s.is_final for s in state.label())\n if all(s.color is None for s in state.label()):\n state.color = None\n else:\n state.color = frozenset(s.color for s in state.label())\n\n return result", "def __getstate__(self):\n state = Object.__getstate__(self)\n state['_strain'] = set()\n return state", "def S(self):\n return self._states", "def tag_with_features(self, efeats):\n if len(efeats)==3:\n print \"d\"\n\n # build array of dicts\n state_dicts = []\n for e_phi in efeats: \n state_dicts = self.viterbi1(e_phi, state_dicts)\n \n \n # trace back\n yyhat, phis = self.traceback(efeats, state_dicts)\n assert len(efeats)==len(yyhat)#len(yyhat), \n\n return (yyhat, phis)", "def save_expval_final_statevecs():\n # Get pre-measurement statevectors\n statevecs = []\n # State |+1>\n statevec = Statevector.from_label(\"+1\")\n statevecs.append(statevec)\n # State |00> + |11>\n statevec = (Statevector.from_label(\"00\") + Statevector.from_label(\"11\")) / np.sqrt(2)\n statevecs.append(statevec)\n # State |10> -i|01>\n statevec = (Statevector.from_label(\"10\") - 1j * Statevector.from_label(\"01\")) / np.sqrt(2)\n statevecs.append(statevec)\n return statevecs", "def get_data_from_state(state):\n data = []\n for column in range(aes.NB):\n for row in range(aes.R):\n data.append(state[row][column])\n return data", "def feature_calculator(args, graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n adjacency_matrix = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n degrees = adjacency_matrix.sum(axis=0)[0].tolist()\n degs = sparse.diags(degrees, [0])\n normalized_adjacency_matrix = degs.dot(adjacency_matrix)\n target_matrices = [normalized_adjacency_matrix.todense()]\n powered_A = normalized_adjacency_matrix\n if args.window_size > 1:\n for power in tqdm(range(args.window_size-1), desc = \"Adjacency matrix powers\"):\n powered_A = powered_A.dot(normalized_adjacency_matrix)\n to_add = powered_A.todense()\n target_matrices.append(to_add)\n target_matrices = np.array(target_matrices)\n return target_matrices", "def state_(state):\n return tuple( [ tuple( row ) for row in state ] )", "def dump_raw(self):\n return [ [ ( m.coeff, m.k1, m.k2 )\n for m in O.monomials if m.coeff ] for O in self.states ]", "def to_featureset(self):\r\n from arcgis.features import FeatureSet\r\n return FeatureSet.from_dataframe(self)", "def _getState(self, board):\r\n mySide = board.mySide(self.id)\r\n oppSide = board.oppSide(self.id)\r\n myMancala = board.stonesInMyMancala(self.id)\r\n oppMancala = board.stonesInOppMancala(self.id)\r\n \r\n state = [] # size should be inputSize - 1\r\n state.append(float(myMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(mySide[i])\r\n for my in mySide:\r\n state.append(float(my))\r\n state.append(float(oppMancala))\r\n# for i in range(self.rowSize):\r\n# state.append(oppSide[i])\r\n for op in 
oppSide:\r\n state.append(float(op))\r\n return state", "def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}", "def __getstate__(self) -> typing.Dict:\n # print(\"[INFO] Get state called\")\n\n state = self.__dict__ # get attribute dictionary\n\n # add the fitted_primitives\n state['fitted_pipe'] = self.runtime.pipeline\n state['pipeline'] = self.pipeline.to_json_structure()\n state['log_dir'] = self.log_dir\n state['id'] = self.id\n del state['runtime'] # remove runtime entry\n\n return state", "def gates(self, input_state):\n kernel = tf.get_variable(\n 'kernel', [self._concat_size, self._num_units * 2])\n bias = tf.get_variable(\n 'bias', self._num_units * 2,\n initializer=tf.constant_initializer(-1.))\n\n gates = tf.nn.sigmoid(tf.matmul(input_state, kernel) + bias)\n return tf.split(gates, num_or_size_splits=2, axis=1)", "def getAllStates(self):\n return list(itertools.product(self.getAllWorldStates(), self.getAllTheta()))", "def get_features(self):\n x,y = self.agent\n return np.array([x,y])", "def _get_native_state(self):\n state = self.__proxy__.get_state()\n state['classifier'] = state['classifier'].__proxy__\n del state['feature_extractor']\n del state['classes']\n return state", "def feature_map(self) -> QuantumCircuit:\n return self._feature_map", "def _extract_states(self, state):\n conf = self._config\n\n # c_prev is `m` (cell value), and\n # m_prev is `h` (previous output) in the paper.\n # Keeping c and m here for consistency with the codebase\n c_prev = [None] * conf.num_dims\n m_prev = [None] * conf.num_dims\n\n # for LSTM : state = memory cell + output, hence cell_output_size > 0\n # for GRU/RNN: state = output (whose size is equal to _num_units),\n # hence cell_output_size = 0\n total_cell_state_size = self._cell_state_size()\n cell_output_size = total_cell_state_size - conf.num_units\n\n if self._state_is_tuple:\n if len(conf.recurrents) != len(state):\n raise ValueError('Expected state as a tuple of {} '\n 'element'.format(len(conf.recurrents)))\n\n for recurrent_dim, recurrent_state in zip(conf.recurrents, state):\n if cell_output_size > 0:\n c_prev[recurrent_dim], m_prev[recurrent_dim] = recurrent_state\n else:\n m_prev[recurrent_dim] = recurrent_state\n else:\n for recurrent_dim, start_idx in zip(conf.recurrents,\n range(0, self.state_size,\n total_cell_state_size)):\n if cell_output_size > 0:\n c_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n m_prev[recurrent_dim] = array_ops.slice(\n state, [0, start_idx + conf.num_units], [-1, cell_output_size])\n else:\n m_prev[recurrent_dim] = array_ops.slice(state, [0, start_idx],\n [-1, conf.num_units])\n return c_prev, m_prev, cell_output_size", "def prepare_state_representation(self, state):\n\n user_action = state['user_action']\n current_slots = state['current_slots']\n agent_last = state['agent_action']\n\n ########################################################################\n # Create one-hot of acts to represent the current user action\n ########################################################################\n user_act_rep = np.zeros((1, self.act_cardinality))\n user_act_rep[0, self.act_set[user_action['diaact']]] = 1.0\n\n ########################################################################\n # Create bag of inform slots representation to represent the current user action\n ########################################################################\n user_inform_slots_rep = np.zeros((1, 
self.slot_cardinality))\n for slot in user_action['inform_slots'].keys():\n user_inform_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Create bag of request slots representation to represent the current user action\n ########################################################################\n user_request_slots_rep = np.zeros((1, self.slot_cardinality))\n for slot in user_action['request_slots'].keys():\n user_request_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Creat bag of filled_in slots based on the current_slots\n ########################################################################\n current_slots_rep = np.zeros((1, self.slot_cardinality))\n for slot in current_slots['inform_slots']:\n current_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Encode last agent act\n ########################################################################\n agent_act_rep = np.zeros((1, self.act_cardinality))\n if agent_last:\n agent_act_rep[0, self.act_set[agent_last['diaact']]] = 1.0\n\n ########################################################################\n # Encode last agent inform slots\n ########################################################################\n agent_inform_slots_rep = np.zeros((1, self.slot_cardinality))\n if agent_last:\n for slot in agent_last['inform_slots'].keys():\n agent_inform_slots_rep[0, self.slot_set[slot]] = 1.0\n\n ########################################################################\n # Encode last agent request slots\n ########################################################################\n agent_request_slots_rep = np.zeros((1, self.slot_cardinality))\n if agent_last:\n for slot in agent_last['request_slots'].keys():\n agent_request_slots_rep[0, self.slot_set[slot]] = 1.0\n\n # turn_rep = np.zeros((1, 1)) + state['turn'] / 10.\n turn_rep = np.zeros((1, 1))\n\n ########################################################################\n # One-hot representation of the turn count?\n ########################################################################\n turn_onehot_rep = np.zeros((1, self.max_turn))\n turn_onehot_rep[0, state['turn']] = 1.0\n\n self.final_representation = np.hstack(\n [\n user_act_rep,\n user_inform_slots_rep,\n user_request_slots_rep,\n agent_act_rep,\n agent_inform_slots_rep,\n agent_request_slots_rep,\n current_slots_rep,\n turn_rep,\n turn_onehot_rep\n ])\n return self.final_representation", "def _get_state_sizes(self):\n ds = self.builder.nodes[self.ds_inputs[0]]\n return [[ds.xdim]]", "def bipartite_sets(G):\n color=bipartite_color(G)\n X=set(n for n in color if color[n]==1)\n Y=set(n for n in color if color[n]==0)\n return (X,Y)", "def test_save_geometric(self):\n G = nx.random_geometric_graph(20, 0.1)\n env = Environment(topology=G)\n f = io.BytesIO()\n env.dump_gexf(f)", "def get_state(self, flatten=True):\n if flatten:\n return self.data.flatten()\n return self.data", "def __setstate__(self, state):\r\n\r\n \"\"\"# Support adding a new member not previously defined in the class\r\n if 'new_member' not in state:\r\n self.new_member = \"new value\"\r\n self.__dict__.update(state)\"\"\"\r\n\r\n \"\"\" # Support removing old members not in new version of class\r\n if 'old_member' in state:\r\n # If you want: do something with the old member\r\n del state['old_member']\r\n self.__dict__.update(state) \"\"\"\r\n\r\n 
del state['_background_image']\r\n\r\n if '_background_image_data' not in state:\r\n print \"Detected old version of saved file!\"\r\n self._background_image_data = QtCore.QByteArray()\r\n\r\n\r\n if isinstance(state['_features'], list):\r\n for feature in state['_features']:\r\n self._append_feature(feature)\r\n del state['_features']\r\n\r\n self.__init__()\r\n self.__dict__.update(state)\r\n\r\n if isinstance(state['_features'], dict):\r\n\r\n for _id, feature in state['_features'].iteritems():\r\n if feature.get_feature_type() == 'Fuse':\r\n self._fuse_tree_item_model.addChild(feature, None)", "def get_state(self):\n return self.kf.x[:self.dim_z].squeeze()", "def _state_space_index_to_features(self, index):\n factor = []\n for base in self.factor_bases:\n f = math.floor(index/base)\n factor.append(f)\n index = index - f*base\n \n return factor", "def _generate_feature_tree(self, features):\n # build a set of all features, including top-level features and\n # dependencies.\n self.top_level_features = defaultdict(list)\n\n # find top-level features and index them by entity id.\n for f in self.all_features:\n _, num_forward = self.entityset.find_path(self.target_eid, f.entity.id,\n include_num_forward=True)\n if num_forward or f.entity.id == self.target_eid:\n self.top_level_features[f.entity.id].append(f)", "def compute_edge_logits(self):", "def extractFeatures(self, datum):\n abstract", "def _extract_state(self, state):\n raise NotImplementedError" ]
[ "0.63558125", "0.6102643", "0.606456", "0.6011864", "0.5996851", "0.59706515", "0.5943621", "0.59378874", "0.5885338", "0.5885338", "0.5870441", "0.5870441", "0.5846847", "0.5846655", "0.57815313", "0.5768379", "0.5745745", "0.5655412", "0.5653317", "0.56334156", "0.5630338", "0.561813", "0.5609718", "0.55877304", "0.55677485", "0.5558261", "0.55498123", "0.55393636", "0.5537607", "0.5527348", "0.55203485", "0.55117404", "0.54919183", "0.5487439", "0.54703456", "0.5453991", "0.5451592", "0.54461336", "0.54427457", "0.5431097", "0.54305536", "0.541559", "0.5415252", "0.5413874", "0.5410627", "0.5408188", "0.54062074", "0.540382", "0.53948087", "0.53680444", "0.53656316", "0.5362417", "0.53489584", "0.5347486", "0.53401357", "0.5337774", "0.53236187", "0.5315357", "0.52991855", "0.52894294", "0.5276093", "0.5267271", "0.52610284", "0.52609026", "0.5260231", "0.52590346", "0.52535033", "0.52510005", "0.52498794", "0.5241484", "0.5229735", "0.52279", "0.52236384", "0.5219085", "0.52170664", "0.5214188", "0.5211831", "0.5206101", "0.52041173", "0.52020746", "0.5183526", "0.51760507", "0.51613885", "0.51558083", "0.51556945", "0.51537883", "0.51447177", "0.51415753", "0.51403713", "0.513534", "0.5133706", "0.51298535", "0.51204836", "0.5118048", "0.5117581", "0.5116116", "0.5112767", "0.51112646", "0.5110004", "0.5106495", "0.5101511" ]
0.0
-1
Note that you can keep training the model after it has already been trained, because it automatically remembers the last error weights. So self.train(n=2) is equivalent to self.train(n=1); self.train(n=1)
def train(self, data, data_class, n=1):
    # Error weights persist on the instance (initialized once in __init__),
    # so repeated train() calls continue boosting from where the last call stopped.
    for _ in range(n):
        # Train the weak estimator on the current error weights
        new_estimator = BinaryDecisionTree(self.max_depth)
        new_estimator.train(data, data_class, self.error_weights)
        y_pred, _ = new_estimator.validate(data, data_class)

        # Check misclassifications and update the error weights accordingly
        incorrect = y_pred != data_class
        error = self.error_weights[incorrect].sum()
        alpha = 0.5 * np.log((1.0 - error) / error)
        sign = incorrect * 2 - 1  # Maps incorrect to 1, correct to -1
        self.error_weights *= np.exp(sign * alpha)
        self.error_weights /= self.error_weights.sum()

        # Save down alpha and estimator for prediction
        self.alphas.append(alpha)
        self.estimators.append(new_estimator)
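A short usage sketch of the equivalence claimed in the query. The class name AdaBoostModel is a placeholder for whatever class the train() method above belongs to; the sketch assumes its __init__ sets uniform error_weights plus empty alphas and estimators lists, and that BinaryDecisionTree trains deterministically. Under those assumptions, one call with n=2 leaves the model in the same state as two consecutive calls with n=1.

import numpy as np

# Hypothetical setup: small synthetic binary-classification data.
rng = np.random.default_rng(0)
data = rng.normal(size=(100, 5))
data_class = (data[:, 0] > 0).astype(int)

model_a = AdaBoostModel(max_depth=1)  # placeholder class holding the train() above
model_a.train(data, data_class, n=2)  # two boosting rounds in one call

model_b = AdaBoostModel(max_depth=1)
model_b.train(data, data_class, n=1)  # one round...
model_b.train(data, data_class, n=1)  # ...then one more: error_weights carry over

# Both paths should produce identical estimator weights.
assert np.allclose(model_a.alphas, model_b.alphas)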
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def train_one_epoch(self):\n raise NotImplementedError", "def train(self, training_steps=10):", "def TrainOneStep(self):\n pass", "def train(self, ):\n raise NotImplementedError", "def train(self):\n raise NotImplementedError", "def train2(self):\n for epoch in range(self.epochs):\n print \"epoch: \", epoch\n self.train(self.D)\n self.alpha -= 0.002 # decrease the learning rate\n self.min_alpha = model.alpha # fix the learning rate, no decay", "def train_model(self, *args, **kwargs):\n raise NotImplementedError", "def _train(self):\n self.network.train() # note that here we are calling torch.nn.Module train class method\n epochs_since_improvement = 0\n best_params = None\n self.calculate_validation_loss()\n best_validation_loss = self.validation_average_loss\n\n while epochs_since_improvement < 10:\n self.train_epoch()\n self.calculate_validation_loss()\n if self.validation_average_loss < best_validation_loss:\n epochs_since_improvement = 0\n best_validation_loss = self.validation_average_loss\n best_params = self.network.state_dict()\n else:\n epochs_since_improvement += 1\n LOGGER.info(\"Epochs since improvement in validation_loss: {} \\n\".format(epochs_since_improvement))\n if self.maximum_epochs_allowed is not None and self.epochs_trained >= self.maximum_epochs_allowed:\n break\n LOGGER.info(\"Training complete after {} epochs \\n\".format(self.epochs_trained))\n LOGGER.info(\"Best training loss achieved: {} \\n\".format(self.training_average_loss))\n LOGGER.info(\"Best validation loss achieved: {}\".format(self.validation_average_loss))\n self.learned_params = best_params\n self.network.load_state_dict(best_params)", "def train(self):\r\n \r\n args = self.args\r\n model = self.model\r\n dataset = self.dataset\r\n train_state = self.train_state\r\n optimizer = self.optimizer\r\n scheduler = self.scheduler\r\n train_bar = self.train_bar\r\n val_bar = self.val_bar\r\n epoch_bar = self.epoch_bar\r\n \r\n for epoch_index in range(args.num_epochs):\r\n train_state['epoch_index'] = epoch_index\r\n \r\n # Iterate over training dataset\r\n \r\n running_loss,running_acc = self.train_loop(epoch_index, args, \r\n model, dataset, \r\n optimizer, train_bar)\r\n \r\n train_state['train_loss'].append(running_loss)\r\n train_state['train_acc'].append(running_acc)\r\n \r\n running_loss,running_acc = self.val_loop(epoch_index, args, model, \r\n dataset, optimizer, val_bar)\r\n \r\n \r\n train_state['val_loss'].append(running_loss)\r\n train_state['val_acc'].append(running_acc)\r\n \r\n print(\"Epoch \"+str(epoch_index+1)+\": Running loss=\"+ \\\r\n str(running_loss)+\", Running Acc=\"+str(running_acc))\r\n \r\n train_state = update_train_state(args=args, model=model, \r\n train_state=train_state)\r\n \r\n scheduler.step(train_state['val_loss'][-1])\r\n \r\n if train_state['stop_early']:\r\n break\r\n \r\n train_bar.n = 0\r\n val_bar.n = 0\r\n epoch_bar.set_postfix(best_val=train_state['early_stopping_best_val'] )\r\n epoch_bar.update()\r\n \r\n state_dict = torch.load(train_state['model_filename'])\r\n model.load_state_dict(state_dict)\r\n return model", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self, num_batches: int):", "def train(self, verbose=True):\n\n\n learned = False\n iteration = 0\n\n from util.loss_functions import DifferentError\n loss = DifferentError()\n\n\n\n\n\n # Train for some epochs if the error is not 0\n while not learned:\n # x ist ein Bild bestehend aus einem Label (erster 
Eintrag) und 784 Pixeln\n # t ist das Zielergebnis von x (überprüfbar mit dem Label)\n # o ist der tatsächliche Ergebnis von x\n # w ist der Gewichtsvektor\n # Als Aktivierungsfunktion verwenden wir die Sigmoid Funktion\n # Das Training wird dann beendet, sobald das Fehlerkriterium konvergiert\n\n totalError = 0\n\n output = []\n labels = self.trainingSet.label\n inputs = self.trainingSet.input\n\n # iteriere für jede Instanz im Trainingsset x € X\n for input in inputs:\n # Ermittle O_x = sig(w*x)\n output.append(self.fire(input))\n\n # Ermittle Fehler AE = tx - ox\n error = loss.calculateError(np.array(labels), np.array(output))\n\n # grad = [0]\n grad = np.zeros(len(self.trainingSet.input[0]))\n grad2 = np.zeros(len(self.trainingSet.input[0]))\n\n for e, input, out in zip(error, inputs, output):\n activationPrime = Activation.getDerivative(activationName)(np.dot(np.array(input), self.weight))\n #grad += np.multiply( np.multiply( input, e), activationPrime)\n grad += np.multiply( input, e)\n\n # Update grad = grad + errorPrime * x * activationPrime\n\n\n\n # print grad - grad2\n #print \"Error: \" + str(error) + \" Grad: \" + str(grad)\n\n # update w: w <- w + n*grad\n self.updateWeights(grad)\n\n\n iteration += 1\n totalError = error.sum()\n\n if verbose:\n logging.info(\"Epoch: %i; Error: %i\", iteration, totalError)\n\n if abs(totalError) < 0.01 or iteration >= self.epochs:\n # stop criteria is reached\n learned = True\n\n pass", "def train(self):\n\t\traise NotImplementedError", "def train(self):\n raise NotImplementedError()", "def train(self, *args, **kwargs):\n raise NotImplementedError", "def train(self):\n\t\tself.model.fit(self.training_data, self.training_labels)", "def train(self):\n raise IllegalOperationError(\"Cannot train multiplicative model!\")", "def train_model(self,model):\r\n \r\n train_state = {'stop_early': False,\r\n 'early_stopping_step': 0,\r\n 'early_stopping_best_val': 1e8,\r\n 'learning_rate': self.lr,\r\n 'epoch_index': 0,\r\n 'train_loss': [],\r\n 'val_loss': [],\r\n 'best_model':model}\r\n \r\n dataset = self.dataset\r\n loss_fn = self.loss_fn\r\n \r\n dataset.set_split('train')\r\n print(\"Training module with \"+str(len(dataset))+\" examples\")\r\n \r\n data_loader = DataLoader(dataset,batch_size=self.batch_size,shuffle=True,\r\n drop_last=True)\r\n \r\n optimizer = optim.Adam(model.parameters(), lr=self.lr)\r\n \r\n for epoch in range(self.epochs):\r\n train_state['epoch_index'] = epoch\r\n #First step in each epoch is to train over all batches\r\n model.train()\r\n dataset.set_split('train')\r\n train_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: zero gradients\r\n optimizer.zero_grad()\r\n #Step 2: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 3: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n #Step 4: run backward\r\n loss.backward()\r\n #Step 5: update\r\n optimizer.step()\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n train_loss += new_loss\r\n \r\n train_loss /= b_i\r\n train_state['train_loss'].append(train_loss)\r\n \r\n #After training, compute loss on validation set and check for early stop\r\n model.eval()\r\n dataset.set_split('val')\r\n val_loss = 0\r\n for b_i,batch_data in enumerate(data_loader):\r\n #Step 1: run forward\r\n X = batch_data['x']\r\n output = model(X)\r\n #Step 2: compute loss\r\n target = batch_data['y']\r\n loss = loss_fn(output,target)\r\n \r\n #Record accumulated loss\r\n new_loss = loss.item()\r\n val_loss += new_loss\r\n 
\r\n val_loss /= b_i\r\n train_state['val_loss'].append(val_loss)\r\n \r\n print(\"Finished epoch \"+str(epoch+1)+\". Train loss=\"+\\\r\n str(train_loss)+\", Val loss=\"+str(val_loss))\r\n \r\n if val_loss < train_state['early_stopping_best_val']:\r\n #new best model, reset stopping counter, store model\r\n train_state['early_stopping_step'] = 0\r\n train_state['early_stopping_best_val'] = val_loss\r\n best_model = copy.deepcopy(model)\r\n best_model.load_state_dict(model.state_dict())\r\n train_state['best_model'] = best_model\r\n else:\r\n #val loss not improved; increase early stopping counter\r\n train_state['early_stopping_step'] += 1\r\n if train_state['early_stopping_step'] >= self.early_stopping_criteria:\r\n train_state['stop_early'] = True\r\n print(\"Val loss failed to improve. Stopping early.\")\r\n break\r\n \r\n return train_state['best_model'],train_state", "def train():\n pass", "def forward_train(self, *args, **kwargs):\n pass", "def train(self)->None:", "def train(x_train, y_train, x_valid, y_valid, config):\n train_acc = []\n valid_acc = []\n train_loss = []\n valid_loss = []\n best_model = None\n NUM_EPOCH = config['epochs']\n EARLY_STOP = config['early_stop']\n EARLY_STOP_EPOCH = config['early_stop_epoch']\n BATCH_SIZE = config['batch_size']\n model = NeuralNetwork(config=config)\n loss = float('inf')\n best_loss = float('inf')\n best_accuracy = 0\n patience = 0\n\n\n\n for i in range (NUM_EPOCH):\n\n x_train, y_train = shuffle(x_train, y_train)\n x_train = np.asarray(x_train)\n y_train = np.asarray(y_train)\n\n for j in range (0, len(x_train), BATCH_SIZE):\n start = j\n end = j + BATCH_SIZE\n if (end > len(x_train)):\n end = len(x_train)\n\n x = x_train[start:end]\n y = y_train[start:end]\n\n model.forward(x, y) \n model.backward()\n\n train_epoch_loss = model.forward(x_train, y_train)\n \n train_predict = np.zeros_like(model.y)\n train_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n train_accuracy = sum([1 if all(train_predict[i] == y_train[i]) else 0 for i in range(len(y_train))])/len(y_train)\n\n train_loss.append(train_epoch_loss)\n train_acc.append(train_accuracy)\n \n valid_epoch_loss = model.forward(x_valid, y_valid)\n valid_predict = np.zeros_like(model.y)\n valid_predict[np.arange(len(model.y)), model.y.argmax(1)] = 1\n\n valid_accuracy = sum([1 if all(valid_predict[i] == y_valid[i]) else 0 for i in range(len(y_valid))])/len(y_valid)\n\n valid_loss.append(valid_epoch_loss)\n valid_acc.append(valid_accuracy)\n\n\n print(\"Epoch:\", i, \"Train Accuracy|Loss:\", train_accuracy,\"| \", train_epoch_loss, \"~|~ Valid: \", valid_accuracy, \" | \", valid_epoch_loss)\n if EARLY_STOP:\n if valid_epoch_loss > best_loss and patience >= EARLY_STOP_EPOCH:\n return train_acc, valid_acc, train_loss, valid_loss, best_model\n elif valid_epoch_loss > best_loss and patience < EARLY_STOP_EPOCH:\n patience += 1\n else:\n patience = 0\n if valid_epoch_loss < best_loss:\n best_loss = valid_epoch_loss\n best_accuracy = valid_accuracy\n best_model = copy.deepcopy(model)\n\n loss = valid_epoch_loss\n\n \n best_model = model \n return train_acc, valid_acc, train_loss, valid_loss, best_model", "def train_one_epoch(self):\n\t\tself.model.train()\n\t\ttrain_loss = 0\n\n\t\tfor batch_idx, data in enumerate(self.data_loader.train_loader):\n\t\t\tInput = data[0].float().to(self.device)\n\t\t\tOutput = data[1].float().to(self.device)\n\n\t\t\tself.optimizer.zero_grad()\n\t\t\tloss = self.loss(self.model(Input)[:,0],Output)\n\t\t\ttrain_loss += 
loss.item()\n\t\t\tloss.backward()\n\t\t\tself.optimizer.step()\n\t\t\tself.current_iteration += 1\n\n\t\tself.summary_writer.add_scalar('training/loss', loss.item(), self.current_epoch)", "def train(self, *args, **kwargs):\n # Handle overload of train() method\n if len(args) < 1 or (len(args) == 1 and type(args[0]) == bool):\n return nn.Sequential.train(self, *args, **kwargs)\n\n #\n # Parse training arguments\n #\n\n training_data = args[0]\n arguments = {\n \"validation_data\": None,\n \"batch_size\": 256,\n \"sigma_noise\": None,\n \"adversarial_training\": False,\n \"delta_at\": 0.01,\n \"initial_learning_rate\": 1e-2,\n \"momentum\": 0.0,\n \"convergence_epochs\": 5,\n \"learning_rate_decay\": 2.0,\n \"learning_rate_minimum\": 1e-6,\n \"maximum_epochs\": 1,\n \"training_split\": 0.9,\n \"gpu\": False,\n \"optimizer\": None,\n \"learning_rate_scheduler\": None\n }\n argument_names = arguments.keys()\n for a, n in zip(args[1:], argument_names):\n arguments[n] = a\n for k in kwargs:\n if k in arguments:\n arguments[k] = kwargs[k]\n else:\n raise ValueError(\"Unknown argument to {}.\".print(k))\n\n validation_data = arguments[\"validation_data\"]\n batch_size = arguments[\"batch_size\"]\n sigma_noise = arguments[\"sigma_noise\"]\n adversarial_training = arguments[\"adversarial_training\"]\n delta_at = arguments[\"delta_at\"]\n initial_learning_rate = arguments[\"initial_learning_rate\"]\n convergence_epochs = arguments[\"convergence_epochs\"]\n learning_rate_decay = arguments[\"learning_rate_decay\"]\n learning_rate_minimum = arguments[\"learning_rate_minimum\"]\n maximum_epochs = arguments[\"maximum_epochs\"]\n training_split = arguments[\"training_split\"]\n gpu = arguments[\"gpu\"]\n momentum = arguments[\"momentum\"]\n optimizer = arguments[\"optimizer\"]\n learning_rate_scheduler = arguments[\"learning_rate_scheduler\"]\n\n #\n # Determine device to use\n #\n if torch.cuda.is_available() and gpu:\n device = torch.device(\"cuda\")\n else:\n device = torch.device(\"cpu\")\n self.to(device)\n\n #\n # Handle input data\n #\n try:\n x, y = handle_input(training_data, device)\n training_data = BatchedDataset((x, y), batch_size)\n except:\n pass\n\n self.train()\n if not optimizer:\n self.optimizer = optim.SGD(\n self.parameters(), lr=initial_learning_rate, momentum=momentum\n )\n else:\n self.optimizer = optimizer\n self.criterion.to(device)\n\n if not optimizer and not learning_rate_scheduler:\n scheduler = ReduceLROnPlateau(\n self.optimizer,\n factor=1.0 / learning_rate_decay,\n patience=convergence_epochs,\n min_lr=learning_rate_minimum,\n )\n else:\n scheduler = learning_rate_scheduler\n\n training_errors = []\n validation_errors = []\n\n #\n # Training loop\n #\n\n for i in range(maximum_epochs):\n err = 0.0\n n = 0\n for j, (x, y) in enumerate(training_data):\n\n x = x.to(device)\n y = y.to(device)\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n self.optimizer.zero_grad()\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n err += c.item() * x.size()[0]\n n += x.size()[0]\n\n if adversarial_training:\n self.optimizer.zero_grad()\n x_adv = self._make_adversarial_samples(x, y, delta_at)\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n c.backward()\n self.optimizer.step()\n\n if j % 100:\n print(\n \"Epoch {} / {}: Batch {} / {}, Training error: {:.3f}\".format(\n i, maximum_epochs, j, len(training_data), err / n\n ),\n end=\"\\r\",\n )\n\n # Save training error\n training_errors.append(err / n)\n\n lr = 
[group[\"lr\"] for group in self.optimizer.param_groups][0]\n\n val_err = 0.0\n if not validation_data is None:\n n = 0\n for x, y in validation_data:\n x = x.to(device).detach()\n y = y.to(device).detach()\n\n shape = x.size()\n shape = (shape[0], 1) + shape[2:]\n y = y.reshape(shape)\n\n y_pred = self(x)\n c = self.criterion(y_pred, y)\n\n val_err += c.item() * x.size()[0]\n n += x.size()[0]\n validation_errors.append(val_err / n)\n\n print(\n \"Epoch {} / {}: Training error: {:.3f}, Validation error: {:.3f}, Learning rate: {:.5f}\".format(\n i,\n maximum_epochs,\n training_errors[-1],\n validation_errors[-1],\n lr,\n )\n )\n if scheduler:\n scheduler.step()\n else:\n scheduler.step()\n print(\n \"Epoch {} / {}: Training error: {:.3f}, Learning rate: {:.5f}\".format(\n i, maximum_epochs, training_errors[-1], lr\n )\n )\n\n self.training_errors += training_errors\n self.validation_errors += validation_errors\n self.eval()\n return {\n \"training_errors\": self.training_errors,\n \"validation_errors\": self.validation_errors,\n }", "def trainNet():", "def train_step(self):\n pass", "def train_model(self, *args, **kwargs):\n self.model.train(self.training, *args, **kwargs)", "def train(self):\n return", "def train(self,iTrain=1,Nbatch=32,Nlayers=3,Nunits=100,Nepochs=10000,\n ValidationSplit=0.1,Patience=20,Verbose=2):\n if iTrain == 1:\n self.cvstem()\n print(\"========================================================\")\n print(\"=================== NCM CONSTRUCTION ===================\")\n print(\"========================================================\")\n n = self.n\n n_p = self.n_p\n X = np.hstack((self.xs_opt,self.ps_opt))\n y = self.cholMs\n model = Sequential(name=\"NCM\")\n model.add(Dense(Nunits,activation=\"relu\",input_shape=(n+n_p,)))\n for l in range(Nlayers-1):\n model.add(Dense(Nunits,activation=\"relu\"))\n model.add(Dense(int(n*(n+1)/2)))\n model.summary()\n model.compile(loss=\"mean_squared_error\",optimizer=\"adam\")\n es = EarlyStopping(monitor=\"val_loss\",patience=Patience)\n model.fit(X,y,batch_size=Nbatch,epochs=Nepochs,verbose=Verbose,\\\n callbacks=[es],validation_split=ValidationSplit)\n self.model = model\n model.save(\"models/\"+self.fname+\".h5\")\n elif iTrain == 0:\n print(\"========================================================\")\n print(\"=================== NCM CONSTRUCTION ===================\")\n print(\"========================================================\")\n self.model = load_model(\"models/\"+self.fname+\".h5\")\n path = \"models/optvals/\"+self.fname\n self.alp_opt = np.load(path+\"/alp_opt.npy\")\n self.chi_opt = np.load(path+\"/chi_opt.npy\")\n self.nu_opt = np.load(path+\"/nu_opt.npy\")\n self.Jcv_opt = np.load(path+\"/Jcv_opt.npy\")\n print(\"Loading pre-trained NCM ...\")\n print(\"Loading pre-trained NCM END\")\n else:\n raise ValueError(\"Invalid iTrain: iTrain = 1 or 0\")\n print(\"========================================================\")\n print(\"================= NCM CONSTRUCTION END =================\")\n print(\"========================================================\")\n pass", "def _train_model(self):\n raise NotImplementedError()", "def train_model(self):\n early_stopping = EarlyStopping(self, self.hyper.early_stopping_enabled, self.hyper.early_stopping_limit)\n loss_history_train = []\n loss_metric_train = tf.keras.metrics.Mean()\n\n x_train, next_values_train = self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train,\n self.dataset.next_values_train])\n\n x_train_val, next_values_train_val = 
self.dataset.create_batches(self.hyper.batch_size,\n [self.dataset.x_train_val,\n self.dataset.next_values_train_val])\n\n for epoch in range(self.hyper.epochs):\n print(\"Epoch %d\" % (epoch,))\n\n for step, (x_batch_train, next_values_batch_train) in enumerate(zip(x_train, next_values_train)):\n self.train_step(x_batch_train, next_values_batch_train, loss_metric_train)\n\n if step % 50 == 0:\n print(\"\\tStep %d: mean loss = %.4f\" % (step, loss_metric_train.result()))\n\n loss_train_batch = loss_metric_train.result()\n loss_history_train.append(loss_train_batch)\n loss_metric_train.reset_states()\n\n self.model.save_weights(self.checkpoint_path.format(epoch=epoch))\n\n # Check early stopping criterion --> Has the loss on the validation set not decreased?\n best_epoch = early_stopping.execute(epoch, x_train_val, next_values_train_val)\n self.clean_up(early_stopping, epoch)\n\n if best_epoch > 0:\n print('Model from epoch %d was selected by early stopping.' % best_epoch)\n print('Training process will be stopped now.')\n\n self.save_model(best_epoch)\n\n return\n\n self.save_model(epoch=self.hyper.epochs - 1)", "def train(self):\n raise NotImplemented()", "def training(self):\n self.model.fit(self.train_x, self.train_y)", "def train(self, batch):\n pass", "def trainModel( self, featureTrain, classTrain):", "def train_model(self):\n ### Early Stop Mechanism\n loss = previous_loss = float(\"inf\")\n patience_left = self.config.patience\n ### Early Stop Mechanism\n\n self.generator = Generator(self.model.config, training_strategy=self.training_strategy)\n self.evaluator = Evaluator(model=self.model, data_type=self.teston, debug=self.debug)\n\n if self.config.loadFromData:\n self.load_model()\n \n for cur_epoch_idx in range(self.config.epochs):\n print(\"Epoch[%d/%d]\"%(cur_epoch_idx,self.config.epochs))\n loss = self.train_model_epoch(cur_epoch_idx)\n self.test(cur_epoch_idx)\n\n ### Early Stop Mechanism\n ### start to check if the loss is still decreasing after an interval. \n ### Example, if early_stop_epoch == 50, the trainer will check loss every 50 epoche.\n ### TODO: change to support different metrics.\n if ((cur_epoch_idx + 1) % self.config.early_stop_epoch) == 0: \n if patience_left > 0 and previous_loss <= loss:\n patience_left -= 1\n print('%s more chances before the trainer stops the training. 
(prev_loss, curr_loss): (%.f, %.f)' % \\\n (patience_left, previous_loss, loss))\n\n elif patience_left == 0 and previous_loss <= loss:\n self.evaluator.result_queue.put(Evaluator.TEST_BATCH_EARLY_STOP)\n break\n else:\n patience_left = self.config.patience\n\n previous_loss = loss\n ### Early Stop Mechanism\n\n self.generator.stop()\n self.evaluator.save_training_result(self.training_results)\n self.evaluator.stop()\n\n if self.config.save_model:\n self.save_model()\n\n if self.config.disp_result:\n self.display()\n\n if self.config.disp_summary:\n self.config.summary()\n self.config.summary_hyperparameter(self.model.model_name)\n\n self.export_embeddings()\n\n return loss", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.opt.num_epochs):\n self.run_epoch()\n if (self.epoch + 1) % self.opt.save_frequency == 0:\n self.save_model()", "def train():\n model.train()\n for batch_index, (xb, yb) in enumerate(train_dl):\n loss = loss_func(model(xb), yb)\n\n loss.backward()\n opt.step()\n opt.zero_grad()", "def __call__(self, trainer, epoch):\n # do not store intermediate iterations\n if epoch >= self.ignore_before and epoch != 0:\n if not self.num_iters == -1:\n\n # counting epochs starts from 1; i.e. +1\n epoch += 1\n # store model recurrently if set\n if epoch % self.num_iters == 0:\n name = self.prepend + \"training_epoch_{}.h5\".format(epoch)\n full_path = os.path.join(self.path, name)\n self.save_model(trainer, full_path)\n\n # store current model if improvement detected\n if self.store_best:\n current_res = 0\n try:\n # check if value can be used directly or not\n if isinstance(self.retain_metric, str):\n current_res = trainer.val_metrics[self.retain_metric][-1]\n else:\n current_res = trainer.val_metrics[self.retain_metric.__name__][-1]\n except KeyError:\n print(\"Couldn't find {} in validation metrics. Using \\\n loss instead.\".format(self.retain_metric))\n current_res = trainer.val_metrics[\"loss\"][-1]\n\n # update\n if self.window is None: # old update style\n if self._has_improved(current_res):\n self.best_res = current_res\n self.best_model = deepcopy(trainer.model.state_dict())\n else: # new update style\n # get validation metrics in certain window\n try:\n if isinstance(self.retain_metric, str):\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n\n window_val_metrics = trainer.val_metrics[self.retain_metric][start:]\n else:\n start = len(trainer.val_metrics[self.retain_metric.__name__]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[self.retain_metric.__name__][start:]\n except KeyError:\n print(\n \"Couldn't find {} in validation metrics. 
Using \\\n loss instead.\".format(\n self.retain_metric\n )\n )\n start = len(trainer.val_metrics[self.retain_metric]) - self.window\n start = 0 if start < 0 else start\n window_val_metrics = trainer.val_metrics[\"loss\"][start:]\n\n # build mean\n mean_window_res = np.mean(window_val_metrics)\n\n # only safe when improvement to previous epoch detected\n # only a value BETTER than before can be the minimum/maximum of a\n # window with better mean than a previously detected window\n if len(window_val_metrics) == 1 \\\n or self._first_val_better(window_val_metrics[-1], window_val_metrics[-2]) \\\n or self._current_window_save_idx == -1:\n if self._current_window_save_idx == -1:\n self._current_window_save_idx = 0\n self._state_dict_storage[self._current_window_save_idx] = deepcopy(trainer.model.state_dict())\n # increase save idx and take modulo\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n else: # only increase current_window_save_idx (for modulo index calculation to work)\n self._current_window_save_idx += 1\n self._current_window_save_idx = divmod(self._current_window_save_idx, self.window)[1]\n\n # always update current window best result - it might be at some point overall best result\n current_window_best_idx = self._get_cur_win_best_idx(window_val_metrics)\n if current_window_best_idx == len(window_val_metrics) - 1 \\\n or len(window_val_metrics) == 1: # case of improvement or initialisation\n # overwrite model_state saved so far\n self._current_window_best_model_save_idx = self._current_window_save_idx\n self._current_window_best_epoch = epoch\n self._current_window_best_res = window_val_metrics[-1]\n\n # check if mean has improved and copy values as best model result\n if self._has_window_mean_improved(mean_window_res):\n self.best_mean_res = mean_window_res\n self.best_window_start = 0 if epoch - self.window + 1 < 0 else epoch - self.window + 1\n # save current window best as overall best\n self.best_res = self._current_window_best_res\n self.best_model = copy.deepcopy(self._state_dict_storage[self._current_window_best_model_save_idx])\n if self.info:\n print(\"Found a window with better validation metric mean:\")\n print(\"\\t metric mean: {}\".format(mean_window_res))\n print(\"\\t epoch start: {}\".format(self.best_window_start))\n print(\"\\t best result: {}\".format(self.best_res))", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def train(self, session, train_examples, dev_examples, train_dir):\n\n # some free code to print out number of parameters in your model\n # it's always good to check!\n # you will also want to save your model parameters in train_dir\n # so that you can use your trained model to make predictions, or\n # even continue training\n\n tic = time.time()\n params = tf.trainable_variables()\n num_params = sum(map(lambda t: np.prod(tf.shape(t.value()).eval()), params))\n toc = time.time()\n logging.info(\"Number of params: %d (retreival took %f secs)\" % (num_params, toc - tic))\n\n if self.summary_flag:\n self.train_writer = tf.summary.FileWriter(self.summaries_dir + '/train', session.graph)\n\n logging.info(\"Train Loss File: {}\".format(self.train_loss_log))\n logging.info(\"Dev Loss File: {}\".format(self.dev_loss_log))\n best_score = 100000\n train_log = open(self.train_loss_log, \"w\")\n dev_log = open(self.dev_loss_log, \"w\")\n for epoch in range(self.n_epoch):\n print(\"Epoch {:} out of {:}\".format(epoch + 1, 
self.n_epoch))\n dev_score = self.run_epoch(session, train_examples, dev_examples, epoch, train_log)\n dev_log.write(\"{},{}\\n\".format(epoch + 1, dev_score))\n logging.info(\"Average Dev Cost: {}\".format(dev_score))\n logging.info(\"train F1 & EM\")\n f1, em = self.evaluate_answer(session, train_examples, self.rev_vocab, log = True)\n logging.info(\"Dev F1 & EM\")\n f1, em = self.evaluate_answer(session, dev_examples, self.rev_vocab, log = True)\n if dev_score < best_score:\n best_score = dev_score\n print(\"New best dev score! Saving model in {}\".format(train_dir + \"/\" + self.model_name))\n self.saver.save(session, train_dir + \"/\" + self.model_name)\n\n return best_score", "def set_train(self):\n self.model.train()", "def train(self, x, t):\n for i in range(self.number_model):\n curr_model = self.all_model[i]\n curr_model.fit(x, t)", "def train(self, training_data):\n pass", "def train(self, inputs, desired):\n inputs.append(1) # bias input\n guess = self.feedforward(inputs)\n error = desired - guess\n for i in range(len(self.weights)):\n self.weights[i] = self.weights[i] + \\\n self.learning_rate * error * inputs[i]", "def train(self):\n\t\tprint(\"Training...\")\n\t\tprev_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\tfor i in range(self.max_iter):\n\t\t\t# gradient descent\n\t\t\tdw0, dw = self.compute_grad(self.w0, self.w, 'train')\n\t\t\tself.w0 -= self.step_size * dw0\n\t\t\tself.w = [wj-self.step_size*dwj for wj, dwj in zip(self.w, dw)]\n\t\t\tcurr_loss = self.compute_loss(self.w0, self.w, 'train')\n\t\t\tif i%(self.max_iter/10)==0:\n\t\t\t\tprint('iteration: {}, loss: {}'.format(i, curr_loss))\n\t\t\tif abs(curr_loss-prev_loss) < self.tolerance:\n\t\t\t\tprint('# of iterations:',i)\n\t\t\t\tbreak\n\t\tself.trained = True\n\t\tprint('Mean log loss of TRAIN data:', curr_loss)", "def train(self, X_train, y_train):\n self.model.fit(X_train, y_train)", "def train():\n # YOUR TRAINING CODE GOES HERE", "def fit(self):\n for i in range(self.current_epoch, self.max_epoch):\n self.current_epoch += 1\n # train\n train_dataloader = self.data_module.get_train_dataloader(\n batch_size=self.train_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers,\n pin_memory=True\n )\n neptune.log_metric(\"learning_rate_vs_epoch\", self.optimizer.param_groups[0]['lr'])\n self.train_one_epoch(train_dataloader)\n\n # validate \n if self.validate_after == 'epoch' and self.train_on_all_data == False and self.run_lr_range_test == False:\n validation_dataloader = self.data_module.get_valid_dataloader(\n batch_size=self.valid_batch_size, \n shuffle=self.train_dataloader_shuffle, \n num_workers=self.dataloader_num_workers, \n pin_memory=True\n )\n self.validate_one_epoch(validation_dataloader)\n\n if self.scheduler:\n if self.step_scheduler_after == 'epoch': \n if self.step_scheduler_metric == 'val_auc':\n self.scheduler.step(self.metrics['valid'][-1]['auc_score'])\n else:\n self.scheduler.step()\n\n if self.run_lr_range_test:\n neptune.log_metric('validation_epoch_end_AUC_vs_LR', \n self.scheduler.get_last_lr()[0], y=self.metrics['valid'][-1]['auc_score'])\n\n # checkpoint model for resuming model\n if (self.current_epoch % self.checkpoint_epochs) == 0:\n self.save_checkpoint()\n\n # sleep the training process\n if self.current_epoch % self.sleep_in_epochs == 0:\n print(f\"SLEEPING FOR {self.sleep_time} at epoch={self.current_epoch}\")\n for i in range(int(self.sleep_time/30)):\n time.sleep(i)\n neptune.log_metric(\"sleeping_status\", y=1)\n\n 
stop_training = self.stopping_criteria()\n if stop_training:\n if self.fp16:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n self.optimizer.zero_grad()\n else:\n self.optimizer.step()\n self.optimizer.zero_grad()\n # backward all the accumulate gradients\n print(f\"stopped training at {self.current_epoch} epoch\")\n break", "def train_network(self, batch_size, epochs):\n\n if self.eq_train: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights_eq) \n else: self.model.fit([self.X_train_high_level, self.X_train_low_level], self.y_train, epochs=epochs, batch_size=batch_size, sample_weight=self.train_weights)", "def train(self, dataset):\n \"*** YOUR CODE HERE ***\"\n batch_size = 1\n while True:\n error = False\n for x, y in dataset.iterate_once(batch_size):\n y_pred = self.get_prediction(x)\n y = nn.as_scalar(y)\n if y != y_pred:\n error = True\n nn.Parameter.update(self.get_weights(),x,y)\n if error == False:\n break", "def train(self):\n self.log(f\"{self.cur_file_path}\\t\\tInfo: train method invoked!\")\n self.log(f\"{self.cur_file_path}\\t\\tInfo: training {self.model.__class__.__name__} model!\")\n\n self.model.fit(self.trainX, self.trainY)", "def model_switch_to_training(self):\n pass", "def _set_train(self):\n\n if not self.model.__dict__['training']:\n self.model.train()", "def train_model():\n\n if python_version == 2 :\n if num_hidden is None:\n num_hidden = int(raw_input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(raw_input('Enter number of neurons in each hidden layer: '))\n else:\n if num_hidden is None:\n num_hidden = int(input('Enter number of hidden layers: '))\n if num_neuron is None:\n num_neuron = int(input('Enter number of neurons in each hidden layer: '))\n\n print('Activations are LeakyReLU. Optimizer is ADAM. Batch sizei is 32.' 
+ \\\n 'Fully connected network without dropout.')\n\n # Construct model\n model = Sequential()\n\n # Add input layer.\n # MNIST dataset: each image is a 28x28 pixel square (784 pixels total).\n model.add(Flatten(input_shape=(1, 28, 28)))\n\n # Add hidden layers.\n for _ in range(num_hidden):\n model.add(Dense(num_neuron, use_bias=False))\n model.add(LeakyReLU(alpha=.01))\n\n # Add output layer\n model.add(Dense(10, activation='softmax', use_bias=False))\n\n # Compile the model\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n # Print information about the model\n print(model.summary())\n\n X_train, Y_train, X_test, Y_test = load_data()\n X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train,\n test_size=1/6.0,\n random_state=seed)\n\n # Fit the model\n model.fit(X_train, Y_train, batch_size=32, epochs=10, verbose=1)\n\n print(\"Save the model\")\n model_name = __save_trained_model(model, num_hidden, num_neuron)\n\n print(\"Training done\")\n\n return model_name, model", "def training(self):\n \n best_valid_loss = np.inf\n c = 0\n \n self.train_loader, self.test_loader = self.get_train_test_loaders()\n \n print('Training the {} model with the following architecture:'.format(self.model_name))\n print(summary(self.model, (3, self.image_width, self.image_height)))\n print('*'*100)\n print('Starting the training...')\n print('*'*100)\n \n # Create the model save dir if it already doesn't exist\n if not os.path.exists(self.model_save_dir):\n os.makedirs(self.model_save_dir)\n \n for epoch in range(self.n_epochs):\n\n print(f'Epoch: {epoch+1:02}')\n\n start_time = time.time()\n\n train_loss = self.train(self.train_loader)\n valid_loss = self.evaluate(self.test_loader)\n\n epoch_mins, epoch_secs = self.epoch_time(start_time, time.time())\n\n c+=1\n if valid_loss < best_valid_loss:\n best_valid_loss = valid_loss\n torch.save(self.model.state_dict(), os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)))\n c=0\n\n if c>4:\n #decrease lr if loss does not decrease after 5 steps\n self.scheduler.step()\n c=0\n\n print(f'Time: {epoch_mins}m {epoch_secs}s') \n print(f'Train Loss: {train_loss:.3f}')\n print(f'Val Loss: {valid_loss:.3f}')\n print('-'*60)\n print('The best validation loss is', best_valid_loss)\n print('*'*100)", "def train(self):\n self.training = True", "def train_model(train, val, epochs, model, opt, nmode, n, val_nmode1, val_nmode2):\n for epoch in range(epochs):\n opt.zero_grad()\n loss = get_loss(model, train)\n loss.requires_grad = True\n loss.backward()\n opt.step()\n\n if epoch%10 == 0:\n model.eval()\n lossval = get_loss(model, val)\n print(f'Epoch: {epoch} \\tTrain loss: {loss}\\tVal loss: {lossval}\\tVal acc:')\n\n return model", "def train(self):\n for epoch in range(self.current_epoch, self.config.optim.epochs):\n self.current_epoch = epoch\n self.train_one_epoch()\n if epoch % self.config.optim.val_freq == 0:\n self.validate()\n if self.config.optim.auto_schedule:\n self.scheduler.step(self.current_val_loss)\n self.save_checkpoint()", "def train(self):\n\n # Create random sample of size self.n\n inst_set = []\n while len(inst_set) < self.n:\n for inst in self.training_instances:\n if np.random.binomial(1, 0.5) == 1 and len(inst_set) < self.n:\n inst_set.append(inst)\n\n if len(inst_set) == self.n:\n break\n\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n\n # Calculate initial theta\n w, b = self._minimize_loss(fvs, labels)\n\n old_loss = -1\n loss = 0\n while loss != old_loss:\n 
if self.verbose:\n print('Current loss:', loss)\n\n # Calculate minimal set\n loss_vector = fvs.dot(w) + b\n loss_vector -= labels\n loss_vector = list(map(lambda x: x ** 2, loss_vector))\n\n loss_tuples = []\n for i in range(len(loss_vector)):\n loss_tuples.append((loss_vector[i], inst_set[i]))\n loss_tuples.sort(key=lambda x: x[0]) # sort using only first elem\n\n inst_set = list(map(lambda tup: tup[1], loss_tuples[:self.n]))\n\n # Minimize loss\n fvs, labels = TRIMLearner.get_fv_matrix_and_labels(inst_set)\n w, b = self._minimize_loss(fvs, labels)\n\n old_loss = loss\n loss = self._calc_loss(fvs, labels, w, b)\n\n self.w = w\n self.b = b", "def train(self, trainData):\n pass", "def update_train_state(self):\n\n # Save one model at least\n if self.train_state['epoch_index'] == 0:\n # torch.save(self.classifier.state_dict(), self.train_state['model_filename'])\n self.save_model()\n self.train_state['stop_early'] = False\n\n # Save model if performance improved\n elif self.train_state['epoch_index'] >= 1:\n loss_tm1, loss_t = self.train_state['val_loss'][-2:]\n\n # If loss worsened\n if loss_t >= self.train_state['early_stopping_best_val']:\n # Update step\n self.train_state['early_stopping_step'] += 1\n # Loss decreased\n else:\n # Save the best model\n if loss_t < self.train_state['early_stopping_best_val']:\n self.save_model()\n self.train_state['early_stopping_best_val'] = loss_t\n\n # Reset early stopping step\n self.train_state['early_stopping_step'] = 0\n\n # Stop early ?\n self.train_state['stop_early'] = \\\n self.train_state['early_stopping_step'] >= self.args.early_stopping_criteria", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def training_step(self, **kwargs):\n raise NotImplementedError", "def train(self, x={}, **kwargs):\n return 0", "def test_n_and_train(self):\r\n\r\n n = NeuronNetwork(1,\r\n [1],\r\n [[[0.0,0.0]]],\r\n [[0.0]])\r\n\r\n inputs = [[0,0], [0,1], [1,0], [1,1]]\r\n targets = [[0], [0], [0], [1]]\r\n\r\n n.train(inputs,targets,1000,180)\r\n\r\n print(n)\r\n self.assertLess(n.feed_forward([0,0]), [0.001])\r\n self.assertGreater(n.feed_forward([1,0]), [0.001])\r\n self.assertGreater(n.feed_forward([0,1]), [0.001])\r\n self.assertGreater(n.feed_forward([1,1]), [0.9])", "def train(self, current_hyper_params):\n train_loss = 0\n train_n_iter = 0\n # Set model to train mode\n self.model.train()\n # Iterate over train data\n print(\"Iterating over training data...\")\n for i, batch in enumerate(tqdm(self.train_loader)):\n loss = self._train_batch(batch)\n # Statistics\n train_loss += loss.item()\n train_n_iter += 1\n self.stats.train_loss_history.append(train_loss / train_n_iter)", "def train(self) -> Any:\n pass", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def _train_epoch(self, epoch):\n raise NotImplementedError", "def train(self):\n self.epoch = 0\n self.step = 0\n self.start_time = time.time()\n for self.epoch in range(self.num_epochs):\n print(\"EPOHA\")\n self.run_epoch()\n print(\"SAVE MODEL\")\n self.save_model()", "def train(self):\r\n raw_dataset = pd.read_csv(self.datafile, sep = ',', header = 0,\r\n na_values = '?', comment = '\\t',\r\n skipinitialspace = True)\r\n\r\n dataset = raw_dataset.copy()\r\n dataset.tail()\r\n\r\n # Clear unknown values\r\n dataset.isna().sum()\r\n dataset = dataset.dropna()\r\n\r\n # takes a sample of 80% of the data points\r\n train_dataset 
= dataset.sample(frac = 0.8, random_state = 0)\r\n test_dataset = dataset.drop(train_dataset.index)\r\n\r\n # Split features from labels for training and test datasets\r\n train_features = train_dataset.copy()\r\n test_features = test_dataset.copy()\r\n train_labels = train_features.pop('Quality')\r\n test_labels = test_features.pop('Quality')\r\n\r\n # normalize data\r\n normalizer = preprocessing.Normalization()\r\n normalizer.adapt(np.array(train_features))\r\n\r\n # builds the model\r\n def build_and_compile_model(norm):\r\n model = keras.Sequential([\r\n norm,\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(64, activation='relu'),\r\n layers.Dense(1)\r\n ])\r\n\r\n model.compile(loss='mean_absolute_error',\r\n optimizer=tf.keras.optimizers.Adam(0.001))\r\n return model\r\n\r\n deep_neural_network_model = build_and_compile_model(normalizer)\r\n\r\n history = deep_neural_network_model.fit(\r\n train_features, train_labels,\r\n validation_split=0.2,\r\n verbose=0, epochs=100)\r\n\r\n deep_neural_network_model.save('deep_neural_network_model')", "def train(self):\n\n # Ensure everything is sent to GPU if being trained on the cloud\n if self.local == False:\n torch.set_default_tensor_type(torch.cuda.FloatTensor)\n print(\"\\n \\n EVERYTHING TO CUDA \\n \\n\")\n\n # Load weights if applicable\n if self.load_weights == True:\n start_epoch, loss = self.load_checkpoint(\n self.model, self.optimizer, self.model_name\n )\n start_epoch += 1\n print(\"\\n \\n [WEIGHTS LOADED]\")\n else:\n start_epoch = 0\n\n # Start Training Loop\n for epoch in range(start_epoch, self.epochs + 1):\n\n # TRAIN\n if epoch > 0:\n\n # Set model to training mode\n self.model.train()\n\n # Initialize loss and counter that will allow model weights to be saved and overwritten every 10 minibatches\n train_loss = 0\n counter = 0\n\n # Iterate through train set\n for (\n image1,\n image2,\n annotation1,\n annotation2,\n landmark1,\n landmark2,\n ) in tqdm(self.train_loader, desc=\"Train Epoch \" + str(epoch)):\n\n # image tensors and bounding box and label tensors to device\n image1 = image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss from one pass and append to training loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n train_loss += loss.item()\n\n # Clear optimizer gradient\n self.optimizer.zero_grad()\n\n # Backprop\n loss.backward()\n\n # Take a step with optimizer\n self.optimizer.step()\n\n # Save/overwrite model weights every 10 minibatches\n if counter % 10 == 0:\n self.save_checkpoint(\n self.model,\n self.optimizer,\n self.model_name,\n epoch,\n train_loss,\n )\n\n print(\n f\"====> Epoch: {epoch} Average train loss: {train_loss / len(self.train_loader.dataset):.4f}\\n\"\n )\n\n # Save entire model as .pt after every epoch of training\n if self.local == True:\n torch.save(\n self.model,\n os.path.join(self.save_path, self.model_name + \".pt\"),\n )\n else:\n torch.save(\n self.model, self.model_name + \"_epoch\" + str(epoch) + \".pt\"\n )\n print(\"SAVED MODEL EPOCH \" + str(epoch))\n\n # Evaluate on Validation Set after each epoch\n with torch.no_grad():\n\n # Set model to evaluation mode\n self.model.eval()\n\n # Iterate through validation set\n for image1, image2, annotation1, annotation2 in tqdm(\n self.val_loader, desc=\"Validation Epoch \" + str(epoch)\n ):\n\n # Initialize validation loss\n val_loss = 0\n\n # Send images to device\n image1 = 
image1.to(self.device)\n image2 = image2.to(self.device)\n\n # Forward pass of model\n x1_hat, x2_hat, z1, z2 = self.model(image1, image2)\n\n # Calculate loss and append to validation loss\n loss = self.loss_fcn(\n image1, image2, x1_hat, x2_hat, z1.detach(), z2.detach()\n )\n val_loss += loss\n\n print(\n f\"====> Epoch: {epoch} Average test loss: {val_loss / len(self.val_loader.dataset):.4f}\\n\"\n )\n\n print(\"[DONE EPOCH{}]\".format(epoch))\n\n print(\"[DONE TRAINING]\")\n\n # Save model after all epochs are finished\n if self.local == True:\n torch.save(\n self.model, os.path.join(self.save_path, self.model_name + \".pt\")\n )\n else:\n torch.save(self.model, self.model_name + \".pt\")", "def train(self, batch_training=False):\n raise NotImplementedError", "def train(epoch, w1, w2, w3, samples, n_batches, bias_w1, bias_w2, bias_w3, n_hidden_layer, n_hidden_layer_2, \n batch_size, train_data, train_output, valid_data, valid_output, learning_rate, lmbda, l1):\n # Initialise empty error and accuracy arrays\n errors = np.zeros((epoch,))\n accuracies = np.zeros((epoch,))\n\n # If it is only a single layer network initialise variables for calcualting average weight\n if (n_hidden_layer == 0) and (n_hidden_layer_2 == 0):\n tau = 0.01\n average_weight = np.zeros(w1.shape)\n average_weight_plot = np.zeros((epoch,1))\n prev_w1 = np.copy(w1)\n\n # Epoch loop\n for i in range(epoch):\n # Build an array of shuffled indexes\n shuffled_indexes = np.random.permutation(samples)\n\n # Batch loop\n for batch in range(0, n_batches):\n \n # Initialise empty change in weight and bias depending on number of layers\n delta_w1 = np.zeros(w1.shape)\n delta_bias_w1 = np.zeros(bias_w1.shape)\n if n_hidden_layer > 0:\n delta_w2 = np.zeros(w2.shape)\n delta_bias_w2 = np.zeros(bias_w2.shape)\n if n_hidden_layer_2 > 0:\n delta_w3 = np.zeros(w3.shape)\n delta_bias_w3 = np.zeros(bias_w3.shape)\n\n # Extract indexes, and corresponding data from the input and expected output\n indexes = shuffled_indexes[batch*batch_size : (batch+1)*batch_size]\n x0 = train_data[indexes].T\n t = train_output[indexes].T\n\n # Apply input weights to summation of inputs and add bias terms\n h1 = np.matmul(w1, x0) + bias_w1\n # Apply the activation function to the summation\n x1 = relu(h1)\n \n # For first hidden layer\n if n_hidden_layer > 0:\n # Apply input weights to summation of inputs and add bias terms\n h2 = np.matmul(w2, x1) + bias_w2\n # Apply the activation function to the summation\n x2 = relu(h2)\n\n # For second hidden layer\n if n_hidden_layer_2 > 0:\n # Apply input weights to summation of inputs and add bias terms\n h3 = np.matmul(w3, x2) + bias_w3\n # Apply the activation function to the summation\n x3 = relu(h3)\n\n # Error signal\n error = t - x3\n # Local gradient for second hidden layer\n delta_3 = relu_prime(x3) * error\n # Change in weight at second hidden layer\n delta_w3 = (learning_rate / batch_size) * np.matmul(delta_3, x2.T)\n # Change in bias at second hidden layer\n delta_bias_w3 = (learning_rate / batch_size) * np.sum(delta_3, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w3 = delta_bias_w3.reshape(-1, 1)\n\n # Local gradient for first hidden layer\n delta_2 = relu_prime(h2) * np.matmul(w3.T, delta_3)\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n 
delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n\n else:\n # Error signal\n error = t - x2\n # Change in weight at first hidden layer\n delta_2 = relu_prime(x2) * error\n # Change in weight at first hidden layer\n delta_w2 = (learning_rate / batch_size) * np.matmul(delta_2, x1.T)\n # Change in bias at first hidden layer\n delta_bias_w2 = (learning_rate / batch_size) * np.sum(delta_2, axis=1)\n # Reshape to be a matrix rather than column vector\n delta_bias_w2 = delta_bias_w2.reshape(-1, 1)\n\n # Local gradient for input layer\n delta_1 = relu_prime(h1) * np.matmul(w2.T, delta_2)\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n else:\n # Error signal\n error = t - x1\n # Local gradient for input layer\n delta_1 = relu_prime(x1) * error\n # Change in weight at input layer\n delta_w1 = (learning_rate / batch_size) * np.matmul(delta_1, x0.T)\n # Change in bias at input layer\n delta_bias_w1 = (learning_rate / batch_size) * np.sum(delta_1, axis=1)\n # Reshape to be a matrix rather than column vector \n delta_bias_w1 = delta_bias_w1.reshape(-1, 1)\n\n # Checks if L1 error is used as well\n if l1:\n # Takes away the derivative of L1 from the change in weight\n delta_w1 -= (learning_rate / batch_size) * lmbda * np.sign(w1)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w1 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w1)\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w2 -= (learning_rate / batch_size) * lmbda * np.sign(w2)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w2 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w2)\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Takes away the derivative of L1 from the change in weight\n delta_w3 -= (learning_rate / batch_size) * lmbda * np.sign(w3)\n # Takes away the derivative of L1 from the change in bias\n delta_bias_w3 -= (learning_rate / batch_size) * lmbda * np.sign(bias_w3)\n\n\n # Add change in weight\n w1 += delta_w1\n # Add change in bias\n bias_w1 += delta_bias_w1\n\n # Checks if hidden layer present\n if n_hidden_layer > 0:\n # Add change in weight\n w2 += delta_w2\n # Add change in bias\n bias_w2 += delta_bias_w2\n \n # Checks if second hidden layer present\n if n_hidden_layer_2 > 0:\n # Add change in weight\n w3 += delta_w3\n # Add change in bias\n bias_w3 += delta_bias_w3\n\n # Calculate and print average weight (single layer), accuracy and error at the end of the epoch\n print(\"------ Epoch {} ------\".format(i+1))\n if n_hidden_layer == 0:\n # If single layer present calculate average weight change\n average_weight_plot, average_weight = calculate_average_weight(tau, average_weight, average_weight_plot,\n prev_w1, w1, i)\n prev_w1 = np.copy(w1)\n # Calculate accuracy and error based on validation 
data\n accuracies[i], errors[i] = test(valid_data, valid_output, n_hidden_layer, n_hidden_layer_2, w1, w2, w3, \n bias_w1, bias_w2, bias_w3, l1, lmbda)\n print(\"---------------------\")\n print(\"\\n\")\n \n # Plot results for error, accruacy and average weight (single layer)\n #if n_hidden_layer == 0:\n # plot_results(average_weight_plot, 'Epoch', 'Average Weight Update Sum',\n # 'Average Weight Update Sum per Epoch', 'Average Weight Update Sum')\n #plot_results(errors, 'Epoch', 'Error', 'Error on Validation Set per Epoch', 'Error')\n #plot_results(accuracies, 'Epoch', 'Accuracy', 'Accuracy on Validation Set per Epoch', 'Accuracy')\n return w1, w2, w3, bias_w1, bias_w2, bias_w3", "def train(self, trainingData, trainingLabels, validationData, validationLabels ):\n\n self.setWeights(trainingData.shape[1])\n # DO NOT ZERO OUT YOUR WEIGHTS BEFORE STARTING TRAINING, OR\n # THE AUTOGRADER WILL LIKELY DEDUCT POINTS.\n \n # Hyper-parameters. Your can reset them. Default batchSize = 100, weight_decay = 1e-3, learningRate = 1e-2\n \"*** YOU CODE HERE ***\"\n self.batchSize = 100\n self.weight_decay = 1e-3\n self.learningRate = 0.1\n\n def Softmax(x):\n x_max = np.max(x, axis=0)\n x_exp = np.exp(x - x_max)\n x_exp_sum = np.sum(x_exp, axis=0)\n return x_exp / x_exp_sum\n\n for iteration in range(self.max_iterations):\n if iteration % 10 == 0:\n print(\"Starting iteration \", iteration, \"...\")\n self.learningRate *= 0.9\n dataBatches = self.prepareDataBatches(trainingData, trainingLabels)\n for batchData, batchLabel in dataBatches:\n \"*** YOUR CODE HERE ***\"\n Y = np.zeros((len(self.legalLabels), self.batchSize))\n for i in range(self.batchSize):\n Y[batchLabel[i]][i] = 1\n Y_pred = Softmax((batchData @ self.weights + self.bias).T)\n d_weight = ((Y_pred - Y) @ batchData / batchData.shape[0]).T + self.weight_decay * sum(self.weights)\n d_bias = np.mean(Y_pred - Y, axis=1) + self.weight_decay * sum(self.bias)\n self.weights -= d_weight * self.learningRate\n self.bias -= d_bias * self.learningRate", "def train(self, inputs, targets, validation_data, num_epochs, regularizer_type=None):\n for k in xrange(num_epochs):\n loss = 0\n # Forward pass\n a1, probs = self._feed_forward(inputs)\n \n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(inputs, targets, a1, probs,len(inputs))\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n \n\n # validation using the validation data\n\n validation_inputs = validation_data[0]\n validation_targets = validation_data[1]\n\n print 'Validation'\n\n # Forward pass\n a1, probs = self._feed_forward(validation_inputs)\n\n # Backpropagation\n dWxh, dWhy, dbh, dby = self._back_propagation(validation_inputs, validation_targets, a1, probs,len(validation_inputs))\n\n if regularizer_type == 'L2':\n dWhy = self.reg_lambda * self.Why\n dWxh = self.reg_lambda * self.Wxh\n\n # Perform the parameter update with gradient descent\n self.Wxh += -self.learning_rate * dWxh\n self.bh += -self.learning_rate * dbh\n self.Why += -self.learning_rate * dWhy\n self.by += -self.learning_rate * dby \n\n if k%1 == 0:\n print \"Epoch \" + str(k) + \" : Loss = \" + str(self._calc_smooth_loss(loss, len(inputs), regularizer_type))\n\n #self.save('models.pkl')", "def trainer(model, X_train, y_train, X_valid, y_valid, config):\n # loop for number of epochs\n # shuffle inputs based off seed\n # need to shuffle validation 
based off same seed\n # forward prop and get xenloss\n # backprop and update weights\n\n stop_count = config['early_stop_epoch']\n b_size = config[\"batch_size\"]\n stop = config['early_stop']\n\n xnloss = []\n val_loss = [float('inf')]\n test_scores = []\n\n train_accu = []\n valid_accu = []\n\n\n #validation loss increase per epoch counter\n c = -1\n \n for i in range(config[\"epochs\"]):\n np.random.seed(i)\n np.random.shuffle(X_train)\n\n np.random.seed(i)\n np.random.shuffle(y_train)\n\n '''You should average the loss across all mini batches'''\n #means sum up loss from all mini-batches and divide by num_batches\n sums = 0\n\n num_batches = int(X_train.shape[0] / b_size)\n k=0\n for j in range(num_batches):\n # choose minibatch\n x = X_train[j * b_size: (j+1) * b_size]\n targets = y_train[j * b_size: (j+1) * b_size]\n loss, y_pred = model.forward_pass(x, targets)\n loss = loss / (config['batch_size'] * 10) # 10 classes\n sums += loss\n #xnloss.append(loss)\n model.backward_pass()\n k +=1\n # if k < 5 or k > 44:\n # print(targets[0, :])\n # print(y_pred[0, :])\n # print(y_pred[0, :].sum())\n # print(k, '=============')\n\n # mini-batch done here, take avg of loss\n avg_loss = sums / num_batches\n xnloss.append(avg_loss)\n \n ''' epochs loop continues here\n 0) perform validation and compute its (val) loss\n\n 1) calculate test accuracy for every epoch where the\n validation loss is better than the previous validation loss.\n \n 2) Save this result (test score OR loss?) and choose the best \n one when you hit the early stopping criteria.\n\n 3) early stopping - stop training (epochs loop) after 5th consecutive \n increase in validation loss. (Experiment with diff values).\n '''\n\n '''VALIDATION PERFORMACE'''\n v_loss, v_pred = model.forward_pass(X_valid, y_valid)\n v_loss_norm = v_loss / (len(X_valid) * 10)\n\n\n '''TEST ACCURACY''' \n #if val loss better (less) than prev: calculate test scores\n \n if v_loss_norm > val_loss[-1]:\n print(\"val loss going up from last time at epoch i=\", i)\n c += 1\n else:\n c = 0\n '''insert code for test accu here'''\n # val_loss.append(v_loss_norm)\n # else: #else val loss increased, so increment counter\n \n val_loss.append(v_loss_norm)\n \n '''EARLY STOPPING'''\n if stop and c == stop_count:\n print(\"early stopped at epoch =\", i+1)\n break\n\n print(val_loss[1:3])\n print(val_loss, len(xnloss), len(val_loss[1:]))\n #outside of epochs loop\n plt.plot(xnloss, label='training loss')\n plt.plot(val_loss[1:], label='validation loss')\n plt.title(\"losses across all epochs\")\n plt.xlabel(\"epochs\")\n plt.ylabel(\"avg loss for the epoch\")\n plt.legend()\n plt.savefig('raised_a.png')\n plt.show()\n #firstplot.png is training loss against # of batches, in 1 epoch\n #avgacrossepochs.png is avg training loss of all batches, across 50 epochs\n # both_losses = []\n \n # for i in range(len(xnloss)):\n # both_losses.append((val_loss[i], xnloss[i]))\n # print(\"validation errors: \", [(val_loss[i], xnloss[i]) for i in range(len(xnloss))])", "def train(self, iterations: int):\n\n s = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} {}\"\n s_check = \"{:3d} reward {:6.2f}/{:6.2f}/{:6.2f} len {:6.2f} epsilon {:1.3f} saved {} \"\n total_steps = 0\n iter_metrics = []\n for n in range(iterations):\n r_min, r_mean, r_max, iter_steps = self.train_iter()\n iter_metrics.append((r_min, r_mean, r_max))\n total_steps += iter_steps\n\n if n == int(iterations / 2):\n self.steps_to_update_target_model = int(self.steps_to_update_target_model / 2)\n\n # 
checkpointing & logging\n s_print = s\n file_name = \"\"\n if n % self.checkpoint_freq == 0:\n file_name = f'my_dqn_{n}.pth'\n torch.save(self.target_dqn.state_dict(), os.path.join(self.checkpoint_path, file_name))\n s_print = s_check\n\n if self.verbose:\n print(s_print.format(\n n + 1,\n r_min,\n r_mean,\n r_max,\n total_steps,\n self.e_greedy,\n file_name\n ))\n iter_min = np.mean([x[0] for x in iter_metrics])\n iter_mean = np.mean([x[1] for x in iter_metrics])\n iter_max = np.mean([x[2] for x in iter_metrics])\n return iter_min, iter_mean, iter_max", "def train(self) -> None:\n for _ in range(self.epochs):\n for x, y in zip(self.x_train, self.y_train):\n\n weights_gradient = [\n None for weight in self.weights\n ] # Initializing weight gradients for each layer which are going to be used to update the weights in the network.\n\n biases_gradient = [\n None for bias in self.biases\n ] # Initializing bias gradients for each layer which are going to be used to update the biases in the network.\n\n activation = np.expand_dims(x, axis=1)\n activations = [\n activation\n ] # A list for storing all the activations when doing forward propagation\n\n values = (\n []\n ) # A list for storing weight * x + bias values without applying the activation function.\n\n for weight, bias in zip(self.weights, self.biases):\n value = np.dot(weight, activation) + bias\n values.append(value)\n\n activation = self.sigmoid(value)\n activations.append(activation)\n\n \"\"\"\n Calculating the error delta from output layer to be propagated backwards in the network. It is calculated\n by taking the derivative of the loss function, which in our case is MSE, and multiply with derivate of\n the sigmoid function applied on the value that entered the last layer of the network.\n \"\"\"\n\n error_delta = (activations[-1] - y) * self.sigmoid_derivative(\n values[-1]\n )\n\n weights_gradient[-1] = np.dot(\n error_delta, activations[-2].T\n ) # Setting error delta multiplied with the second last layer activations as weight gradient for last layer.\n\n biases_gradient[-1] = error_delta # Setting error delta as bias gradient for last layer.\n\n \"\"\"\n This for-loop does the same as the code from line 128 - 136, but for each layer in the network.\n Thus, the error is propagated backwards in the network, and the gradients for each layer are set.\n \"\"\"\n for layer in range(2, self.total_layers):\n error_delta = np.dot(\n self.weights[-layer + 1].T, error_delta\n ) * self.sigmoid_derivative(values[-layer])\n\n weights_gradient[-layer] = np.dot(\n error_delta, activations[-layer - 1].T\n )\n\n biases_gradient[-layer] = error_delta\n\n self.weights = [\n weight - self.lr * weight_gradient\n for weight, weight_gradient in zip(self.weights, weights_gradient)\n ] # Updating the weights of the network by w_i - learning_rate * nabla w_i (w_i is the weight matrix at layer i, and nabla w_i is weight gradient.)\n\n self.biases = [\n bias - self.lr * bias_gradient\n for bias, bias_gradient in zip(self.biases, biases_gradient)\n ] # Updating the biases of the network by b_i - learning_rate * nabla b_i (b_i is the bias vector at layer i, and nabla b_i is weight gradient.)", "def _train_model(self):\n self.experiment = EpisodicExperiment(self.task, self.agent)\n n_epochs = int(self.rl_params.n_training_episodes / self.rl_params.n_episodes_per_epoch)\n logger.debug(\"Fitting user model over {} epochs, each {} episodes, total {} episodes.\"\n .format(n_epochs, self.rl_params.n_episodes_per_epoch, 
n_epochs*self.rl_params.n_episodes_per_epoch))\n for i in range(n_epochs):\n logger.debug(\"RL epoch {}\".format(i))\n self.experiment.doEpisodes(self.rl_params.n_episodes_per_epoch)\n self.agent.learn()\n self.agent.reset() # reset buffers", "def train(self):\n self.dataGenerator.printDataStatistics()\n sE = len(self.dataGenerator.ids[\"train\"])// 32\n sV = len(self.dataGenerator.ids[\"validation\"])// 32\n self.model.fit_generator(\n generator=self.dataGenerator.trainingGenerator,\n steps_per_epoch= sE,\n epochs=2,\n validation_data=self.dataGenerator.validationGenerator,\n validation_steps=sV,\n # use_multiprocessing=True,\n # workers=2,\n )", "def train(self, train_x, train_y, optimzer='adam'):\n self.history = self.model.fit(train_x, train_y, epochs=self.epochs, batch_size=self.batch_size,\n verbose=self.verbose, shuffle=False)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train(self, x, t):\n self.model.fit(x, t)", "def train_epoch(self, epoch_num: int) -> float:\n self.model.train()\n epoch_loss = 0.0\n # hidden_start = torch.zeros(self.batch_size, self.rnn_size)\n # for batch_num, (x, y) in enumerate(make_batches(self.train_data,\n # self.batch_size,\n # self.max_len)):\n\n for batch_num, batch_tuple in enumerate(self.train_data):\n print('batch: ', batch_num)\n # reset gradients in train epoch\n self.optimizer.zero_grad()\n x = len(batch_tuple[0])\n y = len(batch_tuple[0][0])\n # compute hidden states\n # batch x timesteps x hidden_size\n x, y = batch_tuple\n # x = x.to(self.device)\n # y = y.to(self.device)\n hidden_states = self.model(x)\n # compute unnormalized probabilities\n # batch x timesteps x vocab_size\n # logits = self.model.get_logits(hidden_states)\n\n # compute loss\n # scalar\n batch_loss = self.model.get_loss(hidden_states, y)\n epoch_loss += batch_loss.item()\n\n # backpropagation (gradient of loss wrt parameters)\n batch_loss.backward()\n\n # clip gradients if they get too large\n torch.nn.utils.clip_grad_norm_(list(self.model.parameters()),\n self.max_grad_norm)\n\n # update parameters\n self.optimizer.step()\n\n # we use a stateful RNN, which means the first hidden state for the\n # next batch is the last hidden state of the current batch\n # hidden_states.detach_()\n # hidden_start = hidden_states[:,-1,:] # add comment\n if batch_num % 100 == 0:\n print(\"epoch %d, %d/%d examples, batch loss = %f\"\n % (epoch_num, (batch_num + 1) * self.batch_size,\n self.num_train_examples, batch_loss.item()))\n epoch_loss /= (batch_num + 1)\n\n return epoch_loss", "def train(self):\n loss_func = torch.nn.MSELoss()\n training_done = False\n total_loss_array = []\n while not training_done:\n # sample a timestep before the cutoff for cross_validation\n rand_timestep_within_sched = np.random.randint(len(self.X_train_naive))\n input_nn = self.X_train_naive[rand_timestep_within_sched]\n\n # iterate over pairwise comparisons\n if torch.cuda.is_available():\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)).cuda())\n truth_nn = input_nn.clone()\n else:\n input_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n truth_nn = Variable(torch.Tensor(np.asarray(input_nn).reshape(1, 242)))\n\n self.opt.zero_grad()\n output = self.model.forward(input_nn)\n\n loss = loss_func(output, truth_nn)\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), 0.5)\n self.opt.step()\n\n 
total_loss_array.append(loss.item())\n\n total_iterations = len(total_loss_array)\n\n if total_iterations % 1000 == 999:\n print('current timestep:', total_iterations, 'avg loss for last 500: ', np.mean(total_loss_array[-500:]))\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')\n\n if total_iterations > 2000000:\n training_done = True\n torch.save({'nn_state_dict': self.model.state_dict()},\n '/home/rohanpaleja/PycharmProjects/bayesian_prolo/scheduling_env/additions_for_HRI/models/Autoencoder' + str(self.num_schedules) + '.tar')", "def train_network(self):\n if self.trainData:\n if self.verbose:\n print('Started training...')\n\n for epoch in range(135):\n pass\n # save the model\n else:\n if self.verbose:\n print('No train data available')", "def train(epochs, batch_size, lr, verbose):\n # autograd globally off\n torch.set_grad_enabled(False)\n # generate training and testing datasets\n train_data, train_label = generate_data()\n test_data, test_label = generate_data()\n # normalize data be centered at 0\n train_data, test_data = normalize(train_data, test_data)\n\n if verbose:\n print(\"--- Dataset ---\")\n print(\"Train X: \", train_data.size(), \" | Train y: \", train_label.size())\n print(\" Test X: \", test_data.size(), \" | Test y: \", test_label.size())\n\n layers =[]\n # input layer (2 input units)\n linear1 = Linear(2, 25, bias= True, weight_init=xavier_uniform)\n\n # 3 hidden layers (each 25 units)\n linear2 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear3 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n linear4 = Linear(25, 25, bias= True, weight_init=xavier_uniform)\n\n # output layer (2 output units)\n linear5 = Linear(25, 2, bias= True, weight_init=xavier_uniform)\n\n\n layers.append(linear1)\n layers.append(Relu())\n layers.append(linear2)\n layers.append(Relu())\n layers.append(linear3)\n layers.append(Relu())\n layers.append(linear4)\n layers.append(Tanh())\n layers.append(linear5)\n\n model = Sequential(layers)\n if verbose: print(\"Number of model parameters: {}\".format(sum([len(p) for p in model.param()])))\n\n criterion = MSE()\n optimizer = SGD(model, lr=lr)\n\n train_losses, test_losses = [], []\n train_accuracies, test_accuracies = [], []\n train_errors, test_errors = [], []\n\n if verbose: print(\"--- Training ---\")\n for epoch in range(1, epochs+1):\n if verbose:print(\"Epoch: {}\".format(epoch))\n\n # TRAINING\n for batch_idx in range(0, train_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = model.forward(train_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, train_label.narrow(0, batch_idx, batch_size))\n train_losses.append(loss)\n if verbose: print(\"Train Loss: {:.2f}\".format(loss.item()))\n\n # put to zero weights and bias\n optimizer.zero_grad()\n\n ## Backpropagation\n # Calculate grad of loss\n loss_grad = criterion.backward()\n\n # Grad of the model\n model.backward(loss_grad)\n\n # Update parameters\n optimizer.step()\n\n train_prediction = model.forward(train_data)\n acc = accuracy(train_prediction, train_label)\n train_accuracies.append(acc)\n train_errors.append(1-acc)\n if verbose: print(\"Train Accuracy: {:.2f}\".format(acc.item()))\n\n # EVALUATION\n for batch_idx in range(0, test_data.size(0), batch_size):\n # axis 0, start from batch_idx until batch_idx+batch_size\n output = 
model.forward(test_data.narrow(0, batch_idx, batch_size))\n\n # Calculate loss\n loss = criterion.forward(output, test_label.narrow(0, batch_idx, batch_size))\n test_losses.append(loss)\n if verbose: print(\"Test Loss: {:.2f}\".format(loss.item()))\n\n test_prediction = model.forward(test_data)\n acc = accuracy(test_prediction, test_label)\n test_accuracies.append(acc) \n test_errors.append(1-acc)\n if verbose: print(\"Test Accuracy: {:.2f}\".format(acc.item()))\n\n return train_losses, test_losses, train_accuracies, test_accuracies, train_errors, test_errors", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train(self):\n self.mode = \"train\"\n self.online_net.train()", "def train_model(self):\n self.best_epoch = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n self.best_f1 = {'auto':{}, 'coffee':{}, 'movie':{}, 'pizza':{}, 'restaurant':{}, 'uber':{} }\n for t in self.topic:\n if t != 'other':\n for st in self.topic2sub_topic[t].keys():\n\n print(\"Now training the classsfier for topic: \", t, \" ; intent: \", st)\n print(128 * \"=\")\n print(\"Input: str; Output: boolean(if the str contents the intent: \", st, \" ).\")\n print(64 * \"-\")\n X, y = self.get_data(t, st)\n print(\"data_loaded!\")\n X_train, X_dev, y_train, y_dev = self.my_train_test_split(X, y)\n best_f1 = 0\n for e in range(1,10):\n model = tf.keras.Sequential()\n model.add(tf.keras.layers.InputLayer(input_shape=[1024, ]))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(64, activation='relu'))\n model.add(tf.keras.layers.Dense(1, activation='relu'))\n model.compile(loss='mean_squared_logarithmic_error', optimizer='adam', metrics=[metrics.mae, metrics.categorical_accuracy])\n model.fit(X_train, y_train, epochs=e, batch_size=128)\n print(\"f1_score on dev set: \")\n f1 = self.f1_score_model(model, X_dev, y_dev)[0]\n if f1 > best_f1:\n self.model_zoo[t][st] = model\n model.save_weights(self.trained_w_folder+\"/%s/%s.h5\" %(t,st))\n self.best_epoch[t][st] = e\n self.best_f1[t][st] = f1\n best_f1 = f1\n\n print(64*\"=\")\n print()" ]
[ "0.7506583", "0.73702186", "0.7218326", "0.71550184", "0.7129135", "0.7113297", "0.7095796", "0.7077056", "0.7067491", "0.70636654", "0.70636654", "0.70636654", "0.70636654", "0.70636654", "0.70617354", "0.7049623", "0.703932", "0.70169145", "0.70089275", "0.69914174", "0.6968483", "0.69475514", "0.6945469", "0.6902776", "0.68845636", "0.6857797", "0.6855952", "0.6845244", "0.6822343", "0.68032074", "0.6796703", "0.6795639", "0.67946005", "0.67710704", "0.67663795", "0.67567074", "0.6725762", "0.67228395", "0.6719924", "0.6719893", "0.6719555", "0.6679611", "0.66552234", "0.6647868", "0.6647074", "0.66436833", "0.66366273", "0.6627547", "0.66113144", "0.6608865", "0.6605733", "0.6605051", "0.659692", "0.6595502", "0.65848315", "0.6584245", "0.658393", "0.65766996", "0.65734327", "0.656626", "0.65594244", "0.6548578", "0.654663", "0.6532964", "0.65264726", "0.65183437", "0.6513004", "0.6507027", "0.65014935", "0.64786434", "0.6477535", "0.6473398", "0.64598227", "0.64598227", "0.64598227", "0.64598227", "0.6452954", "0.64495456", "0.64434224", "0.64430285", "0.6430459", "0.64228153", "0.641976", "0.64158475", "0.6414544", "0.64137757", "0.6413077", "0.64102554", "0.6409902", "0.64086765", "0.64086765", "0.64086765", "0.64086765", "0.64086765", "0.64085543", "0.6406819", "0.6405903", "0.6405657", "0.64043385", "0.64043385", "0.6384157" ]
0.0
-1
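The negatives above are near-uniform variations on a single epoch-loop skeleton (forward pass, loss, zero gradients, backward, optimizer step). A minimal self-contained sketch of that shared pattern, assuming PyTorch; the names here (`train_one_epoch`, the synthetic smoke test) are illustrative and appear in none of the entries above:

import torch
from torch import nn

def train_one_epoch(model, loader, optimizer, loss_fn, device="cpu"):
    # The skeleton common to most negatives above:
    # forward -> loss -> zero grads -> backward -> optimizer step.
    model.train()
    total = 0.0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        optimizer.zero_grad()
        loss = loss_fn(model(x), y)
        loss.backward()
        optimizer.step()
        total += loss.item()
    return total / max(len(loader), 1)

# Smoke test on synthetic data: any iterable of (x, y) tensor pairs works as `loader`.
model = nn.Linear(4, 1)
batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(5)]
opt = torch.optim.SGD(model.parameters(), lr=0.01)
print(train_one_epoch(model, batches, opt, nn.MSELoss()))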
Fastest way to just consume an iterator.
from collections import deque
def consume(iterator): deque(iterator, maxlen=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def consume(iterator, n=None):\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n collections.deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def consume(iterator, n=None):\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def consume(iterator, n=None, next=next, islice=islice, deque=deque):\n if n is not None:\n next(islice(iterator, n, n), None)\n else:\n exhaust(iterator)", "def one_shot_iterator(dataloader):\n while True:\n for data in dataloader:\n yield data", "def iterate(itr: AnyIterable) -> AsyncIterator[T]:\n if isinstance(itr, AsyncIterator):\n return itr\n\n async def gen():\n for i in itr:\n yield i\n\n return gen()", "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "def __next__(self):\n return next(self.iterator)", "def __iter__(self):\n cursor = self._front\n while not cursor is None:\n yield cursor.data\n cursor = cursor.next", "async def borrow(iterator: AsyncIterator[T]) -> AsyncGenerator[T, None]:\n async for item in iterator:\n yield item", "def iter_no_cache(query_set):\n if query_set._batch_size is None:\n query_set = query_set.batch_size(1000)\n\n next = query_set.__next__\n\n while True:\n try:\n yield next()\n except StopIteration:\n return", "def _iter_from_cache(self, offset=None):\n if offset is None:\n # Get the whole amount of data\n self.cache.seek(self.spindle)\n data = self.cache.read(self.max_cache-self.spindle)\n self.cache.seek(0)\n data += self.cache.read(self.spindle)\n\n elif offset > self.spindle:\n # Spindle has moved back past 0 (potentially multiple times,\n # causing bugs - though I don't think this edge case matters\n # enough to fix it as doing so would need more\n # complex logic/reduce performance).\n # Wrap around!\n self.cache.seek(offset)\n data = self.cache.read(self.max_cache-offset)\n self.cache.seek(0)\n data += self.cache.read(self.spindle)\n\n elif offset < self.spindle:\n # Just return the amount up to the spindle\n self.cache.seek(offset)\n data = self.cache.read(self.spindle-offset)\n\n else:\n # Nothing to return\n return\n\n self.cache.seek(self.spindle)\n for x, line in enumerate(data.split(b'\\n')):\n if not x:\n continue\n yield line", "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def __iter__(self):\n for item in self._reader:\n yield item", "def __next__(self):\n if(self._isDone()):\n raise StopIteration\n return self._next()", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def get_iter(self, numPerIter=None):\n if numPerIter == None:\n numPerIter = self.chunk_size\n while True:\n els = self.read(numPerIter)[:]\n if els.shape[0] == 0:\n break\n yield els", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def safe_iterator(i):\n return i or []", "def get_iterator(dataset):\n if context.executing_eagerly():\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n iterator = dataset_ops.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator", "def __next__(self):\n if self._cursor is None:\n 
raise StopIteration(\"Iterator has not been initialized. Use `iter` first.\")\n\n return self._cursor.next()", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def get_next_as_optional(iterator):\n return iterator.get_next_as_optional()", "def get_next(self):\n raise NotImplementedError(\"Iterator.get_next()\")", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def iter_any(self) -> AsyncStreamIterator[bytes]:\n ...", "def iterator_copy(thing):\n # Even though normal copies are discouraged they should be possible.\n # Cannot do \"list\" because it may be infinite :-)\n next(copy.copy(thing))", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def _next(self):\n i = 0\n while i < self.size:\n if self.data[i] != None:\n yield self.data[i]\n i += 1", "def __next__(self):\n return next(self.iter)", "def CacheNext(item):\n if iteration_utilities.EQ_PY2:\n def subiter():\n def newnext(self):\n raise CacheNext.EXC_TYP(CacheNext.EXC_MSG)\n Iterator.next = newnext\n yield item\n\n # Need to subclass a C iterator because only the \"tp_iternext\" slot is\n # cached, the \"__next__\" method itself always behaves as expected.\n class Iterator(filter):\n pass\n else:\n def subiter():\n def newnext(self):\n raise CacheNext.EXC_TYP(CacheNext.EXC_MSG)\n Iterator.__next__ = newnext\n yield item\n\n # Need to subclass a C iterator because only the \"tp_iternext\" slot is\n # cached, the \"__next__\" method itself always behaves as expected.\n class Iterator(filter):\n pass\n\n return Iterator(iteration_utilities.return_True, subiter())", "def __next__(self) -> T:\n buffer = self._buffer\n\n if buffer:\n return buffer.popleft()\n else:\n return next(self._iterator)", "def _Peek(self):\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._peek_seen = True\n return None\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, IndexError, KeyError, TypeError):\n pass\n # Object is not iterable -- treat it as the only item.\n return self._iterable", "def next(self):\n if not self._peek_seen:\n self._peek_seen = True\n return self._peek\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except AttributeError:\n pass\n except (AttributeError, IndexError, KeyError, TypeError):\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n raise StopIteration", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def _iterator_unknown_size(self) -> Iterator[int]:\n raise NotImplementedError", "def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. 
Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def _NextItem(self):\n if self._injected:\n self._injected = False\n return self._injected_value\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._tap.Done()\n raise\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, KeyError, TypeError):\n pass\n except IndexError:\n self._tap.Done()\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n if self._iterable is None or self._stop:\n self._tap.Done()\n raise StopIteration\n self._stop = True\n return self._iterable", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def with_iter(contextmanager):\n with contextmanager as iterable:\n for item in iterable:\n yield item", "def nextIter(self):\n\t\tpass", "def __next__(self):\n _complain_ifclosed(self._closed)\n r = self.read()\n if not r:\n raise StopIteration\n return r", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def __iter__(self):\r\n return self._iterate()", "def iterfetch(cursor, batchsize=1000):\n\t# type: (Cursor, int) -> Iterator[Any]\n\n\twhile True:\n\t\tresults = cursor.fetchmany(batchsize)\n\t\tif not results:\n\t\t\tbreak\n\t\tfor result in results:\n\t\t\tyield result", "def __call__(self, start):\r\n return self._iterate(start)", "def next(self):\n try:\n ret = PymongoCursor.next(self)\n except StopIteration:\n self.__fullcache = True\n raise\n self.__itercache.append(ret)\n return ret", "def queue_iter(queue: Queue) -> Generator[T, None, None]:\n while True:\n val = queue.get()\n yield val", "def __iter__(self) -> Iterator[int]:\n return iter(self._cache)", "def test_iteriter_op_1():\n\n @ops.iteriter_op\n def f(x):\n return iter([4, 5, 6])\n\n result = f(iter([1, 2, 3])) # Passing in an iterator, as expected\n\n assert(isinstance(result, collections.abc.Iterator)), f\"{result}\"\n assert(list(result) == [4, 5, 6])", "def consume(iterable, keep_last=0):\n return _coconut.collections.deque(iterable, maxlen=keep_last)", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def _next_exhausted(self):\n\n raise StopIteration() from None", "def next(self):\n try:\n return self.queue.get()\n except Empty:\n raise StopIteration", "def only(it: Union[Iterator[_T], Iterable[_T]]) -> _T:\n if hasattr(it, \"__next__\"):\n # noinspection PyTypeHints\n iterator: Iterator[_T] = it # type: ignore\n else:\n iterator = iter(it)\n\n try:\n ret = next(iterator)\n except StopIteration:\n raise ValueError(\"Expected only a single element in an iterable, but got none\")\n\n second_element = next(iterator, _SENTINEL)\n if second_element != _SENTINEL:\n raise ValueError(\"Expected only a single element in iterable, but got at least 2\")\n return ret", "def Iterator():\n return _table.Iterator()", 
"def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def __iter__(self):\n return self.next()", "def scan(func, iterable, start=_EMPTY, *, echo_start=True):\n it = iter(iterable)\n if start is _EMPTY:\n start = next(it)\n if echo_start:\n yield start\n for item in it:\n start = func(start, item)\n yield start", "def _iter2aiter(iter):\n\n def _consume(loop, iter, q):\n for item in iter:\n q.put(item)\n q.put(SENTINEL)\n\n async def _aiter():\n loop = asyncio.get_running_loop()\n q = janus.Queue(maxsize=DEFAULT_INFLIGHT_CHUNKS)\n try:\n fut = loop.run_in_executor(None, lambda: _consume(loop, iter, q.sync_q))\n while True:\n item = await q.async_q.get()\n if item is SENTINEL:\n break\n yield item\n q.async_q.task_done()\n await fut\n finally:\n q.close()\n await q.wait_closed()\n\n return _aiter()", "async def locked_next():\n async with lock:\n try:\n next_value = await anext(iterator)\n except StopAsyncIteration:\n return None\n return next_value", "def _peek(iter: Iterable, index: int) -> Optional[any]:\r\n if index < len(iter):\r\n return iter[index]\r\n return None", "def __init__(self, iterator):\n self._iter = iterator", "def is_iterator(x):\n if sys.version_info >= (2, 7):\n return isinstance(x, collections.Iterator)\n return isinstance(x, collections.Iterator) and hasattr(x, '__iter__')", "def __iter__(self):\r\n return self.deque.__iter__()", "def ReadAll(buf: IO[bytes]) -> Iterator[bytes]:\n while True:\n chunk = Read(buf)\n if chunk is None:\n return\n\n yield chunk", "def cooperative_iter(citer):\n try:\n for chunk in citer:\n sleep(0)\n yield chunk\n except Exception as err:\n msg = (_(\"Error: cooperative_iter exception %(error)s\") %\n dict(error=err))\n LOG.error(msg)\n raise", "def next(self):\n while True:\n source_next = self.source_iter.next() \n if self.filter_func is None or self.filter_func(source_next):\n if self.casting_func is not None:\n return self.casting_func(source_next)\n else:\n return source_next", "async def anext(iterator):\n return await iterator.__anext__()", "async def anext(iterator):\n return await iterator.__anext__()", "def itime(iterable, seconds):\n items = iter(iterable)\n\n end = time.time() + seconds\n yield items.next()\n\n for item in itertools.takewhile(lambda _: time.time() < end, items):\n yield item", "def iter_py():\n s = \"Hello, World!\"\n it = iter(s)\n while True:\n try:\n print(next(it))\n except:\n break\n\n ## Output\n # H\n # e\n # l\n # l\n # o\n # ,\n #\n # W\n # o\n # r\n # l\n # d\n # !", "def get_iterator(self, name):\n return self._iterators[name]", "def __init__(self, iterator):\n self.iter = iterator\n self.tmpNext = iterator.next() if iterator.hasNext() else None", "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "def __iter__(self):\n while not self.accesses.empty():\n yield self.accesses.get()", "def uniform_iterator(sequence):\n\n if isinstance(sequence, abc.Mapping):\n return six.iteritems(sequence)\n else:\n return enumerate(sequence)", "def cohere_stream(stream):\n if isinstance(stream, IterIO):\n return stream\n return IterIO(stream)", "def __iter__(self):\n try:\n self._load(False)\n except KeyError:\n return iter([])\n\n return self._iter(self.head - self.count, self.count)", "def is_iterator(obj):\n cls = obj.__class__\n return hasattr(cls, '__next__') and not hasattr(cls, '__len__')", "def _iterator_given_size(self, size: int) -> Iterator[int]:\n raise NotImplementedError", "def 
__iter__(self):\n self._fetch_all()\n return iter(self._result_cache)", "def section_4_7():\n import itertools\n\n def test1():\n def count(n):\n while True:\n yield n\n n += 1\n\n c = count(0)\n for x in itertools.islice(c, 10, 20):\n print(x)\n\n test1()", "def __iter__(self):\n return iter(self._cached)", "def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt", "def slice(iterable, *args):\n return iter(it.islice(iterable, *args))", "def next_chunk(self):\n if self._unconsumed:\n data = self._unconsumed.pop()\n else:\n data = self._iterator.next() # Might raise StopIteration\n self._pos += len(data)\n return data", "def iter(self):\n s = self.first\n while True:\n yield s\n s = s.__next__\n if s == self.first:\n return", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def test_iter_method(self):\n ref = mock.Mock()\n ref.side_effect = [{'rows': [1,2,3]}, {'rows': []}]\n rslt = Result(ref)\n collection = [x for x in rslt]\n self.assertEqual(collection, [1,2,3])\n\n run_iter = lambda x: [y for y in x]\n\n rslt = Result(ref, skip=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)\n\n rslt = Result(ref, limit=1000)\n self.assertRaises(CloudantArgumentError, run_iter, rslt)", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def dispatch_next(self):\r\n self._dispatch_amount += 1\r\n while self._dispatch_amount:\r\n try:\r\n # XXX: possible race condition shuffling the order of\r\n # dispatches in the next two lines.\r\n func, args, kwargs = next(self._original_iterable)\r\n self.dispatch(func, args, kwargs)\r\n self._dispatch_amount -= 1\r\n except ValueError:\r\n \"\"\" Race condition in accessing a generator, we skip,\r\n the dispatch will be done later.\r\n \"\"\"\r\n except StopIteration:\r\n self._iterating = False\r\n self._original_iterable = None\r\n return", "def enumerate(self):\n\n done = False\n while not done:\n mcs = self.compute()\n\n if mcs != None:\n yield mcs\n else:\n done = True", "def get_iter(self, reader: DataReader):\n\n if reader is None:\n return None\n\n xs, ys = get_dataset(reader)\n\n return self.prepare_dataset(xs, ys)", "def file_reading_iterator_raw(filename, options='r'):\n # actual loop\n with open(filename, options) as f:\n while True:\n line = f.readline()\n if not line:\n break\n # return line\n yield line", "def read_on(reader, f):\n while True:\n try:\n line = reader(f)\n except StopIteration:\n break\n\n if line is not None:\n yield line", "def read_iter_from_file(path_to_file_read):\n with open(path_to_file_read, \"r\") as fichero:\n line = fichero.readline().strip()\n while line:\n yield line\n line = fichero.readline().strip()" ]
[ "0.72329783", "0.71654475", "0.67598706", "0.606488", "0.60311234", "0.60123855", "0.5925557", "0.592464", "0.5869508", "0.5843964", "0.5843164", "0.58100826", "0.57714164", "0.5746189", "0.5739034", "0.5733739", "0.57284236", "0.5727265", "0.5726913", "0.5722843", "0.57191473", "0.5713289", "0.5691649", "0.56792927", "0.5672827", "0.5643873", "0.5632116", "0.55895126", "0.55862975", "0.55814505", "0.5576382", "0.5558363", "0.5556996", "0.55545294", "0.5551774", "0.5546963", "0.5546131", "0.5540399", "0.5540399", "0.5540399", "0.552863", "0.5516656", "0.55088925", "0.5495546", "0.5488417", "0.54828876", "0.54748183", "0.5474163", "0.5458508", "0.5452063", "0.54505134", "0.54497546", "0.54374593", "0.543637", "0.5429466", "0.5405434", "0.5402769", "0.5388035", "0.5382464", "0.53792596", "0.5368272", "0.535616", "0.5337097", "0.53350914", "0.53345793", "0.5325567", "0.5318317", "0.53135157", "0.53092045", "0.5308221", "0.530633", "0.53061146", "0.53061146", "0.53046405", "0.52993983", "0.5297952", "0.52975965", "0.5290019", "0.5270001", "0.526507", "0.5264395", "0.5258243", "0.5251604", "0.5246796", "0.5242029", "0.523984", "0.52369803", "0.52368116", "0.5230874", "0.5228873", "0.5228539", "0.52234966", "0.522092", "0.5214276", "0.5210443", "0.52078104", "0.5206895", "0.52068657", "0.5206484", "0.5204696" ]
0.7133738
2
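The query/document pair above, together with the itertools-recipes variants among its negatives, consolidates into one runnable sketch; note that the `n` parameter and the islice branch come from those negatives, not from the document itself:

from collections import deque
from itertools import islice

def consume(iterator, n=None):
    # Exhaust the iterator entirely, or advance it by n steps, at C speed.
    if n is None:
        deque(iterator, maxlen=0)  # a zero-length deque discards every item
    else:
        next(islice(iterator, n, n), None)  # empty slice starting at position n

it = iter(range(10))
consume(it, 3)
print(next(it))          # 3 -- the first three items were skipped
consume(it)
print(next(it, "done"))  # "done" -- the iterator is now exhausted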
Create an instance of this store with a loader object.
def __init__(self, loader):
    self.loader = loader
    self.models = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, loader, *args, **kw):\n self._loader = loader", "def create(self):\n\t\tif self.isInitialized():\n\t\t\tself.Loaded = self.loader.create()", "def __init__(self, loader, *args, **kw):\r\n self._loader = loader", "def create_loader(self, *args, **kwargs):\n def loader():\n return self.load(*args, **kwargs)\n return loader", "def __new__(cls, *args, **kwargs):\n # Create an ArgumentSink matching the loader’s\n # initialization arguments:\n key = ArgumentSink(*args, **kwargs)\n \n # If a loader already matches these arguments,\n # return it immediately:\n if key in cls.instances:\n return cls.instances[key]\n \n # Create and register a new loader, as per the\n # arguments with which to initialize this new\n # loader instance:\n try:\n cls.instances[key] = instance = super().__new__(cls, *args, **kwargs)\n except TypeError:\n cls.instances[key] = instance = super().__new__(cls)\n \n # Return the newly created instance:\n return instance", "def __init__(self, loader, id):\n\n self.loader = loader\n self.id = id", "def create(self, loader, cache=True):\n self.loader = loader\n try:\n loader_io = loader.open(cache=cache)\n except Exception as err:\n logger.error('Load %s error: %s' % (self, err))\n return\n try:\n for line in iter(loader_io.readline, ''):\n line = line.strip()\n if not line or line.startswith('#'):\n continue\n ip, name = line.split()[:2]\n dn = self.get_subdomain(name)\n if ip.startswith('@'):\n self.records[dn] += [dnslib.CNAME(ip[1:])]\n elif ':' in ip:\n self.records[dn] += [dnslib.AAAA(ip)]\n else:\n self.records[dn] += [dnslib.A(ip)]\n except Exception as err:\n logger.error('Load %s error: %s (%s)' % (self, err, line))", "def loader(self, *args, **kwargs):\n raise NotImplementedError('Cannot instantiate builder base class.')", "def to_DataLoader(self, **kwargs):\r\n return DataLoader(self, **kwargs)", "def __init__(self, loaders = None):\n if loaders is None:\n loaders = list();\n self.__loaders = list();\n for loader in list(loaders):\n self.addLoader(loader);", "def _get_or_create_data_loader(\n cls, root: 'Any', model: 'Any', info: 'ResolveInfo', args: dict\n ) -> ModelLoader:\n context: 'Union[dict, object]' = info.context\n\n if isinstance(context, dict):\n try:\n data_loaders = context[cls.dataloaders_field]\n except KeyError:\n data_loaders = {}\n context[cls.dataloaders_field] = data_loaders\n\n else:\n data_loaders = getattr(context, cls.dataloaders_field, None)\n if data_loaders is None:\n data_loaders = {}\n setattr(info.context, cls.dataloaders_field, data_loaders)\n\n # Unique dataloader key for context.\n data_loader_key = tuple((p for p in info.path if isinstance(p, str)))\n\n try:\n current_data_loader: ModelLoader = data_loaders[data_loader_key]\n except KeyError:\n current_data_loader = ModelLoader(type(root), model, info, args)\n data_loaders[data_loader_key] = current_data_loader\n\n return current_data_loader", "def loader(self):\n return self.loader_class()", "def test_loader(cls):\r\n return _test_loader_factory(cls)", "def __init__(self, path):\n super().__init__()\n self.__cache = StorageCache()\n self.__db = InternalStorage(path)\n self.__serializer = Serializer()", "def instantiate(cls, data_store, identifier):\n pass", "def from_disk(cls, path: Path, loader_func: Callable = read_jsonl):\n return Dataset(\n loader_func(path / 'train.jsonl'),\n loader_func(path / 'dev.jsonl'),\n test=loader_func(path / 'test.jsonl')\n )", "def __init__(self, loader, validator, parser):\n if not isinstance(loader, SAMLMetadataLoader):\n raise 
ValueError(\n \"Argument 'loader' must be an instance of {0} class\".format(\n SAMLMetadataLoader\n )\n )\n if not isinstance(validator, SAMLFederatedMetadataValidator):\n raise ValueError(\n \"Argument 'validator' must be an instance of {0} class\".format(\n SAMLFederatedMetadataValidator\n )\n )\n if not isinstance(parser, SAMLMetadataParser):\n raise ValueError(\n \"Argument 'parser' must be an instance of {0} class\".format(\n SAMLMetadataParser\n )\n )\n\n self._loader = loader\n self._validator = validator\n self._parser = parser\n\n self._logger = logging.getLogger(__name__)", "def __init__(self, path=None, loader=None, filters=None):\n if (path is None and loader is None\n or path is not None and loader is not None):\n raise ValueError('Either specify path oder loader')\n if path is not None:\n loader = FileSystemLoader(path)\n self._env = Environment(loader=loader)\n self._env.add_extension('jinja2.ext.do')\n self._add_filters(filters)", "def load(cls, path):\n data = cls()\n data.set_params(pk.load(path))\n return data", "def setup_loader():\n # The type of loader to use, see simuran.loaders.loader_list.py for options\n # For now nc_loader is the most common option\n # loader = \"params_only\"\n loader = \"nc_loader\"\n\n # Keyword arguments to pass to the loader.\n loader_kwargs = {\n \"system\": \"Axona\",\n \"pos_extension\": \".txt\",\n }\n\n output_dict = {\n \"loader\": loader,\n \"loader_kwargs\": loader_kwargs,\n }\n\n return output_dict", "def __init__(self):\n self.__dict__ = dict()\n self.load()", "def __init__(self, name=None, pickle_file=None):\n self.pickle_file = pickle_file\n if pickle_file is not None:\n self.store = pickle.load(pickle_file)\n if not isinstance(self.store, dict):\n raise ValueError, \"pickle file '%s' does not contain a dict\" % pickle_file\n else:\n self.store = OrderedDict()", "def loader(self):\n return self._loader", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)", "def __init__(self, name, loadfile=None, loadpath=''):\n \n self.name = name\n \n if loadfile==None:\n self.data = []\n else:\n with open(loadpath+loadfile) as currentfile:\n self.data = pickle.load(currentfile)", "def dataloader(self):\n return DataLoader", "def load(self, *args, **kwargs):\r\n for store_attr in self.__store_attrs__:\r\n setattr(self, store_attr, {})", "def __init__(self, key, active=True, **kwargs):\n # Support multiple inheritance\n super(Store, self).__init__()\n \n # Make key safe and set it\n key = key.lower()\n if not is_filename_safe(key):\n raise ValueError('Key is not safe for filename')\n self.key = key\n \n # Set filename (if service has started)\n if self.manager._started:\n self._pre_start()\n \n # Tell fields to initialise themselves\n for field_name, field in self._fields.items():\n field.contribute_to_instance(self, field_name)\n \n # Now set any field values\n for field_name, value in kwargs.items():\n if field_name not in self._fields:\n raise TypeError(\n \"__init__() got an unexpected keyword argument '%s'\" %\n field_name\n )\n setattr(self, field_name, value)\n \n # Ensure manager knows about it\n if active:\n self.manager.add_active(self)", "def __init__(self):\n self.load()", "def __init__(self, path):\n self._path = path\n self._store = pd.HDFStore(path, \"r\")\n self._user_ids = self._load_user_ids()\n self._loads = dict()", "def __init__(self, loader, model, config):\n\n # objects\n\n self.config = config\n self.model = model\n\n try:\n self.words = loader.words\n self.linked = loader.linked\n except:\n raise 
Exception(\"Not text descriptions loaded\")\n\n self.vectors = self.embed(loader, model)\n self.Z = self.link()\n self.counts = self.count()", "def create_storage(conf):\n _name = conf.get(\"name\", \"\")\n _cls = importer(conf['class'])\n _kwargs = conf['kwargs']\n _io = importer(_kwargs['io_class'])\n return _cls(_kwargs[\"storage_config\"], name=_name, io_class=_io)", "def __init__(self, store):\n assert store is not None\n self.store = store", "def load(cls, filename):\n path = Path(filename)\n with path.open('rb') as f:\n model = pickle.load(f)\n wrapper = cls(\n estimator = model.estimator,\n param_grid = model.param_grid,\n )\n wrapper.model = model\n return wrapper", "def loader(cls, *a, **kw):\n def loader_for_Runtime2to3SourceFileLoader(fullname, path):\n return cls(fullname, path, *a, **kw)\n return loader_for_Runtime2to3SourceFileLoader", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def __init__(self, mappings, stores, i18n_service=None, **kwargs):\r\n super(MixedModuleStore, self).__init__(**kwargs)\r\n\r\n self.modulestores = {}\r\n self.mappings = {}\r\n\r\n for course_id, store_name in mappings.iteritems():\r\n try:\r\n self.mappings[CourseKey.from_string(course_id)] = store_name\r\n except InvalidKeyError:\r\n try:\r\n self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name\r\n except InvalidKeyError:\r\n log.exception(\"Invalid MixedModuleStore configuration. Unable to parse course_id %r\", course_id)\r\n continue\r\n\r\n if 'default' not in stores:\r\n raise Exception('Missing a default modulestore in the MixedModuleStore __init__ method.')\r\n\r\n for key, store in stores.iteritems():\r\n is_xml = 'XMLModuleStore' in store['ENGINE']\r\n if is_xml:\r\n # restrict xml to only load courses in mapping\r\n store['OPTIONS']['course_ids'] = [\r\n course_key.to_deprecated_string()\r\n for course_key, store_key in self.mappings.iteritems()\r\n if store_key == key\r\n ]\r\n self.modulestores[key] = create_modulestore_instance(\r\n store['ENGINE'],\r\n # XMLModuleStore's don't have doc store configs\r\n store.get('DOC_STORE_CONFIG', {}),\r\n store['OPTIONS'],\r\n i18n_service=i18n_service,\r\n )", "def from_pretrained(cls, dstore_dir: str, no_load_keys=False, use_memory=False, mode=\"r\"):\n info = json.load(open(os.path.join(dstore_dir, \"info.json\")))\n dstore_size, hidden_size, vocab_size, dstore_fp16, val_size = (\n info[\"dstore_size\"],\n info[\"hidden_size\"],\n info.get(\"vocab_size\", None),\n info.get(\"dstore_fp16\", False),\n info.get(\"val_size\", 1),\n )\n return cls(dstore_size=dstore_size, hidden_size=hidden_size, dstore_dir=dstore_dir, dstore_fp16=dstore_fp16,\n vocab_size=vocab_size, no_load_keys=no_load_keys, mode=mode, use_memory=use_memory, val_size=val_size)", "def load_later(klass, name, load_func, *a, **kw):\n m = klass(name)\n m._loaded = False\n m._loader = (load_func, a, kw)\n return m", "def loader(self):\r\n return self._loader", "def register_loader(\n self, oid: Union[int, str], loader: Type[\"Loader\"]\n ) -> None:\n if isinstance(oid, str):\n oid = self.types[oid].oid\n if not isinstance(oid, int):\n raise TypeError(\n f\"loaders should be registered on oid, got {oid} instead\"\n )\n\n if _psycopg:\n loader = self._get_optimised(loader)\n\n fmt = loader.format\n if not self._own_loaders[fmt]:\n self._loaders[fmt] = self._loaders[fmt].copy()\n self._own_loaders[fmt] = True\n\n self._loaders[fmt][oid] = loader", "def __init__(self, etl, extractors=None, 
streams=None, loaders=None):\n # Get the extractor info.\n try:\n self.extractor_json = etl[\"extractor\"]\n self.extractor_name = self.extractor_json.keys()[0]\n except (KeyError, IndexError):\n raise Exception(\"Please define valid extractor\")\n\n # Get the transformers.\n self.transformers = etl.get(\"transformers\", [])\n\n # Get the loader info.\n try:\n self.loader_json = etl[\"loader\"]\n self.loader_name = self.loader_json.keys()[0]\n except (KeyError, IndexError):\n raise Exception(\"Please define valid loader.\")\n\n # Validate the extractor function.\n self._extractors = {}\n self._init_extractors()\n if extractors:\n pass\n try:\n self.extractor = self.extractors[self.extractor_name]\n except KeyError:\n raise Exception(\n \"{0} extractor not implemented\".format(self.extractor_name)\n )\n\n # Validate the extractor function.\n self._streams = {}\n self._init_streams()\n if streams:\n pass\n try:\n self.stream = self.streams[self.extractor_name]\n except KeyError:\n raise Exception(\n \"{0} stream not implemented\".format(self.extractor_name)\n )\n\n # Validate the loader function.\n self._loaders = {}\n self._init_loaders()\n if loaders:\n pass\n try:\n self.loader = self.loaders[self.loader_name]\n except KeyError:\n raise Exception(\n \"{0} loader not implemented\".format(self.loader_name)\n )", "def __init__(self, path=None, fp=None):\n if path is not None and fp is not None:\n raise ValueError(\n \"JSONFileLoader must be instantiated with one of path or fp\"\n )\n if path is None and fp is None:\n raise ValueError(\n \"JSONFileLoader must be instantiated with one of path or fp\"\n )\n\n if path is not None:\n path = os.path.abspath(os.path.expanduser(os.path.expandvars(path)))\n self._path = path\n self._fp = fp\n\n self._load_data()", "def load(cls, filename, **kwargs):\n with open(filename, 'rb') as fin:\n self = pickle.load(fin, **kwargs)\n self._check_types()\n return self", "def __init__(\r\n self, data_dir, default_class=None, course_dirs=None, course_ids=None,\r\n load_error_modules=True, i18n_service=None, **kwargs\r\n ):\r\n super(XMLModuleStore, self).__init__(**kwargs)\r\n\r\n self.data_dir = path(data_dir)\r\n self.modules = defaultdict(dict) # course_id -> dict(location -> XBlock)\r\n self.courses = {} # course_dir -> XBlock for the course\r\n self.errored_courses = {} # course_dir -> errorlog, for dirs that failed to load\r\n\r\n if course_ids is not None:\r\n course_ids = [SlashSeparatedCourseKey.from_deprecated_string(course_id) for course_id in course_ids]\r\n\r\n self.load_error_modules = load_error_modules\r\n\r\n if default_class is None:\r\n self.default_class = None\r\n else:\r\n module_path, _, class_name = default_class.rpartition('.')\r\n class_ = getattr(import_module(module_path), class_name)\r\n self.default_class = class_\r\n\r\n self.parent_trackers = defaultdict(ParentTracker)\r\n self.reference_type = Location\r\n\r\n # All field data will be stored in an inheriting field data.\r\n self.field_data = inheriting_field_data(kvs=DictKeyValueStore())\r\n\r\n self.i18n_service = i18n_service\r\n\r\n # If we are specifically asked for missing courses, that should\r\n # be an error. If we are asked for \"all\" courses, find the ones\r\n # that have a course.xml. 
We sort the dirs in alpha order so we always\r\n # read things in the same order (OS differences in load order have\r\n # bitten us in the past.)\r\n if course_dirs is None:\r\n course_dirs = sorted([d for d in os.listdir(self.data_dir) if\r\n os.path.exists(self.data_dir / d / \"course.xml\")])\r\n for course_dir in course_dirs:\r\n self.try_load_course(course_dir, course_ids)", "def load(self, which):\n\t\tpath = os.path.join(self.storagedir, which)\n\t\tprint(\"Loading from\", path)\n\t\twith open(path, \"rb\") as handle:\n\t\t\tsetattr(self, which, _pickle.load(handle))", "def _create_data_loader(self, data, **kwargs):\n if data is None:\n return None\n\n # Set DataLoader config\n # NOTE: Not applicable if data is already a DataLoader\n config = {\n **self.config[\"train_config\"][\"data_loader_config\"],\n **kwargs,\n \"pin_memory\": self.config[\"device\"] != \"cpu\",\n }\n # Return data as DataLoader\n if isinstance(data, DataLoader):\n return data\n elif isinstance(data, Dataset):\n return DataLoader(data, **config)\n elif isinstance(data, (tuple, list)):\n return DataLoader(self._create_dataset(*data), **config)\n else:\n raise ValueError(\"Input data type not recognized.\")", "def train_dataloader(self) -> DataLoader:\n return self._custom_data_loader()", "def load(cls, data):\n return cls(**data)", "def load(self) -> Scene:\n self.path = self.find_scene(self.meta.path)\n if not self.path:\n raise ImproperlyConfigured(\"Scene '{}' not found\".format(self.meta.path))\n\n self.scene = Scene(self.path)\n\n # Load gltf json file\n if self.path.suffix == \".gltf\":\n self.load_gltf()\n\n # Load binary gltf file\n if self.path.suffix == \".glb\":\n self.load_glb()\n\n self.gltf.check_version()\n self.gltf.check_extensions(self.supported_extensions)\n self.load_images()\n self.load_samplers()\n self.load_textures()\n self.load_materials()\n self.load_meshes()\n self.load_nodes()\n\n self.scene.calc_scene_bbox()\n self.scene.prepare()\n\n return self.scene", "def load(self, *args, **kwargs):\n pass", "def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._results_ = None", "def _load_from(cls, model_state: dict) -> AbstractModel:\n return cls(model=model_state.get('model'), **model_state.get('kwargs'))", "def create_loader(self):\n # load data to memory.\n if self.is_cifar100:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar100.load_data()\n else:\n (x_train, y_train), (x_test,\n y_test) = tf.keras.datasets.cifar10.load_data()\n\n y_train = y_train.astype(np.int32)\n y_test = y_test.astype(np.int32)\n\n x_train, y_train = shuffle_dataset(x_train, y_train)\n n_probe = int(math.floor(x_train.shape[0] * FLAGS.probe_dataset_hold_ratio))\n\n # TODO(zizhaoz): add other noise types.\n if 'asymmetric' in self.dataset_name:\n assert 'cifar100' not in self.dataset_name, 'Asymmetric only has CIFAR10'\n (x_train, y_train, y_gold), (x_probe, y_probe) = load_asymmetric(\n x_train,\n y_train,\n noise_ratio=self.noise_ratio,\n n_val=n_probe,\n random_seed=FLAGS.seed)\n elif 'uniform' in self.dataset_name:\n (x_train, y_train, y_gold), (x_probe,\n y_probe) = load_train_val_uniform_noise(\n x_train,\n y_train,\n n_classes=self.num_classes,\n noise_ratio=self.noise_ratio,\n n_val=n_probe)\n else:\n assert self.dataset_name in ['cifar10', 'cifar100']\n\n if not self.split_probe and x_probe is not None:\n # Usually used for supervised comparison.\n tf.logging.info('Merge train and probe')\n x_train = np.concatenate([x_train, x_probe], axis=0)\n y_train = 
np.concatenate([y_train, y_probe], axis=0)\n y_gold = np.concatenate([y_gold, y_probe], axis=0)\n\n conf_mat = sklearn_metrics.confusion_matrix(y_gold, y_train)\n conf_mat = conf_mat / np.sum(conf_mat, axis=1, keepdims=True)\n tf.logging.info('Corrupted confusion matirx\\n {}'.format(conf_mat))\n x_test, y_test = shuffle_dataset(x_test, y_test)\n self.train_dataset_size = x_train.shape[0]\n self.val_dataset_size = x_test.shape[0]\n if self.split_probe:\n self.probe_size = x_probe.shape[0]\n\n input_tuple = (x_train, y_train.squeeze())\n self.train_dataflow = self.create_ds(input_tuple, is_train=True)\n self.val_dataflow = self.create_ds((x_test, y_test.squeeze()),\n is_train=False)\n if self.split_probe:\n self.probe_dataflow = self.create_ds((x_probe, y_probe.squeeze()),\n is_train=True)\n\n tf.logging.info('Init [{}] dataset loader'.format(self.dataset_name))\n verbose_data('train', x_train, y_train)\n verbose_data('test', x_test, y_test)\n if self.split_probe:\n verbose_data('probe', x_probe, y_probe)\n\n return self", "def load(cls, load_information: Dict):\n params = load_information[\"params\"]\n fit_kwargs_path = load_information[\"fit_kwargs\"]\n with open(fit_kwargs_path, \"rb\") as infile:\n fit_kwargs = cloudpickle.load(infile)\n model_path = load_information[\"get_model\"]\n with open(model_path, \"rb\") as infile:\n get_model = cloudpickle.load(infile)\n\n module = cls(get_model=get_model, fit_kwargs=fit_kwargs, **params)\n return module", "def load(path):\n with open(join(path, 'job.pkl'), 'rb') as file:\n job = pickle.load(file)\n job.path = path\n return job", "def build_training_data_loader(self) -> DataLoader:\n pass", "def _loadClass(self, loader):\r\n raise NotImplementedError(\"The method 'loadClass' has to \"\r\n 'be implemented.')", "def initialize(self) -> None:\n self.model = load(self.path)", "def __init__(self, path: str, *loader_details: Tuple[str]) -> None:\n loader_details += ([XPYCEFileLoader, EXTENSIONS],)\n super().__init__(path, *loader_details)", "def __init__(self, subpath):\n super().__init__()\n self._path = join(settings.DATA_DIR, \"pickle\", subpath)\n # Make sure the path to the pickle store exists.\n if not exists(self._path):\n makedirs(self._path)", "def _instantiate(cls, **kwargs):\n return cls(**kwargs)", "def load_data_loader_from_file(cls, filename):\n print(\"Loading data loader from file: {}\".format(filename))\n\n with open(filename, \"rb\") as file:\n return pickle.load(file)", "def _init_loaders(self):\n @self.loaders_wrapper(\"nx2nx\")\n def get_nx2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.nx2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n @self.loaders_wrapper(\"neo4j2nx\")\n def get_neo4j2nx_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2nx_loader(extractor, stream, transformers,\n self.loader_json[self.loader_name],\n graph)\n\n\n @self.loaders_wrapper(\"neo4j2edgelist\")\n def get_neo4j2edgelist_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.neo4j2edgelist_loader(\n extractor,\n stream,\n transformers,\n 
self.loader_json[self.loader_name],\n graph\n )\n\n\n @self.loaders_wrapper(\"edgelist2neo4j\")\n def get_edgelist2neo4j_loader(extractor, stream, transformers, graph):\n \"\"\"\n :param tranformers: List of dicts.\n :extractor: function.\n :param graph: networkx.Graph\n :returns: projx.nx_loader\n \"\"\"\n return loaders.edgelist2neo4j_loader(\n extractor,\n stream,\n transformers,\n self.loader_json[self.loader_name],\n graph\n )", "def load(cls, host):\n\n return cls(host)", "def train(self, train_loader):\n pass", "def __init__(self, app_path, entity_type, resource_loader=None):\n self.app_path = app_path\n self.type = entity_type\n self._resource_loader = (\n resource_loader or ResourceLoader.create_resource_loader(app_path=self.app_path)\n )\n\n self._is_system_entity = Entity.is_system_entity(self.type)\n self._no_trainable_canonical_entity_map = False\n self.dirty = False # bool, True if exists any unsaved generated data that can be saved\n self.ready = False # bool, True if the model is fit by calling .fit()", "def init_from_pickle_file(cls, filename):\n with open(filename, 'rb') as f:\n loaded_tape = pickle.load(f)\n instance = cls(metrics_to_record=loaded_tape.keys())\n instance.tape = loaded_tape\n return instance", "def _load(self):\n self.logger.debug(\"Loading from persistence\")\n # load whole item from persistence\n data = self._persistence.load(self.id(), default={})\n if not data:\n return\n\n try:\n self.persistence_deserialize(data)\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n for persisted_var in self.persisted_values():\n if persisted_var in data:\n self.logger.debug(\"Loaded value {} for attribute {}\".format(\n data[persisted_var], persisted_var))\n # Set the loaded value to the attribute on this class\n setattr(self, persisted_var, data[persisted_var])\n except:\n # log exception while loading and let it continue\n self.logger.exception(\n \"Failed to deserialize block with data: {}\".format(data))", "def __init__(self, config_path):\n cfg = Config.fromfile(config_path)\n self.cfg = cfg\n\n # Now make the dataloader\n self.dataset = build_dataset(cfg.data.test)\n\n self.loader = build_dataloader(\n self.dataset,\n imgs_per_gpu=1,\n workers_per_gpu=0,\n dist=False,\n shuffle=False\n )", "def get_store(cls):\n store_path = cls.get_store_path()\n store = JsonStore(store_path)\n return store", "def train_dataloader(self) -> DataLoader:\n return DataLoader(\n self.train_dataset,\n batch_size=self.hparams.batch_size,\n shuffle=True,\n num_workers=multiprocessing.cpu_count(),\n )", "def load(dirpath):\n\n batch = Pickler.load(join(dirpath, 'batch.pkl'))\n\n # load annotator\n if exists(join(dirpath, 'annotation.json')):\n annotator = Annotation.load(dirpath)\n batch.annotator = annotator\n\n return batch", "def load(cls,filename):\n obj = None\n f = open(filename,'r')\n try:\n obj = pickle.load(f)\n obj.filename = filename\n finally:\n f.close()\n return obj", "def load(self):\n with self.__lock:\n self._d.update(self.backend.load())\n log.debug(\"load: {}\".format(self.backend.filename))", "def storage_factory():\n return storage(transaction.manager, **kwargs)", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def open(self, name, convert_in=None, convert_out=None):\n # log.debug('self.caches=%r', self.caches)\n log.debug('[cache] opening store %r...', name)\n if name not in self.caches:\n log.info('[cache] creating table `%s`...', name)\n self._add_table(name)\n\n 
return Store(name, self, convert_in, convert_out)", "def load(self, *args, **kw):\n if self._loaded:\n return\n args = args or self._loader[1]\n kw = kw or self._loader[2]\n loaded_models = self._loader[0](*args, **kw)\n for m in loaded_models:\n if isinstance(m, Model):\n self.add(m)\n else:\n self.add(self.model_class(**m))\n self._loaded = True", "def from_archive(\n cls, archive_path: Pathlike, dataset_reader_to_load: str = VALIDATION\n ):\n # Uses lazy import because allennlp is an extra requirements.\n from allennlp.data import DatasetReader\n from allennlp.models.archival import load_archive\n\n archive = load_archive(str(archive_path))\n config = archive.config\n if dataset_reader_to_load == VALIDATION and VALIDATION_DATASET_READER in config:\n dataset_reader_params = config[VALIDATION_DATASET_READER]\n else:\n dataset_reader_params = config[DATASET_READER]\n dataset_reader = DatasetReader.from_params(dataset_reader_params)\n return cls(\n model=archive.model,\n dataset_reader=dataset_reader,\n config={\"allen_archive\": archive.config},\n archive_path=Path(archive_path).absolute(),\n )", "def __init__(self):\n manager = Manager()\n # As the snippet is executing in multiprocessing.Process, a shared\n # dict should be used to ensure the change is propagated.\n self.local = manager.dict()\n\n # the object in shared dict should also be shared\n for k, v in self.presets.items():\n if isinstance(v, list):\n self.local[k] = manager.list(v)\n elif isinstance(v, dict):\n self.local[k] = manager.dict(v)\n elif isinstance(v, set):\n self.local[k] = manager.set(v)\n else:\n self.local[k] = v", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def load(self):\n raise NotImplementedError", "def load(self):\n raise NotImplementedError", "def _force_make_distributed_loader(loader: DataLoader) -> DataLoader:\n from catalyst.data.sampler import DistributedSamplerWrapper\n\n sampler = (\n DistributedSampler(dataset=loader.dataset)\n if getattr(loader, \"sampler\", None) is not None\n else DistributedSamplerWrapper(sampler=loader.sampler)\n )\n loader = DataLoader(\n dataset=copy(loader.dataset),\n batch_size=loader.batch_size,\n # shuffle=loader.shuffle,\n sampler=sampler,\n # batch_sampler=loader.batch_sampler,\n num_workers=loader.num_workers,\n # 
collate_fn=loader.collate_fn,\n pin_memory=loader.pin_memory,\n drop_last=loader.drop_last,\n )\n return loader", "def __init__(self,path):\n self.path = path\n self.data = {}\n self.hasChanged = False\n #--Load\n if os.path.exists(self.path):\n ins = open(self.path)\n inData = compat.uncpickle(ins)\n self.data.update(inData)", "def load(cls, filename, format=None, mode='rb'):\n format = infer_format(filename, format)\n if not os.path.isfile(filename):\n raise RuntimeError(\"{0!r} not found.\".format(filename))\n if format == 'pkl.gz':\n f = gzip.open(filename, 'rb')\n data = pickle.loads(f.read())\n f.close()\n elif format == 'pkl':\n with io.open(filename, 'rb') as f:\n data = pickle.loads(f.read())\n x = cls(**data)\n return x", "def _load(self):\n if os.path.exists(DEFAULT_STACK):\n self.stack = TasksInProgress.load(DEFAULT_STACK)\n else:\n self.stack = TasksInProgress()\n\n if os.path.exists(DEFAULT_QUEUE):\n self.backlog = TaskBacklog.load(DEFAULT_QUEUE)\n else:\n self.backlog = TaskBacklog()\n\n self.blocked = TaskLimbo(callback=self.stack.push)\n # FIXME: implement serialization for TaskLimbo\n # if os.path.exists(DEFAULT_LIMBO):\n # self.blocked.load(DEFAULT_LIMBO)\n\n if os.path.exists(DEFAULT_DORM):\n self.sleeping = TaskDorm.load(DEFAULT_DORM)\n self.sleeping.set_callback(self.stack.push)\n else:\n self.sleeping = TaskDorm(callback=self.stack.push)", "def create_modulestore_instance(engine, doc_store_config, options, i18n_service=None):\r\n class_ = load_function(engine)\r\n\r\n return class_(\r\n doc_store_config=doc_store_config,\r\n **options\r\n )", "def _init(self):\n if os.path.exists(self.fname):\n with open(self.fname, \"rb\") as fh:\n self.db = pickle.load(fh)\n else:\n self.db = {}\n print(\"DB loaded, len\", len(self.db))", "def __init__(self, directory, mode=Mode.READONLY):\n if not os.path.exists(directory):\n raise IOError(\"Directory %s for raw store does not exist\"%\n directory)\n self.directory = directory\n with open(os.path.join(directory, \"__rawformat__\"), 'rb') as rawformat:\n self.__dict__.update(pickle.load(rawformat))\n \n fname = self.fname = os.path.join(directory, \"__store___\")\n colCache = self.colCacheDir = os.path.join(directory, \"__colstore___\")\n\n self._f = None\n self.mode = mode\n self._openfile()", "def get_loader(data, json, batch_size, shuffle, num_workers):\n dataset = FinNumDataset(data, json)\n\n data_loader = torch.utils.data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate_fn)\n return data_loader", "def __init__(self):\n self.store = {}", "def create_from_string(cls, text):\n parts = text.split('::')\n pcount = len(parts)\n if pcount == 4:\n name = parts[0]\n u_path = parts[1]\n ds_name = parts[2]\n dir_struc = None\n for _ in DirStruc:\n if _.name == ds_name:\n dir_struc = _\n break\n else:\n raise DvczError(\n \"Not the name of a valid dir_struc name: '%s'\" % ds_name)\n\n # 'item access'\n hashtype = HashTypes[parts[3]]\n return Store(name, u_path, dir_struc, hashtype)\n else:\n raise DvczError(\"Invalid Store descriptor: '%s'\" % text)", "def load(self):\n raise NotImplementedError()", "def load(self):\n raise NotImplementedError()", "def new_datastore(self, **kwargs) -> DataStore:\n return storage.DataStore(\n self.system_params, self.param_name, self.param_vals, **kwargs\n )" ]
[ "0.7120568", "0.71177053", "0.70920056", "0.6825259", "0.65534455", "0.6519639", "0.6312017", "0.6218765", "0.60434765", "0.58976614", "0.5878757", "0.5864052", "0.5856109", "0.58553034", "0.57751334", "0.5761484", "0.57599795", "0.5753757", "0.5739945", "0.57246524", "0.56983006", "0.5695943", "0.5684986", "0.5683774", "0.5681828", "0.5668887", "0.566855", "0.56621295", "0.56606895", "0.5623442", "0.56175345", "0.5609016", "0.5601584", "0.55026376", "0.5493399", "0.5491028", "0.54819196", "0.5472755", "0.5455451", "0.5455224", "0.5450438", "0.54455376", "0.54453415", "0.5442021", "0.5437375", "0.5436789", "0.54357016", "0.5425675", "0.5425606", "0.54243785", "0.54238975", "0.5420784", "0.54171824", "0.5407163", "0.53952837", "0.5390731", "0.5386541", "0.53808475", "0.53753215", "0.536969", "0.5362955", "0.5359798", "0.5358389", "0.5349819", "0.53421646", "0.5339696", "0.5334283", "0.5330732", "0.53264815", "0.5325958", "0.53239495", "0.5322995", "0.53124875", "0.53082395", "0.52980506", "0.5295249", "0.5290855", "0.5290855", "0.5290855", "0.5290855", "0.5287129", "0.528635", "0.5274607", "0.52649546", "0.5258244", "0.5251362", "0.5251362", "0.5250737", "0.5248547", "0.52280164", "0.52256435", "0.52255917", "0.5218492", "0.5217607", "0.5213582", "0.5213178", "0.52109885", "0.5208403", "0.5208403", "0.52065504" ]
0.6796559
4
Import all model data using the loader.
def import_data(self):
    self.models = []
    for o in self.loader.load():
        klass = self.type_for(o)
        if hasattr(klass, "from_api"):
            self.models.append(klass.from_api(o))
        else:
            self.models.append(klass(o))
    return self.models
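A minimal usage sketch of the method above, under stated assumptions: DictLoader, ApiModel, PlainModel, and the Importer.type_for dispatcher are hypothetical illustrative names, not taken from the source.

# All classes below are hypothetical stand-ins for the loader and
# model types the method expects; only import_data mirrors the source.

class DictLoader:
    # Stands in for whatever self.loader is; yields raw records.
    def load(self):
        yield {"kind": "api", "id": 1}
        yield {"kind": "plain", "id": 2}

class ApiModel:
    def __init__(self, data):
        self.data = data

    @classmethod
    def from_api(cls, data):
        # Alternate constructor picked up by the hasattr check.
        return cls(data)

class PlainModel:
    def __init__(self, data):
        self.data = data

class Importer:
    def __init__(self, loader):
        self.loader = loader
        self.models = []

    def type_for(self, o):
        # Dispatch a raw record to a model class.
        return ApiModel if o["kind"] == "api" else PlainModel

    def import_data(self):
        self.models = []
        for o in self.loader.load():
            klass = self.type_for(o)
            if hasattr(klass, "from_api"):
                self.models.append(klass.from_api(o))
            else:
                self.models.append(klass(o))
        return self.models

models = Importer(DictLoader()).import_data()
assert isinstance(models[0], ApiModel)
assert isinstance(models[1], PlainModel)

The hasattr dispatch lets classes that expose an alternate from_api constructor use it, while plain classes fall back to their ordinary __init__.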
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_all():\n\n # count the number of files loaded\n count = 0\n\n # get model name\n model_name_list = [model for data_models in settings.OBJECT_DATA_MODELS\n for model in data_models]\n\n model_name_list += [model for model in settings.OTHER_DATA_MODELS]\n\n # import models one by one\n for model_name in model_name_list:\n import_model(model_name)\n\n # import localized strings\n import_localized_strings(settings.LANGUAGE_CODE)", "def load_models(self):\n logger.info('Loading {name} data'.format(name=self.__class__.__name__))\n for type_name, type_ in self.data_dict.iteritems():\n # An exclude for correlations. Isn't created nor has an ID.\n if type_name == \"correlations_main\":\n continue\n task_response = self.do_task(\n self.types[type_name],\n type_['taskId']\n )\n self.data_dict[type_name]['job_id'] = json.loads(\n task_response.content\n )['JobId']\n logger.info(\n 'Load {name} response: '.format(name=type_name) +\n task_response.content\n )\n\n print(\"Loaded model\")", "def load_data(self):\n\n self._load_train_data()\n self._load_test_data()", "def load(self):\n if self.verbosity:\n self.header(\"Loading data files\")\n\n model_list = [\n x for x in get_model_list() if os.path.exists(x.objects.get_csv_path())\n ]\n\n if self.resume_mode:\n # get finished load command logs of last update\n prev_loaded = [\n x.file_name\n for x in self.log_record.called.filter(\n command='loadcalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} models already loaded.\".format(len(prev_loaded)))\n # remove these from model_list\n model_list = [x for x in model_list if x._meta.db_table not in prev_loaded]\n\n if self.verbosity:\n model_list = progress.bar(model_list)\n for model in model_list:\n call_command(\n \"loadcalaccessrawfile\",\n model.__name__,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n app_name=self.app_name,\n )", "def load(self, *args, **kw):\n if self._loaded:\n return\n args = args or self._loader[1]\n kw = kw or self._loader[2]\n loaded_models = self._loader[0](*args, **kw)\n for m in loaded_models:\n if isinstance(m, Model):\n self.add(m)\n else:\n self.add(self.model_class(**m))\n self._loaded = True", "def loadParts(self):\n for i in range(15):\n self.model_parts[i] = loadModel(\"ato_{}.pkl\".format(str(i)))", "def __init__(self, loader):\n self.loader = loader\n self.models = []", "def _load_training_data(self):\n self._save_training_data()", "def load_models(self, models, iteration = None):\n print(\"NOT IMPLEMENTED YET\")", "def import_all_model_modules():\r\n import brokerage.model\r\n # ensure that these imports don't get auto-deleted! 
they have side effects.\r\n brokerage.model", "def load_model(self):\n pass", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def __load(self, model_name):\n\n print(\"Loading model.\")\n tstart = datetime.now()\n\n # Temporary directory to extract the zipped information\n with tempfile.TemporaryDirectory() as dirpath:\n\n # Unzip the directory that contains the saved model(s)\n with zipfile.ZipFile(model_name + \".zip\", \"r\") as zip_ref:\n zip_ref.extractall(dirpath)\n\n # Load metadata\n metadata = pickle.load(open(dirpath + \"/metadata.pickle\", \"rb\"))\n\n # Re-load metadata\n self.__dict__.update(metadata)\n\n # Load all sub-models\n try:\n self.__mol_to_latent_model = load_model(\n dirpath + \"/mol_to_latent_model.h5\"\n )\n except:\n print(\"'mol_to_latent_model' not found, setting to None.\")\n self.__mol_to_latent_model = None\n\n self.__latent_to_states_model = load_model(\n dirpath + \"/latent_to_states_model.h5\"\n )\n self.__batch_model = load_model(dirpath + \"/batch_model.h5\")\n \n # Build sample_model out of the trained batch_model\n self.__build_sample_model(batch_input_length=1) # Single-output model\n self.__build_sample_model(\n batch_input_length=256 # could also be self.batch_size\n ) # Multi-output model\n\n print(\"Loading finished in %i seconds.\" % ((datetime.now() - tstart).seconds))", "def load(\n self,\n modelLoadPath\n ):\n pass", "def load_model(self):\n self._logger.debug(f\"Loading Spacy Data Model : {self._model}... Could take time.\")\n self._nlp = spacy.load(self._model)\n self._logger.debug(\"Successfully loaded Spacy Data !\")\n\n # === Load entities ===\n if PIPE_ENTITY not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_ENTITY, last=True)\n\n entity_pipe = self._nlp.get_pipe(PIPE_ENTITY)\n for entity in self._entities:\n entity_pipe.add_label(entity)\n\n # === Load categories ===\n if PIPE_INTENT not in self._nlp.pipe_names:\n self._nlp.add_pipe(PIPE_INTENT, last=True)\n\n intent_pipe = self._nlp.get_pipe(PIPE_INTENT)\n for intent in self._intents:\n intent_pipe.add_label(intent)", "def load(self, dataset, model_dir):\n raise NotImplementedError", "def load(self) -> None:\n self._load_data()\n self._load_poses()\n self._load_timestamps()", "def import_model(model_name, clear=True):\n file_name = os.path.join(settings.GAME_DIR, settings.WORLD_DATA_FOLDER, model_name)\n import_file(file_name, model_name, widecard=True, clear=clear)", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def __load_model(self):\n loaded = load(self.__file_name)\n self.__model = loaded['model']\n self.__meta_data = loaded['metadata']\n self.__is_ready = True", "def load_model(self):\n Thread(target=self.__load_model).start()", "def load_model(self) -> Any:", "def load_cleaned_data(self):\n try:\n self.train = pd.read_pickle('../input/train_clean.pkl')\n self.test = pd.read_pickle('../input/test_clean.pkl')\n except FileNotFoundError:\n self.load_raw_data()", "def load_dataset(self):\n # Get all the files in the directory\n file_list = self.get_file_list()\n\n # Concatenate the data corresponding to a list of files\n data = self.concatenate_file_data(file_list)\n\n # Shuffle the data and create the training and the validation 
datasets\n data = self.shuffle_data_dictionary(data)\n self.training_dataset, self.validation_dataset = self.split_data_into_training_and_validation(data)", "def load_bundles(self):\n path = os.path.join(self.user_directory, \"bundles\")\n for name in os.listdir(path):\n if not name.startswith(\"__\") and os.path.isdir(path + \"/\" + name):\n bundle = Bundle(self, name)\n self.bundles[name] = bundle\n for bundle in self.bundles.values():\n bundle.setup(self, self.loader)\n\n for model in self.models:\n type(model).extend(model)\n for model in self.models:\n self.data_connector.repository_manager.add_model(model)", "def load_all(self, file):\n self.model = load_model(file + \"_model.h5\")", "def loadAll(self, path):\n self.model = keras.models.load_model(path+\"/model\")\n with open(path + \"/modelConfig.json\") as f:\n config = json.load(f)\n firstLayerConfig = config['config']['layers'][0]['config']\n lastLayerConfig = config['config']['layers'][-1]['config']\n self.lookBack = firstLayerConfig['batch_input_shape'][-1]\n self.forecast = lastLayerConfig['units']", "def load(path_to_model):\n pass", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def load_all_files(self):\n\t\tself.get_rankings()\n\t\tself.get_partition()\n\t\tself.__load_factors()\n\t\tself.get_document_associations()\n\t\tself.get_term_associations()", "def _import_all(self):\n # on first load, documents dir may not be in import path\n if not self.app.documents_dir in sys.path:\n sys.path += [self.app.documents_dir]\n # clean modules dict before (re)loading anything\n self._remove_non_current_game_modules()\n # make copy of old modules table for import vs reload check\n old_modules = self.modules.copy()\n self.modules = {}\n # load/reload new modules\n for module_name in self._get_game_modules_list():\n try:\n # always reload built in modules\n if module_name in self.builtin_module_names or \\\n module_name in old_modules:\n m = importlib.reload(old_modules[module_name])\n else:\n m = importlib.import_module(module_name)\n self.modules[module_name] = m\n except Exception as e:\n self.app.log_import_exception(e, module_name)", "def file_loader(self):\n\n for folder in self.config[\"data_folders\"]:\n f = os.path.join(folder, self.data_file)\n yield jsonlist.load_file(f)", "def initialize(self) -> None:\n self.model = load(self.path)", "def load_data(self) -> None:", "def train(self, train_loader):\n pass", "def load_data(self):", "def load(self):\n for name, item in itertools.chain(\n self._cal_objs.items(),\n self._noise_objs.items()):\n logger.debug(\"load {}\".format(item))\n item.load()", "def load_model(self, path):\n pass", "def importData():\n #importChallengeDataToDB()\n importTrendingDataToDB()", "def load_all(cls, data):\n return [cls.load(obj) for obj in data]", "def _load_train_data(self):\n\n self.train_loader = data.Train_loader(self.N_max, self.n_per_conn,\n self.data_path, self.device)\n self.train_loader.load_data()\n\n # load mean and std\n scc_mean_std = np.loadtxt(\n os.path.join(self.data_path, 'scc_mean_std.csv'), delimiter=',')\n self.mean = torch.Tensor(scc_mean_std[0])\n self.std = 
torch.Tensor(scc_mean_std[1])", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def import_model(self, model_pkg):\n\n keys = self.__dict__.keys()\n for k, v in model_pkg.items():\n if k != 'verbose' and k in keys:\n self.__dict__[k] = v\n elif k.startswith('datas_'):\n c = k.split('_')\n idx = int(c[1])\n while len(self.datas) <= idx:\n self.datas.append(Data(verbose=self.verbose))\n self.datas[int(c[1])].__dict__['_'.join(c[2:])] = v\n\n return self", "def load_model(self):\n if torch.cuda.is_available():\n map_location=lambda storage, loc: storage.cuda()\n else:\n map_location='cpu'\n\n for index, agent in enumerate(self.agents):\n agent.actor_local.load_state_dict(torch.load('agent{}_checkpoint_actor.pth'.format(index + 1), map_location=map_location))\n agent.critic_local.load_state_dict(torch.load('agent{}_checkpoint_critic.pth'.format(index + 1), map_location=map_location))", "def load_model(self, model_path: str):", "def loadInputFiles(self):\n\t\tfor filename in self.input_filename_list:\n\t\t\tfor module in self.modules:\n\t\t\t\tmodule.Add(filename)", "def preload_all(self):\n for tp in self.tps:\n for f in self.featurefiles + self.maskfiles:\n file = os.path.join(tp, f)\n print('preloading {}'.format(file))\n self.load(file, lazy=False)", "def loadmodels(self):\n for emane_model in EMANE_MODELS:\n logger.info(\"loading emane model: (%s) %s - %s\",\n emane_model, emane_model.name, RegisterTlvs(emane_model.config_type))\n self._modelclsmap[emane_model.name] = emane_model\n self.session.add_config_object(emane_model.name, emane_model.config_type,\n emane_model.configure_emane)", "def import_data_in_model(import_path, save_path, data_files):\n meta_file = import_path + \".meta\"\n pb_file = os.path.join(import_path, \"saved_model.pb\")\n pbtxt_file = os.path.join(import_path, \"saved_model.pbtxt\")\n with Session(graph=tf.Graph()) as session:\n if os.path.isfile(meta_file):\n logging.info(\"Loading v1 saved model from folder %s\", import_path)\n graph = MetaGraph(session, import_path)\n graph.import_variables(data_files)\n graph.save(save_path)\n elif os.path.isfile(pb_file) or os.path.isfile(pbtxt_file):\n logging.info(\"Loading v2 Keras SavedModel from folder %s\", import_path)\n model = SavedModel(session, import_path)\n model.import_variables(data_files)\n model.save(save_path)\n else:\n logging.fatal((\"Could not find any TensorFlow v1 (%s) or v2 (%s or %s)\"\n \" models\"), meta_file, pb_file, pbtxt_file)\n exit(1)", "def load_data(self):\n for set_name in self.image_dir_path:\n if self.verbose:\n print('\\n> Loading data files for the set: ' + set_name)\n\n # image dir\n image_dir = os.path.join(self.data_path, self.image_dir_path[set_name])\n\n # annotation file path\n annot_filepath = os.path.join(self.data_path, self.annotation_path[set_name])\n\n if 'test' in set_name:\n yield load_data_test(set_name, image_dir, annot_filepath, self.verbose)\n else:\n yield self.load_data_trainval(set_name, image_dir, annot_filepath)", "def load_models(self, episode):\r\n self.actor.load_state_dict(self.target_actor.state_dict())\r\n self.critic.load_state_dict(self.target_critic.state_dict())\r\n hard_update(self.target_actor, self.actor)\r\n hard_update(self.target_critic, self.critic)", "def data_loader(\n self, batch_size: int = 1, iter_steps: int = 0, batch_as_list: bool = True\n ) -> DataLoader:\n data = self.data\n datasets = []\n\n for _, dat in data.items():\n datasets.append(dat.dataset())\n\n if len(datasets) < 1:\n raise FileNotFoundError(\n \"no 
datasets available for this model to create a loader from\"\n )\n\n return DataLoader(\n *datasets,\n batch_size=batch_size,\n iter_steps=iter_steps,\n batch_as_list=batch_as_list,\n )", "def load_model(self, filename):\r\n pass", "def load(self):\n print(\"==> Loading model from\", self.model_dir)\n self.model = tf.keras.models.load_model(self.model_dir)", "def model_load(prefix='sl',data_dir=None,training=True):\r\n\r\n if not data_dir:\r\n data_dir = os.path.join(\"..\",\"data\",\"cs-train\")\r\n \r\n models = [f for f in os.listdir(os.path.join(\".\",\"models\")) if re.search(\"sl\",f)]\r\n\r\n if len(models) == 0:\r\n raise Exception(\"Models with prefix '{}' cannot be found did you train?\".format(prefix))\r\n\r\n all_models = {}\r\n for model in models:\r\n all_models[re.split(\"-\",model)[1]] = joblib.load(os.path.join(\".\",\"models\",model))\r\n\r\n ## load data\r\n ts_data = fetch_ts(data_dir)\r\n all_data = {}\r\n for country, df in ts_data.items():\r\n X,y,dates = engineer_features(df,training=training)\r\n dates = np.array([str(d) for d in dates])\r\n all_data[country] = {\"X\":X,\"y\":y,\"dates\": dates}\r\n \r\n return(all_data, all_models)", "def importAll(self, imdata = True, imlights = True, imaovs = True, imshaders = True, immaster = True, asset = '', searchAndReplace = ['',''] ):\n\t\tif immaster:\n\t\t\tself.importMasterSettings()\n\t\tif imlights and self.lightPath.exists:\n\t\t\tself.importLights( asset, searchAndReplace )\n\t\tif imaovs and self.aovsPath.exists:\n\t\t\tself.importAovs()\n\t\tif imshaders and self.shaderPath.exists:\n\t\t\tself.importShaders()\n\t\tif imdata and self.dataPath.exists:\n\t\t\tself.importData( asset, searchAndReplace )", "def _init_train_loader(self):\n # Choose the right dataset type\n if self.config_args[\"num_members\"] > 1:\n class_dataset_wrapper = dataset_wrapper.MixMoDataset\n else:\n class_dataset_wrapper = dataset_wrapper.MSDADataset\n\n # Load augmentations\n self.traindatasetwrapper = class_dataset_wrapper(\n dataset=self.train_dataset,\n num_classes=int(self.config_args[\"data\"][\"num_classes\"]),\n num_members=self.config_args[\"num_members\"],\n dict_config=self.config_args[\"training\"][\"dataset_wrapper\"],\n properties=self.properties\n )\n\n # Build standard sampler\n _train_sampler = torch.utils.data.sampler.RandomSampler(\n data_source=self.traindatasetwrapper, ## only needed for its length\n num_samples=None,\n replacement=False,\n )\n\n # Wrap it with the repeating sampler used for multi-input models\n batch_sampler = batch_repetition_sampler.BatchRepetitionSampler(\n sampler=_train_sampler,\n batch_size=self.batch_size,\n num_members=self.config_args[\"num_members\"],\n drop_last=True,\n config_batch_sampler=self.config_args[\"training\"][\"batch_sampler\"]\n )\n\n self.train_loader = torch.utils.data.DataLoader(\n self.traindatasetwrapper,\n batch_sampler=batch_sampler,\n num_workers=self.num_workers,\n batch_size=1,\n shuffle=False,\n sampler=None,\n drop_last=False,\n pin_memory=True,\n )", "def load(self) -> None:\n # Load in centroids\n if (self._path_model / f\"{self}\").is_file():\n with open(self._path_model / str(self), 'r') as file:\n self._centroids = {k: np.asarray(v, dtype=np.float32) for k, v in json.load(file).items()}\n \n # Load in (validation) clusters\n if (self._path_data / f\"{self}-train\").is_file():\n with open(self._path_data / f\"{self}-train\", 'r') as file:\n self._clusters = json.load(file)\n if (self._path_data / f\"{self}-val\").is_file():\n with open(self._path_data / 
f\"{self}-val\", 'r') as file:\n self._clusters_val = json.load(file)", "def initialize(self):\n LOG.info(\"Initializing Model.\")\n self.model = self.convert(df=self.training_df)\n if self.bootstraps is not None:\n LOG.info(\"Bootstrapping Data.\")\n self.bootstrap_data()", "def load(self):\n utils.get_previous_weights_from_gdrive(self.config.model_folder)\n last_used_model = utils.get_latest_model_name(self.config.model_folder)\n self.model = load_model(last_used_model)\n self.model.summary()", "def init_models(self):\n from ron import Application\n from ron.models.basemodel import BaseModel\n if self.models == None or not Application().db:\n return\n models_namespace = self.__namespace + \".models\" # TODO: allow customize this\n try:\n models_package = import_module(models_namespace)\n except:\n models_package = None\n if models_package:\n models_modules = self._get_package_modules(models_package)\n for model_name in models_modules:\n imported_model = import_module('.' + model_name, package=models_namespace)\n for i in dir(imported_model):\n attribute = getattr(imported_model, i)\n if inspect.isclass(attribute) and issubclass(attribute, BaseModel):\n self.models.append(attribute)\n Application().db().database.create_tables(self.models)", "async def load_model(\n self,\n model_name: str,\n headers: dict[str, t.Any] = ...,\n config: str = ...,\n files: dict[str, str] = ...,\n ) -> None:", "def _load_data(self):\n if self._name in BALANCE_DATASET:\n _loader = dataset_loaders[self._name]\n xnp, y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.y_ts = y[test_idx]\n\n else:\n _loader = dataset_loaders[self._name]\n xnp, xp, y = _loader()\n # self.xnp, self.xp, self.y = _loader()\n\n # Train - Test split\n gen = ShuffleSplit(n_splits=1, random_state=42, test_size=self._test_size).split(xnp)\n train_idx, test_idx = next(gen)\n\n # Train data.\n self.xnp_tr = xnp[train_idx]\n self.xp_tr = xp[train_idx]\n self.y_tr = y[train_idx]\n # Test data.\n self.xnp_ts = xnp[test_idx]\n self.xp_ts = xp[test_idx]\n self.y_ts = y[test_idx]", "def load_data(model, i18n_model, i18n_dirname):\n position_num = []\n dic_keys = {}\n csv_separator = ':'\n re_num = re.compile('\\d+$')\n\n # Get only the fields name\n fields = [ x for x in model[2:] if not (':' in x or '#' in x) ]\n # Get left data.\n fields_number = len(fields)\n csv_file = model[0]\n model_name = model[1]\n print \"Adding data in %s.%s table\" % (i18n_dirname, model_name)\n # Load the class of the models file\n exec \"%s = getattr(i18n_model, model_name)\" % model_name\n\n # Get the position of numeric fields\n if ':N' in model:\n pos = model.index(':N')\n position_num = model[pos-1] # The field numeric is before of ':N'\n position_num = [ int(x) for x in position_num if not '#' in x ]\n\n # Info. about keys\n if ':K' in model:\n pos = model.index(':K')\n info_keys = model[pos-1]\n # Format-> :[position],[model name]:...\n info_keys = info_keys.split(':')[1:]\n keys = [ (int(x.split(',')[0]), x.split(',')[1]) for x in info_keys ]\n dic_keys = dict(keys)\n\n # To store the keys. 
Set to values null\n model_id = {}\n for x in dic_keys.keys():\n model_id.setdefault(x, None)\n\n # Convert from CSV to Django ORM\n reader = csv.reader(comment_stripper(\n open(csv_file)), delimiter=csv_separator)\n\n line_bool = [] # Lines where is enabled a boolean field.\n bool_found = False\n line_number = 0\n for csv_line in reader:\n #debug\n# if \\\n# model_name == \"Phone\" or \\\n# model_name == \"AddressFormat\":\n# model_name == \"Country\" or \\\n# model_name == \"CountryLanguage\" or \\\n# model_name == \"Language\" or \\\n# model_name == \"Subdivision\" or \\\n# model_name == \"TimeZone\" or\n# print \"\\tskip\"\n# break\n\n object_line = []\n key_line_s = []\n line_number += 1\n\n object_line.append(\"c%d = %s(\" % (line_number, model_name))\n\n for position in range(0, fields_number):\n field_text = csv_line[position]\n if field_text == 'True':\n if not bool_found:\n bool_field = fields[position]\n bool_found = True\n line_bool.append(line_number)\n elif field_text: # If is not empty\n key_line = []\n if object_line[-1][-1] != '(': # Check the last character\n object_line.append(', ')\n # If is a key\n if dic_keys and dic_keys.has_key(position):\n object_line.append('%s=key_id%d'\n % (fields[position], position))\n key_model = dic_keys.get(position)\n\n # Load the class of the foreigner model.\n try:\n eval(\"%s\" % key_model)\n except NameError:\n exec \"%s = getattr(i18n_model, key_model)\" %key_model\n\n if csv_line[position] != model_id.get(position):\n model_id[position] = csv_line[position]\n\n key_line.append('key_id%d = %s.objects.get(pk='\n % (position, key_model))\n if re_num.match(model_id.get(position)): # integer\n key_line.append('%d)' % model_id.get(position))\n else:\n key_line.append('\"%s\")' % model_id.get(position))\n\n key_line = ''.join(key_line)\n key_line_s.append(key_line)\n\n # If is an integer\n elif position in position_num:\n object_line.append('%s=%s' \\\n % (fields[position], csv_line[position]))\n # If is a string.\n else:\n object_line.append('%s=\"%s\"' \\\n % (fields[position], csv_line[position]))\n\n if key_line_s:\n for key in key_line_s:\n# print key #debug\n exec(key)\n\n object_line.append(\")\")\n load_object = ''.join(object_line)\n# print load_object #debug\n exec(load_object) # Load the object\n\n # At the end, save all objects together\n if model_name == 'Language':\n # Display the english language.\n for num in range(1, line_number+1):\n obj = eval(\"c%d\" % num)\n if obj.iso3_code == 'eng':\n obj.display = True\n obj.save()\n else:\n for num in range(1, line_number+1):\n obj = eval(\"c%d\" % num)\n if num in line_bool:\n exec(\"obj.%s = True\" % bool_field)\n try:\n \tobj.save()\n except:\n print \"Problem loading data. 
Entry will not be loaded.\"\n try:\n transaction.rollback()\n except:\n #Some databases were having trouble with the rollback\n pass", "def load_raw_data(apps, schema_editor):\n from season.import_raw_data import InitialDataProcessor\n matches_path = str(BASE_DIR) + '/season/migrations/matches.csv'\n deliveries_path = str(BASE_DIR) + '/season/migrations/deliveries.csv'\n # Initialization path to read data\n load_data = InitialDataProcessor(matches_path=matches_path, deliveries_path=deliveries_path)\n # transform data frame and save the data step by step\n # only support new season import for the first tym when data structure is ready to use\n load_data.transform_input_save()", "def test_model_loading(self):\n\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n for pretrained_model_name in XLNetEncoder.available_checkpoints():\n encoder = XLNetEncoder(pretrained_model_name=pretrained_model_name)\n _ = encoder(inputs)", "def load_trainer(self):\n super().load_trainer()\n\n logging.info(\"[Server #%d] Loading a pre-trained model.\", os.getpid())\n self.trainer.load_model()", "def build_training_data_loader(self) -> DataLoader:\n pass", "def __init__(self, load_model_dir=None):\n \n if load_model_dir:\n raise RuntimeError('Whoops. Not implemented yet')\n \n ## Load pickeled preprocessing function (applied to raw features)\n ## Load pickeled postprocessing function (applied to labels before output)\n ## Load tf model", "def load_static():\n\n for i, row in enumerate(open(\"seed_data/homepage_feature.static\")):\n row = row.rstrip()\n title, body, img_path_xs, img_path_sm, img_path_md, img_path_lg, is_active = row.split(\"|\")\n homepage_feature = HomepageFeatureModel(title=title,\n body=body,\n img_path_xs=img_path_xs,\n img_path_sm=img_path_sm,\n img_path_md=img_path_md,\n img_path_lg=img_path_lg,\n is_active=is_active)\n db.session.add(homepage_feature)\n\n for i, row in enumerate(open(\"seed_data/help_article.static\")):\n row = row.rstrip()\n title, description, body = row.split(\"|\")\n help_article = HelpArticleModel(title=title, \n description=description, \n body=body)\n db.session.add(help_article)\n\n db.session.commit()", "def _load_model(self):\n self.model = tf.keras.experimental.load_from_saved_model(\n self.m_cfg['load_model'], custom_objects=self.custom_objects)\n\n ref = 1 if self.m_cfg['configs']['recursive'] else self.levels\n self.opt = [self._inst_optimizer() for _ in range(ref)]\n self.loss = Losses(self.m_cfg['configs']['loss']).value\n\n l_groups = np.split(np.array(self.model.layers), ref)\n self.vars = list(map(\n lambda g: list(chain(*map(lambda e: e.variables, g))), l_groups))", "def load_label(self, fixture_label):\n show_progress = self.verbosity >= 3\n for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):\n _, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))\n open_method, mode = self.compression_formats[cmp_fmt]\n fixture = open_method(fixture_file, mode)\n try:\n self.fixture_count += 1\n objects_in_fixture = 0\n loaded_objects_in_fixture = 0\n if self.verbosity >= 2:\n self.stdout.write(\"Installing %s fixture '%s' from %s.\" %\n (ser_fmt, fixture_name, humanize(fixture_dir)))\n\n objects = serializers.deserialize(ser_fmt, fixture,\n using=self.using, ignorenonexistent=self.ignore)\n\n create_dict = OrderedDict()\n\n for object in objects:\n obj = object.object\n objects_in_fixture += 1\n model = obj.__class__\n if router.allow_migrate_model(self.using, model):\n self.models.add(model)\n if model in 
create_dict.keys():\n create_dict[model].append(obj)\n else:\n create_dict[model] = [obj]\n for model in create_dict.keys():\n objs = create_dict[model]\n loaded_objects_in_fixture += len(objs)\n try:\n model.objects.using(self.using).bulk_create(objs)\n if show_progress:\n self.stdout.write(\n '\\rProcessed %i object(s).' % loaded_objects_in_fixture,\n ending=''\n )\n except (DatabaseError, IntegrityError) as e:\n e.args = (\"Could not load %(app_label)s.%(object_name)s: %(error_msg)s\" % {\n 'app_label': model._meta.app_label,\n 'object_name': model._meta.object_name,\n 'error_msg': force_text(e)\n },)\n raise\n if objects and show_progress:\n self.stdout.write('') # add a newline after progress indicator\n self.loaded_object_count += loaded_objects_in_fixture\n self.fixture_object_count += objects_in_fixture\n except Exception as e:\n if not isinstance(e, CommandError):\n e.args = (\"Problem installing fixture '%s': %s\" % (fixture_file, e),)\n raise\n finally:\n fixture.close()\n\n # Warn if the fixture we loaded contains 0 objects.\n if objects_in_fixture == 0:\n warnings.warn(\n \"No fixture data found for '%s'. (File format may be \"\n \"invalid.)\" % fixture_name,\n RuntimeWarning\n )", "def ImportModelPart(self):\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Importing model part.\")\n problem_path = os.getcwd()\n input_filename = self.settings[\"model_import_settings\"][\"input_filename\"].GetString()\n if self.is_restarted():\n self.get_restart_utility().LoadRestart()\n elif(self.settings[\"model_import_settings\"][\"input_type\"].GetString() == \"mdpa\"):\n # Import model part from mdpa file.\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Reading model part from file: \" + os.path.join(problem_path, input_filename) + \".mdpa\")\n KratosMultiphysics.ModelPartIO(input_filename).ReadModelPart(self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]::\", \"Finished reading model part from mdpa file.\")\n self.PrepareModelPartForSolver()\n else:\n raise Exception(\"Other model part input options are not yet implemented.\")\n KratosMultiphysics.Logger.PrintInfo(\"ModelPart\", self.main_model_part)\n KratosMultiphysics.Logger.PrintInfo(\"::[MechanicalSolver]:: \", \"Finished importing model part.\")", "def _load_test_data(self):\n self._save_test_data()", "def load_data():\n print(\"PARSING TRAIN\")\n ys_train, x_train, ids_train = load_pickle_data(\"ys_train\"), load_pickle_data(\"x_train\"), load_pickle_data(\n \"ids_train\")\n if ys_train is None or x_train is None or ids_train is None:\n ys_train, x_train, ids_train = load_csv_data(\"{}/train.csv\".format(DATA_DIR))\n dump_pickle_data(ys_train, \"ys_train\")\n dump_pickle_data(x_train, \"x_train\")\n dump_pickle_data(ids_train, \"ids_train\")\n\n print(\"PARSING TEST\")\n x_test, ids_test = load_pickle_data(\"x_test\"), load_pickle_data(\"ids_test\")\n if x_test is None or ids_test is None:\n _, x_test, ids_test = load_csv_data(\"{}/test.csv\".format(DATA_DIR))\n dump_pickle_data(x_test, \"x_test\")\n dump_pickle_data(ids_test, \"ids_test\")\n\n return ys_train, x_train, ids_train, x_test, ids_test", "def init_all_models(app):\n\n from ggrc.extensions import get_extension_modules\n\n # Usually importing the module is enough, but just in case, also invoke\n # ``init_models``\n init_models(app)\n for extension_module in get_extension_modules():\n ext_init_models = getattr(extension_module, 'init_models', None)\n if ext_init_models:\n ext_init_models(app)", "def 
load_all(self):\n if os.path.isfile(self.vocab_path):\n self.vocab_processor = self.load_vocab()\n else:\n self.vocab_processor = self.train_vocab()\n if self.data_path:\n self.x, self.y = self.load_data(self.need_shuffle)\n print(\"Max document length: {}\".format(self.max_doc))", "def import_module(dataset, model_dir='dataset_processing'):\n return getattr(\n __import__(model_dir, fromlist=[dataset]), dataset)", "def load_model(self, tmp_dir):\n pass", "def load(self):\n #self.df = read_file(\"../data/yelp_academic_dataset_user.json\") #Full Data.\n self.df = read_file(\"../data/user300.json\") #For local machine.\n #self.get_friend_list()\n #self.save_friend_nodes()", "def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")", "async def load(self) -> None:\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def load(self):\n pass", "def launch(self, force_prep=False):\n #TODO process upploaded csv\n assert self.ready()\n self.launch_progress = 0\n self.set_status(\"Gathering data\")\n if 'O' not in set(self.labels.dict.values()):\n self.add_labels({max(list(self.labels.dict.keys()))+1:'O'})\n\n processed_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.csv')\n bert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.bert')\n elmo_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.elmo')\n nlp_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.nlp')\n sbert_file_path = os.path.join(DATASETS_PATH, self.dataset_uuid, 'processed.sbert')\n if os.path.exists(processed_file_path) and not force_prep:\n df = pd.read_csv(processed_file_path)\n if 'span_label' in df.columns:\n df['span_label']=df['span_label'].apply(eval)\n # Let's say loading the file is ~half the launch time\n # (if the file already exists)\n self.total = 2\n self.update(1)\n else:\n datafiles = [os.path.join(DATASETS_PATH, self.dataset_uuid, d) \\\n for d in os.listdir(os.path.join(DATASETS_PATH, self.dataset_uuid))]\n df = concat_dataset(datafiles)\n # expand total to account for time it takes to initialize the model\n self.total = len(df)*(1.1) \n self.set_status(\"Preprocessing data\")\n df = self.process_data(df, processed_file_path)\n\n # load list of 'allennlp.data.instance's. 
allennlp.data.instance can store true labels and tag info internally.\n if os.path.exists(nlp_file_path) and not force_prep:\n with open(nlp_file_path, 'rb') as f:\n sentences = pickle.load(f)\n else:\n #TODO define a universal reader for certain format\n # reader = RestaurantsDatasetReader()\n # data = reader.read(processed_file_path)\n #TODO handle when aux files do not exist\n pass\n bert_emb = np.load(bert_file_path, allow_pickle=True)\n elmo_emb = np.load(elmo_file_path, allow_pickle=True)\n sbert_emb = np.load(sbert_file_path, allow_pickle=True)\n for s, b, e, sb in zip(sentences, bert_emb, elmo_emb, sbert_emb):\n s.fields['bert'] = b\n s.fields['sbert'] = sb\n s.fields['elmo'] = e\n\n df['bert'] = bert_emb\n df['sbert'] = [sb for sb in sbert_emb]\n df['elmo'] = elmo_emb\n df['text_nlp'] = sentences\n\n columns_to_drop = list(\n set(df.columns).intersection(set(['span_label','file','label'])))\n df = df.drop(columns=columns_to_drop).reset_index()\n # since df['text_nlp'] contains true label info, drop 'labels' column.\n columns_to_drop = list(set(df.columns).difference(set(['index', 'Unnamed: 0', 'text', 'labels', 'split', 'bert', 'sbert',\n 'elmo', 'text_nlp'])))\n if len(columns_to_drop) > 0:\n df = df.drop(columns=columns_to_drop)\n df_train = df[df['split']=='train']\n df_dev = df[df['split'] == 'dev']\n df_valid = df[df['split'] == 'valid']\n df_test = df[df['split'] == 'test']\n\n self.text_inv_dict = dict(\n zip(list(df['text']),list(df.index))\n )\n\n # TODO split heldout set if necessary\n # for now, passing empty df as heldout set\n df_heldout = df_test\n\n self.emb_dict = Embeddings(df)\n\n self.set_status(\"Initializing modeler\")\n self.modeler = Modeler(df_train, df_dev, df_valid, df_test, df_heldout, self.labels, emb_dict=self.emb_dict)\n\n self.launch_progress = 1.0\n self.set_status(\"Finished\")\n return self.modeler", "def load_data(self):\n self.tif_file = self._find_tif_file()\n if self.with_labeling is not None:\n self.colabel_file = self._find_colabeled_file()\n self.colabel_stack = self._load_colabeled_img()\n self.dff, self.indices = self._populate_dff_data()\n self.loaded = True", "def load_model(self, folder_name):\n raise NotImplementedError()", "def get_each_loader(data_path, batch_size, trn_negnum, shuffle=True, num_workers=0):\n \n dataset = ML_Dataset(data_path, trn_negnum)\n \n if data_path.endswith('trn') == True:\n collate = dataset.train_collate\n else:\n collate = test_collate\n\n data_loader = data.DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n collate_fn=collate)\n\n return data_loader", "def load_data(self):\n raise NotImplementedError()", "def load(self, path):\n load_model(path, self)", "def load_model(self):\n self.opt.load_weights_folder = os.path.expanduser(self.opt.load_weights_folder)\n\n assert os.path.isdir(self.opt.load_weights_folder), \\\n \"Cannot find folder {}\".format(self.opt.load_weights_folder)\n print(\"loading model from folder {}\".format(self.opt.load_weights_folder))\n\n for model_name in [\"encoder\", \"decoder\"]:\n print(\"Loading {} weights...\".format(model_name))\n path = os.path.join(self.opt.load_weights_folder, \"{}.pth\".format(model_name))\n model_dict = self.encoder.state_dict() if model_name == \"encoder\" else self.decoder.state_dict()\n pretrained_dict = torch.load(path)\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n if model_name == \"encoder\":\n 
self.encoder.load_state_dict(model_dict)\n else:\n self.decoder.load_state_dict(model_dict)\n\n # loading adam state\n optimizer_load_path = os.path.join(self.opt.load_weights_folder, \"adam.pth\")\n if os.path.isfile(optimizer_load_path):\n print(\"Loading Adam weights\")\n optimizer_dict = torch.load(optimizer_load_path)\n self.optimizer.load_state_dict(optimizer_dict)\n else:\n print(\"Cannot find Adam weights so Adam is randomly initialized\")", "def load_all_data_from_file(self) -> None:\n self.load_gene_data_from_file()\n self.load_ontology_from_file(ontology_type=DataType.GO, ontology_url=self.go_ontology_url,\n ontology_cache_path=self.go_ontology_cache_path,\n config=self.config)\n self.load_associations_from_file(associations_type=DataType.GO, associations_url=self.go_associations_url,\n associations_cache_path=self.go_associations_cache_path, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.DO, ontology_url=self.do_ontology_url,\n ontology_cache_path=self.do_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.DO, associations_url=self.do_associations_url,\n associations_cache_path=self.do_associations_cache_path,\n association_additional_cache_path=self.do_associations_new_cache_path,\n association_additional_url=self.do_associations_new_url, config=self.config)\n self.load_ontology_from_file(ontology_type=DataType.EXPR, ontology_url=self.expression_ontology_url,\n ontology_cache_path=self.expression_ontology_cache_path, config=self.config)\n self.load_associations_from_file(associations_type=DataType.EXPR,\n associations_url=self.expression_associations_url,\n associations_cache_path=self.expression_associations_cache_path,\n config=self.config)\n self.load_orthology_from_file()\n self.load_expression_cluster_data()\n self.load_protein_domain_information()", "def import_data_model(directory):\n analyses = pd.read_excel(directory + 'analyses.xlsx')\n analytes = pd.read_excel(directory + 'analytes.xlsx')\n for index, analysis in analyses.iterrows():\n analyte_data = []\n analyte_names = analysis.analyte_keys.split(', ')\n for analyte_key in analyte_names:\n analyte_item = analytes.loc[analytes.key == analyte_key]\n analyte_data.append(analyte_item.to_dict(orient='records'))\n analyses.at[index, 'analytes'] = analyte_data \n analyses_data = analyses.to_dict(orient='records')\n for index, values in analyses_data.iterrows():\n doc_id = str(values.key)\n doc_data = values.to_dict()\n ref = ''\n update_document(ref, doc_data)\n # doc_data = data.to_dict(orient='index')\n # data_ref = create_reference(db, ref)\n # data_ref.document(doc_id).set(doc_data, merge=True)\n # data_ref.set(doc_data, merge=True)\n\n return NotImplementedError", "def load_data_to_db(self, path):\n table_names = ['train_transaction', 'train_identity', 'test_transaction', 'test_identity']\n for table_name in table_names:\n pat = self.TRANSACTION_NON_NUMBER_PATTERN if 'transaction' in table_name else self.IDENTITY_NON_NUMBER_PATTERN\n print(\"Loading table: \" + table_name)\n fn = os.path.join(path, table_name + '.csv')\n self.dbinstance.build_table_from_csv(fn, pat, table_name)\n print(\"Loaded table \" + table_name)", "def load_data(ctx, klass=None):\n if klass:\n if klass and not klass.startswith(\"public_data.models\"):\n klass = f\"public_data.models.{klass}\"\n options = {\"class\": klass}\n connecter = ScalingoInterface(ctx.obj)\n connecter.manage_py(\"load_data\", **options)", "def load(dirpath):\n\n batch = 
Pickler.load(join(dirpath, 'batch.pkl'))\n\n # load annotator\n if exists(join(dirpath, 'annotation.json')):\n annotator = Annotation.load(dirpath)\n batch.annotator = annotator\n\n return batch", "def _load_data(self):\n self.mapper = Mapper()\n self.mapper.generate_vocabulary(self.review_summary_file)\n self.X_fwd, self.X_bwd, self.Y = self.mapper.get_tensor(reverseflag=True)\n # Store all the mapper values in a dict for later recovery\n self.mapper_dict = dict()\n self.mapper_dict['seq_length'] = self.mapper.get_seq_length()\n self.mapper_dict['vocab_size'] = self.mapper.get_vocabulary_size()\n self.mapper_dict['rev_map'] = self.mapper.get_reverse_map()\n # Split into test and train data\n self._split_train_tst()" ]
[ "0.7716706", "0.6954782", "0.6742698", "0.67201203", "0.6513403", "0.64677274", "0.64014745", "0.6369573", "0.6368797", "0.63448185", "0.633244", "0.6321181", "0.62659895", "0.6258032", "0.6243625", "0.6167777", "0.6158548", "0.6115253", "0.608663", "0.608182", "0.6074489", "0.6064093", "0.6046113", "0.6038184", "0.602964", "0.6023165", "0.59820455", "0.59791785", "0.5966759", "0.5966759", "0.59466356", "0.59404016", "0.5933369", "0.59170157", "0.5916506", "0.5915851", "0.58971727", "0.589235", "0.5889995", "0.58842236", "0.5878943", "0.58775145", "0.5875041", "0.5863987", "0.5833737", "0.5823342", "0.5811982", "0.58095014", "0.5804433", "0.5800179", "0.5783392", "0.5774168", "0.5768989", "0.57172596", "0.5715631", "0.56950706", "0.56871426", "0.56864333", "0.5685239", "0.56738085", "0.5666147", "0.56547", "0.5644036", "0.5641222", "0.56352794", "0.56323296", "0.56304955", "0.562074", "0.56088144", "0.56040555", "0.56003976", "0.5595491", "0.5588895", "0.55851", "0.5583495", "0.5582391", "0.55801964", "0.5575417", "0.55747", "0.5571575", "0.5569617", "0.55659467", "0.5564869", "0.5555497", "0.5555497", "0.5555497", "0.5555497", "0.5553746", "0.55524725", "0.5546364", "0.553478", "0.5527498", "0.55266404", "0.55262333", "0.5525401", "0.5524879", "0.55157936", "0.5514243", "0.551341", "0.5512399" ]
0.7674908
1
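Several of the negatives in the record above follow the same cache-then-parse loading pattern: try a previously pickled artifact first, fall back to parsing the CSV, then re-pickle the result for the next run. Below is a minimal, self-contained sketch of that flow; the file paths and the flat row format are hypothetical stand-ins for the originals' project-specific loaders.

import csv
import pickle
from pathlib import Path

def load_rows(csv_path, cache_path="rows.pkl"):
    # Fast path: a previous run already parsed and cached the data.
    cache = Path(cache_path)
    if cache.exists():
        with cache.open("rb") as f:
            return pickle.load(f)
    # Slow path: parse the CSV once, then cache the result for next time.
    with open(csv_path, newline="") as f:
        rows = list(csv.reader(f))
    with cache.open("wb") as f:
        pickle.dump(rows, f)
    return rows

The design is file-level memoization: correctness depends on invalidating the cache when the CSV changes, which the originals (and this sketch) leave to the caller.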
Return the class type to be used for a given event name.
def type_for(data): switcher = { # Startup "FileHeader": models.FileHeader, "ClearSavedGame": models.ClearSavedGame, "NewCommander": models.NewCommander, "LoadGame": models.LoadGame, "Progress": models.Progress, "Rank": models.Rank, # Travel "Docked": models.Docked, "DockingCancelled": models.DockingCancelled, "DockingDenied": models.DockingDenied, "DockingGranted": models.DockingGranted, "DockingRequested": models.DockingRequested, "DockingTimeout": models.DockingTimeout, "FSDJump": models.FSDJump, "Liftoff": models.Liftoff, "Location": models.Location, "SupercruiseEntry": models.SupercruiseEntry, "SupercruiseExit": models.SupercruiseExit, "Touchdown": models.Touchdown, "Undocked": models.Undocked, # Combat "Bounty": models.Bounty, "CapShipBond": models.CapShipBond, "Died": models.Died, "EscapeInterdiction": models.EscapeInterdiction, "FactionKillBond": models.FactionKillBond, "HeatDamage": models.HeatDamage, "HeatWarning": models.HeatWarning, "HullDamage": models.HullDamage, "Interdicted": models.Interdicted, "Interdiction": models.Interdiction, "PVPKill": models.PVPKill, "ShieldState": models.ShieldState, # Exploration "Scan": models.Scan, "MaterialCollected": models.MaterialCollected, "MaterialDiscarded": models.MaterialDiscarded, "MaterialDiscovered": models.MaterialDiscovered, "BuyExplorationData": models.BuyExplorationData, "SellExplorationData": models.SellExplorationData, "Screenshot": models.Screenshot, # Trade "BuyTradeData": models.BuyTradeData, "CollectCargo": models.CollectCargo, "EjectCargo": models.EjectCargo, "MarketBuy": models.MarketBuy, "MarketSell": models.MarketSell, "MiningRefined": models.MiningRefined, # Station Services "BuyAmmo": models.BuyAmmo, "BuyDrones": models.BuyDrones, "CommunityGoalDiscard": models.CommunityGoalDiscard, "CommunityGoalJoin": models.CommunityGoalJoin, "CommunityGoalReward": models.CommunityGoalReward, "CrewAssign": models.CrewAssign, "CrewFire": models.CrewFire, "CrewHire": models.CrewHire, "EngineerApply": models.EngineerApply, "EngineerCraft": models.EngineerCraft, "EngineerProgress": models.EngineerProgress, "FetchRemoteModule": models.FetchRemoteModule, "MassModuleStore": models.MassModuleStore, "MissionAbandoned": models.MissionAbandoned, "MissionAccepted": models.MissionAccepted, "MissionCompleted": models.MissionCompleted, "MissionFailed": models.MissionFailed, "ModuleBuy": models.ModuleBuy, "ModuleRetrieve": models.ModuleRetrieve, "ModuleSell": models.ModuleSell, "ModuleSellRemote": models.ModuleSellRemote, "ModuleStore": models.ModuleStore, "ModuleSwap": models.ModuleSwap, "PayFines": models.PayFines, "PayLegacyFines": models.PayLegacyFines, "RedeemVoucher": models.RedeemVoucher, "RefuelAll": models.RefuelAll, "RefuelPartial": models.RefuelPartial, "Repair": models.Repair, "RepairAll": models.RepairAll, "RestockVehicle": models.RestockVehicle, "ScientificResearch": models.ScientificResearch, "SellDrones": models.SellDrones, "ShipyardBuy": models.ShipyardBuy, "ShipyardNew": models.ShipyardNew, "ShipyardSell": models.ShipyardSell, "ShipyardTransfer": models.ShipyardTransfer, "ShipyardSwap": models.ShipyardSwap, # Powerplay "PowerplayCollect": models.PowerplayCollect, "PowerplayDefect": models.PowerplayDefect, "PowerplayDeliver": models.PowerplayDeliver, "PowerplayFastTrack": models.PowerplayFastTrack, "PowerplayJoin": models.PowerplayJoin, "PowerplayLeave": models.PowerplayLeave, "PowerplaySalary": models.PowerplaySalary, "PowerplayVote": models.PowerplayVote, "PowerplayVoucher": models.PowerplayVoucher, # Other Events 
"ApproachSettlement": models.ApproachSettlement, "CockpitBreached": models.CockpitBreached, "CommitCrime": models.CommitCrime, "Continued": models.Continued, "DatalinkScan": models.DatalinkScan, "DatalinkVoucher": models.DatalinkVoucher, "DataScanned": models.DataScanned, "DockFighter": models.DockFighter, "DockSRV": models.DockSRV, "FuelScoop": models.FuelScoop, "JetConeBoost": models.JetConeBoost, "JetConeDamage": models.JetConeDamage, "LaunchFighter": models.LaunchFighter, "LaunchSRV": models.LaunchSRV, "Promotion": models.Promotion, "RebootRepair": models.RebootRepair, "ReceiveText": models.ReceiveText, "Resurrect": models.Resurrect, "SelfDestruct": models.SelfDestruct, "SendText": models.SendText, "Synthesis": models.Synthesis, "USSDrop": models.USSDrop, "VehicleSwitch": models.VehicleSwitch, "WingAdd": models.WingAdd, "WingJoin": models.WingJoin, "WingLeave": models.WingLeave, } return switcher.get(data["event"], models.BaseModel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_event_class_by_type(type):\n event_module = importlib.import_module('.'.join(type.split('.')[:-1]))\n return getattr(event_module, type.split('.')[-1])", "def _get_event_type(event):\n return event.type()", "def _get_event_type(event):\n return event.type()", "def class_name(name: str) -> str:\n return text.pascal_case(utils.safe_snake(name, \"type\"))", "def get_type(self):\n return self.event_type", "def event_type(self) -> str:\n try:\n return _EVENT_TYPE_MAPPINGS.get(self.transaction_event_code[0:3])\n except:\n return None", "def event_type_name(self, event_type):\n return irfman.IrfManager.event_type_names[event_type]", "def _get_classname(cls):\n return cls.__name__", "def get_event_type(self, raw_type):\n try:\n return self.codes['types'][raw_type]\n except KeyError:\n raise UnknownEventType(\"We don't know this event type\")", "def clsname(c):\n return c.__class__.__name__.lower()", "def get_class(self, name):\n return self.host.get_class(name)", "def find_class(self, class_name: str) -> Type:\n pass", "def getClassName(self):\n n = type(self).__name__\n return n", "def type(self) -> str:\n return self._event.get('type')", "def getEventType(event):\n\tmatch = reEventType.search(event)\n\tif match:\n\t\treturn match.group(0).split(\" \")[1].rstrip(\"\\r\")\n\telse:\n\t\treturn None\n\t\treturn None", "def type(cls):\n return cls.__name__", "def get_handle_class(handle_class_name: str) -> Type[\"Handle\"]:\n klass = get_type_registry().parse_type_name(handle_class_name)\n return klass", "def get_class(self, name):\n raise NotImplementedError", "def classname(cls):\n return cls.__name__.lower()", "def get_event_type(self, event_payload: dict):\n return event_payload[METASTOCK_EVENT_TYPE_KEY]", "def className(self):\n namevalue = self.__class__.__name__\n return str(namevalue)", "def Event(name):\n c = new_class(name, bases=(_Event,))(name)\n return c", "def name_to_type(self, name):\n return self.CUSTOM_PREFIX + name", "def f_get_class_name(self):\n return self.__class__.__name__", "def _find_class(self, class_name: str) -> Type:\n return self.class_resolver.find_class(class_name)", "def getclassname(instance_or_cls):\n return getclass(instance_or_cls).__name__", "def event_type(self) -> int:\n return self.data[\"args\"][\"eventType\"]", "def class_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"class_name\")", "def _type_name(cls, manual_name):\r\n cf_name = ''\r\n if manual_name:\r\n cf_name = manual_name.lower()\r\n else:\r\n camelcase = re.compile(r'([a-z])([A-Z])')\r\n ccase = lambda s: camelcase.sub(lambda v: '{}_{}'.format(v.group(1), v.group(2).lower()), s)\r\n \r\n cf_name += ccase(cls.__name__)\r\n cf_name = cf_name.lower()\r\n if cls.__use_module_name__:\r\n cf_name = cls.__module__ + '_{}'.format(cf_name)\r\n return cf_name", "def getClassName(self):\n return signal_base_get_class_name(self.obj)", "def type(name):", "def get_class_name(name):\n name = _strip_class_name(name)\n return convert_to_camel_case(name)", "def _get_class(self, name):\n return self._hw_mm.namespaces[\"hw_devices\"][name]", "def class_name(self) -> str:\n return pulumi.get(self, \"class_name\")", "def select_event_type(self, which):\n if which is None or which=='all': return None\n etnames = irfman.IrfManager.event_type_names\n try:\n if type(which)==str:\n which = which.lower()\n return etnames.index(which)\n t = etnames[which]\n return which\n except Exception as msg:\n print ('Bad event type, \"%s\": %s\\nMust be one of %s or a valid index' % (which, msg, 
etnames))\n raise", "def type_name(self):\n return self.TYPE_NAMES[self.type]", "def class_name(self) -> str:\n return self.__class__.__name__", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def get_python_classname(raw_classname):\n class_name = raw_classname.replace(\" \",\"\")\n class_name = class_name.replace(\"-\",\"\")\n return class_name", "def class_name(cls):\n return cls.__name__", "def class_name(cls):\n return cls.__name__", "def type_name(attr_type: AttrType) -> str:\n return attr_type.native_name or class_name(attr_type.name)", "def get_attribute_class(self, name):\n self.validate_attribute_name(name)\n return self.schema[name].get_attribute_class()", "def get_class(self, class_name):\n try:\n return self._classes[class_name]\n except KeyError:\n raise NameError", "def get_class_name(self):\n\n if \"class\" in self._root.attrib:\n return self._root.attrib['class']\n else:\n return self._root.tag", "def get_type(self, type_name):\n return type_cache.get_type_cache().get_type(type_name, self.target)", "def type(self):\r\n return self.__class__.__name__", "def event_type(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FilterEventTypeItem']]]]:\n return pulumi.get(self, \"event_type\")", "def getClass(strname):\n \n modulename, classname = strname.split('.')\n classname = classname.split('(')[0]\n if hasattr(Analysis,modulename):\n module_ = getattr(Analysis,modulename)\n class_ = getattr(module_,classname)\n else:\n module_ = getattr(Summary,modulename)\n class_ = getattr(module_,classname)\n \n return class_", "def get_type_by_name(self, name):\n raise NotImplementedError()", "def get_class_base_name(name):\n if name is not None:\n return get_class_name(name)\n else:\n return 'object'", "def exception_class(self, exception):\n\n\t\tcls = type(exception)\n\t\tif cls.__module__ == 'exceptions': # Built-in exception.\n\t\t\treturn cls.__name__\n\t\treturn \"%s.%s\" % (cls.__module__, cls.__name__)", "def str_to_class(referance_name):\n return getattr(sys.modules[__name__], referance_name)", "def classname(class_object):\n return class_object.__class__.__name__", "def get_type(self, name):\n pkg_name = name.split('.')[0]\n type_name = name.split('.')[1]\n for t in self.types:\n if t.package.name == pkg_name and t.name == type_name:\n return t\n return None", "def get_attribute_class(self, attr_name):\n return self.attrs.get_attribute_class(attr_name)", "def _make_class_name(name):\n return name[0].upper() + name[1:] + \"Ufunc\"", "def type_name(self):\n return self._type_name", "def type_name(self):\n return self.TYPE_NAMES.get(self.type, \"Unknown\")", "def get_class(self, name):\n if name in self._objects_mapping:\n classname = self._objects_mapping[name]\n\n klass = None\n try:\n klass = getattr(self._sdk, classname)\n except:\n Printer.raise_error('Unknown class %s' % classname)\n\n return klass\n\n Printer.raise_error('Unknown object named %s' % name)", "def get_type_from_string(cls_path: str) -> Type:\n module_name, class_name = cls_path.rsplit(\".\", 1)\n return getattr(import_module(module_name), class_name)", "def get_message_class_by_type(msgtype):\n\n try:\n module = importlib.import_module('platypush.message.' 
+ msgtype)\n except ImportError as e:\n logging.warning('Unsupported message type {}'.format(msgtype))\n raise RuntimeError(e)\n\n cls_name = msgtype[0].upper() + msgtype[1:]\n\n try:\n msgclass = getattr(module, cls_name)\n except AttributeError as e:\n logging.warning('No such class in {}: {}'.format(\n module.__name__, cls_name))\n raise RuntimeError(e)\n\n return msgclass", "def getTypeCode(self):\n return _libsbml.Event_getTypeCode(self)", "def get_class(self):\n\t\treturn self.CLASS", "def get_class(self):\n return devices.get_class(self.type)", "def find_name(self):\r\n\r\n return self.__class__.__name__", "def get_typecode(self, name):\n return self.codes['type_codes'][name]", "def _get_frame_class(frame):\n if isinstance(frame, str):\n frame_names = frame_transform_graph.get_names()\n if frame not in frame_names:\n raise ValueError(\n f'Coordinate frame name \"{frame}\" is not a known '\n f\"coordinate frame ({sorted(frame_names)})\"\n )\n frame_cls = frame_transform_graph.lookup_name(frame)\n\n elif isinstance(frame, type) and issubclass(frame, BaseCoordinateFrame):\n frame_cls = frame\n\n else:\n raise ValueError(\n \"Coordinate frame must be a frame name or frame class, not a\"\n f\" '{frame.__class__.__name__}'\"\n )\n\n return frame_cls", "def typeof(inst):\n return type(inst).__name__", "def _get_kind(cls):\n return cls.__name__", "def guess_type(object):\n # retrieve a list of classes\n classes = (\n re.match(\"<class '(.+)'>\", str(object.__class__)).groups()[0].split(\".\")\n )\n # Return the most specific one\n return classes[-1]", "def get_class_name(o, lower=False):\n if not isinstance(o, type):\n o = o.__class__\n if lower:\n return o.__name__.lower()\n else:\n return o.__name__", "def name(self) -> str:\n return self.class_names[self.class_num]", "def get_class_name(obj) -> str:\n return obj.__class__.__name__", "def get_class_functional_name(name):\n name = _strip_class_name(name)\n return name", "def instance_class_name(instance):\n return class_name(instance.__class__)", "def instance_class_name(instance):\n return class_name(instance.__class__)", "def type_name(self):\n # TODO(peria): Replace with exceptions.NotImplementedError() after shipping.\n assert 'type_name() is not implemented for class %s' % (type(self))", "def typ(rxn_class):\n return rxn_class[0]", "def get_cls_name(obj: Any, package_name: bool = True) -> str:\n cls_name = str(obj.__class__)\n # remove class prefix\n cls_name = cls_name.split('\\'')[1]\n # split modules\n cls_split = cls_name.split('.')\n if len(cls_split) > 1:\n cls_name = cls_split[0] + '.' 
+ cls_split[-1] if package_name else cls_split[-1]\n else:\n cls_name = cls_split[0]\n return cls_name", "def name_to_label(self, name):\n\t\treturn self.classes[name]", "def C(classname):\n return objc.objc_getClass(_utf8(classname))", "def get_name(name, class_name):\n if name:\n return name\n if not class_name:\n raise MLRunInvalidArgumentError(\"name or class_name must be provided\")\n if isinstance(class_name, type):\n return class_name.__name__\n return class_name", "def get_class_file_name(name):\n name = _strip_class_name(name)\n return name + FILE_EXTENSION", "def name_to_label(self, name):\n return self.classes[name]", "def name_to_label(self, name):\n\t\t\treturn self.classes[name]", "def type(self):\n if \"type\" in self._prop_dict:\n if isinstance(self._prop_dict[\"type\"], OneDriveObjectBase):\n return self._prop_dict[\"type\"]\n else :\n self._prop_dict[\"type\"] = EventType(self._prop_dict[\"type\"])\n return self._prop_dict[\"type\"]\n\n return None", "def determine_file_type(self, name):\n for type_key, type_name, type_test in self.filetypes:\n if type_test(name):\n return type_key\n return self.filetypes[-1][0]", "def _declaring_class(obj):\n name = _qualname(obj)\n return name[:name.rfind('.')]", "def getWidgetClassName(self, tagName):\n if tagName == \"labelframe\":\n className = \"TkLabelFrame\"\n elif tagName == \"optionmenu\":\n className = \"TkOptionMenu\"\n elif tagName == \"toplevel\":\n className = \"TkToplevel\"\n else:\n className = \"Tk\" + tagName.capitalize()\n\n return className", "def get_subclass_from_name(cls, name):\n\n for subclass in cls.all_named_subclasses():\n if subclass.UI_NAME == name:\n return subclass\n\n return None", "def get_type_name(type):\n name = type.name\n if type.is_simple:\n return _get_simple_type_mapping(name)\n elif type.is_enum:\n return _get_simple_type_mapping('str')\n elif type.is_complex:\n return get_class_name(name)", "def get_class(name):\n try:\n cls, constructor = registry[name]\n except KeyError:\n raise UnregisteredClassError(\"'%s' is not a registered \"\n \"JSONAlizable class name\" % name, name)\n if constructor is not None:\n return constructor\n return cls", "def _get_basetype(self, event_arg: dict) -> Tuple[str, dict]:\n arg_type = event_arg['type']\n if arg_type in _base_types:\n return (arg_type, None)\n # enum\n if '::' in arg_type:\n enum_type = arg_type\n else:\n enum_type = self.namespace() + '::' + arg_type\n enum = self._event_extras['enums'][enum_type]\n return (enum['type'], enum)", "def _class(self):\n return self.__class", "def name_to_type(name: str):\n return GCPNodeType(name.split(\"-\")[-1])", "def type_name(self) -> str:\n return self.head.__class__.__name__", "def type_name_to_type(name):\n if name in SIMPLE_TYPES:\n return SIMPLE_TYPES[name]\n elif name in PROXY_TYPES:\n return PROXY_TYPES[name]\n return None", "def device_class(self):\n return self.sensor_type[\"class\"]", "def identify_class(self, cls):", "def parse_dispatch_type(dispatch_string: str):\n if not dispatch_string:\n return None\n\n dispatch_string = dispatch_string.lower().strip()\n\n if dispatch_string == \"load\":\n return DispatchType.LOAD\n\n if dispatch_string == \"generating\":\n return DispatchType.GENERATOR\n\n if dispatch_string == \"generator\":\n return DispatchType.GENERATOR\n\n raise Exception(\"Unknown dispatch type: {}\".format(dispatch_string))" ]
[ "0.7802693", "0.70439726", "0.70439726", "0.6722869", "0.6596402", "0.65878123", "0.65594405", "0.65270084", "0.645151", "0.64356005", "0.64065075", "0.6406168", "0.6361024", "0.63387644", "0.6334703", "0.62997335", "0.6297802", "0.6282378", "0.625796", "0.6205886", "0.61820716", "0.61627823", "0.6153638", "0.61332405", "0.61171585", "0.60715765", "0.6065666", "0.60650027", "0.6064955", "0.6059583", "0.602151", "0.6013818", "0.6012643", "0.601128", "0.600833", "0.5997745", "0.59640425", "0.59399486", "0.59392786", "0.5930554", "0.5930554", "0.59146833", "0.5906721", "0.5904402", "0.59034604", "0.5891652", "0.5889303", "0.5879886", "0.5878332", "0.5874089", "0.5869396", "0.58559704", "0.58547324", "0.5834856", "0.58308727", "0.5828099", "0.5812461", "0.58057755", "0.57977694", "0.5797635", "0.57927746", "0.57776135", "0.5733759", "0.57075626", "0.57068735", "0.5699394", "0.5681236", "0.5660051", "0.56555265", "0.56511724", "0.564995", "0.56291115", "0.5623439", "0.5622625", "0.56182337", "0.56159365", "0.56159365", "0.5608342", "0.5598947", "0.55935335", "0.558199", "0.5551427", "0.555141", "0.55318147", "0.5506132", "0.5494491", "0.5486138", "0.5482307", "0.54807407", "0.5473278", "0.5471169", "0.54674774", "0.54649764", "0.5457716", "0.545494", "0.5454496", "0.54424965", "0.54327315", "0.54263884", "0.5425586", "0.54171014" ]
0.0
-1
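The document above resolves journal event names to model classes through a flat dict plus switcher.get(data["event"], models.BaseModel), so unrecognized events degrade to a generic model instead of raising. Below is a minimal sketch of that dispatch pattern; the two concrete classes are stand-ins, since the real models module is not shown in this record.

class BaseModel:
    """Fallback used for event names with no dedicated class."""

class Docked(BaseModel):
    pass

class FSDJump(BaseModel):
    pass

_SWITCHER = {
    "Docked": Docked,
    "FSDJump": FSDJump,
}

def type_for(data):
    # dict.get with a default mirrors the original's fallback, so unknown
    # journal events still map to a usable class instead of raising KeyError.
    return _SWITCHER.get(data["event"], BaseModel)

assert type_for({"event": "Docked"}) is Docked
assert type_for({"event": "SomethingNew"}) is BaseModel

Compared with an if/elif chain, the lookup table keeps dispatch O(1) and makes the fallback explicit in a single place.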
Force cancellations when owners of predictions are bankrupt
def admin_bankruptcy(self, with_report=False): name = self.client.srandmember(self._NAMES) self.admin_bankruptcy_forced_cancellation(name=name, with_report=with_report) self.admin_bankruptcy_notification(name=name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel_dummy(self):\n if self.state != 'authorized':\n self.raise_user_error('cancel_only_authorized')\n else:\n self.state = 'cancel'\n self.save()", "def cancel():", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def do_uncancel(self):\r\n self.write({'cancelled': False})", "def admin_bankruptcy_forced_cancellation(self, name, with_report):\n discards = list()\n for delay in self.DELAYS:\n write_keys = self._get_sample_owners(name=name, delay=delay)\n leaderboard = self.get_leaderboard(granularity=LeaderboardGranularity.name_and_delay, count=10000,\n name=name, delay=delay, readable=False)\n losers = [key for key in write_keys if\n (self.shash(key) in leaderboard) and (leaderboard[self.shash(key)] < -1.0)]\n\n for write_key in losers:\n if self.bankrupt(write_key=write_key):\n code = self.shash(write_key)\n memo = Memo(activity=Activity.cancel, context=ActivityContext.bankruptcy, name=name, code=code,\n write_key=write_key, message='Initiating cancellation of submissions due to bankruptcy')\n self.add_memo_as_owner_confirm(memo=memo)\n self.cancel(name=name, write_key=write_key, delay=delay)\n discards.append((name, write_key))\n report_data = dict(discards)\n report_memo = Memo(activity=Activity.daemon, context=ActivityContext.bankruptcy, count=len(report_data), data=report_data)\n self.add_memo_as_system_confirm(memo=report_memo,private_actor=PrivateActor.bankruptcy_daemon)\n return len(discards) if not with_report else report_memo.as_dict()", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def cancel(self):\n pass", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "async def cancel_shielded_checkpoint(cls) -> None:\n with cls.create_cancel_scope(shield=True):\n await cls.sleep(0)", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "def onDealCanceled(argsList):\r\n\tCyInterface().setDirty(InterfaceDirtyBits.Score_DIRTY_BIT, True)", "def cancel(self):\n self.cancelled.set()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def case_cancel(self, cr, uid, ids, context=None):\n res = super(crm_lead, self).case_cancel(cr, uid, ids, context=context)\n self.write(cr, uid, ids, {'probability' : 0.0}, context=context)\n return res", "def landlord_button_cancel_tenancy(self):\n for record in self:\n self.write(\n {'state': 'cancelled', 'tenancy_cancelled': True})\n rent_ids = self.env['tenancy.rent.schedule'].search(\n [('tenancy_id', '=', record.id),\n ('paid', '=', False),\n ('move_check', '=', False)])\n for value in rent_ids:\n value.write({'is_readonly': True})\n return True", "def cancel_workers(self):\n pass", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def canCancel(self) -> bool:\n ...", "def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)", "def cancel(self):\n self.on_cancel()", "def cancel_callback(self):\n pass", "def cancel(self):\n self.__canceled = True", 
"def test_dont_cancel_for_already_cancelled(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 10, 10, tzinfo=dt_timezone.utc\n )\n self.unpaid.status = 'CANCELLED'\n self.unpaid.save()\n self.assertEqual(\n self.unpaid.status, 'CANCELLED', self.unpaid.status\n )\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking and studio once\n # for all cancelled bookings\n unpaid_booking = Booking.objects.get(id=self.unpaid.id)\n self.assertEqual(len(mail.outbox), 0)\n self.assertEqual(\n unpaid_booking.status, 'CANCELLED', unpaid_booking.status\n )\n\n # auto_cancelled set to True only on cancelled bookings\n self.assertFalse(unpaid_booking.auto_cancelled)\n self.assertFalse(self.paid.auto_cancelled)", "def mark_cancelled(self):\n self.status = STATUS_CANCELED", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def cancel(self):\n self.is_active = False\n self.save()", "def cancel(self):\n self.is_active = False\n self.save()", "def on_cancel(self) -> None:\n pass", "def on_cancel(self) -> None:\n pass", "def canceled(self):\n self.reject()", "def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def canceled(self):\n return", "def cancel_run(self, run_id):\n raise NotImplementedError()", "def cancel(self): #$NON-NLS-1$\r", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def cancel(self):\n self._logger.warning(\"Comparison run being cancelled.\")\n self._msg_logger.warning(run_analysis_view._text[\"log10\"])\n if hasattr(self, \"_off_thread\"):\n self._off_thread.cancel()", "def cancelUnblockVis(self):\n if self.__nextSetZoneDoneEvent is not None:\n self.ignore(self.__nextSetZoneDoneEvent)\n self.__nextSetZoneDoneEvent = None", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "async def cancel_shielded_checkpoint() -> None:\n await get_async_backend().cancel_shielded_checkpoint()", "def hook_cancel_assistance(self, data):\n request_id = data[\"request_id\"]\n assignee_chat_id = data[\"volunteer\"]\n log.info(\"CANCEL req:%s\", request_id)\n self.send_message(assignee_chat_id, c.MSG_REQUEST_CANCELED)\n\n self.updater.dispatcher.user_data[assignee_chat_id].update(\n {\"current_request\": None, \"reviewed_request\": None, \"state\": c.State.AVAILABLE}\n )\n del self.updater.dispatcher.bot_data[request_id]\n self.updater.dispatcher.update_persistence()", "def check_auto_reject(self):\r\n for pr in self:\r\n if not pr.line_ids.filtered(lambda l: l.cancelled is False):\r\n pr.write({'state': 'rejected'})", "async def checkpoint_if_cancelled(cls) -> None:\n if cls.current_effective_deadline() == -math.inf:\n await cls.checkpoint()", "def cancel_initialization(self):", "def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()", "def cancel(self):\n self.blackened = self.blackened_history[-1]\n self.blackened_history.pop()\n if self.victory:\n self.victory = False\n self.blackened_history_size -= 1", "def test_dont_cancel_for_already_cancelled(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 11, 10, tzinfo=dt_timezone.utc\n )\n self.unpaid.cancelled = True\n self.unpaid.save()\n\n 
management.call_command('cancel_unpaid_ticket_bookings')\n # emails are sent to user per cancelled booking and studio once\n # for all cancelled bookings\n self.unpaid.refresh_from_db()\n self.assertEqual(len(mail.outbox), 0)\n self.assertTrue(self.unpaid.cancelled)", "async def canceltorment(self, ctx):\r\n\t\t\r\n\t\tchannel = ctx.message.channel\r\n\t\tauthor = ctx.message.author\r\n\t\tserver = ctx.message.guild\r\n\r\n\t\t# Only allow owner to change server stats\r\n\t\tisOwner = self.settings.isOwner(ctx.author)\r\n\t\tif isOwner == None:\r\n\t\t\treturn\r\n\t\telif isOwner == False:\r\n\t\t\treturn\r\n\t\t\t\r\n\t\tif not self.toTorment:\r\n\t\t\tawait ctx.message.author.send('Not currently tormenting.')\r\n\t\t\treturn\r\n\t\t# Cancel it!\r\n\t\tself.toTorment = False\r\n\t\tawait ctx.message.author.send('Tormenting cancelled.')", "def reqGlobalCancel(self):\r\n self.ib.reqGlobalCancel()\r\n logging.info('reqGlobalCancel')", "def _order_cancel(self, bo):\n log.info(\"bo_blotter: order_cancel bracket order bo#%s\" % bo.ticket) \n cancelled = bo.cancel()\n return(cancelled)", "def kill_pending(self):\n for req in self._outbox:\n if not req.Test():\n req.Cancel()\n self._outbox = []", "def cancel_analysis(request):\n cancel_flag[0] = True\n global currently_analyzing\n\n if not currently_analyzing:\n return HttpResponse(\n json.dumps({'error':\"error\"}),\n content_type=\"application/json\"\n )\n else:\n currently_analyzing = False\n return HttpResponse(\n json.dumps({'success':\"success\"}),\n content_type=\"application/json\"\n )", "def requestCancelled(builder, request):", "def change_abandoned(self, event):\n pass", "def okToUnblockVis(self):\n self.cancelUnblockVis()", "def reject_waiting_call(self) -> None:", "def reject(self):\n pass", "def aborting(self):\n \n pass", "def force_stop(self):\n #cancel any current request:\n self._cancel_current_request()", "def abort_unnecessary_jobs(self):\n self._update_candidate_range()\n for r in self.revisions:\n if r == self.lkgr:\n break\n if not r.tested or r.failed:\n r.good = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover\n for r in self.revisions[self.fkbr.list_index + 1:]:\n if not r.tested or r.failed:\n r.bad = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover", "def cancel_policy(self, cancellation_cause=None, date_cursor=None):\n if not date_cursor:\n date_cursor = datetime.now().date()\n if not cancellation_cause:\n cancellation_cause = \"Policy was cancelled on demand\"\n self.policy.status = u'Canceled'\n self.policy.cancellation_date = date_cursor\n self.policy.status_info = cancellation_cause\n\n # mark all policy's invoices deleted ??\n\n db.session.commit()", "def stopThinking(self):\n self._brain.setState(\"controlled\")", "def set_abort_flag(self):\r\n self.abort_flag = True", "def set_abort_flag(self):\r\n self.abort_flag = True", "async def checkpoint_if_cancelled() -> None:\n await get_async_backend().checkpoint_if_cancelled()", "def evaluate_cancellation_pending_due_to_non_pay(self, date_cursor=None):\n pass", "def cancel(self):\n self.session.rollback()", "def cancel(self, membership, callback=None):", "def cancel(self):\n _notify.remove(self.callb)", "def abortAndBrake(self):\n return self.set_command(\"B\")", "def action_cancel(self):\n self.state = 'canceled'", "def cancelChanges(self):\n self.close()", "def consider_deactivation(self):\n pass", "def OnCancel(self, event):\n pass", "def OnCancel(self, event):\n pass", "def test_ignore_duplicate_cancel(client, 
mocker, application):\n order = create_test_order(application, 123, fulfilled=False)\n order.status = Order.FAILED\n order.save()\n\n data = {\"req_reference_number\": make_reference_id(order), \"decision\": \"CANCEL\"}\n mocker.patch(\n \"ecommerce.views.IsSignedByCyberSource.has_permission\", return_value=True\n )\n resp = client.post(reverse(\"order-fulfillment\"), data=data)\n assert resp.status_code == statuses.HTTP_200_OK\n\n assert Order.objects.count() == 1\n assert Order.objects.get(id=order.id).status == Order.FAILED", "def cancel_loan(request):\n if request.user.is_superuser:\n for user in User.objects.all():\n user.cancel_loan()\n return HttpResponse('Loan Deducted', status=200)\n return redirect('home')", "def unapprove(self):\n self._check_if_open()\n return super(BitbucketCloudBase, self).delete(\"approve\")", "async def cancel(self, ctx):\n\n return", "def do_cancel(order):\r\n self.gox.cancel(order.oid)", "def cancel_last_classification(self):\n _, file_path = self.binder.remove_last()\n self.img_waiting_list.append(self.current_img_path)\n self._set_image(file_path)", "def cancel0(self):\n with lock_for_object(self.workers):\n setPeriod(-2)\n if self.workers.isEmpty():\n self.runners.remove(getTaskId())\n return True", "def on_cancel_order(self, data, request):\n self.update_rate_limit(request)", "def cancel(self):\n return self.RES_OK", "def cancel(self):\n if self.is_market:\n log.info(\"bo#%s: can't cancel order (market)\" % self.ticket)\n return(False)\n else:\n log.info(\"bo#%s: cancel master order, limit and stop order\" % self.ticket)\n if self.is_cancellable:\n cancel_order(self.order_master)\n cancel_order(self.order_limit)\n cancel_order(self.order_stop)\n self.cancelled.emit(bo=self)\n self.bo_blotter._move_cancelled_order(self)\n return(True)\n else:\n log.info(\"bo#%s: can't cancel order (not cancellable)\" % self.ticket)\n return(False)", "def cancelOrder(self, order_number):\n pass", "def test_only_relevant_task_is_cancelled(serve_instance):\n signal_actor = SignalActor.remote()\n\n @serve.deployment\n class Ingress:\n async def __call__(self, *args):\n await signal_actor.wait.remote()\n return \"ok\"\n\n h = serve.run(Ingress.bind()).options(use_new_handle_api=True)\n\n r1 = h.remote()\n r2 = h.remote()\n\n # Wait for both requests to be executing.\n wait_for_condition(lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 2)\n\n r1.cancel()\n with pytest.raises(ray.exceptions.TaskCancelledError):\n r1.result()\n\n # Now signal r2 to run to completion and check that it wasn't cancelled.\n ray.get(signal_actor.send.remote())\n assert r2.result() == \"ok\"", "def cancel_previous(self):\n\n previous_interviews = self.application.interviews.exclude(\n pk=self.pk,\n ).filter(canceled=False)\n\n previous_interviews.update(canceled=True, canceled_at=timezone.now())", "def abortPrivateValueSet(self, data):\n\n self._log(\"abort-private-value-set\").debug4(\"%s: abort data - %s\", self.name, data)\n\n self.candidateEnabled = self.runningEnabled \n self.candidateTechMode = self.runningTechMode \n\n return ReturnCodes.kOk", "async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()", "def check_for_cancellation(self) -> Iterator:\n yield" ]
[ "0.6585705", "0.6542071", "0.64821595", "0.64821595", "0.64821595", "0.6472716", "0.6299607", "0.6293841", "0.6284326", "0.6247299", "0.6247299", "0.6156231", "0.6093092", "0.6050534", "0.60215384", "0.59936893", "0.59936893", "0.59936893", "0.59936893", "0.59633005", "0.5921159", "0.59097934", "0.5901111", "0.5901111", "0.5901111", "0.5901111", "0.5885911", "0.5860146", "0.5850081", "0.5845153", "0.5830173", "0.58226556", "0.5820831", "0.58132577", "0.58132577", "0.5801458", "0.5801458", "0.5798034", "0.57977015", "0.57881624", "0.5787007", "0.57784945", "0.5759816", "0.5758419", "0.5698868", "0.5691072", "0.5682229", "0.5678573", "0.5675719", "0.56510127", "0.56452465", "0.5627689", "0.56275105", "0.5606798", "0.5597608", "0.55893344", "0.5580157", "0.5574244", "0.55528474", "0.55426323", "0.5530585", "0.5513446", "0.55053186", "0.549986", "0.5499062", "0.54902136", "0.54872155", "0.54843014", "0.5481056", "0.54768306", "0.54746747", "0.5452221", "0.54370606", "0.54370606", "0.5432594", "0.5430732", "0.5421114", "0.5420546", "0.5418803", "0.54072857", "0.5402508", "0.53979146", "0.5384446", "0.53815985", "0.53815985", "0.5377091", "0.53759265", "0.53708214", "0.5370619", "0.53639615", "0.53602767", "0.5358536", "0.53584045", "0.53567076", "0.53521574", "0.5343994", "0.53428674", "0.53415346", "0.5331113", "0.5327539", "0.5307095" ]
0.0
-1
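admin_bankruptcy above amortizes its sweep by sampling a single stream name per call (srandmember is the redis-py binding for Redis's SRANDMEMBER) and then running the cancellation and notification passes for just that stream. Below is a sketch of the same tick pattern, with random.choice over an in-memory set standing in for Redis and two hypothetical placeholder steps.

import random

STREAM_NAMES = {"cop.json", "btc_usd.json", "electricity.json"}  # hypothetical

def _forced_cancellation(name):
    print("cancelling bankrupt submissions on", name)

def _notification(name):
    print("alerting bankrupt owners on", name)

def admin_bankruptcy_tick():
    # One randomly sampled stream per call: repeated ticks cover the whole
    # set over time without ever scanning every stream in one pass.
    name = random.choice(sorted(STREAM_NAMES))
    _forced_cancellation(name)
    _notification(name)

admin_bankruptcy_tick()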
Notify owners and set an attribute so the notification only happens once
def admin_bankruptcy_notification(self, name:str): write_keys = list() for delay in self.DELAYS: write_keys.extend( self._get_sample_owners(name=name, delay=delay) ) for write_key in write_keys: if self.bankrupt(write_key): notified = self.get_attribute(attribute_type=AttributeType.update, granularity=AttributeGranularity.write_key, write_key=write_key) or False if not notified: self.set_attribute(attribute_type=AttributeType.update, granularity=AttributeGranularity.write_key, write_key=write_key, value=1) message = 'Your write_key is bankrupt. Consider topping it up with put_balance()' self.add_owner_alert_message(write_key=write_key, message=message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __setattr__(self, name, value):\n super(Message, self).__setattr__(name, value)\n if name not in ('bcc', '_dirty', '_processed'): \n self.__dict__['_dirty'] = True", "def dummy_update( self ):\r\n pass", "def _set_silent(self, model_instance, value):\n setattr(model_instance, self._cached_name, value)", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def freeze_notify(self): # reliably restored by inspect\n pass", "def set_set_later(self, value):\r\n self.set_later = value", "def OnAttributesUpdated():\n pass", "def fire(self):", "def mark_no_changes(self):", "def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)", "def on_assign(self):", "def fire(self):\n pass", "def on_notify(self, name):\r\n pass", "def before_update(self, obj, st):\n pass", "def onUpdated(self):", "def notifyObservers(self):", "def update(self):\n self._is_on = self._is_on", "def notify(self) -> None:\n pass", "def notify(self) -> None:\n pass", "def _setAttributes(self, reactor, done):\n self.reactor = reactor\n self._done = done", "def acknowledged(self):\n ...", "def notified(self, notified):\n\n self._notified = notified", "def __setattr__(self, name: str, val: Any) -> None:\n if name == \"_unready_attributes\":\n pass\n elif hasattr(self, \"_unready_attributes\") and name in self._unready_attributes:\n self._unready_attributes.remove(name)\n super().__setattr__(name, val)", "def after_update(self, obj, st):\n pass", "def set_once(setter):\n set_instances = DescDict()\n @wraps(setter)\n def __set__(desc, instance, value):\n if instance in set_instances:\n raise AttributeError(\"Cannot set a read-only attribute\")\n else:\n set_instances[instance] = True\n setter(desc, instance, value)\n return __set__", "def set_flag(self, new):\n self.flag = new", "def on(self) -> None:", "def _notify_update(self, cuds_object):", "def _set_attributes(self):", "def start_notify(self, on_change):\n raise NotImplementedError", "def process_IN_ATTRIB(self, event):", "def _update(self):\n pass", "def _dirty (self):\n pass", "def async_update_callback(self) -> None:\n self._attr_is_on = self._switch.on", "def set_attribute(self, attr, value):\n super().set_attribute(attr, value) # Keep this line, it triggers the parent class method.\n setattr(self, attr, value)", "def friewallOn():\n pass", "def touch(self):\n self.evaluated = 0", "def attribute_updated(self, attrid: int, value: Any, _: Any) -> None:\n attr_name = self._get_attribute_name(attrid)\n self.debug(\n \"Attribute report 
'%s'[%s] = %s\", self.cluster.name, attr_name, value\n )\n if attr_name == \"fan_mode\":\n self.async_send_signal(\n f\"{self.unique_id}_{SIGNAL_ATTR_UPDATED}\", attrid, attr_name, value\n )", "def notify_kicked(self):\n self.is_kicked = True", "def func(self):\r\n ##Warn: W0201\r\n self._func = 42", "def __set_signal_refresh(self):\n self._signal_refresh = True", "def _set_changed(self) -> None:\n self._changed = True", "def beforeUpdate(self):", "def _set(self, value):\n value = self._call_func(value)\n self._set_value(value)\n if value is undefined:\n return # no need to update\n for signal in self._downstream_reconnect[:]: # list may be modified\n signal.connect(False)\n for signal in self._downstream:\n signal._set_status(1, self) # do not set status of *this* signal!", "def on(self):", "def changed(self):\n\t\tpass", "def _update(self, count=True, forced=False):", "def on(self):\n raise NotImplementedError", "def __init__(self, set_on_create=False):\n self.set_on_create = set_on_create", "def __init__(self, set_on_create=False):\n self.set_on_create = set_on_create", "def __init__(self, set_on_create=False):\n self.set_on_create = set_on_create", "def Notify(self):\r\n\r\n self._owner._findPrefix = \"\"", "def _async_update_attrs(self) -> None:\n self._attr_is_on = self.entity_description.is_on(self._lock.state)", "def add_new_attributes(self):\n self.task = None\n self.reset_shadow()", "def update(self):\r\n pass", "def _notify(self, observable):\n pass", "def __setattr__(self, attr, value):\n super().__setattr__(attr, value)", "def _update(self, device=None):\n self._attr_available = True\n self.schedule_update_ha_state(True)", "def set_state( self ):", "def svn_client_ctx_t_notify_baton_set(svn_client_ctx_t_self, void_notify_baton): # real signature unknown; restored from __doc__\n pass", "def setChanged(self,value=True):\n self.changed = value", "def setChanged(self,value=True):\n self.changed = value", "def svn_client_ctx_t_notify_func_set(svn_client_ctx_t_self, svn_wc_notify_func_t_notify_func): # real signature unknown; restored from __doc__\n pass", "def _update_on_active(self):\n pass", "def notify_modification(self):\n self._trigger_modification(done=True)", "def __init__(self):\n self._update_scheduled = False", "def init(self):\n\n self.new_thing = True", "def flag(self, reason):\r\n self._flagged = True\r\n self._flagged_reason = reason", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def __setattr__(self,name,value):\n\n if name == '_dont_touch':\n super.__setattr__(self,name,value) \n elif name in self._dont_touch:\n raise ValueError(\"Parameter %s is protected, please don't touch!\"%name)\n else:\n super.__setattr__(self,name,value)\n self._dont_touch.append(name)", "def update(self):", "def update(self):", "def update(self):", "def on_add(self):\n self.notify(on_add())", "def update( ):\r\n pass", "def on(self) -> None:\n ...", "def Update(self):\r\n\r\n # does nothing\r\n pass", "def notify_change(self, change):\n # Send the state to the frontend before the user-registered callbacks\n # are called.\n name = change['name']\n if self.comm is not None and getattr(self.comm, 'kernel', True) is not None:\n # Make sure this isn't information that the front-end just sent us.\n if name in self.keys and 
self._should_send_property(name, getattr(self, name)):\n # Send new state to front-end\n self.send_state(key=name)\n super().notify_change(change)", "def changes_data(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n self.modified = True\n return f(self, *args, **kwargs)\n return wrapper", "def _track_changes(self):\n if self.untrack is False:\n self._event._track_changes.add('attendees')", "def put(self):\n self._val = True", "def update(self):\n # default implementation is to do nothing.", "def notify(self, notifier):\n\n self._notify = notifier", "def updates(f):\n f.updates = True\n return f", "def mark_seen(self):\r\n self.seen_at = now()\r\n return self", "def _mark_fresh(self):\n if self._is_stale:\n self._logger.debug(\"%s: transition to fresh\", self.ping_address)\n self.on_fresh()\n self._is_stale = False" ]
[ "0.6458744", "0.6421269", "0.63834894", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.627908", "0.6269448", "0.62105566", "0.61986357", "0.61782974", "0.6172104", "0.61643064", "0.6151282", "0.61415076", "0.6132746", "0.61106765", "0.60897684", "0.60716337", "0.6062292", "0.6062292", "0.6057354", "0.60383093", "0.60116434", "0.5959115", "0.59252316", "0.5905855", "0.5892873", "0.5888908", "0.58866554", "0.5856675", "0.58524173", "0.58512044", "0.58482414", "0.5835825", "0.58336383", "0.58085424", "0.5806916", "0.5806889", "0.5805758", "0.57818204", "0.57759666", "0.5775568", "0.574188", "0.5727356", "0.5726699", "0.57264185", "0.57250285", "0.5724307", "0.5711174", "0.5708498", "0.5708498", "0.5708498", "0.569507", "0.5684407", "0.56779546", "0.5671346", "0.56659997", "0.5665696", "0.5662383", "0.565715", "0.56401014", "0.5639953", "0.5639953", "0.56380874", "0.56357235", "0.56335974", "0.56300527", "0.56156695", "0.5609342", "0.5604969", "0.5604969", "0.5604162", "0.5604162", "0.5604162", "0.5602619", "0.559644", "0.5593875", "0.5590906", "0.55821955", "0.5579905", "0.55743146", "0.5574254", "0.55733466", "0.5566788", "0.55598813", "0.5554176", "0.5552362" ]
0.0
-1
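admin_bankruptcy_notification above makes the alert idempotent with a read-before-write flag: it checks a per-write_key attribute and, only if the flag is unset, sets it and enqueues the message. Below is a sketch of that notify-once guard, using plain dicts in place of the original's get_attribute/set_attribute store.

_NOTIFIED = {}  # write_key -> True once the bankruptcy alert has been sent
_ALERTS = {}    # write_key -> list of queued alert messages

def add_owner_alert_message(write_key, message):
    _ALERTS.setdefault(write_key, []).append(message)

def notify_bankrupt_once(write_key):
    if _NOTIFIED.get(write_key):
        return  # flag already set: the alert stays a one-time event
    _NOTIFIED[write_key] = True
    add_owner_alert_message(write_key, "Your write_key is bankrupt. Consider topping it up.")

notify_bankrupt_once("abc123")
notify_bankrupt_once("abc123")  # second call is a no-op
assert len(_ALERTS["abc123"]) == 1

Note the check and the set are not atomic; under concurrency a rare duplicate alert is possible, which a maintenance daemon of this kind typically tolerates rather than paying for a lock.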
Force cancellation of submissions for poorly performing bankrupt algorithms
def admin_bankruptcy_forced_cancellation(self, name, with_report): discards = list() for delay in self.DELAYS: write_keys = self._get_sample_owners(name=name, delay=delay) leaderboard = self.get_leaderboard(granularity=LeaderboardGranularity.name_and_delay, count=10000, name=name, delay=delay, readable=False) losers = [key for key in write_keys if (self.shash(key) in leaderboard) and (leaderboard[self.shash(key)] < -1.0)] for write_key in losers: if self.bankrupt(write_key=write_key): code = self.shash(write_key) memo = Memo(activity=Activity.cancel, context=ActivityContext.bankruptcy, name=name, code=code, write_key=write_key, message='Initiating cancellation of submissions due to bankruptcy') self.add_memo_as_owner_confirm(memo=memo) self.cancel(name=name, write_key=write_key, delay=delay) discards.append((name, write_key)) report_data = dict(discards) report_memo = Memo(activity=Activity.daemon, context=ActivityContext.bankruptcy, count=len(report_data), data=report_data) self.add_memo_as_system_confirm(memo=report_memo,private_actor=PrivateActor.bankruptcy_daemon) return len(discards) if not with_report else report_memo.as_dict()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cancel():", "def cancel(self):", "def cancel(self):", "def cancel(self):", "def cancel(self):\n pass", "def cancel_workers(self):\n pass", "def do_uncancel(self):\r\n self.write({'cancelled': False})", "def kill_pending(self):\n for req in self._outbox:\n if not req.Test():\n req.Cancel()\n self._outbox = []", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.cancelled = True", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def cancel(self):\n self.succeeded = False\n self.reject()", "def canceled(self):\n self.reject()", "def force_stop(self):\n #cancel any current request:\n self._cancel_current_request()", "def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')", "def cancel(self):\n self._logger.warning(\"Comparison run being cancelled.\")\n self._msg_logger.warning(run_analysis_view._text[\"log10\"])\n if hasattr(self, \"_off_thread\"):\n self._off_thread.cancel()", "def do_cancel(self):\r\n self.write({'cancelled': True})", "def cancel_all():\n\twhile _running:\n\t\t_running[0].cancel(noerror=True)", "def cancel(self):\n self.__canceled = True", "def cancel(self):\n self.cancelled.set()", "def future(self, cancel):", "def cancel(self) -> asyncio.Future:\n pass # pragma: no cover", "def cancel(self):\n GameLoop.getInstance()._cancelation_token = True", "def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel0(self):\n with lock_for_object(self.workers):\n setPeriod(-2)\n if self.workers.isEmpty():\n self.runners.remove(getTaskId())\n return True", "def reject_waiting_call(self) -> None:", "def cancel(self):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def aborting(self):\n \n pass", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.queue.channel.rpc(self._basic_cancel)\n self.queue.consuming = False", "def _cancel(self):\n now = LOOP_TIME()\n next_ = self.last + USER_CHUNK_TIMEOUT\n if next_ > now:\n self.timer = KOKORO.call_at(next_, type(self)._cancel, self)\n else:\n self.timer = None\n self.waiter.set_result_if_pending(False)", "def cancel_inner():\n kernel32.SetEvent(cancel_event)", "def cancelRequests(self):\n self.get_bmc_website()\n self.__cancelMyRequest = Cancel(self.browser)\n self.__cancelMyRequest.cancelRequest()", "def cancel(self):\n if not self.is_cancelled:\n self.will_change_value_for('is_cancelled')\n self.cancelled = True\n # remove our dependencies so that we're ready, properly behaved operations\n # will honor the cancel flag\n self.dependencies.clear()\n self.did_change_value_for('is_cancelled')\n \n if not self.is_executing and not self.is_finished:\n with self.changing('is_finished'):\n self.finished = True", "def canceled(self):\n return", "def cancel_operation(self):\n # <><><><><><><><><><><><><><><><><><><><><><><><><><><><><><>\n self.proceed = False\n self.entry_view.destroy()", "def _cancel(self):\n self.waiter.set_result_if_pending(None)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "async def cancel(self, ctx):\n\n return", "def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()", "def cancel(self, task):\n raise NotImplementedError", "def cancel_callback(self):\n 
pass", "def cancel(self):\n\n self.end()\n super().cancel()", "def cancel(self):\n self.on_cancel()", "def abort(self):\n print(\"abort\")", "def cancel_run(self, run_id):\n raise NotImplementedError()", "def abort(self):\n raise NotImplementedError", "def _doAbort(self):\n self._cmdAbort()", "def cancel_tasks(self) -> None:\n for group in self._queue.values():\n for expected_response in group.values():\n expected_response.set(None)\n self._queue = defaultdict(OrderedDict)", "async def cancel(self, ctx):\n author: User = ctx.user_object\n\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n try:\n task = adv.get_adventure(ctx.author.id)\n\n adventureid = task[0]\n if adventureid == '0':\n if author.has_item_by_item(REAPER_TOKEN):\n author.update_inventory(REAPER_TOKEN, remove=True)\n adv.remove(ctx.author.id)\n out = 'Slayer task cancelled!'\n else:\n out = 'Error: You do not have a reaper token.'\n elif adventureid == '1':\n adv.remove(ctx.author.id)\n out = 'Killing session cancelled!'\n elif adventureid == '2':\n adv.remove(ctx.author.id)\n out = 'Quest cancelled!'\n elif adventureid == '3':\n adv.remove(ctx.author.id)\n out = 'Gather cancelled!'\n elif adventureid == '4':\n adv.remove(ctx.author.id)\n out = 'Clue scroll cancelled!'\n elif adventureid == '5':\n adv.remove(ctx.author.id)\n out = 'Reaper task cancelled!'\n elif adventureid == '6':\n adv.remove(ctx.author.id)\n out = 'Runecrafting session cancelled!'\n else:\n out = f'Error: Invalid Adventure ID {adventureid}'\n\n except NameError:\n out = 'You are not currently doing anything.'\n await ctx.send(out)", "async def cancel():\n await asyncio.get_running_loop().run_in_executor(None, cancel_inner)", "def requestCancelled(builder, request):", "def _resubmit(self, *args, **kwargs):\n self.retry()", "def cancel(self):\n _notify.remove(self.callb)", "def abort_acquisition(self):\n self.lib.AbortAcquisition()", "def cancel_wait(self):\n self.lib.CancelWait()", "def reqGlobalCancel(self):\r\n self.ib.reqGlobalCancel()\r\n logging.info('reqGlobalCancel')", "def cancel(self): #$NON-NLS-1$\r", "def _cancel(self, d):\n if self._finished:\n return\n try:\n raise CancelledError()\n except:\n self._caught_failure = failure.Failure()\n self._iterate()", "def cancel(self):\n self.blackened = self.blackened_history[-1]\n self.blackened_history.pop()\n if self.victory:\n self.victory = False\n self.blackened_history_size -= 1", "def shush(self):\n cancel_all()", "def cancel(self):\n self.stop()\n self.make_callback('canceled')", "def cancel_initialization(self):", "def cancel(self):\n return self.RES_OK", "async def cancel(self):\n\n await self.cb_0.cancel()\n await self.cb_1.cancel()", "def on_cancel(self):\n self.state = CANCELED\n self._reject()", "def cancel(self) -> None:\n if not self._state == AsyncPostRequest._RUNNING:\n raise Exception(\"Request not started.\")\n self._hasBeenCancelled = True\n self._resultFuture.cancel()", "def cancel(self, request):\n self.clear(request)", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "def on_cancel(self):\n self.quit()", "async def cancel_shielded_checkpoint() -> None:\n await get_async_backend().cancel_shielded_checkpoint()", "def OnCancelScripts(self, event):\n # self.shutdown()\n print(\"Cancel multiprocessor\")\n event.Skip()", "async def __aexit__(self, *args) -> None:\n assert self._task\n await cancel(self._task)\n self._task = None", "def cancel(self):\n self._task.cancel()", "def cancel(self):\n # terminate background 
thread\n self.stop.set()\n CoordinationAdaptor.delete_cds(self.url)", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def cancel1_command(self):\n from time import sleep\n\n logger.debug(\" cancel1_command.py \")\n\n logger.debug(\"QUEUE_S1.put(True, block=0\")\n\n # QUEUE_S.put(True, block=False)\n queue1_put(QUEUE_S1, True)\n\n sleep(0.8) # wait for thread to quit", "def _chain_cancel(_):\n if recvd.done():\n return\n if f.cancelled():\n recvd.cancel()", "def abort() -> NoReturn:\n raise AbortSignal", "def cancelMessageRetrieval(self):\n \n self.gatherData.stop()", "def do_abort(self):\n self.abort = True\n if self.monitor: self.monitor.stop( )", "def resubmit(self):\n self.clean(delete_outputs=True)\n self.state = 'Not_Submitted'\n self.write()\n return self.submit()", "def cancelUnblockVis(self):\n if self.__nextSetZoneDoneEvent is not None:\n self.ignore(self.__nextSetZoneDoneEvent)\n self.__nextSetZoneDoneEvent = None", "def run_no_args(self):\n while True:\n if self.cancelled:\n return\n self.func()\n time.sleep(self.sleep_time / 1000.00)", "async def cancel_shielded_checkpoint(cls) -> None:\n with cls.create_cancel_scope(shield=True):\n await cls.sleep(0)", "def submit_and_cancel(backend: IBMQBackend) -> IBMQJob:\n qobj = bell_in_qobj(backend=backend)\n job = backend.run(qobj)\n cancel_job(job, True)\n return job", "def abort_unnecessary_jobs(self):\n self._update_candidate_range()\n for r in self.revisions:\n if r == self.lkgr:\n break\n if not r.tested or r.failed:\n r.good = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover\n for r in self.revisions[self.fkbr.list_index + 1:]:\n if not r.tested or r.failed:\n r.bad = True # pragma: no cover\n if r.in_progress:\n r.abort() # pragma: no cover", "def cancel_job(self, job_number):\n raise NotImplementedError", "def on_abort(self):\r\n try:\r\n self.cam.new_images.disconnect(self.send_new_images)\r\n except TypeError:\r\n # The abort button was probably pressed mid-backg collection\r\n self.cam.acquisition_done.disconnect(\r\n self.store_backg_and_continue)\r\n else:\r\n # This will throw errors if only the background collection\r\n # has started when abort is pressed\r\n self.abortDisplay.emit()\r\n del self.background\r\n del self.data_pair, self.pump_probe_data, self.probe_only_data\r\n del self.next_data_has_pump\r\n del self.wavelen_arr\r\n finally:\r\n # Definitely make sure the acquisition is actually aborted\r\n self.abortAcq.emit()", "def cancel(self, membership, callback=None):", "def abort(self):\r\n LOG(\"Aborting execution\")\r\n self.controller.abort()", "def cancel_copy(self):\n self.copyWorker.must_run = False\n self.copyButton.setEnabled(True)", "def on_cancel(self) -> None:\n pass", "def on_cancel(self) -> None:\n pass", "def cancel_upload(self):\r\n self.bucket.cancel_multipart_upload(self.key_name, self.id)", "def cancel_main():\n entry1.delete(0, END)\n output_on_display.delete(1.0, END)\n output()", "def shutdown(self, *args, **kwargs):\n # Set shared variable to 0 to signal shutdown\n logger.debug(\"Setting value to cancel\")\n self.cancel_value.value = 0\n\n self.submit_process.join()\n self.collector_thread.join()\n\n return True", "async def cleanup(self):\n if self.preparing_task:\n self.preparing_task.cancel()" ]
[ "0.7574049", "0.72383225", "0.72383225", "0.72383225", "0.7049807", "0.6872498", "0.6760143", "0.6673882", "0.660217", "0.660217", "0.659783", "0.659783", "0.659783", "0.659783", "0.64751947", "0.6469023", "0.6427791", "0.6421745", "0.6420724", "0.64180344", "0.6395012", "0.63927335", "0.63780195", "0.63730633", "0.6317932", "0.63121676", "0.6306247", "0.6291113", "0.62651825", "0.6255757", "0.6251958", "0.6238236", "0.62349546", "0.6221964", "0.62202525", "0.62111723", "0.6176428", "0.6159503", "0.61252975", "0.6123519", "0.6115081", "0.6113367", "0.61050075", "0.6100191", "0.6089086", "0.6083257", "0.6076163", "0.60681957", "0.60484344", "0.60442364", "0.6037022", "0.6035982", "0.6035702", "0.60125893", "0.6000006", "0.5997502", "0.59948593", "0.59942806", "0.5989425", "0.5957915", "0.59438896", "0.5943405", "0.59405875", "0.59342617", "0.5931292", "0.5927164", "0.59109426", "0.5908433", "0.59054536", "0.59054536", "0.59054536", "0.59054536", "0.58848315", "0.5877026", "0.5866531", "0.5855955", "0.5851803", "0.5847229", "0.5846496", "0.584359", "0.58432615", "0.5840402", "0.5830632", "0.58233875", "0.5816678", "0.58092046", "0.58036345", "0.57959783", "0.5791525", "0.57818323", "0.5779591", "0.57697314", "0.5755546", "0.5752557", "0.5737821", "0.5737821", "0.5735862", "0.57312125", "0.57270813", "0.5720755" ]
0.64316154
16
Test running artifact rejection.
def test_temporal_derivative_distribution_repair(fname, tmp_path):
    raw = read_raw_nirx(fname)
    raw_od = optical_density(raw)
    raw_hb = beer_lambert_law(raw_od)

    # With optical densities
    # Add a baseline shift artifact about half way through data
    max_shift = np.max(np.diff(raw_od._data[0]))
    shift_amp = 5 * max_shift
    raw_od._data[0, 0:30] = raw_od._data[0, 0:30] - shift_amp
    # make one channel zero std
    raw_od._data[1] = 0.0
    raw_od._data[2] = 1.0
    assert np.max(np.diff(raw_od._data[0])) > shift_amp

    # Ensure that applying the algorithm reduces the step change
    raw_od = tddr(raw_od)
    assert np.max(np.diff(raw_od._data[0])) < shift_amp
    assert_allclose(raw_od._data[1], 0.0)  # unchanged
    assert_allclose(raw_od._data[2], 1.0)  # unchanged

    # With Hb
    # Add a baseline shift artifact about half way through data
    max_shift = np.max(np.diff(raw_hb._data[0]))
    shift_amp = 5 * max_shift
    raw_hb._data[0, 0:30] = raw_hb._data[0, 0:30] - (1.1 * shift_amp)
    # make one channel zero std
    raw_hb._data[1] = 0.0
    raw_hb._data[2] = 1.0
    assert np.max(np.diff(raw_hb._data[0])) > shift_amp

    # Ensure that applying the algorithm reduces the step change
    raw_hb = tddr(raw_hb)
    assert np.max(np.diff(raw_hb._data[0])) < shift_amp
    assert_allclose(raw_hb._data[1], 0.0)  # unchanged
    assert_allclose(raw_hb._data[2], 1.0)  # unchanged
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_itar_restrict_software_asset(self):\n pass", "def test_reject_proposal_demand(self):\n pass", "async def test_finish_release_no_release(doof, repo_info, event_loop, mocker):\n get_release_pr_mock = mocker.patch('bot.get_release_pr', autospec=True, return_value=None)\n with pytest.raises(ReleaseException) as ex:\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['finish', 'release'],\n loop=event_loop,\n )\n assert 'No release currently in progress' in ex.value.args[0]\n org, repo = get_org_and_repo(repo_info.repo_url)\n get_release_pr_mock.assert_called_once_with(GITHUB_ACCESS, org, repo)", "def test_reject_agreement(self):\n pass", "def testFailure():\n run(\"chariot-me\") #Start management-engine without initial deplflag\n egress()", "def test_read_artifact(self):\n pass", "def testGetBadManifest(self):\n dl = downloader.DockerImageDownloader('non/existing:image')\n with tempfile.TemporaryDirectory() as tmp_dir:\n dl._output_directory = tmp_dir\n with self.assertRaises(errors.DownloaderException):\n dl._GetManifest()", "def test_itar_restrict_asset(self):\n pass", "def test_itar_restrict_test_asset(self):\n pass", "def test_buildrequire_invalid_module(pkg_util, scenario, repo, koji):\n\n repo.bump()\n\n expected_error = \"Cannot find any module builds\"\n with pytest.raises(ErrorReturnCode) as excinfo:\n # Override 'baked' (_err=sys.stderr) stderr redirect:\n # Here we are fine with what plain sh.Command gives us\n # (otherwise ErrorReturnCode.stderr is incomplete).\n builds = pkg_util.run(\"--optional\", \"rebuild_strategy=all\", _err=None)\n try:\n for build in builds:\n print(\"Canceling module-build {}...\".format(build.id))\n pkg_util.cancel(build)\n except ErrorReturnCode:\n # Do nothing, this is just a clean-up of accidentally started builds\n # in case that the test-case fails\n pass\n assert expected_error in excinfo.value.stderr.decode(\"utf-8\")", "def test_failure(self):\n\n @sync_performer\n def fail(dispatcher, intent):\n raise intent\n\n dispatcher = lambda _: fail\n self.assertThat(\n sync_perform(\n dispatcher, Effect(ValueError(\"oh dear\")).on(error=lambda e: e)\n ),\n MatchesException(ValueError(\"oh dear\")),\n )", "def test_release_deployment_run(self):\n pass", "def _reject(self, reason):\n log.error('Rejected: %s' % reason)\n\n self._remove_changes()\n self._remove_files()\n\n if self.user is not None:\n email = Email('importer_reject_maintainer')\n package = self.changes.get('Source', '')\n\n self.send_email(email, [self.user.email], package=package, message=reason)\n sys.exit(1)", "def test_download_artifact(self, clean_mongo, test_case):\n self.logger.info(\"RUN: %s\", test_case[\"name\"])\n\n uuidv4 = str(uuid.uuid4())\n tenant, username, password = (\n \"test.mender.io-\" + uuidv4,\n \"some.user+\" + uuidv4 + \"@example.com\",\n \"secretsecret\",\n )\n tenant = create_org(tenant, username, password, \"enterprise\")\n create_roles(tenant.users[0].token, test_case[\"roles\"])\n test_case[\"user\"][\"name\"] = test_case[\"user\"][\"name\"].replace(\"UUID\", uuidv4)\n test_user = create_user(tid=tenant.id, **test_case[\"user\"])\n login(test_user, test_case[\"use_personal_access_token\"])\n\n # Create admin user so artifact can be uploaded\n admin_user_data = {\n \"name\": \"admin-UUID@example.com\",\n \"pwd\": \"password\",\n \"roles\": [\"RBAC_ROLE_PERMIT_ALL\"],\n }\n admin_user = create_user(tid=tenant.id, **admin_user_data)\n login(admin_user, test_case[\"use_personal_access_token\"])\n\n # Upload a 
bogus artifact\n artifact = Artifact(\"tester\", [\"qemux86-64\"], payload=\"bogus\")\n\n dplmnt_MGMT = ApiClient(deployments.URL_MGMT)\n rsp = dplmnt_MGMT.with_auth(admin_user.token).call(\n \"POST\",\n deployments.URL_DEPLOYMENTS_ARTIFACTS,\n files=(\n (\n \"artifact\",\n (\"artifact.mender\", artifact.make(), \"application/octet-stream\"),\n ),\n ),\n )\n assert rsp.status_code == 201, rsp.text\n\n # Attempt to download artifact with test user\n artifact_id = rsp.headers[\"Location\"].split(\"/\")[-1]\n rsp = dplmnt_MGMT.with_auth(test_user.token).call(\n \"GET\", deployments.URL_DEPLOYMENTS_ARTIFACTS_DOWNLOAD.format(id=artifact_id)\n )\n assert rsp.status_code == test_case[\"status_code\"], rsp.text\n self.logger.info(\"PASS: %s\" % test_case[\"name\"])", "def test_10_unsupported_actions(self):\n\n def __count_pulled_packages(pth):\n self.pkgrepo(\"list -F tsv -H -s {0}\".format(pth))\n return len(self.output.splitlines())\n\n def __check_errout(pfmri):\n s1 = \"invalid action in package {0}\".format(pfmri)\n s2 = \"Malformed action in package '{0}'\".format(pfmri)\n self.assert_(s1 in self.errout or s2 in self.errout,\n \"{0} not in error\".format(pfmri))\n\n def __empty_repo(uri, arg_string):\n if uri.startswith(\"http://\"):\n rurl = self.dcs[4].get_repo_url()\n self.pkgrepo(\"remove -s {0} '*'\".format(rurl))\n # Refresh the depot to get it to realize that\n # the catalog has changed.\n self.dcs[4].refresh()\n elif arg_string:\n portable.remove(uri)\n else:\n self.pkgrepo(\"remove -s {0} '*'\".format(uri))\n\n\n def __test_rec(duri, arg_string, pfmris):\n self.debug(\"\\n\\nNow pkgrecv'ing to {0}\".format(duri))\n\n # It's necessary to use the -D option below because\n # otherwise pkgrecv will fail because the manifest\n # doesn't validate.\n\n novalidate = \"-D manifest_validate=Never \"\n # Check that invalid action attributes don't cause\n # tracebacks.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} {2}\".format(duri, arg_string,\n \" \".join(pfmris)), exit=pkgdefs.EXIT_OOPS)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri), 0)\n if arg_string:\n portable.remove(duri)\n\n # Check that other packages are retrieved and the exit\n # code reflects partial success.\n self.pkgrecv(self.durl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.pkgrecv(self.rurl1, novalidate +\n \"-d {0} {1} -m all-timestamps '*'\".format(\n duri, arg_string), exit=pkgdefs.EXIT_PARTIAL)\n for pfmri in pfmris:\n __check_errout(pfmri)\n self.assertEqual(__count_pulled_packages(duri),\n len(self.published) - len(pfmris))\n __empty_repo(duri, arg_string)\n\n self.rurl1 = self.dcs[1].get_repo_url()\n repo = self.dcs[1].get_repo()\n rd = repo.get_pub_rstore()\n pfmri = fmri.PkgFmri(self.published[4])\n mp = rd.manifest(pfmri)\n\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = original_txt.replace(\"type=require\", \"type=foo\")\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n rpth = tempfile.mkdtemp(dir=self.test_root)\n 
self.pkgrepo(\"create {0}\".format(rpth))\n adir = tempfile.mkdtemp(dir=self.test_root)\n\n # The __empty repo function above assumes that the only http uri\n # used is the one for depot number 4.\n dest_uris = ((rpth, \"\"), (self.durl4, \"\"),\n (os.path.join(adir, \"archive.p5p\"), \"-a\"))\n for duri, arg_string in dest_uris:\n __test_rec(duri, arg_string, [self.published[4]])\n\n # Test that multiple packages failing are handled correctly.\n for i in range(5, 7):\n pfmri = fmri.PkgFmri(self.published[i])\n mp = rd.manifest(pfmri)\n with open(mp, \"rb\") as fh:\n original_txt = fh.read()\n txt = \"foop\\n\" + original_txt\n with open(mp, \"wb\") as fh:\n fh.write(txt)\n\n for duri, arg_string, in dest_uris:\n __test_rec(duri, arg_string, self.published[4:7])", "def test_verification_failed(self):\n pass", "def test_retry_run(self):\n pass", "def test_reject_negative(self):\n self.spawn(\"./binary\").stdin(\"-1\").reject()", "def test_valid_python_raise_exception(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_raise\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_raise --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_raise test\")\n\n assert(rtn.return_code == 246)", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "async def rejected(error: Exception) -> Any:\n raise error", "def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code != 0", "def test_deploy_no_change_exit_code(deploy_no_change_result: Result) -> None:\n assert deploy_no_change_result.exit_code == 0", "def test_launch_failures_hw(self):\n self.test_launch_failures()", "def test_uninstall_subprocess_error_should_fail(self, *args):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.UNINSTALL: {\n EXTCFG_OPTION.EXEC_EXT_CMD: ['command'],\n }\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerError):\n ext_manager.handle_uninstall_extras()", "def test_clean_exit(self):\n ch = connection_helper()\n qr = list_test_artifacts(None, ch.tables)\n self.assertFalse(bool(qr), \"\"\"Run 'removefacts --conf <config> --removetestlist' or \nexecute 'tests/scripts/removetestfacts.py' to fix\"\"\")", "def test_failed():\n assert False", "def test_cmd_cs_subscription_bad_action(self):\n bad_action = 'blahblah'\n\n result = self.runner.invoke(cli, ['subscription', bad_action])\n assert f\"invalid choice: {bad_action}\" in result.output\n assert result.exception", "async def test_warning_without_indirect_dependencies(self):\n dependency = \"laravel/laravel@6.18.34\"\n self.vulnerabilities_json[\"vulnerabilities\"].append(\n {\n \"id\": \"SNYK-PHP-LARAVELLARAVEL-609736\",\n \"severity\": \"high\",\n \"title\": \"Improper Input Validation\",\n \"from\": [dependency],\n },\n )\n expected_entities = [\n self.expected_entity,\n {\n \"key\": \"laravel-laravel@6_18_34\",\n \"dependency\": dependency,\n \"nr_vulnerabilities\": 1,\n \"example_vulnerability\": \"SNYK-PHP-LARAVELLARAVEL-609736\",\n \"url\": \"https://snyk.io/vuln/SNYK-PHP-LARAVELLARAVEL-609736\",\n \"example_path\": dependency,\n \"highest_severity\": \"high\",\n },\n ]\n response = await self.collect(get_request_json_return_value=self.vulnerabilities_json)\n self.assert_measurement(response, value=\"2\", entities=expected_entities)", "async def 
test_release_bad_version(doof, repo_info, event_loop, command):\n command_words = command.split() + ['a.b.c']\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n assert doof.said(\n 'having trouble figuring out what that means',\n )", "def non_existing_package_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel -p hash1\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def test_send_event_raises():\n send_event('pytest-reportportal', '5.0.5')", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def test_ab_reject(self):\n fixture_file = 'fixtures/simple/ab_reject.json'\n events = self.run_and_get_events(fixture_file)\n\n expected_events = [\n ('on_b_dial', {\n 'caller': 'SIP/150010001-00000008',\n 'targets': ['SIP/150010002-00000009'],\n }),\n ('on_hangup', {\n 'caller': 'SIP/150010001-00000008',\n 'reason': 'busy',\n }),\n ]\n\n self.assertEqualChannels(expected_events, events)", "def test_retest_deployment_run(self):\n pass", "def test_badstageerror_raise(self, mock_isdir):\n # Set the mocked functions returned values\n mock_isdir.side_effect = [True]\n\n # Test execution\n wrong_kwargs = copy.copy(self.kwargs)\n wrong_kwargs[\"reconstruction_stage\"] = \"WRONG\"\n self.assertRaises(ValueError, recon_all, **wrong_kwargs)", "def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))", "def test_reject_foo(self):\n self.spawn(\"./binary\").stdin(\"foo\").reject()", "def non_existing_recipe_error_test(self):\n client = TestClient()\n error = client.run(\"upload Pkg/0.1@user/channel\", ignore_error=True)\n self.assertTrue(error)\n self.assertIn(\"ERROR: There is no local conanfile exported as Pkg/0.1@user/channel\",\n client.user_io.out)", "def test_deploy_policy_fail_flavor(self):\n\n self._check_deploy_failure(\n self._create_test_app(flavor='really.bad.flavor',\n key='test-key'),\n 'bad flavor')", "def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_fail(self) -> defer.Deferred[None]:\n return deferLater(reactor, 0, self.fail, \"I fail later\") # type: ignore[arg-type]", "def test_only_rejected_devices_are_rejected(self):\n self.assertEqual(\n self._request(\n self._make_dummy_notification([DEVICE_REJECTED, DEVICE_ACCEPTED])\n ),\n {\"rejected\": [DEVICE_REJECTED[\"pushkey\"]]},\n )", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def test_that_test_can_fail():\n try:\n verify_atomic_weight_for_substance(\"O2\", 1.0)\n except AssertionError as e:\n return\n\n raise AssertionError(\"test_that_test_can_fail() didn't fail\")", "def test_rejected(self):\n actions = signoff_actions(appversions={\"code\": \"fx1.0\"},\n locales={\"code\": \"fr\"})\n actions = list(actions)\n eq_(len(actions), 1)\n eq_(actions[0][1], Action.REJECTED)\n so = Signoff.objects.get(action=actions[0][0])\n eq_(so.push.tip.shortrev, \"l10n fr 0003\")\n eq_(so.locale.code, \"fr\")\n eq_(so.action_set.count(), 2)", "def test_download_deployment_run_test_report(self):\n pass", "def 
test_failure_result(self):\n dr = EventualResult(fail(RuntimeError()), None)\n self.assertRaises(RuntimeError, dr.wait, 0.1)", "def reject(self):\n pass", "def test_fail(make_runner: Callable[..., TargetFunctionRunner]) -> None:\n runner = make_runner(target_failed, use_instances=True)\n run_info = TrialInfo(config=2, instance=\"test\", seed=0, budget=0.0)\n\n runner.submit_trial(run_info)\n run_info, run_value = next(runner.iter_results())\n\n # Make sure the traceback message is included\n assert \"traceback\" in run_value.additional_info\n assert \"RuntimeError\" in run_value.additional_info[\"traceback\"]", "def test_relaunch_deployment_run(self):\n pass", "def test_xfailed_but_passed():\n pass", "def test_rejected_devices_are_rejected(self):\n self.assertEqual(\n self._request(self._make_dummy_notification([DEVICE_REJECTED])),\n {\"rejected\": [DEVICE_REJECTED[\"pushkey\"]]},\n )", "async def test_release_in_progress(doof, repo_info, event_loop, mocker, command):\n version = '1.2.3'\n url = 'http://fake.release.pr'\n mocker.patch('bot.get_release_pr', autospec=True, return_value=ReleasePR(\n version=version,\n url=url,\n body='Release PR body',\n ))\n\n command_words = command.split() + [version]\n with pytest.raises(ReleaseException) as ex:\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n assert ex.value.args[0] == \"A release is already in progress: {}\".format(url)", "def test_case_01(self):\n if True:\n self.fail()", "def test_deploy_exit_code(deploy_result: Result) -> None:\n assert deploy_result.exit_code == 0", "def test_collect_playbooks_dependencies_skip_unavailable(self, module_repo):\n expected_result = {\n # playbooks:\n (\"Slack\", False),\n (\"Indeni\", True),\n # integrations:\n (\"FeedAlienVault\", False),\n (\"ipinfo\", True),\n (\"FeedAutofocus\", True),\n # scripts:\n (\"GetServerURL\", False),\n (\"HelloWorld\", True),\n }\n test_input = [\n {\n \"Dummy Playbook\": {\n \"name\": \"Dummy Playbook\",\n \"file_path\": \"dummy_path\",\n \"fromversion\": \"dummy_version\",\n \"implementing_scripts\": [\n \"GetServerURL\",\n \"HelloWorldScript\",\n ],\n \"implementing_playbooks\": [\n \"Failed Login Playbook - Slack v2\",\n \"Indeni Demo\",\n ],\n \"command_to_integration\": {\n \"alienvault-get-indicators\": \"\",\n \"ip\": \"ipinfo\",\n \"autofocus-get-indicators\": \"\",\n },\n \"tests\": [\"dummy_playbook\"],\n \"pack\": \"dummy_pack\",\n \"incident_fields\": [],\n \"skippable_tasks\": [\n \"Print\",\n \"Failed Login Playbook - Slack v2\",\n \"alienvault-get-indicators\",\n \"GetServerURL\",\n ],\n }\n },\n ]\n\n found_result = PackDependencies._collect_playbooks_dependencies(\n pack_playbooks=test_input,\n id_set=module_repo.id_set.read_json_as_dict(),\n )\n\n assert set(found_result) == set(expected_result)", "def test_package_can_not_upgraded_cause_required(self):\n with self.with_config_update():\n with patch(\n \"aea.cli.upgrade.ItemRemoveHelper.check_remove\",\n return_value=(\n set([PackageId(\"connection\", PublicId(\"test\", \"test\", \"0.0.1\"))]),\n set(),\n dict(),\n ),\n ), pytest.raises(\n ClickException,\n match=r\"Can not upgrade .* because it is required by '.*'\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_fail_repo(self, default_hooks):\n result = 
default_hooks.act_on_cloned_repo(FAIL_REPO)\n\n assert result.status == Status.WARNING\n assert (\n _output.test_result_header(\n \"PrimeCheckerTest\",\n NUM_PRIME_CHECKER_TESTS,\n NUM_PRIME_CHECKER_TESTS - 2,\n _output.FAILURE_COLOR,\n )\n in result.msg\n )", "def test_download_package__not_found(bucket_and_keys):\n\n with pytest.raises(SystemExit) as exit_error:\n download.download_package(\n bucket_and_keys[0],\n parse_package(\"package-unknown\"),\n )\n\n assert \"Package package-unknown not found\" in exit_error.value.args\n\n with pytest.raises(SystemExit) as specific_error:\n download.download_package(\n bucket_and_keys[0],\n parse_package(\"something==1.2.3\")\n )\n\n assert \"Package something==1.2.3 not found\" in specific_error.value.args", "async def test_unpacker_do_work_raise_exception(config, mocker, path_map_mock):\n BUNDLE_OBJ = {\n \"uuid\": \"f74db80e-9661-40cc-9f01-8d087af23f56\"\n }\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient.request\", new_callable=AsyncMock)\n lta_rc_mock.return_value = {\n \"bundle\": BUNDLE_OBJ,\n }\n dwb_mock = mocker.patch(\"lta.unpacker.Unpacker._do_work_bundle\", new_callable=AsyncMock)\n dwb_mock.side_effect = Exception(\"LTA DB started on fire again\")\n qb_mock = mocker.patch(\"lta.unpacker.Unpacker._quarantine_bundle\", new_callable=AsyncMock)\n p = Unpacker(config, logger_mock)\n with pytest.raises(Exception):\n await p._do_work_claim()\n lta_rc_mock.assert_called_with(\"POST\", '/Bundles/actions/pop?source=NERSC&dest=WIPAC&status=unpacking', mocker.ANY)\n dwb_mock.assert_called_with(lta_rc_mock, BUNDLE_OBJ)\n qb_mock.assert_called_with(lta_rc_mock, BUNDLE_OBJ, \"LTA DB started on fire again\")", "def test_delete_software_asset_bundle(self):\n pass", "async def test_webhook_finish_release_fail(doof, event_loop, mocker):\n get_release_pr_mock = mocker.patch('bot.get_release_pr', autospec=True)\n finish_release_mock = mocker.patch('bot.finish_release', autospec=True, side_effect=KeyError)\n\n with pytest.raises(KeyError):\n await doof.handle_webhook(\n loop=event_loop,\n webhook_dict={\n \"token\": \"token\",\n \"callback_id\": FINISH_RELEASE_ID,\n \"channel\": {\n \"id\": \"doof\"\n },\n \"user\": {\n \"id\": \"doofenshmirtz\"\n },\n \"message_ts\": \"123.45\",\n \"original_message\": {\n \"text\": \"Doof's original text\",\n }\n },\n )\n\n assert get_release_pr_mock.called is True\n assert finish_release_mock.called is True\n assert doof.said(\"Merging...\")\n assert doof.said(\"Error\")", "def test_error_downloading_click_counts(self):\n self.service.get_clicks_for_date.side_effect = AnalyticsError\n with self.assertRaises(CommandError):\n self.command.handle()", "def test_issue_post_issue_reaction(self):\n pass", "async def test_unpacker_quarantine_bundle_with_reason_raises(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n lta_rc_mock = mocker.patch(\"rest_tools.client.RestClient\", new_callable=AsyncMock)\n lta_rc_mock.request.side_effect = Exception(\"Marshmellows were poisoned\")\n p = Unpacker(config, logger_mock)\n await p._quarantine_bundle(lta_rc_mock, {\"uuid\": \"c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\"}, \"Rucio caught fire, then we roasted marshmellows.\")\n lta_rc_mock.request.assert_called_with(\"PATCH\", \"/Bundles/c4b345e4-2395-4f9e-b0eb-9cc1c9cdf003\", mocker.ANY)", "def fail(self):\n self.cleanup()\n self.runner.report_job_fail(self.id)", "def answerFailure( self, reason ):\n\t\tlog.warn( \n\t\t\t\"\"\"Unable to answer channel %r: %s\"\"\", 
\n\t\t\tself.agi.variables['agi_channel'], reason.getTraceback(),\n\t\t)\n\t\tself.agi.finish()", "def test_deny_pending_payment(self):\n pass", "def test_xfail_with_run_false_and_with_reason():\n pass", "def test_artifactpriority_list_api_unauthorized(self):\n\n # get response\n response = self.client.get('/api/artifactpriority/')\n # compare\n self.assertEqual(response.status_code, 401)", "def test_verify_rcedit_download_failure(mock_tools, tmp_path):\n mock_tools.download.file.side_effect = NetworkFailure(\"mock\")\n\n with pytest.raises(NetworkFailure, match=\"Unable to mock\"):\n RCEdit.verify(mock_tools)\n\n # A download was invoked\n mock_tools.download.file.assert_called_with(\n url=\"https://github.com/electron/rcedit/\"\n \"releases/download/v1.1.1/rcedit-x64.exe\",\n download_path=tmp_path / \"tools\",\n role=\"RCEdit\",\n )", "def assertImageResponsePyPI(self, package_name):\n BadgeTestCase._assertImageResponsePyPI(\n self, package_name, main.BadgeStatus.SELF_INCOMPATIBLE)", "def test_mark_manual_build_failed_when_container_has_not_latest_rpms_from_advisory(\n self, get_binary_rpm_nvrs, KojiService):\n get_binary_rpm_nvrs.return_value = set(['foo-1.2.1-23.el7'])\n\n koji_service = KojiService.return_value\n koji_service.get_build_rpms.return_value = [\n {'build_id': 634904, 'nvr': 'foo-1.2.1-23.el7', 'name': 'foo'},\n {'build_id': 634904, 'nvr': 'foo-1.1.1-22.el7', 'name': 'foo'},\n ]\n koji_service.get_rpms_in_container.return_value = set(\n ['foo-1.2.1-22.el7']\n )\n\n e1 = models.Event.create(db.session, \"test_msg_id\", \"2018001\", events.ManualRebuildWithAdvisoryEvent)\n event = self.get_event_from_msg(get_fedmsg('brew_container_task_closed'))\n build = models.ArtifactBuild.create(db.session, e1, 'test-product-docker', ArtifactType.IMAGE, event.task_id)\n\n self.handler.handle(event)\n self.assertEqual(build.state, ArtifactBuildState.FAILED.value)\n self.assertRegex(build.state_reason, r\"The following RPMs in container build.*\")", "def _fail(self, reason, use_log=True):\n if use_log:\n log.critical(reason)\n else:\n print >> sys.stderr, reason\n\n self._remove_files()\n\n if self.user is not None:\n email = Email('importer_fail_maintainer')\n package = self.changes.get('Source', '')\n\n self.send_email(email, [self.user.email], package=package)\n\n email = Email('importer_fail_admin')\n self.send_email(email, [pylons.config['debexpo.email']], message=reason)\n\n sys.exit(1)", "def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)", "def test_valid_python_wrong_sub(self):\n \n data_file = testutils.DataFile(\"integration_module_valid_subs\")\n\n rtn = self.run_cmd(\"pm install --force --single module --install_name test_sub --name %s --auto\" % str(data_file))\n assert(rtn.return_code == 0)\n\n rtn = self.run_cmd(\"test_sub nope\")\n\n assert(rtn.return_code == 241)", "def test_failedInteraction(self):\n self.session.start_interaction()\n node = self.session.resolve(\"service1\", \"1.0\")\n another_node = self.session.resolve(\"service2\", \"1.0\")\n self.session.fail_interaction(\"OHNO\")\n self.session.finish_interaction()\n expected_failed = [self.disco.failurePolicy(node),\n self.disco.failurePolicy(another_node)]\n expected_nothing = list(self.disco.failurePolicy(n) for n in\n self.all_nodes if\n n.address not in [node.address, another_node.address])\n self.assertPolicyState(expected_failed, 0, 1)\n 
self.assertPolicyState(expected_nothing, 0, 0)", "async def test_unpacker_run_exception(config, mocker, path_map_mock):\n logger_mock = mocker.MagicMock()\n p = Unpacker(config, logger_mock)\n p.last_work_end_timestamp = None\n p._do_work = AsyncMock()\n p._do_work.side_effect = [Exception(\"bad thing happen!\")]\n await p.run()\n p._do_work.assert_called()\n assert p.last_work_end_timestamp", "def test_runFailed(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c', 'print \"hi\"; raise SystemExit(1)'])\n self.assertEquals(exc.exitStatus, 1)\n self.assertEquals(exc.exitSignal, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_failing_action(self):\n dummy_calls = []\n\n self.ch_core.hookenv.action_fail.side_effect = dummy_calls.append\n\n def dummy_action(args):\n raise ValueError('uh oh')\n\n with mock.patch.dict(actions.ACTIONS, {'foo': dummy_action}):\n actions.main(['foo'])\n self.assertEqual(dummy_calls, ['Action \"foo\" failed: \"uh oh\"'])", "def test_break_security_group_failed():", "def test_error(self) -> None:\n context: Dict[str, ArtifactDescriptor] = dict()\n cmd = ModuleCommand(\n package_id='error', \n command_id='error',\n arguments=[],\n packages=None\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(2)\n self.assertEqual(controller.task_id, '000')\n self.assertEqual(controller.state, 'ERROR')\n self.assertEqual(len(controller.outputs.stdout), 0)\n self.assertNotEqual(len(controller.outputs.stderr), 0)", "def test_upgrade_linuxdeploy_download_failure(linuxdeploy, mock_tools, tmp_path):\n # Mock the existence of an install\n appimage_path = tmp_path / \"plugin\" / \"linuxdeploy-dummy-wonky.AppImage\"\n appimage_path.parent.mkdir(parents=True)\n appimage_path.touch()\n\n mock_tools.download.file.side_effect = NetworkFailure(\"mock\")\n\n # Updated the linuxdeploy wrapper; the upgrade will fail\n with pytest.raises(NetworkFailure, match=\"Unable to mock\"):\n linuxdeploy.upgrade()\n\n # The mock file will be deleted\n assert not appimage_path.exists()\n\n # A download was invoked\n mock_tools.download.file.assert_called_with(\n url=\"https://example.com/path/to/linuxdeploy-dummy-wonky.AppImage\",\n download_path=tmp_path / \"plugin\",\n role=\"Dummy plugin\",\n )", "def test_uninstalled(self):\n self.assertFalse(self.qi.isProductInstalled(PROJECTNAME))", "def test_failing_test(tmp_path):\n project_dir = tmp_path / \"project\"\n output_dir = tmp_path / \"output\"\n project_with_a_failing_test.generate(project_dir)\n\n with pytest.raises(subprocess.CalledProcessError):\n utils.cibuildwheel_run(\n project_dir,\n output_dir=output_dir,\n add_env={\n \"CIBW_TEST_REQUIRES\": \"nose\",\n \"CIBW_TEST_COMMAND\": \"nosetests {project}/test\",\n # manylinux1 has a version of bash that's been shown to have\n # problems with this, so let's check that.\n \"CIBW_MANYLINUX_I686_IMAGE\": \"manylinux1\",\n \"CIBW_MANYLINUX_X86_64_IMAGE\": \"manylinux1\",\n # CPython 3.8 when running on macOS arm64 is unusual. The build\n # always runs in x86_64, so the arm64 tests are not run. See\n # #1169 for reasons why. That means the build succeeds, which\n # we don't want. 
So we skip that build.\n \"CIBW_SKIP\": \"cp38-macosx_arm64\",\n },\n )\n\n assert len(os.listdir(output_dir)) == 0", "def test_publish_deployment_run(self):\n pass", "def test_uninstall(self):\n pass", "def fail(self, msg=None):\r\n raise self.failureException(msg)", "def test_upgrade_non_vendor(self):\n with pytest.raises(\n ClickException,\n match=r\"The .* with id '.*' already has version .*. Nothing to upgrade.\",\n ):\n self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:100.0.0\",\n ],\n standalone_mode=False,\n catch_exceptions=False,\n )", "def test_invalid_target_option(self): # suppress(no-self-use)\n with ExpectedException(DistutilsArgError):\n cmd = GreenTestCommand(Distribution())\n cmd.target = True\n cmd.ensure_finalized()\n cmd.run()", "def test_rescoring_failure(self):\r\n problem_url_name = 'H1P1'\r\n self.define_option_problem(problem_url_name)\r\n self.submit_student_answer('u1', problem_url_name, [OPTION_1, OPTION_1])\r\n\r\n expected_message = \"bad things happened\"\r\n with patch('capa.capa_problem.LoncapaProblem.rescore_existing_answers') as mock_rescore:\r\n mock_rescore.side_effect = ZeroDivisionError(expected_message)\r\n instructor_task = self.submit_rescore_all_student_answers('instructor', problem_url_name)\r\n self._assert_task_failure(instructor_task.id, 'rescore_problem', problem_url_name, expected_message)", "def test_install_error_message(self):\n\n fail_msg = \"Failure message\"\n\n fail_file = Path(self.dockerfile_dirpath) / \"matlab-install\" / \"FAIL\"\n\n with open(str(fail_file), \"w\") as ff:\n ff.write(fail_msg + \"\\n\")\n self.addCleanup(utils.remove_file, fail_file)\n\n build_msg = utils.get_build_output(\n docker_api_client=self.client.api,\n dockerfile_dirpath=self.dockerfile_dirpath,\n release=\"latest\",\n )\n\n self.assertTrue(any([fail_msg in msg for msg in build_msg]))", "def test_import_software_asset(self):\n pass", "def test_delete_deployment_run(self):\n pass", "def test_task_failed(self):\n\n task1 = FailedTask(mock.Mock(), total_retries=0)\n task2 = mock.Mock(execute_after=0)\n\n g = TaskDependencyGraph(MockWorkflowContext())\n seq = g.sequence()\n seq.add(task1, task2)\n\n with limited_sleep_mock():\n self.assertRaisesRegex(WorkflowFailed, 'failtask', g.execute)\n self.assertTrue(task1.is_terminated)\n self.assertFalse(task2.apply_async.called)", "def test_request_failure(self, api_client):\n runner = CliRunner()\n\n api_client.quick.side_effect = RequestFailure(\n 401, {\"error\": \"forbidden\", \"status\": \"error\"}\n )\n expected = \"API error: forbidden\"\n\n result = runner.invoke(subcommand.quick, [\"0.0.0.0\"])\n assert result.exit_code == -1\n assert expected in result.output", "def test_mark_build_fail_when_container_not_has_latest_rpms_from_advisory(self, get_binary_rpm_nvrs, KojiService):\n get_binary_rpm_nvrs.return_value = set(['foo-1.2.1-23.el7'])\n\n koji_service = KojiService.return_value\n koji_service.get_build_rpms.return_value = [\n {'build_id': 634904, 'nvr': 'foo-debuginfo-1.2.1-23.el7', 'name': 'foo-debuginfo'},\n {'build_id': 634904, 'nvr': 'foo-1.2.1-23.el7', 'name': 'foo'},\n {'build_id': 634904, 'nvr': 'foo-debuginfo-1.1.1-22.el7', 'name': 'foo-debuginfo'},\n {'build_id': 634904, 'nvr': 'foo-1.1.1-22.el7', 'name': 'foo'},\n ]\n koji_service.get_rpms_in_container.return_value = set(\n ['foo-1.2.1-22.el7', 'bar-1.2.3-1.el7']\n )\n\n e1 = models.Event.create(db.session, \"test_msg_id\", \"2018001\", 
events.ErrataAdvisoryRPMsSignedEvent)\n event = self.get_event_from_msg(get_fedmsg('brew_container_task_closed'))\n build = models.ArtifactBuild.create(db.session, e1, 'test-product-docker', ArtifactType.IMAGE, event.task_id)\n\n self.handler.handle(event)\n self.assertEqual(build.state, ArtifactBuildState.FAILED.value)\n self.assertRegex(build.state_reason, r\"The following RPMs in container build.*\")", "def test_send_to_grader_fail(self):\r\n\r\n student_response = \"This is a student submission\"\r\n self.mock_xqueue.send_to_queue.return_value = (1, \"Not Queued\")\r\n result, __ = self.openendedmodule.send_to_grader(student_response, self.test_system)\r\n self.assertFalse(result)", "def test_unlock_failure(self):\n # Make sure the image file doesn't exist.\n if os.path.exists(IMAGE_FILE):\n os.unlink(IMAGE_FILE)\n # Ask rsync-system-backup to use the encrypted filesystem on the image\n # file anyway, because we know it will fail and that's exactly what\n # we're interested in :-).\n program = RsyncSystemBackup(\n crypto_device=CRYPTO_NAME,\n destination=os.path.join(MOUNT_POINT, 'latest'),\n mount_point=MOUNT_POINT,\n )\n # When `cryptdisks_start' fails it should exit with a nonzero exit\n # code, thereby causing executor to raise an ExternalCommandFailed\n # exception that obscures the FailedToUnlockError exception that we're\n # interested in. The check=False option enables our `last resort error\n # handling' code path to be reached.\n program.destination_context.options['check'] = False\n self.assertRaises(FailedToUnlockError, program.execute)", "def reject_waiting_call(self) -> None:" ]
[ "0.6357305", "0.6324513", "0.6190728", "0.6092561", "0.5969728", "0.5934762", "0.5933897", "0.59015524", "0.58819264", "0.5881338", "0.58585626", "0.5845605", "0.5825424", "0.577213", "0.5720475", "0.5704311", "0.56921726", "0.56621385", "0.56546986", "0.56332654", "0.56303424", "0.5607382", "0.5597223", "0.5594438", "0.55894405", "0.5582241", "0.5575497", "0.55692357", "0.55662864", "0.55653024", "0.5556097", "0.55508554", "0.5545378", "0.55317926", "0.5530835", "0.5528834", "0.5523022", "0.5521507", "0.55195093", "0.5513249", "0.55113024", "0.55080265", "0.5502735", "0.5501158", "0.5498997", "0.54976326", "0.5497112", "0.54919326", "0.54890865", "0.5488215", "0.54849976", "0.547835", "0.5469767", "0.54647964", "0.54603326", "0.5457577", "0.5445942", "0.54449487", "0.54442555", "0.5442962", "0.5417409", "0.5414795", "0.5411088", "0.5410121", "0.5407116", "0.5403963", "0.54017705", "0.54015046", "0.5401196", "0.5399076", "0.5393897", "0.53872615", "0.538545", "0.53714085", "0.5362742", "0.53590435", "0.5357789", "0.5356731", "0.535256", "0.53522825", "0.53516585", "0.5351459", "0.5347251", "0.5346386", "0.53460014", "0.53448725", "0.5341323", "0.5340554", "0.53404534", "0.5338044", "0.5335213", "0.5335005", "0.5333848", "0.5331033", "0.53184813", "0.5317568", "0.5312351", "0.5299302", "0.5298332", "0.5287691", "0.527335" ]
0.0
-1
Convert from camera_frame to world_frame
def cam_to_world(cam_point, world_to_cam):
    # cam_point = np.array([cam_pose[0], cam_pose[1], cam_pose[2]])
    obj_vector = np.concatenate((cam_point, np.ones(1))).reshape((4, 1))
    world_point = np.dot(world_to_cam, obj_vector)
    world_point = [p[0] for p in world_point]
    return world_point[0:3]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def camera_to_world(self, X):\n raise NotImplementedError", "def world_to_camera(self, X):\n raise NotImplementedError", "def cameraToWorld(self, p):\n result = self.camPos\n result += p[2] * self.camZ # result is now in the middle of the view-plane\n result += p[0] * self.camX # result is now in the middle-left of the view-plane\n result += p[1] * self.camY # result is now the world-space equivalent of p\n return result", "def transform_camera_pose_to_world_pose(self):\n for pose in self.close_positions_camera:\n self.close_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.medium_positions_camera:\n self.medium_positions_world.append(self.get_world_pose_for_camera_pose(pose))\n\n for pose in self.far_positions_camera:\n self.far_positions_world.append(self.get_world_pose_for_camera_pose(pose))", "def camera_2_world(self, o, d):\r\n wo = self.camera2world_point @ ti.Vector([o.x, o.y, o.z, 1.0])\r\n wd = self.camera2world_vec @ d\r\n return ti.Vector([wo.x,wo.y,wo.z]), wd", "def telescope_to_camera(telescope_coord, camera_frame):\n x_pos = telescope_coord.cartesian.x\n y_pos = telescope_coord.cartesian.y\n rot = telescope_coord.rotation * -1 # reverse the rotation applied to get to this system\n\n if rot ==0: #if no rotation applied save a few cycles\n x=x_pos\n y=y_pos\n else: # or else rotate all positions around the camera centre\n x = x_pos*cos(rot) - y_pos*sin(rot)\n y = y_pos*sin(rot) + y_pos*cos(rot)\n\n f = telescope_coord.focal_length\n x = x*(f/u.m) # Remove distance units here as we are using small angle approx\n y = y*(f/u.m)\n\n representation = CartesianRepresentation(x.value*u.m ,y.value*u.m,0*u.m)\n\n return camera_frame.realize_frame(representation)", "def camera_to_telescope(camera_coord, telescope_frame):\n x_pos = camera_coord.cartesian.x\n y_pos = camera_coord.cartesian.y\n\n rot = telescope_frame.rotation\n if rot ==0:\n x=x_pos\n y=y_pos\n else:\n x = x_pos*cos(rot) - y_pos*sin(rot)\n y = y_pos*sin(rot) + y_pos*cos(rot)\n\n f = telescope_frame.focal_length\n\n x = (x/f) * u.deg\n y = (y/f) * u.deg\n representation = CartesianRepresentation(x,y,0*u.deg)\n\n return telescope_frame.realize_frame(representation)", "def create_cam2world_matrix(forward_vector,\n origin,\n device=None):\n \"\"\"\"\"\"\n\n forward_vector = normalize_vecs(forward_vector)\n up_vector = torch.tensor([0, 1, 0], dtype=torch.float, device=device) \\\n .expand_as(forward_vector)\n\n left_vector = normalize_vecs(\n torch.cross(up_vector,\n forward_vector,\n dim=-1))\n\n up_vector = normalize_vecs(\n torch.cross(forward_vector,\n left_vector,\n dim=-1))\n\n rotation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n rotation_matrix[:, :3, :3] = torch.stack(\n (-left_vector, up_vector, -forward_vector), axis=-1)\n\n translation_matrix = torch.eye(4, device=device) \\\n .unsqueeze(0) \\\n .repeat(forward_vector.shape[0], 1, 1)\n translation_matrix[:, :3, 3] = origin\n\n cam2world = translation_matrix @ rotation_matrix\n\n return cam2world", "def transform(orbit, frame_orig, frame_dest):\n\n orbit_orig = frame_orig(\n x=orbit.r[0],\n y=orbit.r[1],\n z=orbit.r[2],\n v_x=orbit.v[0],\n v_y=orbit.v[1],\n v_z=orbit.v[2],\n representation=CartesianRepresentation,\n differential_type=CartesianDifferential,\n )\n\n orbit_dest = orbit_orig.transform_to(frame_dest(obstime=orbit.epoch))\n orbit_dest.representation = CartesianRepresentation\n\n return Orbit.from_vectors(\n orbit.attractor,\n orbit_dest.data.xyz,\n 
orbit_dest.data.differentials[\"s\"].d_xyz,\n epoch=orbit.epoch,\n )", "def r2n2_cam2world(self):\n if not hasattr(self, '_r2n2_cam2world'):\n ms = []\n for i in range(24):\n cam2v1 = assert_is_4x4(self.r2n2_cam2v1[i, ...])\n v12occnet = assert_is_4x4(self.v12occnet)\n occnet2gaps = assert_is_4x4(self.occnet2gaps)\n cam2occnet = np.matmul(v12occnet, cam2v1)\n cam2gaps = np.matmul(occnet2gaps, cam2occnet)\n ms.append(assert_is_4x4(cam2gaps))\n self._r2n2_cam2world = np.stack(ms).astype(np.float32)\n return self._r2n2_cam2world", "def camera_to_object_transform(self):\n # form the full object to camera transform\n T_stp_camera = self.stp_to_camera_transform()\n T_obj_stp = self.object_to_stp_transform()\n T_obj_camera = T_stp_camera.dot(T_obj_stp)\n return T_obj_camera", "def to_image_frame(loc):\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)", "def to_world(self, uv):\n return self._projective_transform(self.A, uv)", "def project_to_image_plane(self, point_in_world, timestamp):\n\n camera_info = CameraInfo()\n\n fx = self.config['camera_info']['focal_length_x']\n fy = self.config['camera_info']['focal_length_y']\n\n camera_info.width = self.config['camera_info']['image_width']\n camera_info.height = self.config['camera_info']['image_height']\n\n #print(\"fx {}, fy {}\".format(fx, fy))\n\n camera_info.K = np.array([[fx, 0, camera_info.width / 2],\n [0, fy, camera_info.height / 2],\n [0, 0, 1.]], dtype=np.float32)\n camera_info.P = np.array([[fx, 0, camera_info.width / 2, 0],\n [0, fy, camera_info.height / 2, 0],\n [0, 0, 1., 0]])\n camera_info.R = np.array([[1., 0, 0],\n [0, 1., 0],\n [0, 0, 1.]], dtype=np.float32)\n\n camera = PinholeCameraModel()\n camera.fromCameraInfo(camera_info)\n\n #print(\"point_in_world = {}\".format(str(point_in_world)))\n #print(\"camera projection matrix \", camera.P)\n\n # get transform between pose of camera and world frame\n trans = None\n point_in_camera_space = None\n point_in_image = None\n bbox_points_camera_image = []\n\n euler_transforms = (\n math.radians(90), # roll along X to force Y axis 'up'\n math.radians(-90 + -.75), # pitch along Y to force X axis towards 'right', with slight adjustment for camera's 'yaw'\n math.radians(-9) # another roll to orient the camera slightly 'upwards', (camera's 'pitch')\n )\n euler_axes = 'sxyx'\n\n try:\n self.listener.waitForTransform(\"/base_link\",\n \"/world\", timestamp, rospy.Duration(0.1))\n (trans, rot) = self.listener.lookupTransform(\"/base_link\",\n \"/world\", timestamp)\n\n camera_orientation_adj = tf.transformations.quaternion_from_euler(*euler_transforms, axes=euler_axes)\n\n trans_matrix = self.listener.fromTranslationRotation(trans, rot)\n camera_orientation_adj = self.listener.fromTranslationRotation((0, 0, 0), camera_orientation_adj)\n\n #print(\"trans {}, rot {}\".format(trans, rot))\n #print(\"transform matrix {}\".format(trans_matrix))\n\n point = np.array([point_in_world.x, point_in_world.y, point_in_world.z, 1.0])\n\n # this point should match what you'd see from being inside the vehicle looking straight ahead.\n point_in_camera_space = trans_matrix.dot(point)\n\n #print(\"point in camera frame {}\".format(point_in_camera_space))\n\n final_trans_matrix = camera_orientation_adj.dot(trans_matrix)\n\n # this point is from the view point of the camera (oriented along the camera's rotation quaternion)\n point_in_camera_space = final_trans_matrix.dot(point)\n\n #print(\"point in camera frame adj {}\".format(point_in_camera_space))\n\n bbox_points = 
[(point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] - 0.5, point_in_camera_space[1] - 1.1, point_in_camera_space[2], 1.0),\n (point_in_camera_space[0] + 0.5, point_in_camera_space[1] + 1.1, point_in_camera_space[2], 1.0)]\n\n # these points represent the bounding box within the camera's image\n for p in bbox_points:\n bbox_points_camera_image.append(camera.project3dToPixel(p))\n\n # print(\"point in image {}\".format(bbox_points_camera_image))\n\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n rospy.logerr(\"Failed to find camera to map transform\")\n\n return bbox_points_camera_image", "def _vehicle_to_world(cords, vehicle):\n\n bb_transform = carla.Transform(vehicle.bounding_box.location)\n bb_vehicle_matrix = ClientSideBoundingBoxes.get_matrix(bb_transform)\n vehicle_world_matrix = ClientSideBoundingBoxes.get_matrix(vehicle.get_transform())\n bb_world_matrix = np.dot(vehicle_world_matrix, bb_vehicle_matrix)\n world_cords = np.dot(bb_world_matrix, np.transpose(cords))\n return world_cords", "def get_world_pose_for_camera_pose(self, pose):\n\n # Create a point stamped from the given position\n camera_point = geometry_msgs.msg.PointStamped()\n camera_point.header.stamp = rospy.Time.now()\n camera_point.header.frame_id = 'camera'\n camera_point.point.x = pose[0]\n camera_point.point.y = pose[1]\n camera_point.point.z = pose[2]\n\n # Wait for the transformation to be available\n time = rospy.Time().now()\n self.listener.waitForTransform('camera', 'world', time, rospy.Duration(5))\n world_point = self.listener.transformPoint('world', camera_point)\n\n # Return the new coordinates\n return [world_point.point.x, world_point.point.y, world_point.point.z]", "def _omni_frame_to_omni_frame_projection(self, agent_rel_pose, agent_rel_mat, uniform_sphere_pixel_coords,\n sphere_pix_coords_f1, sphere_depth_f1, sphere_feat_f1, agent_rel_pose_cov,\n image_var_f1, holes_prior, holes_prior_var, batch_size):\n\n # Frame 1 #\n # --------#\n\n # combined\n\n # B x OH x OW x 3\n angular_pixel_coords_f1 = ivy.concatenate((sphere_pix_coords_f1, sphere_depth_f1), -1)\n\n # sphere coords\n\n # B x OH x OW x 3\n sphere_coords_f1 = \\\n ivy_vision.angular_pixel_to_sphere_coords(angular_pixel_coords_f1, self._pixels_per_degree)\n\n # Frame 2 #\n # --------#\n\n # sphere to sphere pixel projection\n\n sphere_coords_f2 = ivy_vision.sphere_to_sphere_coords(\n sphere_coords_f1, agent_rel_mat, [batch_size], self._sphere_img_dims)\n image_var_f2 = image_var_f1\n\n # to angular pixel coords\n\n # B x OH x OW x 3\n angular_pixel_coords_f2 = \\\n ivy_vision.sphere_to_angular_pixel_coords(sphere_coords_f2, self._pixels_per_degree)\n\n # constant feature projection\n\n # B x OH x OW x (3+F)\n projected_coords_f2 = ivy.concatenate([angular_pixel_coords_f2] + [sphere_feat_f1], -1)\n\n # reshaping to fit quantization dimension requirements\n\n # B x (OHxOW) x (3+F)\n projected_coords_f2_flat = ivy.reshape(projected_coords_f2,\n [batch_size] + [self._sphere_img_dims[0] * self._sphere_img_dims[1]]\n + [3 + self._feat_dim])\n\n # B x (OHxOW) x (3+F)\n image_var_f2_flat = ivy.reshape(image_var_f2,\n [batch_size] + [self._sphere_img_dims[0] * self._sphere_img_dims[1]]\n + [3 + self._feat_dim])\n\n # quantize the projection\n\n # B x N x OH x OW x (3+F) # B x N x OH x OW x (3+F)\n return ivy_vision.quantize_to_image(\n 
pixel_coords=projected_coords_f2_flat[..., 0:2],\n final_image_dims=self._sphere_img_dims,\n feat=projected_coords_f2_flat[..., 2:],\n feat_prior=holes_prior,\n with_db=self._with_depth_buffer,\n pixel_coords_var=image_var_f2_flat[..., 0:2],\n feat_var=image_var_f2_flat[..., 2:],\n pixel_coords_prior_var=holes_prior_var[..., 0:2],\n feat_prior_var=holes_prior_var[..., 2:],\n var_threshold=self._var_threshold[:, 0],\n uniform_pixel_coords=uniform_sphere_pixel_coords,\n batch_shape=(batch_size,),\n dev_str=self._dev_str)[0:2]", "def get_camera_transform(self):\r\n if not self.pose:\r\n rospy.loginfo(\"no pose!\")\r\n return None\r\n if self.pose.header.frame_id != self.role_name:\r\n rospy.logwarn(\"Unsupported frame received. Supported {}, received {}\".format(\r\n self.role_name, self.pose.header.frame_id))\r\n return None\r\n sensor_location = carla.Location(x=self.pose.pose.position.x,\r\n y=-self.pose.pose.position.y,\r\n z=self.pose.pose.position.z)\r\n quaternion = (\r\n self.pose.pose.orientation.x,\r\n self.pose.pose.orientation.y,\r\n self.pose.pose.orientation.z,\r\n self.pose.pose.orientation.w\r\n )\r\n roll, pitch, yaw = euler_from_quaternion(quaternion)\r\n # rotate to CARLA\r\n sensor_rotation = carla.Rotation(pitch=math.degrees(roll)-90,\r\n roll=math.degrees(pitch),\r\n yaw=-math.degrees(yaw)-90)\r\n return carla.Transform(sensor_location, sensor_rotation)", "def _frame_to_omni_frame_projection(self, cam_rel_poses, cam_rel_mats, uniform_sphere_pixel_coords, cam_coords_f1,\n cam_feat_f1, rel_pose_covs, image_var_f1, holes_prior, holes_prior_var,\n batch_size, num_timesteps, num_cams, image_dims):\n\n # cam 1 to cam 2 coords\n\n cam_coords_f2 = ivy_vision.cam_to_cam_coords(\n ivy_mech.make_coordinates_homogeneous(\n cam_coords_f1, [batch_size, num_timesteps, num_cams] + image_dims), cam_rel_mats,\n [batch_size, num_timesteps, num_cams], image_dims)\n\n # cam 2 to sphere 2 coords\n\n sphere_coords_f2 = ivy_vision.cam_to_sphere_coords(cam_coords_f2)\n image_var_f2 = image_var_f1\n\n # angular pixel coords\n\n # B x N x C x H x W x 3\n angular_pixel_coords_f2 = \\\n ivy_vision.sphere_to_angular_pixel_coords(sphere_coords_f2, self._pixels_per_degree)\n\n # constant feature projection\n\n # B x N x C x H x W x (3+F)\n projected_coords_f2 = ivy.concatenate([angular_pixel_coords_f2] + [cam_feat_f1], -1)\n\n # reshaping to fit quantization dimension requirements\n\n # B x N x (CxHxW) x (3+F)\n projected_coords_f2_flat = \\\n ivy.reshape(projected_coords_f2,\n [batch_size, num_timesteps, num_cams * image_dims[0] * image_dims[1], -1])\n\n # B x N x (CxHxW) x (3+F)\n image_var_f2_flat = ivy.reshape(image_var_f2,\n [batch_size, num_timesteps, num_cams * image_dims[0] * image_dims[1], -1])\n\n # quantized result from all scene cameras\n\n # B x N x OH x OW x (3+F) # B x N x OH x OW x (3+F)\n return ivy_vision.quantize_to_image(\n pixel_coords=projected_coords_f2_flat[..., 0:2],\n final_image_dims=self._sphere_img_dims,\n feat=projected_coords_f2_flat[..., 2:],\n feat_prior=holes_prior,\n with_db=self._with_depth_buffer,\n pixel_coords_var=image_var_f2_flat[..., 0:2],\n feat_var=image_var_f2_flat[..., 2:],\n pixel_coords_prior_var=holes_prior_var[..., 0:2],\n feat_prior_var=holes_prior_var[..., 2:],\n var_threshold=self._var_threshold,\n uniform_pixel_coords=uniform_sphere_pixel_coords,\n batch_shape=(batch_size, num_timesteps),\n dev_str=self._dev_str)[0:2]", "def camera_frame_directions(self) -> _BFRAME_TYPE:\n pass", "def compute_right_camera_pose(left_camera_to_world, left_to_right):\n 
left_world_to_camera = np.linalg.inv(left_camera_to_world)\n right_world_to_camera = np.matmul(left_to_right, left_world_to_camera)\n right_camera_to_world = np.linalg.inv(right_world_to_camera)\n return right_camera_to_world", "def read_camera(self):\n _, frame = self.camera.read()\n return self.mirror(frame)", "def _trig_to_world(self, bb, parent, trigger): \r\n bb_transform = carla.Transform(trigger.location)\r\n bb_vehicle_matrix = self.get_matrix(bb_transform)\r\n vehicle_world_matrix = self.get_matrix(parent.get_transform())\r\n bb_world_matrix = vehicle_world_matrix @ bb_vehicle_matrix\r\n world_cords = bb_world_matrix @ bb.T\r\n return world_cords", "def pixel2cam(self, depth, intrinsics_inv):\n b, _, h, w = depth.size()\n i_range = torch.arange(0, h).view(1, h, 1).expand(1,h,w).type_as(depth) # [1, H, W]\n j_range = torch.arange(0, w).view(1, 1, w).expand(1,h,w).type_as(depth) # [1, H, W]\n ones = torch.ones(1,h,w).type_as(depth)\n pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]\n ###pixel_coords is an array of camera pixel coordinates (x,y,1) where x,y origin is the upper left corner of the image.\n current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).view(b,3,-1) #.contiguous().view(b, 3, -1) # [B, 3, H*W]\n #cam_coords = intrinsic_inv.expand(b,3,3).bmm(current_pixel_coords).view(b,3,h,w)\n cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b,3,h,w)\n return cam_coords * depth", "def pointToWorld( nImageX, nImageY, rDepth, rMaxX = 320, rMaxY = 240, rFieldOfViewX = 60, rFieldOfViewY = 40 ):\n # convert to [-0.5,0.5]\n rCenteredX = ( nImageX / rMaxX ) - 0.5;\n rCenteredY = ( nImageY / rMaxY ) - 0.5;", "def toworld(self, *args, **kwargs):\n return _image.image_toworld(self, *args, **kwargs)", "def transform_world2base(self,world_pose):\n worldpose_Matrix=trans_tools.quaternion_matrix(world_pose[3:])\n worldpose_Matrix[0:3,3]=np.array(world_pose[:3]).T\n\n basepose_Matrix=self.trans_world2base.dot(worldpose_Matrix)\n rot=trans_tools.quaternion_from_matrix(basepose_Matrix)\n trans=basepose_Matrix[0:3,3].T\n\n base_pose=np.hstack([trans,rot])\n return base_pose", "def world_to_screen(self, x, y):\n return x-self.x, self.h-(y-self.y)", "def to_world(self, x, y, **kwargs):", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def to_image_frame(self, Hinv, loc):\n if loc.ndim > 1:\n locHomogenous = np.hstack((loc, np.ones((loc.shape[0], 1))))\n loc_tr = np.transpose(locHomogenous)\n loc_tr = np.matmul(Hinv, loc_tr) # to camera frame\n locXYZ = np.transpose(loc_tr/loc_tr[2]) # to pixels (from millimeters)\n return locXYZ[:, :2].astype(int)\n else:\n locHomogenous = np.hstack((loc, 1))\n locHomogenous = np.dot(Hinv, locHomogenous) # to camera frame\n locXYZ = locHomogenous / locHomogenous[2] # to pixels (from millimeters)\n return locXYZ[:2].astype(int)", "def vox2world(vol_origin, vox_coords, vox_size):\n vol_origin = vol_origin.astype(np.float32)\n vox_coords = vox_coords.astype(np.float32)\n cam_pts = np.empty_like(vox_coords, dtype=np.float32)\n for i in prange(vox_coords.shape[0]):\n for j in range(3):\n cam_pts[i, j] = vol_origin[j] + (vox_size * vox_coords[i, j])\n return cam_pts", "def max_cam2world(self):\n if not hasattr(self, '_max_cam2world'):\n occnet2gaps = self.occnet2gaps\n 
cam2occnet = self.max_cam2occnet\n assert cam2occnet.shape[0] == 16\n assert cam2occnet.shape[1] == 4\n assert cam2occnet.shape[2] == 4\n assert occnet2gaps.shape[0] == 4\n assert occnet2gaps.shape[1] == 4\n cam2worlds = []\n for i in range(16):\n cam2worlds.append(np.matmul(occnet2gaps, cam2occnet[i, :, :]))\n self._max_cam2world = np.stack(cam2worlds)\n return self._max_cam2world", "def transform_to(self, frame):\n if self.component_type == \"healpix\":\n raise ValueError(\n \"Direct coordinate transformation between frames is not valid \"\n \"for `healpix` type catalogs. Please use the `healpix_interp_transform` \"\n \"to transform to a new frame and interpolate to the new pixel centers. \"\n \"Alternatively, you can call `healpix_to_point` to convert the healpix map \"\n \"to a point source catalog before calling this function.\"\n )\n\n new_skycoord = self.skycoord.transform_to(frame)\n self.skycoord = new_skycoord\n\n return", "def translate_frame(frame, x, y):\n trans_mat = np.float32([[1, 0, x], [0, 1, y]])\n dimensions = (frame.shape[1], frame.shape[0])\n return cv.warpAffine(frame, trans_mat, dimensions)", "def pinhole_projection_world_to_image(world_pos, K, camera_to_world=None):\n\n world_pos_vec = np.append(world_pos, 1)\n\n # transform to camera frame if camera_to_world is not None\n if camera_to_world is not None:\n world_pos_vec = np.dot(np.linalg.inv(camera_to_world), world_pos_vec)\n\n # scaled position is [X/Z, Y/Z, 1] where X,Y,Z is the position in camera frame\n scaled_pos = np.array([world_pos_vec[0]/world_pos_vec[2], world_pos_vec[1]/world_pos_vec[2], 1])\n uv = np.dot(K, scaled_pos)[:2]\n return uv", "def _get_slice_to_world(self) :\n \n return self._slice_to_world", "def stp_to_camera_transform(self):\n # setup variables\n camera_xyz_w = self.cam_pos\n camera_rot_w = self.cam_rot\n camera_int_pt_w = self.cam_interest_pt\n camera_xyz_obj_p = camera_xyz_w - camera_int_pt_w\n \n # get the distance from the camera to the world\n camera_dist_xy = np.linalg.norm(camera_xyz_w[:2])\n z = [0,0,np.linalg.norm(camera_xyz_w[:3])]\n\n # form the rotations about the x and z axis for the object on the tabletop\n theta = camera_rot_w[0] * np.pi / 180.0\n phi = -camera_rot_w[2] * np.pi / 180.0 + np.pi / 2.0\n camera_rot_obj_p_z = np.array([[np.cos(phi), -np.sin(phi), 0],\n [np.sin(phi), np.cos(phi), 0],\n [0, 0, 1]])\n\n camera_rot_obj_p_x = np.array([[1, 0, 0],\n [0, np.cos(theta), -np.sin(theta)],\n [0, np.sin(theta), np.cos(theta)]])\n \n # form the full rotation matrix, swapping axes to match maya\n camera_md = np.array([[0, 1, 0],\n [1, 0, 0],\n [0, 0, -1]])\n camera_rot_obj_p = camera_md.dot(camera_rot_obj_p_z.dot(camera_rot_obj_p_x))\n camera_rot_obj_p = camera_rot_obj_p.T\n \n # form the full object to camera transform\n R_stp_camera = camera_rot_obj_p\n t_stp_camera = np.array(z)\n return RigidTransform(rotation=R_stp_camera,\n translation=t_stp_camera,\n from_frame='stp', to_frame='camera')", "def retrieve(self, clone: bool = False, *args, **kwargs) -> np.ndarray:\n if not self._frame_in_buffer:\n _, img = super(Camera, self).retrieve(*args, **kwargs)\n self._frame_buffer = cv.remap(img, *self.undistort_rectify_map, cv.INTER_LINEAR)\n self._frame_in_buffer = True\n\n if clone:\n return self._frame_buffer.copy()\n\n return self._frame_buffer", "def snapFrame(camera):\n return camera.read()[1]", "def proj_to_velo(calib_data):\n rect = calib_data[\"R0_rect\"].reshape(3, 3)\n #to transform a point from Lidar frame to camera frame\n #reshape the flat line with 12 elements to 
3X4 matrix\n velo_to_cam = calib_data[\"Tr_velo_to_cam\"].reshape(3, 4)\n#print('velo2cam', velo_to_cam)\n inv_rect = np.linalg.inv(rect)\n #select all rows and only first three columns\n#print('velo_to_cam[:, :3]', velo_to_cam[:, :3])\n #select all rows and only first three columns\n inv_velo_to_cam = np.linalg.pinv(velo_to_cam[:, :3])\n return np.dot(inv_velo_to_cam, inv_rect)", "def camera_coords_to_world_coords(point, cam_height, cam_angle):\n\n # adjust the axis order\n point = np.array([point[2], point[0], point[1]])\n\n # calculate the vectors of the camera axis in the desired coordinate system\n cam_direction = np.array([np.cos(cam_angle), 0, -np.sin(cam_angle)])\n z = cam_direction\n x = np.cross(np.array([0, 0, 1]), cam_direction)\n y = np.cross(z, x)\n\n # transposed rotation matrix\n rotation = np.vstack([x, y, z])\n\n # translation vector\n translation = np.array([0, 0, cam_height])\n\n return rotation @ (point - translation)", "def read(self):\r\n\t\t# get data from camera\r\n\t\tarray = self.ueye.get_data(self.pcImageMemory, self.width, self.height, self.nBitsPerPixel, self.pitch, copy=False)\r\n\t\t# get frame as numpy array\r\n\t\tframe = np.reshape(array,(self.height.value, self.width.value, self.bytes_per_pixel))\r\n\t\t\r\n\t\t\"\"\"\r\n\t\tcamera_matrix = np.array([\r\n\t\t\t[4.5330796457901283e+02, 0., 6.1902229288626302e+02],\r\n\t\t\t[0., 4.5369175559310276e+02, 5.1298362120979994e+02],\r\n\t\t\t[0., 0., 1.]])\r\n\t\t\r\n\t\tdist_coeffs = np.array([\r\n\t\t\t-3.1812973406286371e-01, 9.6396352148682182e-02,\r\n\t\t\t2.9601124432187590e-03, 9.7700591472463412e-04,\r\n\t\t\t-1.1929681608809075e-02\r\n\t\t])\r\n\r\n\t\tframe = cv2.undistort(frame, camera_matrix, dist_coeffs, camera_matrix)\r\n\t\t\"\"\"\r\n\r\n\t\treturn frame", "def vid2tensor( self, current_frame):", "def to_input_image(camera_frame):\n\tcv2_img = IMAGES_BRIDGE.imgmsg_to_cv2(camera_frame, \"bgr8\")\n\tcv2_img = cv2.resize(cv2_img, PROCESSED_IMG_SIZE)\n\timg = cv2_img[...,::-1].astype(np.float32) # Converts from GBR to RGB\n\n\ttensor = tf.convert_to_tensor(img_to_array(img))\n\ttensor = tensor / 255\n\n\treturn tensor", "def world_to_pixel(self, X):\n _X = self.world_to_camera(X)\n return self.camera_to_pixel(_X)", "def worldToCameraCentricXform(self):\n return self.rotateAlignXform().dot(self.translateToOriginXform())", "def armature_coor2world_coor():\n return armature.matrix_world @ bones_list[0].head", "def get_world_transform(axes):\n if is_wcsaxes(axes):\n transform = axes.get_transform('world')\n else:\n transform = axes.transData\n\n return transform", "def world_to_camera_normals(inverted_camera_quaternation, world_normals):\n exr_x, exr_y, exr_z = world_normals[0], world_normals[1], world_normals[2]\n camera_normal = np.empty([exr_x.shape[0], exr_x.shape[1], 3], dtype=np.float32)\n for i in range(exr_x.shape[0]):\n for j in range(exr_x.shape[1]):\n pixel_camera_normal = _multiply_quaternion_vec3(inverted_camera_quaternation,\n [exr_x[i][j], exr_y[i][j], exr_z[i][j]])\n camera_normal[i][j][0] = pixel_camera_normal[0]\n camera_normal[i][j][1] = pixel_camera_normal[1]\n camera_normal[i][j][2] = pixel_camera_normal[2]\n\n camera_normal = camera_normal.transpose(2, 0, 1)\n return camera_normal", "def publish_camera_frame(self):\n executive.get_camera_orientation()\n self.t.start()\n # Wait for transformation to be published\n rospy.sleep(2)", "def get_projection_mapping(self, cam_pos, cam_rot, local_frame=False, range1=True):\n\n cam_pos = cam_pos.copy()\n cam_pos[2] += self.h_offset\n\n K = 
self.make_camera_matrix()\n R_opt = self.make_optical_rotation_matrix()\n T_opt = affines.compose([0, 0, 0], R_opt, [1.0, 1.0, 1.0])\n T_opt_inv = np.linalg.inv(T_opt)\n T = self.make_world_to_camera_mat(cam_pos, cam_rot)\n Tinv = np.linalg.inv(T)\n\n # Get the map position encodings (MxMx3)\n pts_w = self.get_world_coord_grid()[..., np.newaxis]\n\n # Get the coordinates in camera frame:\n if not local_frame:\n # If we're using a global map frame, transform the map coordinates into the camera frame\n pts_cam = np.matmul(Tinv[np.newaxis, ...], pts_w)\n else:\n # If we're using local frame, camera is centered in the map, but pitch must still be taken into account!\n # TODO: Fix this and add pitch\n pts_cam = pts_w\n pts_cam[:, 0:2] = pts_cam[:, 0:2] - self.map_world_size_px / 2\n\n # Get the coordinates in optical frame\n pts_opt = np.matmul(T_opt_inv[np.newaxis, ...], pts_cam)\n\n # Get the 3D coordinates of the map pixels in the image frame:\n pts_img = np.matmul(K[np.newaxis, ...], pts_opt[:, 0:3, :])\n\n # Convert to homogeneous (image-plane) coordinates\n valid_z = pts_img[:, 2:3, :] > 0\n\n pts_img = pts_img / (pts_img[:, 2:3] + 1e-9)\n #pts_img[:, 0] = pts_img[:, 0] / (pts_img[:, 2] + 1e-9)\n #pts_img[:, 1] = pts_img[:, 1] / (pts_img[:, 2] + 1e-9)\n\n # Mask out all the map elements that don't project on the image\n valid_y1 = pts_img[:, 0:1, :] > 0\n valid_y2 = pts_img[:, 0:1, :] < self.res_x\n valid_x1 = pts_img[:, 1:2, :] > 0\n valid_x2 = pts_img[:, 1:2, :] < self.res_y\n\n # Throw away the homogeneous Z coordinate\n pts_img = pts_img[:, 0:2]\n\n valid = valid_y1 * valid_y2 * valid_x1 * valid_x2 * valid_z\n\n # PyTorch takes projection mappings in -1 to 1 range:\n if range1:\n pts_img[:, 0] = (-pts_img[:, 0] + self.res_x / 2) / (self.res_x / 2)\n pts_img[:, 1] = (-pts_img[:, 1] + self.res_y / 2) / (self.res_y / 2)\n\n # Make sure the invalid points are out of range\n pts_img = pts_img * valid + 2 * np.ones_like(pts_img) * (1 - valid)\n else:\n pts_img = pts_img * valid\n\n # Remove the extra 1-length dimension\n pts_img = pts_img.squeeze()\n\n # Reshape into the 2D map representation\n pts_img = np.reshape(pts_img, [self.map_size_px, self.map_size_px, 2])\n\n return pts_img", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n # print (x, y, z)\n cameraEyePosition = list([x, y-0.75, 1.0])\n cameraTargetPosition = [x, y, 1.0]\n cameraUpVector = [0, 0, 1]\n\n fov = 120\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n # TODO: fix me to be along moving axis\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n # viewMatrix = p.computeViewMatrixFromYawPitchRoll(camTargetPos, camDistance, yaw, pitch, roll, upAxisIndex)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n # w=img_arr[0] #width of the image, in pixels\n # h=img_arr[1] #height of the image, in pixels\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n return gray", "def polarCameraToCartesian(self):\n x = self.cameraPolar[0] * np.sin(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n y = self.cameraPolar[0] * 
np.cos(self.cameraPolar[2] * np.pi / 180)\n z = self.cameraPolar[0] * np.cos(self.cameraPolar[1] * np.pi / 180) * np.sin(self.cameraPolar[2] * np.pi / 180)\n self.cameraPosition = [x, y, z]", "def cam_to_body(self, vector):\n cam2hand = generate_frame_transform(self._cam2hand_t[0:3,:],self._cam2hand_R[0:3,0:3],False)\n # Possibly GRASP specific function?\n hand_pose = baxter.get_right_arm_pose()\n (t,R) = get_t_R(hand_pose)\n hand2body = generate_frame_transform(t[0:3,:],R[0:3,0:3],True)\n return np.dot(hand2body,np.dot(cam2hand,vector))", "def det_to_world(self, x, y):\n ra, dec = self._wcs.all_pix2world(x, y, 0)\n return ra, dec", "def ConvertScreenToWorld(self, x, y):\r\n return b2.b2Vec2((x + self.viewOffset.x) / self.viewZoom,\r\n ((self.screenSize.y - y + self.viewOffset.y)\r\n / self.viewZoom))", "def registerDepthFrame(self, frame):\n h, w = frame.shape[:2]\n frame = cv2.warpAffine(frame,self.depth2rgb_affine,(w,h))\n\n return frame", "def convert_to(self, dst, rt_mat=None):\n from .box_3d_mode import Box3DMode\n return Box3DMode.convert(\n box=self, src=Box3DMode.CAM, dst=dst, rt_mat=rt_mat)", "def transform_vec_to_egocentric_frame(self, physics, vec_in_world_frame):\n vec_in_world_frame = np.asarray(vec_in_world_frame)\n\n xmat = np.reshape(physics.bind(self.root_body).xmat, (3, 3))\n # The ordering of the np.dot is such that the transformation holds for any\n # matrix whose final dimensions are (2,) or (3,).\n if vec_in_world_frame.shape[-1] == 2:\n return np.dot(vec_in_world_frame, xmat[:2, :2])\n elif vec_in_world_frame.shape[-1] == 3:\n return np.dot(vec_in_world_frame, xmat)\n else:\n raise ValueError('`vec_in_world_frame` should have shape with final '\n 'dimension 2 or 3: got {}'.format(\n vec_in_world_frame.shape))", "def get_frame(cap):\n\n #camera matrix for camera calibration\n mtx = np.array(np.mat(\"588.4525598886621, 0, 301.8008794717551; 0, 588.9763096391521, 242.617026416902; 0, 0, 1\"))\n\n #distortion coefficients for camera calibration\n dist = np.array(np.mat(\"-0.4351555722591889, 0.2082765081608728, -0.006072767012672472, 0.008139871640987759, 0\"))\n\n #get image frame from the camera\n ret, frame = cap.read()\n\n return frame\n\n h, w = frame.shape[:2]\n\n #get the new optimal camera matrix and the roi which can be used to crop the result\n newcameramtx, roi=cv2.getOptimalNewCameraMatrix(mtx,dist,(w,h),0,(w,h))\n\n #get the undistorted image\n dst = cv2.undistort(frame, mtx, dist, None, newcameramtx)\n\n x,y,w,h = roi\n\n #get the cropped image\n dst = dst[y:y+h, x:x+w]\n h, w = dst.shape[:2]\n\n #further crop the image to reduce the size of arena\n dst = dst[int(h/7):int(h*6/7), int(w/7):int(w*6/7)]\n\n #resize the arena to ARENA_SIZE\n dst = cv2.resize(dst, ARENA_SIZE, interpolation= cv2.INTER_CUBIC)\n\n return dst", "def from_simulator_frame(cls,\n frame,\n camera_setup,\n save_original_frame=False):\n original_frame = None\n # Convert an image containing simulator encoded depth-map to a 2D\n # array containing the depth value of each pixel normalized\n # between [0.0, 1.0]\n _frame = np.frombuffer(frame.raw_data, dtype=np.dtype(\"uint8\"))\n _frame = np.reshape(_frame, (frame.height, frame.width, 4))\n frame = _frame.astype(np.float32)\n if save_original_frame:\n original_frame = copy.deepcopy(frame[:, :, :3])\n # Apply (R + G * 256 + B * 256 * 256) / (256 * 256 * 256 - 1).\n frame = np.dot(frame[:, :, :3], [65536.0, 256.0, 1.0])\n frame /= 16777215.0 # (256.0 * 256.0 * 256.0 - 1.0)\n return cls(frame, camera_setup, original_frame)", "def 
_to_parent_frame(self, *args, **kwargs):\n lat, lon, _ = self.latlonalt\n m = rot3(-lon) @ rot2(lat - np.pi / 2.0) @ rot3(self.heading)\n offset = np.zeros(6)\n offset[:3] = self.coordinates\n return self._convert(m, m), offset", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n # print (x, y, z)\n\n if self.camera_type == 'follow':\n cameraEyePosition = [x, y-1.25, 1.0]\n cameraTargetPosition = [x, y, 1.0]\n elif self.camera_type == 'fixed':\n cameraEyePosition = [2.0, y-2.5, 1.0]\n cameraTargetPosition = [2.0, y, 1.0]\n\n cameraUpVector = [0, 0, 1]\n\n fov = 90\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n\n # assign patch at bottom to show distance, this is to differentiate frames\n # bar_width_pix = int(y/5.0*self.render_dims[1])\n # bar_height_pix = 10\n # gray[0][self.render_dims[0]-bar_height_pix:, 0:bar_width_pix] = 255\n return gray", "def plot_local_coordinate_frame(world, origin_transform, axis_length_scale = 3):\n\n # for test\n # origin_transform = transform\n # axis_length_scale = 3\n\n # longitudinal direction(x-axis)\n yaw = np.deg2rad(origin_transform.rotation.yaw)\n # x axis\n cy = np.cos(yaw)\n sy = np.sin(yaw)\n\n Origin_coord = np.array(\n [origin_transform.location.x, origin_transform.location.y, origin_transform.location.z+1])\n # elevate z coordinate\n Origin_location = carla.Location(Origin_coord[0], Origin_coord[1], Origin_coord[2])\n # x axis destination\n x_des_coord = Origin_coord + axis_length_scale * np.array([cy, sy, 0])\n x_des = carla.Location(x_des_coord[0], x_des_coord[1], x_des_coord[2])\n # y axis destination\n y_des_coord = Origin_coord + axis_length_scale * np.array([-sy, cy, 0])\n y_des = carla.Location(y_des_coord[0], y_des_coord[1], y_des_coord[2])\n # z axis destination\n z_des_coord = Origin_coord + axis_length_scale * np.array([0, 0, 1])\n z_des = carla.Location(z_des_coord[0], z_des_coord[1], z_des_coord[2])\n\n \"\"\"\n color set for each axis \n x-axis red: (255, 0, 0)\n y-axis green: (0, 255, 0)\n z-axis blue: (0, 0, 255)\n \"\"\"\n x_axis_color = carla.Color(255, 0, 0)\n y_axis_color = carla.Color(0, 255, 0)\n z_axis_color = carla.Color(0, 0, 255)\n\n # axis feature\n # thickness = 0.1f\n # arrow_size = 0.1f\n\n # draw x axis\n world.debug.draw_arrow(Origin_location, x_des, color=x_axis_color)\n # draw y axis\n world.debug.draw_arrow(Origin_location, y_des, color=y_axis_color)\n # draw z axis\n world.debug.draw_arrow(Origin_location, z_des, color=z_axis_color)", "def complex_camera(camera, target_rect):\n l, t, _, _ = target_rect\n _, _, w, h = camera\n l, t, _, _ = -l+300, -t+300, w, h\n\n l = min(0, l)\n l = max(-(camera.width-600), l)\n t = max(-(camera.height-600), t)\n t = min(0, t)\n\n return pygame.Rect(l, t, w, h)", "def translateTrame(self,inTrame):\n rawConvertedY=int((inTrame.data1+inTrame.data0),16)\n rawConvertedX=int((inTrame.data3+inTrame.data2),16)\n 
absX=int(round(rawConvertedX/(16**4-1.0)*self.maxX))\n absY=int(round(rawConvertedY/(16**4-1.0)*self.maxY))\n LOGGER.info(\"Position sensor {} with new coordinate {} -- {}\".format(self.physic_id,absX,absY))\n return {\"coordX\":absX,\"coordY\":absY}", "def to_coord_frame(self, frame, galactocentric_frame=None, **kwargs):\n\n if self.ndim != 3:\n raise ValueError(\"Can only change representation for \"\n \"ndim=3 instances.\")\n\n if galactocentric_frame is None:\n galactocentric_frame = coord.Galactocentric()\n\n pos_keys = list(self.pos_components.keys())\n vel_keys = list(self.vel_components.keys())\n if (getattr(self, pos_keys[0]).unit == u.one or\n getattr(self, vel_keys[0]).unit == u.one):\n raise u.UnitConversionError(\"Position and velocity must have \"\n \"dimensioned units to convert to a \"\n \"coordinate frame.\")\n\n # first we need to turn the position into a Galactocentric instance\n gc_c = galactocentric_frame.realize_frame(\n self.pos.with_differentials(self.vel))\n c = gc_c.transform_to(frame)\n return c", "def get_render_obs(self):\n x, y, z = self.robot.body_xyz\n\n if self.camera_type == 'follow':\n cameraEyePosition = [x, y-1.25, 1.0]\n cameraTargetPosition = [x, y, 1.0]\n elif self.camera_type == 'fixed':\n # y-2.7 not 2.5 since cheetah is longer\n cameraEyePosition = [2.0, y-2.7, 1.0]\n cameraTargetPosition = [2.0, y, 1.0]\n\n cameraUpVector = [0, 0, 1]\n\n fov = 90\n aspect = self.render_dims[0] / self.render_dims[1]\n nearPlane = 0.05 # this ensures outside body, may see limbs\n farPlane = 100.0\n\n viewMatrix = p.computeViewMatrix(cameraEyePosition, cameraTargetPosition, cameraUpVector, physicsClientId=self.physicsClientId)\n projectionMatrix = p.computeProjectionMatrixFOV(fov, aspect, nearPlane, farPlane);\n img_arr = p.getCameraImage(self.render_dims[0], self.render_dims[1], viewMatrix, projectionMatrix, renderer=p.ER_BULLET_HARDWARE_OPENGL, physicsClientId=self.physicsClientId)\n\n rgb=img_arr[2] #color data RGB\n gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)\n gray = gray.reshape((1, *self.render_dims))\n gray[gray > 0] = 255\n\n # assign patch at bottom to show distance, this is to differentiate frames\n # bar_width_pix = int(y/5.0*self.render_dims[1])\n # bar_height_pix = 10\n # gray[0][self.render_dims[0]-bar_height_pix:, 0:bar_width_pix] = 255\n return gray", "def simple_camera(camera, target_rect):\n l, t, _, _ = target_rect\n _, _, w, h = camera\n return pygame.Rect(-l+300, -t+300, w, h)", "def get_image(self):\n self.flush_buffer()\n _, frame = self.cam.read()\n shift_frame = self.perspective_shift(frame)\n #shift_frame = None\n return frame, shift_frame", "def registerDepthFrame(self, frame):\n frame = cv2.warpAffine(frame, self.depth2rgb_affine,\n (frame.shape[1], frame.shape[0]))\n return frame", "def add_frame(self, frame, player_box):\n # ROI is a small box around the player\n box_center = center_of_box(player_box)\n patch = frame[int(box_center[1] - self.box_margin): int(box_center[1] + self.box_margin),\n int(box_center[0] - self.box_margin): int(box_center[0] + self.box_margin)].copy()\n patch = imutils.resize(patch, 299)\n frame_t = patch.transpose((2, 0, 1)) / 255\n frame_tensor = torch.from_numpy(frame_t).type(self.dtype)\n frame_tensor = self.normalize(frame_tensor).unsqueeze(0)\n with torch.no_grad():\n # forward pass\n features = self.feature_extractor(frame_tensor)\n features = features.unsqueeze(1)\n # Concatenate the features to previous features\n if self.frames_features_seq is None:\n self.frames_features_seq = features\n else:\n 
self.frames_features_seq = torch.cat([self.frames_features_seq, features], dim=1)", "def CoordTrans(frame1, frame2, original_vec, oe=np.zeros(6), \n theta_gst=float('NaN'), lla_gs=np.zeros(3), mu=c.mu_earth, \n r_body=c.r_earth):\n\n # Orbital Elements\n a, e, inc, raan, w, nu = oe\n\n # Warnings\n oe_frames = ['ric', 'ntw', 'pqw']\n if any(frame in oe_frames for frame in (frame1, frame2)):\n if oe.dot(oe) == 0:\n print('ERROR: You forgot to define the orbital elements!')\n\n topocentric_frames = ['sez']\n if any(frame in topocentric_frames for frame in (frame1, frame2)):\n if lla_gs.dot(lla_gs) == 0:\n print('ERROR: You forgot lla for the ground stations!')\n\n # Coordinate System Logic\n if frame1.lower() == 'bci':\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(original_vec, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(original_vec, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(original_vec, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec1 = bci2bcbf(original_vec, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'bcbf':\n if frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(original_vec, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec1 = bcbf2bci(original_vec, theta_gst)\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(original_vec, r_body=r_body)\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(original_vec, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ric':\n rotated_vec1 = ric2bci(original_vec, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, 
theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'ntw':\n rotated_vec1 = ntw2bci(original_vec, e, raan, inc, w, nu)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec1, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'pqw':\n rotated_vec1 = pqw2bci(original_vec, raan, inc, w)\n if frame2.lower() == 'bcbf':\n rotated_vec = bci2bcbf(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec1, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec1, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'lla':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2lla(rotated_vec2, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec2 = bci2bcbf(rotated_vec1, theta_gst)\n rotated_vec = bcbf2sez(rotated_vec2, lla_gs, r_body=r_body)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'lla':\n rotated_vec1 = lla2bcbf(original_vec, r_body=r_body)\n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'ntw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'pqw':\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'bci':\n rotated_vec = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to define theta_gst!')\n \n elif frame2.lower() == 'sez':\n rotated_vec = bcbf2sez(rotated_vec1, lla_gs, r_body=r_body)\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n elif frame1.lower() == 'sez':\n rotated_vec1 = sez2bcbf(original_vec, lla_gs, r_body=r_body)\n rotated_vec2 = bcbf2bci(rotated_vec1, theta_gst)\n if np.isnan(theta_gst):\n print('ERROR: You forgot to 
define theta_gst!')\n \n if frame2.lower() == 'bcbf':\n rotated_vec = rotated_vec1\n \n elif frame2.lower() == 'ric':\n rotated_vec = bci2ric(rotated_vec2, raan, inc, w, nu)\n \n elif frame2.lower() == 'ntw':\n rotated_vec = bci2ntw(rotated_vec2, e, raan, inc, w, nu)\n \n elif frame2.lower() == 'pqw':\n rotated_vec = bci2pqw(rotated_vec2, raan, inc, w)\n \n elif frame2.lower() == 'lla':\n rotated_vec = bcbf2lla(rotated_vec1, r_body=r_body)\n \n elif frame2.lower() == 'bci':\n rotated_vec = rotated_vec2\n\n else:\n print('ERROR: Frame2 is not included in this function!')\n\n else:\n print('ERROR: Frame1 is not included in this function!')\n\n return rotated_vec", "def get_world_trans(m_obj):\n plug = get_world_matrix_plug(m_obj, 0)\n matrix_obj = plug.asMObject()\n matrix_data = oMa.MFnMatrixData(matrix_obj)\n matrix = matrix_data.matrix()\n\n trans_matrix = oMa.MTransformationMatrix(matrix)\n trans = trans_matrix.translation(oMa.MSpace.kWorld)\n\n return trans", "def world_to_map(self,pos):\n pos_center_map_x = pos[1]-self.map_info.origin.position.y\n pos_center_map_y = pos[0]-self.map_info.origin.position.x\n pos_center_map_x = pos_center_map_x/self.map_info.resolution\n pos_center_map_y = pos_center_map_y/self.map_info.resolution\n pos_center_map_x = int(pos_center_map_x )\n pos_center_map_y = int(pos_center_map_y)\n return (pos_center_map_x,pos_center_map_y)", "def relative_pose_cam_to_body(\n relative_scene_pose, Rt_cam2_gt\n ):\n relative_scene_pose = (\n np.linalg.inv(Rt_cam2_gt)\n @ relative_scene_pose\n @ Rt_cam2_gt\n )\n return relative_scene_pose", "def copy(self):\n return CameraExtrinsic(self.position, self.direction, self.up)", "def pixel2cam_torch(depth, pixel_coords, intrinsics, is_homogeneous=True):\n batch, height, width = depth.shape\n depth = torch.reshape(depth, [batch, 1, -1])\n pixel_coords = torch.reshape(pixel_coords, [batch, 3, -1])\n cam_coords = torch.matmul(torch.inverse(intrinsics), pixel_coords) * depth\n\n if is_homogeneous:\n ones = torch.ones([batch, 1, height * width], device=pixel_coords.device)\n cam_coords = torch.cat([cam_coords, ones], axis=1)\n cam_coords = torch.reshape(cam_coords, [batch, -1, height, width])\n return cam_coords", "def camera_matrix(self) -> TransformationMatrixType:\n return numpy.matmul(\n self.rotation_matrix(*self.rotation),\n displacement_matrix(*-numpy.array(self.location)),\n )", "def camera_to_pixel(self, X):\n raise NotImplementedError", "def export_camera(file, scene, global_matrix, render, tab_write):\n camera = scene.camera\n\n # DH disabled for now, this isn't the correct context\n active_object = None # bpy.context.active_object # does not always work MR\n matrix = global_matrix @ camera.matrix_world\n focal_point = camera.data.dof.focus_distance\n\n # compute resolution\n q_size = render.resolution_x / render.resolution_y\n tab_write(file, \"#declare camLocation = <%.6f, %.6f, %.6f>;\\n\" % matrix.translation[:])\n tab_write(\n file,\n (\n \"#declare camLookAt = <%.6f, %.6f, %.6f>;\\n\"\n % tuple(degrees(e) for e in matrix.to_3x3().to_euler())\n ),\n )\n\n tab_write(file, \"camera {\\n\")\n if scene.pov.baking_enable and active_object and active_object.type == \"MESH\":\n tab_write(file, \"mesh_camera{ 1 3\\n\") # distribution 3 is what we want here\n tab_write(file, \"mesh{%s}\\n\" % active_object.name)\n tab_write(file, \"}\\n\")\n tab_write(file, \"location <0,0,.01>\")\n tab_write(file, \"direction <0,0,-1>\")\n\n else:\n if camera.data.type == \"ORTHO\":\n # XXX todo: track when SensorHeightRatio was added to see 
if needed (not used)\n sensor_height_ratio = (\n render.resolution_x * camera.data.ortho_scale / render.resolution_y\n )\n tab_write(file, \"orthographic\\n\")\n # Blender angle is radian so should be converted to degrees:\n # % (camera.data.angle * (180.0 / pi) )\n # but actually argument is not compulsory after angle in pov ortho mode\n tab_write(file, \"angle\\n\")\n tab_write(file, \"right <%6f, 0, 0>\\n\" % -camera.data.ortho_scale)\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"up <0, %6f, 0>\\n\" % (camera.data.ortho_scale / q_size))\n\n elif camera.data.type == \"PANO\":\n tab_write(file, \"panoramic\\n\")\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(file, \"angle %f\\n\" % (360.0 * atan(16.0 / camera.data.lens) / pi))\n elif camera.data.type == \"PERSP\":\n # Standard camera otherwise would be default in pov\n tab_write(file, \"location <0, 0, 0>\\n\")\n tab_write(file, \"look_at <0, 0, -1>\\n\")\n tab_write(file, \"right <%s, 0, 0>\\n\" % -q_size)\n tab_write(file, \"up <0, 1, 0>\\n\")\n tab_write(\n file,\n \"angle %f\\n\"\n % (2 * atan(camera.data.sensor_width / 2 / camera.data.lens) * 180.0 / pi),\n )\n\n tab_write(\n file,\n \"rotate <%.6f, %.6f, %.6f>\\n\" % tuple(degrees(e) for e in matrix.to_3x3().to_euler()),\n )\n\n tab_write(file, \"translate <%.6f, %.6f, %.6f>\\n\" % matrix.translation[:])\n if camera.data.dof.use_dof and (focal_point != 0 or camera.data.dof.focus_object):\n tab_write(\n file, \"aperture %.3g\\n\" % (1 / (camera.data.dof.aperture_fstop * 10000) * 1000)\n )\n tab_write(\n file,\n \"blur_samples %d %d\\n\"\n % (camera.data.pov.dof_samples_min, camera.data.pov.dof_samples_max),\n )\n tab_write(file, \"variance 1/%d\\n\" % camera.data.pov.dof_variance)\n tab_write(file, \"confidence %.3g\\n\" % camera.data.pov.dof_confidence)\n if camera.data.dof.focus_object:\n focal_ob = scene.objects[camera.data.dof.focus_object.name]\n matrix_blur = global_matrix @ focal_ob.matrix_world\n tab_write(file, \"focal_point <%.4f,%.4f,%.4f>\\n\" % matrix_blur.translation[:])\n else:\n tab_write(file, \"focal_point <0, 0, %f>\\n\" % focal_point)\n if camera.data.pov.normal_enable:\n tab_write(\n file,\n \"normal {%s %.4f turbulence %.4f scale %.4f}\\n\"\n % (\n camera.data.pov.normal_patterns,\n camera.data.pov.cam_normal,\n camera.data.pov.turbulence,\n camera.data.pov.scale,\n ),\n )\n tab_write(file, \"}\\n\")", "def locate_camera_frame(self, exp_id, left_cam_id, right_cam_id, frame_id):\n return Response(self.gen_locate_camera_frame(exp_id,\n left_cam_id, right_cam_id,\n frame_id),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def camera_frame_directions(self) -> _BFRAME_TYPE:\n\n return self._base_frame_directions", "def rot_world_space_to_local_space(m_obj, parent_m_obj):\n obj_world_mat = get_world_matrix(m_obj, 0)\n parent_inv_mat = get_world_inv_matrix(parent_m_obj, 0)\n\n local_space_mat = obj_world_mat * parent_inv_mat\n trans_matrix = oMa.MTransformationMatrix(local_space_mat)\n rot = trans_matrix.rotation()\n\n return rot", "def cam():\n\treturn Response(gen(camera),\n\t\t\t\t\tmimetype='multipart/x-mixed-replace; boundary=frame'), 200", "def to_frame(self, frame, current_frame=None, **kwargs):\n\n from ..potential.frame.builtin import transformations as frame_trans\n\n if self.frame is None and current_frame is None:\n raise ValueError(f\"If 
no frame was specified when this {self} was \"\n \"initialized, you must pass the current frame in \"\n \"via the current_frame argument to transform to a \"\n \"new frame.\")\n\n elif self.frame is not None and current_frame is None:\n current_frame = self.frame\n\n name1 = current_frame.__class__.__name__.rstrip('Frame').lower()\n name2 = frame.__class__.__name__.rstrip('Frame').lower()\n func_name = f\"{name1}_to_{name2}\"\n\n if not hasattr(frame_trans, func_name):\n raise ValueError(\"Unsupported frame transformation: {} to {}\"\n .format(current_frame, frame))\n else:\n trans_func = getattr(frame_trans, func_name)\n\n pos, vel = trans_func(current_frame, frame, self, **kwargs)\n return PhaseSpacePosition(pos=pos, vel=vel, frame=frame)", "def adjust_camera(self):\n pose = deepcopy(self.data['poses']['marker']) # PoseStamped()\n eye_pose = deepcopy(pose)\n eye_pose.pose.position.x += 0.60\n eye_pose.pose.position.z += 0.20\n focus_pose = PoseStamped()\n base_eye_pose = PoseStamped()\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'], pose.header.frame_id)\n focus_pose = self.tfl.transformPose(self.params['world'], pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n try:\n # Convert pose to base frame\n pose.header.stamp = self.tfl. \\\n getLatestCommonTime(self.params['world'],\n eye_pose.header.frame_id)\n base_eye_pose = self.tfl.transformPose(self.params['world'],\n eye_pose)\n except (TfE, LookupException, ConnectivityException):\n Me.error_message(\"Error transforming pose \" + pose.header.frame_id)\n\n cam_place = CameraPlacement()\n cam_place.target_frame = self.params['world']\n cam_place.time_from_start = Duration(1)\n # Position of the camera relative to target_frame\n cam_place.eye.header.frame_id = cam_place.target_frame\n cam_place.eye.point = base_eye_pose.pose.position\n # Target_frame-relative point for the focus\n cam_place.focus.header.frame_id = cam_place.target_frame\n cam_place.focus.point = focus_pose.pose.position\n # Target_frame-relative vector that maps to \"up\" in the view plane.\n cam_place.up.header.frame_id = cam_place.target_frame\n cam_place.up.vector.x = 0\n cam_place.up.vector.y = 0\n cam_place.up.vector.z = 1\n self.pub.publish(cam_place)\n return", "def project_roi(self, roi, frame_id=None):\n response = self.project_rois(rois=[roi]).points[0]\n\n # Convert to VectorStamped\n result = VectorStamped(x=response.point.x, y=response.point.y, z=response.point.z,\n frame_id=response.header.frame_id)\n\n # If necessary, transform the point\n if frame_id is not None:\n print(\"Transforming roi to {}\".format(frame_id))\n result = result.projectToFrame(frame_id=frame_id, tf_listener=self.tf_listener)\n\n # Return the result\n return result", "def _world_to_sensor(cords, sensor):\n\n sensor_world_matrix = ClientSideBoundingBoxes.get_matrix(sensor.get_transform())\n world_sensor_matrix = np.linalg.inv(sensor_world_matrix)\n sensor_cords = np.dot(world_sensor_matrix, cords)\n return sensor_cords", "def _prepare_frame(self, frame):\n\n initial_h, initial_w = frame.shape[:2]\n scale_h, scale_w = initial_h / float(self.input_height), initial_w / float(self.input_width)\n\n in_frame = cv2.resize(frame, (self.input_width, self.input_height))\n in_frame = in_frame.transpose((2, 0, 1))\n in_frame = in_frame.reshape(self.input_size)\n\n return in_frame, scale_h, scale_w", "def translate_frames(model, frames):\n 
frames_v = autograd.Variable(torch.FloatTensor(frames).cuda())\n out_frames = model(frames_v)\n return out_frames.cpu().numpy()", "def _get_world_to_slice(self) :\n \n return self._world_to_slice", "def cam2pixel(self, cam_coords, pose):\n\n b, _, h, w = cam_coords.size()\n cam_coords_flat = cam_coords.view(b,3,-1) # [B,3,H*W]\n pcoords = pose[:,:,0:3].bmm(cam_coords_flat) + pose[:,:,3].view(b,3,1) #Bx[3x3 x 3xH*W] = [B x 3 x H*W]\n X, Y, Z = pcoords[:,0,:].clamp(-1e20,1e20), pcoords[:,1,:].clamp(-1e20,1e20), pcoords[:,2,:].clamp(1e-20,1e20) #each are [B x H*W] \n X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]\n Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]\n\n X_mask = ((X_norm > 1)+(X_norm < -1)).detach()\n X_norm[X_mask] = 2 # make sure that no point in warped image is a combination of im and gray\n Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()\n Y_norm[Y_mask] = 2\n\n pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]\n return pixel_coords.view(b,h,w,2)", "def tanp_to_world(self, x, y):\n crpix1, crpix2 = self._wcs.wcs.crpix\n x = x + crpix1\n y = y + crpix2\n ra, dec = self._wcslin.all_pix2world(x, y, 1)\n return ra, dec", "def map_to_matrix(x, y):\n x_pos = round(x * ((MATRIX_SIZE_X - 1)/(FRAME_W - 1)))\n y_pos = round(y * ((MATRIX_SIZE_Y - 1)/(FRAME_H - 1)))\n\n x_pos = (MATRIX_SIZE_X - 1) - x_pos #invert x direction (left and right) to account for camera perspective\n\n return x_pos, y_pos", "def world_projection(self, aspect):\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n if aspect < 1:\n gluOrtho2D(\n -self.scale,\n +self.scale,\n -self.scale / aspect,\n +self.scale / aspect)\n else:\n gluOrtho2D(\n -self.scale * aspect,\n +self.scale * aspect,\n -self.scale,\n +self.scale)\n\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n gluLookAt(\n self.x, self.y, +1.0,\n self.x, self.y, -1.0,\n sin(self.angle), cos(self.angle), 0.0)", "def world_xyz_images_from_dodeca(self):\n if not hasattr(self, '_world_xyz_images_from_dodeca'):\n world_images = []\n for i in range(20):\n im_i = geom_util_np.apply_4x4(\n self.cam_xyz_images_from_dodeca[i, ...],\n self.dodeca_cam2world[i, ...],\n are_points=True)\n mask = np_util.make_mask(self.depth_images[0, i, ...])\n world_images.append(np_util.zero_by_mask(mask, im_i).astype(np.float32))\n self._world_xyz_images_from_dodeca = np.stack(world_images)\n return self._world_xyz_images_from_dodeca", "def get_frame(self, camera: int = 0) -> Tuple[float, np.ndarray]:\n return self.video.read()", "def world2uv(self, vertices):\n batch_size = vertices.shape[0]\n face_vertices_ = face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1), self.uvfaces.expand(batch_size, -1, -1), face_vertices_)[:, :3]\n return uv_vertices" ]
[ "0.77324754", "0.7427758", "0.6999884", "0.6942515", "0.68882954", "0.630541", "0.62254226", "0.6142121", "0.6108579", "0.60515493", "0.6012601", "0.6009316", "0.60076267", "0.5937143", "0.5847855", "0.58359575", "0.58199376", "0.57920545", "0.57797766", "0.5773777", "0.57097495", "0.5685125", "0.5676643", "0.56354815", "0.5628643", "0.5621301", "0.5607606", "0.5598095", "0.5580759", "0.55767304", "0.55711716", "0.55673087", "0.55535585", "0.55519515", "0.5549465", "0.5545186", "0.55242985", "0.55216634", "0.5477241", "0.54630417", "0.54529333", "0.5436564", "0.5402903", "0.53906494", "0.5389497", "0.538679", "0.5370528", "0.5369277", "0.5364022", "0.5357669", "0.53565705", "0.5355464", "0.53541154", "0.5347307", "0.53366023", "0.5335447", "0.5323356", "0.5318404", "0.5305692", "0.529951", "0.52877724", "0.5279158", "0.5276242", "0.52741134", "0.5272424", "0.52695584", "0.52586395", "0.5246883", "0.52387166", "0.5217055", "0.5213611", "0.52087003", "0.5206924", "0.5206358", "0.5204192", "0.52023673", "0.5194751", "0.5193375", "0.5184837", "0.51826584", "0.51768357", "0.5163645", "0.5162217", "0.5155786", "0.5154167", "0.5150125", "0.5148481", "0.5142963", "0.5141515", "0.5135498", "0.51312596", "0.5127293", "0.511607", "0.51144254", "0.5098668", "0.5092825", "0.50817925", "0.50762695", "0.50725126", "0.5071066" ]
0.7209461
2
Test GenBank parsing invalid product line raises ValueError
def test_invalid_product_line_raises_value_error(self):
    def parse_invalid_product_line():
        rec = SeqIO.read(path.join('GenBank', 'invalid_product.gb'), 'genbank')
    self.assertRaises(ValueError, parse_invalid_product_line)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testBadLine(self):\n\n self.assertRaises(\n ValueError,\n tools._trackInfo,\n 'not a real line'\n )", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def test_invalid_regref(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float q0 = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved for register references\"):\n parse_input_mocked_metadata(\"float array q4 =\\n\\t-0.1, 0.2\")", "def test_uss_num_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_uss_num(val))", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def test_step_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot STEP \"hello there\" X temperature_mid\n \"\"\"\n\n # TODO make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def test_wrong_type_error(self, parse_input_mocked_metadata):\n with pytest.raises(ValueError, match=\"invalid value\"):\n bb = parse_input_mocked_metadata(\n \"for int m in [1, 4.2, 9]\\n\\tMZgate(0, 1) | [0, 1]\"\n )", "def doomed_parser(line):\n raise exceptions.LineParseException('Error occurred')", "def test_validate_input_valid(self):\n final_config = self.dtm1.validate_input('00001111')\n nose.assert_equal(final_config[0], 'q4')\n nose.assert_equal(str(final_config[1]), 'TMTape(\\'xxxxyyyy.\\')')", "def test_read_line(self):\n\n expected_data = ['\\\"lu, jr\\\"','ming-yuan','\\\"DRUG,1\\\"',135.999,True,3]\n input_string = '001,\\\"LU, JR\\\",MING-YUAN,\\\"DRUG,1\\\",135.999\\n'\n data = read_line(input_string)\n self.assertEqual(expected_data[0],data[0])\n self.assertEqual(expected_data[1],data[1])\n self.assertEqual(expected_data[2],data[2])\n self.assertAlmostEqual(expected_data[3],data[3])\n self.assertEqual(expected_data[4],data[4])\n self.assertAlmostEqual(expected_data[5],data[5])\n\n #Check for odd numers of quotation marks\n input_string = '001,\\\"LU\\\",\\\"MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for missing fields\n input_string = '001,,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n #Check for corrupted fields\n input_string = '001x,LU,MING-YUAN,DRUG1,135\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])\n\n input_string = '001,LU,MING-YUAN,DRUG1,1ag5\\n'\n data = read_line(input_string)\n self.assertFalse(data[4])", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def test_invalid_format(self):\n input_file = self.copy_and_mark_for_cleanup(\"Medline/pubmed_result1.txt\")\n\n cline = XXmotifCommandline(outdir=self.out_dir, seqfile=input_file)\n\n try:\n stdout, stderr = cline()\n except ApplicationError as err:\n self.assertEqual(err.returncode, 255)\n else:\n self.fail(f\"Should have failed, returned:\\n{stdout}\\n{stderr}\")", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def test_addr_zip_bad_values(self):\n for val 
in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_zip(val))", "def test_team_reg_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_team_reg(val))", "def test_gender_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_gender(val))", "def test_release_tag_for_invalid_version(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version foo.bar.ba\"):\n release_tag()", "def test_validate_input_rejection_invalid_symbol(self):\n with nose.assert_raises(exceptions.RejectionError):\n self.dtm1.validate_input('02')", "def test_invalid_aggregation():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date BY YEAR Y temperature_mid ARGMIN\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_parse_invalid_file(self):\n with pytest.raises(ParserError):\n self.parser.parse(\"invalid csv\")", "def test_no_specification_error():\n try:\n bad_arm = survey.get_spiral_slice()\n except SyntaxError:\n assert True\n else:\n assert False", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_lsusb_parse_error_generic(self):\n self.assertRaises(ParseError, jc.parsers.lsusb.parse, self.generic_lsusb_t, quiet=True)", "def test_bins_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot X temperature_mid BINS hi\n \"\"\"\n\n # TODO Make exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_parse_invalid_version(self):\n version = VersionNumberScaleMeasurement.parse_version(\"This is not a version number\")\n self.assertEqual(Version(\"0\"), version)", "def test_noQuantity(self):\n # result = self.parser.parse(\"d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_schema_invalid_format(self):\n bad_schema = [int, int, float, float, str]\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_constructor_short_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.short_line))", "def test_constructor_short_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.short_line))", "def test_addr_state_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_state(val))", "def test_bad_data(self):\n # Bad checksum\n # If checksum is bad, skip the record and continue parsing.\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_CHECKSUM)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback)\n # Only the header and second record, particle_b should be returned.\n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. 
Record containing bad data should have been skipped.\", len(result))\n \n # Incorrect number of bytes\n # If numbytes is incorrect, skip the record and continue parsing.\n self.start_state = {StateKey.POSITION: 0}\n self.stream_handle = StringIO(AdcpsJlnStcParserUnitTestCase.BAD_NUM_BYTES)\n self.parser = AdcpsJlnStcParser(self.config, self.start_state, self.stream_handle,\n self.state_callback, self.pub_callback, self.exception_callback) \n result = self.parser.get_records(3)\n self.assertEqual(self.publish_callback_value[0], self.particle_header_footer)\n self.assertEqual(self.publish_callback_value[1], self.particle_b)\n if len(result) != 2:\n self.fail(\"Expected two records and got %d. Record containing bad data should have been skipped.\", len(result))", "def test_parser_exception(self):\n # file contains 1 invalid sample values, 17 PH records total\n self.create_sample_data_set_dir('node59p1_bad.dat', TELEM_DIR, \"node59p1.dat\")\n\n self.assert_initialize()\n\n self.event_subscribers.clear_events()\n result = self.get_samples(DataParticleType.CONTROL, 1)\n result = self.get_samples(DataParticleType.SAMPLE, 16, 30)\n self.assert_sample_queue_size(DataParticleType.CONTROL, 0)\n self.assert_sample_queue_size(DataParticleType.SAMPLE, 0)\n\n # Verify an event was raised and we are in our retry state\n self.assert_event_received(ResourceAgentErrorEvent, 10)\n self.assert_state_change(ResourceAgentState.STREAMING, 10)", "def test_error_basis_state_format(self, basis_state, wires):\n\n with pytest.raises(ValueError, match=\"'basis_state' must only contain\"):\n BasisStatePreparation(basis_state, wires)", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n JhmdbPCKAccuracy(norm_item='invalid')", "def test_parse(self):\n report = (\n \"KJFK 032151Z 16008KT 10SM FEW034 FEW130 BKN250 27/23 A3013 RMK AO2 SLP201\"\n )\n data, units = metar.parse(report[:4], report)\n self.assertIsInstance(data, structs.MetarData)\n self.assertIsInstance(units, structs.Units)\n self.assertEqual(data.raw, report)", "def test_schema_invalid_type(self):\n bad_schema = -77\n with self.assertRaisesRegexp(Exception, \"more than one char\"):\n self.context.frame.import_csv(self.dataset, bad_schema)", "def test_exception(self):\n exception_message = \"Antimony: Error in model string, line 19: syntax error, unexpected end of file\"\n try:\n\n with model.BuildAntimony(self.copasi_file2) as loader:\n self.mod2 = loader.load(\n \"\"\"\n model model2\n compartment cell = 1.0\n var A in cell\n var B in cell\n \n vAProd = 0.1\n kADeg = 0.2\n kBProd = 0.3\n kBDeg = 0.4\n vBasalAProd = 0.001\n A = 0\n B = 0\n \n AProd: => A; cell*vAProd*B+vBasalAProd\n ADeg: A =>; cell*kADeg*A\n BProd: => B; cell*kBProd*A\n BDeg: B => ; cell*kBDeg*B\n \"\"\"\n )\n\n except Exception as E:\n self.assertEqual(str(E), str(exception_message))", "def test_buoy_format2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_2)\n assert str(err_info.value) == 'Input length incorrect, see instructions'", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n MpiiPCKAccuracy(norm_item='invalid')", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def test_data_parse_invalid_json(self):\n lines = ['{\"a\": \"val\" 
\"b\": \"val2\"}']\n self.assertRaises(TypeError, parser._parse_data, lines)", "def test_missing_delim(self):", "def test_empty(self):\n record = ''\n\n self.assertRaises(ParseException, self.grammar.parseString, record)", "def test_examples():\n assert nz_bank_validate(*'01-902-0068389-00'.split('-'))\n assert nz_bank_validate(*'08-6523-1954512-001'.split('-'))\n assert nz_bank_validate(*'26-2600-0320871-032'.split('-'))", "def test_age_bad_value(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_age(val))", "def test_init(self):\n # test invalid normalized_items\n with self.assertRaisesRegex(\n KeyError, \"Should be one of 'bbox', 'head', 'torso'\"):\n PCKAccuracy(norm_item='invalid')", "def test_non_int_value_raises_an_exception():\n test_file = StringIO(\n u'fri,wed\\na,6'\n )\n\n csv_parser = CSVParser(test_file)\n\n with pytest.raises(ValueError):\n csv_parser.parse()", "def test_missing_split_by_value():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot X temperature_mid SPLIT BY\n \"\"\"\n\n # TODO Get a more specific exception here.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_get_invalid_line(self):\n ars = self.ar[2009][11]['general']\n self.assertRaises(KeyError, ars.__getitem__, 'invalid_section')", "def test_parse_output_error(self):\n self.assertRaises(ValueError,\n parse_output,\n hgt_results_fp=self.consel_output_hgt_fp,\n method=\"Consel\")", "def test_addr_country_bad_values(self):\n for val in self.bad_values:\n self.assertRaises(line_format_errors.FieldParseError,\n lambda: self.line._parse_addr_country(val))", "def test_Product_name_cannot_contain_a_number(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_3',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Please enter strings in name and category!')\n self.assertEqual(resp.status_code, 400)", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def test_xyzp_qm_7b():\n subject = subject7\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True, dtype='xyz')", "def test_psi4_qmefpformat_error_6c():\n\n subject = subject6.replace(' efp h2O', '0 1\\n efp h2O')\n\n with pytest.raises(qcelemental.MoleculeFormatError):\n final, intermed = qcelemental.molparse.from_string(subject, return_processed=True)", "def test_label_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot\n Y classification COUNT\n SPLIT BY classification\n X date BY YEAR LABEL 1.2\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_transform_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n SCATTER bigfoot X TRANSFORM 1.2 Y temperature_mid\n \"\"\"\n\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_bad_values(self):\n self.assertOK([60])\n self.assertRaisesInternalError([59.9])\n self.assertRaisesInternalError([''])\n self.assertRaisesInternalError([';'])\n self.assertRaisesInternalError(['('])\n 
self.assertRaisesInternalError([None])", "def test_hole_with_non_number():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n PIE bigfoot HOLE hi AXIS classification\n \"\"\"\n\n with pytest.raises(SvlTypeError):\n parse_svl(svl_string)", "def test_parse_ingredients(self):\n pass", "def test_dataset_file_with_non_string():\n svl_string = \"\"\"\n DATASETS bigfoot 3\n LINE bigfoot X date BY YEAR Y date COUNT\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test_valid_crtf_line():\n line_str = 'coord=B1950_VLA, frame=BARY, corr=[I, Q], color=blue'\n\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(line_str, format='crtf')\n\n assert 'Not a valid CRTF line:' in str(excinfo.value)", "def test_parse_invalid_df(self):\n mock_scraper = MockCtdScraper()\n mock_scraper.filename = \"sample_corrupt_ctd_db.csv\"\n scrape_gen = mock_scraper.scrape(TEST_CHUNKSIZE)\n with pytest.raises(ParserError):\n self.parser.parse(next(scrape_gen))", "def test_should_raise_error_if_type_is_invalid(self):\r\n with self.assertRaises(ValueError):\r\n self.spec_parser.parse_statement({'type': 'sugar'})", "def test_bad_data(self):\r\n # LB180210_3_corrupted.PD0 has three records in it, the 2nd record was corrupted\r\n with open(os.path.join(RESOURCE_PATH, 'LB180210_3_corrupted.PD0'), 'rb') as stream_handle:\r\n\r\n parser = AdcpPd0Parser(self.config_recov, stream_handle, self.exception_callback)\r\n\r\n # try to get 3 particles, should only get 2 back\r\n # the second one should correspond to ensemble 3\r\n parser.get_records(3)\r\n\r\n log.debug('Exceptions : %s', self.exception_callback_value[0])\r\n\r\n self.assertEqual(len(self.exception_callback_value), 1)\r\n self.assert_(isinstance(self.exception_callback_value[0], RecoverableSampleException))", "def test_from_knx_wrong_parameter_too_large(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)", "def test_valid_region_type():\n reg_str = 'hyperbola[[18h12m24s, -23d11m00s], 2.3arcsec]'\n\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str, format='crtf')\n\n assert 'Not a valid CRTF Region type: \"hyperbola\"' in str(excinfo.value)", "def test_from_knx_wrong_parameter_too_small(self):\n raw = [\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n 0x00,\n ]\n with self.assertRaises(ConversionError):\n DPTString().from_knx(raw)", "def test_150(self):\n self.assertRaises(\n exceptions.DataONEExceptionException, exceptions.deserialize,\n INVALID_ERROR_DOC[0]\n )", "def test_split_string_wrong_input_data(self):\n self.assertEqual(\"Wrong input data\", split_string(13))", "def test_color_by_on_wrong_chart():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n PIE bigfoot AXIS classification COLOR BY humidity\n \"\"\"\n\n # TODO Make this exception more specific if possible.\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def test__parse_sku():\n for input_data, expected_output in (\n ({}, ''),\n ({'sku': None}, ''),\n ({'sku': ''}, ''),\n ({'sku': 'a'}, 'a'),\n ):\n output = parse_sku(input_data)\n vampytest.assert_eq(output, expected_output)", "def test_valid_region_syntax():\n reg_str1 = 'circle[[18h12m24s, -23d11m00s], [2.3arcsec,4.5arcsec]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n 
Regions.parse(reg_str1, format='crtf')\n\n estr = (\"Not in proper format: ('2.3arcsec', '4.5arcsec') should be \"\n \"a single length\")\n assert estr in str(excinfo.value)\n\n reg_str2 = ('symbol[[32.1423deg, 12.1412deg], 12deg], linewidth=2, '\n 'coord=J2000, symsize=2')\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str2, format='crtf')\n estr = 'Not in proper format: \"12deg\" should be a symbol'\n assert estr in str(excinfo.value)\n\n reg_str3 = 'circle[[18h12m24s, -23d11m00s]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str3, format='crtf')\n estr = ('Does not contain expected number of parameters for the region '\n '\"circle\"')\n assert estr in str(excinfo.value)\n\n reg_str4 = 'poly[[1, 2], [4, 5]]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str4, format='crtf')\n assert 'polygon should have >= 3 coordinates' in str(excinfo.value)\n\n reg_str6 = 'rotbox[[12h01m34.1s, 12d23m33s], [3arcmin,], 12deg]'\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(reg_str6, format='crtf')\n assert \"('3arcmin', '') should be a pair of lengths\" in str(excinfo.value)", "def test_deserialize_missing_data(self):\n data = {\"product_id\": 1}\n recommendation = Recommendation()\n self.assertRaises(DataValidationError, recommendation.deserialize, data)", "def test_release_tag_for_empty(self) -> None:\n with self.assertRaisesRegexp(ValueError, \"Unable to parse version \"):\n release_tag()", "def test_basic_invalid_bill():\n b = toy_bill()\n b.identifier = None\n with pytest.raises(ValueError):\n b.validate()", "def test_probabilistic_parsers():", "def test_nonsense_decimal(self):\n test_passes = False\n try:\n self.parser.extract_zt(\"ZT.\")\n test_passes = False\n except Exception as e:\n test_passes = True\n self.assertTrue(test_passes)", "def test_fetch_nonexist_pdbfmt(self):\n pdbid = '6SL9'\n with self.assertRaisesRegex(ValueError,\n 'The PDB ID given is only represented in mmCIF format'):\n fetch(pdbid)", "def test_incompatible_rules():\n\n grammar = \"\"\"\n A: B | C;\n B: 'enumeration';\n C: value=INT;\n \"\"\"\n with pytest.raises(TextXSyntaxError):\n metamodel_from_str(grammar)", "def test_interpret_error(self):\r\n two_choice_two_input = self._make_problem(\r\n [\r\n (\"true\", {\"answer\": \"123\", \"tolerance\": \"1\"}),\r\n (\"false\", {})\r\n ],\r\n \"checkboxtextgroup\"\r\n )\r\n\r\n with self.assertRaisesRegexp(StudentInputError, \"Could not interpret\"):\r\n # Test that error is raised for input in selected correct choice.\r\n self.assert_grade(\r\n two_choice_two_input,\r\n self._make_answer_dict([(True, [\"Platypus\"])]),\r\n \"correct\"\r\n )\r\n\r\n with self.assertRaisesRegexp(StudentInputError, \"Could not interpret\"):\r\n # Test that error is raised for input in selected incorrect choice.\r\n self.assert_grade(\r\n two_choice_two_input,\r\n self._make_answer_dict([(True, [\"1\"]), (True, [\"Platypus\"])]),\r\n \"correct\"\r\n )", "def test_invalid_expression_type(self, parse_input_mocked_metadata):\n with pytest.raises(TypeError, match=r\"not of declared type int\"):\n parse_input_mocked_metadata(\"int Beta = -0.231e-6+5.21e-2j\")", "def test_invalid_variable_name(self, parse_input_mocked_metadata):\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float name = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float target = 
5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float version = 5\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array name =\\n\\t-0.1, 0.2\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array target =\\n\\t-0.1, 0.2\")\n\n with pytest.raises(BlackbirdSyntaxError, match=\"reserved Blackbird keyword\"):\n parse_input_mocked_metadata(\"float array version =\\n\\t-0.1, 0.2\")", "def test_import_report_malformed():\n with pytest.raises(DmarcImportError):\n import_feedback_report('<malform></ed>')", "def test_missing_step_value():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n HISTOGRAM bigfoot STEP X moon_phase\n \"\"\"\n\n with pytest.raises(SvlMissingValue):\n parse_svl(svl_string)", "def test_bad_data(self):\n # bad data file has:\n # 1 bad status\n # particle A has bad timestamp\n # particle B has bad dark fit\n # particle C has bad frame type\n # particle D has bad year\n stream_handle = open(os.path.join(RESOURCE_PATH,\n 'bad_SNA_SNA.txt'), MODE_ASCII_READ)\n\n self.create_parser(stream_handle, telem_flag=False)\n\n # get E, since it is first it will generate a metadata\n particles = self.parser.get_records(2)\n\n # check all the values against expected results.\n self.assert_particles(particles, 'last_and_meta_SNA_recov.yml', RESOURCE_PATH)\n\n # should have had 5 exceptions by now\n self.assertEqual(len(self.exception_callback_value), 5)\n\n for exception in self.exception_callback_value:\n self.assert_(isinstance(exception, RecoverableSampleException))", "def test05(self):\n\n s = \"a\"\n with self.assertRaises(ParserException):\n t = parse_newick(s)", "def test_negativeQuantity(self):\n result = self.parser.parse(\"-1d6\")\n\n # TODO\n # self.assertIsNone(result)", "def test_make_tool_plugin_parse_invalid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n output = \"invalid text\"\n issues = mtp.parse_output(package, output)\n assert not issues", "def test_valid_meta_key():\n meta_test_str = ('annulus[[17h51m03.2s, -45d17m50s], [0.10deg, 4.12deg]], '\n 'hello=\"My label here\"')\n with pytest.raises(CRTFRegionParserError) as excinfo:\n Regions.parse(meta_test_str, format='crtf')\n assert '\"hello\" is not a valid meta key' in str(excinfo.value)", "def test_parse_redis_data_error():\n with pytest.raises(ValueError):\n redis_data.parse_redis_data(b\"this is some data\")", "def test_stock_and_price_must_be_numbers(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock='stock',\n price='money'\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'The Stock and Price must be numbers!')\n self.assertEqual(resp.status_code, 400)", "def test_old_grammar_fails():\n\n strings = [\n 'update schema for test set a = continuous',\n 'update schema for test set a = multinomial',\n # Cyclic is invalid because it should be followed by (min, max) parameters.\n 'update schema for test set a = cyclic'\n ]\n\n for query_string in strings:\n with pytest.raises(ParseException):\n 
ast = bql_statement.parseString(query_string, parseAll=True)", "def test_step_on_non_histogram():\n svl_string = \"\"\"\n datasets bigfoot \"bigfoot.csv\"\n BAR bigfoot X classification Y classification COUNT STEP 0.1\n \"\"\"\n\n with pytest.raises(SvlUnsupportedDeclaration):\n parse_svl(svl_string)", "def test_non_numberic_validation(self):", "def test_non_numberic_validation(self):", "def test_lpdaac_bad(self):\n with self.assertRaises(InventoryException):\n api.inventory.check(self.lpdaac_order_bad)", "def test_only_nums_are_valid_inputs():\n bad_inputs = [[\"boop\", \"boink\"], 10, 99.99, {\"one\": 2, \"three:\": 4}]\n\n for input in bad_inputs:\n with pytest.raises(AttributeError):\n song_decoder(bad_inputs)" ]
[ "0.6412361", "0.63490635", "0.6333902", "0.6294638", "0.62427443", "0.6210081", "0.6208923", "0.6203845", "0.6167732", "0.61114794", "0.6096026", "0.60889727", "0.6073056", "0.60688126", "0.6063661", "0.60588723", "0.6050436", "0.6005442", "0.5976354", "0.5972584", "0.59714663", "0.59529483", "0.59517145", "0.5950122", "0.59473485", "0.5932474", "0.59285265", "0.5907993", "0.5878064", "0.5860889", "0.5850617", "0.5830019", "0.5805635", "0.5802414", "0.5801592", "0.5793732", "0.5772458", "0.5768094", "0.57668674", "0.57667", "0.5764261", "0.5743585", "0.5730291", "0.5726231", "0.57242805", "0.5718931", "0.5713502", "0.5701276", "0.56588024", "0.5650914", "0.5646851", "0.5645258", "0.56391126", "0.56361043", "0.5636084", "0.5629738", "0.56221163", "0.562161", "0.5617644", "0.5612747", "0.56126666", "0.5598138", "0.559604", "0.5587212", "0.55857813", "0.55852056", "0.5572061", "0.5564212", "0.5560579", "0.55576605", "0.5557027", "0.5556849", "0.55490965", "0.5546512", "0.554301", "0.5533598", "0.55311394", "0.5530044", "0.5526834", "0.55155957", "0.55149406", "0.5514556", "0.5513806", "0.5512909", "0.5510758", "0.5508489", "0.5503166", "0.5498079", "0.5497648", "0.5497644", "0.54973817", "0.54889894", "0.5486173", "0.54739445", "0.5465119", "0.54637617", "0.5462643", "0.5462643", "0.54621214", "0.54532397" ]
0.8058622
0
Publish apps to the 21 Marketplace. \b Usage _____ Publish your app to the 21 Marketplace. $ 21 publish submit path_to_manifest/manifest.yaml To update your published listing, run the above command after modifying your manifest.yaml. \b Publish your app to the 21 Marketplace without strict checking of the manifest against your current IP. $ 21 publish submit -s path_to_manifest/manifest.yaml \b See the help for submit. $ 21 publish submit --help \b View all of your published apps. $ 21 publish list \b See the help for list. $ 21 publish list --help \b Remove one of your published apps from the marketplace. $ 21 publish remove {app_id} \b See the help for remove. $ 21 publish remove --help
def publish(): pass
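A minimal sketch of how a stub like the one above is commonly wired up, assuming the Click framework implied by the "$ 21 publish <subcommand>" usage text in the query; the subcommand signatures, the -s/--skip flag, and the echoed messages below are inferred from that text and are illustrative, not taken from this record.

import click


@click.group()
def publish():
    """Publish apps to the 21 Marketplace."""
    pass  # subcommands attach to this group via @publish.command()


@publish.command()
@click.argument('manifest_path')
@click.option('-s', '--skip', is_flag=True,
              help='Skip strict checking of the manifest against your current IP.')
def submit(manifest_path, skip):
    """Publish or update your app listing from a manifest file."""
    click.echo('Submitting {} (skip IP check: {})'.format(manifest_path, skip))


@publish.command(name='list')
def list_apps():
    """View all of your published apps."""
    click.echo('Listing published apps...')


@publish.command()
@click.argument('app_id')
def remove(app_id):
    """Remove one of your published apps from the marketplace."""
    click.echo('Removing app {}'.format(app_id))


if __name__ == '__main__':
    publish()

With this wiring, "21 publish submit -s manifest.yaml" and "21 publish list --help" resolve exactly as the usage text above describes; Click generates the --help output for each subcommand automatically.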
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"support@21.co.\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. 
\"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )", "def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n logger.error(\n \"Manifest parameter overrides should be in the form 'key1=\\\"value1\\\" \"\n \"key2=\\\"value2\\\".\",\n fg=\"red\")\n return\n\n _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n 
repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def test_publish_deployment_run(self):\n pass", "def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)", "def deploy():\n local('appcfg.py --no_cookies --email=mccutchen@gmail.com update .',\n capture=False)", "def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()", "def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package", "def deploy():\n build()\n collect()\n commit()\n push()", "def finish_publish(hash, metadata, engine_id=None, username=USER):\n identity = \"%s@%s\" % (username, get_config('domain'))\n library = Library.objects.get(identity=identity)\n library.add_item(\n engine_id=engine_id,\n origin=identity,\n metadata=metadata\n )\n return \"OK\"", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def cmd_gallery_publish(client, args):\n publish_to_imgur = client.share_on_imgur(args.item_id, args.title, args.terms)\n generate_output({'publish_to_imgur': publish_to_imgur})", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to 
/{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def deploy_app(device_id, app_id, app_version):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/applications\"}\n versions = esapp.App(kargs).get_app_version_by_id(app_id)\n\n kargs.update({\"url_path\": \"/tasks\"})\n if not app_version in versions:\n sys.exit(\"Fail: app_version \\\"%s\\\" not found, available list:%s\" \\\n %(str(app_version), str(jsn.dumps(versions))))\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.create_app_task(device_id, app_version, app_id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n sys.exit(\"Fail: error response\")\n\n try:\n click.echo(\"Success to create a task id: %s\" %(str(dict_resp[\"task_id\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(1)", "def push_dockerhub(c, app, version, latest=False):\n if app.lower() == 'core':\n c.run('sudo docker push kinecosystem/stellar-core:{version}'.format(version=version))\n if latest:\n c.run('sudo docker push kinecosystem/stellar-core:latest')\n elif app.lower() == 'horizon':\n c.run('sudo docker push kinecosystem/horizon:{version}'.format(version=version))\n if latest:\n c.run('sudo docker push kinecosystem/horizon:latest')\n elif app.lower() == 'friendbot':\n c.run('sudo docker push kinecosystem/friendbot:{version}'.format(version=version))\n if latest:\n c.run('sudo docker push kinecosystem/friendbot:latest')\n else:\n Exit('Unknown application {}'.format(app))", "def publish(digest, pid, api_key, tag):\n url = \"https://connect.redhat.com/api/v2/projects/{}/containers/{}/tags/{}/publish\".format(pid, digest, tag)\n headers = {\"accept\": \"*/*\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(api_key)}\n\n response = requests.post(url, headers=headers)\n\n if response.status_code != 201:\n print(\"Unable to publish, invalid status code: {}.\".format(response.status_code))\n print(response)\n print(response.content)\n sys.exit(1)\n else:\n print(\"Docker image '{}' successfully scheduled for publishing.\".format(digest))", "def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")", "def cli():\n update_all_posts()\n push_updates()", "def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)", "def publish_messages(line): \n command = \"gcloud beta pubsub topics 
publish \"+ topic_name+\" --message \"+'\"'+str(line)+'\"'\n os.system(command)", "def upload():\n sh('python setup.py register sdist upload')", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)", "def publish(\n self,\n db: PysonDB,\n *,\n production: bool, # PyPI or Test-PyPi\n build=False, #\n force=False, # publish even if no changes\n dry_run=False, # do not actually publish\n clean: bool = False, # clean up afterwards\n ) -> (\n bool\n ): # sourcery skip: assign-if-exp, default-mutable-arg, extract-method, remove-unnecessary-else, require-parameter-annotation, swap-if-else-branches, swap-if-expression\n log.info(f\"Publish: {self.package_path.name}\")\n # count .pyi files in the package\n filecount = len(list(self.package_path.rglob(\"*.pyi\")))\n if filecount == 0:\n log.debug(f\"{self.package_name}: starting build as no .pyi files found\")\n build = True\n\n if build or force or self.is_changed():\n self.build(production=production, force=force)\n\n if not self._publish:\n log.debug(f\"{self.package_name}: skip publishing\")\n return False\n\n self.update_pkg_version(production=production)\n # Publish the package to PyPi, Test-PyPi or Github\n if self.is_changed() or force:\n if self.mpy_version == \"latest\":\n log.warning(\"version: `latest` package will only be available on Github, and not published to PyPi.\")\n self.status[\"result\"] = \"Published to GitHub\"\n else:\n self.update_hashes() # resets is_changed to False\n if not dry_run:\n pub_ok = self.poetry_publish(production=production)\n else:\n log.warning(f\"{self.package_name}: Dry run, not publishing to {'' if production else 'Test-'}PyPi\")\n pub_ok = True\n if not pub_ok:\n log.warning(f\"{self.package_name}: Publish failed for {self.pkg_version}\")\n self.status[\"error\"] = \"Publish failed\"\n return False\n self.status[\"result\"] = \"Published to PyPi\" if production else \"Published to Test-PyPi\"\n self.update_hashes()\n if dry_run:\n log.warning(f\"{self.package_name}: Dry run, not saving to database\")\n else:\n # get the package state and add it to the database\n db.add(self.to_dict())\n db.commit()\n return True\n else:\n log.info(f\"No changes to package : {self.package_name} {self.pkg_version}\")\n\n if clean:\n self.clean()\n return True", "def deploy_app(self, app_info):\n raise NotImplementedError", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. 
Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def deploy():", "def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')", "def PublishIt(name, path, comments, task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n\n PubCollections = db['submissions']\n\n # creation of the dailies submission entry\n publishDict = dict()\n publishDict['date'] = now\n publishDict['type'] = \"publish\"\n publishDict['user_name'] = main_user\n publishDict['task'] = task\n publishDict['status'] = status\n publishDict['asset'] = name\n publishDict['path'] = path\n publishDict['comment'] = comments\n PubCollections.save(publishDict)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"publish\", shot, now)", "def website_publish_button(self):\n if self.website_published:\n self.write({'website_published': False})\n else:\n self.write({'website_published': True})", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy(self):\n\n netlify_cli = getattr(settings, \"NETLIFY_PATH\", None)\n if not netlify_cli:\n raise CommandError(\"NETLIFY_PATH is not defined in settings\")\n\n deployment = Deployment()\n deployment.save()\n\n command = [netlify_cli, \"deploy\"]\n command.append(\"--dir={}\".format(settings.BUILD_DIR))\n command.append(\"--prod\")\n command.append('--message=\"Wagtail Deployment #{}\"'.format(deployment.pk))\n\n site_id = getattr(settings, \"NETLIFY_SITE_ID\", None)\n if site_id:\n command.append(\"--site={}\".format(site_id))\n\n auth_token = getattr(settings, \"NETLIFY_API_TOKEN\", None)\n if auth_token:\n command.append(\"--auth={}\".format(auth_token))\n\n subprocess.call(command)", "def submit_feed(self, feed, feed_type, marketplaceids=None,\n content_type=\"text/xml\", purge='false'):\n md = to_md5(feed)\n data = dict(Action='SubmitFeed',\n FeedType=feed_type,\n PurgeAndReplace=purge,\n ContentMD5Value=md)\n data.update(utils.enumerate_param('MarketplaceIdList.Id.', marketplaceids))\n return self.make_request(data, method=\"POST\", body=feed,\n extra_headers={'Content-Type': content_type})", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if 
hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def PostApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def release_pypi():\n local('python setup.py clean sdist register upload')", "def post_publish(request, pk):\n post = get_object_or_404(Post, pk=pk)\n post.publish()\n return redirect('post_detail', pk=pk)", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": 
manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def publish_release(release_id):\n logger.info(f\"Publishing release {release_id}\")\n\n release = Release.objects.select_related().get(kf_id=release_id)\n studies = [study.kf_id for study in release.studies.all()]\n release.publish()\n tasks = release.tasks.all()\n\n # Should always have at least one task service for a release, but if there\n # are none, publish skip to published\n if not tasks:\n release.complete()\n\n release.save()\n\n for task in tasks:\n body = {\n \"action\": \"publish\",\n \"task_id\": task.kf_id,\n \"release_id\": release.kf_id,\n \"studies\": studies,\n }\n failed = False\n resp = None\n try:\n resp = requests.post(\n task.task_service.url + \"/tasks\",\n headers=headers(),\n json=body,\n timeout=settings.REQUEST_TIMEOUT,\n )\n resp.raise_for_status()\n except requests.exceptions.RequestException as err:\n logger.error(\n f\"problem requesting task for publish: \" + f\"{resp.content}\"\n )\n failed = True\n\n ev = Event(\n event_type=\"error\",\n message=f\"request to publish task failed: {err}\",\n release=release,\n task=task,\n task_service=task_service,\n )\n ev.save()\n\n # Check that command was accepted\n if resp and resp.status_code != 200:\n logger.error(\n f\"invalid code from task for publish: \" + \"{resp.status_code}\"\n )\n failed = True\n\n if (\n resp\n and \"state\" in resp.json()\n and resp.json()[\"state\"] != \"publishing\"\n ):\n logger.error(\n f\"invalid state returned from task for publish: \"\n + f\"{resp.content}\"\n )\n failed = True\n\n if failed:\n release.cancel()\n release.save()\n task.failed()\n task.save()\n django_rq.enqueue(cancel_release, release.kf_id, True)\n break\n\n task.publish()\n task.save()", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def publish(self,toolname):\n\n self.logger.info(\"publishing '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusApprovedAdminPage',toolname)\n po.goto_page()\n\n # click the publish link\n publish_status,output = po.do_publish()\n\n # wait for the output success / failure block to appear\n if publish_status is False:\n raise RuntimeError(\"finalizetool failed: %s\" % (output))\n\n # mark project as created\n self.flip_tool_status('ToolsStatusApprovedAdminPage',toolname,'Published')\n\n # check that the tool is in the published state\n tool_state = po.get_tool_state()\n if tool_state.lower() != 'Published'.lower():\n raise Exception('Incorrect tool state: %s, expected \"Published\"'\\\n % tool_state)", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def execute(\n name: str,\n *args: Any,\n **kwargs: Any\n ) -> None:\n cherrypy.engine.publish(name, *args, **kwargs) # type: ignore", "def deploy(args):\n from scrapyd_client import deploy\n\n sys.argv.pop(1)\n deploy.main()", "def cvmfsPublish(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"publish\", \"-f\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not publish 
CVMFS transaction\")", "def deploy(parameters):\n\n print(\"In deploy module\")", "def publish_bulk(ctx, platform, src_template, dst_template, signed_push=False):\n for p in platform:\n parts = p.split(\"/\")\n\n if len(parts) != 2:\n print(\"Invalid platform format: expected 'OS/ARCH' parameter, got {}\".format(p))\n raise Exit(code=1)\n\n def evalTemplate(s):\n s = s.replace(\"OS\", parts[0].lower())\n s = s.replace(\"ARCH\", parts[1].lower())\n return s\n\n publish(ctx, evalTemplate(src_template), evalTemplate(dst_template), signed_push=signed_push)", "def push_blog():\n\n\twarn(green(\"Update blog on github pages.\"))\n\t_setup_virtualenv()\n\n\twith cd(PROJECT_PATH):\n\t\twith prefix(env.activate):\n\t\t\tlocal('python blog.py build', shell='/bin/bash')\n\n\t\tlocal('cd {}'.format(FREEZER_DESTINATION), shell='/bin/bash')\n\t\tlocal('git status')\n\t\task_msg = red(\"Force push new content to blog?\")\n\t\tif console.confirm(ask_msg, default=False) is True:\n\t\t\tlocal('git add --all')\n\t\t\tlocal('git commit -m \"new articles\"')\n\t\t\tlocal('git push --force origin master')", "def deploy_application(target_environment, config_file, branch, force): # noqa\n # read in and parse configuration\n app = config.AppConfiguration.load(\n config_file or\n os.path.join(settings.app_conf_dir, '%s.conf' % target_environment)\n )\n app_name = app.app_name\n branch = branch or app.default_branch or git.get_current_branch()\n\n # get the contents of the proposed deployment\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n remote_hash = release.commit\n if app.use_pipeline:\n # if we are using pipelines, then the commit we need is not the\n # local one, but the latest version on the upstream app, as this\n # is the one that will be deployed.\n upstream_release = heroku.HerokuRelease.get_latest_deployment(app.upstream_app) # noqa\n local_hash = upstream_release.commit\n else:\n local_hash = git.get_branch_head(branch)\n\n if local_hash == remote_hash:\n click.echo(u\"Heroku application is up-to-date, aborting deployment.\")\n return\n\n files = git.get_files(remote_hash, local_hash)\n commits = git.get_commits(remote_hash, local_hash)\n\n post_deploy_tasks = app.post_deploy_tasks\n\n click.echo(\"\")\n click.echo(\"Comparing %s..%s\" % (remote_hash, local_hash))\n click.echo(\"\")\n click.echo(\"The following files have changed since the last deployment:\\n\") # noqa\n if len(files) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" * %s\\n\" % f for f in files]))\n click.echo(\"\")\n click.echo(\"The following commits will be included in this deployment:\\n\") # noqa\n if len(commits) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" [%s] %s\\n\" % (c[0], c[1]) for c in commits]))\n\n # ============== summarise actions ==========================\n click.echo(\"\")\n click.echo(\"Summary of deployment options:\") # noqa\n click.echo(\"\")\n click.echo(\" ----- Deployment SETTINGS -----------\")\n click.echo(\"\")\n click.echo(\" Git branch: %s\" % branch)\n click.echo(\" Target env: %s (%s)\" % (target_environment, app_name))\n click.echo(\" Force push: %s\" % force)\n # pipeline promotion - buildpack won't run\n click.echo(\" Pipeline: %s\" % app.use_pipeline)\n if app.use_pipeline:\n click.echo(\" Promote: %s\" % app.upstream_app)\n click.echo(\" Release tag: %s\" % app.add_tag)\n click.echo(\"\")\n click.echo(\" ----- Post-deployment commands ------\")\n click.echo(\"\")\n\n if not post_deploy_tasks:\n click.echo(\" (None 
specified)\")\n else:\n [click.echo(\" %s\" % x) for x in post_deploy_tasks]\n\n click.echo(\"\")\n # ============== / summarise actions ========================\n\n # put up the maintenance page if required\n maintenance = utils.prompt_for_action(\n u\"Do you want to put up the maintenance page?\",\n False\n )\n\n if not utils.prompt_for_pin(\"\"):\n exit(0)\n\n if maintenance:\n click.echo(\"Putting up maintenance page\")\n heroku.toggle_maintenance(app_name, True)\n\n if app.use_pipeline:\n click.echo(\"Promoting upstream app: %s\" % app.upstream_app)\n heroku.promote_app(app.upstream_app)\n else:\n click.echo(\"Pushing to git remote\")\n git.push(\n remote=git.get_remote_url(app_name),\n local_branch=branch,\n remote_branch='master',\n force=force\n )\n\n if post_deploy_tasks:\n click.echo(\"Running post-deployment tasks:\")\n run_post_deployment_tasks(post_deploy_tasks)\n\n if maintenance:\n click.echo(\"Pulling down maintenance page\")\n heroku.toggle_maintenance(app_name, False)\n\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n if app.add_tag:\n click.echo(\"Applying git tag\")\n message = \"Deployed to %s by %s\" % (app_name, release.deployed_by)\n git.apply_tag(commit=local_hash, tag=release.version, message=message)\n\n click.echo(release)", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def test_01_app_post(self):\r\n url = '/api/app?api_key=' + self.api_key\r\n self.check_limit(url, 'post', 'app')", "def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)", "def publish(self, review_request):\r\n self.debug('Publishing')\r\n self.api_call('api/review-requests/%s/publish/' %\r\n review_request['id'])", "def publish(self, kpi_dict):\n pass", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def main(event):\n post = Post(frontmatter.load(event))\n if post.meetup_id is None:\n resp = create_meetup(post)\n post.meetup_id = resp[\"id\"]\n post.write(event)\n else:\n resp = update_meetup(post)\n # buttons(post)", "def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not 
cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. May require manual removal.\", LOG.ERROR)", "def deploy_action(self, **kwargs):\n pytan.utils.check_for_help(kwargs=kwargs)\n\n # the human string describing the sensors/filter that user wants\n # to deploy the action against\n action_filters = kwargs.get('action_filters', [])\n\n # the question options to use on the pre-action question and on the\n # group for the action filters\n action_options = kwargs.get('action_options', [])\n\n # name of package to deploy with params as {key=value1,key2=value2}\n package = kwargs.get('package', '')\n\n action_filter_defs = pytan.utils.dehumanize_sensors(action_filters, 'action_filters', True)\n action_option_defs = pytan.utils.dehumanize_question_options(action_options)\n package_def = pytan.utils.dehumanize_package(package)\n\n clean_keys = ['package_def', 'action_filter_defs', 'action_option_defs']\n clean_kwargs = pytan.utils.clean_kwargs(kwargs=kwargs, keys=clean_keys)\n\n deploy_result = self._deploy_action(\n action_filter_defs=action_filter_defs,\n action_option_defs=action_option_defs,\n package_def=package_def,\n **clean_kwargs\n )\n return deploy_result", "def publish(\n hass: HomeAssistant,\n topic: str,\n payload: PublishPayloadType,\n qos: int | None = 0,\n retain: bool | None = False,\n encoding: str | None = DEFAULT_ENCODING,\n) -> None:\n hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)", "def deploy():\n build()\n copy()\n install()", "def deploy():\n with cd('/apps/sharejs-rethinkdb-example'):\n run('pwd')\n run('git stash')\n run('git pull -f origin master')\n run('fig -f prod.yml stop')\n run('fig -f prod.yml build')\n run('fig -f prod.yml up -d')", "def post_to_writeas(token, publication, post_title, post_date, post_content):\n writeas_post_url = 'https://write.as/api/collections/{}/posts'.format(\n publication)\n writeas_auth_header = {\n 'Authorization': token,\n 'Content-Type': 'application/json'\n }\n payload = {\n 'title': post_title,\n 'created': post_date,\n 'body': post_content\n }\n r = requests.post(\n writeas_post_url,\n headers=writeas_auth_header,\n json=payload)\n\n if r.status_code == 201:\n result = r.json()\n post_id = result['data']['id']\n print('Post {} successfully created!'.format(post_id))\n else:\n print(\"Publishing post FAILED. 
Response: {}\".format(r.text))\n writeas_logout(writeas_auth_token)\n sys.exit(1)", "def generate_publish_form(self, formid=\"deform\"):\n from phoenix.schema import PublishSchema\n return Form(schema=PublishSchema(), buttons=('publish',), formid=formid)", "def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)", "def deploy(env='development', update_settings='n', upgrade_apps='n'):\n update_site(env, update_settings, upgrade_apps)\n restart_site(env)", "def publish_action(self, action):\n raise NotImplementedError", "def process_deployapp ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n s3_infra_conn,\n r53_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params,\n monitor_params = None ) :\n target_env = base_name\n APP_NAME = app_name.upper( )\n deployment_ami_name = params.get( 'source-ami' )\n source_env = params[ 'source-env' ]\n TARGET_ENV = target_env.upper( )\n SOURCE_ENV = source_env.upper( )\n load_balancer = get_elb_name( target_env, app_name )\n instance_name = get_instance_name( target_env, app_name )\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n if not monitor_params :\n monitor_params = params.get( 'monitors' )\n\n instance_secgrp_name = get_secgrp_name( target_env, app_name )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( SOURCE_ENV ), app_name )\n if not deployment_ami_name :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! 
\" + deployment_ami_name\n sys.exit( 2 )\n\n subnets = get_vpc_subnets( vpc_conn, vpc, params.get( 'subnet-type', 'PRIVATE' ) )\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ instance_secgrp_name ] } )\n \n userdata = get_userdata( app_type, TARGET_ENV, app_name )\n \n new_instances = []\n num_instances = int( params.get( 'num-instances', len( subnets ) ) )\n if num_instances > len( subnets ) :\n num_instances = len( subnets )\n\n while num_instances > 0 :\n instance = launch_instance_vpc( ec2_conn,\n deployment_ami,\n base_name = base_name,\n instance_type = app_name,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrps[ 0 ].id ,\n subnet_id = subnets[ num_instances - 1 ].id,\n user_data = userdata,\n public_ip = False,\n wait_for_running = wait_on_launch )\n new_instances.append( instance )\n\n if monitor_params :\n print \"Setting alarms on the instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, APP_NAME, base_topicarn, monitor_params )\n\n num_instances -= 1\n\n new_instance_ids = [ i.id for i in new_instances ]\n\n if ( wait_on_launch ) :\n print \"Waiting for instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, new_instance_ids )\n\n print \"Creating AMI from instance server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = target_env + '-' + APP_NAME + '-' + timestamp\n ami_instance = new_instances[ 0 ]\n if not wait_on_launch :\n # We must wait for at least the ami instance to be available so we can create a new AMI from it.\n wait_on_object_state( ami_instance, 'running' )\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, new_instances[ 0 ], new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( TARGET_ENV ), app_name, new_ami.name )\n\n print \"Adding the new app instances into the load balancer.\"\n elb = find_elb( elb_conn, load_balancer )\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n if not status :\n print \"WARNING: Not all new app instances came up in the load balancer! 
Check the load balancer.\"\n\n print \"Deployment complete.\"", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def docker_push(c):\n cli_tasks.docker_push.run(c)", "def publish(id):\n event = Event.query.get_or_404(id)\n if (\n not current_user.is_organizer(event) and not current_user.is_administrator()\n ) or event.has_ended():\n return redirect(url_for(\"main.index\"))\n if event.description is None or event.pitch is None:\n flash(\"You cannot publish an event without adding a description or pitch.\", \"danger\")\n return redirect(url_for(\"events.event_details\", id=event.id))\n if event.packages.count() == 0:\n flash(\"You cannot publish an event without adding any packages.\", \"danger\")\n return redirect(url_for(\"events.packages\", id=event.id))\n event.published = True\n db.session.commit()\n flash(\"Your event has been published.\", \"success\")\n return redirect(url_for(\"main.index\"))", "def django_start_app(appname):\r\n \r\n actions = []\r\n errs = []\r\n \r\n app = wingapi.gApplication\r\n cmdline, dirname, err = _get_base_cmdline()\r\n if err is not None:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The Django app could not be created: %s\") % err\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n cmdline += ['startapp', appname]\r\n err, output = app.ExecuteCommandLine(cmdline, dirname, None, 5.0, return_stderr=True)\r\n if err != 0 or output[1]:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The command %s failed with error code %i and output:\\n\\n%s\\n\\n%s\") % (cmdline, err, _get_output(output), _kMissingPythonMessage)\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n actions.append(_(\"Created Django app %s in %s\") % (appname, dirname))\r\n \r\n # Add the new app to INSTALLED_APPS in settings.py\r\n manage_py, settings_py = _CDjangoPluginActivator._instance._FindKeyFiles()\r\n try:\r\n f = open(settings_py)\r\n txt = f.read()\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to read %s to update INSTALLED_APPS\"))\r\n else:\r\n lines = txt.splitlines()\r\n eol = _get_eol(txt)\r\n insert_line = None\r\n in_installed_apps = False\r\n for i, line in enumerate(lines):\r\n if line.lstrip().startswith('INSTALLED_APPS'):\r\n in_installed_apps = True\r\n elif in_installed_apps and line.strip().startswith(')'):\r\n in_installed_apps = False\r\n insert_line = i\r\n if insert_line is None:\r\n lines.extend(['', 'INSTALLED_APPS =', \" '%s',\" % appname, ')', ''])\r\n else:\r\n lines = lines[:insert_line] + [\" '%s',\" % appname] + lines[insert_line:]\r\n try:\r\n txt = eol.join(lines)\r\n f = open(settings_py, 'w')\r\n f.write(txt)\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to write %s to update INSTALLED_APPS\"))\r\n else:\r\n actions.append(_(\"Added %s to INSTALLED_APPS in %s\") % (appname, settings_py))\r\n \r\n title = _(\"The App was Created\")\r\n msg = _(\"The application was created. 
\")\r\n if errs:\r\n msg += _get_errors_list(errs)\r\n msg += _get_actions_list(actions)\r\n app.ShowMessageDialog(title, msg, modal=False)", "def push_updates():\n check_call(['git', 'push', '--tags', '--force'])", "def deploy(verbose, app, archive):\n return _deploy_in_mode(\n mode=\"live\", verbose=verbose, log=log, app=app, archive=archive\n )", "def add_publish_command(\n self, relative_manifest_path: str, asset_selector: str\n ) -> None:\n return jsii.invoke(\n self, \"addPublishCommand\", [relative_manifest_path, asset_selector]\n )", "def deploy(fingerengine, fingerprint):\n\n global cookie \n\n cfm_path = abspath(fingerengine.options.deploy) \n cfm_file = parse_war_path(cfm_path, True)\n dip = fingerengine.options.ip\n\n # set our session cookie\n cookie = checkAuth(dip, fingerprint.port, title)\n if not cookie:\n utility.Msg(\"Could not get auth to %s:%s\" % (dip, fingerprint.port),\n LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}..\".format(cfm_file))\n utility.Msg(\"Fetching web root..\", LOG.DEBUG)\n\n # fetch web root; i.e. where we can read the shell\n root = fetch_webroot(dip, fingerprint)\n if not root:\n utility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n return\n\n # create the scheduled task \n utility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n utility.Msg(\"Creating scheduled task...\")\n\n if not create_task(dip, fingerprint, cfm_file, root):\n return\n\n # invoke the task\n utility.Msg(\"Task %s created, invoking...\" % cfm_file)\n run_task(dip, fingerprint, cfm_path)\n \n # remove the task\n utility.Msg(\"Cleaning up...\")\n delete_task(dip, fingerprint, cfm_file)", "def full_deploy(api_version='HEAD', renderer_version='HEAD',\n markup_renderer_version=None):\n setup()\n\n api.full_deploy(api_version)\n renderer.full_deploy(renderer_version)\n markup_renderer.full_deploy(markup_renderer_version)\n\n upload_nginx_conf()\n upload_uwsgi_conf()\n install_systemd_services()", "def publishRepoItem(self, key, manualVerify = True):\n\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n\n print(f\"\\n***Publish repo for job: {self.nbDetails[key]['title']}, with {self.nbDetails[key]['repo']}\")\n if manualVerify and self.nbDetails[key]['pkg']:\n uploadFlag = input(f\"Confirm publishing? 
(y/n) \")\n\n if uploadFlag == 'y':\n r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % self.nbDetails[key]['repoInfo']['id'],\n params={'access_token': ACCESS_TOKEN} )\n print(r.json())\n\n else:\n print(\"Skipping publication.\")\n\n\n elif self.nbDetails[key]['pkg']:\n r = requests.post('https://zenodo.org/api/deposit/depositions/%s/actions/publish' % self.nbDetails[key]['repoInfo']['id'],\n params={'access_token': ACCESS_TOKEN} )\n print(r.json())\n\n else:\n print(f\"Item not set for packaging, skipping key = {key}.\")", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def publish(self, message: str) -> None:", "async def create_app(self, data: dict) -> dict:\r\n return await self.post(API_APPS, data)", "def publish(request):\n context = RequestContext(request)\n page = Page.objects.get(website=request.website, url=request.POST['page_route'])\n try:\n page.publish(context)\n page.clear_cache(context)\n purge_varnish(request)\n except Exception, e:\n # log error\n raise e\n return HttpResponse('error')\n messages.success(request, 'Your changes have been published successfully.')\n return HttpResponse('true')", "def post( self, application=None, event=None,\n description=None,priority=0, providerkey = None):\n\n # Create the http object\n h = Https(API_DOMAIN)\n \n # Perform the request and get the response headers and content\n data = {\n 'apikey': self.apikey,\n 'application': application,\n 'event': event,\n 'description': description,\n 'priority': priority\n }\n\n if providerkey is not None:\n data['providerkey'] = providerkey\n\n h.request( \"POST\",\n \"/publicapi/add\",\n headers = self.headers,\n body = urlencode(data))\n response = h.getresponse()\n request_status = response.status\n\n if request_status == 200:\n return True\n elif request_status == 401:\n raise Exception(\"Auth Failed: %s\" % response.reason)\n else:\n raise Exception(\"Failed\")", "def run(syncdb=False):\n from fabdeploy.django import migrate as django_migrate, syncdb as django_syncdb\n import time\n env.release = time.strftime('%Y%m%d%H%M%S')\n prepare_deploy() # pull, test, push\n git.remote_pull()\n app.install_requirements()\n django_migrate(syncdb) # syncdb in case is first time\n deploy_static()", "def full_deploy():\n refresh_cts()\n push_mockups()\n deploy()", "def push(args, image_name_tag):\n if args.push is True:\n cmd_push = f\"docker push {image_name_tag}\"\n print(f\"COMMAND: {cmd_push}\")\n print(\"\", flush=True)\n return_code = subprocess.call(cmd_push, shell=True)\n if return_code != 0:\n exit(f\"Error with {cmd_push}\")\n return 0", "def push(images, tag, registry):\n manager = Manager('push', tag, images=images, registry_url=registry)\n manager.run()", "def post(self):\n msg = latest_deployment()\n msgToSend = msg[0] + \" was deployed at \" + msg[1]\n send_slack_log(msgToSend)\n return msgToSend", "def run_package(m):\n\n if m.args.upload:\n doc = find_fs_package_from_dir(m.args.source)\n else:\n doc = find_csv_package(m)\n\n url, user, password = get_site_config(m.args.site_name)\n wp = Client(url, user, password)\n\n post = get_or_new_post(m, wp, doc)\n\n assert post is not None\n\n if m.args.upload:\n upload_to_wordpress(wp, post, doc)\n\n content = html(doc, m.args.template)\n\n post.excerpt = doc['Root'].get_value('Root.Description') or content[:200]\n\n 
post_tags = list(set(\n [t.value for t in doc['Root'].find('Root.Tag')] +\n [t.value for t in doc['Root'].find('Root.Group')] +\n [doc['Root'].get_value('Root.Origin')] +\n list(split_groups_tags(m.args.group)) +\n list(split_groups_tags(m.args.tag))\n ))\n\n post.terms_names = {\n 'post_tag': post_tags,\n 'category': ['Dataset'] + list(split_groups_tags(m.args.group))\n }\n\n post.title = doc.get_value('Root.Title')\n post.slug = slugify(doc.nonver_name)\n post.content = content\n\n if m.args.publish:\n post.post_status = 'publish'\n\n try:\n if m.args.no_op:\n r = {}\n else:\n r = wp.call(EditPost(post.id, post))\n except Fault as e:\n\n if 'taxonomies' in e.faultString:\n err((\"User {} does not have permissions to add terms to taxonomies. \"\n \"Terms are: {}\").format(user, post.terms_names))\n\n raise\n\n return r", "def cmd_apps__create(args):\n \n if args.name is None:\n args.name = os.path.basename(os.getcwd())\n\n url = remote.create_project(args.name)\n \n if in_git_repo():\n if get_push_url('tinyserv') is None:\n git(None, 'remote', 'add', 'tinyserv', url)\n print \"Added remote 'tinyserv'.\"\n else:\n print \"This repository is already configured for app '%s'.\" % \\\n _get_current_project_name()\n \n print \"Remote repository URL is %s.\" % url" ]
[ "0.70976746", "0.6921972", "0.6737742", "0.61851317", "0.60648894", "0.5921255", "0.59202874", "0.57908696", "0.5747466", "0.5709409", "0.56947315", "0.5662847", "0.5600021", "0.55838025", "0.55629", "0.5550522", "0.55285674", "0.55028474", "0.54907167", "0.54901814", "0.5488438", "0.54800475", "0.54592156", "0.54592085", "0.5453356", "0.5436236", "0.5408566", "0.53787965", "0.5368691", "0.53586906", "0.5342109", "0.5340734", "0.53395414", "0.5304019", "0.5303967", "0.5303967", "0.5303967", "0.5287985", "0.5278524", "0.52783644", "0.52723235", "0.52717227", "0.5264677", "0.5261593", "0.5242708", "0.5233601", "0.52332306", "0.52310574", "0.52250046", "0.52096516", "0.5199581", "0.51863503", "0.5177601", "0.5174021", "0.51721543", "0.5161077", "0.51371753", "0.5123479", "0.5114374", "0.5087773", "0.5086943", "0.5062582", "0.5060069", "0.50569206", "0.5054281", "0.50399446", "0.5032733", "0.5017611", "0.50097567", "0.50061077", "0.49984118", "0.49899882", "0.4987441", "0.4980616", "0.495641", "0.49563572", "0.49419433", "0.4940206", "0.49380866", "0.4936254", "0.4935091", "0.49349076", "0.4933868", "0.49208292", "0.4916649", "0.49128062", "0.49126658", "0.49120316", "0.49102846", "0.49095604", "0.49075752", "0.49067202", "0.49042934", "0.4900372", "0.48995847", "0.4897512", "0.48961154", "0.48871323", "0.48794627", "0.48775575" ]
0.6171967
4
\b Lists all your published apps. $ 21 publish list Results from the list command are paginated. Use 'n' to move to the next page and 'p' to move to the previous page. You can view detailed admin information about an app by specifying its id at the prompt.
def list(ctx): # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def app_list(request):\n return render(request, 'mdm/app_list.html', {})", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= 
MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def listapps(parser):\n\n print('Function List')\n subparsers_actions = [\n # pylint: disable=protected-access\n action for action in parser._actions\n # pylint: disable=W0212\n if isinstance(action, argparse._SubParsersAction)]\n # there will probably only be one subparser_action,\n # but better safe than sorry\n for subparsers_action in subparsers_actions:\n # get all subparsers and print help\n for choice, subparser in subparsers_action.choices.items():\n print(\"Function: '{}'\".format(choice))\n print(subparser.format_help())\n # print(parser.format_help())", "def show(ctx, appeui):\n if '.' 
in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def view(args):\n print(\"List of all available phonebooks:\")\n for file in glob.glob(\"*.ph\"):\n print(file)", "def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)", "async def getList(author, page):\n availableCommands = await _generateList(author, False)\n availableCommands.sort(key=lambda x: x['name'])\n totalPages = math.floor(len(availableCommands)/10) + 1\n if page == 100:\n page = totalPages\n if page > totalPages or page < 1:\n return False\n availableCommands = availableCommands[(page-1)*10:(page)*10]\n return assembleEmbed(\n title=f\"List of Commands for `{author}` (Page {page}/{totalPages})\",\n desc=\"\\n\".join([f\"`{c['name']}` - {c['description']}\" for c in availableCommands])\n )", "def get_search_results(config, client, page):\n resp = client.get_published_apps(config.username, page)\n resp_json = resp.json()\n search_results = resp_json[\"results\"]\n if search_results is None or len(search_results) == 0:\n logger.info(\n click.style(\"You haven't published any apps to the marketplace yet. 
Use \", fg=\"blue\") +\n click.style(\"21 publish submit {PATH_TO_MANIFEST_FILE}\", bold=True, fg=\"blue\") +\n click.style(\" to publish your apps to the marketplace.\", fg=\"blue\"), fg=\"blue\")\n return 0\n\n total_pages = resp_json[\"total_pages\"]\n logger.info(\"\\nPage {}/{}\".format(page + 1, total_pages), fg=\"green\")\n headers = [\"id\", \"Title\", \"Url\", \"Rating\", \"Is up\", \"Is healthy\", \"Average Uptime\",\n \"Last Update\"]\n rows = []\n for r in search_results:\n rating = \"Not yet Rated\"\n if r[\"rating_count\"] > 0:\n rating = \"{:.1f} ({} rating\".format(r[\"average_rating\"],\n int(r[\"rating_count\"]))\n if r[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rows.append([r[\"id\"],\n r[\"title\"],\n r[\"app_url\"],\n rating,\n str(r[\"is_up\"]),\n str(r[\"is_healthy\"]),\n \"{:.2f}%\".format(r[\"average_uptime\"] * 100),\n util.format_date(r[\"last_update\"])])\n\n logger.info(tabulate(rows, headers, tablefmt=\"simple\"))\n\n return total_pages", "def get_apps(self, limit, offset=None):\n params = {'v': WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)", "def django_show_docs():\r\n app = wingapi.gApplication\r\n app.ExecuteCommand('show-document', section=\"howtos/django\")", "def list(default_view):\n ListCommandExecutor(default_view).list()", "def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')", "def show_applications_toc():\n if not cache.get(APPLICATIONS_TOC_CACHE_KEY):\n from django.utils.importlib import import_module\n from sveedocuments.models import Page\n \n apps_infos = []\n for appname, apptitle, appdesc, appkwargs in settings.PUBLISHED_APPS:\n title = apptitle or appname\n desc = appdesc\n doc_link = appkwargs.get('doc_link', None)\n demo_link = appkwargs.get('demo_link', None)\n download_link = appkwargs.get('download_link', None)\n github_link = None\n \n # Links can be tuple, that is assumed to be passed by a reverse url with first \n # element as url name and second argument as args list\n if doc_link and not isinstance(doc_link, basestring):\n doc_link = reverse(doc_link[0], args=doc_link[1])\n \n if demo_link and not isinstance(demo_link, basestring):\n demo_link = reverse(demo_link[0], args=demo_link[1])\n \n if download_link and not isinstance(download_link, basestring):\n download_link = reverse(download_link[0], args=download_link[1])\n \n # Determine some optionnals urls from a schema where we insert the appname\n if not download_link and appkwargs.get('pypi', False):\n download_link = \"http://pypi.python.org/pypi/{0}\".format(appname)\n \n if appkwargs.get('github', False):\n github_link = \"https://github.com/sveetch/{0}\".format(appname)\n if not download_link:\n download_link = \"{0}/tags\".format(github_link)\n \n # Try to get introduction from the module __doc__ attribute\n if not desc:\n try:\n mod = import_module(appname)\n except ImportError:\n pass\n else:\n if mod.__doc__.strip():\n desc = mod.__doc__.strip()\n \n # Try to get some informations from the document Page if it exists\n try:\n page_instance = Page.objects.get(slug=appname)\n except Page.DoesNotExist:\n pass\n else:\n title = page_instance.title\n doc_link = page_instance.get_absolute_url() or doc_link\n \n apps_infos.append({\n 'title': title,\n 'desc': desc,\n 'doc_link': doc_link,\n 'demo_link': demo_link,\n 'download_link': download_link,\n 'github_link': github_link,\n })\n \n 
cache.set(APPLICATIONS_TOC_CACHE_KEY, {'application_toc': tuple(apps_infos)})\n \n return cache.get(APPLICATIONS_TOC_CACHE_KEY)", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "def get_all_apps(self):\n return list(self.apps.values())", "def list():\n index = config.index\n output_format = \"%-7s %-20s %s\"\n click.secho(output_format % (\"ID\", \"CREATED\", \"BACKENDS\"), fg=\"cyan\")\n for archive in sorted(index.archives(), key=lambda x: x[\"id\"]):\n # Print it out\n click.echo(\n output_format\n % (\n archive[\"id\"],\n datetime.datetime.fromtimestamp(archive[\"created\"]).strftime(\n \"%Y-%m-%d %H:%M:%S\"\n ),\n \", \".join(sorted(archive[\"backend_names\"])),\n )\n )", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def list_apps(self, ns_name):\n\n return self.helm_client.list(namespace=ns_name)", "def get(self, args):\n\t\tif len(args) >= 2:\n\t\t\tif args[1] == \"list\":\n\t\t\t\tself.write_line(\"LIST {0}\".format(self.config[\"daemon\"][\"rootdir\"] + \"/package-index.json\"))", "def get_publishers(self):", "def describe_apps(StackId=None, AppIds=None):\n pass", "def publish_list(self, messages: list) -> None:", "def publish_list(self, messages: list) -> None:", "def list():\n\n page_limit = app.config['PAGINATION_LIMIT']\n page = request.args.get('page') if 'page' in request.args else 1\n per_page = request.args.get('per_page') if 'per_page' in request.args else page_limit\n\n # TODO: Can be done in much more elegant way\n try:\n page = int(page)\n except:\n page = 1\n\n try:\n per_page = int(per_page)\n except:\n per_page = page_limit\n if per_page > page_limit:\n per_page = page_limit\n\n # Get all rows and order by published datetime and paginate by page count and per_page\n posts = YTSearch.query.order_by(desc(YTSearch.published_at)) \\\n .paginate(page, per_page, error_out=True)\n\n # Get JSON data from list of objects\n result = [i.serialize() for i in posts.items]\n return jsonify({'data': result, 'has_next': posts.has_next, 'next_page': posts.next_num,\n 'has_prev': posts.has_prev, 'prev_page': posts.prev_num, 'length': len(result)}), 200", "def handle(self, *args, **options):\n app_labels = [app.split('.')[-1] for app in settings.INSTALLED_APPS]\n if not args:\n args = app_labels\n for app in args:\n if app not in app_labels:\n print \"%s is not a valid application\" % app\n continue\n\n app_module = get_app(app_label=app, emptyOK=True)\n if app_module is None:\n continue\n\n print \"Models of %s:\" % app\n for model in get_models(app_module):\n print \" - %s has %d entries\" % (\n model.__name__,\n model.objects.count()\n )", "def cli():\n update_all_posts()\n push_updates()", "def get(self):\n return read_heroku_apps(request.args)", "def admin_applog(request):\r\n rdict = request.GET\r\n\r\n # Support optional filter parameters\r\n days = int(rdict.get('days', 1))\r\n status = rdict.get('status', None)\r\n message = rdict.get('message', None)\r\n\r\n log_list = AppLogMgr.find(\r\n days=days,\r\n message_filter=message,\r\n 
status=status,\r\n )\r\n\r\n ret = {\r\n 'count': len(log_list),\r\n 'logs': [dict(l) for l in log_list],\r\n }\r\n return _api_response(request, ret)", "def applications(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.name != name:\r\n return abort(403)\r\n\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n apps_published, apps_draft = _get_user_apps(user.id)\r\n\r\n return render_template('account/applications.html',\r\n title=gettext(\"Applications\"),\r\n apps_published=apps_published,\r\n apps_draft=apps_draft)", "def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def _discover_apps(api, limit=None):\n categories = api.categories()\n subcategories = []\n for category in categories:\n subcategories.extend(api.subcategories(category))\n app_lists = []\n app_count = 0\n LOGGER.info(f'Found {len(subcategories)} subcategories for {len(categories)} categories')\n for subcategory in subcategories:\n app_list = api.discover_apps(subcategory)\n if not app_list:\n continue\n while ALL:\n if limit:\n if len(app_list) >= limit:\n app_list = app_list.limit(app_list[:limit])\n LOGGER.info(f'Subcategory \"{app_list.name()}\" reached the threshhold of {limit}, moving on.')\n break\n try:\n app_list.more()\n except Maximum:\n LOGGER.info(f'Subcategory \"{app_list.name()}\" yielded {len(app_list)} apps')\n break\n app_lists.append(app_list)\n app_count += len(app_list)\n app_set = set()\n for app_list in app_lists:\n for app in app_list:\n app_set.add(app.package_name())\n LOGGER.info(f'{\"#\" * 60}\\n'\n f'\\tFinished discovering Apps!\\n'\n f'\\tGot {app_count} apps in {len(app_lists)} subcategories of {len(categories)} categories\\n'\n f'\\tOut of those {app_count} apps, {len(app_set)} apps had a unique package name\\n'\n f'\\t{\"#\" * 60}')\n return app_lists", "def program_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/program\")\n \n program_heading = abcradionational.get_podcast_heading(soup)\n\n for program in program_heading:\n items.append({\n 'label': program['title'],\n 'path': plugin.url_for('program_item', url=program['url']),\n })\n\n return items", "def publish_action(modeladmin, request, queryset):\n\n count = queryset.filter(published=False).update(published=True)\n messages.info(request, f\"Published {count} objects\")", "def apps(self):\n if \"apps\" in self._prop_dict:\n return AppsCollectionPage(self._prop_dict[\"apps\"])\n else:\n return None", "def list(host, mqtt_port, rest_port):\n\n click.echo(\"Listing things from host \" + host + \".\")\n service = Service(host, mqtt_port, str(rest_port))\n things = service.get_all_things()\n for thing in things:\n click.echo(thing.__dict__)\n if len(things) == 0:\n click.secho(\"Unable to retrieve any things!\", fg=\"red\", bold=True)", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = 
DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def command_list(self):\n # Get buckets\n project_bucket_mappings = {\n 'all-of-us-rdr-prod': PUBSUB_NOTIFICATION_BUCKETS_PROD,\n 'all-of-us-rdr-stable': PUBSUB_NOTIFICATION_BUCKETS_STABLE,\n 'all-of-us-rdr-sandbox': PUBSUB_NOTIFICATION_BUCKETS_SANDBOX,\n }\n\n bucket_list = [self.args.bucket] if self.args.bucket else project_bucket_mappings[self.gcp_env.project]\n\n notifications_dict = {\n \"notifications\": []\n }\n\n for bucket_name in bucket_list:\n # call storage api\n client = storage.Client()\n bucket = client.get_bucket(bucket_name)\n notifications = bucket.list_notifications(client)\n\n for notification in notifications:\n # Skip the default topic notification (which won't have an integer ID\"\n try:\n id_int = int(notification.notification_id)\n except ValueError:\n continue\n\n if self.args.id and self.args.id != id_int:\n continue\n\n output_dict = dict()\n\n try:\n output_dict['bucket'] = bucket_name\n output_dict['id'] = notification.notification_id\n output_dict['topic_name'] = notification.topic_name\n output_dict['topic_project'] = notification.topic_project\n output_dict['payload_format'] = notification.payload_format\n output_dict['object_name_prefix'] = notification._properties['object_name_prefix']\n output_dict['event_types'] = notification.event_types\n except KeyError:\n pass\n\n notifications_dict['notifications'].append(output_dict)\n\n pprint(notifications_dict)\n\n return 0", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def do_list(self, args):\n if args.option == 'config':\n print(list_config())\n if args.option == 'queries':\n for k,v in list_queries().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'jobs':\n update_jobs(CLI_GLOBALS.ENGAGEMENT)\n for k,v in list_jobs().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'results':\n for i in list_results():\n print(i)\n if args.option == 'key':\n for k,v in list_key().items():\n print(k, \":\", json.dumps(v, indent=4))\n if args.option == 'engagement':\n print(list_engagement())", "def cmd_list(arguments):\r\n\r\n # get a producer\r\n producer = getProducer()\r\n\r\n # get the list of pending jobs\r\n jobs = producer.list()\r\n\r\n # print que list size and the entries\r\n print \"Number of jobs: %d\" % len(jobs)\r\n for j in range(len(jobs)):\r\n print \" job %06d - %s\" % (j, jobs[j])\r\n\r\n return 0", "def dock_app_list(data):\n apps = []\n count = data['extra_dock'] + 1\n for i in range(count):\n name = data['app_name_%s' % str(i)]\n path = data['app_path_%s' % str(i)]\n if name not in [None, '']:\n apps.append({'name': name, 'path': path})\n return apps", "def get_apps(self):\n return self.apps", "def list_tasks(ctx):\n ctx.run(\"invoke --list\")", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n 
featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def app_index(page, lookup, category, fallback, use_count):\r\n\r\n per_page = current_app.config['APPS_PER_PAGE']\r\n\r\n apps, count = lookup(category, page, per_page)\r\n\r\n data = []\r\n for app in apps:\r\n data.append(dict(app=app, n_tasks=cached_apps.n_tasks(app['id']),\r\n overall_progress=cached_apps.overall_progress(app['id']),\r\n last_activity=app['last_activity'],\r\n last_activity_raw=app['last_activity_raw'],\r\n n_completed_tasks=cached_apps.n_completed_tasks(app['id']),\r\n n_volunteers=cached_apps.n_volunteers(app['id'])))\r\n\r\n\r\n if fallback and not apps: # pragma: no cover\r\n return redirect(url_for('.index'))\r\n\r\n pagination = Pagination(page, per_page, count)\r\n categories = cached_cat.get_all()\r\n # Check for pre-defined categories featured and draft\r\n featured_cat = model.category.Category(name='Featured',\r\n short_name='featured',\r\n description='Featured applications')\r\n if category == 'featured':\r\n active_cat = featured_cat\r\n elif category == 'draft':\r\n active_cat = model.category.Category(name='Draft',\r\n short_name='draft',\r\n description='Draft applications')\r\n else:\r\n active_cat = db.session.query(model.category.Category)\\\r\n .filter_by(short_name=category).first()\r\n\r\n # Check if we have to add the section Featured to local nav\r\n if cached_apps.n_featured() > 0:\r\n categories.insert(0, featured_cat)\r\n template_args = {\r\n \"apps\": data,\r\n \"title\": gettext(\"Applications\"),\r\n \"pagination\": pagination,\r\n \"active_cat\": active_cat,\r\n \"categories\": categories}\r\n\r\n if use_count:\r\n 
template_args.update({\"count\": count})\r\n return render_template('/applications/index.html', **template_args)", "def search_app(self, search_pattern):\n\n url_params = {'limit': SearchAPI.SCAN_LIMIT, 'expand': 'true'}\n first_search = self.get('mgmt-pop/apps', params=url_params)\n data = first_search.json()\n app_found = 0\n app_scanned = 0\n\n # CLI ouput header\n cli.header('#app_id,type,name,host,cname,cert_id,status,reachable')\n stats = self.process_page(data, search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n if data.get(\"meta\"):\n\n app_count = data.get(\"meta\").get(\"total_count\")\n page_offset = data.get(\"meta\").get(\"offset\")\n page_limit = data.get(\"meta\").get(\"limit\")\n page_total = ceil(app_count / page_limit)\n\n logging.debug(\"app_count: {}, scanned: {}, offset: {}, limit: {}, pages: {}\".format(\n app_count, app_scanned, page_offset, page_limit, page_total))\n\n for page in range(1, page_total):\n logging.debug(\"Loading application page {} of {}\".format(page, page_total))\n url_params['offset'] = page * page_limit\n search = self.get('mgmt-pop/apps', params=url_params)\n stats = self.process_page(search.json(), search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n # CLI ouput footer\n if not config.batch:\n if app_found != app_count:\n cli.footer(\"Found %s app(s), total %s app(s)\" % (app_found, app_count))\n else:\n cli.footer(\"%s app(s)\" % app_count)", "def do_list_items(self, arg):\n try:\n cprint (\"These are your items: \\n\", 'blue')\n my_items = arg[\"<all_items>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n my_items_str = \" \".join(my_items)\n print(my_items_str)\n elif choice == \"id\":\n my_items_str = int(\" \".join(my_items))\n print (my_items_str)\n app.ToDoApp.to_view_items(my_items_str)\n \n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def show_list():\n\n response = []\n docs = SUPERHEROES.stream()\n for doc in docs:\n response.append(doc.to_dict())\n return jsonify(response), 201", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def view(args):\n if args.available:\n printAvailableCampaigns()\n if args.search_help:\n print(getSearchQueryHelp())", "def main(appinfo, args):\n parser = optparse.OptionParser(\n usage='%prog list [OPTS] [--] [SEARCH..]',\n )\n parser.add_option(\n '-v', '--verbose',\n help='show more information',\n action='count',\n )\n parser.add_option(\n '--tag',\n help='only list tickets having this tag',\n action='append',\n )\n parser.add_option(\n '--order',\n help='sort listing according to criteria',\n )\n parser.add_option(\n '--hide',\n metavar='FIELD',\n help='hide field from listing',\n )\n parser.add_option(\n '--show',\n metavar='FIELD',\n help='show field in listing',\n )\n (options, args) = parser.parse_args(args)\n\n if args:\n raise NotImplementedError(\n 'TODO Full text search not supported yet.')\n\n def list_tickets():\n for (mode, type_, object, basename) in storage.git_ls_tree(\n path='',\n children=True,\n ):\n yield basename\n\n for ticket in list_tickets():\n number = storage.get(os.path.join(ticket, 'number'))\n if number is not None:\n number = number.rstrip()\n ident = '#%s' % number\n else:\n ident = ticket[:7]\n description = storage.get(os.path.join(ticket, 'description')).rstrip()\n tags = set(storage.ls(os.path.join(ticket, 'tags')))\n if options.tag:\n must = frozenset(options.tag)\n if not tags & must:\n 
continue\n tags = tagsort.human_friendly_tagsort(tags)\n if options.verbose:\n raise NotImplementedError\n if options.order:\n raise NotImplementedError\n if options.show:\n raise NotImplementedError\n if options.hide:\n raise NotImplementedError\n (title, description) = util.extract_title(description)\n print '%(ident)s\\t%(title)s' % dict(\n ident=ident,\n title=title,\n )\n if tags:\n print textwrap.fill(\n ' '.join(tags),\n initial_indent=' ',\n subsequent_indent=' ',\n break_long_words=False,\n )", "def list(limit, export):\n GetArticles.get_all_articles(limit, export)", "def cmd_pagetplaylists(self, data, client, cmd):\n for n, p in sorted(self._playlists.iteritems()):\n cmd.sayLoudOrPM(client, '%s - %s' % (n, p))\n time.sleep(1)", "def _go_list(self, *args):\n return subprocess.check_output((\"go\", \"list\") + self.tag_args + args).strip().split(\"\\n\")", "def publish():\n pass", "def fw_app_list(data):\n apps = []\n count = data['extra_firewall']\n for i in range(count):\n bundle = data['id_%s' % str(i + 1)]\n allowed = data['permit_%s' % str(i + 1)]\n if bundle not in [None, '']:\n apps.append({'bundle_id': bundle, 'allowed': allowed})\n return apps", "def list_cmd(feed):\n if ARGV.get(REV_OPT):\n feed_list = reversed(feed.entries)\n else:\n feed_list = feed.entries\n index = 0\n for entry in feed_list:\n if not ARGV.get(UNREAD_OPT) \\\n or (ARGV.get(UNREAD_OPT) and not has_been_read(entry)):\n print(format_list_item(entry, index))\n index += 1", "def list_all(request):\n\n entries = BlogEntry.objects.all()\n data = {'entries': paginate_objects(request, entries),\n 'blog_info': get_blog_info(), 'action_str': 'All Blogs Shown'}\n\n return render_to_response('blog/list_entries.html', data,\n context_instance=get_rq(request))", "def get_applications(status):\n return status['applications']", "def get_app_list(self):\n\n return self._get().keys()", "def view_approved():\n global approved\n global appr_ind\n appr = approved.get_all_values()\n headings = appr[0]\n first_appl = appr[appr_ind]\n for head, app in zip(headings, first_appl):\n head = head.ljust(15, ' ')\n print(f'{head} {app}')\n keep_viewing = True\n while keep_viewing:\n view_next = input('\\nPress V to view next, Q to quit, M for main '\n 'menu.\\n')\n if view_next.lower() == 'q':\n logout()\n elif view_next.lower() == 'v':\n appr_ind += 1\n if appr_ind < len(appr):\n print('Next approved application: \\n')\n view_approved()\n else:\n print('\\nNo more approved applications to view \\n')\n keep_viewing = False\n next_action()\n elif view_next.lower() == 'm':\n keep_viewing = False\n hr_main()\n break\n else:\n is_invalid()", "def list_(ctx, search, backend):\n projects = ctx.obj['projects_db'].search(search, backend=backend)\n projects = sorted(projects, key=lambda project: project.name.lower())\n ctx.obj['view'].search_results(projects)", "def list_cmd(ctx):\n client = ctx.obj['CLIENT']\n models = client.list_models()\n\n x = PrettyTable()\n x.field_names = [\"Name\",\"Tag\",\"Created\"]\n for m in models:\n x.add_row([m[\"name\"],m[\"tag\"],m[\"uploaded_at\"]])\n print(x)", "def list_quickstreams():\n quickstreams = read_quickstreams()\n print()\n if quickstreams == {}:\n print(\"No Quickstream bookmarks found.\")\n else:\n print(json.dumps(quickstreams, indent=4))", "def n_published():\r\n sql = text('''\r\n WITH published_apps as\r\n (SELECT app.id FROM app, task WHERE\r\n app.id=task.app_id AND app.hidden=0 AND app.info\r\n LIKE('%task_presenter%') GROUP BY app.id)\r\n SELECT COUNT(id) FROM published_apps;\r\n 
''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def show_linkages():\n appnames = tuple(linkages.keys())\n appcount = len(appnames)\n plural = (appcount == 1) and \"app\" or \"apps\"\n print(f\"LINKAGES ({appcount} {plural} total):\")\n \n for appname in appnames:\n LoaderCls = linkages[appname]\n qname = qualified_name(LoaderCls)\n instancedict = dict(LoaderCls.instances)\n instancecount = len(LoaderCls.instances)\n instanceplural = (instancecount == 1) and \"instance\" or \"instances\"\n string = pformat(instancedict, indent=4, width=consts.SEPARATOR_WIDTH)\n print()\n print(f\" «{appname}» ({qname}, {instancecount} {instanceplural}):\")\n print(f\"{string}\")", "async def list(self, ctx: MyContext):\n if ctx.subcommand_passed is None:\n await ctx.send_help(\"wormhole list\")", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def cmd_list(args):", "def ls():\n if not g.userpl:\n g.message = F('no playlists')\n g.content = g.content or generate_songlist_display(zeromsg=g.message)\n\n else:\n g.content = playlists_display()\n g.message = F('pl help')", "def get(self):\n apps = Application.objects()\n\n # TODO return more information\n apps_clean = []\n for app in apps:\n # don't include invalid apps\n if app[\"validated\"] is True:\n apps_clean.append(\n {\"name\": app[\"name\"]}\n )\n\n return apps_clean, 200", "def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg", "def action_list(args):\n\n module_root = Path(\"modules/\")\n modules = load_modules(module_root)\n\n print(\"Available modules:\")\n for module in modules:\n print(f\"- {module}\")", "def summary(app):\n click.echo(get_summary(app))", "def docs_list(directory):\n context = toolkit.load_data_context_with_error_handling(directory)\n\n docs_sites_url_dicts = context.get_docs_sites_urls()\n docs_sites_strings = [\n \" - <cyan>{}</cyan>: {}\".format(\n docs_site_dict[\"site_name\"],\n docs_site_dict.get(\"site_url\")\n or f\"site configured but does not exist. 
Run the following command to build site: great_expectations \"\n f'docs build --site-name {docs_site_dict[\"site_name\"]}',\n )\n for docs_site_dict in docs_sites_url_dicts\n ]\n\n if len(docs_sites_strings) == 0:\n cli_message(\"No Data Docs sites found\")\n else:\n list_intro_string = _build_intro_string(docs_sites_strings)\n cli_message_list(docs_sites_strings, list_intro_string)\n\n toolkit.send_usage_message(\n data_context=context, event=\"cli.docs.list\", success=True\n )", "def show_blog_list():\r\n\tblog_list = Page.objects.filter(page_type=3).order_by('-created')[:4]\r\n\treturn {'blog_list': blog_list}", "def print_application_name_and_id_list(self, application_list):\n if not application_list:\n print \"Application list is empty\"\n return\n\n for item in application_list.items:\n print \"{} => {}\".format(item.application_id, item.application_name)", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def home():\r\n\r\n page = 1\r\n per_page = current_app.config.get('APPS_PER_PAGE')\r\n if per_page is None: # pragma: no cover\r\n per_page = 5\r\n d = {'featured': cached_apps.get_featured_front_page(),\r\n 'top_apps': cached_apps.get_top(),\r\n 'top_users': None}\r\n\r\n # Get all the categories with apps\r\n categories = cached_cat.get_used()\r\n d['categories'] = categories\r\n d['categories_apps'] = {}\r\n for c in categories:\r\n tmp_apps, count = cached_apps.get(c['short_name'], page, per_page)\r\n d['categories_apps'][str(c['short_name'])] = tmp_apps\r\n\r\n # Add featured\r\n tmp_apps, count = cached_apps.get_featured('featured', page, per_page)\r\n if count > 0:\r\n featured = model.category.Category(name='Featured', short_name='featured')\r\n d['categories'].insert(0,featured)\r\n d['categories_apps']['featured'] = tmp_apps\r\n\r\n if current_app.config['ENFORCE_PRIVACY'] and current_user.is_authenticated():\r\n if current_user.admin:\r\n d['top_users'] = cached_users.get_top()\r\n if not current_app.config['ENFORCE_PRIVACY']:\r\n d['top_users'] = cached_users.get_top()\r\n return render_template('/home/index.html', **d)", "def apps(self):\n return list(self.ctx.keys())", "def main_list(args):\n return list_commands(args.directory)", "def django_start_app(appname):\r\n \r\n actions = []\r\n errs = []\r\n \r\n app = wingapi.gApplication\r\n cmdline, dirname, err = _get_base_cmdline()\r\n if err is not None:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The Django app could not be created: %s\") % err\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n cmdline += ['startapp', appname]\r\n err, output = app.ExecuteCommandLine(cmdline, dirname, None, 5.0, return_stderr=True)\r\n if err != 0 or output[1]:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The command %s failed with error code %i and output:\\n\\n%s\\n\\n%s\") % (cmdline, err, _get_output(output), _kMissingPythonMessage)\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n actions.append(_(\"Created Django app %s in %s\") % (appname, dirname))\r\n \r\n # Add the new app to INSTALLED_APPS in settings.py\r\n manage_py, settings_py = _CDjangoPluginActivator._instance._FindKeyFiles()\r\n try:\r\n f = open(settings_py)\r\n txt = f.read()\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to read %s to update INSTALLED_APPS\"))\r\n else:\r\n lines = txt.splitlines()\r\n eol = _get_eol(txt)\r\n insert_line = 
None\r\n in_installed_apps = False\r\n for i, line in enumerate(lines):\r\n if line.lstrip().startswith('INSTALLED_APPS'):\r\n in_installed_apps = True\r\n elif in_installed_apps and line.strip().startswith(')'):\r\n in_installed_apps = False\r\n insert_line = i\r\n if insert_line is None:\r\n lines.extend(['', 'INSTALLED_APPS =', \" '%s',\" % appname, ')', ''])\r\n else:\r\n lines = lines[:insert_line] + [\" '%s',\" % appname] + lines[insert_line:]\r\n try:\r\n txt = eol.join(lines)\r\n f = open(settings_py, 'w')\r\n f.write(txt)\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to write %s to update INSTALLED_APPS\"))\r\n else:\r\n actions.append(_(\"Added %s to INSTALLED_APPS in %s\") % (appname, settings_py))\r\n \r\n title = _(\"The App was Created\")\r\n msg = _(\"The application was created. \")\r\n if errs:\r\n msg += _get_errors_list(errs)\r\n msg += _get_actions_list(actions)\r\n app.ShowMessageDialog(title, msg, modal=False)", "def overview():\n pages_list = g.db.pages.find().sort('name')\n return render_template('{}/index.html'.format(MODULE_DIR), **locals() )", "def list_app_devices(request, pk):\n context = {}\n app = get_object_or_404(MacOSApp, pk=pk)\n pending = Laptop.objects.filter(apps_pending__in=[app])\n installed = InstallationRecord.objects.filter(app=app, device__apps_installed__in=[app], active=True)\n context['resource'] = app\n context['resource_type'] = 'App'\n context['pending'] = pending\n context['installed'] = installed\n return render(request, 'mdm/device_list.html', context)", "def list(self, topic, **options):\n pass", "def applicationsdetails():\n appdicts = db.hgetall('applications')\n finaldict = OrderedDict()\n for appname in sorted(appdicts):\n instances = json.loads(appdicts.get(appname))\n instance_map = OrderedDict()\n for key in sorted(instances):\n instance_map.__setitem__(key,instances.get(key))\n finaldict.__setitem__(appname,instance_map)\n return render_template('robots.html', appdicts=finaldict)" ]
[ "0.71768415", "0.6380013", "0.6237213", "0.61973405", "0.6124043", "0.60990864", "0.60854894", "0.5944503", "0.5898133", "0.58913547", "0.5877315", "0.5836518", "0.5767014", "0.5754838", "0.5688991", "0.56420964", "0.56412876", "0.55870926", "0.5555698", "0.5543384", "0.5500558", "0.5498956", "0.54931974", "0.5486188", "0.5478611", "0.54709584", "0.5469451", "0.5467937", "0.54616815", "0.5453046", "0.5439428", "0.5404549", "0.54009867", "0.539967", "0.53450865", "0.53450865", "0.53338873", "0.53291744", "0.53282154", "0.5321905", "0.5320619", "0.5304125", "0.52864105", "0.5283571", "0.5279911", "0.5275675", "0.5273271", "0.52494085", "0.52306426", "0.5213608", "0.52020425", "0.51987016", "0.5177878", "0.5177072", "0.51750416", "0.5173282", "0.51720977", "0.5161809", "0.51565397", "0.5129071", "0.5118267", "0.51167744", "0.5091028", "0.5079457", "0.5069662", "0.5049123", "0.50490683", "0.50463045", "0.50452447", "0.5045", "0.504355", "0.5032024", "0.50296074", "0.50227016", "0.5018808", "0.5000307", "0.49992388", "0.4990032", "0.49795705", "0.4978248", "0.497142", "0.49651745", "0.49598542", "0.49543893", "0.49460548", "0.49445215", "0.49335173", "0.49309894", "0.49267915", "0.49264762", "0.49231613", "0.49192604", "0.49157166", "0.49063873", "0.4894507", "0.48922387", "0.48904753", "0.4886401", "0.4885107", "0.48828533" ]
0.64636713
1
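A note on the record layout: the float list above presumably pairs positionally with the preceding negatives array (negative_scores[i] scores negatives[i]), so a record's hardest negative is an argmax over the two parallel lists. A small sketch under that pairing assumption; the snippet stand-ins below are illustrative, not entries from the record:

def hardest_negative(negatives, negative_scores):
    """Return (score, snippet) of the highest-scoring negative.

    Assumes the two lists are parallel: negative_scores[i] scores negatives[i].
    Scores are stored as strings in these records, so compare as floats.
    """
    best = max(range(len(negatives)), key=lambda i: float(negative_scores[i]))
    return float(negative_scores[best]), negatives[best]

score, snippet = hardest_negative(
    ["def foo(): ...", "def bar(): ..."],   # illustrative stand-ins
    ["0.5045", "0.71768415"],
)
assert score == 0.71768415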
\b
Removes a published app from the Marketplace.
$ 21 publish remove [yes] {app_id}
\b
Removes all published apps from the Marketplace.
$ 21 publish remove [yes] all
\b
def remove(ctx, app_id, all, assume_yes):
    if all and not app_id:
        for _app_id in _get_all_app_ids(ctx.obj['config'], ctx.obj['client']):
            _delete_app(ctx.obj['config'], ctx.obj['client'], _app_id, assume_yes)
    elif app_id and not all:
        _delete_app(ctx.obj['config'], ctx.obj['client'], app_id, assume_yes)
    else:
        logger.info(ctx.command.get_help(ctx))
        sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove():\n run('pew rm {0}'.format(package_name()))", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def remove_app(self):\n \n pass", "def delete_app(AppId=None):\n pass", "def delete_app(short_name):\r\n delete_memoized(get_app, short_name)", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def delete_app(self, name):\n raise NotImplementedError", "def remove_hero(apps, schema_editor):\n pass", "def delete(self, application_id):", "def remove_app(self, app_name):\n self.remove_list_setting('applications', 'installed_apps',\n app_name)", "def _delete_app(config, client, app_id, assume_yes):\n title = client.get_app_full_info(config.username, app_id).json()['app_info']['title']\n if assume_yes or click.confirm(\n \"Are you sure that you want to delete App '{} ({})'?\".format(app_id, title)):\n try:\n resp = client.delete_app(config.username, app_id)\n resp_json = resp.json()\n deleted_title = resp_json[\"deleted_title\"]\n logger.info(\"App {} ({}) was successfully removed from the marketplace.\".format(app_id, deleted_title))\n except ServerRequestError as e:\n if e.status_code == 404:\n logger.info(\"The app with id '{}' does not exist in the marketplace.\".format(app_id), fg=\"red\")\n elif e.status_code == 403:\n logger.info(\n \"You don't have permission to delete the app with id '{}'. You \"\n \"can only delete apps that you have published.\".format(app_id), fg=\"red\")", "def remove(self, package):\n self.driver.remove_app(package)", "def remove(name):\n if name==\"autopy\":\n print(\"\\n\\tUNINSTALLING WORKING MODULE WILL CAUSE ERRORS AND MAKE YOUR CODE UNUSABLE\\n\")\n choice=input(f\"Are you sure to remove {name}?\\nEnter YES,PROCEED to continue:\")\n if choice == 'YES,PROCEED':os.system(f'python -m pip uninstall {name}')\n else:print(\"Operetion Cancelled\")", "def delete_app(self,*app_names):\n\n for app in app_names:\n shutil.rmtree(os.path.join(self._main,app))\n \n self._remove_extra_css_apps()\n self._remove_extra_templates_apps()\n self._update_delete_app_or_page()", "def _uninstall(package_name, remove_all, app_id, cli, app):\n\n package_manager = _get_package_manager()\n err = package.uninstall(\n package_manager, package_name, remove_all, app_id, cli, app)\n if err is not None:\n emitter.publish(err)\n return 1\n\n return 0", "def remove_app(self, app):\n try:\n membership = self.membership_class.objects.get(obj=self, app=app)\n except self.membership_class.DoesNotExist:\n return False\n else:\n membership.delete()\n index_webapps.delay([app.pk])\n return True", "def remove_tag(args):", "def uninstall_app(self, package, keepdata=False):\n return self.adb.uninstall(package, keepdata)", "def rm(args):\n args.delete = True\n return remove(args)", "def clear_app(package):\n G.DEVICE.clear_app(package)", "def delete_application(self, method=\"POST\", short_name=\"sampleapp\"):\r\n if method == \"POST\":\r\n return self.app.post(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)", "def clear_app(self, package):\n return self.adb.clear_app(package)", "def removeItem(*args):", "def removeItem(*args):", "def remove_apps(self):\n self.membership_class.objects.filter(obj=self).delete()", "def remove_compiled_app():\r\n app = get_app()\r\n remove_compiled_application(apath(app, r=request))\r\n session.flash = T('compiled application removed')\r\n 
redirect(URL('site'))", "def do_remove(self, arg):\n jail_destroy('remove', arg)", "def cli(env, dry_run):\n\n tag_manager = TagManager(env.client)\n empty_tags = tag_manager.get_unattached_tags()\n\n for tag in empty_tags:\n if dry_run:\n click.secho(f\"(Dry Run) Removing {tag.get('name')}\", fg='yellow')\n else:\n result = tag_manager.delete_tag(tag.get('name'))\n color = 'green' if result else 'red'\n click.secho(f\"Removing {tag.get('name')}\", fg=color)", "def _installed_apps_remove(self):\n config.remove_plugin(self.module_path)", "def deleteApp(appName):\n logger.debug('[FLASKWEB /delete/app/<appName>] Request to delete App `%s`', appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName not in applist:\n return returnError(\"Application %s does not exist\" % appName, 404)\n\n logger.info(\"[FLASKWEB] DELETING all versions of app, `%s`\")\n db.deleteAllApps(appName)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(app=appName, status='DELETED, files remain on server')), 200\n else:\n applist = db.getAllApps()\n versions = {a['name']: db.getVersions(a['name'], limit=5) for a in applist}\n return render_template('apps.html', applist=applist, versions=versions)", "def DeleteApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def AptUninstall(vm):\n remove_str = 'sudo apt-get --purge autoremove -y '\n for package in APT_PACKAGES:\n vm.RemoteCommand(remove_str + package)", "def filter_del(name):\n\n\tweechat.command(weechat.buffer_search_main(), \"/mute filter del %s\" % name)", "def cmd_gallery_remove(client, args):\n gallery_remove = client.remove_from_gallery(args.item_id)\n generate_output({'gallery_remove': gallery_remove})", "def retract_application(request, application_id):\n appl = get_object_or_404(Application, pk=application_id)\n if appl.Project.distributions.filter(Student=request.user).exists():\n raise PermissionDenied(\"You cannot retract this application, because you are distributed to this project. 
If this distribution is incorrect, please contact the responsible staff member of the project.\")\n\n track = ApplicationTracking()\n track.Project = appl.Project\n track.Student = request.user\n track.Type = 'r'\n track.save()\n\n appl.delete()\n return render(request, \"base.html\", context={\n \"Message\": \"Deleted application\",\n \"return\": 'students:list_applications',\n })", "def reinstall_app(self, pbz_path, launch_on_install=True):\n\t\tdef endpoint_check(result, pbz_path):\n\t\t\tif result == 'app removed':\n\t\t\t\tprint result\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tif DEBUG_PROTOCOL:\n\t\t\t\t\tlog.warn(\"Failed to remove supplied app, app manager message was: \" + result)\n\t\t\t\treturn False\n\n\t\t# get the bundle's metadata to identify the app being replaced\n\t\tbundle = PebbleBundle(pbz_path)\n\t\tif not bundle.is_app_bundle():\n\t\t\traise PebbleError(self.id, \"This is not an app bundle\")\n\t\tapp_metadata = bundle.get_app_metadata()\n\n\t\t# attempt to remove an app by its UUID\n\t\tresult_uuid = self.remove_app_by_uuid(app_metadata['uuid'].bytes, uuid_is_string=False)\n\t\tif endpoint_check(result_uuid, pbz_path):\n\t\t\treturn self.install_app(pbz_path, launch_on_install)\n\n\t\tif DEBUG_PROTOCOL:\n\t\t\tlog.warn(\"UUID removal failure, attempting to remove existing app by app name\")\n\n\t\t# attempt to remove an app by its name\n\t\tapps = self.get_appbank_status()\n\t\tfor app in apps[\"apps\"]:\n\t\t\tif app[\"name\"] == app_metadata['app_name']:\n\t\t\t\tresult_name = self.remove_app(app[\"id\"], app[\"index\"])\n\t\t\t\tif endpoint_check(result_name, pbz_path):\n\t\t\t\t\treturn self.install_app(pbz_path, launch_on_install)\n\n\t\tlog.warn(\"Unable to locate previous instance of supplied application\")", "def remove(self, egg):", "def wipe_application(self):\n\n self.resin.models.application.base_request.request(\n 'application', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )", "def remove():", "def uninstall(package):\n return G.DEVICE.uninstall_app(package)", "async def remove(message, client, extra_args):\n\n if await funnypts_transaction(message, client, extra_args, \"remove\"):\n await message.channel.send(\"BRUH, THAT WAS CRINGE. 
SOMEONE JUST REVOKED YOUR FUNNYPOINT\")", "def uninstall(des):\n if \"Xblog/docs/notebooks/\" == des.replace(os.path.basename(des), \"\"):\n removeFromNavBar(des)\n else:\n removeFromParentIndex(des)\n ccc.success(\"uninstallation procedures for \" + des)", "def test_07_settings_application_delete(self):\n # get applications\n r = requests.get('%s/settings/applications' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 200)\n application_id = re.search('<form id=\"application(\\d+)\"', r.content).group(1)\n\n # delete the application -> 400\n r = requests.delete('%s/settings/applications/aa' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 400)\n\n # delete the application -> 417\n r = requests.delete('%s/settings/applications/11111' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 417)\n\n # delete the application -> 200\n r = requests.delete('%s/settings/applications/%s' % (self.url, application_id), cookies=self.cookies)\n self.assertEqual(r.status_code, 200)", "def main_remove(args):\n return remove_command(args.directory, args.name)", "def remove_robots(): #py:remove_robots\n RUR._remove_robots_()", "def remove(name):", "def remove_apps(self, app_names):\n groups = self['__store']\n for name in app_names:\n supergroup = groups.first(name='a_' + name)\n if supergroup:\n supergroup.remove_subgroup(self)", "async def remove_process(self, ctx, *name):\n print(name)\n name = self.fix_emoji_escapes(\" \".join(name))\n complete = False\n for process in PROCESSES.keys():\n if PROCESSES.get(process) == name:\n PROCESSES.pop(process)\n self.update_processes_config()\n await ctx.send(f\"The process {name} has been removed\")\n complete = True\n break\n if not complete:\n await ctx.send(f\"The process {name} doesn't exist\")", "def prune_unused_shared_packages(self, all_apps, apps_to_remove):\n chunk_pattern = r'(\\d+):\"([\\da-f]+)\"'\n used_chunks = {}\n removed_used_chunks = {}\n build_dir = self.manager.output_dir / \"build\"\n\n for app in all_apps:\n bundle = build_dir / app / \"bundle.js\"\n if not bundle.exists(): # pragma: no cover\n continue\n bundle_txt = bundle.read_text(**UTF8)\n chunks = dict(re.findall(chunk_pattern, bundle_txt, re.VERBOSE))\n if app in apps_to_remove:\n removed_used_chunks.update(chunks)\n else:\n used_chunks.update(chunks)\n\n for chunk_id, chunk_hash in sorted(removed_used_chunks.items()):\n if chunk_id in used_chunks:\n continue\n unused = sorted(build_dir.glob(f\"{chunk_id}.{chunk_hash}.*\"))\n if unused:\n self.log.debug(\n f\"[static] pruning unused shared package {chunk_id}: \"\n f\"{len(unused)} files\"\n )\n self.delete_one(*unused)", "def unpublish_item_action(modeladmin, request, queryset):\n\n count = queryset.filter(published=True).update(published=False)\n asset_count = Asset.objects.filter(item__in=queryset, published=True).update(\n published=False\n )\n\n messages.info(request, f\"Unpublished {count} items and {asset_count} assets\")", "def _delete_spam_action(act, session):\n if act is None:\n return\n act.item.spam_flag_counter -= 1\n session.delete(act)", "def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n 
metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200", "def YumUninstall(vm):\n _Uninstall(vm)", "def delete_app(self):\n contract = jc.Contract()\n return st.OperationContract(\n self.agent.make_delete_app_operation(\n application=self.TEST_APP,\n account_name=self.bindings[\"SPINNAKER_KUBERNETES_V2_ACCOUNT\"],\n ),\n contract=contract,\n )", "def spiderbotDelete(spiderbotid):\n sclogic.spiderbotDelete(spiderbotid)", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() == args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "def delete_book(code: str):\n pass", "def delete_run(arn=None):\n pass", "def remove():\n pass", "def test_02_app_delete(self):\r\n for i in range(300):\r\n app = App(name=str(i), short_name=str(i),\r\n description=str(i), owner_id=1)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n url = '?api_key=%s' % (self.api_key)\r\n self.check_limit(url, 'delete', 'app')", "def remove_app(self, appid, index, async=False):\n\n\t\tdata = pack(\"!bII\", 2, appid, index)\n\t\tself._send_message(\"APP_MANAGER\", data)\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"APP_MANAGER\").get_data()", "def remove_item_page(request):\n validate(instance=request.body, schema=item_schema_remove)\n body = json.loads(request.body)\n Item.remove_item(body['item_id'])\n return HttpResponse('success')", "def delete_alarm():\r\n name = request.args.get('alarm_item')\r\n logging.info(\"Alarm deleted in delete_alarm(): \" + name)\r\n for alarm in alarms:\r\n if alarm['title'] == name:\r\n alarms.remove(alarm)", "def delete_command(update):\r\n try:\r\n update.message.delete()\r\n\r\n except error.BadRequest:\r\n pass", "def site_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete site \"{1}\"'.format(self.APP_CMD, name))", "def remove(html, *args):\n reobj = re.compile(\"|\".join(args), re.IGNORECASE)\n return reobj.sub(\" \", html)", "def unset(bot, update, chat_data):\n if 'job' not in chat_data:\n update.message.reply_text('Sem notificacoes ativadas')\n return\n\n job = chat_data['job']\n job.schedule_removal()\n del chat_data['job']\n check = emojize(\":white_check_mark:\", use_aliases=True)\n update.message.reply_text('Notificacao cancelada com sucesso'+check+'')", "def rm_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could remove.\" % label)\n return\n # the task exists, so remove it\n query_no_results(\"delete from task where label = ?\", [label]) \n # remove all person associations\n query_no_results(\"delete from task_person_pair where task = ?\", [label])\n print(\"Task with label '%s' removed.\" % label)", "def event_remove_manual(event_name:str):\n event_log(\"user deleted alarm: \",str(event_name))\n global removed_manually\n removed_manually = True\n event_remove(event_name)\n return 
render_template(\"alarm.html\", Events_list=Events_list, notification_list=notification_list)", "def remove():\n osname = None\n is_64bit = sys.maxsize > 2**32\n bitsize_dict = {True: 64, False: 32}\n bitsize = bitsize_dict[is_64bit]\n if platform in LINUX_PLATFORMS:\n printos('Linux', bitsize)\n ubuntu_remove()\n elif platform == \"darwin\":\n printos('Mac OS X', bitsize)\n mac_remove()\n elif platform in WINDOWS_PLATFORMS:\n printos('Windows', bitsize)\n windows_remove(bitsize)\n print('Done!')", "async def remove(self, ctx, *, link):\r\n try:\r\n if link not in self.adkillr[ctx.message.server.id]['filters']:\r\n await self.bot.say(\"That link is not in the current filters.\")\r\n else:\r\n self.adkillr[ctx.message.server.id]['filters'].remove(link)\r\n await self.bot.say(\"Filter removed.\")\r\n except KeyError:\r\n await self.bot.say(\"There are no filters set.\")", "async def wordfilter_remove(self, ctx, *, phrase):\n phrase = phrase.lower()\n await self.bot.redis.lrem('wordfilter', 0, phrase)\n self.words.remove(phrase)\n await ctx.send(f'Removed `{phrase}` from the filtered words')", "def delete_word(event):\n get_by_name(\"backward-kill-word\").call(event)", "def remove_package(package, remote):\n flavor = remote.os.package_type\n if flavor == 'deb':\n pkgcmd = ['DEBIAN_FRONTEND=noninteractive',\n 'sudo',\n '-E',\n 'apt-get',\n '-y',\n 'purge',\n '{package}'.format(package=package)]\n elif flavor == 'rpm':\n # FIXME: zypper\n pkgcmd = ['sudo',\n 'yum',\n '-y',\n 'erase',\n '{package}'.format(package=package)]\n else:\n log.error('remove_package: bad flavor ' + flavor + '\\n')\n return False\n return remote.run(args=pkgcmd)", "def snap_remove(packages, *flags):\n if type(packages) is not list:\n packages = [packages]\n\n flags = list(flags)\n\n message = 'Removing snap(s) \"%s\"' % ', '.join(packages)\n if flags:\n message += ' with options \"%s\"' % ', '.join(flags)\n\n log(message, level='INFO')\n return _snap_exec(['remove'] + flags + packages)", "def delete_app(self, app_id):\n return req(self.logger, self.access_token, 'DELETE', '/apps/'+app_id, {})", "def test_remove_pkg_conanmanifest(self, setup):\n client, pref = setup\n server = client.servers[\"default\"]\n\n path = server.test_server.server_store.package(pref)\n manifest = os.path.join(path, \"conanmanifest.txt\")\n os.unlink(manifest)\n client.run(\"install --requires=hello/0.1\", assert_error=True)\n assert \"ERROR: Binary package not found: 'hello/0.1\" in client.out", "def filter(q_words):\n filtered_words = [\"how\",\"what\"]\n for word in q_words:\n if word in filtered_words:\n q_words.remove(word)", "def madlib_remove(text, types_to_remove):\n # Tokenize the text\n tokens = word_tokenize(text)\n # Gets rid of weirdness\n tokens = remove_weirdness(tokens)\n # Gets parts of speech\n orignial = pos_tag(tokens)\n word_lst = []\n \n word_index = 0\n # next_remove is used along with word_index\n # to determine if we want to remove a word\n next_remove = random.randint(1, 5) \n \n # While our counter (word_index) is less than the length of the text... 
\n while word_index < len(orignial):\n # extract a word and its PoS\n word, pos = orignial[word_index]\n \n remove = False\n # If it's part of speech is one of the correct ones\n if pos in types_to_remove:\n target_type = True\n else:\n target_type = False\n \n # If word_index == next_remove, then we want to remove this word,\n if word_index == next_remove:\n # But only if it's of the correct PoS\n if target_type:\n remove = True\n # If it is, we want to increment next_remove a little bit more,\n # So we don't remove a bunch of words in a row\n next_remove += random.randint(3, 6)\n else:\n # If it's not of the correct type, we increment next_remove\n # more slowly, so we can hopefully find the next word that is of\n # the correct part of speech\n next_remove += random.randint(1, 2)\n \n # And lastly if we want to remove the word, we append it's PoS, and if not,\n # we append it's word\n if remove:\n word_lst.append(pos)\n else:\n word_lst.append(word)\n \n # Increment Counter\n word_index += 1\n \n \n return word_lst", "def stop_app(self, package: str) -> None:\n self.shell(['am', 'force-stop', package])", "def main(self, *extra_args):\n self.arguments.message = 'Pruned MediaWiki: %s' % self.arguments.branch\n if not self.arguments.delete:\n self.arguments.message += ' [keeping static files]'\n self.arguments.force = False\n return super(Clean, self).main(*extra_args)", "async def remove(message: discord.Message, opt: options):\n for q in db.data[\"questions\"]:\n if q[\"choices\"][0] == opt[0] and q[\"choices\"][1] == opt[1]:\n db.data[\"questions\"].remove(q)\n db.save()\n await client.say(message, \"**Entry removed.**\")\n break\n else:\n await client.say(message, \"**Could not find the question.**\")", "def remove(self, *args):\n self.__execute(self.pkgin_bin, \"remove\", *args)", "def remove_wearable(self, pid: str):\n if pid in self.wearables:\n del self.wearables[pid]", "def delete_app(self, app_full_name):\n ai = self.get_app_instances_configs(app_full_name=app_full_name)\n log.info(\"Deleting app %s\" % app_full_name)\n for ai_item in ai:\n log.info(\"Deleting app instance %s\" % ai_item[\"alias\"])\n self.delete_app_instance(ai_item[\"id\"])\n log.info(\"Deleting application manifest\")\n ad = self.get_app_manifest(app_full_name)\n self.app_manifests.remove(ad)\n app_dir = os.path.join(self.apps_dir_path, \"lib\", app_full_name)\n log.info(\"Deleting app folder %s\" % app_dir)\n shutil.rmtree(app_dir)\n log.info(\"App %s was deleted\" % app_full_name)", "def do_del_item(self, arg):\n try:\n del_item = arg[\"<list_name>\"]\n choice = arg[\"--choice\"]\n if choice == \"name\":\n del_item_str = \" \".join(del_item)\n print(del_item_str)\n elif choice == \"id\":\n del_item_str = int(\" \".join(del_item))\n print (del_item_str)\n app.ToDoApp.to_delete_item(del_item_str)\n print (\"Item deleted\")\n\n\n \n except ValueError as e:\n cprint((e), 'red')", "def test_05d_get_nonexistant_app_delete(self):\r\n self.register()\r\n # GET\r\n res = self.app.get('/app/noapp/delete', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.data\r\n # POST\r\n res = self.delete_application(short_name=\"noapp\")\r\n assert res.status == '404 NOT FOUND', res.status", "def esp32_app_clean(ctx):\n _run_idf_script(ctx, \"fullclean\")", "def remove(ip):\n return __apf_cmd(\"-u {}\".format(ip))", "def test_remove_share(self):\n self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam\", status=400)\n 
self.app.delete(url=\"/config/shares?share=80&destination=gsiftp://nowhere&vo=dteam&source=gsiftp://source\", status=204)", "def delete_notification():\r\n name = request.args.get('notif')\r\n logging.info(\"Notification deleted in delete_notification(): \" + name)\r\n for notif in notifications:\r\n if notif['title'] == name:\r\n notifications.remove(notif)", "def delete_app_by_name(self, name):\n app = self.get_app_by_name(name)\n if app is False:\n Log.an().error('cannot get app by name: %s', name)\n return False\n\n if not app:\n Log.an().error('app \"%s\" not found', name)\n return False\n\n if len(app) > 1:\n Log.an().error(\n 'non-unique app \"%s\", try deleting by id instead', name\n )\n return False\n\n return self.delete_app_by_id(app[0]['id'])", "def remove_items(todofile, items):\n for item in filter(lambda x: x.itemid in items, todofile.fetch_items()):\n todofile.remove_todo(item)", "def remove(self, word):\n\t\tif word in self.link_words:\n\t\t\tself.link_words.remove(word)", "def delete(tbd, tipe):\n des = \"Xblog/docs/\" + tbd.replace(\".ipynb\", \".html\")\n uninstall(des)\n if tipe == \"Xpage\":\n os.remove(des)\n ccc.success(\"deleting \" + des)\n des_pdf = des.replace(\".html\",\".pdf\").replace(\"notebooks\", \"pdfs\")\n os.remove(des_pdf)\n ccc.success(\"deleting \" + des_pdf)\n if tbd == \"notebooks/welcome.ipynb\":\n if os.path.isfile(\"Xblog/README.md\"):\n cnv.md2html()\n else:\n with open(\"Xblog/docs/notebooks/welcomme.html\", 'w') as f:\n f.write(\"<html>\\n<body>\\n<h1 align=\\\"center\\\">Welcome to Xbooks blogs!</h1>\\n<h4 align=\\\"center\\\">This blog has no welcome page<br/>if you're maintainer of this blog, kindly write either README.md or notebooks/welcome.ipynb file!</h4>\\n</body>\\n</html>\\n\")\n f.close()\n if tipe == \"Xbook\":\n shutil.rmtree(des)\n ccc.success(\"deleting \" + des)\n return True", "def test_remove_pacakge_latest_version(self, type_, public_id):\n assert public_id.package_version.is_latest\n # we need this because there isn't a default contract/connection\n if type_ == \"connection\":\n self.add_item(\"connection\", str(public_id))\n if type_ == \"contract\":\n self.add_item(\"contract\", str(public_id))\n\n # first, check the package is present\n items_path = os.path.join(self.agent_name, \"vendor\", \"fetchai\", type_ + \"s\")\n items_folders = os.listdir(items_path)\n item_name = public_id.name\n assert item_name in items_folders\n\n # remove the package\n with patch(\"aea.cli.remove.RemoveItem.is_required_by\", False):\n self.run_cli_command(\n *[\"remove\", type_, str(public_id)], cwd=self._get_cwd()\n )\n\n # check that the 'aea remove' took effect.\n items_folders = os.listdir(items_path)\n assert item_name not in items_folders", "def purge(self, message_list, action, userId='me'):\n\n count = 0\n for item in message_list:\n if action.lower() == 'archive':\n resource = getattr(self.connection.users().messages(), 'modify')\n dynamic_request = resource(userId=userId, id=message_list[item], body=\n {\n \"removeLabelIds\": [ \"INBOX\" ]\n })\n else:\n resource = getattr(self.connection.users().messages(), action)\n dynamic_request = resource(userId=userId, id=message_list[item])\n\n try:\n response = dynamic_request.execute()\n count += 1\n print(f'[√] Action: {action} - {count} of {len(message_list)} - Message ID: {message_list[item]}')\n except googleapiclient.errors.HttpError as error:\n if error.resp.status == 404:\n print(f'[X] Error: ID {message_list[item]} Not Found')\n else:\n print(f'[X] Error: ID {mesage_list[item]} 
{error}')\n count -= 1\n print(f'[√] Processed: {count} of {len(message_list)} Messages')\n return True", "def remove_app(request, app, device=0):\n context = {}\n app = get_object_or_404(MacOSApp, pk=app)\n if device == 0:\n # Completely remove Application from MDM\n mode = 'delete'\n if app.installed.all().count() == 0:\n app.delete()\n messages.success(request, \"Application was successfully deleted\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = UninstallAppForm(mode=mode)\n else:\n # Unlink app from device\n laptop = get_object_or_404(Laptop, pk=device)\n if app in laptop.apps_pending.all():\n laptop.apps_pending.remove(app)\n messages.success(request, \"Application is no longer assigned to {}\".format(laptop.name),\n extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n # If pending removal reset to installed status\n if app in laptop.apps_remove.all():\n laptop.apps_installed.add(app)\n laptop.apps_remove.remove(app)\n messages.success(request, \"Removal request cancelled\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n if app in laptop.apps_installed.all():\n mode = 'disassociate'\n context['form'] = UninstallAppForm(mode=mode)\n else:\n raise Http404\n\n # Handle form data\n if request.method == 'POST':\n form = UninstallAppForm(request.POST, mode=mode)\n if form.is_valid():\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, app=app, device=laptop, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n laptop.apps_installed.remove(app)\n messages.success(request, \"Application successfully removed from {}\".format(laptop.name),\n extra_tags='success')\n else:\n app.delete()\n messages.success(request, \"Application deleted successfully\")\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = form\n return render(request, 'form_crispy.html', context)", "def disintegrate():\n click.confirm('Do you really want to uninstall?', abort=True)\n if click.confirm('Do you want to remove installed AppImages?'):\n cfgmgr = ConfigManager()\n if os.path.exists(cfgmgr['bin']):\n print(fc(\"{y}Removing bin for appimages{rst}\"))\n shutil.rmtree(cfgmgr['bin'], ignore_errors=True)\n if os.path.exists(cfgmgr['storageDirectory']):\n print(fc(\"{y}Removing storageDirectory for appimages{rst}\"))\n shutil.rmtree(cfgmgr['storageDirectory'], ignore_errors=True)\n print(fc(\"{y}Removing zap binary entrypoint{rst}\"))\n for path in os.getenv('PATH').split(os.pathsep):\n zap_bin = os.path.join(path, 'zap')\n if os.path.exists(zap_bin):\n os.remove(zap_bin)\n break\n print(fc(\"{y}Removing zap AppImage {rst}\"))\n dot_zap = os.path.join(os.path.expanduser('~'), '.zap')\n if os.path.exists(dot_zap):\n shutil.rmtree(dot_zap, ignore_errors=True)", "def test_duo_application_delete(self):\n pass" ]
[ "0.6542366", "0.63983655", "0.635603", "0.62508076", "0.6105173", "0.60880226", "0.5995787", "0.59228504", "0.5885536", "0.58168525", "0.57712317", "0.5742939", "0.56851757", "0.56586546", "0.5657139", "0.5622254", "0.5595414", "0.5593344", "0.55384624", "0.55296105", "0.55245054", "0.5517181", "0.5510308", "0.5510308", "0.55049574", "0.5477803", "0.5466714", "0.54657143", "0.54256463", "0.54005426", "0.5352143", "0.5351118", "0.53442717", "0.53383195", "0.53327423", "0.53285146", "0.5326282", "0.52951545", "0.52776575", "0.52765065", "0.5270382", "0.5256349", "0.52515244", "0.52283615", "0.52261406", "0.52163744", "0.5214007", "0.5203568", "0.51926726", "0.5174133", "0.51677233", "0.51590407", "0.51527494", "0.51371896", "0.5136094", "0.51317436", "0.5130655", "0.51196337", "0.5104204", "0.51009893", "0.508285", "0.50794697", "0.50759995", "0.5057723", "0.5054439", "0.5054052", "0.5051596", "0.5051144", "0.50502366", "0.5047483", "0.5046766", "0.5045884", "0.5043605", "0.5042346", "0.50385356", "0.502668", "0.5022292", "0.5020289", "0.50189763", "0.50169706", "0.50160974", "0.5015271", "0.50148374", "0.50027657", "0.50021845", "0.4997918", "0.49897635", "0.49890533", "0.49890435", "0.4983633", "0.49788573", "0.49740064", "0.49725953", "0.4966552", "0.49640572", "0.49602672", "0.4958609", "0.4956995", "0.49518782", "0.4944319" ]
0.67598
0
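Taken together, the two complete records in this section suggest how document_rank is derived: it equals the number of negative_scores entries strictly greater than document_score, i.e. the document's 0-indexed rank among all scored candidates (rank 1 in the earlier record, where the top negative 0.71768415 beats the document's 0.64636713; rank 0 just above, where the document's 0.67598 beats its best negative 0.6542366). A minimal sketch of that relationship, inferred from these two samples rather than from any stated dataset spec:

def document_rank(document_score, negative_scores):
    """0-indexed rank of the gold document; scores are stored as strings."""
    doc = float(document_score)
    return sum(float(s) > doc for s in negative_scores)

# Abridged score lists from the two records above:
assert document_rank("0.64636713", ["0.71768415", "0.6380013"]) == 1
assert document_rank("0.67598", ["0.6542366", "0.63983655"]) == 0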
\b
Publishes an app to 21 Marketplace.
$ 21 publish submit path_to_manifest/manifest.yaml
The contents of the manifest file should follow the guidelines specified at
def submit(ctx, manifest_path, marketplace, skip, parameters):
    if parameters is not None:
        try:
            parameters = _parse_parameters(parameters)
        except:
            logger.error(
                "Manifest parameter overrides should be in the form 'key1=\"value1\" "
                "key2=\"value2\".",
                fg="red")
            return
    _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"support@21.co.\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. 
\"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def main(owner: str, repository: str, token: str, tag: Optional[str]) -> None:\n if tag is None:\n today = datetime.date.today()\n tag = f\"{today:%Y.%-m.%-d}\"\n\n try:\n publish_release(\n owner=owner,\n repository_name=repository,\n token=token,\n tag=tag,\n )\n except Exception as error:\n click.secho(f\"error: {error}\", fg=\"red\")\n sys.exit(1)", "def publish():\n pass", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, 
resp.status_code, resp.text)", "def deploy():\n local('appcfg.py --no_cookies --email=mccutchen@gmail.com update .',\n capture=False)", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def test_publish_deployment_run(self):\n pass", "def finish_publish(hash, metadata, engine_id=None, username=USER):\n identity = \"%s@%s\" % (username, get_config('domain'))\n library = Library.objects.get(identity=identity)\n library.add_item(\n engine_id=engine_id,\n origin=identity,\n metadata=metadata\n )\n return \"OK\"", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)", "def upload():\n sh('python setup.py register sdist upload')", "def deploy():\n build()\n collect()\n commit()\n push()", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n 
with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def publish_release(ctx):\n rel = _get_release()\n rel.update_release(rel.title, rel.raw_data[\"body\"], draft=False)", "def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)", "def run_post_publishers():\n from anima.env import mayaEnv\n m_env = mayaEnv.Maya()\n\n version = m_env.get_current_version()\n\n # check if we have a proper version\n if not version:\n return\n\n # check if it is a Representation\n from anima.repr import Representation\n if Representation.repr_separator in version.take_name:\n return\n\n if version.is_published:\n from anima.publish import (run_publishers, staging, PRE_PUBLISHER_TYPE,\n POST_PUBLISHER_TYPE)\n # before doing anything run all publishers\n type_name = ''\n if version.task.type:\n type_name = version.task.type.name\n\n # before running use the staging area to store the current version\n staging['version'] = version\n run_publishers(type_name, publisher_type=POST_PUBLISHER_TYPE)\n # do not forget to clean up the staging area\n staging.clear()", "def deploy_app(self, app_info):\n raise NotImplementedError", "def publish(self,toolname):\n\n self.logger.info(\"publishing '%s'\" % (toolname))\n\n po = self.catalog.load_pageobject('ToolsStatusApprovedAdminPage',toolname)\n po.goto_page()\n\n # click the publish link\n publish_status,output = po.do_publish()\n\n # wait for the output success / failure block to appear\n if publish_status is False:\n raise RuntimeError(\"finalizetool failed: %s\" % (output))\n\n # mark project as created\n self.flip_tool_status('ToolsStatusApprovedAdminPage',toolname,'Published')\n\n # check that the tool is in the published state\n tool_state = po.get_tool_state()\n if tool_state.lower() != 'Published'.lower():\n raise Exception('Incorrect tool state: %s, expected \"Published\"'\\\n % tool_state)", "def deploy_application(target_environment, config_file, branch, force): # noqa\n # read in and parse configuration\n app = config.AppConfiguration.load(\n config_file or\n os.path.join(settings.app_conf_dir, '%s.conf' % target_environment)\n )\n app_name = app.app_name\n branch = branch or app.default_branch or git.get_current_branch()\n\n # get the contents of the proposed deployment\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n remote_hash = release.commit\n if app.use_pipeline:\n # if we are using pipelines, then the commit we need is not the\n # local one, but the latest 
version on the upstream app, as this\n # is the one that will be deployed.\n upstream_release = heroku.HerokuRelease.get_latest_deployment(app.upstream_app) # noqa\n local_hash = upstream_release.commit\n else:\n local_hash = git.get_branch_head(branch)\n\n if local_hash == remote_hash:\n click.echo(u\"Heroku application is up-to-date, aborting deployment.\")\n return\n\n files = git.get_files(remote_hash, local_hash)\n commits = git.get_commits(remote_hash, local_hash)\n\n post_deploy_tasks = app.post_deploy_tasks\n\n click.echo(\"\")\n click.echo(\"Comparing %s..%s\" % (remote_hash, local_hash))\n click.echo(\"\")\n click.echo(\"The following files have changed since the last deployment:\\n\") # noqa\n if len(files) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" * %s\\n\" % f for f in files]))\n click.echo(\"\")\n click.echo(\"The following commits will be included in this deployment:\\n\") # noqa\n if len(commits) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" [%s] %s\\n\" % (c[0], c[1]) for c in commits]))\n\n # ============== summarise actions ==========================\n click.echo(\"\")\n click.echo(\"Summary of deployment options:\") # noqa\n click.echo(\"\")\n click.echo(\" ----- Deployment SETTINGS -----------\")\n click.echo(\"\")\n click.echo(\" Git branch: %s\" % branch)\n click.echo(\" Target env: %s (%s)\" % (target_environment, app_name))\n click.echo(\" Force push: %s\" % force)\n # pipeline promotion - buildpack won't run\n click.echo(\" Pipeline: %s\" % app.use_pipeline)\n if app.use_pipeline:\n click.echo(\" Promote: %s\" % app.upstream_app)\n click.echo(\" Release tag: %s\" % app.add_tag)\n click.echo(\"\")\n click.echo(\" ----- Post-deployment commands ------\")\n click.echo(\"\")\n\n if not post_deploy_tasks:\n click.echo(\" (None specified)\")\n else:\n [click.echo(\" %s\" % x) for x in post_deploy_tasks]\n\n click.echo(\"\")\n # ============== / summarise actions ========================\n\n # put up the maintenance page if required\n maintenance = utils.prompt_for_action(\n u\"Do you want to put up the maintenance page?\",\n False\n )\n\n if not utils.prompt_for_pin(\"\"):\n exit(0)\n\n if maintenance:\n click.echo(\"Putting up maintenance page\")\n heroku.toggle_maintenance(app_name, True)\n\n if app.use_pipeline:\n click.echo(\"Promoting upstream app: %s\" % app.upstream_app)\n heroku.promote_app(app.upstream_app)\n else:\n click.echo(\"Pushing to git remote\")\n git.push(\n remote=git.get_remote_url(app_name),\n local_branch=branch,\n remote_branch='master',\n force=force\n )\n\n if post_deploy_tasks:\n click.echo(\"Running post-deployment tasks:\")\n run_post_deployment_tasks(post_deploy_tasks)\n\n if maintenance:\n click.echo(\"Pulling down maintenance page\")\n heroku.toggle_maintenance(app_name, False)\n\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n if app.add_tag:\n click.echo(\"Applying git tag\")\n message = \"Deployed to %s by %s\" % (app_name, release.deployed_by)\n git.apply_tag(commit=local_hash, tag=release.version, message=message)\n\n click.echo(release)", "def PublishIt(name, path, comments, task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n\n PubCollections = db['submissions']\n\n # creation of the dailies submission entry\n publishDict = dict()\n publishDict['date'] = now\n publishDict['type'] = \"publish\"\n publishDict['user_name'] = main_user\n publishDict['task'] = task\n publishDict['status'] = status\n publishDict['asset'] = name\n 
publishDict['path'] = path\n publishDict['comment'] = comments\n PubCollections.save(publishDict)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"publish\", shot, now)", "def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package", "def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. May require manual removal.\", LOG.ERROR)", "def installApp(dev, apkFile=None, appPackage=None, outFile=None, local=False):\n certFile = scriptRoot + '/certs/localtest.me.pem'\n with ServerContext(LocalMarketServer(certFile, config.officialServer)) as server:\n if apkFile:\n server.setApk(apkFile.read())\n elif appPackage:\n print('Downloading apk')\n apps = listApps(True)\n if appPackage not in apps:\n raise Exception('Unknown app: %s' % appPackage)\n server.setApk(apps[appPackage].release.asset)\n\n print('Starting task')\n xpdData = server.getXpd()\n\n print('Starting communication')\n # Point the camera to the web api\n result = installer.install(dev, server.host, server.port, xpdData, printStatus)\n if result.code != 0:\n raise Exception('Communication error %d: %s' % (result.code, result.message))\n\n result = server.getResult()\n\n if not local:\n try:\n RemoteAppStore(config.appengineServer).sendStats(result)\n except:\n pass\n\n print('Task completed successfully')\n\n if outFile:\n print('Writing to output file')\n json.dump(result, outFile, indent=2)\n\n return result", "def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)", "def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)", "def deploy(self):\n\n netlify_cli = getattr(settings, \"NETLIFY_PATH\", None)\n if not netlify_cli:\n raise CommandError(\"NETLIFY_PATH is not defined in settings\")\n\n deployment = Deployment()\n deployment.save()\n\n command = [netlify_cli, \"deploy\"]\n command.append(\"--dir={}\".format(settings.BUILD_DIR))\n command.append(\"--prod\")\n command.append('--message=\"Wagtail Deployment #{}\"'.format(deployment.pk))\n\n site_id = getattr(settings, \"NETLIFY_SITE_ID\", None)\n if site_id:\n command.append(\"--site={}\".format(site_id))\n\n auth_token = getattr(settings, \"NETLIFY_API_TOKEN\", None)\n if auth_token:\n 
command.append(\"--auth={}\".format(auth_token))\n\n subprocess.call(command)", "def deploy(args):\n from scrapyd_client import deploy\n\n sys.argv.pop(1)\n deploy.main()", "def _create(self, parsed_args):\n if self.create:\n try:\n resp = self.tapis_client.apps.add(body=self.document)\n self.messages.append(\n ('create', 'Created Tapis app {} revision {}'.format(\n resp.get('id'), resp.get('revision'))))\n return True\n except Exception as exc:\n if self.ignore_errors:\n self.messages.append(('create', exc))\n return False\n else:\n raise\n\n return True", "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def deploy():", "def deploy(ctx):\n click.echo('deploying')\n ctx.deploy()\n click.echo('done')", "def push_blog():\n\n\twarn(green(\"Update blog on github pages.\"))\n\t_setup_virtualenv()\n\n\twith cd(PROJECT_PATH):\n\t\twith prefix(env.activate):\n\t\t\tlocal('python blog.py build', shell='/bin/bash')\n\n\t\tlocal('cd {}'.format(FREEZER_DESTINATION), shell='/bin/bash')\n\t\tlocal('git status')\n\t\task_msg = red(\"Force push new content to blog?\")\n\t\tif console.confirm(ask_msg, default=False) is True:\n\t\t\tlocal('git add --all')\n\t\t\tlocal('git commit -m \"new articles\"')\n\t\t\tlocal('git push --force origin master')", "async def publish(self, topic: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n acknowledge: bool = None,\n blackwhitelist: aiowamp.BlackWhiteList = None,\n exclude_me: bool = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> None:\n ...", "def deploy(fingerengine, fingerprint):\n\n global cookie \n\n cfm_path = abspath(fingerengine.options.deploy) \n cfm_file = parse_war_path(cfm_path, True)\n dip = fingerengine.options.ip\n\n # set our session cookie\n cookie = checkAuth(dip, fingerprint.port, title)\n if not cookie:\n utility.Msg(\"Could not get auth to %s:%s\" % (dip, fingerprint.port),\n LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}..\".format(cfm_file))\n utility.Msg(\"Fetching web root..\", LOG.DEBUG)\n\n # fetch web root; i.e. 
where we can read the shell\n root = fetch_webroot(dip, fingerprint)\n if not root:\n utility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n return\n\n # create the scheduled task \n utility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n utility.Msg(\"Creating scheduled task...\")\n\n if not create_task(dip, fingerprint, cfm_file, root):\n return\n\n # invoke the task\n utility.Msg(\"Task %s created, invoking...\" % cfm_file)\n run_task(dip, fingerprint, cfm_path)\n \n # remove the task\n utility.Msg(\"Cleaning up...\")\n delete_task(dip, fingerprint, cfm_file)", "def deploy_app(device_id, app_id, app_version):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/applications\"}\n versions = esapp.App(kargs).get_app_version_by_id(app_id)\n\n kargs.update({\"url_path\": \"/tasks\"})\n if not app_version in versions:\n sys.exit(\"Fail: app_version \\\"%s\\\" not found, available list:%s\" \\\n %(str(app_version), str(jsn.dumps(versions))))\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.create_app_task(device_id, app_version, app_id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n sys.exit(\"Fail: error response\")\n\n try:\n click.echo(\"Success to create a task id: %s\" %(str(dict_resp[\"task_id\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(1)", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the 
archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def publish(self, kpi_dict):\n pass", "def publish_updates():\n run_subprocess(['osg-batch-update'])", "def deploy():\n build()\n copy()\n install()", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def release_pypi():\n local('python setup.py clean sdist register upload')", "def post_package():\n package_file = BytesIO()\n with tarfile.open(mode='w', fileobj=package_file) as tar:\n # metadata\n meta_content = b'encoding: utf-8\\npost: post.md'\n file_info = tarfile.TarInfo('package.yml')\n file_info.size = len(meta_content)\n tar.addfile(file_info, BytesIO(meta_content))\n\n # post\n post_content = b'''---\ntitle: A title\ntopic: A topic\n---\n\n[summary]\nA summary\n\nA paragraph\n'''\n file_info = tarfile.TarInfo('post.md')\n file_info.size = len(post_content)\n tar.addfile(file_info, BytesIO(post_content))\n package_file.seek(0)\n\n return package_file", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def publish(\n self,\n db: PysonDB,\n *,\n production: bool, # PyPI or Test-PyPi\n build=False, #\n force=False, # publish even if no changes\n dry_run=False, # do not actually publish\n clean: bool = False, # clean up afterwards\n ) -> (\n bool\n ): # sourcery skip: assign-if-exp, default-mutable-arg, extract-method, remove-unnecessary-else, require-parameter-annotation, swap-if-else-branches, swap-if-expression\n log.info(f\"Publish: {self.package_path.name}\")\n # count .pyi files in the package\n filecount = len(list(self.package_path.rglob(\"*.pyi\")))\n if filecount == 0:\n log.debug(f\"{self.package_name}: starting build as no .pyi 
files found\")\n build = True\n\n if build or force or self.is_changed():\n self.build(production=production, force=force)\n\n if not self._publish:\n log.debug(f\"{self.package_name}: skip publishing\")\n return False\n\n self.update_pkg_version(production=production)\n # Publish the package to PyPi, Test-PyPi or Github\n if self.is_changed() or force:\n if self.mpy_version == \"latest\":\n log.warning(\"version: `latest` package will only be available on Github, and not published to PyPi.\")\n self.status[\"result\"] = \"Published to GitHub\"\n else:\n self.update_hashes() # resets is_changed to False\n if not dry_run:\n pub_ok = self.poetry_publish(production=production)\n else:\n log.warning(f\"{self.package_name}: Dry run, not publishing to {'' if production else 'Test-'}PyPi\")\n pub_ok = True\n if not pub_ok:\n log.warning(f\"{self.package_name}: Publish failed for {self.pkg_version}\")\n self.status[\"error\"] = \"Publish failed\"\n return False\n self.status[\"result\"] = \"Published to PyPi\" if production else \"Published to Test-PyPi\"\n self.update_hashes()\n if dry_run:\n log.warning(f\"{self.package_name}: Dry run, not saving to database\")\n else:\n # get the package state and add it to the database\n db.add(self.to_dict())\n db.commit()\n return True\n else:\n log.info(f\"No changes to package : {self.package_name} {self.pkg_version}\")\n\n if clean:\n self.clean()\n return True", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def publish(self, review_request):\r\n self.debug('Publishing')\r\n self.api_call('api/review-requests/%s/publish/' %\r\n review_request['id'])", "def process_manifest(vb, options):\n if not options.manifest:\n return\n\n vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,\n options.manifest_release_version)", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def publish_messages(line): \n command = \"gcloud beta pubsub topics publish \"+ topic_name+\" --message \"+'\"'+str(line)+'\"'\n os.system(command)", "def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)", "def post( self, application=None, event=None,\n description=None,priority=0, providerkey = None):\n\n # Create the http object\n h = Https(API_DOMAIN)\n \n # Perform the request and get the response headers and content\n data = {\n 'apikey': self.apikey,\n 'application': application,\n 'event': event,\n 'description': description,\n 'priority': priority\n }\n\n if providerkey is not None:\n data['providerkey'] = providerkey\n\n h.request( \"POST\",\n \"/publicapi/add\",\n headers = self.headers,\n body = urlencode(data))\n response = h.getresponse()\n request_status = response.status\n\n if request_status == 200:\n return True\n elif request_status == 401:\n raise Exception(\"Auth Failed: %s\" % response.reason)\n else:\n raise Exception(\"Failed\")", "def main(args):\n\n data = {\n 'id': '00353',\n 'expanded_folder': '00353.1/9a0f0b0d-1f0b-47c8-88ef-050bd9cdff92',\n 'version': '1',\n 'status': 'VOR',\n 'updated_date': datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%S\")\n }\n\n settings = settings_lib.get_settings('exp')\n identity = \"resize_%s\" % int(random.random() * 1000)\n log_file = \"worker.log\"\n logger = log.logger(log_file, settings.setLevel, identity)\n conn = 
boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)\n act = activity_ArchiveArticle(settings, logger, conn=conn)\n act.do_activity(data)", "def add_publish_command(\n self, relative_manifest_path: str, asset_selector: str\n ) -> None:\n return jsii.invoke(\n self, \"addPublishCommand\", [relative_manifest_path, asset_selector]\n )", "def main():\n args = parse_args(sys.argv[1:])\n try:\n push_script_path = get_push_executable()\n bintray = Bintray(args.bintray_credential, args.bintray_subject, args.bintray_repo, push_script_path, component=args.bintray_component, distribution=args.bintray_distribution, architecture=args.bintray_architecture)\n\n return_dict_detail = upload_debs(args.build_directory, args.debian_depth, bintray)\n for key, value in return_dict_detail.items():\n print \"{key}: {value}\".format(key=key, value=value)\n except Exception, e:\n print e\n sys.exit(1)", "def publish(digest, pid, api_key, tag):\n url = \"https://connect.redhat.com/api/v2/projects/{}/containers/{}/tags/{}/publish\".format(pid, digest, tag)\n headers = {\"accept\": \"*/*\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer {}\".format(api_key)}\n\n response = requests.post(url, headers=headers)\n\n if response.status_code != 201:\n print(\"Unable to publish, invalid status code: {}.\".format(response.status_code))\n print(response)\n print(response.content)\n sys.exit(1)\n else:\n print(\"Docker image '{}' successfully scheduled for publishing.\".format(digest))", "def deploy(parameters):\n\n print(\"In deploy module\")", "def main(event):\n post = Post(frontmatter.load(event))\n if post.meetup_id is None:\n resp = create_meetup(post)\n post.meetup_id = resp[\"id\"]\n post.write(event)\n else:\n resp = update_meetup(post)\n # buttons(post)", "def create_manifest(\n upload_dir,\n study_id,\n analysis_id,\n song_url,\n auth_token\n):\n files_dir = os.path.join(upload_dir, 'files')\n manifest_dir = os.path.join(upload_dir, 'manifests')\n song_client = SongClient(\n song_url,\n auth_token,\n VERIFY_CERTIFICATES\n )\n manifest = song_client.get_analysis_manifest(\n study_id,\n analysis_id,\n files_dir\n )\n if os.path.isdir(manifest_dir):\n shutil.rmtree(manifest_dir)\n os.makedirs(manifest_dir)\n manifest.write(\n os.path.join(manifest_dir, 'manifest.txt'),\n overwrite=True\n )", "def publish(\n hass: HomeAssistant,\n topic: str,\n payload: PublishPayloadType,\n qos: int | None = 0,\n retain: bool | None = False,\n encoding: str | None = DEFAULT_ENCODING,\n) -> None:\n hass.add_job(async_publish, hass, topic, payload, qos, retain, encoding)", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def create_application(name=None, description=None):\n pass", "def archiveApp(appName, appUID):\n logger.debug('[FLASKWEB /app/<appName>/<appUID>] %s Request for App Archive `%s`, UID=`%s`' % (request.method, appName, appUID))\n applist = [a['name'] for a in db.getAllApps()]\n uname = AppID.getAppId(appName, appUID)\n\n # if appName not in applist:\n # logger.warning(\"Archive request for app that does not exist: %s\", appName)\n # return returnError(\"Application %s does not exist\" % appName, 404)\n\n if request.method == 'POST':\n file = request.files['file']\n if file:\n filename = secure_filename(file.filename)\n path = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname).encode(encoding='utf8', errors='ignore')\n logger.debug(\"Archiving file, %s, to %s\" % 
(filename, path))\n if not os.path.exists(path):\n os.mkdir(path)\n file.save(os.path.join(path, filename))\n return \"File Uploaded & archived\\n\", 202\n else:\n logger.warning(\"Archive request, but no file provided.\")\n return \"No file received\\n\", 400\n\n elif request.method == 'GET':\n path = os.path.join(webapp.config['UPLOADED_BUILD_URL'], uname)\n return redirect(path, 302)", "def main():\n Log.info('Installing...')\n app = Application()\n app.run()\n Log.info(\"Done successfully.\")", "def __submit_application(self, app, resubmit, targets, **extra_args):\n\n gc3libs.log.debug(\"Submitting %s ...\", app)\n\n # auto_enable_auth = extra_args.get(\n # 'auto_enable_auth', self.auto_enable_auth)\n\n job = app.execution\n if resubmit:\n job.state = Run.State.NEW\n elif job.state != Run.State.NEW:\n return\n\n # Validate Application local input files\n for input_ref in app.inputs:\n if input_ref.scheme == 'file':\n # Local file, check existence before proceeding\n if not os.path.exists(input_ref.path):\n raise gc3libs.exceptions.UnrecoverableDataStagingError(\n \"Input file '%s' does not exist\" % input_ref.path,\n do_log=True)\n\n if targets is not None:\n assert len(targets) > 0\n else: # targets is None\n enabled_resources = [\n r for r in self.resources.itervalues() if r.enabled]\n if len(enabled_resources) == 0:\n raise gc3libs.exceptions.NoResources(\n \"Could not initialize any computational resource\"\n \" - please check log and configuration file.\")\n\n # decide which resource to use\n compatible_resources = self.matchmaker.filter(\n app, enabled_resources)\n if len(compatible_resources) == 0:\n raise gc3libs.exceptions.NoResources(\n \"No available resource can accomodate the application\"\n \" requirements\")\n gc3libs.log.debug(\n \"Application compatibility check returned %d matching\"\n \" resources\", len(compatible_resources))\n\n if len(compatible_resources) <= 1:\n # shortcut: no brokering to do, just use what we've got\n targets = compatible_resources\n else:\n # update status of selected resources\n self.update_resources(compatible_resources)\n updated_resources = [r for r in compatible_resources if r.updated]\n if len(updated_resources) == 0:\n raise gc3libs.exceptions.LRMSSubmitError(\n \"No computational resource found reachable during\"\n \" update! 
Aborting submission of task '%s'\" %\n app)\n\n # sort resources according to Application's preferences\n targets = self.matchmaker.rank(app, updated_resources)\n\n exs = []\n # after brokering we have a sorted list of valid resource\n for resource in targets:\n gc3libs.log.debug(\"Attempting submission to resource '%s'...\",\n resource.name)\n try:\n job.timestamp[Run.State.NEW] = time.time()\n job.info = (\"Submitting to '%s'\" % (resource.name,))\n resource.submit_job(app)\n except gc3libs.exceptions.LRMSSkipSubmissionToNextIteration as ex:\n gc3libs.log.info(\"Submission of job %s delayed\", app)\n # Just raise the exception\n raise\n # pylint: disable=broad-except\n except Exception as ex:\n gc3libs.log.info(\n \"Error in submitting job to resource '%s': %s: %s\",\n resource.name, ex.__class__.__name__, str(ex),\n exc_info=True)\n exs.append(ex)\n continue\n gc3libs.log.info(\"Successfully submitted %s to: %s\",\n str(app), resource.name)\n job.state = Run.State.SUBMITTED\n job.resource_name = resource.name\n job.info = (\"Submitted to '%s'\" % (job.resource_name,))\n app.changed = True\n app.submitted()\n # job submitted; return to caller\n return\n # if wet get here, all submissions have failed; call the\n # appropriate handler method if defined\n ex = app.submit_error(exs)\n if isinstance(ex, Exception):\n app.execution.info = (\"Submission failed: %s\" % str(ex))\n raise ex\n else:\n return", "def cli():\n update_all_posts()\n push_updates()", "def create(self, adt=None, url=None, params=None, dryrun=False):\n if self._id_exists():\n abort(400, \"The application ID already exists\")\n elif self.engine.app_list:\n abort(400, \"Multiple applications are not supported\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, dryrun)\n try:\n self.engine.launch(tpl, adaps, self.app_id, dryrun)\n except Exception as error:\n abort(500, f\"Error while deploying: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully deployed\"}", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)", "def upload_package(self, __contents):\n raise NotImplementedError", "def main():\n if len(sys.argv) != 2:\n print('Usage: release.py <version>', file=sys.stderr)\n exit(1)\n version = sys.argv[1]\n with open('./manifest.json', 'r+') as f:\n manifest = json.load(f)\n manifest['version'] = version\n f.seek(0)\n json.dump(manifest, f, indent=2)\n f.truncate()\n\n os.system(f'zip cses-filter-v{version}.zip -r icons/ src/ manifest.json')", "def submit(self, root=None, force=False, repo=None):\n import ambry.util as du\n \n if repo:\n self.repo_name = repo\n self.set_api()\n \n import os\n from os.path import basename\n \n ckb = self.remote.update_or_new_bundle_extract(self.bundle)\n \n sent = set()\n \n self.remote.put_package(ckb)\n \n for doc in self.bundle.config.group('about').get('documents',[]):\n self.store_document(ckb, doc)\n\n 
zip_inputs = {}\n\n for extract_data in self.generate_extracts(root=root):\n\n zip = extract_data.get('zip', False)\n will_zip = False\n \n if zip == 'dir':\n zip_inputs[os.path.dirname(extract_data['path'])] = extract_data\n will_zip = True\n elif zip == 'file':\n zip_inputs[extract_data['path']] = extract_data\n will_zip = True\n\n file_ = self._do_extract(extract_data, force=force)\n \n if will_zip:\n self.bundle.log(\"{} will get submitted as a zip\".format(file_))\n elif file_ not in sent:\n r = self._send(ckb, extract_data,file_)\n sent.add(file_)\n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(file_), url))\n else:\n self.bundle.log(\"Already processed {}, not sending.\".format(basename(file_)))\n \n \n zip_outputs = self.zip(zip_inputs.keys() )\n \n \n print zip_outputs\n \n for in_zf, out_zf in zip_outputs.items():\n extract_data = zip_inputs[in_zf]\n extract_data['name'] = extract_data['zipname'] if 'zipname' in extract_data else extract_data['name']\n r = self._send(ckb, extract_data,out_zf)\n \n url = r['ckan_url']\n self.bundle.log(\"Submitted {} to {}\".format(basename(out_zf), url))\n \n \n return True", "def deploy(version):\n toolkit.readmegen(version)", "def serve_manifest(app):\n storeapps = APP.config[\"storage\"]\n manifest = os.path.join(storeapps, \"IPA\", app, \"manifest.plist\")\n app_url = request.host_url + \"application/IPA/\" + app + \"/\" + app + \".ipa\"\n if not os.path.isfile(manifest):\n return \"File not found\", 404\n logging.debug(\"Serving manifest with application url: %s\", app_url)\n return flask.Response(open(manifest).read().replace(\"{{ APPLICATION_URL }}\", app_url.encode(\"utf-8\")),\n mimetype='text/xml')", "def service_apply(file, parameters, detach, hide_manifest, from_archive):\n\n if from_archive:\n\n _deploy_from_archive(service_name=from_archive,\n detach=detach)\n else:\n if not file:\n file = click.prompt('File address')\n\n _deploy_from_manifest(file=file,\n parameters=parameters,\n hide_manifest=hide_manifest,\n detach=detach)", "def execute(\n name: str,\n *args: Any,\n **kwargs: Any\n ) -> None:\n cherrypy.engine.publish(name, *args, **kwargs) # type: ignore", "def website_publish_button(self):\n if self.website_published:\n self.write({'website_published': False})\n else:\n self.write({'website_published': True})", "def _addAppYaml(self):\n if self.wc.exists(self._branchPath('app/app.yaml')):\n raise ObstructionError('app/app.yaml exists already')\n\n yaml_path = self._branchPath('app/app.yaml')\n self.wc.copy(yaml_path + '.template', yaml_path)\n\n yaml = io.fileToLines(self.wc.path(yaml_path))\n out = []\n for i, line in enumerate(yaml):\n stripped_line = line.strip()\n if 'TODO' in stripped_line:\n continue\n elif stripped_line == '# application: FIXME':\n out.append('application: socghop')\n elif stripped_line.startswith('version:'):\n out.append(line.lstrip() + 'g0')\n out.append('# * initial Google fork of Melange ' + self.branch)\n else:\n out.append(line)\n io.linesToFile(self.wc.path(yaml_path), out)\n\n self.wc.commit('Create app.yaml with Google patch version g0 '\n 'in branch ' + self.branch)", "def cmd_gallery_publish(client, args):\n publish_to_imgur = client.share_on_imgur(args.item_id, args.title, args.terms)\n generate_output({'publish_to_imgur': publish_to_imgur})", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n 
self.publishRepoItem(key, manualVerify = manualVerify)", "def publish(self):\n return", "def publish_info_in_pagebrowser():\n env.run('bin/django create_pagebrowser_books')", "def compose_package(app_name, manifest, package_dir,\n require=None, archive_dir=None):\n with open(manifest, 'w') as f:\n fqn = 'io.murano.apps.' + app_name\n mfest_copy = MANIFEST.copy()\n mfest_copy['FullName'] = fqn\n mfest_copy['Name'] = app_name\n mfest_copy['Classes'] = {fqn: 'mock_muranopl.yaml'}\n if require:\n mfest_copy['Require'] = require\n f.write(yaml.dump(mfest_copy, default_flow_style=False))\n\n name = app_name + '.zip'\n\n if not archive_dir:\n archive_dir = os.path.dirname(os.path.abspath(__file__))\n archive_path = os.path.join(archive_dir, name)\n\n with zipfile.ZipFile(archive_path, 'w') as zip_file:\n for root, dirs, files in os.walk(package_dir):\n for f in files:\n zip_file.write(\n os.path.join(root, f),\n arcname=os.path.join(os.path.relpath(root, package_dir), f)\n )\n\n return archive_path, name", "async def create_app(self, data: dict) -> dict:\r\n return await self.post(API_APPS, data)", "def run_package(m):\n\n if m.args.upload:\n doc = find_fs_package_from_dir(m.args.source)\n else:\n doc = find_csv_package(m)\n\n url, user, password = get_site_config(m.args.site_name)\n wp = Client(url, user, password)\n\n post = get_or_new_post(m, wp, doc)\n\n assert post is not None\n\n if m.args.upload:\n upload_to_wordpress(wp, post, doc)\n\n content = html(doc, m.args.template)\n\n post.excerpt = doc['Root'].get_value('Root.Description') or content[:200]\n\n post_tags = list(set(\n [t.value for t in doc['Root'].find('Root.Tag')] +\n [t.value for t in doc['Root'].find('Root.Group')] +\n [doc['Root'].get_value('Root.Origin')] +\n list(split_groups_tags(m.args.group)) +\n list(split_groups_tags(m.args.tag))\n ))\n\n post.terms_names = {\n 'post_tag': post_tags,\n 'category': ['Dataset'] + list(split_groups_tags(m.args.group))\n }\n\n post.title = doc.get_value('Root.Title')\n post.slug = slugify(doc.nonver_name)\n post.content = content\n\n if m.args.publish:\n post.post_status = 'publish'\n\n try:\n if m.args.no_op:\n r = {}\n else:\n r = wp.call(EditPost(post.id, post))\n except Fault as e:\n\n if 'taxonomies' in e.faultString:\n err((\"User {} does not have permissions to add terms to taxonomies. 
\"\n \"Terms are: {}\").format(user, post.terms_names))\n\n raise\n\n return r", "def deploy():\n archive_path = do_pack()\n\n if not archive_path:\n return False\n\n return do_deploy(archive_path)", "def cmd_apps__create(args):\n \n if args.name is None:\n args.name = os.path.basename(os.getcwd())\n\n url = remote.create_project(args.name)\n \n if in_git_repo():\n if get_push_url('tinyserv') is None:\n git(None, 'remote', 'add', 'tinyserv', url)\n print \"Added remote 'tinyserv'.\"\n else:\n print \"This repository is already configured for app '%s'.\" % \\\n _get_current_project_name()\n \n print \"Remote repository URL is %s.\" % url", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy():\n git_pull()\n if confirm(\"Install/upgrade requirements with pip?\"):\n install_requeriments()\n django_command('collectstatic')\n django_command('migrate')\n restart()", "def run(syncdb=False):\n from fabdeploy.django import migrate as django_migrate, syncdb as django_syncdb\n import time\n env.release = time.strftime('%Y%m%d%H%M%S')\n prepare_deploy() # pull, test, push\n git.remote_pull()\n app.install_requirements()\n django_migrate(syncdb) # syncdb in case is first time\n deploy_static()" ]
[ "0.7300059", "0.67660546", "0.6331592", "0.6170584", "0.60555214", "0.6053848", "0.5974832", "0.58902156", "0.5870017", "0.58022076", "0.56858075", "0.56083417", "0.5599532", "0.5585206", "0.5573604", "0.5570114", "0.5532284", "0.5491696", "0.5456021", "0.5449518", "0.5443404", "0.54393977", "0.54283106", "0.54259217", "0.54234827", "0.5418377", "0.5414494", "0.5389343", "0.53819203", "0.537537", "0.53557426", "0.5355508", "0.5350404", "0.53434914", "0.53417695", "0.5336773", "0.53363436", "0.5331533", "0.5327678", "0.5299297", "0.5296476", "0.5293611", "0.5290709", "0.52725655", "0.5263819", "0.5259142", "0.52472764", "0.5245503", "0.52337384", "0.52317995", "0.5227428", "0.5222342", "0.52069944", "0.5191768", "0.5189354", "0.51750475", "0.51700324", "0.516829", "0.51583374", "0.51516694", "0.5147757", "0.5144056", "0.5141573", "0.5133712", "0.51273286", "0.5118323", "0.51178044", "0.5112097", "0.5109913", "0.5108901", "0.5108901", "0.5108901", "0.51018625", "0.509727", "0.50949156", "0.50864035", "0.50795734", "0.50781953", "0.5077134", "0.5072349", "0.50680536", "0.50647116", "0.5048374", "0.5032396", "0.5029648", "0.50216377", "0.5019273", "0.50180167", "0.5002563", "0.49963972", "0.49923182", "0.49883577", "0.4984749", "0.49800345", "0.4978339", "0.49732405", "0.49692893", "0.49577639", "0.4954164", "0.49530053" ]
0.69711465
1
Parses a parameters string and returns a dict of overrides. This function assumes that the parameters string is in the form 'key1="value1" key2="value2"'. Each value must be wrapped in double quotes, which is what allows values that contain spaces.
def _parse_parameters(parameters):
    if not re.match(r'^(\w+)="([^=]+)"(\s{1}(\w+)="([^=]+)")*$', parameters):
        raise ValueError
    # first we add tokens that separate key/value pairs.
    # in case of key="ss sss ss", we skip tokenizing when we see the first double quote
    # and resume when we see the second
    replace_space = True
    tokenized = ""
    for c in parameters:
        if c == '\"':
            replace_space = not replace_space
        elif c == ' ' and replace_space:
            tokenized += "$$"
        else:
            tokenized += c
    # now get the tokens
    tokens = tokenized.split('$$')
    result = {}
    for token in tokens:
        # separate key/values
        key_value = token.split("=")
        result[key_value[0]] = key_value[1]
    return result
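A minimal usage sketch, assuming the function above and the re module are in scope (the sample inputs are invented for illustration):

import re  # _parse_parameters depends on the re module

# Quoted values may contain spaces; the surrounding quotes are stripped.
overrides = _parse_parameters('host="127.0.0.1" greeting="hello world"')
assert overrides == {'host': '127.0.0.1', 'greeting': 'hello world'}

# Input that does not match the key="value" grammar raises ValueError.
try:
    _parse_parameters('host=127.0.0.1')
except ValueError:
    pass  # missing quotes around the value fails the validation regex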
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _parse_params( self ):\n paramDic={}\n # Parameters are on the 3rd arg passed to the script\n paramStr=sys.argv[2]\n print paramStr\n if len(paramStr)>1:\n paramStr = paramStr.replace('?','')\n \n # Ignore last char if it is a '/'\n if (paramStr[len(paramStr)-1]=='/'):\n paramStr=paramStr[0:len(paramStr)-2]\n \n # Processing each parameter splited on '&' \n for param in paramStr.split(\"&\"):\n try:\n # Spliting couple key/value\n key,value=param.split(\"=\")\n except:\n key=param\n value=\"\"\n \n key = urllib.unquote_plus(key)\n value = urllib.unquote_plus(value)\n \n # Filling dictionnary\n paramDic[key]=value\n print paramDic\n return paramDic", "def parse_key_value_pairs(arg_string):\n try:\n return {key: value for (key, value) in [tuple(str(arg).split('=', 1)) for arg in arg_string]}\n except ValueError:\n raise click.ClickException(\"argument string must be in the form x=y\")", "def _parse_config_args(args):\r\n config_dict = dict()\r\n for config_str in args:\r\n try:\r\n components = config_str.split('=')\r\n if len(components) >= 2:\r\n config_dict[components[0]] = \"=\".join(components[1:])\r\n\r\n except:\r\n print \"Warning: could not interpret config value '{0}'\".format(config_str)\r\n pass\r\n\r\n return config_dict", "def parse_from_str(self, config_str):\n if not config_str:\n return {}\n config_dict = {}\n try:\n for kv_pair in config_str.split(','):\n if not kv_pair: # skip empty string\n continue\n k, v = kv_pair.split('=')\n config_dict[k.strip()] = eval_str_fn(v.strip())\n return config_dict\n except ValueError:\n raise ValueError('Invalid config_str: {}'.format(config_str))", "def update_drum_params(input_args, default_params):\n try:\n as_dict = ast.literal_eval(str(input_args))\n except ValueError:\n base_params = get_base_params(0, 0, 0, 0)\n print(f'The input string: `{input_args}` is not in the right format.')\n print('The input and each key should be enclosed in quotes.')\n print('Heres an example:')\n example = \"\"\" -kick \"{'div':2}\" \"\"\"\n print('\\t', example)\n print('Poissible parameters are: ')\n [print('\\t', k) for k in base_params.keys()]\n except Exception as e:\n print(e)\n\n for k, v in as_dict.items():\n if k in default_params:\n default_params[k] = v\n return default_params", "def urllib_unquote_parameters(inputstring):\r\n\r\n if type(inputstring) is not str:\r\n raise TypeError(\"urllib_unquote_parameters' inputstring parameter must be a string, not '\"+str(type(inputstring))+\"'\")\r\n\r\n keyvalpairs = inputstring.split(\"&\")\r\n res = {}\r\n\r\n for quotedkeyval in keyvalpairs:\r\n # Throw ValueError if there is more or less than one '='.\r\n quotedkey, quotedval = quotedkeyval.split(\"=\")\r\n key = urllib_unquote_plus(quotedkey)\r\n val = urllib_unquote_plus(quotedval)\r\n res[key] = val\r\n\r\n return res", "def parse_params(params):\n pairs = params.split(' ')\n content = dict()\n for key, value in [pair.split('=') for pair in pairs]:\n content[key] = int(value)\n return content", "def __parse_options_dict(options_str):\n # type: (str) -> Dict[str, str]\n opts = options_str.split('&') # type: List[str]\n res = {} # Type: Dict\n\n for opt in opts:\n key, value = opt.split('=') # type: List[str, str]\n res[key] = value # type: str\n\n return res", "def parse_request_arg_dict(arg, exception_class=Exception):\n arg_dict = {}\n arg_pairs = arg.split(';')\n for arg_pair in arg_pairs:\n try:\n arg_name, arg_value = arg_pair.split('=', 1)\n except Exception as error:\n logging.exception(error)\n raise exception_class(\n 'there is 
no `=` in %s' % arg_pair\n )\n arg_dict[arg_name] = arg_value\n return arg_dict", "def _parse_params(params):\n for key, value in params.items():\n if value.lower() in ('none', 'null', ''):\n params[key] = None\n elif value.lower() == 'true':\n params[key] = True\n elif value.lower() == 'false':\n params[key] = False\n elif value.isdigit() or (value[0] == '-' and value[1:].isdigit()):\n params[key] = int(value)\n elif ',' in value:\n params[key] = list(map(lambda x: x.strip(), value.split(',')))\n else:\n try:\n params[key] = float(value)\n except:\n pass\n return params", "def parse_function_params(params: Text) -> Dict:\n function_meta = {\"args\": [], \"kwargs\": {}}\n\n params_str = params.strip()\n if params_str == \"\":\n return function_meta\n\n args_list = params_str.split(\",\")\n for arg in args_list:\n arg = arg.strip()\n if \"=\" in arg:\n key, value = arg.split(\"=\")\n function_meta[\"kwargs\"][key.strip()] = parse_string_value(value.strip())\n else:\n function_meta[\"args\"].append(parse_string_value(arg))\n\n return function_meta", "def split_params(param_string):\n\t#TODO: check for negatives i.e. alpha--1\n\tparts = param_string.split('_')\n\tparams = {}\n\n\tfor i in range(len(parts)):\n\t\tparam = split_items(parts[i])\n\t\tif len(param) < 2:\n\t\t\ttry:\n\t\t\t\tparts[i+1] = parts[i] + \"_\" + parts[i+1]\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\tcontinue\n\t\telif len(param) == 2:\n\t\t\tparams[param[0]] = param[1]\n\t\telif len(param) == 3 and len(param[1]) == 0:\n\t\t\tparams[param[0]] = -param[2]\n\t\telse:\n\t\t\tparams[param[0]] = param[1:]\n\treturn params", "def str2dict(string):\n res_dict = {}\n for keyvalue in string.split(','):\n (key, value) = keyvalue.split('=', 1)\n res_dict[key] = value\n return res_dict", "def params_commandline(lista):\n if len(lista)%2!=0:\n print('Error: The number of parameter names and values does not match')\n sys.exit()\n dict={}\n for i in range(0,len(lista),2):\n key=lista[i]\n if type(key)!=type(''):\n raise 'Keyword not string!'\n #replace commas in case they're present\n if key[0]=='-':key=key[1:]\n lista[i+1]=replace(lista[i+1],',',' ')\n values=tuple(split(lista[i+1]))\n if len(values)<1:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=values\n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict", "def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters", "def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val\n\n return results", "def parse_config_string(config_string, issue_warnings=True):\n config_dict = {}\n my_splitter = shlex.shlex(config_string, posix=True)\n my_splitter.whitespace = ','\n my_splitter.whitespace_split = True\n for kv_pair in my_splitter:\n kv_pair = kv_pair.strip()\n if not kv_pair:\n continue\n kv_tuple = kv_pair.split('=', 1)\n if len(kv_tuple) == 1:\n if issue_warnings:\n MsafConfigWarning.warn(\n (\"Config key '%s' has no value, ignoring it\" %\n kv_tuple[0]), stacklevel=1)\n else:\n k, v = kv_tuple\n # subsequent values for k will override earlier ones\n config_dict[k] = v\n return config_dict", "def _parse_parameter_overrides(self, 
**parameter_overrides):\n\n par = self.parameter_set.parameters.copy()\n\n array_len = None\n array_keys = []\n\n for key, val in parameter_overrides.items():\n if key not in par.keys(): # don't add invalid keys\n msg = '{} is not a valid parameter, ignoring'\n warnings.warn(msg.format(key))\n else:\n if np.isscalar(val):\n par[key] = float(val)\n else: # is an array\n if array_len is None:\n array_len = len(val)\n if len(val) != array_len:\n msg = ('All array valued parameters must have the '\n 'same length.')\n raise ValueError(msg)\n array_keys.append(key)\n par[key] = val\n\n return par, array_keys, array_len", "def handle_log_output(original_parameters_string: Optional[Any]) -> Dict[str, Any]:\n if original_parameters_string is None:\n return {}\n\n if isinstance(original_parameters_string, bytes):\n mystr = original_parameters_string.decode(\"utf-8\")\n elif isinstance(original_parameters_string, str):\n mystr = original_parameters_string\n else:\n mystr = str(original_parameters_string)\n\n if mystr.strip() == \"\":\n return {}\n\n urlencoded = False\n try:\n parameters = orjson.loads(mystr)\n except orjson.JSONDecodeError:\n try:\n parameters = urllib.parse.parse_qs(mystr)\n urlencoded = True\n except Exception: # pragma: no cover\n return original_parameters_string\n\n return obfuscate_dict(parameters, urlencoded=urlencoded)", "def override(self, config_dict_or_str, allow_new_keys=False):\r\n if isinstance(config_dict_or_str, str):\r\n if not config_dict_or_str:\r\n return\r\n elif '=' in config_dict_or_str:\r\n config_dict = self.parse_from_str(config_dict_or_str)\r\n elif config_dict_or_str.endswith('.yaml'):\r\n config_dict = self.parse_from_yaml(config_dict_or_str)\r\n else:\r\n raise ValueError(\r\n 'Invalid string {}, must end with .yaml or contains \"=\".'.format(\r\n config_dict_or_str))\r\n elif isinstance(config_dict_or_str, dict):\r\n config_dict = config_dict_or_str\r\n else:\r\n raise ValueError('Unknown value type: {}'.format(config_dict_or_str))\r\n\r\n self._update(config_dict, allow_new_keys)", "def input_arguments(lines, lower = False):\n\n var_dict = {}\n if len(lines) == 0: return var_dict\n if lines[-1] != '\\n': lines += '\\n'\n lines = re.sub('#.*\\n', '#', lines) # convert all comments into _delimiters\n for block in lines.split('#'):\n name = None\n # look for assignment\n for item in block.split('='):\n value = None\n new_name = item\n # if value is string\n for s in _quote:\n item_str = item.split(s)\n if len(item_str) > 2: # found quotation marks\n value = item_str[1] # the string in the first _quote\n new_name = item_str[-1].strip() # last term\n # value not a string\n if value is None:\n value = item\n for s in _delimiter:\n try:\n value = list(filter(None, value.split(s)))[0] # always take the first meaningful string\n except IndexError:\n value = ''\n break\n for s in _delimiter:\n try:\n new_name = list(filter(None, new_name.split(s)))[-1] # always take the last meaningful string\n except IndexError:\n new_name = ''\n break\n if is_valid_variable_name(name) and value is not None:\n if lower: name = name.lower()\n var_dict.update({name : value})\n name = new_name\n return var_dict", "def parse_query_string(self, params):\n results = {}\n\n for key, val in params.items():\n lookup_len = len(self.query_string_lookup) + 1\n\n if key[0:lookup_len] == '{}['.format(self.query_string_lookup) and key[-1] == ']':\n results[key[lookup_len:-1]] = val if val.lower() != 'null' else None\n\n return results", "def _parseOptions(self, optionsString):\n\n options = 
dict()\n pairs = optionsString.split(\";\")\n for pair in pairs:\n if not pair or \"=\" not in pair:\n continue\n\n key, value = pair.split(\"=\")\n options[key] = int(value)\n\n return options", "def parse_kwargs(kwargs_list: List[str]) -> Dict[str, Any]:\n\n kwargs_dict = {}\n\n for kwarg in kwargs_list:\n key = kwarg[2:].split('=')[0]\n value = '='.join(kwarg.split('=')[1:])\n\n try:\n if re.match(r'^(-)?[0-9]+$', value):\n value = int(value)\n\n elif re.match(r'^(-)?[0-9]*.[0-9]+$', value) or re.match(r'^(-)?[0-9]*(\\.)?[0-9]+e(-|\\+)[0-9]+$', value):\n value = float(value)\n\n elif re.match(r'^\\[.*]$', value) or re.match(r'^\\{.*}$', value):\n value = json.loads(value)\n\n elif value.lower() in ('true', 'false'):\n value = value.lower() == 'true'\n\n elif value.lower() == 'none':\n value = None\n\n except:\n logging.warning(f'Could not automatically parse argument \"{key}.\" Its type will remain string.')\n\n kwargs_dict[key] = value\n\n return kwargs_dict", "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result", "def fromstring(self, description):\n self.header = {}\n # Split string either on commas or whitespace, for good measure\n param_vals = [p.strip() for p in description.split(',')] \\\n if ',' in description else description.split()\n params = [p for p in self]\n min_len = min(len(params), len(param_vals))\n for param, param_val in zip(params[:min_len], param_vals[:min_len]):\n param.value_str = param_val\n for param in params[min_len:]:\n param.value = param.default_value", "def parsekv(inputString):\n mDict = dict()\n parts = inputString.split('&')\n for item in parts:\n if (item.count('=') != 1):\n raise ValueError(\"Need a singular = sign in str. %s\" % (item, ))\n key, value = item.split('=')\n # If we can convert the string value to an int, great, otherwise\n # leave it as a string.\n try:\n mDict[key] = int(value)\n except ValueError:\n mDict[key] = value\n return mDict", "def parseConfig(self, filename):\n parameters = {}\n try:\n f = open(filename)\n except Exception as inst:\n print(type(inst))\n print(inst.args)\n print(inst)\n print('cannot open', filename)\n raise\n else:\n for line in f:\n # Remove text after comment character.\n if self.comment_char in line:\n line, comment = line.split(self.comment_char,\n 1) # Split on comment character, keep only the text before the character\n\n # Find lines with parameters (param=something)\n if self.param_char in line:\n parameter, value = line.split(self.param_char, 1) # Split on parameter character\n parameter = parameter.strip() # Strip spaces\n value = value.strip()\n parameters[parameter] = value # Store parameters in a dictionary\n\n f.close()\n\n return parameters", "def update_params(argv: list, prm: dict):\n\n\tfor a in argv[1:]:\n\t\ttoks = a.split('=',1)\n\t\tif len(toks)<2: continue\n\t\tk,v = toks[:2]\n\t\tif k not in prm: continue\n\t\tprm[k] = v", "def quote_all(parameters: Dict[str, Any]) -> Dict[str, Any]:\n return {key: quote_plus(value) if isinstance(value, str) else value for key, value in parameters.items()}", "def parse_parameters_file(lines):\n param_dict = {}\n for line in lines:\n line = line.strip()\n if line:\n (param, values) = line.split('\\t')\n param_dict[param] = values.split(',')\n return param_dict", "def str_to_args(line):\n args_in = line.split()\n args_out = []\n kwargs_out = {}\n gadget_lookup = {g.name: g for g in Gadget.getinstances()}\n for a in args_in:\n if '=' in a:\n key, val = a.split('=')\n if ('*' in val) or ('?' 
in val):\n matching_names = filter(gadget_lookup.keys(), val)\n kwargs_out[key] = [gadget_lookup[name] for name in matching_names]\n elif val in gadget_lookup.keys():\n kwargs_out[key] = gadget_lookup[val]\n else:\n kwargs_out[key] = eval(val)\n else:\n if ('*' in a) or ('?' in a):\n matching_names = filter(gadget_lookup.keys(), a)\n args_out += [gadget_lookup[name] for name in matching_names]\n elif a in gadget_lookup.keys():\n args_out.append(gadget_lookup[a])\n else:\n try:\n args_out.append(eval(a))\n except NameError:\n args_out.append(a)\n return args_out, kwargs_out", "def _parse_parameters(self, parameters_text):\n for mo in re.finditer(self._PARAMETERS_RE, parameters_text):\n self._parameters.append(Parameter(mo.group(\"param_name\"), mo.group(\"default_value\")))", "def override_paramset(self, override_str):\n\n paramset = ParamSet()\n if not override_str:\n return paramset\n\n override = eval(override_str, {}, {})\n if not override:\n return paramset\n\n for override_name in override:\n # The override can have a node_name/parm format which allows for point\n # instance overrides to override parms in a network.\n\n cached_override = self.override_cache.get(override_name, None)\n if cached_override is not None:\n # Hint to just skip\n if cached_override == -1:\n continue\n if isinstance(cached_override, PBRTParam):\n # textures which can't be overriden\n paramset.add(cached_override)\n continue\n pbrt_name, pbrt_type, tuple_names = cached_override\n if tuple_names:\n value = [override[x] for x in tuple_names]\n else:\n value = override[override_name]\n pbrt_param = PBRTParam(pbrt_type, pbrt_name, value)\n paramset.add(pbrt_param)\n continue\n\n override_match = self.override_pat.match(override_name)\n spectrum_type = override_match.group(\"spectrum\")\n parm_name = override_match.group(\"parm\")\n override_node = override_match.group(\"node\")\n if override_node is not None and override_node != self.name:\n self.override_cache[override_name] = -1\n continue\n\n # There can be two style of \"overrides\" one is a straight parm override\n # which is similar to what Houdini does. The other style of override is\n # for the spectrum type parms. Since spectrum parms can be of different\n # types and the Material Overrides only support \"rgb\" we are limited\n # in the types of spectrum overrides we can do. To work around this we'll\n # support a different style, override_parm:spectrum_type. 
If the parm name\n # ends in one of the \"rgb/color\" types then we'll handle it differently.\n # TODO add a comment as to what the value would look like\n\n # NOTE: The material SOP will use a parm style dictionary if there\n # parm name matches exactly\n # ie) if there is a color parm you will get\n # {'colorb':0.372511,'colorg':0.642467,'colorr':0.632117,}\n # But if the parm name doesn't match (which we are allowing\n # for you will get something like this -\n # {'colora':(0.632117,0.642467,0.372511),}\n\n # Once we have a parm name, we need to determine what \"style\" it is.\n # Whether its a hou.ParmTuple or hou.Parm style.\n tuple_names = tuple()\n parm_tuple = self.node.parmTuple(parm_name)\n if parm_tuple is None:\n # We couldn't find a tuple of that name, so let's try a parm\n parm = self.node.parm(parm_name)\n if parm is None:\n # Nope, not valid either, let's move along\n self.override_cache[override_name] = -1\n continue\n # if its a parm but not a parmtuple it must be a split.\n parm_tuple = parm.tuple()\n # we need to \"combine\" these and process them all at once and\n # then skip any other occurances. The skipping is handled by\n # the overall caching mechanism. self.override_cache\n tuple_names = tuple([x.name() for x in parm_tuple])\n\n # This is for wrangling parm names of texture nodes due to having a\n # signature parm.\n pbrt_parm_name = self.pbrt_parm_name(parm_tuple.name())\n\n if spectrum_type is None and tuple_names:\n # This is a \"traditional\" override, no spectrum or node name prefix\n value = [override[x] for x in tuple_names]\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, value\n )\n elif spectrum_type in (\"spectrum\", \"xyz\", \"blackbody\"):\n pbrt_param = PBRTParam(\n spectrum_type, pbrt_parm_name, override[override_name]\n )\n elif not tuple_names:\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, override[override_name]\n )\n else:\n raise ValueError(\"Unable to wrangle override name: %s\" % override_name)\n\n paramset.add(pbrt_param)\n\n # From here to the end of the loop is to allow for caching\n\n if pbrt_param.type == \"texture\":\n self.override_cache[override_name] = pbrt_param\n continue\n\n # we are making an assumption a split parm will never be a spectrum\n # or have a node prefix. The Material SOP doesn't allow for it as well.\n for name in tuple_names:\n # The -1 means \"continue\"\n self.override_cache[name] = -1\n # Sanity check\n if tuple_names and override_name not in tuple_names:\n raise ValueError(\n \"Override name: %s, not valid for a parmTuple\" % override_name\n )\n # override_name must match one of the tuple_names\n self.override_cache[override_name] = (\n pbrt_param.name,\n pbrt_param.param_type,\n tuple_names,\n )\n return paramset", "def getModelParameters(parameterstring):\n \n def getFormattedValue(strval):\n if '\\'' in strval:\n return strval.replace('\\'', '')\n elif '\"' in strval:\n return strval.replace('\"', '')\n elif '.' 
in strval:\n return float(strval)\n elif strval == 'True':\n return True\n elif strval == 'False':\n return False\n else:\n return int(strval)\n \n ((25,),)\n def parseTuple(strval):\n idx = strval.find(\"(\")+1\n values = []\n i = idx\n while i < len(strval):\n if strval[i] == '(':\n nested, lnested = parseTuple(strval[i:])\n print(i)\n i += lnested\n idx = i+1\n print(i)\n values.append(nested)\n elif strval[i] == ')':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n return tuple(values), i\n elif strval[i] == ',':\n newval = strval[idx:i].strip()\n if newval != '':\n values.append(getFormattedValue(newval))\n idx = i+1\n i += 1\n \n rv = dict()\n if parameterstring is None:\n return rv\n params = parameterstring.strip().split(\"=\")\n nextkey = params[0]\n for pi in range(1,len(params)):\n cur = params[pi]\n if '(' in cur:\n if cur.count(\"(\") != cur.count(\")\"):\n raise InvalidParameters(\"Unequal number of paranthesis.\")\n value, _ = parseTuple(cur)\n rv[nextkey] = value\n nextkey = cur[cur.rfind(',')].strip()\n else:\n commasplit = cur.split(\",\")\n value = commasplit[0].strip()\n rv[nextkey] = getFormattedValue(value)\n nextkey = commasplit[1].strip()\n \n return rv", "def split_config(s):\n x = re.split(r\";\", s)\n d = {k: v for (k, v) in [i.split(\"=\") for i in x]}\n return d", "def parseAccept(aString):\n\tres = {}\n\tif aString is not None:\n\t\tfor item in aString.split(\",\"):\n\t\t\tif \";\" in item:\n\t\t\t\tkey, params = item.split(\";\", 1)\n\t\t\telse:\n\t\t\t\tkey, params = item, \"\"\n\t\t\tres[key.strip()] = params.strip()\n\t\n\treturn res", "def get_parameters_from_input_string(string):\n parameter_array = []\n start_found = False\n item = str(\"\")\n for i in range(len(string)): \n if start_found == True and string[i] != \",\" and string[i] !=\")\":\n item += string[i]\n elif start_found == True and string[i] == \",\":\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n elif start_found == True and string[i] == \")\":\n start_found = False\n if item not in parameter_array:\n parameter_array.append(item)\n item = str(\"\")\n # Start here and set start_found to True\n elif string[i] == \"(\":\n start_found = True\n return parameter_array", "def _parse_item(item: str) -> dict:\n delimiter = _get_delimiter(item)\n key, value = item.split(delimiter)\n if delimiter == '=':\n return {key: value}\n else:\n try:\n return {key: json.loads(value)}\n except json.JSONDecodeError:\n raise click.UsageError(JSON_ERROR_MESSAGE.format(item))", "def _cmd_params_to_dict(params):\n return {t[0]: t[1] for t in params}", "def split(s):\n args_str, kwargs_str = s.split('\\n')\n args = tuple(args_str.split(', '))\n kwargs = {}\n for s in kwargs_str.split(', '):\n k, v = s.split('=')\n kwargs[k] = v\n print args\n print kwargs", "def translation_parameters(parameters):\n param = dict()\n if parameters:\n float_num = re.compile(r'^[-+]?[0-9]+\\.[0-9]+$')\n scientific_notation = re.compile(\n r\"^[-+]?[1-9]?\\.?[0-9]+[eE][-+]?[0-9]+$\")\n int_num = re.compile(r'^[-+]?[0-9]+$')\n formula = re.compile(r'^(\\s*)(.*)[0-9]+(\\s*)(.*)[*+-\\](\\s*)(.*)[0-9]+(\\s*)(.*)$')\n for key, value in parameters.items():\n if isinstance(value, str):\n value = value.strip()\n scientific_res = scientific_notation.match(value)\n float_res = float_num.match(value)\n int_res = int_num.match(value)\n formula_res = formula.match(value)\n if float_res or scientific_res:\n value = float(value)\n elif int_res:\n value = int(value)\n 
elif value.startswith(\"(\") and value.endswith(\")\"):\n value = tuple(json.loads(value))\n elif value.lower() in [\"false\", \"true\"]:\n value = value.lower() == \"true\"\n elif formula_res:\n value = four_operations(value)\n elif value == \"\":\n value = None\n param.update({key: value})\n return param", "def parse_session_overrides_str(self, overrides_str):\n overrides = []\n if overrides_str is None or overrides_str == \"\":\n return []\n for section in overrides_str.split(\";\"):\n splitted_array = section.split(\"=\")\n if (\n len(splitted_array) != 2\n or splitted_array[0] == \"\"\n or splitted_array[1] == \"\"\n ):\n raise OSCError(\"INCORRECT_SESSION_OVERRIDE\", {\"section\": section})\n overrides.append(splitted_array)\n return overrides", "def parse(cls, value: str) -> Tuple[str, Dict[str, str]]:\n raw_value = read_value_from_path(value)\n args: Dict[str, str] = {}\n\n if \"@\" in raw_value:\n args[\"region\"], raw_value = raw_value.split(\"@\", 1)\n\n # now find any other arguments that can be filters\n matches = re.findall(r\"([0-9a-zA-z_-]+:[^\\s$]+)\", raw_value)\n for match in matches:\n k, v = match.split(\":\", 1)\n args[k] = v\n\n return args.pop(\"name_regex\"), args", "def arglist_parse_to_dict(arg_l):\n\n prop_d = {}\n for prop in arg_l:\n if len(prop) == 2:\n prop_l = prop\n elif ':' in prop:\n prop_l = prop.split(':')\n elif '=' in prop:\n prop_l = prop.split('=')\n else:\n exit( \"==> ERROR: invalid config. Use '=' or ':'.\" )\n if not len(prop_l) == 2:\n exit( \"==> ERROR: invalid config. Use one '=' per setting.\" )\n prop_d[prop_l[0]] = prop_l[1]\n return prop_d", "def _convert_param_list_to_dict(param_list: list, parameters_dict: dict) -> dict:\n for param in param_list:\n param_array: list = param.split(\"=\")\n key: str = param_array[0]\n value: str = None\n if len(param_array) > 1:\n value = param_array[1]\n parameters_dict[key] = value\n return parameters_dict", "def _parse_params(self, params):\r\n if params[0] == \":\":\r\n params = [params[1:]]\r\n else:\r\n params = params.split(\" :\", 1)\r\n if len(params) == 1:\r\n last_arg = None\r\n else:\r\n last_arg = params[1]\r\n params = params[0].split(None)\r\n if last_arg != None:\r\n params.append(last_arg)\r\n return params", "def parse(args: list, keyword_set: set) -> dict:\n parsed_dict = {'': []}\n while args:\n keyword = get_keyword(arg=args[0], keyword_set=keyword_set)\n\n if keyword is not None:\n args.pop(0)\n keyword_name = keyword.keyword_name\n\n if keyword_name in parsed_dict:\n raise necrobot.exception.DoubledArgException(keyword=keyword.keyword)\n\n if keyword.param_for is not None:\n parsed_dict[keyword_name] = [keyword.keyword]\n else:\n parsed_dict[keyword_name] = []\n num_args_pulled = 0\n while num_args_pulled < keyword.num_args:\n if not args:\n raise necrobot.exception.NumParametersException(\n keyword=keyword,\n num_expected=keyword.num_args,\n num_given=num_args_pulled\n )\n else:\n num_args_pulled += 1\n parsed_dict[keyword_name].append(args[0])\n args.pop(0)\n else:\n parsed_dict[''].append(args[0])\n args.pop(0)\n\n return parsed_dict", "def _parse_kwargs(self):\n re_kwargs = r'^[\\w_][\\w\\d_]*=.+$'\n kwargs = [a.split('=') for a in self.args if re.findall(re_kwargs, a)]\n self.kwargs = {k: self._load_json(v) for k, v in kwargs}\n self.args = [a for a in self.args if not re.findall(re_kwargs, a)]", "def args2dict(args, dict_args={}):\n \n for arg in args:\n #this_entry = re.findall(r'[^\"\\s]\\S*|\".+?\"', arg)\n p_arg = arg.split('=')\n if len(p_arg) > 1:\n dict_args[p_arg[0]] 
= False if p_arg[1].lower() == 'false' else \\\n True if p_arg[1].lower() == 'true' else \\\n None if p_arg[1].lower() == 'none' else \\\n '='.join(p_arg[1:]) if len(p_arg) > 2 else \\\n p_arg[1]\n \n return(dict_args)", "def extract_key_value_pairs(string, joiner='=', separator=','):\n return dict([x.strip() for x in s.split(joiner, 1)] for s in string.split(separator))", "def get_args():\n\n params = {}\n\n if len(argv) == 1:\n\n input_file = input('Please enter the path to the parameter file: ')\n\n else:\n\n input_file = argv[1]\n\n if path.isfile(input_file) == False:\n\n print('ERROR: Cannot find input parameter file')\n exit()\n\n flines = open(input_file,'r').readlines()\n\n str_keys = ['catalog_file', 'red_dir',\n 'target_ra', 'target_dec',\n 'star_class', 'isochrone_file',\n 'target_lc_file_g', 'target_lc_file_r', 'target_lc_file_i']\n\n for line in flines:\n\n (key, value) = line.replace('\\n','').split()\n\n if key in str_keys:\n\n params[key] = value\n\n else:\n\n if 'none' not in str(value).lower():\n params[key] = float(value)\n else:\n params[key] = None\n\n return params", "def _process_ps_parameters(parameters):\n param_string = ''\n for param in parameters:\n if '=' in param:\n n, v = param.split('=', 1)\n param_string += '-{name} {value} '.format(name=n, value=v)\n else:\n param_string += '{} '.format(param)\n\n return param_string.strip(' ')", "def parse_transport(transport: str) -> Tuple[List[str], Mapping[str, str]]:\n params = []\n options = {}\n for option in transport.split(\";\"):\n if \"=\" in option:\n key, value = option.split(\"=\", maxsplit=1)\n options[key] = value\n else:\n params.append(option)\n return params, options", "def params_to_dict(tags):\n tags_dict = {}\n tags_name_value_list = [tag[0].split(':') for tag in tags]\n for tag_name, tag_value in tags_name_value_list:\n tags_dict.setdefault(tag_name, []).append(tag_value)\n return tags_dict", "def params_file(file):\n dict={}\n for line in open(file,'r').readlines():\n if line[0]==' ' or line[0]=='#': continue \n halves=line.split('#')\n\t#replace commas in case they're present\n halves[0]=halves[0].replace(',',' ') \t\n pieces=halves[0].split()\n if len(pieces)==0: continue\n key=pieces[0]\n #if type(key)<>type(''):\n # raise 'Keyword not string!'\n if len(pieces)<2:\n mensaje='No value(s) for parameter '+key\n raise mensaje\n dict[key]=tuple(pieces[1:]) \n if len(dict[key])==1: dict[key]=dict[key][0]\n return dict", "def convertParams(name, params, to_string=False):\r\n \r\n new_params = {}\r\n \r\n for key, value in params.items():\r\n \r\n validator = RadiusAuthRestHandler.FIELD_VALIDATORS.get(key)\r\n\r\n if validator is not None:\r\n if to_string:\r\n new_params[key] = validator.to_string(key, value)\r\n else:\r\n new_params[key] = validator.to_python(key, value)\r\n else:\r\n new_params[key] = value\r\n\r\n return new_params", "def builddict(fname,ignorestrings=['#'],dictdelim='='):\n\tf = open(fname, \"r\")\n\tline = f.readline()\n\ti = 0\n\t\n\tparamdict={}\n\twhile line != '':\n\t\ttmp = line.strip()\n\t\tif tmp :\n\t\t\tfor st in ignorestrings:\n\t\t\t\ttmp = tmp.split(st)[0]\n\t\t\t\tif len(tmp) >1:\n\t\t\t\t\ttp = tmp.split(dictdelim)\n\t\t\t\t\tkey = tp[0].strip()\n\t\t\t\t\tval = tp[1].strip()\n\t\t\t\t\tparamdict[str(key)] = str(val) \n\t\tline=f.readline()\n\t\n\tf.close()\n\treturn paramdict", "def str2dic(self, string):\n dic = {}\n list0=string.split(\"&\")\n for i in list0:\n list2 = i.split(\"=\")\n dic[list2[0]] = list2[1]\n return dic", "def _split_url_string(param_str):\n 
parameters = parse_qs(param_str, keep_blank_values=False)\n for key, val in parameters.iteritems():\n parameters[key] = urllib.unquote(val[0])\n return parameters", "def parse_from_string(config_pair):\n key, value = config_pair.split(\"=\")\n value = literal_eval(value)\n current_config_keys = key.split('.')[::-1]\n last_config_value = {current_config_keys[0]: value}\n for current_config_subkey in current_config_keys[1:]:\n last_config_value = {current_config_subkey: last_config_value}\n return last_config_value", "def replace_param(string, param, value, param_format=None):\n\n if param_format == \"json\":\n return sub(r\"(?P<json_replacement>\\\"%s\\\"\\s*:\\s*)\\\"\\s*\\\"\" %\n escape(str(param)), \"\\\\1\\\"%s\\\"\" % value, string)\n elif param_format == \"header\":\n return sub(r\"%s=[^\\\\n]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)\n else:\n return sub(r\"%s=[^&]*\" % escape(str(param)), r\"%s=%s\" %\n (str(param).encode('string-escape'),\n str(value).encode('string-escape')), string)", "def parse_header_parameters(line):\n parts = _parseparam(\";\" + line)\n key = parts.__next__().lower()\n pdict = {}\n for p in parts:\n i = p.find(\"=\")\n if i >= 0:\n has_encoding = False\n name = p[:i].strip().lower()\n if name.endswith(\"*\"):\n # Lang/encoding embedded in the value (like \"filename*=UTF-8''file.ext\")\n # https://tools.ietf.org/html/rfc2231#section-4\n name = name[:-1]\n if p.count(\"'\") == 2:\n has_encoding = True\n value = p[i + 1 :].strip()\n if len(value) >= 2 and value[0] == value[-1] == '\"':\n value = value[1:-1]\n value = value.replace(\"\\\\\\\\\", \"\\\\\").replace('\\\\\"', '\"')\n if has_encoding:\n encoding, lang, value = value.split(\"'\")\n value = unquote(value, encoding=encoding)\n pdict[name] = value\n return key, pdict", "def readArgs(args):\n params = {}\n for k in args.keys():\n k2 = k.replace(\"<\", \"\").replace(\">\", \"\").replace(\"-\", \"\")\n try: # Convert strings to int or floats when required\n params[k2] = int(args[k])\n except:\n try:\n params[k2] = float(args[k])\n except:\n try:\n params[k2] = str2bool(args[k])\n except:\n params[k2] = args[k]\n return params", "def process_cli_config_args(config_args:List[str]) -> Dict:\n # assert len(config_args) % 3 == 0, \\\n # \"You should pass config args in [--config.arg_name arg_value arg_type] format\"\n assert len(config_args) % 2 == 0, \\\n \"You should pass config args in [--config.arg_name arg_value] format\"\n arg_names = [config_args[i] for i in range(0, len(config_args), 2)]\n arg_values = [config_args[i] for i in range(1, len(config_args), 2)]\n\n result = {}\n\n for name, value in zip(arg_names, arg_values):\n assert name.startswith(CONFIG_ARG_PREFIX), \\\n f\"Argument {name} is unkown and does not start with `config.` prefix. 
Cannot parse it.\"\n\n result[name[len(CONFIG_ARG_PREFIX):]] = infer_type_and_convert(value)\n\n return result", "def args_to_params(args: list) -> dict:\n found = {}\n\n # Setup the dictionary identifying the parameters\n found['sensor'] = args.sensor\n found['filename'] = args.filename\n found['working_space'] = args.working_space\n if args.userid:\n found['userid'] = args.userid\n\n # Note: Return an empty dict if we're missing mandatory parameters\n return found", "def parse_attr_str(attr_str):\r\n if not attr_str:\r\n print(\"Empty att_str in parse_attr_str()\")\r\n return {}\r\n\r\n if sys.version_info[0] == 3:\r\n string_types = str\r\n else:\r\n string_types = basestring\r\n\r\n if not isinstance(attr_str, string_types):\r\n print(\"Invalid attr_str while parsing:{0}\".format(attr_str))\r\n return {}\r\n\r\n # initialize a lexer, in POSIX mode (to properly handle escaping)\r\n lexer = shlex(attr_str, posix=True)\r\n # include '=' as a word character\r\n # (this is done so that the lexer returns a list of key-value pairs)\r\n # (if your option key or value contains any unquoted special character, you will need to add it here)\r\n lexer.wordchars += \"=\"\r\n # make sure attribute support 'data-parsley-length' attribute name\r\n lexer.wordchars += \"-\"\r\n # then we separate option keys and values to build the resulting dictionary\r\n # (maxsplit is required to make sure that '=' in value will not be a problem)\r\n # sometimes as HTML will has some key like attribute without value, like: 'required', 'disabled'...\r\n # it need extract those single attribute from the string\r\n pairs_attrs = []\r\n single_attrs = []\r\n for word in lexer:\r\n if \"=\" in word:\r\n # str.split() changed 'maxsplit' to keyword arguments\r\n # see: https://docs.python.org/3.3/library/stdtypes.html#str.split\r\n if sys.version_info >= (3, 3):\r\n pairs_attrs.append(word.split(\"=\", maxsplit=1))\r\n else:\r\n pairs_attrs.append(word.split(\"=\", 1))\r\n else:\r\n single_attrs.append(word)\r\n\r\n # convert pairs attribute list to dict\r\n pairs_attr_dict = dict(pairs_attrs)\r\n # add single atrribute to dict\r\n for attr in single_attrs:\r\n pairs_attr_dict[attr] = None\r\n\r\n return pairs_attr_dict", "def load_cli_kwargs(kwargs_list, delimiter='='):\n kwargs = {}\n for kv in kwargs_list:\n k, v = kv.split(delimiter, 1)\n kwargs[k] = v\n return kwargs", "def parse_line(self, line):\n line = line.strip()\n log.debug(\"Parsing line: '{}'\".format(line))\n if len(line) == 0:\n log.warning(\"Zero length line detected\")\n return\n split = line.split(DELIMETER)\n key = split[0]\n if key in FORMATS:\n log.debug(\"Using formatter for key: {}\".format(key))\n formatter = FORMATS[key]\n for (name, parser), value in zip(formatter,split[1:]):\n self._params[name] = parser(value)\n log.info(\"Parameters: {}\".format(self._params))\n self.notify_watchers()\n else:\n log.debug(\"Invalid key: {}\".format(key))", "def parse_config_overrides(\n args: List[str], env_var: Optional[str] = ENV_VARS.CONFIG_OVERRIDES\n) -> Dict[str, Any]:\n env_string = os.environ.get(env_var, \"\") if env_var else \"\"\n env_overrides = _parse_overrides(split_arg_string(env_string))\n cli_overrides = _parse_overrides(args, is_cli=True)\n if cli_overrides:\n keys = [k for k in cli_overrides if k not in env_overrides]\n logger.debug(\"Config overrides from CLI: %s\", keys)\n if env_overrides:\n logger.debug(\"Config overrides from env variables: %s\", list(env_overrides))\n return {**cli_overrides, **env_overrides}", "def 
get_parameter(parameters):\n parameter_dict = {'freq': 1, 'notify': \"true\"}\n\n if parameters:\n for parameter in parameters:\n key, value = parameter.split(\"=\")\n parameter_dict[key] = value\n\n return parameter_dict", "def _handle_dict(string):\n dict_lines = [line.split(Parser.FIELD_DELIM) for line in string.split(Parser.LINE_DELIM)\n if Parser.FIELD_DELIM in line]\n cur_dict = 0\n results = [{}]\n for line in dict_lines:\n if line[0] in results[cur_dict]:\n results.append({})\n cur_dict += 1\n results[cur_dict][line[0]] = line[1]\n return results", "def replace_param_occurrences(string, params):\n for k, v in params.items():\n string = string.replace(k, str(v))\n return string", "def _parse_parameter_list(\n parameter_list: abc.Iterable[str],\n normalize_parameter_names: bool = False,\n normalize_parameter_values: bool = True,\n strip_interior_whitespace: bool = False) -> list[tuple[str, str]]:\n parameters = []\n for param in parameter_list:\n param = param.strip()\n if param:\n name, value = param.split('=')\n if strip_interior_whitespace:\n name, value = name.strip(), value.strip()\n if normalize_parameter_names:\n name = name.lower()\n if normalize_parameter_values:\n value = value.lower()\n parameters.append((name, _dequote(value.strip())))\n return parameters", "def parse_query_string(s):\n res = {}\n pairs = s.split('&')\n for p in pairs:\n vals = [urldecode_plus(x) for x in p.split('=', 1)]\n if len(vals) == 1:\n res[vals[0]] = ''\n else:\n res[vals[0]] = vals[1]\n return res", "def parse_arguments(self):\n \n for arg in sys.argv[1:]:\n (key, sep, value) = arg.partition(\"=\")\n if sep != \"=\":\n raise ProcessorError(\"Illegal argument '%s'\" % arg)\n self.update_data(key, value)", "def makeDict(self, s):\n out = {}\n entries = s.split(self.dataDelimiterEntry)\n for e in entries:\n if e == \"\":\n continue\n c = e.split(self.dataDelimiterKey)\n out[c[0]] = c[1]\n return out", "def parse_generate_arguments(arguments):\n return_value = {}\n for key in arguments:\n return_value[key] = CONFIG_KEY_PARSER[key](arguments[key])\n\n return return_value", "def parse_params(self,tokenized_lines):\n\n # extract key-value pairs\n conversions = {\n # Space\n \"nuclide\" : tools.tuple_of(int), # use tuple so parameter is hashable when used as analysis key\n \"A\" : tools.singleton_of(int),\n \"Nsigma\" : tools.singleton_of(float),\n \"Nsigmamax\" : tools.singleton_of(int),\n \"N1v\" : tools.singleton_of(int),\n \"Nmax\" : tools.singleton_of(int),\n # Interaction\n \"interaction\" : tools.singleton_of(str),\n \"use_coulomb\" : tools.singleton_of(tools.bool_from_str),\n # Relative observables\n \"observable_names\" : tools.list_of(str),\n # Calculation\n \"hw\" : tools.singleton_of(float)\n }\n key_value_dict = tools.extract_key_value_pairs(\n tokenized_lines,conversions\n )\n\n # legacy support: force interaction to \"JISP16\" for early runs where\n # interaction field was provided as reserved field but not set to \"JISP16\"\n if (\"interaction\" in key_value_dict):\n if (key_value_dict[\"interaction\"] == \"RESERVED\"):\n key_value_dict[\"interaction\"]=\"JISP16\"\n\n # provide \"coulomb\" as preferred field name to match mfdn results analysis\n if (\"use_coulomb\" in key_value_dict):\n key_value_dict[\"coulomb\"] = key_value_dict[\"use_coulomb\"]\n \n # update to params dictionary\n self.params.update(key_value_dict)", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n 
assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def input_parse(filename,params_flag=False):\r\n\r\n input_parameters ={}\r\n with open(filename, 'r') as f:\r\n count = 0\r\n\r\n for line in f:\r\n line=line.strip()\r\n if line:\r\n if line.find('#') == -1:\r\n if not params_flag:\r\n var_name,var_value = line.split(',')[0],\",\".join(line.split(',')[1:]) # handle lines with more than 1 comma\r\n try:\r\n input_parameters[var_name] = float(var_value)\r\n except ValueError: # This occurs when python cannot convert list into a float.\r\n # Evaluate the python expression as a list\r\n input_parameters[var_name] = ast.literal_eval(var_value)\r\n else:\r\n if count==0:\r\n var_name = line.strip('\\n')\r\n input_parameters[var_name] = []\r\n count+=1\r\n else:\r\n try:\r\n input_parameters[var_name].append(float(line.strip('\\n')))\r\n except ValueError: # This occurs when python cannot convert list into a float.\r\n # Evaluate the python expression as a list\r\n input_parameters[var_name].append(ast.literal_eval(line.strip('\\n')))\r\n return input_parameters", "def import_param(self, param_str):\n param_groups = None\n \"\"\" Initial param cosmetic adjustment \"\"\"\n # re.sub(r\"\\s+\", \"\", param, flags=re.UNICODE) # Removes all white spaces\n param_str = param_str.strip()\n\n \"\"\" Cherck the param syntax \"\"\"\n for key, pattern in self.patterns.items():\n param_groups = re.findall(pattern, param_str)\n if param_groups:\n # Range\n if len(param_groups[0]) == 3:\n self.first_number = int(param_groups[0][0])\n self.operator = param_groups[0][1]\n self.second_number = int(param_groups[0][2])\n else:\n # Other equality, inequality\n self.operator = param_groups[0][0]\n self.first_number = int(param_groups[0][1])\n\n return True\n\n return False", "def parse_args(string):\n return re.findall('[-=][^ ]*', string)", "def read_config_from_string(string, entry_char='>', attribution_char='=',\n comment_char='#'):\n\n entry_char_len = len(entry_char)\n attr_char_len = len(attribution_char)\n\n # Gets each line of the string and stores into a list.\n string_content = string.splitlines()\n\n # Main loop along the lines of the string\n result_dictio = {}\n for line in string_content:\n\n # Gets only lines which have the entry character at the start\n if line[0:entry_char_len] != entry_char:\n continue\n\n # Line text processing\n # Ignores everything after a comment character\n line = line.split(comment_char)[0]\n # Eliminates the initial (entry) character\n line = line[entry_char_len:]\n\n # Separation between key and value\n # Finds where is the attribution char, which separates key from\n # value.\n attr_index = line.find(attribution_char)\n # If no attribution char is found, raises an exception.\n if attr_index == -1:\n raise ValueError(\n \"Heyy, the attribution character '\" + attribution_char +\n \"' was not found in line: '\" + line + \"'\")\n key = remove_border_spaces(line[:attr_index])\n value = remove_border_spaces(line[attr_index + attr_char_len:])\n\n # Finally adds the entry to the dictionary\n result_dictio[key] = value\n\n return result_dictio", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in 
enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def _sanitize_params(command, params):\n general_params_maps = {\n 'b': 'base_directory'\n }\n specific_params_map = {\n # find-clusters\n ('find-clusters', 't'): 'threshold',\n ('find-clusters', 'c'): 'K_g', ('find-clusters', 'clusters-per-group'): 'K_g',\n # predict-observables\n ('predict-observables', 'B'): 'begin',\n ('predict-observables', 'E'): 'end',\n # sample-mcmc\n ('sample-mcmc', 'n'): 'pre_N', ('sample-mcmc', 'prerun-samples'): 'pre_N',\n ('sample-mcmc', 'p'): 'preruns',\n ('sample-mcmc', 'S'): 'stride',\n # sample-pmc\n ('sample-pmc', 'n'): 'step_N', ('sample-pmc', 'step-samples'): 'step_N',\n ('sample-pmc', 's'): 'steps',\n ('sample-pmc', 'N'): 'final_N', ('sample-pmc', 'final-samples'): 'final_N'\n\n }\n return {\n specific_params_map[(command, k)] if (command, k) in specific_params_map\n else general_params_map[k] if k in general_params_map\n else k\n for k, v in params.items()\n }", "def parm_values(overrides):\n\n originals = []\n try:\n for parm, value in overrides:\n originals.append((parm, parm.eval()))\n parm.set(value)\n yield\n finally:\n for parm, value in originals:\n # Parameter might not exist anymore so first\n # check whether it's still valid\n if hou.parm(parm.path()):\n parm.set(value)", "def reparam(string_, dictionary):\n dictionary = dictionary.copy() # eval mucks with it\n # disable builtins to avoid risk for remote code exection.\n dictionary['__builtins__'] = object()\n vals = []\n result = []\n for live, chunk in _interpolate(string_):\n if live:\n v = eval(chunk, dictionary)\n result.append(sqlquote(v))\n else: \n result.append(chunk)\n return SQLQuery.join(result, '')", "def parse_launch_arguments(launch_arguments: List[Text]) -> List[Tuple[Text, Text]]:\n parsed_launch_arguments = OrderedDict() # type: ignore\n for argument in launch_arguments:\n count = argument.count(':=')\n if count == 0 or argument.startswith(':=') or (count == 1 and argument.endswith(':=')):\n raise RuntimeError(\n \"malformed launch argument '{}', expected format '<name>:=<value>'\"\n .format(argument))\n name, value = argument.split(':=', maxsplit=1)\n parsed_launch_arguments[name] = value # last one wins is intentional\n return parsed_launch_arguments.items()", "def parse_var(s):\n items = s.split('=')\n key = items[0].strip() # we remove blanks around keys, as is logical\n if len(items) > 1:\n # rejoin the rest:\n value = '='.join(items[1:])\n return (key, value)", "def read_string(self, string, **kwds):\n self._dict.update(json.loads(string))", "def merge_parameter(base_params, override_params):\n if override_params is None:\n return base_params\n is_dict = isinstance(base_params, dict)\n for k, v in override_params.items():\n if is_dict:\n if k not in base_params:\n raise ValueError('Key \\'%s\\' not found in base parameters.' % k)\n if type(base_params[k]) != type(v) and base_params[k] is not None:\n raise TypeError('Expected \\'%s\\' in override parameters to have type \\'%s\\', but found \\'%s\\'.' %\n (k, type(base_params[k]), type(v)))\n base_params[k] = v\n else:\n if not hasattr(base_params, k):\n raise ValueError('Key \\'%s\\' not found in base parameters.' 
% k)\n if type(getattr(base_params, k)) != type(v) and getattr(base_params, k) is not None:\n raise TypeError('Expected \\'%s\\' in override parameters to have type \\'%s\\', but found \\'%s\\'.' %\n (k, type(getattr(base_params, k)), type(v)))\n setattr(base_params, k, v)\n return base_params", "def get_params(url=None):\r\n dict = {}\r\n if not url:\r\n url = sys.argv[2]\r\n pairs = url.lstrip(\"?\").split(\"&\")\r\n for pair in pairs:\r\n if len(pair) < 3:\r\n continue\r\n kv = pair.split(\"=\", 1)\r\n k = kv[0]\r\n v = urllib.parse.unquote_plus(kv[1])\r\n dict[k] = v\r\n return dict", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def parse_key_value_arg(self, arg_value, argname):\n result = {}\n for data in arg_value:\n\n # Split at first '=' from left\n key_value_pair = data.split(\"=\", 1)\n\n if len(key_value_pair) != 2:\n raise exceptions.InvalidKeyValuePairArgumentError(\n argname=argname,\n value=key_value_pair)\n\n result[key_value_pair[0]] = key_value_pair[1]\n\n return result", "def parse_var(s):\n items = s.split(\"=\")\n key = items[0].strip() # we remove blanks around keys, as is logical\n value = \"\"\n if len(items) > 1:\n # rejoin the rest:\n value = \"=\".join(items[1:])\n return key, value", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n return s", "def parse_defaults(defaults_string):\n if not defaults_string:\n return\n current = \"\"\n in_quote = None\n for char in defaults_string:\n if current == \"\" and char == \" \":\n # Skip space after comma separating default expressions\n continue\n if char == '\"' or char == \"'\":\n if in_quote and char == in_quote:\n # End quote\n in_quote = None\n elif not in_quote:\n # Begin quote\n in_quote = char\n elif char == \",\" and not in_quote:\n # End of expression\n yield current\n current = \"\"\n continue\n current += char\n yield current", "def parse_string_dict(dict_as_string):\n new_dict = ast.literal_eval(dict_as_string[1:-1])\n new_dict = {key: parse_string(val) for key, val in new_dict.items()}\n return new_dict", "def parse_config_string(config_string, issue_warnings=True):\r\n config_dict = {}\r\n for kv_pair in THEANO_FLAGS.split(','):\r\n kv_pair = kv_pair.strip()\r\n if not kv_pair:\r\n continue\r\n kv_tuple = kv_pair.split('=', 1)\r\n if len(kv_tuple) == 1:\r\n if issue_warnings:\r\n TheanoConfigWarning.warn(\r\n (\"Config key '%s' has no value, ignoring it\"\r\n % kv_tuple[0]),\r\n stacklevel=1)\r\n else:\r\n k, v = kv_tuple\r\n # subsequent values for k will override earlier ones\r\n config_dict[k] = v\r\n return config_dict" ]
[ "0.65870714", "0.65518653", "0.65437955", "0.65169543", "0.63385206", "0.6322526", "0.63106567", "0.6298732", "0.6283422", "0.6264256", "0.62140757", "0.6202048", "0.6163683", "0.60954064", "0.6093808", "0.6043404", "0.60339767", "0.6015657", "0.5971249", "0.5954985", "0.5940959", "0.59400064", "0.59167373", "0.58862746", "0.5881694", "0.58210146", "0.5772958", "0.575196", "0.57452476", "0.5730277", "0.57096636", "0.5685965", "0.5684772", "0.56731015", "0.5668372", "0.5667305", "0.5653163", "0.56466585", "0.5607535", "0.5606086", "0.5585155", "0.5576487", "0.55706275", "0.55627006", "0.5562251", "0.556202", "0.55612254", "0.5543901", "0.55115974", "0.54941267", "0.54867715", "0.5485462", "0.5477189", "0.54761714", "0.5476148", "0.54563236", "0.5452044", "0.54508454", "0.54494256", "0.5445725", "0.54279935", "0.5424064", "0.54175264", "0.54129785", "0.5405808", "0.5385684", "0.5382999", "0.5373817", "0.53731793", "0.5372575", "0.5353813", "0.5349455", "0.5347593", "0.53398246", "0.533502", "0.5319905", "0.53110296", "0.52976376", "0.52965224", "0.52964103", "0.529436", "0.52718544", "0.52708054", "0.5270173", "0.523485", "0.5233215", "0.52320766", "0.5231606", "0.5227524", "0.52157044", "0.520966", "0.5207469", "0.5202064", "0.5189562", "0.5188926", "0.51885045", "0.5188062", "0.51590526", "0.5151183", "0.5142898" ]
0.74429375
0
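
The two bare values above close this record: the first is the positive document's retrieval score, the second its rank. Comparing fields across the two complete records in view suggests the rank simply counts negatives that outscore the positive document — every entry in the score list above stays below 0.74429375 (rank 0), while in the next record exactly three negatives beat 0.6918819 (rank 3). A minimal sketch of that inferred relationship; the helper name is illustrative and the strict-inequality rule is an assumption consistent with these two records, not a documented contract:

def derive_document_rank(document_score, negative_scores):
    # Rank of the positive document = number of negatives scoring strictly higher.
    doc = float(document_score)
    return sum(float(score) > doc for score in negative_scores)

# Spot-checks against the two complete records in view (score lists truncated):
assert derive_document_rank("0.74429375", ["0.65870714", "0.65518653", "0.65437955"]) == 0
assert derive_document_rank("0.6918819", ["0.70584273", "0.69872224", "0.69679195", "0.69105625"]) == 3
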
Lists all apps that have been published to the 21 marketplace
def _list_apps(config, client): logger.info("Listing all the published apps by {}: ".format(config.username), fg="green") current_page = 0 total_pages = get_search_results(config, client, current_page) if total_pages < 1: return while 0 <= current_page < total_pages: try: prompt_resp = click.prompt(uxstring.UxString.pagination, type=str) next_page = get_next_page(prompt_resp, current_page) if next_page == -1: model_id = prompt_resp display_app_info(config, client, model_id) elif next_page >= total_pages or next_page < 0: continue else: get_search_results(config, client, next_page) current_page = next_page except click.exceptions.Abort: return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_all_apps(self):\n return list(self.apps.values())", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)", "async def get_installed_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_INSTALLEDAPPS, params=params)", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_apps(self):\n return self.apps", "def list_apps(self, ns_name):\n\n return self.helm_client.list(namespace=ns_name)", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))", "def get_app_list(self):\n\n return self._get().keys()", "def get_apps(self, limit, offset=None):\n params = {'v': WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if 
not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _discover_apps(api, limit=None):\n categories = api.categories()\n subcategories = []\n for category in categories:\n subcategories.extend(api.subcategories(category))\n app_lists = []\n app_count = 0\n LOGGER.info(f'Found {len(subcategories)} subcategories for {len(categories)} categories')\n for subcategory in subcategories:\n app_list = api.discover_apps(subcategory)\n if not app_list:\n continue\n while ALL:\n if limit:\n if len(app_list) >= limit:\n app_list = app_list.limit(app_list[:limit])\n LOGGER.info(f'Subcategory \"{app_list.name()}\" reached the threshhold of {limit}, moving on.')\n break\n try:\n app_list.more()\n except Maximum:\n LOGGER.info(f'Subcategory \"{app_list.name()}\" yielded {len(app_list)} apps')\n break\n app_lists.append(app_list)\n app_count += len(app_list)\n app_set = set()\n for app_list in app_lists:\n for app in app_list:\n app_set.add(app.package_name())\n LOGGER.info(f'{\"#\" * 60}\\n'\n f'\\tFinished discovering Apps!\\n'\n f'\\tGot {app_count} apps in {len(app_lists)} subcategories of {len(categories)} categories\\n'\n f'\\tOut of those {app_count} apps, {len(app_set)} apps had a unique package name\\n'\n f'\\t{\"#\" * 60}')\n return app_lists", "def get_applications(status):\n return status['applications']", "def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n 
\"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def dock_app_list(data):\n apps = []\n count = data['extra_dock'] + 1\n for i in range(count):\n name = data['app_name_%s' % str(i)]\n path = data['app_path_%s' % str(i)]\n if name not in [None, '']:\n apps.append({'name': name, 'path': path})\n return apps", "def listapps(parser):\n\n print('Function List')\n subparsers_actions = [\n # pylint: disable=protected-access\n action for action in parser._actions\n # pylint: disable=W0212\n if isinstance(action, argparse._SubParsersAction)]\n # there will probably only be one subparser_action,\n # but better safe than sorry\n for subparsers_action in subparsers_actions:\n # get all subparsers and print help\n for choice, subparser in subparsers_action.choices.items():\n print(\"Function: '{}'\".format(choice))\n print(subparser.format_help())\n # print(parser.format_help())", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "def get_all_applications():\n cursor.execute(\n f'SELECT * FROM public.applications where status = %s', (\"pending\",))\n rows = cursor.fetchall()\n application_dicts = []\n\n for item in rows:\n application = Application(id=item[0], party_name=item[1], office_name=item[2], user_id=item[3],\n date_created=item[4],status=item[5])\n application = application.json_dumps()\n application_dicts.append(application)\n return application_dicts", "def program_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/program\")\n \n program_heading = abcradionational.get_podcast_heading(soup)\n\n for program in program_heading:\n items.append({\n 'label': program['title'],\n 'path': plugin.url_for('program_item', url=program['url']),\n })\n\n return items", "def fw_app_list(data):\n apps = []\n count = data['extra_firewall']\n for i in range(count):\n bundle = data['id_%s' % str(i + 1)]\n allowed = data['permit_%s' % str(i + 1)]\n if bundle not in [None, '']:\n apps.append({'bundle_id': bundle, 'allowed': allowed})\n return apps", "def get_apps(self, request, app_ids):\n sq = WebappIndexer.search()\n if request.query_params.get('filtering', '1') == '1':\n # With filtering (default).\n for backend in self.filter_backends:\n sq = backend().filter_queryset(request, sq, self)\n sq = WebappIndexer.filter_by_apps(app_ids, sq)\n\n # Store the apps to attach to feed 
elements later.\n with statsd.timer('mkt.feed.views.apps_query'):\n apps = sq.execute().hits\n return dict((app.id, app) for app in apps)", "def list_freelancer_applications(self, status=None):\n data = {}\n\n if status:\n data['status'] = status\n\n url = 'contractors/applications'\n return self.get(url, data)", "def get(self):\n return read_heroku_apps(request.args)", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def describe_apps(StackId=None, AppIds=None):\n pass", "def get_search_results(config, client, page):\n resp = client.get_published_apps(config.username, page)\n resp_json = resp.json()\n search_results = resp_json[\"results\"]\n if search_results is None or len(search_results) == 0:\n logger.info(\n click.style(\"You haven't published any apps to the marketplace yet. Use \", fg=\"blue\") +\n click.style(\"21 publish submit {PATH_TO_MANIFEST_FILE}\", bold=True, fg=\"blue\") +\n click.style(\" to publish your apps to the marketplace.\", fg=\"blue\"), fg=\"blue\")\n return 0\n\n total_pages = resp_json[\"total_pages\"]\n logger.info(\"\\nPage {}/{}\".format(page + 1, total_pages), fg=\"green\")\n headers = [\"id\", \"Title\", \"Url\", \"Rating\", \"Is up\", \"Is healthy\", \"Average Uptime\",\n \"Last Update\"]\n rows = []\n for r in search_results:\n rating = \"Not yet Rated\"\n if r[\"rating_count\"] > 0:\n rating = \"{:.1f} ({} rating\".format(r[\"average_rating\"],\n int(r[\"rating_count\"]))\n if r[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rows.append([r[\"id\"],\n r[\"title\"],\n r[\"app_url\"],\n rating,\n str(r[\"is_up\"]),\n str(r[\"is_healthy\"]),\n \"{:.2f}%\".format(r[\"average_uptime\"] * 100),\n util.format_date(r[\"last_update\"])])\n\n logger.info(tabulate(rows, headers, tablefmt=\"simple\"))\n\n return total_pages", "def RApps(self):\n\t\treturn self.acad.ActiveDocument.RegisteredApplications", "def app_list(request):\n return render(request, 'mdm/app_list.html', {})", "def get_applications(rest, sessionsArg, option):\n applications = []\n if option == 'heartbeat':\n appsString = rest.get_environment_applications(sessionsArg).strip();\n else:\n appsString = rest.get_all_applications().strip();\n rawList = appsString.split('\\n<\\n')\n for raw in rawList:\n if printtrace: print '_' * 20\n if applicationdataok(raw):\n attributes = [a.split(': ')[1] for a in raw.split('\\n')]\n if printtrace: print attributes\n\n a = Application()\n a.sessionId = attributes[0]\n a.nameInEnvironmentView = attributes[1]\n a.fileName = attributes[2]\n a.processString = attributes[3]\n a.discoveryChecks = attributes[4:]\n a.isgeneric = a.nameInEnvironmentView == 'generic application' or 
a.fileName.find('generic-application') > 0\n if not a.isgeneric:\n applications.append(a)\n return applications", "def get_applications(self):\n status_code_dict = {\n codes.ok: ApplicationListResponse,\n codes.bad_request: ErrorResponse,\n }\n return self.get_request(APPLICATION_URL,\n status_code_response_class_dict=status_code_dict,\n )", "def apps(self):\n return list(self.ctx.keys())", "def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def get_app_ids(self, feed_element):\n if hasattr(feed_element, 'app'):\n return [feed_element.app]\n return feed_element.apps", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def get_app_list(self, 
request):\n app_dict = self._build_app_dict(request)\n\n # Sort the apps alphabetically.\n app_list = sorted(app_dict.values(), key=lambda x: x['order'])\n\n # Sort the models alphabetically within each app.\n for app in app_list:\n app['models'].sort(key=lambda x: x['order'])\n\n return app_list", "def list_package(all: bool = False) -> List[List[str]]:\n if not all:\n pkgs_info = read_installation_records()\n else:\n pkgs_info = []\n for pkg in pkg_resources.working_set:\n pkgs_info.append([pkg.project_name, pkg.version])\n\n return pkgs_info", "def get_app_ids(self):\n return self.apps", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def get_app_list(self, request):\n ordering = {\n \"Sujets\":1,\n \"Secteurs\":2,\n \"Pages\":3,\n \"Liens\":4,\n \"Illustrations\":5,\n \"Pictures\":6,\n \"Picture dims\":7,\n \"Icons\":8,\n }\n app_dict = self._build_app_dict(request)\n # a.sort(key=lambda x: b.index(x[0]))\n # Sort the apps alphabetically.\n app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())\n\n # Sort the models alphabetically within each app.\n for app in app_list:\n app['models'].sort(key=lambda x: ordering[x['name']])\n\n return app_list", "def spark_list(provider):\n api.available(provider)", "def add_app(self, app_name):\n self.add_list_setting('applications', 'installed_apps', app_name)", "def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):\n\n current['apps'] = tuple(\n [a for a in current['apps'] if a not in exclude]\n ) + tuple(append)\n return current['apps']", "def applications(self) -> List[ApplicationRequestResponse]:\n return self._applications", "def get_my_app_list(app_list):\n all_excluded_models = getattr(settings, 'EXCLUDE_ADMIN_APPS_MODELS', {})\n\n for app in app_list:\n models = app['models']\n match_app_models = [app_model_name.split('.')[1] for app_model_name in all_excluded_models if\n app_model_name.split('.')[0] == str(app['name'])]\n filter_models = [model for model in models if model['object_name'] not in match_app_models]\n app['models'] = filter_models\n\n return app_list", "def get_local_app_list():\n\t\tapp_list = [\n\t\t\t{\n\t\t\t\t'name': app,\n\t\t\t\t'dir': os.path.dirname(os.path.abspath(import_module(app).__file__)),\n\t\t\t}\n\t\t\tfor app in settings.INSTALLED_APPS\n\t\t]\n\t\treturn [app for app in app_list if settings.BASE_DIR in app['dir']]", "def extract_programs():\n if settings.XPRO_CATALOG_API_URL:\n return requests.get(settings.XPRO_CATALOG_API_URL, timeout=20).json()\n return []", "def get_applications(site) -> list:\n collection = site.Collection\n result = []\n for i in range(collection.Count):\n prop = collection[i].Properties\n result.append(SiteApplication(\n prop[\"path\"].Value,\n prop[\"applicationPool\"].Value\n ))\n\n return result", "def _load_installed_applications(self):\n for application in self.settings.get('apps', None) or []:\n path = None\n if isinstance(application, six.string_types):\n application_name = application\n if application.startswith('gordon.contrib.'):\n app_parts = application.split('.')\n path = os.path.join(self.root, 'contrib', app_parts[-1])\n application_name = '_'.join(app_parts[1:])\n settings = {}\n elif isinstance(application, dict):\n application_name = application.keys()[0]\n settings = application.values()[0]\n else:\n raise exceptions.InvalidAppFormatError(application)\n\n with indent(2):\n 
self.puts(colored.cyan(\"{}:\".format(application_name)))\n\n self.add_application(\n App(\n name=application_name,\n settings=settings,\n project=self,\n path=path\n )\n )", "def applications(self):\r\n return applications.Applications(self)", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def run(self):\n logging.debug('List Installed Programs')\n if self.short:\n print(' '.join([ent for ent in pakit.conf.IDB]))\n return\n\n nchars = 12\n fmt = str(nchars).join(['{prog:', '} {repo:',\n '} {hash:', '} {date}'])\n installed = ['Program Repo Hash Date']\n for prog in pakit.conf.IDB:\n entry = pakit.conf.IDB[prog]\n installed.append(fmt.format(prog=prog[0:nchars],\n repo=entry['repo'][0:nchars],\n date=entry['date'],\n hash=entry['hash'][0:nchars]))\n\n msg = 'Installed Programs:'\n msg += PREFIX + PREFIX.join(installed)\n print(msg)\n return msg", "def get(self):\n apps = Application.objects()\n\n # TODO return more information\n apps_clean = []\n for app in apps:\n # don't include invalid apps\n if app[\"validated\"] is True:\n apps_clean.append(\n {\"name\": app[\"name\"]}\n )\n\n return apps_clean, 200", "def list_app_devices(request, pk):\n context = {}\n app = get_object_or_404(MacOSApp, pk=pk)\n pending = Laptop.objects.filter(apps_pending__in=[app])\n installed = InstallationRecord.objects.filter(app=app, device__apps_installed__in=[app], active=True)\n context['resource'] = app\n context['resource_type'] = 'App'\n context['pending'] = pending\n context['installed'] = installed\n return render(request, 'mdm/device_list.html', context)", "def get_publishers(self):", "def test_get_hyperflex_app_catalog_list(self):\n pass", "def discover_glitter_apps(self):\n for app_name in settings.INSTALLED_APPS:\n module_name = '{app_name}.glitter_apps'.format(app_name=app_name)\n try:\n glitter_apps_module = import_module(module_name)\n if hasattr(glitter_apps_module, 'apps'):\n self.glitter_apps.update(glitter_apps_module.apps)\n except ImportError:\n pass\n\n self.discovered = True", "def applications(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.name != name:\r\n return abort(403)\r\n\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n apps_published, apps_draft = _get_user_apps(user.id)\r\n\r\n return render_template('account/applications.html',\r\n title=gettext(\"Applications\"),\r\n apps_published=apps_published,\r\n apps_draft=apps_draft)", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def apps_information(self):\n with open(self.app_data_path, 'r') as app_csv_file:\n csv_reader = csv.reader(app_csv_file)\n apps = [self.AppInformation(app[0], app[1], app[2], app[3], app[4], app[5]) for app in csv_reader]\n return apps", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': 
application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def AppGetApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def getValidGamesList(self):\n\n data = requests.get('http://api.steampowered.com/ISteamApps/GetAppList/v2/').json()\n df = json_normalize(data['applist'], 'apps')\n df.to_csv('Resources/allgames.csv.gz', compression='gzip', index=False)", "def get_applications(self):\n applications = []\n\n # Isolate all of the bnodes referring to target applications\n for target_app in self.get_objects(None,\n self.uri('targetApplication')):\n applications.append({\n 'guid': self.get_object(target_app, self.uri('id')),\n 'min_version': self.get_object(target_app,\n self.uri('minVersion')),\n 'max_version': self.get_object(target_app,\n self.uri('maxVersion'))})\n return applications", "def update_all(app, env):\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and env.book_theme_resources_changed\n ):\n return list(env.all_docs.keys())", "def app_names(self):\n return self.get_app_names()", "def show(ctx, appeui):\n if '.' 
in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def find_gp_app_links(html):\n links = []\n for m in re.finditer('href=\"(/store/apps/details[^\"]+)\"', html):\n #print '%02d-%02d: %s' % (m.start(), m.end(), m.group(1))\n links.append(m.group(1))\n return links", "def sync_apps(self):\n pass", "def get_bungie_applications(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/FirstParty/\"))", "def test_get_top_doesnt_return_hidden_apps(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n hidden_app = self.create_app_with_contributors(11, 0, name='hidden')\r\n hidden_app.hidden = 1\r\n db.session.add(hidden_app)\r\n db.session.commit()\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert len(top_apps) is 3, len(top_apps)\r\n for app in top_apps:\r\n assert app['name'] != 'hidden', app['name']", "def apps(self):\n db = self['__store'].db\n my_apps = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n group_id\n from subgroups, groups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'A'\n \"\"\",\n self._id)\n }\n return my_apps", "def show_applications_toc():\n if not cache.get(APPLICATIONS_TOC_CACHE_KEY):\n from django.utils.importlib import import_module\n from sveedocuments.models import Page\n \n apps_infos = []\n for appname, apptitle, appdesc, appkwargs in settings.PUBLISHED_APPS:\n title = apptitle or appname\n desc = appdesc\n doc_link = appkwargs.get('doc_link', None)\n demo_link = appkwargs.get('demo_link', None)\n download_link = appkwargs.get('download_link', None)\n github_link = None\n \n # Links can be tuple, 
that is assumed to be passed by a reverse url with first \n # element as url name and second argument as args list\n if doc_link and not isinstance(doc_link, basestring):\n doc_link = reverse(doc_link[0], args=doc_link[1])\n \n if demo_link and not isinstance(demo_link, basestring):\n demo_link = reverse(demo_link[0], args=demo_link[1])\n \n if download_link and not isinstance(download_link, basestring):\n download_link = reverse(download_link[0], args=download_link[1])\n \n # Determine some optionnals urls from a schema where we insert the appname\n if not download_link and appkwargs.get('pypi', False):\n download_link = \"http://pypi.python.org/pypi/{0}\".format(appname)\n \n if appkwargs.get('github', False):\n github_link = \"https://github.com/sveetch/{0}\".format(appname)\n if not download_link:\n download_link = \"{0}/tags\".format(github_link)\n \n # Try to get introduction from the module __doc__ attribute\n if not desc:\n try:\n mod = import_module(appname)\n except ImportError:\n pass\n else:\n if mod.__doc__.strip():\n desc = mod.__doc__.strip()\n \n # Try to get some informations from the document Page if it exists\n try:\n page_instance = Page.objects.get(slug=appname)\n except Page.DoesNotExist:\n pass\n else:\n title = page_instance.title\n doc_link = page_instance.get_absolute_url() or doc_link\n \n apps_infos.append({\n 'title': title,\n 'desc': desc,\n 'doc_link': doc_link,\n 'demo_link': demo_link,\n 'download_link': download_link,\n 'github_link': github_link,\n })\n \n cache.set(APPLICATIONS_TOC_CACHE_KEY, {'application_toc': tuple(apps_infos)})\n \n return cache.get(APPLICATIONS_TOC_CACHE_KEY)", "def listExclusiveItems(appStore):\n appStores = ('Xbox', 'Amazon', 'iOS', 'Google Play')\n if appStore not in appStores:\n Exception(f\"No valid app store was provided. 
Valid choices are {appStores}.\")\n else:\n url = f\"https://catalog.roblox.com/v1/exclusive-items/{appStore}/bundles\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']", "def get_app_icon_list(name, limit, offset, quiet, marketplace_use=False):\n\n client = get_api_client()\n params = {\"length\": limit, \"offset\": offset}\n if name:\n params[\"filter\"] = get_name_query([name])\n\n app_icon_name_uuid_map = client.app_icon.get_name_uuid_map(params)\n if quiet:\n for name in app_icon_name_uuid_map.keys():\n click.echo(highlight_text(name))\n return\n\n table = PrettyTable()\n field_names = [\"NAME\", \"UUID\"]\n if marketplace_use:\n field_names.append(\"IS_MARKETPLACE_ICON\")\n\n table.field_names = field_names\n for name, uuid in app_icon_name_uuid_map.items():\n data_row = [highlight_text(name), highlight_text(uuid)]\n if marketplace_use:\n res, err = client.app_icon.is_marketplace_icon(uuid)\n if err:\n LOG.error(\"[{}] - {}\".format(err[\"code\"], err[\"error\"]))\n sys.exit(-1)\n res = res.json()\n data_row.append(highlight_text(res[\"is_marketplaceicon\"]))\n\n table.add_row(data_row)\n\n click.echo(table)", "def installed_appnames():\n appnames = set()\n for finder in sys.meta_path:\n if hasattr(finder, 'appname'):\n appnames.add(finder.appname)\n return appnames", "def connect_apps(self):\r\n return applications.ConnectApps(self)", "def list_application_change(request):\r\n return render(request, \"tracking/listTrackingApplication.html\", {\r\n 'trackinglist': ApplicationTracking.objects.order_by('-Timestamp')\r\n })", "def GetAllowedAndroidApplications(args, messages):\n allowed_applications = []\n for application in getattr(args, 'allowed_application', []) or []:\n android_application = messages.V2AndroidApplication(\n sha1Fingerprint=application['sha1_fingerprint'],\n packageName=application['package_name'])\n allowed_applications.append(android_application)\n return allowed_applications", "def find_applications_for_candidate(email):\n\n applications = Application.objects.filter(\n authorized_email__iexact=email\n ).order_by('-create_dt')\n return applications", "def api_list(self):\n return self._get('apis')", "def get_matching_apps(\n service: str, instance: str, marathon_apps: Sequence[MarathonApp]\n) -> Sequence[MarathonApp]:\n return [\n app for app in marathon_apps if does_app_id_match(service, instance, app.id)\n ]", "def applications(self):\n return [self.app] + self.mounts.values()", "def list_programs(channel_name, uri):\r\n # Set plugin category. 
It is displayed in some skins as the name\r\n # of the current section.\r\n xbmcplugin.setPluginCategory(_handle, channel_name)\r\n # Get the list of videos in the category.\r\n result = _get_data(uri)\r\n # Iterate through videos.\r\n #logger.info(\"######: {}, log: {}########\".format('rk', result['items']))\r\n for program in result['items']:\r\n # {\r\n # \"title\": \"Raja Rani\",\r\n # \"categoryId\": 14064,\r\n # \"contentId\": 14230,\r\n # \"uri\": \"https://api.hotstar.com/o/v1/show/detail?id=\r\n # 1101&avsCategoryId=14064&contentId=14230&offset=0&size=20&tao=0&tas=5\",\r\n # \"description\": \"Due to certain circumstances, Karthik marries the maid of his family,\",\r\n # \"assetType\": \"SHOW\",\r\n # \"genre\": [\r\n # \"Family\"\r\n # ],\r\n # \"lang\": [\r\n # \"Tamil\"\r\n # ],\r\n # \"channelName\": \"Star Vijay\",\r\n # \"episodeCnt\": 407,\r\n # \"premium\": false\r\n # },\r\n _add_directory_item(\r\n parent_title=channel_name,\r\n title=program['title'],\r\n content_id=program['contentId'],\r\n genre=program.get('genre') or program['title'],\r\n description=program.get('description'),\r\n uri=program['uri'],\r\n action='programs' if program.get('assetType') == 'GENRE' else 'program_details',\r\n image=get_thumbnail_image(program)\r\n )\r\n\r\n _add_next_page_and_search_item(result['nextPage'], 'programs', channel_name)\r\n\r\n # Add a sort method for the virtual folder items (alphabetically, ignore articles)\r\n xbmcplugin.addSortMethod(_handle, xbmcplugin.SORT_METHOD_LABEL)\r\n\r\n # Finish creating a virtual folder.\r\n xbmcplugin.endOfDirectory(_handle)" ]
[ "0.70584273", "0.69872224", "0.69679195", "0.69105625", "0.66437083", "0.66229135", "0.657946", "0.64917386", "0.6440265", "0.63986486", "0.6345584", "0.63214445", "0.6319517", "0.63044786", "0.6269743", "0.6268732", "0.6233685", "0.61907285", "0.6159065", "0.6152657", "0.60849243", "0.60683453", "0.60596514", "0.604095", "0.6035981", "0.5982991", "0.5953988", "0.58085346", "0.58027077", "0.5802313", "0.57896316", "0.5769953", "0.5743291", "0.57230055", "0.5713464", "0.5692929", "0.5673559", "0.5669089", "0.56656677", "0.5646431", "0.5619692", "0.5615241", "0.5612501", "0.56107867", "0.5585722", "0.55360454", "0.55316144", "0.5504282", "0.5489184", "0.54851973", "0.54824436", "0.5482303", "0.5466328", "0.5465985", "0.54606855", "0.54450655", "0.5441723", "0.54354846", "0.5412416", "0.5408299", "0.53968775", "0.5393191", "0.539178", "0.53665584", "0.53440344", "0.5334556", "0.53229153", "0.53153735", "0.5312366", "0.531124", "0.5309209", "0.5308448", "0.529805", "0.5296721", "0.5290589", "0.528966", "0.5287616", "0.5272945", "0.52724326", "0.52634376", "0.52495813", "0.5247215", "0.5216479", "0.52059454", "0.5205433", "0.51974815", "0.5189029", "0.5184961", "0.5184196", "0.5182346", "0.51684517", "0.5154978", "0.514921", "0.5147728", "0.5145559", "0.51248336", "0.51242006", "0.5116296", "0.5102309", "0.5095395" ]
0.6918819
3
Gets all apps published by the current user to the 21 marketplace
def _get_all_app_ids(config, client):
    rv = set()
    total_pages = client.get_published_apps(config.username, 0).json()["total_pages"]
    for current_page in range(total_pages):
        current_page_results = client.get_published_apps(config.username, current_page).json()['results']
        for result in current_page_results:
            rv.add(result['id'])
    return rv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_apps(self):\n return list(self.apps.values())", "async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def get_apps(self):\n return self.apps", "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "async def get_installed_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_INSTALLEDAPPS, params=params)", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def get_apps(self, limit, offset=None):\n params = {'v': WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' 
)\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "def get(self):\n return read_heroku_apps(request.args)", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_app_list(self):\n\n return self._get().keys()", "def get_apps(self, request, app_ids):\n sq = WebappIndexer.search()\n if request.query_params.get('filtering', '1') == '1':\n # With filtering (default).\n for backend in self.filter_backends:\n sq = backend().filter_queryset(request, sq, self)\n sq = WebappIndexer.filter_by_apps(app_ids, sq)\n\n # Store the apps to attach to feed elements later.\n with statsd.timer('mkt.feed.views.apps_query'):\n apps = sq.execute().hits\n return dict((app.id, app) for app in apps)", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def get_applications(self):\n status_code_dict = {\n codes.ok: ApplicationListResponse,\n codes.bad_request: ErrorResponse,\n }\n return self.get_request(APPLICATION_URL,\n status_code_response_class_dict=status_code_dict,\n )", "def _discover_apps(api, limit=None):\n categories = api.categories()\n subcategories = []\n for category in categories:\n subcategories.extend(api.subcategories(category))\n app_lists = []\n app_count = 0\n LOGGER.info(f'Found {len(subcategories)} subcategories for {len(categories)} categories')\n for subcategory in subcategories:\n app_list = api.discover_apps(subcategory)\n if not 
app_list:\n continue\n while ALL:\n if limit:\n if len(app_list) >= limit:\n app_list = app_list.limit(app_list[:limit])\n LOGGER.info(f'Subcategory \"{app_list.name()}\" reached the threshhold of {limit}, moving on.')\n break\n try:\n app_list.more()\n except Maximum:\n LOGGER.info(f'Subcategory \"{app_list.name()}\" yielded {len(app_list)} apps')\n break\n app_lists.append(app_list)\n app_count += len(app_list)\n app_set = set()\n for app_list in app_lists:\n for app in app_list:\n app_set.add(app.package_name())\n LOGGER.info(f'{\"#\" * 60}\\n'\n f'\\tFinished discovering Apps!\\n'\n f'\\tGot {app_count} apps in {len(app_lists)} subcategories of {len(categories)} categories\\n'\n f'\\tOut of those {app_count} apps, {len(app_set)} apps had a unique package name\\n'\n f'\\t{\"#\" * 60}')\n return app_lists", "def get_applications(status):\n return status['applications']", "def list_apps(self, ns_name):\n\n return self.helm_client.list(namespace=ns_name)", "def get_applications(site) -> list:\n collection = site.Collection\n result = []\n for i in range(collection.Count):\n prop = collection[i].Properties\n result.append(SiteApplication(\n prop[\"path\"].Value,\n prop[\"applicationPool\"].Value\n ))\n\n return result", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def applications(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.name != name:\r\n return abort(403)\r\n\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n apps_published, apps_draft = _get_user_apps(user.id)\r\n\r\n return render_template('account/applications.html',\r\n title=gettext(\"Applications\"),\r\n apps_published=apps_published,\r\n apps_draft=apps_draft)", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def get_all_applications():\n cursor.execute(\n f'SELECT * FROM public.applications where status = %s', (\"pending\",))\n rows = cursor.fetchall()\n application_dicts = []\n\n for item in rows:\n application = Application(id=item[0], party_name=item[1], office_name=item[2], user_id=item[3],\n date_created=item[4],status=item[5])\n application = application.json_dumps()\n application_dicts.append(application)\n return application_dicts", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def apps(self):\n return list(self.ctx.keys())", "def RApps(self):\n\t\treturn self.acad.ActiveDocument.RegisteredApplications", "def get_bungie_applications(self):\n # TODO: Assuming first 
server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/FirstParty/\"))", "def get_app_ids(self):\n return self.apps", "def get_integrations_userapps(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'sort_by', 'expand', 'next_page', 'previous_page', 'app_host']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_userapps\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/userapps'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n if 'app_host' in params:\n query_params['appHost'] = params['app_host']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UserAppEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_all_applicant(self) -> List[NoSQLUserApplication]:\n return self.user_application_manager.all()", "def get_search_results(config, client, page):\n resp = client.get_published_apps(config.username, page)\n resp_json = resp.json()\n search_results = resp_json[\"results\"]\n if search_results is None or len(search_results) == 0:\n logger.info(\n click.style(\"You haven't published any apps to the marketplace yet. 
Use \", fg=\"blue\") +\n click.style(\"21 publish submit {PATH_TO_MANIFEST_FILE}\", bold=True, fg=\"blue\") +\n click.style(\" to publish your apps to the marketplace.\", fg=\"blue\"), fg=\"blue\")\n return 0\n\n total_pages = resp_json[\"total_pages\"]\n logger.info(\"\\nPage {}/{}\".format(page + 1, total_pages), fg=\"green\")\n headers = [\"id\", \"Title\", \"Url\", \"Rating\", \"Is up\", \"Is healthy\", \"Average Uptime\",\n \"Last Update\"]\n rows = []\n for r in search_results:\n rating = \"Not yet Rated\"\n if r[\"rating_count\"] > 0:\n rating = \"{:.1f} ({} rating\".format(r[\"average_rating\"],\n int(r[\"rating_count\"]))\n if r[\"rating_count\"] > 1:\n rating += \"s\"\n rating += \")\"\n rows.append([r[\"id\"],\n r[\"title\"],\n r[\"app_url\"],\n rating,\n str(r[\"is_up\"]),\n str(r[\"is_healthy\"]),\n \"{:.2f}%\".format(r[\"average_uptime\"] * 100),\n util.format_date(r[\"last_update\"])])\n\n logger.info(tabulate(rows, headers, tablefmt=\"simple\"))\n\n return total_pages", "def applications(self) -> List[ApplicationRequestResponse]:\n return self._applications", "def apps(self):\n db = self['__store'].db\n my_apps = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n group_id\n from subgroups, groups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'A'\n \"\"\",\n self._id)\n }\n return my_apps", "def applications(self):\r\n return applications.Applications(self)", "def AppGetApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_applications(rest, sessionsArg, option):\n applications = []\n if option == 'heartbeat':\n appsString = rest.get_environment_applications(sessionsArg).strip();\n else:\n appsString = rest.get_all_applications().strip();\n rawList = appsString.split('\\n<\\n')\n for raw in rawList:\n if printtrace: print '_' * 20\n if applicationdataok(raw):\n attributes = [a.split(': ')[1] for a in raw.split('\\n')]\n if printtrace: print attributes\n\n a = Application()\n a.sessionId = attributes[0]\n a.nameInEnvironmentView = attributes[1]\n a.fileName = attributes[2]\n a.processString = attributes[3]\n a.discoveryChecks = attributes[4:]\n a.isgeneric = a.nameInEnvironmentView == 'generic application' or a.fileName.find('generic-application') > 0\n if not a.isgeneric:\n applications.append(a)\n return applications", "def get_developer_apps_by_user(user_id: int) -> List[Dict]:\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n developer_apps = (\n session.query(DeveloperApp)\n .filter(\n DeveloperApp.user_id == user_id,\n DeveloperApp.is_current == True,\n DeveloperApp.is_delete == False,\n )\n .all()\n )\n return query_result_to_list(developer_apps)", "def get_apps(exclude=(), append=(), current={'apps': INSTALLED_APPS}):\n\n current['apps'] = tuple(\n [a for a in current['apps'] if a not in exclude]\n ) + tuple(append)\n return current['apps']", "def get_app_ids(self, feed_element):\n if hasattr(feed_element, 'app'):\n return [feed_element.app]\n return feed_element.apps", "def get_applications(self):\n applications = []\n\n # Isolate all of the bnodes referring to target applications\n for target_app in self.get_objects(None,\n self.uri('targetApplication')):\n applications.append({\n 'guid': self.get_object(target_app, self.uri('id')),\n 'min_version': self.get_object(target_app,\n self.uri('minVersion')),\n 'max_version': self.get_object(target_app,\n self.uri('maxVersion'))})\n return applications", "def get_apps(self):\n try:\n result = 
self._session.query(AppEntity).all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict", "def apps(self):\n if \"apps\" in self._prop_dict:\n return AppsCollectionPage(self._prop_dict[\"apps\"])\n else:\n return None", "def get_app_list(self, request):\n app_dict = self._build_app_dict(request)\n\n # Sort the apps alphabetically.\n app_list = sorted(app_dict.values(), key=lambda x: x['order'])\n\n # Sort the models alphabetically within each app.\n for app in app_list:\n app['models'].sort(key=lambda x: x['order'])\n\n return app_list", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def find_applications_for_candidate(email):\n\n applications = Application.objects.filter(\n authorized_email__iexact=email\n ).order_by('-create_dt')\n return applications", "def user_credential_applications(request):\n applications = CredentialApplication.objects.filter(\n user=request.user).order_by('-application_datetime')\n\n return render(request, 'user/user_credential_applications.html',\n {'applications':applications})", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n 
context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def get(self):\n apps = Application.objects()\n\n # TODO return more information\n apps_clean = []\n for app in apps:\n # don't include invalid apps\n if app[\"validated\"] is True:\n apps_clean.append(\n {\"name\": app[\"name\"]}\n )\n\n return apps_clean, 200", "def get_publishers(self):", "def get_apps(enable_details: Optional[bool] = None,\n ids: Optional[Sequence[str]] = None,\n name_regex: Optional[str] = None,\n os_type: Optional[str] = None,\n output_file: Optional[str] = None,\n product_id: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppsResult:\n __args__ = dict()\n __args__['enableDetails'] = enable_details\n __args__['ids'] = ids\n __args__['nameRegex'] = name_regex\n __args__['osType'] = os_type\n __args__['outputFile'] = output_file\n __args__['productId'] = product_id\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('alicloud:mhub/getApps:getApps', __args__, opts=opts, typ=GetAppsResult).value\n\n return AwaitableGetAppsResult(\n apps=pulumi.get(__ret__, 'apps'),\n enable_details=pulumi.get(__ret__, 'enable_details'),\n id=pulumi.get(__ret__, 'id'),\n ids=pulumi.get(__ret__, 'ids'),\n name_regex=pulumi.get(__ret__, 'name_regex'),\n names=pulumi.get(__ret__, 'names'),\n os_type=pulumi.get(__ret__, 'os_type'),\n output_file=pulumi.get(__ret__, 'output_file'),\n product_id=pulumi.get(__ret__, 'product_id'))", "def get_local_app_list():\n\t\tapp_list = [\n\t\t\t{\n\t\t\t\t'name': app,\n\t\t\t\t'dir': os.path.dirname(os.path.abspath(import_module(app).__file__)),\n\t\t\t}\n\t\t\tfor app in settings.INSTALLED_APPS\n\t\t]\n\t\treturn [app for app in app_list if settings.BASE_DIR in app['dir']]", "def application_list(p_engine, p_username, format, appname):\n\n ret = 0\n\n enginelist = get_list_of_engines(p_engine, p_username)\n\n if enginelist is None:\n return 1\n\n data = DataFormatter()\n data_header = [\n (\"Engine name\", 30),\n (\"Application name\", 30),\n ]\n data.create_header(data_header)\n data.format_type = format\n for engine_tuple in enginelist:\n engine_obj = DxMaskingEngine(engine_tuple)\n if engine_obj.get_session():\n continue\n applist = DxApplicationList()\n # load all objects\n applist.LoadApplications()\n\n if appname is None:\n applications = applist.get_allref()\n else:\n applications = applist.get_applicationId_by_name(appname)\n if len(applications) == 0:\n ret = ret + 1\n\n for appref in applications:\n appobj = applist.get_by_ref(appref)\n data.data_insert(\n engine_tuple[0],\n appobj.application_name\n )\n\n print(\"\")\n print (data.data_output(False))\n print(\"\")\n \n \n return ret", "def list_freelancer_applications(self, status=None):\n data = {}\n\n if status:\n data['status'] = status\n\n url = 'contractors/applications'\n return self.get(url, data)", "def _load_installed_applications(self):\n for application in self.settings.get('apps', None) or []:\n path = None\n if 
isinstance(application, six.string_types):\n application_name = application\n if application.startswith('gordon.contrib.'):\n app_parts = application.split('.')\n path = os.path.join(self.root, 'contrib', app_parts[-1])\n application_name = '_'.join(app_parts[1:])\n settings = {}\n elif isinstance(application, dict):\n application_name = application.keys()[0]\n settings = application.values()[0]\n else:\n raise exceptions.InvalidAppFormatError(application)\n\n with indent(2):\n self.puts(colored.cyan(\"{}:\".format(application_name)))\n\n self.add_application(\n App(\n name=application_name,\n settings=settings,\n project=self,\n path=path\n )\n )", "def authorized_gae_applications(self) -> Optional[pulumi.Input[List[pulumi.Input[str]]]]:\n return pulumi.get(self, \"authorized_gae_applications\")", "def _get_implied_apps(self, detected_apps):\n\n def __get_implied_apps(apps):\n _implied_apps = set()\n for app in apps:\n if 'implies' in self.apps[app]:\n _implied_apps.update(set(self.apps[app]['implies']))\n return _implied_apps\n\n implied_apps = __get_implied_apps(detected_apps)\n all_implied_apps = set()\n\n # Descend recursively until we've found all implied apps\n while not all_implied_apps.issuperset(implied_apps):\n all_implied_apps.update(implied_apps)\n implied_apps = __get_implied_apps(all_implied_apps)\n\n return all_implied_apps", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': 
account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_app_list(self, request):\n ordering = {\n \"Sujets\":1,\n \"Secteurs\":2,\n \"Pages\":3,\n \"Liens\":4,\n \"Illustrations\":5,\n \"Pictures\":6,\n \"Picture dims\":7,\n \"Icons\":8,\n }\n app_dict = self._build_app_dict(request)\n # a.sort(key=lambda x: b.index(x[0]))\n # Sort the apps alphabetically.\n app_list = sorted(app_dict.values(), key=lambda x: x['name'].lower())\n\n # Sort the models alphabetically within each app.\n for app in app_list:\n app['models'].sort(key=lambda x: ordering[x['name']])\n\n return app_list", "def GetAllowedAndroidApplications(args, messages):\n allowed_applications = []\n for application in getattr(args, 'allowed_application', []) or []:\n android_application = messages.V2AndroidApplication(\n sha1Fingerprint=application['sha1_fingerprint'],\n packageName=application['package_name'])\n allowed_applications.append(android_application)\n return allowed_applications", "def list_client_applications(self, buyer_team__reference, job_key,\n status=None, profile_key=None,\n agency_team__reference=None,\n order_by=None, page_offset=None,\n page_size=None):\n data = {}\n\n data['buyer_team__reference'] = buyer_team__reference\n data['job_key'] = job_key\n\n if status:\n data['status'] = status\n\n if profile_key:\n data['profile_key'] = profile_key\n\n if agency_team__reference:\n data['agency_team__reference'] = agency_team__reference\n\n if order_by:\n data['order_by'] = order_by\n\n data['page'] = '{0};{1}'.format(page_offset, page_size)\n\n url = 'clients/applications'\n return self.get(url, data)", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def connect_apps(self):\r\n return applications.ConnectApps(self)", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def dock_app_list(data):\n apps = []\n count = data['extra_dock'] + 1\n for i in range(count):\n 
name = data['app_name_%s' % str(i)]\n path = data['app_path_%s' % str(i)]\n if name not in [None, '']:\n apps.append({'name': name, 'path': path})\n return apps", "async def get_app_users(\n self, **kwargs\n ) -> friends.GetAppUsersResponseModel:\n\n params = self.get_set_params(locals())\n response = await self.api.request(\"friends.getAppUsers\", params)\n model = friends.GetAppUsersResponse\n return model(**response).response", "def home_feeds(request):\n result = {}\n \n result['feeds'] = []\n\n u = request.user\n\n\n # get other people's feeds, filter by friends if in social group\n feeds = Feed.objects.exclude(actor=u).order_by('-timestamp')\n result['feeds'] = [ f.get_json(me=u, android=True) for f in feeds ]\n\n return JSONHttpResponse(result)", "def get_queryset(self, request, is_list=False, local_site=None,\n *args, **kwargs):\n if not request.user.is_authenticated:\n return Application.objects.none()\n\n q = Q(local_site=local_site)\n\n # Unless the user is a super user or local site admin, the query will\n # be limited to that user's applications.\n if (not (request.user.is_superuser or\n (local_site and\n local_site.admins.filter(pk=request.user.pk).exists()))):\n q &= Q(user=request.user)\n\n username = request.GET.get('username')\n\n if username:\n q &= Q(user__username=username)\n\n return Application.objects.filter(q)", "def get(self):\n api_url = 'https://api.line.me/liff/v1/apps'\n result = requests.get(api_url, headers={\"Authorization\": self._headers[\"Authorization\"]})\n if result.status_code == 401:\n raise ErrorResponse(\"[401 Error] Certification failed.\")\n elif result.status_code == 404:\n raise ErrorResponse(\"[404 Error] There is no LIFF application on the channel.\")\n return json.loads(result.content)['apps']", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def get_most_popular_app(engine, publisher_id):\n connection = engine.connect()\n result = connection.execute(\n '''\n SELECT a.period,\n a.company_name,\n a.active_users,\n a.publisher_id,\n a.app_id\n FROM aa_months a\n WHERE a.publisher_id='{0}'\n AND a.period >= (SELECT MAX(period) FROM aa_months)\n AND a.period < DATE_ADD((SELECT MAX(period) FROM aa_months), INTERVAL 1 DAY)\n ORDER BY a.active_users DESC\n LIMIT 1\n '''.format(publisher_id))\n connection.close()\n return result.fetchone()", "def get_my_app_list(app_list):\n all_excluded_models = getattr(settings, 'EXCLUDE_ADMIN_APPS_MODELS', {})\n\n for app in app_list:\n models = 
app['models']\n match_app_models = [app_model_name.split('.')[1] for app_model_name in all_excluded_models if\n app_model_name.split('.')[0] == str(app['name'])]\n filter_models = [model for model in models if model['object_name'] not in match_app_models]\n app['models'] = filter_models\n\n return app_list", "def authorized_connect_apps(self):\r\n return applications.AuthorizedConnectApps(self)", "def apps_information(self):\n with open(self.app_data_path, 'r') as app_csv_file:\n csv_reader = csv.reader(app_csv_file)\n apps = [self.AppInformation(app[0], app[1], app[2], app[3], app[4], app[5]) for app in csv_reader]\n return apps", "def test_get_top_returns_four_apps_by_default(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n ranked_4_app = self.create_app_with_contributors(7, 0, name='four')\r\n ranked_5_app = self.create_app_with_contributors(7, 0, name='five')\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert len(top_apps) is 4, len(top_apps)", "def fw_app_list(data):\n apps = []\n count = data['extra_firewall']\n for i in range(count):\n bundle = data['id_%s' % str(i + 1)]\n allowed = data['permit_%s' % str(i + 1)]\n if bundle not in [None, '']:\n apps.append({'bundle_id': bundle, 'allowed': allowed})\n return apps", "def listExclusiveItems(appStore):\n appStores = ('Xbox', 'Amazon', 'iOS', 'Google Play')\n if appStore not in appStores:\n Exception(f\"No valid app store was provided. Valid choices are {appStores}.\")\n else:\n url = f\"https://catalog.roblox.com/v1/exclusive-items/{appStore}/bundles\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']", "def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))", "def extract_programs():\n if settings.XPRO_CATALOG_API_URL:\n return requests.get(settings.XPRO_CATALOG_API_URL, timeout=20).json()\n return []", "def get_matching_apps(\n service: str, instance: str, marathon_apps: Sequence[MarathonApp]\n) -> Sequence[MarathonApp]:\n return [\n app for app in marathon_apps if does_app_id_match(service, instance, app.id)\n ]", "def get_developer_apps_with_grant_for_user(user_id: int) -> List[Dict]:\n\n db = db_session.get_db_read_replica()\n with db.scoped_session() as session:\n rows = (\n session.query(\n DeveloperApp.address,\n DeveloperApp.name,\n DeveloperApp.description,\n Grant.user_id.label(\"grantor_user_id\"),\n Grant.created_at.label(\"grant_created_at\"),\n Grant.updated_at.label(\"grant_updated_at\"),\n ) # Note: will want to grab Grant permissions too once we have those\n .outerjoin(Grant, Grant.grantee_address == DeveloperApp.address)\n .filter(\n Grant.user_id == user_id,\n Grant.is_revoked == False,\n Grant.is_current == True,\n DeveloperApp.is_current == True,\n DeveloperApp.is_delete == False,\n )\n .order_by(asc(Grant.updated_at))\n .all()\n )\n return [\n {\n \"address\": row[0],\n \"name\": row[1],\n \"description\": row[2],\n \"grantor_user_id\": row[3],\n \"grant_created_at\": row[4],\n \"grant_updated_at\": row[5],\n }\n for row in rows\n ]", "def applications(self):\n return [self.app] + self.mounts.values()", "def sync_apps(self):\n pass", "def get_app_names(self):\n groups = self['__store']\n lookup = {\n g.group_id: g.name[2:]\n for g in groups\n if (g.name.startswith('a_'))\n }\n return set(map(lookup.get, self.get_app_ids()))", "def 
all_registered_appnames():\n yield from sorted(Registry.monomers.keys())", "def search_app(self, search_pattern):\n\n url_params = {'limit': SearchAPI.SCAN_LIMIT, 'expand': 'true'}\n first_search = self.get('mgmt-pop/apps', params=url_params)\n data = first_search.json()\n app_found = 0\n app_scanned = 0\n\n # CLI ouput header\n cli.header('#app_id,type,name,host,cname,cert_id,status,reachable')\n stats = self.process_page(data, search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n if data.get(\"meta\"):\n\n app_count = data.get(\"meta\").get(\"total_count\")\n page_offset = data.get(\"meta\").get(\"offset\")\n page_limit = data.get(\"meta\").get(\"limit\")\n page_total = ceil(app_count / page_limit)\n\n logging.debug(\"app_count: {}, scanned: {}, offset: {}, limit: {}, pages: {}\".format(\n app_count, app_scanned, page_offset, page_limit, page_total))\n\n for page in range(1, page_total):\n logging.debug(\"Loading application page {} of {}\".format(page, page_total))\n url_params['offset'] = page * page_limit\n search = self.get('mgmt-pop/apps', params=url_params)\n stats = self.process_page(search.json(), search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n # CLI ouput footer\n if not config.batch:\n if app_found != app_count:\n cli.footer(\"Found %s app(s), total %s app(s)\" % (app_found, app_count))\n else:\n cli.footer(\"%s app(s)\" % app_count)", "def find_app(self, app_name):\n self._app = None\n for p in self.policy_list.response:\n apps = [app for app in p.resource.applications if app.appName == app_name]\n if len(apps) > 0:\n return apps[0]", "def discover_glitter_apps(self):\n for app_name in settings.INSTALLED_APPS:\n module_name = '{app_name}.glitter_apps'.format(app_name=app_name)\n try:\n glitter_apps_module = import_module(module_name)\n if hasattr(glitter_apps_module, 'apps'):\n self.glitter_apps.update(glitter_apps_module.apps)\n except ImportError:\n pass\n\n self.discovered = True", "def all_shopping_items(request):\n # Get all the current users items\n items = Item.objects.filter(user=request.user).order_by('item')\n\n all_items = [item for item in items]\n\n # Add all the users items and their shopping partners items into all_items\n\n for shopping_partner in get_shopping_partners(request):\n if not shopping_partner == request.user:\n partners_shopping_list = Item.objects.filter(user=shopping_partner)\n for item in partners_shopping_list:\n all_items.append(item)\n\n return all_items" ]
[ "0.71890974", "0.71757436", "0.70074826", "0.68211126", "0.6804829", "0.6794968", "0.67782676", "0.6769873", "0.67512155", "0.6689302", "0.66789335", "0.66459394", "0.6527662", "0.64864933", "0.6406882", "0.63923347", "0.6377203", "0.63365346", "0.63237804", "0.62257147", "0.6223952", "0.6119307", "0.6091035", "0.6066698", "0.60655284", "0.6028913", "0.6023332", "0.6000356", "0.5990817", "0.5980773", "0.598006", "0.59670043", "0.5960581", "0.59585327", "0.5942707", "0.5933682", "0.59161955", "0.5856694", "0.5839692", "0.58382434", "0.58121437", "0.58100563", "0.58066624", "0.57937044", "0.5788376", "0.5784023", "0.5758927", "0.5755008", "0.57452416", "0.5743734", "0.5728211", "0.5661123", "0.56596935", "0.56359166", "0.5635745", "0.560877", "0.5529885", "0.5524278", "0.55047387", "0.5490836", "0.5462175", "0.5451006", "0.5450672", "0.5448969", "0.54404986", "0.54362434", "0.5434648", "0.5431098", "0.5422145", "0.5410595", "0.54104835", "0.5404847", "0.53913635", "0.53894633", "0.53734195", "0.5360386", "0.53553206", "0.5349394", "0.5342362", "0.53348976", "0.5313746", "0.53101", "0.5308448", "0.5304728", "0.5297761", "0.5291363", "0.52869606", "0.5273023", "0.52585375", "0.52584374", "0.52405655", "0.52334017", "0.52020955", "0.51904523", "0.51865625", "0.51834625", "0.5183312", "0.5177262", "0.5170443", "0.51689696" ]
0.5995079
28
Deletes an app that has been published to the 21 marketplace
def _delete_app(config, client, app_id, assume_yes):
    title = client.get_app_full_info(config.username, app_id).json()['app_info']['title']
    if assume_yes or click.confirm(
            "Are you sure that you want to delete App '{} ({})'?".format(app_id, title)):
        try:
            resp = client.delete_app(config.username, app_id)
            resp_json = resp.json()
            deleted_title = resp_json["deleted_title"]
            logger.info("App {} ({}) was successfully removed from the marketplace.".format(app_id, deleted_title))
        except ServerRequestError as e:
            if e.status_code == 404:
                logger.info("The app with id '{}' does not exist in the marketplace.".format(app_id), fg="red")
            elif e.status_code == 403:
                logger.info(
                    "You don't have permission to delete the app with id '{}'. You "
                    "can only delete apps that you have published.".format(app_id), fg="red")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_app(AppId=None):\n pass", "def app_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete app /app.name:\"{1}\"'.format(self.APP_CMD, name))", "def delete_app(self, name):\n raise NotImplementedError", "def delete_app(self):\n contract = jc.Contract()\n return st.OperationContract(\n self.agent.make_delete_app_operation(\n application=self.TEST_APP,\n account_name=self.bindings[\"SPINNAKER_KUBERNETES_V2_ACCOUNT\"],\n ),\n contract=contract,\n )", "def delete(self, application_id):", "def delete_app(short_name):\r\n delete_memoized(get_app, short_name)", "def delete_app(self, app_id):\n return req(self.logger, self.access_token, 'DELETE', '/apps/'+app_id, {})", "def delete_app(self, app_full_name):\n ai = self.get_app_instances_configs(app_full_name=app_full_name)\n log.info(\"Deleting app %s\" % app_full_name)\n for ai_item in ai:\n log.info(\"Deleting app instance %s\" % ai_item[\"alias\"])\n self.delete_app_instance(ai_item[\"id\"])\n log.info(\"Deleting application manifest\")\n ad = self.get_app_manifest(app_full_name)\n self.app_manifests.remove(ad)\n app_dir = os.path.join(self.apps_dir_path, \"lib\", app_full_name)\n log.info(\"Deleting app folder %s\" % app_dir)\n shutil.rmtree(app_dir)\n log.info(\"App %s was deleted\" % app_full_name)", "def delete():\n run('rm -r {}'.format(utils.home('apps', env.PROJECT_NAME)))", "def remove_app(self):\n \n pass", "def remove_app(self, app_name):\n self.remove_list_setting('applications', 'installed_apps',\n app_name)", "def deleteApp(appName):\n logger.debug('[FLASKWEB /delete/app/<appName>] Request to delete App `%s`', appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName not in applist:\n return returnError(\"Application %s does not exist\" % appName, 404)\n\n logger.info(\"[FLASKWEB] DELETING all versions of app, `%s`\")\n db.deleteAllApps(appName)\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(app=appName, status='DELETED, files remain on server')), 200\n else:\n applist = db.getAllApps()\n versions = {a['name']: db.getVersions(a['name'], limit=5) for a in applist}\n return render_template('apps.html', applist=applist, versions=versions)", "def delete_app(self,*app_names):\n\n for app in app_names:\n shutil.rmtree(os.path.join(self._main,app))\n \n self._remove_extra_css_apps()\n self._remove_extra_templates_apps()\n self._update_delete_app_or_page()", "def uninstall_app(self, package, keepdata=False):\n return self.adb.uninstall(package, keepdata)", "def delete_application(self, method=\"POST\", short_name=\"sampleapp\"):\r\n if method == \"POST\":\r\n return self.app.post(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)\r\n else:\r\n return self.app.get(\"/app/%s/delete\" % short_name,\r\n follow_redirects=True)", "def delete_app_by_name(self, name):\n app = self.get_app_by_name(name)\n if app is False:\n Log.an().error('cannot get app by name: %s', name)\n return False\n\n if not app:\n Log.an().error('app \"%s\" not found', name)\n return False\n\n if len(app) > 1:\n Log.an().error(\n 'non-unique app \"%s\", try deleting by id instead', name\n )\n return False\n\n return self.delete_app_by_id(app[0]['id'])", "def DeleteApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def test_07_settings_application_delete(self):\n # get applications\n r = requests.get('%s/settings/applications' % self.url, cookies=self.cookies)\n 
self.assertEqual(r.status_code, 200)\n application_id = re.search('<form id=\"application(\\d+)\"', r.content).group(1)\n\n # delete the application -> 400\n r = requests.delete('%s/settings/applications/aa' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 400)\n\n # delete the application -> 417\n r = requests.delete('%s/settings/applications/11111' % self.url, cookies=self.cookies)\n self.assertEqual(r.status_code, 417)\n\n # delete the application -> 200\n r = requests.delete('%s/settings/applications/%s' % (self.url, application_id), cookies=self.cookies)\n self.assertEqual(r.status_code, 200)", "def wipe_application(self):\n\n self.resin.models.application.base_request.request(\n 'application', 'DELETE',\n endpoint=self.resin.settings.get('pine_endpoint'), login=True\n )", "def delete_application(self, application_id):\n status_code_dict = {\n codes.ok: DeleteApplicationResponse,\n codes.bad_request: SuccessAndErrorsResponse,\n }\n return self.delete_request(\n DELETE_APPLICAITON_URL.format(application_id=application_id),\n status_code_response_class_dict=status_code_dict,\n )", "def test_02_app_delete(self):\r\n for i in range(300):\r\n app = App(name=str(i), short_name=str(i),\r\n description=str(i), owner_id=1)\r\n db.session.add(app)\r\n db.session.commit()\r\n\r\n url = '?api_key=%s' % (self.api_key)\r\n self.check_limit(url, 'delete', 'app')", "def remove(ctx, app_id, all, assume_yes):\n if all and not app_id:\n for _app_id in _get_all_app_ids(ctx.obj['config'], ctx.obj['client']):\n _delete_app(ctx.obj['config'], ctx.obj['client'], _app_id, assume_yes)\n elif app_id and not all:\n _delete_app(ctx.obj['config'], ctx.obj['client'], app_id, assume_yes)\n else:\n logger.info(ctx.command.get_help(ctx))\n sys.exit(1)", "def delete(self, force=False):\n if not self._id_exists():\n abort(404, f\"Application with ID {self.app_id} does not exist\")\n elif not self.engine.app_list:\n abort(404, \"There are no currently running applications\")\n\n try:\n self.engine.undeploy(self.app_id, force)\n except Exception as error:\n abort(500, f\"Error while deleting: {error}\")\n\n TemplateHandler(self.app_id).delete_template()\n\n return {\"message\": f\"Application {self.app_id} successfully deleted\"}", "def remove_app(self, app):\n try:\n membership = self.membership_class.objects.get(obj=self, app=app)\n except self.membership_class.DoesNotExist:\n return False\n else:\n membership.delete()\n index_webapps.delay([app.pk])\n return True", "def delete(self):\n logging.info(\"DELETE method for API for ApplicationTypes not supported.\")\n pass", "def clear_app(self, package):\n return self.adb.clear_app(package)", "def test_05d_get_nonexistant_app_delete(self):\r\n self.register()\r\n # GET\r\n res = self.app.get('/app/noapp/delete', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.data\r\n # POST\r\n res = self.delete_application(short_name=\"noapp\")\r\n assert res.status == '404 NOT FOUND', res.status", "def remove_apps(self):\n self.membership_class.objects.filter(obj=self).delete()", "def test_duo_application_delete(self):\n pass", "def retract_application(request, application_id):\n appl = get_object_or_404(Application, pk=application_id)\n if appl.Project.distributions.filter(Student=request.user).exists():\n raise PermissionDenied(\"You cannot retract this application, because you are distributed to this project. 
If this distribution is incorrect, please contact the responsible staff member of the project.\")\n\n track = ApplicationTracking()\n track.Project = appl.Project\n track.Student = request.user\n track.Type = 'r'\n track.save()\n\n appl.delete()\n return render(request, \"base.html\", context={\n \"Message\": \"Deleted application\",\n \"return\": 'students:list_applications',\n })", "def delete_app_instance(self, instance_id):\n self.stop_app_instance(instance_id)\n aic = self.get_app_instances_configs(instance_id=instance_id)\n # invoking on_uninstall callback , so app can run cleanup routines .\n ai_obj = self.get_app_instance_obj(instance_id)\n try:\n if hasattr(ai_obj,\"on_uninstall\"):\n ai_obj.on_uninstall()\n except Exception as ex:\n log.exception(ex)\n if aic:\n self.app_instances_configs.remove(aic[0])\n self.serialize_instances_config()", "def reinstall_app(self, pbz_path, launch_on_install=True):\n\t\tdef endpoint_check(result, pbz_path):\n\t\t\tif result == 'app removed':\n\t\t\t\tprint result\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tif DEBUG_PROTOCOL:\n\t\t\t\t\tlog.warn(\"Failed to remove supplied app, app manager message was: \" + result)\n\t\t\t\treturn False\n\n\t\t# get the bundle's metadata to identify the app being replaced\n\t\tbundle = PebbleBundle(pbz_path)\n\t\tif not bundle.is_app_bundle():\n\t\t\traise PebbleError(self.id, \"This is not an app bundle\")\n\t\tapp_metadata = bundle.get_app_metadata()\n\n\t\t# attempt to remove an app by its UUID\n\t\tresult_uuid = self.remove_app_by_uuid(app_metadata['uuid'].bytes, uuid_is_string=False)\n\t\tif endpoint_check(result_uuid, pbz_path):\n\t\t\treturn self.install_app(pbz_path, launch_on_install)\n\n\t\tif DEBUG_PROTOCOL:\n\t\t\tlog.warn(\"UUID removal failure, attempting to remove existing app by app name\")\n\n\t\t# attempt to remove an app by its name\n\t\tapps = self.get_appbank_status()\n\t\tfor app in apps[\"apps\"]:\n\t\t\tif app[\"name\"] == app_metadata['app_name']:\n\t\t\t\tresult_name = self.remove_app(app[\"id\"], app[\"index\"])\n\t\t\t\tif endpoint_check(result_name, pbz_path):\n\t\t\t\t\treturn self.install_app(pbz_path, launch_on_install)\n\n\t\tlog.warn(\"Unable to locate previous instance of supplied application\")", "def delete(self, request, local_site=None, *args, **kwargs):\n try:\n app = self.get_object(request, local_site=local_site, *args,\n **kwargs)\n except Application.DoesNotExist:\n return DOES_NOT_EXIST\n\n if not self.has_delete_permissions(request, app, local_site):\n return self.get_no_access_error(request)\n\n app.delete()\n\n return 204, {}", "def remove(self, package):\n self.driver.remove_app(package)", "def delete(self, sender, instance):\n adapter = self.adapters[sender]\n feed = adapter.get_feed_url(instance) or self.feed\n if adapter.can_delete(instance):\n client = self.get_client()\n event_id = CalendarEvent.objects.get_event_id(instance, feed)\n if event_id:\n client.events().delete(calendarId=feed, eventId=event_id).execute()\n CalendarEvent.objects.delete_event_id(instance, feed)", "def remove_app(request, app, device=0):\n context = {}\n app = get_object_or_404(MacOSApp, pk=app)\n if device == 0:\n # Completely remove Application from MDM\n mode = 'delete'\n if app.installed.all().count() == 0:\n app.delete()\n messages.success(request, \"Application was successfully deleted\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = UninstallAppForm(mode=mode)\n else:\n # Unlink app from device\n laptop = 
get_object_or_404(Laptop, pk=device)\n if app in laptop.apps_pending.all():\n laptop.apps_pending.remove(app)\n messages.success(request, \"Application is no longer assigned to {}\".format(laptop.name),\n extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n # If pending removal reset to installed status\n if app in laptop.apps_remove.all():\n laptop.apps_installed.add(app)\n laptop.apps_remove.remove(app)\n messages.success(request, \"Removal request cancelled\", extra_tags='success')\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n\n if app in laptop.apps_installed.all():\n mode = 'disassociate'\n context['form'] = UninstallAppForm(mode=mode)\n else:\n raise Http404\n\n # Handle form data\n if request.method == 'POST':\n form = UninstallAppForm(request.POST, mode=mode)\n if form.is_valid():\n if mode == 'disassociate':\n record = get_object_or_404(InstallationRecord, app=app, device=laptop, active=True)\n record.active = False\n record.expires = timezone.now()\n record.save()\n laptop.apps_installed.remove(app)\n messages.success(request, \"Application successfully removed from {}\".format(laptop.name),\n extra_tags='success')\n else:\n app.delete()\n messages.success(request, \"Application deleted successfully\")\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n context['form'] = form\n return render(request, 'form_crispy.html', context)", "def clear_app(package):\n G.DEVICE.clear_app(package)", "def site_delete(self, name):\n self.core.api.os.shell.cmd('{0} delete site \"{1}\"'.format(self.APP_CMD, name))", "def delete_applications(configurationIds=None):\n pass", "def remove_app(self, appid, index, async=False):\n\n\t\tdata = pack(\"!bII\", 2, appid, index)\n\t\tself._send_message(\"APP_MANAGER\", data)\n\n\t\tif not async:\n\t\t\treturn EndpointSync(self, \"APP_MANAGER\").get_data()", "def deleting_old_news() -> None:\n\n with app.app_context():\n delete_news_from_db()", "def delete_activity():\n pass", "def deleteManifestEntry(context, key):\n GenericMetadata.deleteEntryFromSection(context, GenericMetadata.MANIFEST_SECTION, key)", "def delete_last_activity(app_id):\r\n delete_memoized(last_activity, app_id)", "def remove_appointments():\n appointments = Appointment.objects.all()\n now = timezone.now()\n for appointment in appointments:\n if appointment.date < now:\n appointment.delete()", "def test_delete_hyperflex_app_catalog(self):\n pass", "def remove_app_sig(self, app_id):\n try:\n del self._app_sigs[app_id]\n except KeyError:\n raise MissingSignatureError(\n _('An application signature for \"%s\" could not be found.')\n % app_id)", "def remove_app_sig(self, app_id):\n try:\n del self._app_sigs[app_id]\n except KeyError:\n raise MissingSignatureError(\n _('An application signature for \"%s\" could not be found.')\n % app_id)", "def delete_app_by_id(self, app_id):\n # first check if app is linked to any steps\n try:\n count = self._session.query(StepEntity).\\\n filter(StepEntity.app_id == app_id).\\\n count()\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n if count > 0:\n Log.an().error('app with id \"%s\" still used by steps', app_id)\n return False\n\n # delete app\n try:\n self._session.query(AppEntity).\\\n filter(AppEntity.id == app_id).\\\n delete(synchronize_session=False)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return True", "def main():\n if not os.path.isdir(OFFENDING_DIR):\n # good, you don't have that junk\n 
return 0\n\n today = datetime.date.today()\n seen_app_dict = load_storage()\n deleted_app_dict = load_deleted()\n current_app_list = os.listdir(OFFENDING_DIR)\n\n # boot already disappeared apps\n for app in [app for app in seen_app_dict if app not in current_app_list]:\n seen_app_dict.pop(app)\n\n # add newly appeared apps\n for app in [app for app in current_app_list if app not in seen_app_dict]:\n seen_app_dict[app] = today\n\n # delete expired apps\n returncode = 0\n total_deleted_bytes = 0\n newly_deleted_apps = []\n for app in seen_app_dict:\n if today >= seen_app_dict[app] + DELETE_AFTER:\n app_path = os.path.join(OFFENDING_DIR, app)\n try:\n size = file_size(app_path)\n total_deleted_bytes += size\n # sys.stdout.write(\"Deleted: '%s' : %s\\n\" % (app, humanize.naturalsize(size)))\n logging.info(\"Deleted: '%s' : %s\" % (app, humanize.naturalsize(size)))\n os.remove(app_path)\n newly_deleted_apps.append(app)\n except OSError as err:\n sys.stderr.write(\"error: failed to remove '%s': %s\" % (app_path, str(err)))\n logging.error(\"failed to remove '%s': %s\" % (app_path, str(err)))\n returncode = 1\n\n for app in newly_deleted_apps:\n seen_app_dict.pop(app)\n deleted_app_dict[app] = today\n\n # sys.stdout.write(\"Total deleted: %s\\n\" % (humanize.naturalsize(total_deleted_bytes)))\n logging.info(\"Total deleted: %s\" % (humanize.naturalsize(total_deleted_bytes)))\n # write data to disk\n returncode |= write_storage(seen_app_dict)\n returncode |= write_deleted(deleted_app_dict)\n\n logging.debug(\"Cleanup completed\\n\")\n logging.shutdown()\n return returncode", "def test_14_delete_application(self, mock):\r\n with self.flask_app.app_context():\r\n self.create()\r\n self.register()\r\n self.new_application()\r\n res = self.delete_application(method=\"GET\")\r\n msg = \"Application: Sample App &middot; Delete\"\r\n assert self.html_title(msg) in res.data, res\r\n assert \"No, do not delete it\" in res.data, res\r\n\r\n app = db.session.query(App).filter_by(short_name='sampleapp').first()\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n res = self.delete_application(method=\"GET\")\r\n msg = \"Application: Sample App &middot; Delete\"\r\n assert self.html_title(msg) in res.data, res\r\n assert \"No, do not delete it\" in res.data, res\r\n\r\n res = self.delete_application()\r\n assert \"Application deleted!\" in res.data, res\r\n\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.delete_application(short_name=Fixtures.app_short_name)\r\n assert res.status_code == 403, res.status_code", "def _installed_apps_remove(self):\n config.remove_plugin(self.module_path)", "def delete(self):\n self.parser.add_argument('lp_id',\n help=\"Language pack id\")\n args = self.parser.parse_args()\n self.client.languagepacks.delete(lp_id=args.lp_id)", "def delete(self, request, app_id, addon_name):\n addon = Addon.objects.get(app__app_id=app_id, display_name=addon_name)\n provider = get_provider_from_provider_name(addon.provider_name)\n result = provider.deprovision(addon.provider_uuid)\n manager = StateMachineManager()\n with manager.transition(addon.id, AddonEvent.deprovision_success):\n pass\n manager.start_task(addon.id)\n return self.respond({'message': result['message']})", "def history_delete(name, version):\n if click.confirm(format_text('Deleting service history is a permanent action, are you sure you want to delete '\n 'this record?',\n TextStyle.WARNING)):\n click.echo(remove_service_history(name, version))", "def delete_entry(key):\n db 
= sh.open(the_phone_book_name, flag='c', writeback=True)\n if key in db:\n confirm = input(\"Delete {name} [y/n]: \".format(name=key))\n if confirm.lower() == 'y':\n print(\"Deleting entry ..... {name}\\n\".format(name=key))\n del db[key]", "def test_20_admin_delete_app(self, mock):\r\n self.register()\r\n self.signout()\r\n self.register(fullname=\"Juan Jose\", name=\"juan\",\r\n email=\"juan@juan.com\", password=\"juan\")\r\n self.new_application()\r\n self.signout()\r\n # Sign in with the root user\r\n self.signin()\r\n res = self.delete_application(method=\"GET\")\r\n assert \"Yes, delete it\" in res.data,\\\r\n \"The app should be deleted by admin users\"\r\n res = self.delete_application()\r\n err_msg = \"The app should be deleted by admin users\"\r\n assert \"Application deleted!\" in res.data, err_msg", "def uninstall(package):\n return G.DEVICE.uninstall_app(package)", "def delete_version(self):\n pass", "def delete(self, liff_id):\n api_url = 'https://api.line.me/liff/v1/apps/{0}'.format(liff_id)\n result = requests.delete(api_url, headers={\"Authorization\": self._headers[\"Authorization\"]})\n if result.status_code == 401:\n raise ErrorResponse(\"[401 Error] Certification failed.\")\n elif result.status_code == 404:\n raise ErrorResponse(\"\"\"\\\n[404 Error] The following error reasons are possible.\n・The specified LIFF application does not exist.\n・The specified LIFF application belongs to another channel.\"\"\")", "def delete_api_key(api_key):\n api.delete(api_key)", "def delete_meal():", "def test_delete_app_cascade(self):\r\n app = AppFactory.create()\r\n tasks = TaskFactory.create_batch(2, app=app)\r\n task_runs = TaskRunFactory.create_batch(2, app=app)\r\n url = '/api/app/%s?api_key=%s' % (1, app.owner.api_key)\r\n self.app.delete(url)\r\n\r\n tasks = db.session.query(Task)\\\r\n .filter_by(app_id=1)\\\r\n .all()\r\n assert len(tasks) == 0, \"There should not be any task\"\r\n\r\n task_runs = db.session.query(TaskRun)\\\r\n .filter_by(app_id=1)\\\r\n .all()\r\n assert len(task_runs) == 0, \"There should not be any task run\"", "def clean(app_id):\r\n reset()\r\n delete_n_tasks(app_id)\r\n delete_n_completed_tasks(app_id)\r\n delete_n_task_runs(app_id)\r\n delete_overall_progress(app_id)\r\n delete_last_activity(app_id)\r\n delete_n_registered_volunteers(app_id)\r\n delete_n_anonymous_volunteers(app_id)\r\n delete_n_volunteers(app_id)", "def delete_old(self, fetch_from_api=False, dry_run=False):\n latest_org_names = [\n re.sub(' +', ' ', org['preferredLabel'].strip())\n for org in self._all(fetch_from_api=fetch_from_api)\n ]\n for org_page in self._all_pages_simple():\n org_page_title = org_page.page_title\n if org_page_title not in latest_org_names:\n if dry_run:\n print(f'SHOULD BE DELETED: {org_page_title}')\n else:\n reason = 'Παλιός φορέας (δεν υπάρχει πια στην Απογραφή)'\n cat_str = ''\n org_page.delete(reason=reason)\n org_category_page = self._get_site_page(\n org_page_title, is_category=True)\n if org_category_page.exists:\n org_category_page.delete(reason=reason)\n cat_str = 'AND CATEGORY '\n print(f'PAGE {cat_str}WAS DELETED: {org_page_title}')", "def delete_alarm():\r\n name = request.args.get('alarm_item')\r\n logging.info(\"Alarm deleted in delete_alarm(): \" + name)\r\n for alarm in alarms:\r\n if alarm['title'] == name:\r\n alarms.remove(alarm)", "def purge_old() -> None:\n conn = sqlite3.connect('rss.db')\n c = conn.cursor()\n c.execute(\n \"\"\"delete from entries where title not in\n (select title from entries order by year desc, month desc, day desc\n 
limit 20)\n \"\"\"\n )\n conn.commit()\n conn.close()", "def delete_book(code: str):\n pass", "def remove_compiled_app():\r\n app = get_app()\r\n remove_compiled_application(apath(app, r=request))\r\n session.flash = T('compiled application removed')\r\n redirect(URL('site'))", "def uninstall_app(device, app_identifier):\n command = 'uninstall \"%s\" \"%s\"' % (device.udid, app_identifier)\n _run_command(command)", "def test_delete_app_with_custom_database(self):\n self._perform_delete_app_test('DeleteApplication', database='db_multi')", "def delete(self, uuid):\n try:\n pmanager = PushManager.query.filter_by(\n uuid=uuid\n ).one_or_none()\n if pmanager is None:\n raise GatlinException(\"App not exist\", 404)\n self._provider.delete_platform(pmanager.sns_arn)\n pmanager.delete()\n except GatlinException as exception:\n raise exception", "def delete(filename):\n storeapps = APP.config[\"storage\"]\n extension = os.path.basename(filename).split(\".\")[-1].upper()\n dirname = \".\".join(os.path.basename(filename).split(\".\")[:-1])\n directory = os.path.join(storeapps, extension, dirname)\n\n try:\n directory = directory.encode(\"utf-8\")\n except UnicodeDecodeError:\n pass\n\n if os.path.isdir(directory):\n shutil.rmtree(directory)\n if os.path.isdir(directory):\n return \"Unable to remove application (check server logs): %s\" % (filename), 500\n return \"Removed: %s\" % (filename), 200\n\n return \"File not found: %s\" % (filename), 404", "def delete_app(self, file_id: str = None, group_id: str = None) -> list:\n if file_id:\n url = self.get_method_url('storage', 'files', file_id)\n if group_id:\n url = self.get_method_url('storage', 'groups', group_id)\n json_data = self.request(url, method='DELETE')\n return json_data", "def delete_feed(request, feed_id):\n\n __time_update(request.user)\n\n try:\n Feed.objects.get(id=feed_id, user=request.user).delete()\n except:\n pass\n\n return redirect('/feeds')", "def delete(self, app, name):\n self.set_header('content-type', 'application/json')\n #所有策略的修改都要检查 @todo\n try:\n StrategyCustDao().delete_strategy_by_app_and_name(app, name)\n self.finish(json_dumps({\"status\": 200, \"msg\": \"ok\", \"values\": []}))\n except Exception as err:\n logger.error(err)\n self.finish(json_dumps({\"status\": -1, \"msg\": \"fail to get data from database\"}))", "def delete_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.args.get('title', default='')\n category = request.args.get('category', default='')\n buydate = request.args.get('buydate', default='')\n ssid = decrypt_book_record(request.args.get('ssid'))\n\n pre_delete_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if pre_delete_entry is not None :\n try :\n db.session.delete(pre_delete_entry)\n db.session.commit()\n flash(u'删除成功')\n except InvalidRequestError as e :\n log_error('error when delete:')\n log_error(e.message)\n #log_error(u'when delete item %s ' % str(pre_delete_entry))\n # DO NOT use the above one for the F UnicodeEncodeError\n log_error(u'when delete item %s ' % pre_delete_entry)\n db.session.flush()\n flash(u'因为数据库操作原因,删除失败')\n else :\n flash(u'删除失败')\n\n return redirect(url_for('show_entries_admin'))", "def remove():\n run('pew rm {0}'.format(package_name()))", "def delete_entry(self, user, entry):\r\n try:\r\n self.curs.execute(f\"\"\"DELETE FROM {user} WHERE application = ? 
\"\"\", (entry))\r\n except sq.OperationalError:\r\n return self.err_find", "def destroy(ctx, app, expire_hit, sandbox):\n if expire_hit:\n ctx.invoke(expire, app=app, sandbox=sandbox, exit=False)\n HerokuApp(app).destroy()", "def test_delete_api_key_from_org(self):\n pass", "def delete_run(arn=None):\n pass", "def delete():", "def delete(self):\r\n self.domain.delete_item(self)", "def test_blogpost_is_deleted_after_app_deletion(self):\r\n self.configure_fixtures()\r\n blogpost = Blogpost(title='title', body=\"body\", app=self.app)\r\n db.session.add(blogpost)\r\n db.session.commit()\r\n\r\n assert self.app in db.session\r\n assert blogpost in db.session\r\n\r\n db.session.delete(self.app)\r\n db.session.commit()\r\n assert self.app not in db.session\r\n assert blogpost not in db.session", "def delete(self):\n appraisal_service = c_app.service_locator.get_by_name(u\"appraises\")\n appraisal_service.remove_collection(c_user._get_current_object())\n return self.build_response(None)", "def delete_package(pkg_id):\n sql = 'delete from document where id = ' + str(pkg_id)\n util.executeSQL(conn, sql)\n return", "def delete(isamAppliance, name, check_mode=False, force=False):\n ret_obj = search(isamAppliance, name, check_mode=check_mode, force=force)\n chain_id = ret_obj['data']\n\n if chain_id == {}:\n logger.info(\"STS Chain {0} not found, skipping delete.\".format(name))\n else:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True)\n else:\n return isamAppliance.invoke_delete(\n \"Delete a specific STS chain\",\n \"{0}/{1}\".format(uri, chain_id),\n requires_modules=requires_modules,\n requires_version=requires_version)\n\n return isamAppliance.create_return_object()", "def delete(id):\n program = Programa.query.get(id)\n if program:\n try:\n db.session.delete(program)\n db.session.commit()\n return \"\", 200\n except Exception as e:\n logging.error(str(e))\n return jsonify({\"message\": str(e)}), 500\n return \"\", 404", "def delete_entry():\n u_id = request.args(0) or redirect(URL('moderation', 'new_entries'))\n db(db.lioli_main.unique_id == u_id).delete()\n redirect(URL('new_entries'))\n return dict()", "def remove_hero(apps, schema_editor):\n pass", "def test_blogpost_deletion_doesnt_delete_app(self):\r\n self.configure_fixtures()\r\n blogpost = Blogpost(title='title', body=\"body\", app=self.app)\r\n db.session.add(blogpost)\r\n db.session.commit()\r\n\r\n assert self.app in db.session\r\n assert blogpost in db.session\r\n\r\n db.session.delete(blogpost)\r\n db.session.commit()\r\n assert self.app in db.session\r\n assert blogpost not in db.session", "def delete_spot_datafeed_subscription(self):\r\n return self.get_status('DeleteSpotDatafeedSubscription',\r\n None, verb='POST')", "def test_03_delete(self, application=None):\n self.direct_login_user_1()\n self.my_context_dict[\"new_application\"].delete()\n\n if self.my_context_dict[\"new_application\"] in self._objects_to_delete:\n ## no need to delete it in tearDownClass if delete succeeded\n self._objects_to_delete.remove(self.my_context_dict[\"new_application\"])", "def delete_deployment(request, deployment, **_kwargs):\n pass", "def cmd_apps__destroy(args):\n \n if args.name is None and in_git_repo():\n args.name = _get_current_project_name()\n\n if args.name is None:\n print \"Please provide a project name.\"\n sys.exit(1)\n\n print \"Destroying project %s...\" % args.name\n remote.destroy_project(args.name)\n print \"Project %s destroyed.\" % args.name\n if in_git_repo() and _get_current_project_name() 
== args.name:\n git(None, 'remote', 'rm', 'tinyserv')\n print \"Removed remote '%s'.\" % args.name", "async def delete_star_code(self):\n\n e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/star-code-affiliates',\n method='delete',\n )\n return e", "def delete_temp_dir(app_name):\n sudo('rm -rf /tmp/.fab-deploy-{}'.format(app_name))", "def delete_habit():\n analytics.remove_habit('Play Piano')", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()" ]
[ "0.77574676", "0.73077077", "0.7193325", "0.6995305", "0.6962425", "0.69525915", "0.69459826", "0.68724674", "0.6756195", "0.6666602", "0.6640245", "0.6541444", "0.65118265", "0.64288366", "0.64217", "0.63251245", "0.6289479", "0.62437224", "0.62267774", "0.62075216", "0.61701506", "0.61564916", "0.60747194", "0.6054197", "0.5946228", "0.59396756", "0.58159137", "0.58150136", "0.5791744", "0.5775345", "0.5769719", "0.57192045", "0.568128", "0.564948", "0.56316423", "0.5630713", "0.55991405", "0.55889225", "0.5538603", "0.55248404", "0.55229425", "0.55207103", "0.54647475", "0.5463852", "0.5426128", "0.54191875", "0.5412583", "0.5412583", "0.54122275", "0.54118884", "0.5385275", "0.538169", "0.53563225", "0.5350248", "0.5343568", "0.533782", "0.5332165", "0.5326356", "0.5309971", "0.5286216", "0.5285583", "0.52773", "0.52697116", "0.5236477", "0.52222717", "0.5222269", "0.5220948", "0.5218061", "0.5210968", "0.5206109", "0.5180541", "0.51704437", "0.5165999", "0.5165879", "0.51569223", "0.5155375", "0.5148182", "0.514736", "0.51463425", "0.5120987", "0.5112315", "0.51097953", "0.5098177", "0.50925857", "0.5069121", "0.50662935", "0.50592285", "0.5044488", "0.50436294", "0.50435114", "0.5040412", "0.5029716", "0.5029474", "0.50030446", "0.49975714", "0.49952886", "0.49837697", "0.49533087", "0.49532935", "0.49521086" ]
0.73056835
2
Publishes an application by uploading the manifest to the given marketplace
def _publish(client, manifest_path, marketplace, skip, overrides): try: manifest_json = check_app_manifest(manifest_path, overrides, marketplace) app_url = "{}://{}".format(manifest_json["schemes"][0], manifest_json["host"]) app_ip = urlparse(app_url).hostname if not skip: address = get_zerotier_address(marketplace) if address != app_ip: wrong_ip = click.style("It seems that the IP address that you put in your manifest file (") +\ click.style("{}", bold=True) +\ click.style(") is different than your current 21market IP (") +\ click.style("{}", bold=True) +\ click.style(")\nAre you sure you want to continue publishing with ") +\ click.style("{}", bold=True) +\ click.style("?") if not click.confirm(wrong_ip.format(app_ip, address, app_ip)): switch_host = click.style("Please edit ") +\ click.style("{}", bold=True) +\ click.style(" and replace ") +\ click.style("{}", bold=True) +\ click.style(" with ") +\ click.style("[{}].", bold=True) logger.info(switch_host.format(manifest_path, app_ip, address)) return except exceptions.ValidationError as ex: # catches and re-raises the same exception to enhance the error message publish_docs_url = click.style("https://21.co/learn/21-publish/", bold=True) publish_instructions = "For instructions on publishing your app, please refer to {}".format(publish_docs_url) raise exceptions.ValidationError( "The following error occurred while reading your manifest file at {}:\n{}\n\n{}" .format(manifest_path, ex.args[0], publish_instructions), json=ex.json) app_name = manifest_json["info"]["title"] app_endpoint = "{}://{}{}".format(manifest_json["schemes"][0], manifest_json["host"], manifest_json["basePath"]) logger.info( (click.style("Publishing {} at ") + click.style("{}", bold=True) + click.style(" to {}.")) .format(app_name, app_endpoint, marketplace)) payload = {"manifest": manifest_json, "marketplace": marketplace} try: response = client.publish(payload) except ServerRequestError as e: if e.status_code == 403 and e.data.get("error") == "TO600": logger.info( "The endpoint {} specified in your manifest has already been registered in " "the marketplace by another user.\nPlease check your manifest file and make " "sure your 'host' field is correct.\nIf the problem persists please contact " "support@21.co.".format(app_endpoint), fg="red") return else: raise e if response.status_code == 201: response_data = response.json() mkt_url = response_data['mkt_url'] permalink = response_data['permalink'] logger.info( click.style( "\n" "You have successfully published {} to {}. " "You should be able to view the listing within a few minutes at {}\n\n" "Users will be able to purchase it, using 21 buy, at {} ", fg="magenta") .format(app_name, marketplace, permalink, mkt_url) )
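A minimal sketch of the client object that the `_publish` document above expects, assuming a requests-based HTTP client and a hypothetical `/publish` endpoint; the base URL, endpoint path, and bearer-token auth are illustrative assumptions, not taken from the snippet:

import requests

class MarketplaceClient:
    # Illustrative stand-in for the `client` passed to _publish().
    # The base URL, endpoint path, and auth scheme below are
    # assumptions made for this sketch; the real marketplace API
    # is not shown in the snippet above.
    def __init__(self, base_url, token):
        self.base_url = base_url.rstrip("/")
        self.token = token

    def publish(self, payload):
        # _publish() builds payload as
        # {"manifest": manifest_json, "marketplace": marketplace}
        # and expects a response exposing .status_code and .json(),
        # which a requests.Response provides directly.
        return requests.post(
            "{}/publish".format(self.base_url),
            json=payload,
            headers={"Authorization": "Bearer {}".format(self.token)},
            timeout=30,
        )

# Hypothetical usage:
#   client = MarketplaceClient("https://market.example.com/api", token="...")
#   _publish(client, "manifest.yaml", "21market", skip=True, overrides=None)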
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def submit(ctx, manifest_path, marketplace, skip, parameters):\n if parameters is not None:\n try:\n parameters = _parse_parameters(parameters)\n except:\n logger.error(\n \"Manifest parameter overrides should be in the form 'key1=\\\"value1\\\" \"\n \"key2=\\\"value2\\\".\",\n fg=\"red\")\n return\n\n _publish(ctx.obj['client'], manifest_path, marketplace, skip, parameters)", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def _deploy_apps_function():\n app_integration_package = AppIntegrationPackage(config=config, version=apps_version)\n app_integration_package.create_and_upload()\n return app_integration_package", "def installApp(dev, apkFile=None, appPackage=None, outFile=None, local=False):\n certFile = scriptRoot + '/certs/localtest.me.pem'\n with ServerContext(LocalMarketServer(certFile, config.officialServer)) as server:\n if apkFile:\n server.setApk(apkFile.read())\n elif appPackage:\n print('Downloading apk')\n apps = listApps(True)\n if appPackage not in apps:\n raise Exception('Unknown app: %s' % appPackage)\n server.setApk(apps[appPackage].release.asset)\n\n print('Starting task')\n xpdData = server.getXpd()\n\n print('Starting communication')\n # Point the camera to the web api\n result = installer.install(dev, server.host, server.port, xpdData, printStatus)\n if result.code != 0:\n raise Exception('Communication error %d: %s' % (result.code, result.message))\n\n result = server.getResult()\n\n if not local:\n try:\n RemoteAppStore(config.appengineServer).sendStats(result)\n except:\n pass\n\n print('Task completed successfully')\n\n if outFile:\n print('Writing to output file')\n json.dump(result, outFile, indent=2)\n\n return result", "def process_manifest(vb, options):\n if not options.manifest:\n return\n\n vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,\n options.manifest_release_version)", "def install_app(self, pbz_path, launch_on_install=True):\n\n\t\tbundle = PebbleBundle(pbz_path)\n\t\tif not bundle.is_app_bundle():\n\t\t\traise PebbleError(self.id, \"This is not an app bundle\")\n\t\tapp_metadata = bundle.get_app_metadata()\n\n\t\tbinary = bundle.zip.read(bundle.get_application_info()['name'])\n\t\tif bundle.has_resources():\n\t\t\tresources = bundle.zip.read(bundle.get_resources_info()['name'])\n\t\telse:\n\t\t\tresources = None\n\n\t\tapps = self.get_appbank_status()\n\n\t\tif not apps:\n\t\t\traise PebbleError(self.id, \"could not obtain app list; try again\")\n\n\t\tfirst_free = 1\n\t\tfor app in apps[\"apps\"]:\n\t\t\tif app[\"index\"] == first_free:\n\t\t\t\tfirst_free += 1\n\t\tif first_free == apps[\"banks\"]:\n\t\t\traise PebbleError(self.id, \"All %d app banks are full\" % apps[\"banks\"])\n\t\tlog.debug(\"Attempting to add app to bank %d of %d\" % (first_free, apps[\"banks\"]))\n\n\t\tclient = PutBytesClient(self, first_free, \"BINARY\", binary)\n\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\tclient.init()\n\t\twhile not client._done and not client._error:\n\t\t\tpass\n\t\tif client._error:\n\t\t\traise PebbleError(self.id, \"Failed to send application binary %s/pebble-app.bin\" % pbz_path)\n\n\t\tif resources:\n\t\t\tclient = PutBytesClient(self, first_free, \"RESOURCES\", resources)\n\t\t\tself.register_endpoint(\"PUTBYTES\", client.handle_message)\n\t\t\tclient.init()\n\t\t\twhile not client._done and not client._error:\n\t\t\t\tpass\n\t\t\tif 
client._error:\n\t\t\t\traise PebbleError(self.id, \"Failed to send application resources %s/app_resources.pbpack\" % pbz_path)\n\n\t\ttime.sleep(2)\n\t\tself._add_app(first_free)\n\t\ttime.sleep(2)\n\n\t\tif launch_on_install:\n\t\t\tself.launcher_message(app_metadata['uuid'].bytes, \"RUNNING\", uuid_is_string=False)", "def publish_manifest(ctx, name, tag, image, signed_push=False):\n manifest_spec = {\"image\": \"{}:{}\".format(name, tag)}\n src_images = []\n\n for img in image:\n img_splitted = img.replace(' ', '').split(',')\n if len(img_splitted) != 2:\n print(\"Impossible to parse source format for: '{}'\".format(img))\n raise Exit(code=1)\n\n platform_splitted = img_splitted[1].split('/')\n if len(platform_splitted) != 2:\n print(\"Impossible to parse platform format for: '{}'\".format(img))\n raise Exit(code=1)\n\n src_images.append(\n {\"image\": img_splitted[0], \"platform\": {\"architecture\": platform_splitted[1], \"os\": platform_splitted[0]}}\n )\n manifest_spec[\"manifests\"] = src_images\n\n with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:\n temp_file_path = f.name\n yaml.dump(manifest_spec, f, default_flow_style=False)\n\n print(\"Using temp file: {}\".format(temp_file_path))\n ctx.run(\"cat {}\".format(temp_file_path))\n\n try:\n result = retry_run(ctx, \"manifest-tool push from-spec {}\".format(temp_file_path))\n if result.stdout:\n out = result.stdout.split('\\n')[0]\n fields = out.split(\" \")\n\n if len(fields) != 3:\n print(\"Unexpected output when invoking manifest-tool\")\n raise Exit(code=1)\n\n digest_fields = fields[1].split(\":\")\n\n if len(digest_fields) != 2 or digest_fields[0] != \"sha256\":\n print(\"Unexpected digest format in manifest-tool output\")\n raise Exit(code=1)\n\n digest = digest_fields[1]\n length = fields[2]\n\n if signed_push:\n cmd = \"\"\"\n notary -s https://notary.docker.io -d {home}/.docker/trust addhash \\\n -p docker.io/{name} {tag} {length} --sha256 {sha256} \\\n -r targets/releases\n \"\"\"\n retry_run(ctx, cmd.format(home=os.path.expanduser(\"~\"), name=name, tag=tag, length=length, sha256=digest))\n finally:\n os.remove(temp_file_path)", "def deploy(fingerengine, fingerprint):\n\n base = 'http://{0}:{1}'.format(fingerengine.options.ip, fingerprint.port)\n uri = '/manager/html/upload'\n war_file = fingerengine.options.deploy\n war_path = parse_war_path(war_file)\n cookies = checkAuth(fingerengine.options.ip, fingerprint.port,\n fingerprint.title, fingerprint.version)\n if not cookies:\n utility.Msg(\"Could not get auth for %s:%s\" %\n (fingerengine.options.ip, fingerprint.port), LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}...\".format(war_file))\n\n if fingerprint.version in ['6.0', '7.0', '8.0']:\n # deploying via the gui requires a CSRF token\n (csrf, c) = fetchCSRF(base, cookies)\n if not csrf:\n return\n else:\n # set CSRF and refresh session id\n uri += '?org.apache.catalina.filters.CSRF_NONCE={0}'\n uri = uri.format(csrf)\n cookies = (c, cookies[1])\n\n # read in payload\n try:\n tag = 'deployWar'\n if fingerprint.version in ['4.0', '4.1']:\n tag = 'installWar'\n files = {tag : (war_path + '.war', open(war_file, 'rb'))}\n except Exception, e:\n utility.Msg(e, LOG.ERROR)\n return\n\n # deploy\n response = utility.requests_post(base + uri, files=files, cookies=cookies[0],\n auth=cookies[1])\n\n if response.status_code is 200 and \"OK\" in response.content:\n utility.Msg(\"Deployed {0} to /{1}\".format(war_file, war_path), LOG.SUCCESS)\n elif 'Application already exists' in response.content:\n 
utility.Msg(\"Application {0} is already deployed\".format(war_file), LOG.ERROR)\n elif response.status_code is 403:\n utility.Msg(\"This account does not have permissions to remotely deploy. Try\"\\\n \" using manager_deploy\", LOG.ERROR)\n else:\n utility.Msg(\"Failed to deploy (HTTP %d)\" % response.status_code, LOG.ERROR)", "def deploy_app(self, app_info):\n raise NotImplementedError", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def _deploy_app():\n rsync_project(env.remote_directory, env.local_directory,\n exclude=['.git/', '*.pyc', 'tests.py', 'migrations/'])\n sudo('service installer_app restart')", "def deploy_go_app(app_name, uri):\n execute(local_fetch_s3_artifact, uri)\n execute(deploy_artifact, app_name, uri)\n execute(create_symlink,\n '{}/config/config.yaml'.format(get_app_basedir(app_name)),\n '{}/etc/config.yaml'.format(get_current_release_dir(app_name)))", "def upload_package(self, __contents):\n raise NotImplementedError", "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "def serve_manifest(app):\n storeapps = APP.config[\"storage\"]\n manifest = os.path.join(storeapps, \"IPA\", app, \"manifest.plist\")\n app_url = request.host_url + \"application/IPA/\" + app + \"/\" + app + \".ipa\"\n if not os.path.isfile(manifest):\n return \"File not found\", 404\n logging.debug(\"Serving manifest with application url: %s\", app_url)\n return flask.Response(open(manifest).read().replace(\"{{ APPLICATION_URL }}\", app_url.encode(\"utf-8\")),\n mimetype='text/xml')", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def finish_publish(hash, metadata, engine_id=None, username=USER):\n identity = \"%s@%s\" % (username, get_config('domain'))\n library = Library.objects.get(identity=identity)\n library.add_item(\n engine_id=engine_id,\n origin=identity,\n metadata=metadata\n )\n return \"OK\"", "def upload():\n sh('python setup.py register sdist upload')", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def compose_package(app_name, manifest, package_dir,\n require=None, archive_dir=None):\n with open(manifest, 'w') as f:\n fqn = 'io.murano.apps.' 
+ app_name\n mfest_copy = MANIFEST.copy()\n mfest_copy['FullName'] = fqn\n mfest_copy['Name'] = app_name\n mfest_copy['Classes'] = {fqn: 'mock_muranopl.yaml'}\n if require:\n mfest_copy['Require'] = require\n f.write(yaml.dump(mfest_copy, default_flow_style=False))\n\n name = app_name + '.zip'\n\n if not archive_dir:\n archive_dir = os.path.dirname(os.path.abspath(__file__))\n archive_path = os.path.join(archive_dir, name)\n\n with zipfile.ZipFile(archive_path, 'w') as zip_file:\n for root, dirs, files in os.walk(package_dir):\n for f in files:\n zip_file.write(\n os.path.join(root, f),\n arcname=os.path.join(os.path.relpath(root, package_dir), f)\n )\n\n return archive_path, name", "def deploy():\n build()\n copy()\n install()", "def push(self) -> None:\n\n with ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': 
base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def deploy():\n local('appcfg.py --no_cookies --email=mccutchen@gmail.com update .',\n capture=False)", "def create_manifest(\n upload_dir,\n study_id,\n analysis_id,\n song_url,\n auth_token\n):\n files_dir = os.path.join(upload_dir, 'files')\n manifest_dir = os.path.join(upload_dir, 'manifests')\n song_client = SongClient(\n song_url,\n auth_token,\n VERIFY_CERTIFICATES\n )\n manifest = song_client.get_analysis_manifest(\n study_id,\n analysis_id,\n files_dir\n )\n if os.path.isdir(manifest_dir):\n shutil.rmtree(manifest_dir)\n os.makedirs(manifest_dir)\n manifest.write(\n os.path.join(manifest_dir, 'manifest.txt'),\n overwrite=True\n )", "def install_apps(self, app_installers):\n print('[?] Installing missing APK(s) and IPA(s).')\n for app_installer in app_installers:\n with request.urlopen(app_installer[1]) as response, open(app_installer[0], 'wb') as out_app_file:\n if response.getcode() != 200:\n print(f'[-] Failed to install {app_installer[1]}.')\n return\n print(f'[+] Successfully installed {app_installer[1]}.')\n shutil.copyfileobj(response, out_app_file)", "def deploy_app(host_=None):\n run_command_on_selected_server(_deploy_app, host_=host_)", "def mergeManifest(channel, targetManifest, sdkManifest):\n\n if not os.path.exists(targetManifest) or not os.path.exists(sdkManifest):\n utils_log.error(\"the manifest file is not exists.targetManifest:%s;sdkManifest:%s\", targetManifest, sdkManifest)\n return False\n\n ET.register_namespace('android', androidNS)\n targetTree = ET.parse(targetManifest)\n targetRoot = targetTree.getroot()\n\n ET.register_namespace('android', androidNS)\n sdkTree = ET.parse(sdkManifest)\n sdkRoot = sdkTree.getroot()\n\n f = open(targetManifest)\n targetContent = f.read()\n f.close()\n\n permissionConfigNode = sdkRoot.find('permissionConfig')\n if permissionConfigNode != None and len(permissionConfigNode) > 0:\n for child in list(permissionConfigNode):\n key = '{' + androidNS + '}name'\n val = child.get(key)\n if val != None and len(val) > 0:\n attrIndex = targetContent.find(val)\n if -1 == attrIndex:\n targetRoot.append(child)\n\n appConfigNode = sdkRoot.find('applicationConfig')\n appNode = targetRoot.find('application')\n\n if appConfigNode != None:\n\n proxyApplicationName = appConfigNode.get('proxyApplication')\n if proxyApplicationName != None and len(proxyApplicationName) > 0:\n\n if 'PYW_APPLICATION_PROXY_NAME' in channel:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = channel[\n 'PYW_APPLICATION_PROXY_NAME'] + ',' + proxyApplicationName\n else:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = proxyApplicationName\n\n # 获取渠道闪屏名称\n launcherName = appConfigNode.get('channelLauncherName')\n # appKeyWord = appConfigNode.get('keyword')\n\n # exists = appKeyWord != None and len(appKeyWord.strip()) > 0 and targetContent.find(appKeyWord) != -1\n\n # if not exists:\n # remove keyword check...\n for child in list(appConfigNode):\n targetRoot.find('application').append(child)\n\n targetTree.write(targetManifest, 'UTF-8')\n # 修改闪屏 如果渠道 需要闪屏文件则增加此方法 不要则注释掉\n if launcherName != None and len(launcherName) > 0:\n mergeLauncher(launcherName, targetManifest)\n\n return True", "def _TransferPublishManifest(self, 
publish_manifest, db_path_prefix,\n force_copy):\n for item in publish_manifest:\n src_path = item.current_path\n dest_path = \"%s/%s\" % (db_path_prefix, item.orig_path)\n logger.debug(\"TransferPublishManifest - src_path: %s, dest_path: %s.\",\n src_path, dest_path)\n\n # Transfer manifest file to published database directory.\n tries = 2\n sleep_secs = 5\n while (not serve_utils.LocalTransfer(\n src_path, dest_path,\n force_copy, prefer_copy=True, allow_symlinks=False)):\n tries -= 1\n if tries == 0:\n raise exceptions.PublishServeException(\n \"Could not transfer publish manifest file %s to %s.\" %\n (src_path, dest_path))\n logger.debug(\"Retrying Local Transfer.\")\n time.sleep(sleep_secs)\n sleep_secs *= 2 # Double the sleep time after each retry.", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def deploy():", "def main(pkg_dir, years):\n pkgname = os.path.basename(pkg_dir)\n identifier = clean_name('archlinux_pkg_' + pkgname)\n metadata = {\n #'collection': ['test_collection', 'open_source_software'],\n #'collection': ['open_source_software'],\n 'collection': ['archlinuxarchive'],\n 'mediatype': 'software',\n 'publisher': 'Arch Linux',\n 'creator': 'Arch Linux',\n 'subject': ['archlinux', 'archlinux package'],\n }\n metadata['title'] = pkgname + \" package archive from Arch Linux\"\n metadata['subject'].append(pkgname)\n upload_pkg(identifier, pkgname, metadata, pkg_dir, years)", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def assets_push(ctx, metadata, dir, brizo, price, service_endpoint, timeout):\n try:\n files = [f for f in os.listdir(dir) if 
os.path.isfile(dir+'/'+f)]\n except NotADirectoryError:\n files = [dir]\n\n response = []\n metadata = json.load(open(metadata, 'r'))\n\n for f in files:\n metadata['base']['files'][0]['url'] = f\n response += [ctx.invoke(assets_publish,\n metadata=metadata,\n brizo=brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout)]", "def archiveApp(appName, appUID):\n logger.debug('[FLASKWEB /app/<appName>/<appUID>] %s Request for App Archive `%s`, UID=`%s`' % (request.method, appName, appUID))\n applist = [a['name'] for a in db.getAllApps()]\n uname = AppID.getAppId(appName, appUID)\n\n # if appName not in applist:\n # logger.warning(\"Archive request for app that does not exist: %s\", appName)\n # return returnError(\"Application %s does not exist\" % appName, 404)\n\n if request.method == 'POST':\n file = request.files['file']\n if file:\n filename = secure_filename(file.filename)\n path = os.path.join(webapp.config['UPLOADED_BUILD_DEST'], uname).encode(encoding='utf8', errors='ignore')\n logger.debug(\"Archiving file, %s, to %s\" % (filename, path))\n if not os.path.exists(path):\n os.mkdir(path)\n file.save(os.path.join(path, filename))\n return \"File Uploaded & archived\\n\", 202\n else:\n logger.warning(\"Archive request, but no file provided.\")\n return \"No file received\\n\", 400\n\n elif request.method == 'GET':\n path = os.path.join(webapp.config['UPLOADED_BUILD_URL'], uname)\n return redirect(path, 302)", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def upload_job_manifest(bucket: Bucket, manifest: JobManifest):\n path = f\"thor_jobs/v1/job-{manifest.job_id}/manifest.json\"\n bucket.blob(path).upload_from_string(manifest.to_str())", "def deploy_artifact(app_name, artifact_uri, owner=DEFAULT_OWNER):\n artifact = path.basename(artifact_uri)\n\n upload_build_artifact(artifact, app_name)\n\n with cd(get_temp_dir(app_name)):\n # handle apps that use the 'current' symlink.\n vhost_dir = get_app_basedir(app_name)\n deploy_dir = get_current_release_dir(app_name)\n current_sym = '{}/current'.format(vhost_dir)\n\n # If a 'current' symlink exists, find out what it points to\n # and rotate it to 'prev'. 
If current is not a symlink, return\n # an error.\n if files.exists(current_sym):\n if files.is_link(current_sym):\n deploy_dir = sudo('readlink {}'.format(current_sym)).stdout\n else:\n raise Exception('[{}] is not a symlink?!?'.format(current_sym))\n\n # delete 'prev' release directory and rotate 'curr' to 'prev'\n # (only if 'current' wasn't pointing to 'prev')\n prev_dir = '{}/releases/prev'.format(vhost_dir)\n if deploy_dir != prev_dir:\n sudo('rm -rf {}'.format(prev_dir))\n if files.exists(deploy_dir):\n sudo('mv {} {}'.format(deploy_dir, prev_dir))\n\n # now, deploy new version into the 'curr' release directory\n sudo('mkdir -pv {}'.format(deploy_dir))\n sudo('tar -C {}/ -xzf {}'.format(deploy_dir, artifact))\n\n # it's possible 'current' isn't pointing at 'curr', so let's\n # fix it to point there now\n sudo('rm -fv {}'.format(current_sym))\n sudo('ln -svf {} {}'.format(deploy_dir, current_sym))\n\n # fix up file ownership on newly-deployed files\n sudo('chown -R {} {}'.format(owner, deploy_dir))\n\n delete_temp_dir(app_name)", "def push_application(self):\n raise NotImplementedError()", "def create_application(name=None, description=None):\n pass", "def assets_publish(ctx, metadata, brizo, price, service_endpoint, timeout):\n from .api.assets import create\n response = create(metadata,\n secret_store=not brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout,\n ocean=ctx.obj['ocean'])\n echo(response)", "def deploy_installer(l_dir=env.local_directory):\n env.local_directory = l_dir\n deploy_app(host_=env.myhost)", "def upload_app_version(app_name, bundled_zip):\n bucket = elasticbeanstalk.get_storage_location()\n key = app_name + '/' + os.path.basename(bundled_zip)\n try:\n ebs3.get_object_info(bucket, key)\n logger.info('S3 Object already exists. 
Skipping upload.')\n except NotFoundError:\n logger.info('Uploading archive to s3 location: ' + key)\n ebs3.upload_application_version(bucket, key, bundled_zip)\n return bucket, key", "def deploy_api(dist_file, apt_req_file):\n _set_credentials()\n provision()\n _deploy_apt_requirements(apt_req_file)\n _deploy_python_package(dist_file)\n _sighup_api()\n _verify_api_heartbeat()\n send_build_stat(PROJECT_NAME, env.stage)", "def install():\n execute(generate)\n execute(upload)", "def manifest_upload(request):\n form = ManifestForm(request.POST or None, request.FILES or None)\n\n if form.is_valid():\n manifest = form.save()\n return HttpResponseRedirect(reverse('file-upload', args=(manifest.id,)))\n\n return render(request, 'file_manager/manifest_upload.html', {\n 'form': form,\n })", "def create(self, content, **kwargs):\n with open(self._manifest.path, 'w') as manifest_file:\n base_info = {\n 'version': self._manifest.VERSION,\n 'type': self._manifest.TYPE,\n }\n for key, value in base_info.items():\n json_item = json.dumps({key: value}, separators=(',', ':'))\n manifest_file.write(f'{json_item}\\n')\n\n for item in content:\n json_item = json.dumps({\n key: value for key, value in item.items()\n }, separators=(',', ':'))\n manifest_file.write(f\"{json_item}\\n\")\n self._manifest.is_created = True", "def deploy():\n build()\n collect()\n commit()\n push()", "def add_publish_command(\n self, relative_manifest_path: str, asset_selector: str\n ) -> None:\n return jsii.invoke(\n self, \"addPublishCommand\", [relative_manifest_path, asset_selector]\n )", "def publish_files():\n print(\"Publishing files to the internet...\", end=\"\", flush=True)\n import subprocess\n try:\n subprocess.run(\"./upload.sh\", timeout=120.0)\n print(\"done.\\n\")\n except:\n print(\"failed.\\n\")", "def upload_pkg(identifier, pkgname, metadata, directory, years):\n files = []\n for f in os.scandir(directory):\n if not f.is_symlink():\n continue\n path = os.readlink(f)\n match = re.match(SYMLINK_YEAR_REGEXP, path)\n if not match:\n continue\n year = match[1]\n if year not in years:\n continue\n files.append(f.path)\n if not files:\n return\n # Get last package, to extract a description\n last_pkg = sorted(filter(lambda x: not x.endswith('.sig'), files))[-1]\n pkginfo = extract_pkginfo(last_pkg)\n pkgdesc = pkginfo['pkgdesc'] if 'pkgdesc' in pkginfo else ''\n metadata['description'] = DESCRIPTION.format(pkgname=pkgname, pkgdesc=pkgdesc, url=pkginfo['url'], license=pkginfo['license'])\n metadata['rights'] = 'License: ' + pkginfo['license']\n #print(pkgname, len(files))\n #print(metadata)\n try:\n res = ia.upload(identifier, files=files, metadata=metadata)\n if not all([x.status_code == 200 for x in res]):\n ok = len([x for x in res if x.status_code == 200])\n nok = len([x for x in res if x.status_code != 200])\n codes = set([x.status_code for x in res])\n print(\"{}: only {}/{} files uploaded, status codes: {}\".format(identifier, ok, ok+nok, codes), file=sys.stderr)\n print(directory)\n except Exception as e:\n print(\"{}: exception raised\".format(identifier), file=sys.stderr)\n print(e, file=sys.stderr)\n print(directory)", "def write_manifest ( self, **manifest_kw ):\n for package in self._subdirs.values():\n package.write_manifest ( **manifest_kw )", "def add_app(self, app_name):\n self.add_list_setting('applications', 'installed_apps', app_name)", "def create_manifest():\n dirpath = os.getcwd()\n file_path_ori = dirpath + \"/manifest.json\"\n file_path_new = dirpath + \"/manifests3.json\"\n\n with 
open(file_path_ori, \"rt\") as fin:\n with open(file_path_new, \"wt\") as fout:\n for line in fin:\n fout.write(line.replace('bucket-name', bucketName))", "def upload(version=minv.__version__, release=\"1\"):\n version = version or minv.__version__\n put(\n join(\n env.builder_path,\n \"build/RPMS/minv-%s-%s.noarch.rpm\" % (version, release)\n ), \"\"\n )\n put(\"minv/package/minv_install_postgresql.sh\", \"\")\n sudo(\"chmod a+x minv_install_postgresql.sh\")\n with lcd(env.ink_path):\n for rpm in RPMS:\n put(rpm, \"\")", "def s3deploy():\n # using aws cli since boto is busted with buckets that have periods (.) in the name\n local('cd {} && aws s3 cp --recursive --acl public-read build/ s3://{}/{}'.format(\n settings.BASE_DIR, AWS_BUCKET_NAME, VERBOSE_APP_NAME))\n log('Deployed! visit http://{}/{}/\\n'.format(AWS_BUCKET_NAME, VERBOSE_APP_NAME), 'green')", "def main():\n Log.info('Installing...')\n app = Application()\n app.run()\n Log.info(\"Done successfully.\")", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy_app(device_id, app_id, app_version):\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/applications\"}\n versions = esapp.App(kargs).get_app_version_by_id(app_id)\n\n kargs.update({\"url_path\": \"/tasks\"})\n if not app_version in versions:\n sys.exit(\"Fail: app_version \\\"%s\\\" not found, available list:%s\" \\\n %(str(app_version), str(jsn.dumps(versions))))\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.create_app_task(device_id, app_version, app_id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n sys.exit(\"Fail: error response\")\n\n try:\n click.echo(\"Success to create a task id: %s\" %(str(dict_resp[\"task_id\"])))\n except Exception as e:\n sys.exit(\"Fail: %s %s\" %(str(e), str(dict_resp)))\n\n if 'status' in dict_resp and dict_resp['status'].lower() != 'success':\n sys.exit(1)", "def test_publish_deployment_run(self):\n pass", "def write_manifest_xml(cls, document, manifest_content):\n with zipfile.ZipFile(document, 'a') as open_document:\n open_document.writestr(DOCUMENT_MANIFEST_PATH, ''.join(manifest_content))", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "async def create_app(self, data: dict) -> dict:\r\n return await self.post(API_APPS, data)", "def publish():\n pass", "def django_start_app(appname):\r\n \r\n actions = []\r\n errs = []\r\n \r\n app = wingapi.gApplication\r\n cmdline, dirname, err = _get_base_cmdline()\r\n if err is not None:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The Django app could not be created: %s\") % err\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n cmdline += ['startapp', appname]\r\n err, output = app.ExecuteCommandLine(cmdline, dirname, None, 5.0, return_stderr=True)\r\n if err != 0 or output[1]:\r\n title = _(\"Failed to Start App\")\r\n msg = _(\"The command %s failed with error code %i and output:\\n\\n%s\\n\\n%s\") % (cmdline, err, _get_output(output), _kMissingPythonMessage)\r\n app.ShowMessageDialog(title, msg)\r\n return\r\n actions.append(_(\"Created Django app %s in %s\") % (appname, 
dirname))\r\n \r\n # Add the new app to INSTALLED_APPS in settings.py\r\n manage_py, settings_py = _CDjangoPluginActivator._instance._FindKeyFiles()\r\n try:\r\n f = open(settings_py)\r\n txt = f.read()\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to read %s to update INSTALLED_APPS\"))\r\n else:\r\n lines = txt.splitlines()\r\n eol = _get_eol(txt)\r\n insert_line = None\r\n in_installed_apps = False\r\n for i, line in enumerate(lines):\r\n if line.lstrip().startswith('INSTALLED_APPS'):\r\n in_installed_apps = True\r\n elif in_installed_apps and line.strip().startswith(')'):\r\n in_installed_apps = False\r\n insert_line = i\r\n if insert_line is None:\r\n lines.extend(['', 'INSTALLED_APPS =', \" '%s',\" % appname, ')', ''])\r\n else:\r\n lines = lines[:insert_line] + [\" '%s',\" % appname] + lines[insert_line:]\r\n try:\r\n txt = eol.join(lines)\r\n f = open(settings_py, 'w')\r\n f.write(txt)\r\n f.close()\r\n except:\r\n errs.append(_(\"Unable to write %s to update INSTALLED_APPS\"))\r\n else:\r\n actions.append(_(\"Added %s to INSTALLED_APPS in %s\") % (appname, settings_py))\r\n \r\n title = _(\"The App was Created\")\r\n msg = _(\"The application was created. \")\r\n if errs:\r\n msg += _get_errors_list(errs)\r\n msg += _get_actions_list(actions)\r\n app.ShowMessageDialog(title, msg, modal=False)", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def packaging(src):\n\twork_copy = osp.dirname(src)\n\t\n\taddon_info = \"\".join(open(work_copy + osp.sep + \"install.rdf\"))\n\taddon_name = re.search(\"(?<=em\\:name\\=\\\").*(?=\\\")\",addon_info).group(0)\n\taddon_version = re.search(\"(?<=em\\:version\\=\\\").*(?=\\\")\",addon_info).group(0)\n\n\ttemp_copy_base = tempfile.mkdtemp()\n\ttemp_copy = osp.join(temp_copy_base,addon_name)\n\t\n\txpi_name = \"%s-%s.xpi\" % (addon_name,addon_version)\n\txpi_fullpath = osp.join(work_copy,xpi_name);\n\t\n\tprint \"\"\"\n\tAdd-on : %s\n\tVersion : %s\n\tWork Copy : %s\n\tTemp Copy : %s\n\tXPI File : %s\n\t\"\"\" % (addon_name,addon_version,work_copy,temp_copy, xpi_name)\n\n\tprint \"copying work to temp dir...\"\n\tcopytree(work_copy,temp_copy,ignore=ignore_patterns('scriptdemo','*.xpi','.*','*.bat','*.py','*LOG','*~','*.swp'))\n\n\tprint \"packaging xpi...\"\n\tcompress(temp_copy,xpi_fullpath);\n\n\tprint \"cleaning...\"\n\trmtree(temp_copy_base)", "def deploy_application(target_environment, config_file, branch, force): # noqa\n # read in and parse configuration\n app = config.AppConfiguration.load(\n config_file or\n os.path.join(settings.app_conf_dir, '%s.conf' % target_environment)\n )\n app_name = app.app_name\n branch = branch or app.default_branch or git.get_current_branch()\n\n # get the contents of the proposed deployment\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n remote_hash = release.commit\n if app.use_pipeline:\n # if we are using pipelines, then the commit we need is not the\n # local one, but the latest version on the upstream app, as this\n # is the one that will be deployed.\n 
upstream_release = heroku.HerokuRelease.get_latest_deployment(app.upstream_app) # noqa\n local_hash = upstream_release.commit\n else:\n local_hash = git.get_branch_head(branch)\n\n if local_hash == remote_hash:\n click.echo(u\"Heroku application is up-to-date, aborting deployment.\")\n return\n\n files = git.get_files(remote_hash, local_hash)\n commits = git.get_commits(remote_hash, local_hash)\n\n post_deploy_tasks = app.post_deploy_tasks\n\n click.echo(\"\")\n click.echo(\"Comparing %s..%s\" % (remote_hash, local_hash))\n click.echo(\"\")\n click.echo(\"The following files have changed since the last deployment:\\n\") # noqa\n if len(files) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" * %s\\n\" % f for f in files]))\n click.echo(\"\")\n click.echo(\"The following commits will be included in this deployment:\\n\") # noqa\n if len(commits) == 0:\n click.echo(\" (no change)\")\n else:\n click.echo(\"\".join([\" [%s] %s\\n\" % (c[0], c[1]) for c in commits]))\n\n # ============== summarise actions ==========================\n click.echo(\"\")\n click.echo(\"Summary of deployment options:\") # noqa\n click.echo(\"\")\n click.echo(\" ----- Deployment SETTINGS -----------\")\n click.echo(\"\")\n click.echo(\" Git branch: %s\" % branch)\n click.echo(\" Target env: %s (%s)\" % (target_environment, app_name))\n click.echo(\" Force push: %s\" % force)\n # pipeline promotion - buildpack won't run\n click.echo(\" Pipeline: %s\" % app.use_pipeline)\n if app.use_pipeline:\n click.echo(\" Promote: %s\" % app.upstream_app)\n click.echo(\" Release tag: %s\" % app.add_tag)\n click.echo(\"\")\n click.echo(\" ----- Post-deployment commands ------\")\n click.echo(\"\")\n\n if not post_deploy_tasks:\n click.echo(\" (None specified)\")\n else:\n [click.echo(\" %s\" % x) for x in post_deploy_tasks]\n\n click.echo(\"\")\n # ============== / summarise actions ========================\n\n # put up the maintenance page if required\n maintenance = utils.prompt_for_action(\n u\"Do you want to put up the maintenance page?\",\n False\n )\n\n if not utils.prompt_for_pin(\"\"):\n exit(0)\n\n if maintenance:\n click.echo(\"Putting up maintenance page\")\n heroku.toggle_maintenance(app_name, True)\n\n if app.use_pipeline:\n click.echo(\"Promoting upstream app: %s\" % app.upstream_app)\n heroku.promote_app(app.upstream_app)\n else:\n click.echo(\"Pushing to git remote\")\n git.push(\n remote=git.get_remote_url(app_name),\n local_branch=branch,\n remote_branch='master',\n force=force\n )\n\n if post_deploy_tasks:\n click.echo(\"Running post-deployment tasks:\")\n run_post_deployment_tasks(post_deploy_tasks)\n\n if maintenance:\n click.echo(\"Pulling down maintenance page\")\n heroku.toggle_maintenance(app_name, False)\n\n release = heroku.HerokuRelease.get_latest_deployment(app_name)\n\n if app.add_tag:\n click.echo(\"Applying git tag\")\n message = \"Deployed to %s by %s\" % (app_name, release.deployed_by)\n git.apply_tag(commit=local_hash, tag=release.version, message=message)\n\n click.echo(release)", "def upload_public_app(self):\n # NOTE(gibi): Listing apps for a user is out of scope so an app ref is\n # not stored in the user now. 
In a list-app-by-user scenario it might\n # be beneficial to have a bidirectional link between App and User.\n return PublicApp(owner=self)", "def build_manifest(self, root):\n manifest = ET.SubElement(root, \"manifest\")\n for sid, href, media_type in self.manifest:\n args = {\"id\": sid, \"href\": href, \"media-type\": media_type}\n ET.SubElement(manifest, \"item\", **args) # pylint: disable-msg=W0142", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def deploy(fingerengine, fingerprint):\n\n\tcfm_path = abspath(fingerengine.options.deploy)\n\tcfm_file = parse_war_path(cfm_path, True)\n\tdip = fingerengine.options.ip\n\n\tcookie = checkAuth(dip, fingerprint.port, title, fingerprint.version)[0]\n\tif not cookie:\n\t\tutility.Msg(\"Could not get auth\", LOG.ERROR)\n\t\treturn\n\n\tutility.Msg(\"Preparing to deploy {0}...\".format(cfm_file))\n\tutility.Msg(\"Fetching web root...\", LOG.DEBUG)\n\n\troot = fetch_webroot(dip, fingerprint, cookie)\n\tif not root:\n\t\tutility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n\t\treturn\n\t\n\t# create the scheduled task\n\tutility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n\tutility.Msg(\"Creating scheduled task...\")\n\n\tif not create_task(dip, fingerprint, cfm_file, root, cookie):\n\t\treturn\n\n\t# invoke the task\n\tutility.Msg(\"Task %s created, invoking...\" % cfm_file)\n\trun_task(dip, fingerprint, cfm_path, cookie)\n\n\t# cleanup\n\tutility.Msg(\"Cleaning up...\")\n\tif not delete_task(dip, fingerprint, cfm_file, cookie):\n\t\tutility.Msg(\"Failed to remove task. May require manual removal.\", LOG.ERROR)", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def deploy():\n return do_deploy(do_pack())", "def upload_platform_manifests_and_config(self, platform_manifest_root, platform_config):\n assert os.path.isdir(platform_manifest_root), \"platform_manifest_root must be a directory\"\n assert os.path.isfile(platform_config), \"platform_config must be a file\"\n logger.info(\"Uploading platform manifests and config ...\")\n\n # Upload all manifests\n for f in os.listdir(platform_manifest_root):\n full_path = os.path.join(platform_manifest_root, f)\n if os.path.isfile(full_path):\n s3_path = self._s3_platform_manifest_dir + f\n logger.info(\"Uploading platform manifest %s -> %s\", full_path, s3_path)\n self._bucket.put_file(\n local_file_name=full_path,\n s3_key=s3_path\n )\n\n # Upload platform config\n logger.info(\"Uploading platform config %s\", platform_config)\n self._bucket.put_file(\n local_file_name=platform_config,\n s3_key=self._s3_platform_config\n )\n\n logger.info(\"Uploading platform manifests and config ... 
Done\")", "def run():\n\n parser = OptionParser()\n parser.add_option(\"-d\", \"--dir\", dest=\"dir\", help=\"The app local directory\")\n parser.add_option(\"-r\", \"--remote_dir\", dest=\"remote_dir\", help=\"The app remote directory\")\n parser.add_option(\"-n\", \"--name\", dest=\"name\", help=\"The django app name\")\n parser.add_option(\"-f\", \"--full\", help=\"Provision before deploy\", default=False)\n parser.add_option(\"-o\", \"--no_files\", help=\"Don't copy the app files\", default=False)\n\n (options, args) = parser.parse_args()\n\n execute(deploy, **options.__dict__)", "def submit_feed(self, feed, feed_type, marketplaceids=None,\n content_type=\"text/xml\", purge='false'):\n md = to_md5(feed)\n data = dict(Action='SubmitFeed',\n FeedType=feed_type,\n PurgeAndReplace=purge,\n ContentMD5Value=md)\n data.update(utils.enumerate_param('MarketplaceIdList.Id.', marketplaceids))\n return self.make_request(data, method=\"POST\", body=feed,\n extra_headers={'Content-Type': content_type})", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def init_new_app(self, developer, name, version=\"\"):\n version = str(version)\n app_full_name = compose_app_full_name(developer, name, version)\n new_app_dir = os.path.join(self.apps_dir_path, \"lib\", app_full_name)\n if not os.path.exists(new_app_dir):\n # 1. creating application folder\n os.makedirs(new_app_dir)\n open(os.path.join(new_app_dir, \"__init__.py\"), \"w\").close()\n # 2. reading application template\n with open(os.path.join(\"blackflow\", \"libs\", \"app_template.py\"), \"r\") as f:\n app_template = f.read()\n app_template = app_template.replace(\"BfApplicationTemplate\", name)\n # 3. writing application template\n with open(os.path.join(new_app_dir, \"%s.py\" % name), \"w\") as f:\n f.write(app_template)\n # 4. writing application descriptor\n descr_template = {\"name\": name, \"version\": version,\"developer\": developer, \"description\": \"\", \"sub_for\": {}, \"pub_to\": {}, \"configs\": {}}\n with open(os.path.join(new_app_dir, \"manifest.json\"), \"w\") as f:\n f.write(json.dumps(descr_template))\n self.app_manifests.append(descr_template)\n log.info(\"Manifest for %s app was loaded\" % (app_full_name))\n return (True, \"\")\n else:\n warn_msg = \"App with name %s and version %s already exists , specify another name or version\" % (name, version)\n log.warn(warn_msg)\n return (False, warn_msg)", "def deploy(fingerengine, fingerprint):\n\n global cookie \n\n cfm_path = abspath(fingerengine.options.deploy) \n cfm_file = parse_war_path(cfm_path, True)\n dip = fingerengine.options.ip\n\n # set our session cookie\n cookie = checkAuth(dip, fingerprint.port, title)\n if not cookie:\n utility.Msg(\"Could not get auth to %s:%s\" % (dip, fingerprint.port),\n LOG.ERROR)\n return\n\n utility.Msg(\"Preparing to deploy {0}..\".format(cfm_file))\n utility.Msg(\"Fetching web root..\", LOG.DEBUG)\n\n # fetch web root; i.e. 
where we can read the shell\n root = fetch_webroot(dip, fingerprint)\n if not root:\n utility.Msg(\"Unable to fetch web root.\", LOG.ERROR)\n return\n\n # create the scheduled task \n utility.Msg(\"Web root found at %s\" % root, LOG.DEBUG)\n utility.Msg(\"Creating scheduled task...\")\n\n if not create_task(dip, fingerprint, cfm_file, root):\n return\n\n # invoke the task\n utility.Msg(\"Task %s created, invoking...\" % cfm_file)\n run_task(dip, fingerprint, cfm_path)\n \n # remove the task\n utility.Msg(\"Cleaning up...\")\n delete_task(dip, fingerprint, cfm_file)", "def convert_manifest(\n self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever\n ):\n pass", "def convert_manifest(\n self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever\n ):\n pass", "def upload_build_artifact(filename, app_name):\n temp_dir = get_temp_dir(app_name)\n\n # pre-clean and setup the remote upload directory\n sudo('rm -rf {}'.format(temp_dir))\n sudo('mkdir -p {}'.format(temp_dir))\n sudo('chown {} {}'.format(env['user'], temp_dir))\n\n # upload build artifact to host's temp_dir\n put(filename, temp_dir, mode=664)", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def _provision_package(self):", "def upload(\n path: Path = typer.Argument(..., help=\"Path to your source code\"),\n entrypoint: str = typer.Argument(..., help=\"Your program entrypoint\"),\n channel: Optional[str] = typer.Option(default=None, help=help_strings.CHANNEL),\n memory: int = typer.Option(\n sdk_settings.DEFAULT_VM_MEMORY, help=\"Maximum memory allocation on vm in MiB\"\n ),\n vcpus: int = typer.Option(\n sdk_settings.DEFAULT_VM_VCPUS, help=\"Number of virtual cpus to allocate.\"\n ),\n timeout_seconds: float = typer.Option(\n sdk_settings.DEFAULT_VM_TIMEOUT,\n help=\"If vm is not called after [timeout_seconds] it will shutdown\",\n ),\n private_key: Optional[str] = typer.Option(\n sdk_settings.PRIVATE_KEY_STRING, help=help_strings.PRIVATE_KEY\n ),\n private_key_file: Optional[Path] = typer.Option(\n sdk_settings.PRIVATE_KEY_FILE, help=help_strings.PRIVATE_KEY_FILE\n ),\n print_messages: bool = typer.Option(False),\n print_code_message: bool = typer.Option(False),\n print_program_message: bool = typer.Option(False),\n runtime: str = typer.Option(\n None,\n help=\"Hash of the runtime to use for your program. Defaults to aleph debian with Python3.8 and node. 
You can also create your own runtime and pin it\",\n ),\n beta: bool = typer.Option(False),\n debug: bool = False,\n persistent: bool = False,\n persistent_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n A persistent volume is allocated on the host machine at any time \n eg: Use , to seperate the parameters and no spaces \n --persistent_volume persistence=host,name=my-volume,size=100 ./my-program main:app\n \"\"\",\n ),\n ephemeral_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 1 parameter Only \n Ephemeral volumes can move and be removed by the host,Garbage collected basically, when the VM isn't running \n eg: Use , to seperate the parameters and no spaces \n --ephemeral-volume size_mib=100 ./my-program main:app \"\"\",\n ),\n immutable_volume: Optional[List[str]] = typer.Option(\n None,\n help=\"\"\"Takes 3 parameters \n Immutable volume is one whose contents do not change \n eg: Use , to seperate the parameters and no spaces \n --immutable-volume ref=25a393222692c2f73489dc6710ae87605a96742ceef7b91de4d7ec34bb688d94,use_latest=true,mount=/mnt/volume ./my-program main:app\n \"\"\",\n ),\n):\n\n setup_logging(debug)\n\n path = path.absolute()\n\n try:\n path_object, encoding = create_archive(path)\n except BadZipFile:\n typer.echo(\"Invalid zip archive\")\n raise typer.Exit(3)\n except FileNotFoundError:\n typer.echo(\"No such file or directory\")\n raise typer.Exit(4)\n\n account: AccountFromPrivateKey = _load_account(private_key, private_key_file)\n\n runtime = (\n runtime\n or input(f\"Ref of runtime ? [{sdk_settings.DEFAULT_RUNTIME_ID}] \")\n or sdk_settings.DEFAULT_RUNTIME_ID\n )\n\n volumes = []\n\n # Check if the volumes are empty\n if (\n persistent_volume is None\n or ephemeral_volume is None\n or immutable_volume is None\n ):\n for volume in prompt_for_volumes():\n volumes.append(volume)\n typer.echo(\"\\n\")\n\n # else Parse all the volumes that have passed as the cli parameters and put it into volume list\n else:\n if len(persistent_volume) > 0:\n persistent_volume_dict = volume_to_dict(volume=persistent_volume)\n volumes.append(persistent_volume_dict)\n if len(ephemeral_volume) > 0:\n ephemeral_volume_dict = volume_to_dict(volume=ephemeral_volume)\n volumes.append(ephemeral_volume_dict)\n if len(immutable_volume) > 0:\n immutable_volume_dict = volume_to_dict(volume=immutable_volume)\n volumes.append(immutable_volume_dict)\n\n subscriptions: Optional[List[Dict]]\n if beta and yes_no_input(\"Subscribe to messages ?\", default=False):\n content_raw = input_multiline()\n try:\n subscriptions = json.loads(content_raw)\n except json.decoder.JSONDecodeError:\n typer.echo(\"Not valid JSON\")\n raise typer.Exit(code=2)\n else:\n subscriptions = None\n\n with AuthenticatedAlephClient(\n account=account, api_server=sdk_settings.API_HOST\n ) as client:\n # Upload the source code\n with open(path_object, \"rb\") as fd:\n logger.debug(\"Reading file\")\n # TODO: Read in lazy mode instead of copying everything in memory\n file_content = fd.read()\n storage_engine = (\n StorageEnum.ipfs\n if len(file_content) > 4 * 1024 * 1024\n else StorageEnum.storage\n )\n logger.debug(\"Uploading file\")\n user_code: StoreMessage\n status: MessageStatus\n user_code, status = client.create_store(\n file_content=file_content,\n storage_engine=storage_engine,\n channel=channel,\n guess_mime_type=True,\n ref=None,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_code_message:\n typer.echo(f\"{user_code.json(indent=4)}\")\n 
program_ref = user_code.item_hash\n\n # Register the program\n message, status = client.create_program(\n program_ref=program_ref,\n entrypoint=entrypoint,\n runtime=runtime,\n storage_engine=StorageEnum.storage,\n channel=channel,\n memory=memory,\n vcpus=vcpus,\n timeout_seconds=timeout_seconds,\n persistent=persistent,\n encoding=encoding,\n volumes=volumes,\n subscriptions=subscriptions,\n )\n logger.debug(\"Upload finished\")\n if print_messages or print_program_message:\n typer.echo(f\"{message.json(indent=4)}\")\n\n item_hash: ItemHash = message.item_hash\n hash_base32 = (\n b32encode(b16decode(item_hash.upper())).strip(b\"=\").lower().decode()\n )\n\n typer.echo(\n f\"Your program has been uploaded on aleph.im .\\n\\n\"\n \"Available on:\\n\"\n f\" {settings.VM_URL_PATH.format(hash=item_hash)}\\n\"\n f\" {settings.VM_URL_HOST.format(hash_base32=hash_base32)}\\n\"\n \"Visualise on:\\n https://explorer.aleph.im/address/\"\n f\"{message.chain}/{message.sender}/message/PROGRAM/{item_hash}\\n\"\n )", "def deploy():\n filepath = do_pack()\n if (filepath is None):\n return False\n return do_deploy(filepath)", "def installed_app_fixture(location, app):\n return _create_installed_app(location.location_id, app.app_id)", "def release_pypi():\n local('python setup.py clean sdist register upload')", "def deploy_user_media(env=None, haus_vars={} ):\n print green('Deploying user media')\n with cd(\"/var/www\"):\n run('./manage.py sync_media_s3 --prefix=uploads')", "def deploy_package(package_path, host):\n\n package_name = package_path.name\n\n result = Connection(host).put(package_path, remote=\"/tmp/\", preserve_mode=False)\n result = Connection(host).sudo(\n \"dpkg --force-confdef --force-confold -i /tmp/{}\".format(package_name)\n )", "def create(self, adt=None, url=None, params=None, dryrun=False):\n if self._id_exists():\n abort(400, \"The application ID already exists\")\n elif self.engine.app_list:\n abort(400, \"Multiple applications are not supported\")\n\n path = self._get_path(adt, url)\n tpl, adaps = self._validate(path, params, dryrun)\n try:\n self.engine.launch(tpl, adaps, self.app_id, dryrun)\n except Exception as error:\n abort(500, f\"Error while deploying: {error}\")\n\n return {\"message\": f\"Application {self.app_id} successfully deployed\"}", "def upload(self, connection):\n if not self.already_deployed(connection):\n if self.config.project_type == \"java\":\n print(blue('Pushing jar to nexus server'))\n connection.local('mvn deploy')\n self._already_deployed = True\n else:\n raise Exception(f\"Unsupported project type: {self.config.project_type}\")", "def event_push_datapackage():\n key = request.headers.get('key')\n if not key or key != current_app.config['SECRET_API']:\n return jsonify(status='Error', errors=['Invalid API key'])\n data = request.get_json(force=True)\n results = import_event_package(data)\n if 'errors' in results:\n return jsonify(status='Error', errors=results['errors'])\n return jsonify(status='Complete', results=results)", "def process_deployapp ( vpc_conn,\n ec2_conn,\n elb_conn,\n cloudwatch_conn,\n s3_infra_conn,\n r53_conn,\n vpc,\n base_name,\n base_topicarn,\n app_name,\n app_type,\n region_name,\n aws_account_type,\n params,\n monitor_params = None ) :\n target_env = base_name\n APP_NAME = app_name.upper( )\n deployment_ami_name = params.get( 'source-ami' )\n source_env = params[ 'source-env' ]\n TARGET_ENV = target_env.upper( )\n SOURCE_ENV = source_env.upper( )\n load_balancer = get_elb_name( target_env, app_name )\n instance_name = 
get_instance_name( target_env, app_name )\n wait_on_launch = params.get( 'wait-on-launch', 'YES' ) == 'YES'\n if not monitor_params :\n monitor_params = params.get( 'monitors' )\n\n instance_secgrp_name = get_secgrp_name( target_env, app_name )\n instance_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n\n ##\n ## Find the correct AMI to use for deployment\n ##\n if not deployment_ami_name or len( deployment_ami_name ) < 1 :\n deployment_ami_name = get_current_ami( s3_infra_conn, region_name, get_env_type( SOURCE_ENV ), app_name )\n if not deployment_ami_name :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n deployment_ami = get_ami_by_name( ec2_conn, deployment_ami_name )\n if not deployment_ami :\n print \"Could not find AMI to use for deployment! \" + deployment_ami_name\n sys.exit( 2 )\n\n subnets = get_vpc_subnets( vpc_conn, vpc, params.get( 'subnet-type', 'PRIVATE' ) )\n secgrps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ instance_secgrp_name ] } )\n \n userdata = get_userdata( app_type, TARGET_ENV, app_name )\n \n new_instances = []\n num_instances = int( params.get( 'num-instances', len( subnets ) ) )\n if num_instances > len( subnets ) :\n num_instances = len( subnets )\n\n while num_instances > 0 :\n instance = launch_instance_vpc( ec2_conn,\n deployment_ami,\n base_name = base_name,\n instance_type = app_name,\n keypair = instance_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = secgrps[ 0 ].id ,\n subnet_id = subnets[ num_instances - 1 ].id,\n user_data = userdata,\n public_ip = False,\n wait_for_running = wait_on_launch )\n new_instances.append( instance )\n\n if monitor_params :\n print \"Setting alarms on the instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, instance.id, APP_NAME, base_topicarn, monitor_params )\n\n num_instances -= 1\n\n new_instance_ids = [ i.id for i in new_instances ]\n\n if ( wait_on_launch ) :\n print \"Waiting for instances to be ready\"\n aws_waits( ec2_conn.get_only_instances, new_instance_ids )\n\n print \"Creating AMI from instance server.\"\n timestamp = get_current_datetime_string( )\n new_ami_name = target_env + '-' + APP_NAME + '-' + timestamp\n ami_instance = new_instances[ 0 ]\n if not wait_on_launch :\n # We must wait for at least the ami instance to be available so we can create a new AMI from it.\n wait_on_object_state( ami_instance, 'running' )\n new_ami = create_ami_from_instance( aws_account_type, ec2_conn, new_instances[ 0 ], new_ami_name )\n if not new_ami :\n print \"Could not create new AMI!\"\n sys.exit( 5 )\n\n print \"Storing new AMI as the current.\"\n save_current_ami( s3_infra_conn, region_name, get_env_type( TARGET_ENV ), app_name, new_ami.name )\n\n print \"Adding the new app instances into the load balancer.\"\n elb = find_elb( elb_conn, load_balancer )\n status = swap_elb_instances( elb_conn = elb_conn,\n elb = elb,\n new_instance_ids = new_instance_ids,\n terminate_old_instances = True,\n ec2_conn = ec2_conn,\n cloudwatch_conn = cloudwatch_conn,\n swap_smoothly = wait_on_launch )\n if not status :\n print \"WARNING: Not all new app instances came up in the load balancer! 
Check the load balancer.\"\n\n print \"Deployment complete.\"", "def create(self, request, *args, **kwargs):\n data = self.request.data\n packaged = 'upload' in data\n form = (NewPackagedForm(data) if packaged\n else NewManifestForm(data))\n\n if not form.is_valid():\n return Response(form.errors, status=HTTP_400_BAD_REQUEST)\n\n if not packaged:\n upload = FileUpload.objects.create(\n user=request.user if request.user.is_authenticated() else None)\n # The hosted app validator is pretty fast.\n tasks.fetch_manifest(form.cleaned_data['manifest'], upload.pk)\n else:\n upload = form.file_upload\n # The packaged app validator is much heavier.\n tasks.validator.delay(upload.pk)\n\n log.info('Validation created: %s' % upload.pk)\n self.kwargs = {'pk': upload.pk}\n # Re-fetch the object, fetch_manifest() might have altered it.\n upload = self.get_object()\n serializer = self.get_serializer(upload)\n status = HTTP_201_CREATED if upload.processed else HTTP_202_ACCEPTED\n return Response(serializer.data, status=status)", "def add_manifest(self, sid, src, media_type):\n tmp = (sid, src, media_type)\n self.manifest.append(tmp)", "def test_upload_manifest(cidc_api, clean_db, monkeypatch, caplog):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n mocks = UploadMocks(\n monkeypatch,\n prismify_extra=PBMC_PATCH,\n )\n\n client = cidc_api.test_client()\n\n # NCI users can upload manifests without explicit permission\n make_nci_biobank_user(user_id, cidc_api)\n with caplog.at_level(logging.DEBUG):\n res = client.post(\n MANIFEST_UPLOAD,\n data=form_data(\n \"pbmc.xlsx\",\n io.BytesIO(b\"a\"),\n \"pbmc\",\n ),\n )\n assert res.status_code == 200\n\n # Check that upload alert email was \"sent\"\n assert \"Would send email with subject '[UPLOAD SUCCESS]\" in caplog.text\n\n # Check that we tried to publish a patient/sample update\n mocks.publish_patient_sample_update.assert_called_once()\n\n # Check that we tried to upload the excel file\n mocks.make_all_assertions()" ]
[ "0.68291336", "0.6059394", "0.6052995", "0.6010262", "0.5933436", "0.5847316", "0.5775076", "0.57516134", "0.5748247", "0.5703469", "0.56963223", "0.56790406", "0.5647197", "0.56322503", "0.5598916", "0.5595371", "0.5584658", "0.5561256", "0.54689205", "0.5453668", "0.54502493", "0.5399678", "0.53928393", "0.53469676", "0.53417796", "0.5337742", "0.53009677", "0.5287064", "0.5279658", "0.526841", "0.52449816", "0.5243194", "0.5233702", "0.52333933", "0.521151", "0.5204436", "0.51975477", "0.5173013", "0.5163458", "0.51373905", "0.5134008", "0.513319", "0.5113326", "0.51104575", "0.510731", "0.5101897", "0.50955683", "0.509289", "0.50842726", "0.5080517", "0.50771636", "0.50762063", "0.5075628", "0.5072478", "0.5063507", "0.5060318", "0.5058783", "0.5056765", "0.5055694", "0.5054481", "0.5045753", "0.5039018", "0.5029148", "0.5027193", "0.50202507", "0.5006343", "0.5000237", "0.4989064", "0.49580497", "0.49476403", "0.49450576", "0.49431074", "0.49424976", "0.4933715", "0.4933715", "0.4933715", "0.49321285", "0.49208355", "0.49194983", "0.49159867", "0.4914547", "0.49140656", "0.49138436", "0.49138436", "0.49109524", "0.48995495", "0.48952585", "0.48946527", "0.48916155", "0.4891414", "0.48884866", "0.48635802", "0.48612875", "0.4855355", "0.48513222", "0.4850372", "0.48469362", "0.4845782", "0.4845661", "0.48401642" ]
0.7366084
0
Queries the marketplace for published apps
def get_search_results(config, client, page):
    resp = client.get_published_apps(config.username, page)
    resp_json = resp.json()
    search_results = resp_json["results"]
    if search_results is None or len(search_results) == 0:
        logger.info(
            click.style("You haven't published any apps to the marketplace yet. Use ", fg="blue") +
            click.style("21 publish submit {PATH_TO_MANIFEST_FILE}", bold=True, fg="blue") +
            click.style(" to publish your apps to the marketplace.", fg="blue"),
            fg="blue")
        return 0

    total_pages = resp_json["total_pages"]
    logger.info("\nPage {}/{}".format(page + 1, total_pages), fg="green")
    headers = ["id", "Title", "Url", "Rating", "Is up", "Is healthy",
               "Average Uptime", "Last Update"]
    rows = []
    for r in search_results:
        rating = "Not yet Rated"
        if r["rating_count"] > 0:
            rating = "{:.1f} ({} rating".format(r["average_rating"], int(r["rating_count"]))
            if r["rating_count"] > 1:
                rating += "s"
            rating += ")"
        rows.append([r["id"], r["title"], r["app_url"], rating,
                     str(r["is_up"]), str(r["is_healthy"]),
                     "{:.2f}%".format(r["average_uptime"] * 100),
                     util.format_date(r["last_update"])])
    logger.info(tabulate(rows, headers, tablefmt="simple"))
    return total_pages
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_apps(provider, query):\n\n workdir = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(workdir, '..', 'config.yml')) as f:\n config = yaml.load(f)\n ex = Explorer()\n logging.info('Read bucket: %s', config['SCOOP_BUCKET'])\n apps = ex.get_apps(os.path.expandvars(config['SCOOP_BUCKET']), query)\n logging.info(\"Apps count = %d\", len(apps))\n installed = provider.get_installed()\n\n # check if already installed\n for app in apps:\n app['installed'] = app['name'] in installed\n\n return apps", "async def get_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_APPS, params=params)", "async def get_installed_apps(self, params: Optional = None) -> dict:\r\n return await self.get_items(API_INSTALLEDAPPS, params=params)", "def _list_apps(config, client):\n logger.info(\"Listing all the published apps by {}: \".format(config.username), fg=\"green\")\n current_page = 0\n total_pages = get_search_results(config, client, current_page)\n if total_pages < 1:\n return\n\n while 0 <= current_page < total_pages:\n try:\n prompt_resp = click.prompt(uxstring.UxString.pagination,\n type=str)\n\n next_page = get_next_page(prompt_resp, current_page)\n\n if next_page == -1:\n model_id = prompt_resp\n display_app_info(config, client, model_id)\n elif next_page >= total_pages or next_page < 0:\n continue\n else:\n get_search_results(config, client, next_page)\n current_page = next_page\n\n except click.exceptions.Abort:\n return", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')", "def get_apps(self):\n return self.apps", "def get_all_apps(self):\n return list(self.apps.values())", "def get_apps(self, request, app_ids):\n sq = WebappIndexer.search()\n if request.query_params.get('filtering', '1') == '1':\n # With filtering (default).\n for backend in self.filter_backends:\n sq = backend().filter_queryset(request, sq, self)\n sq = WebappIndexer.filter_by_apps(app_ids, sq)\n\n # Store the apps to attach to feed elements later.\n with statsd.timer('mkt.feed.views.apps_query'):\n apps = sq.execute().hits\n return dict((app.id, app) for app in apps)", "def get(self):\n return read_heroku_apps(request.args)", "def __get_data_from_store(term):\n url_search = PLAY_STORE_URL + \"/search\"\n response = requests.get(url_search, {'c': 'apps', 'q': term})\n soup = BeautifulSoup(response.content, \"html.parser\")\n apps = soup.find_all(\"div\", {\"class\": \"card no-rationale square-cover apps small\"})\n\n result = []\n print(result)\n for i, app in enumerate(apps):\n app_details_basic = app.find(\"div\", {\"class\": \"details\"})\n app_id = app['data-docid']\n app_data = {\n 'uid': app_id,\n 'name': app_details_basic.find(\"a\", {\"class\": \"title\"})['title'].strip().encode('utf-8'),\n 'dev_name': app_details_basic.find(\"a\", {\"class\": \"subtitle\"})['title'].strip(),\n 'icon_url': \"http://\" + app.find(\n \"div\", {\"class\": \"cover-inner-align\"}).img['data-cover-large'].strip(\"//\")\n }\n\n url_app_detail = PLAY_STORE_URL + \"/apps/details\"\n response = requests.get(url_app_detail, {'id': app_id})\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n app_data.update({\n 'category': soup.find(\"a\", {\"itemprop\": \"genre\"}).text,\n 'description': soup.find(\"div\", {\"itemprop\": \"description\"}).text.strip().encode('utf-8'),\n \n })\n\n \n dev_links = soup.find_all(\"a\", {\"class\": \"dev-link\", \"rel\": \"nofollow\"})\n if dev_links:\n for dev_link in dev_links:\n if \"mailto\" in dev_link['href']:\n 
app_data['dev_email'] = dev_link['href'].replace(\"mailto:\", \"\")\n break\n\n result.append(app_data)\n\n if i + 1 == SEARCH_RESULT_COUNT:\n break\n print(result)\n return result", "def apps(self):\n filters = {\n 'disabled_by_user': False,\n 'status': mkt.STATUS_PUBLIC\n }\n return self._apps.order_by(self.membership_relation).filter(**filters)", "def list_apps(self) -> list:\n apps = self.app.list_apps()\n app_list = [app[\"title\"] for app in apps]\n return app_list", "def listapps(self):\n return jsoncall.do_call(\"listapps\", {'modelname':self.modelname,\\\n 'user':self.user,\\\n 'password':self.password},\n self.connection)", "def ListApps(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def list_apps(self):\n with hide(\"output\", \"running\"):\n result = local((\"redis-cli -h {host} -p 6379 -n {db} keys \\\"*\\\"\"\n .format(host=self.host,\n db=REDIS_APPLICATION_DB_NUM)),\n capture=True)\n\n if len(result.stdout) > 0:\n return result.stdout\n else:\n print(\"Clipper has no applications registered\")\n return \"\"", "def retr_auth_apps() :\n\n\t\t\t_logger.info( '...retr_auth_apps...' )\n\t\t\toutput = []\n\t\t\tdb = mongo.db.auth_apps\n\n\t\t\tcur = db.find()\n\t\t\tif cur.count() == 0 :\n\t\t\t\traise mongo_no_resource_exception( 'no authorized apps found' )\n\t\t\tfor app in db.find() :\n\t\t\t\toutput.append( { 'moniker' : app['moniker'] ,\n\t\t\t\t\t\t\t 'description' : app['description'] ,\n\t\t\t\t\t\t\t\t 'url' : app['url'] } )\n\n\t\t\treturn jsonify( {'result' : output} )", "async def app_list(self) -> List[interface.App]:\n return await self.relay(\"app_list\")()", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "def search_app(self, search_pattern):\n\n url_params = {'limit': SearchAPI.SCAN_LIMIT, 'expand': 'true'}\n first_search = self.get('mgmt-pop/apps', params=url_params)\n data = first_search.json()\n app_found = 0\n app_scanned = 0\n\n # CLI ouput header\n cli.header('#app_id,type,name,host,cname,cert_id,status,reachable')\n stats = self.process_page(data, search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n if data.get(\"meta\"):\n\n app_count = data.get(\"meta\").get(\"total_count\")\n page_offset = data.get(\"meta\").get(\"offset\")\n page_limit = data.get(\"meta\").get(\"limit\")\n page_total = ceil(app_count / page_limit)\n\n logging.debug(\"app_count: {}, scanned: {}, offset: {}, limit: {}, pages: {}\".format(\n app_count, app_scanned, page_offset, page_limit, page_total))\n\n for page in range(1, page_total):\n logging.debug(\"Loading application page {} of {}\".format(page, page_total))\n url_params['offset'] = page * page_limit\n search = self.get('mgmt-pop/apps', params=url_params)\n stats = self.process_page(search.json(), search_pattern)\n app_scanned += stats[0]\n app_found += stats[1]\n\n # CLI ouput footer\n if not config.batch:\n if app_found != app_count:\n cli.footer(\"Found %s app(s), total %s app(s)\" % (app_found, app_count))\n else:\n cli.footer(\"%s app(s)\" % app_count)", "def AppGetApp(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def get_app_manifests(self,sfilter = None):\n if sfilter:\n try:\n return filter(lambda app: app[\"developer\"] == sfilter[\"developer\"] and\n app[\"name\"] == sfilter[\"name\"] and\n app[\"version\"] == sfilter[\"version\"], self.app_manifests)\n except:\n return []\n else :\n return self.app_manifests", "def get_apps(self, limit, offset=None):\n params = {'v': 
WIT_API_VERSION}\n if limit:\n params['limit'] = limit\n if offset:\n params['offset'] = offset\n return req(self.logger, self.access_token, 'GET', '/apps', params)", "def applications():\n storeapps = APP.config[\"storage\"]\n base_url = request.host_url + \"application/\"\n\n response = {\"applications\": []}\n for application in nativeapps.io.ls(storeapps, r\".*\\.(apk|ipa)$\"):\n tokens = application.decode(\"utf-8\").split(os.path.sep)\n directory = tokens[-2]\n name, version = os.path.basename(directory).split(\"-\", 1)\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n\n link = base_url + \"/\".join(tokens[-3:])\n if application.endswith(\".ipa\"):\n link = \"itms-services://?action=download-manifest&url=\" + \\\n base_url + \"/\".join(tokens[-3:-1]) + \"/\" + \"manifest.plist\"\n\n response[\"applications\"].append({\n \"url\": base_url + \"/\".join(tokens[-3:]),\n \"name\": name,\n \"version\": version,\n \"metadata\": nativeapps.io.readfile(meta_path),\n \"link\": link,\n \"type\": application.split(\".\")[-1],\n })\n return flask.jsonify(response)", "def _get_all_app_ids(config, client):\n rv = set()\n total_pages = client.get_published_apps(config.username, 0).json()[\"total_pages\"]\n for current_page in range(total_pages):\n current_page_results = client.get_published_apps(config.username, current_page).json()['results']\n for result in current_page_results:\n rv.add(result['id'])\n return rv", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def get_publishers(self):", "def app_list(self, third_only=False):\n return self.adb.app_list(third_only)", "def get_owned_apps(self):\n user = users.get_current_user()\n if not user:\n return []\n email = user.email()\n try:\n user_info = self.get_by_id(UserInfo, email)\n if user_info:\n return user_info.owned_apps\n else:\n return []\n except Exception as err:\n logging.exception(err)\n return []", "def apps(self):\n if \"apps\" in self._prop_dict:\n return AppsCollectionPage(self._prop_dict[\"apps\"])\n else:\n return None", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_applications(self):\n status_code_dict = {\n codes.ok: ApplicationListResponse,\n codes.bad_request: ErrorResponse,\n }\n return self.get_request(APPLICATION_URL,\n status_code_response_class_dict=status_code_dict,\n )", "def ListApps(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_applications(status):\n return status['applications']", "def get_apps(self):\n try:\n result = self._session.query(AppEntity).all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict", "def get(category, page=1, per_page=5):\r\n\r\n count = n_count(category)\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.description,\r\n app.info, app.created, app.category_id, \"user\".fullname AS owner,\r\n featured.app_id as featured\r\n FROM \"user\", task, app\r\n LEFT OUTER JOIN category ON app.category_id=category.id\r\n LEFT OUTER JOIN featured ON app.id=featured.app_id\r\n WHERE\r\n category.short_name=:category\r\n AND app.hidden=0\r\n AND \"user\".id=app.owner_id\r\n AND app.info LIKE('%task_presenter%')\r\n AND 
task.app_id=app.id\r\n GROUP BY app.id, \"user\".id, featured.app_id ORDER BY app.name\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, category=category, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id,\r\n name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n featured=row.featured,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def _discover_apps(api, limit=None):\n categories = api.categories()\n subcategories = []\n for category in categories:\n subcategories.extend(api.subcategories(category))\n app_lists = []\n app_count = 0\n LOGGER.info(f'Found {len(subcategories)} subcategories for {len(categories)} categories')\n for subcategory in subcategories:\n app_list = api.discover_apps(subcategory)\n if not app_list:\n continue\n while ALL:\n if limit:\n if len(app_list) >= limit:\n app_list = app_list.limit(app_list[:limit])\n LOGGER.info(f'Subcategory \"{app_list.name()}\" reached the threshhold of {limit}, moving on.')\n break\n try:\n app_list.more()\n except Maximum:\n LOGGER.info(f'Subcategory \"{app_list.name()}\" yielded {len(app_list)} apps')\n break\n app_lists.append(app_list)\n app_count += len(app_list)\n app_set = set()\n for app_list in app_lists:\n for app in app_list:\n app_set.add(app.package_name())\n LOGGER.info(f'{\"#\" * 60}\\n'\n f'\\tFinished discovering Apps!\\n'\n f'\\tGot {app_count} apps in {len(app_lists)} subcategories of {len(categories)} categories\\n'\n f'\\tOut of those {app_count} apps, {len(app_set)} apps had a unique package name\\n'\n f'\\t{\"#\" * 60}')\n return app_lists", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count = app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def find_app(self, app_name):\n self._app = None\n for p in self.policy_list.response:\n apps = [app for app in p.resource.applications if app.appName 
== app_name]\n if len(apps) > 0:\n return apps[0]", "def n_published():\r\n sql = text('''\r\n WITH published_apps as\r\n (SELECT app.id FROM app, task WHERE\r\n app.id=task.app_id AND app.hidden=0 AND app.info\r\n LIKE('%task_presenter%') GROUP BY app.id)\r\n SELECT COUNT(id) FROM published_apps;\r\n ''')\r\n results = db.engine.execute(sql)\r\n for row in results:\r\n count = row[0]\r\n return count", "def test_installed_apps(self):\n self.assertIn(__package__, settings.INSTALLED_APPS)", "def ListApps(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def find_applications_for_candidate(email):\n\n applications = Application.objects.filter(\n authorized_email__iexact=email\n ).order_by('-create_dt')\n return applications", "def list_apps(self, ns_name):\n\n return self.helm_client.list(namespace=ns_name)", "def get_app_list(self):\n\n return self._get().keys()", "def get_all_applications():\n cursor.execute(\n f'SELECT * FROM public.applications where status = %s', (\"pending\",))\n rows = cursor.fetchall()\n application_dicts = []\n\n for item in rows:\n application = Application(id=item[0], party_name=item[1], office_name=item[2], user_id=item[3],\n date_created=item[4],status=item[5])\n application = application.json_dumps()\n application_dicts.append(application)\n return application_dicts", "def get_most_popular_app(engine, publisher_id):\n connection = engine.connect()\n result = connection.execute(\n '''\n SELECT a.period,\n a.company_name,\n a.active_users,\n a.publisher_id,\n a.app_id\n FROM aa_months a\n WHERE a.publisher_id='{0}'\n AND a.period >= (SELECT MAX(period) FROM aa_months)\n AND a.period < DATE_ADD((SELECT MAX(period) FROM aa_months), INTERVAL 1 DAY)\n ORDER BY a.active_users DESC\n LIMIT 1\n '''.format(publisher_id))\n connection.close()\n return result.fetchone()", "def sync_apps(self):\n pass", "def get_app_ids(self, feed_element):\n if hasattr(feed_element, 'app'):\n return [feed_element.app]\n return feed_element.apps", "def RApps(self):\n\t\treturn self.acad.ActiveDocument.RegisteredApplications", "def get_app_ids(self):\n return self.apps", "async def get_app(self, app_id: str) -> dict:\r\n return await self.get(API_APP.format(app_id=app_id))", "def check_app(self, package):\n return self.adb.check_app(package)", "def get(self):\n app_info = {\n 'developedBy': 'This app was developed by the Melbourne eResearch Group (www.eresearch.unimelb.edu.au) within the School of Computing and Information Systems (https://cis.unimelb.edu.au) at The University of Melbourne (www.unimelb.edu.au). 
',\n 'description': 'The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.',\n 'contact': 'https://eresearch.unimelb.edu.au',\n 'developedByHTML': '<p>This app was developed by the Melbourne eResearch Group (<a href=\\\"www.eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">www.eresearch.unimelb.edu.au</a>) within the School of Computing and Information Systems (<a href=\\\"https://cis.unimelb.edu.au\\\" target=\\\"_blank\\\">https://cis.unimelb.edu.au</a>) at The University of Melbourne (<a href=\\\"www.unimelb.edu.au\\\" target=\\\"_blank\\\">www.unimelb.edu.au</a>).</p>',\n 'descriptionHTML': '<p>The app uses artificial intelligence (convolutional neural networks) to identify the age, gender, and emotion of the people.</p>',\n 'contactHTML': '<p>Please contact us at: <a href=\\\"eresearch.unimelb.edu.au\\\" target=\\\"_blank\\\">eresearch.unimelb.edu.au</a></p>'\n }\n\n return send_json_response(app_info, 200)", "def apps(self):\n db = self['__store'].db\n my_apps = {\n group_id\n for group_id, in db(\"\"\"\n select distinct\n group_id\n from subgroups, groups\n where\n groups.id = subgroups.group_id\n and subgroup_id = %s\n and groups.type = 'A'\n \"\"\",\n self._id)\n }\n return my_apps", "def get_applications(site) -> list:\n collection = site.Collection\n result = []\n for i in range(collection.Count):\n prop = collection[i].Properties\n result.append(SiteApplication(\n prop[\"path\"].Value,\n prop[\"applicationPool\"].Value\n ))\n\n return result", "def connect_apps(self):\r\n return applications.ConnectApps(self)", "def oauth_app_exists(self):\n try:\n Application.objects.get(name=settings.OAUTH2_APPLICATION_NAME)\n except Application.DoesNotExist:\n\n return False\n\n return True", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def get(self):\n apps = Application.objects()\n\n # TODO return more information\n apps_clean = []\n for app in apps:\n # don't include invalid apps\n if app[\"validated\"] is True:\n apps_clean.append(\n {\"name\": app[\"name\"]}\n )\n\n return apps_clean, 200", "def more(self):\n self.subcategory.proto.dataUrl = self.next_page_url\n # LOGGER.info(f'{\"#\"*80}\\n{self.subcategory.proto.dataUrl}{\"#\"*80}\\n')\n try:\n return self.api.discover_apps(self.subcategory, self)\n except DecodeError:\n raise Maximum", "def get_installations():\n github_app = get_default_app()\n pprint(github_app.get_installations())", "def 
available(self, app):\n return self.xlist(\"testfor\", app)[0]", "def get_bungie_applications(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/App/FirstParty/\"))", "def getApp(appName):\n logger.debug('[FLASKWEB /apps/<appName>] GET request for app, `%s`' % appName)\n applist = [a['name'] for a in db.getAllApps()]\n if appName in applist:\n versionList = db.getVersions(appName)\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(name=appName, versions=versionList)), 200\n else:\n return render_template(\"apps.html\", name=appName, versionList=versionList)\n else:\n return returnError(\"Application %s does not exist\" % appName, 404)", "def test_if_app_gets_shoppinglists(self):\n li = self.client.get('/shoppinglists/?each_page=1&page_number=1',\n headers = {\n 'Content-Type':'application/json',\n 'x-access-token':self.tok})\n self.assertEqual(li.status_code, 200)", "def applications(name):\r\n user = User.query.filter_by(name=name).first()\r\n if not user:\r\n return abort(404)\r\n if current_user.name != name:\r\n return abort(403)\r\n\r\n user = db.session.query(model.user.User).get(current_user.id)\r\n apps_published, apps_draft = _get_user_apps(user.id)\r\n\r\n return render_template('account/applications.html',\r\n title=gettext(\"Applications\"),\r\n apps_published=apps_published,\r\n apps_draft=apps_draft)", "def checkapp(self, app):\n\n data = requests.get('http://store.steampowered.com/api/appdetails?appids={0}&format=json'.format(app)).json()\n\n if data[str(app)][\"success\"]:\n type = data[str(app)][\"data\"]['type']\n if (type != 'game'):\n return False\n return data[str(app)][\"success\"]", "def add_app(self, app_name):\n self.add_list_setting('applications', 'installed_apps', app_name)", "def describe_apps(StackId=None, AppIds=None):\n pass", "def test_query_app(self):\r\n AppFactory.create(short_name='test-app', name='My New App')\r\n # Test for real field\r\n res = self.app.get(\"/api/app?short_name=test-app\")\r\n data = json.loads(res.data)\r\n # Should return one result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'test-app', data\r\n\r\n # Valid field but wrong value\r\n res = self.app.get(\"/api/app?short_name=wrongvalue\")\r\n data = json.loads(res.data)\r\n assert len(data) == 0, data\r\n\r\n # Multiple fields\r\n res = self.app.get('/api/app?short_name=test-app&name=My New App')\r\n data = json.loads(res.data)\r\n # One result\r\n assert len(data) == 1, data\r\n # Correct result\r\n assert data[0]['short_name'] == 'test-app', data\r\n assert data[0]['name'] == 'My New App', data", "def __extract_apps(self, mps_db_session):\n\n try:\n # Get all the apps defined in the database\n if (self.app_id == None):\n app_cards = mps_db_session.query(models.ApplicationCard).all()\n else:\n app_cards = mps_db_session.query(models.ApplicationCard).\\\n filter(models.ApplicationCard.global_id == self.app_id).all()\n link_nodes = mps_db_session.query(models.LinkNode).all()\n except exc.SQLAlchemyError as e:\n raise\n\n if (len(link_nodes) > 0):\n for ln in link_nodes:\n name = ln.get_name()\n cr = ln.crate.location\n ln_type = ln.get_type()\n ln_app_prefix = ln.get_app_prefix()\n self.link_nodes[name] = {}\n self.link_nodes[name]['type'] = ln_type # 'Analog', 'Digital' or 'Mixed'\n self.link_nodes[name]['slots'] = {}\n self.link_nodes[name]['app_prefix'] = ln_app_prefix\n 
self.link_nodes[name]['physical'] = \"Not Installed\"\n\n #self.link_nodes[cr] = {}\n #self.link_nodes[cr]['type'] = ln_type # 'Analog', 'Digital' or 'Mixed'\n #self.link_nodes[cr]['slots'] = {}\n #self.link_nodes[cr]['app_prefix'] = ln_app_prefix\n\n # Check if there were applications defined in the database\n if len(app_cards) == 0:\n return\n\n # Iterate over all the found applications\n for app_card in app_cards:\n\n # Look for all devices in this application\n devices = app_card.devices\n\n # Extract analog and digital devices\n analog_devices = []\n digital_devices= []\n for device in devices:\n\n if type(device) is models.device.AnalogDevice:\n analog_devices.append(device)\n\n if type(device) is models.device.DigitalDevice:\n digital_devices.append(device)\n\n # Process the analog devices\n if len(analog_devices):\n\n ln_name = app_card.link_node.get_name()\n phys = app_card.crate.location\n #ln_name = app_card.crate.location\n\n # Get this application data\n app_data = {}\n app_data[\"app_id\"] = app_card.global_id\n app_data[\"cpu_name\"] = app_card.link_node.cpu\n app_data[\"crate_id\"] = app_card.crate.crate_id\n app_data[\"crate_key\"] = app_card.link_node.crate_id\n app_data[\"slot_number\"] = app_card.slot_number\n app_data[\"link_node_name\"] = ln_name\n app_data[\"physical\"] = phys\n app_data[\"link_node_area\"] = app_card.link_node.area\n app_data[\"link_node_location\"] = app_card.link_node.location\n app_data[\"card_index\"] = app_card.get_card_id()\n app_data[\"lc1_node_id\"] = str(app_card.link_node.lcls1_id)\n app_data[\"app_prefix\"] = app_card.get_pv_name()\n app_data['link_node_name_prev'] = app_card.link_node.get_name()\n\n\n self.link_nodes[ln_name][\"lc1_node_id\"] = app_data[\"lc1_node_id\"]\n self.link_nodes[ln_name][\"crate_id\"] = app_data[\"crate_id\"]\n self.link_nodes[ln_name][\"cpu_name\"] = app_data[\"cpu_name\"]\n self.link_nodes[ln_name][\"physical\"] = app_data[\"physical\"]\n self.link_nodes[ln_name][\"sioc\"] = ln_name\n\n #self.link_nodes[phys][\"lc1_node_id\"] = app_data[\"lc1_node_id\"]\n #self.link_nodes[phys][\"crate_id\"] = app_data[\"crate_id\"]\n #self.link_nodes[phys][\"cpu_name\"] = app_data[\"cpu_name\"]\n #self.link_nodes[phys][\"physical\"] = app_data[\"physical\"]\n #self.link_nodes[phys][\"sioc\"] = ln_name\n\n self.__add_slot_information_by_name(mps_db_session, ln_name, app_card)\n #self.__add_slot_information_by_crate(mps_db_session, phys, app_card)\n\n # Defines whether the IOC_NAME env var should be added no the mps.env\n # file. In order to add only once we need to figure out if there are\n # other cards with the same SIOC. If there is a digital card the SIOC\n # is written in the digital section. 
If a card is a \"Generic ADC\" and\n # it is not in slot 2 then it has its own SIOC (there are only ~7 cases\n # like that)\n app_data[\"analog_link_node\"] = False\n if (app_card.link_node.slot_number != 2 and app_card.name == \"Generic ADC\"):\n app_data[\"analog_link_node\"] = True # Non-slot 2 link node\n self.link_nodes[ln_name]['analog_slot'] = app_card.link_node.slot_number\n #self.link_nodes[phys]['analog_slot'] = app_card.link_node.slot_number\n elif (app_card.link_node.slot_number == 2):\n self.link_nodes[ln_name]['analog_slot'] = 2\n #self.link_nodes[phys]['analog_slot'] = 2\n has_digital = False\n for c in app_card.link_node.cards:\n if (c.name == \"Digital Card\" or c.name == \"Generic ADC\" and c.id != app_card.id):\n has_digital = True\n\n if (not has_digital):\n app_data[\"analog_link_node\"] = True # Add if the digital card is not defined\n\n app_data[\"devices\"] = []\n\n # Iterate over all the analog devices in this application\n for device in analog_devices:\n\n # Look for fault inputs in this device\n fault_inputs = device.fault_outputs\n\n # Check if this devices has faults. Only devices with defined faults will be included\n if len(fault_inputs):\n\n # get this device data\n device_data = {}\n device_data[\"type_name\"] = self.__get_device_type_name(mps_db_session, device.device_type_id)\n device_data[\"bay_number\"], device_data[\"channel_number\"], device_data[\"channel_index\"] = \\\n self.__get_bay_ch_number(mps_db_session, device.channel_id, app_card.type_id)\n device_data[\"area\"] = device.area\n device_data[\"position\"] = device.position\n device_data[\"faults\"] = {}\n device_data[\"device_name\"] = device.name\n device_data[\"prefix\"] = '{}:{}:{}'.format(device_data[\"type_name\"], device.area, device.position)\n\n # Iterate over all the faults in this device\n for fault_input in fault_inputs:\n faults = mps_db_session.query(models.Fault).filter(models.Fault.id==fault_input.fault_id).all()\n if (len(faults) != 1):\n print 'ERROR: Fault not defined'\n exit(-1)\n fault_states = mps_db_session.query(models.FaultState).\\\n filter(models.FaultState.fault_id==faults[0].id).all()\n\n # Get the fault ID\n fault_id = fault_input.fault_id\n\n # Get this fault data\n if (not fault_id in device_data[\"faults\"]):\n # Get the fault corresponding to this fault input.\n fault = self.__get_fault(mps_db_session, fault_id)\n\n fault_data = {}\n fault_data[\"id\"] = fault_id\n fault_data[\"name\"] = fault.name\n fault_data[\"description\"] = fault.description[:39]\n fault_data[\"bit_positions\"] = []\n fault_data[\"integrators\"] = []\n for fs in fault_states:\n fault_data[\"bit_positions\"].append(fs.device_state.get_bit_position())\n fault_data[\"integrators\"].append(fs.device_state.get_integrator())\n \n # Add this fault to the list of faults of the current device\n device_data[\"faults\"][fault_id] = fault_data\n\n # Add this device to the list of devices of the current application\n app_data[\"devices\"].append(device_data)\n\n\n # Add this application to the list of applications, if its list of devices is not empty\n # The list of devices will be empty if not device in this app have defined fault inputs,\n # as devices without faults inputs won't be processed.\n if app_data[\"devices\"]:\n self.analog_apps.append(app_data)\n\n # Process the digital devices\n if len(digital_devices):\n\n ln_name = app_card.link_node.get_name()\n phys = app_card.crate.location\n\n # Get this application data\n app_data = {}\n app_data[\"app_id\"] = app_card.global_id\n 
app_data[\"cpu_name\"] = app_card.link_node.cpu\n app_data[\"crate_id\"] = app_card.crate.crate_id\n app_data[\"crate_key\"] = app_card.crate.id\n app_data[\"slot_number\"] = app_card.slot_number\n app_data[\"link_node_area\"] = app_card.link_node.area\n app_data[\"link_node_name\"] = ln_name\n app_data[\"physical\"] = phys\n app_data[\"link_node_location\"] = app_card.link_node.location\n app_data[\"card_index\"] = app_card.get_card_id()\n app_data[\"virtual\"] = False\n app_data[\"lc1_node_id\"] = str(app_card.link_node.lcls1_id)\n app_data[\"app_prefix\"] = app_card.get_pv_name()\n app_data[\"devices\"] = []\n if (app_card.has_virtual_channels()):\n app_data[\"virtual\"] = True\n\n self.link_nodes[ln_name][\"lc1_node_id\"] = app_data[\"lc1_node_id\"]\n self.link_nodes[ln_name][\"crate_id\"] = app_data[\"crate_id\"]\n self.link_nodes[ln_name][\"cpu_name\"] = app_data[\"cpu_name\"]\n self.link_nodes[ln_name][\"dig_app_id\"] = app_data[\"app_id\"]\n self.link_nodes[ln_name][\"physical\"] = app_data[\"physical\"]\n self.link_nodes[ln_name][\"sioc\"] = ln_name\n\n #self.link_nodes[phys][\"lc1_node_id\"] = app_data[\"lc1_node_id\"]\n #self.link_nodes[phys][\"crate_id\"] = app_data[\"crate_id\"]\n #self.link_nodes[phys][\"cpu_name\"] = app_data[\"cpu_name\"]\n #self.link_nodes[phys][\"dig_app_id\"] = app_data[\"app_id\"]\n #self.link_nodes[phys][\"physical\"] = app_data[\"physical\"]\n #self.link_nodes[phys][\"sioc\"] = ln_name\n\n # Iterate over all the analog devices in this application\n for device in digital_devices:\n\n # Look for all the inputs in this device\n inputs = device.inputs\n\n # Check if this device has inputs. Only devices with inputs defined will be included\n if len(inputs):\n\n # get this device data\n device_data = {}\n device_data[\"type_name\"] = self.__get_device_type_name(mps_db_session, device.device_type_id)\n device_data[\"area\"] = device.area\n device_data[\"position\"] = device.position\n device_data[\"inputs\"] = []\n device_data[\"device_name\"] = device.name\n device_data[\"prefix\"] = '{}:{}:{}'.format(device_data[\"type_name\"], device.area, device.position)\n\n for input in inputs:\n # Get the digital channel\n digital_channel = self.__get_digital_channel(mps_db_session, input.channel_id)\n\n # Get this channel data\n input_data = {}\n input_data[\"name\"] = digital_channel.name\n input_data[\"bit_position\"] = digital_channel.number\n input_data[\"zero_name\"] = digital_channel.z_name\n input_data[\"one_name\"] = digital_channel.o_name\n input_data[\"alarm_state\"] = digital_channel.alarm_state\n input_data[\"debounce\"] = digital_channel.debounce\n input_data[\"db_id\"] = digital_channel.id\n if (digital_channel.num_inputs == 1):\n input_data[\"input_pv\"] = digital_channel.monitored_pvs\n input_data[\"alarm_state\"] = digital_channel.alarm_state\n if (digital_channel.alarm_state == 0):\n input_data[\"zero_severity\"] = \"MAJOR\"\n input_data[\"one_severity\"] = \"NO_ALARM\"\n else:\n input_data[\"zero_severity\"] = \"NO_ALARM\"\n input_data[\"one_severity\"] = \"MAJOR\"\n\n # Add this input to the list of inputs of the current device\n device_data[\"inputs\"].append(input_data)\n\n # Add this device to the list of devices of the current application\n app_data[\"devices\"].append(device_data)\n\n\n # Add this application to the list of applications, if its list of devices is not empty\n # The list of devices will be empty if not device in this app have defined fault inputs,\n # as devices without faults inputs won't be processed.\n if 
app_data[\"devices\"]:\n self.digital_apps.append(app_data)\n for app_card in app_cards:\n ln_name = app_card.link_node.get_name()\n if (app_card.link_node.slot_number != 2 and app_card.name == \"Generic ADC\"):\n for ln_names, ln in self.link_nodes.items():\n if ln[\"physical\"] != \"Not Installed\":\n if ln[\"physical\"] == self.link_nodes[ln_name]['physical']:\n if ln_name != ln_names:\n print self.link_nodes[ln_name]['physical']\n self.__add_slot_information_by_name(mps_db_session, ln_names, app_card)", "def test_get_top_doesnt_return_hidden_apps(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n hidden_app = self.create_app_with_contributors(11, 0, name='hidden')\r\n hidden_app.hidden = 1\r\n db.session.add(hidden_app)\r\n db.session.commit()\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert len(top_apps) is 3, len(top_apps)\r\n for app in top_apps:\r\n assert app['name'] != 'hidden', app['name']", "def list(ctx):\n # pylint: disable=redefined-builtin\n _list_apps(ctx.obj['config'], ctx.obj['client'])", "def listExclusiveItems(appStore):\n appStores = ('Xbox', 'Amazon', 'iOS', 'Google Play')\n if appStore not in appStores:\n Exception(f\"No valid app store was provided. Valid choices are {appStores}.\")\n else:\n url = f\"https://catalog.roblox.com/v1/exclusive-items/{appStore}/bundles\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']", "def __get_data_from_db(term):\n try:\n index = AppSearchIndex.objects.get(query=term)\n except AppSearchIndex.DoesNotExist:\n index = None\n if index:\n apps_data = index.apps.all().values()\n return apps_data\n return None", "def extract_programs():\n if settings.XPRO_CATALOG_API_URL:\n return requests.get(settings.XPRO_CATALOG_API_URL, timeout=20).json()\n return []", "async def get_installed_app(self, installed_app_id: str) -> dict:\r\n return await self.get(\r\n API_INSTALLEDAPP.format(installed_app_id=installed_app_id)\r\n )", "def AppGetApp(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def app_list():\n user = users.get_current_user()\n if user:\n if users.is_current_user_admin() or is_local_admin():\n appls_query = Applic.query(ancestor = base_key).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n userid = user.user_id()\n #return userid\n appls_query = Applic.query(Applic.user==userid).order(-Applic.date)\n appls = appls_query.fetch()\n output = template('applist', appls=appls, name=g_name, log_in_out = users.create_logout_url('/'), opt = 'Выход', user = user.nickname())\n return output\n else:\n redirect('/')", "def _make_app_url(self, view, qs):\n\t\treturn self.make_url([\"app\",\"search_activity\",view], qs)", "def show(ctx, appeui):\n if '.' 
in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def get_media_apps_info(self, request):\n with self.lock:\n d = {app_id: str(app_info) for app_id, app_info in list(self.gstreamers.items())}\n return MediaAppsInfoResponse(json=json.dumps(d))", "def get_draft(category, page=1, per_page=5):\r\n\r\n count = n_draft()\r\n\r\n sql = text('''SELECT app.id, app.name, app.short_name, app.created,\r\n app.description, app.info, \"user\".fullname as owner\r\n FROM \"user\", app LEFT JOIN task ON app.id=task.app_id\r\n WHERE task.app_id IS NULL AND app.info NOT LIKE('%task_presenter%')\r\n AND app.hidden=0\r\n AND app.owner_id=\"user\".id\r\n OFFSET :offset\r\n LIMIT :limit;''')\r\n\r\n offset = (page - 1) * per_page\r\n results = db.engine.execute(sql, limit=per_page, offset=offset)\r\n apps = []\r\n for row in results:\r\n app = dict(id=row.id, name=row.name, short_name=row.short_name,\r\n created=row.created,\r\n description=row.description,\r\n owner=row.owner,\r\n last_activity=pretty_date(last_activity(row.id)),\r\n last_activity_raw=last_activity(row.id),\r\n overall_progress=overall_progress(row.id),\r\n info=dict(json.loads(row.info)))\r\n apps.append(app)\r\n return apps, count", "def test_get_all_saved_app_map_searches(self):\n pass", "def find_gp_app_links(html):\n links = []\n for m in re.finditer('href=\"(/store/apps/details[^\"]+)\"', html):\n #print '%02d-%02d: %s' % (m.start(), m.end(), m.group(1))\n links.append(m.group(1))\n return links", "def test_app_query(self):\r\n AppFactory.create(info={'total': 150})\r\n res = self.app.get('/api/app')\r\n data = json.loads(res.data)\r\n assert len(data) == 1, data\r\n app = data[0]\r\n assert app['info']['total'] == 150, data\r\n\r\n # The output should have a mime-type: application/json\r\n assert res.mimetype == 'application/json', res\r\n\r\n # Test a non-existant ID\r\n res = self.app.get('/api/app/3434209')\r\n err 
= json.loads(res.data)\r\n assert res.status_code == 404, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'app', err\r\n assert err['exception_cls'] == 'NotFound', err\r\n assert err['action'] == 'GET', err", "def discover_glitter_apps(self):\n for app_name in settings.INSTALLED_APPS:\n module_name = '{app_name}.glitter_apps'.format(app_name=app_name)\n try:\n glitter_apps_module = import_module(module_name)\n if hasattr(glitter_apps_module, 'apps'):\n self.glitter_apps.update(glitter_apps_module.apps)\n except ImportError:\n pass\n\n self.discovered = True", "def upload_public_app(self):\n # NOTE(gibi): Listing apps for a user is out of scope so an app ref is\n # not stored in the user now. In a list-app-by-user scenario it might\n # be beneficial to have a bidirectional link between App and User.\n return PublicApp(owner=self)", "def applications(self):\r\n return applications.Applications(self)", "def apps(self):\n return list(self.ctx.keys())", "def package_all(q):\n\n query = (q.dict_of_lists())[\"q\"][0]\n datasets = p.toolkit.get_action(\"package_search\")(\n {}, data_dict={\"q\": query, \"include_private\": True}\n )\n\n result = datasets[\"results\"]\n results = []\n for res in result:\n results.append(res)\n return results", "def test_get_top_returns_four_apps_by_default(self):\r\n\r\n ranked_3_app = self.create_app_with_contributors(8, 0, name='three')\r\n ranked_2_app = self.create_app_with_contributors(9, 0, name='two')\r\n ranked_1_app = self.create_app_with_contributors(10, 0, name='one')\r\n ranked_4_app = self.create_app_with_contributors(7, 0, name='four')\r\n ranked_5_app = self.create_app_with_contributors(7, 0, name='five')\r\n\r\n top_apps = cached_apps.get_top()\r\n\r\n assert len(top_apps) is 4, len(top_apps)", "def program_list():\n items = []\n\n soup = abcradionational.get_soup(URL + \"/podcasts/program\")\n \n program_heading = abcradionational.get_podcast_heading(soup)\n\n for program in program_heading:\n items.append({\n 'label': program['title'],\n 'path': plugin.url_for('program_item', url=program['url']),\n })\n\n return items", "def _fetch_app_info(app_id):\n try:\n assert len(app_id), \"Empty string\"\n lookup_url = \"https://itunes.apple.com/lookup?id=\"\n target_url = lookup_url + app_id\n if sys.version_info < (3, 5):\n response = urllib2.urlopen(target_url)\n else:\n response = urllib.request.urlopen(target_url)\n data = response.read() # a `bytes` object\n text = data.decode('utf-8')\n app_info = json.loads(text)\n return app_info\n except AssertionError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib.error.URLError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n except urllib2.HTTPError as e:\n print(e)\n sys.exit(\"Exit script with error code %s\" % e)\n\n except:\n e = sys.exc_info()[0]\n print(\"Error: %s\" % e)\n sys.exit(\"Exit script with error code %s\" % e)", "def get_app(self, app_id):\n return req(self.logger, self.access_token, 'GET', '/apps/'+app_id, {})", "def AppGetApp(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def applications(self) -> List[ApplicationRequestResponse]:\n return self._applications", "def __GetPublishManifest(self, publish_def):\n assert serve_utils.IsFusionDb(publish_def.db_type)\n\n # Get publish URLs.\n # 
Build stream URL based on Virtual Host URL.\n vh_url, vh_ssl = self._publish_helper.QueryVh(publish_def.virtual_host_name)\n vh_base_url = self._publish_helper.GetVhBaseUrl(vh_url, vh_ssl)\n stream_url = urlparse.urljoin(vh_base_url, publish_def.target_path)\n\n logger.debug(\"Stream URL: %s\", stream_url)\n\n # Get database ID from gesearch database.\n search_db_id = self._publish_helper.GetSearchDbId(\n publish_def.client_host_name, publish_def.db_name)\n\n # Build end_snippet proto section.\n # Set end_snippet_proto to empty string - nothing to merge.\n snippets_set = None\n end_snippet_proto = None\n if publish_def.snippets_set_name:\n snippets_set = self._snippets_manager.GetSnippetSetDetails(\n publish_def.snippets_set_name)\n logger.debug(\"Snippets set: %s\", snippets_set)\n\n # Get list of search definitions.\n search_def_list = self.__GetSearchDefs(\n publish_def.search_tabs,\n False, # is_supplemental\n search_db_id,\n publish_def.poi_federated,\n publish_def.poi_suggestion)\n\n # Get list of supplemental search definitions.\n sup_search_def_list = self.__GetSearchDefs(\n publish_def.sup_search_tabs,\n True, # is_supplemental\n search_db_id,\n publish_def.poi_federated,\n publish_def.poi_suggestion)\n\n supplemental_ui_label = None\n if sup_search_def_list:\n if len(sup_search_def_list) == 1:\n supplemental_ui_label = sup_search_def_list[0].label\n else:\n supplemental_ui_label = PublishManager.SUPPLEMENTAL_UI_LABEL_DEFAULT\n if publish_def.supplemental_ui_label:\n # Override with user specified value for the supplemental UI label.\n supplemental_ui_label = publish_def.supplemental_ui_label\n\n if publish_def.db_type == basic_types.DbType.TYPE_GE:\n # Get end_snippet of protobuf dbroot - integrates snippets set and search\n # services.\n search_tab_id = None\n if publish_def.need_search_tab_id:\n # Using a target path as an ID for main search services by appending it\n # to a search service label allows us to differentiate search tabs from\n # different databases in Earth Client.\n search_tab_id = publish_def.target_path[1:]\n\n supplemental_search_url = PublishManager.SUPPLEMENTAL_SEARCH_URL\n end_snippet_proto = dbroot_writer.CreateEndSnippetProto(\n snippets_set,\n search_def_list,\n search_tab_id,\n supplemental_ui_label,\n supplemental_search_url,\n logger)\n\n # Note: useful for debugging.\n # if __debug__:\n # logger.debug(\"Proto end_snippet: %s\", end_snippet_proto)\n\n try:\n # Get publish manifest.\n publish_helper = libgepublishmanagerhelper.PublishManagerHelper(logger)\n publish_manifest = libgepublishmanagerhelper.ManifestEntryVector()\n\n # Prepare publish config.\n publish_config = libgepublishmanagerhelper.PublishConfig()\n publish_config.fusion_host = publish_def.client_host_name\n publish_config.db_path = publish_def.db_name\n publish_config.stream_url = stream_url\n publish_config.end_snippet_proto = (\n end_snippet_proto if end_snippet_proto else \"\")\n publish_config.server_prefix = self._publish_helper.server_prefix\n\n publish_helper.GetPublishManifest(publish_config, publish_manifest)\n\n logger.debug(\"PublishDatabase: publish manifest size %s.\",\n len(publish_manifest))\n\n if not publish_manifest:\n raise exceptions.PublishServeException(\n \"Unable to create publish manifest. 
Database is not pushed.\")\n\n # Creates the search.json file and append it to publish manifest.\n # Note: we append the search.json and supplemental search\n # UI html files to the publish manifest if it is needed.\n if sup_search_def_list:\n search_def_list.extend(sup_search_def_list)\n\n if search_def_list:\n publish_tmp_dir_path = os.path.normpath(publish_helper.TmpDir())\n search_json_local_path = (\n \"%s/%s\" % (publish_tmp_dir_path, PublishManager.SEARCH_JSON_PATH))\n self. __CreateSearchJsonFile(search_def_list, search_json_local_path)\n search_json_dbroot_path = PublishManager.SEARCH_JSON_PATH\n\n publish_manifest.push_back(\n libgepublishmanagerhelper.ManifestEntry(search_json_dbroot_path,\n search_json_local_path))\n\n # Append supplemental search UI html to publish manifest.\n if ((publish_def.db_type == basic_types.DbType.TYPE_GE) and\n supplemental_ui_label):\n search_html_local_path = os.path.join(\n PublishManager.HTDOCS_EARTH_PATH,\n PublishManager.SUPPLEMENTAL_SEARCH_UI_HTML)\n search_html_dbroot_path = PublishManager.SEARCH_HTML\n publish_manifest.push_back(\n libgepublishmanagerhelper.ManifestEntry(search_html_dbroot_path,\n search_html_local_path))\n\n # {gedb/,mapdb/} path is {server_prefix}/{fusion_host}{db_path}.\n gedb_path = self._publish_helper.BuildDbPublishPath(\n publish_def.client_host_name, publish_def.db_name)\n\n # Transfer publish manifest files into published DBs directory.\n db_path_prefix = self._publish_helper.BuildTargetPublishPath(\n gedb_path, publish_def.target_path)\n\n self._TransferPublishManifest(\n publish_manifest, db_path_prefix, publish_def.force_copy)\n except Exception:\n # Delete target's publish directory in case of any error.\n self._publish_helper.DeleteTargetPublishDir(\n publish_def.target_path,\n publish_def.client_host_name,\n publish_def.db_name)\n raise\n finally:\n # Reset PublishManagerHelper processor (deletes publish temp. 
directory\n # /tmp/publish.*).\n publish_helper.Reset()", "def export_applications(self):\n print('\\n=== Exporting all application data...')\n\n for application in self.client.applications:\n print('- Exporting application:', application.name)\n\n json = {\n 'id': self.get_id(application),\n 'href': application.href,\n 'name': application.name,\n 'description': application.description,\n 'status': application.status,\n 'createdAt': application.created_at.isoformat(),\n 'modifiedAt': application.modified_at.isoformat(),\n 'customData': self.get_custom_data(application),\n 'default_account_store_mapping': None,\n 'default_group_store_mapping': None,\n 'account_store_mappings': [],\n #'verificationEmails': [],\n }\n\n default_account_store_mapping = application.default_account_store_mapping\n default_group_store_mapping = application.default_group_store_mapping\n\n if default_account_store_mapping:\n json['default_account_store_mapping'] = {\n 'id': application.default_account_store_mapping.href.split('/')[-1],\n 'href': application.default_account_store_mapping.href,\n 'type': application.default_account_store_mapping.account_store.__class__.__name__,\n 'name': application.default_account_store_mapping.account_store.name,\n 'list_index': application.default_account_store_mapping.list_index,\n }\n\n if default_group_store_mapping:\n json['default_group_store_mapping'] = {\n 'id': application.default_group_store_mapping.href.split('/')[-1],\n 'href': application.default_group_store_mapping.href,\n 'type': application.default_group_store_mapping.account_store.__class__.__name__,\n 'name': application.default_group_store_mapping.account_store.name,\n 'list_index': application.default_group_store_mapping.list_index,\n }\n\n for account_store_mapping in application.account_store_mappings:\n json['account_store_mappings'].append({\n 'id': self.get_id(account_store_mapping),\n 'href': account_store_mapping.href,\n 'account_store': {\n 'type': account_store_mapping.account_store.__class__.__name__,\n 'id': self.get_id(account_store_mapping.account_store),\n 'href': account_store_mapping.account_store.href,\n 'name': account_store_mapping.account_store.name,\n 'description': account_store_mapping.account_store.description,\n 'status': account_store_mapping.account_store.status,\n },\n 'list_index': account_store_mapping.list_index,\n 'is_default_account_store': account_store_mapping.is_default_account_store,\n 'is_default_group_store': account_store_mapping.is_default_group_store,\n })\n\n tenant = self.get_id(application.tenant)\n self.write('%s/%s/applications/%s' % (self.location, tenant, json['id']), json)\n\n print('=== Done!\\n')", "def get_integrations_userapps(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'sort_by', 'expand', 'next_page', 'previous_page', 'app_host']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_userapps\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/userapps'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'expand' in params:\n query_params['expand'] = params['expand']\n if 
'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n if 'app_host' in params:\n query_params['appHost'] = params['app_host']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='UserAppEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response" ]
[ "0.6542337", "0.65027314", "0.64569217", "0.6388464", "0.618664", "0.6088148", "0.6048241", "0.60335594", "0.60146016", "0.6012545", "0.5858998", "0.5831379", "0.57853997", "0.5776328", "0.5764713", "0.57061666", "0.5654142", "0.56492996", "0.56406236", "0.56354034", "0.56090736", "0.5595303", "0.5546365", "0.55348814", "0.5506419", "0.5478128", "0.5460476", "0.54420274", "0.54384553", "0.5433034", "0.5432172", "0.53806525", "0.537259", "0.536318", "0.5326883", "0.53188485", "0.53093916", "0.53064173", "0.53011274", "0.5299841", "0.52969056", "0.52845246", "0.525004", "0.52477145", "0.52393776", "0.5238079", "0.519403", "0.5187403", "0.5185733", "0.5179846", "0.51681006", "0.5157981", "0.51533973", "0.5149549", "0.51321834", "0.51092213", "0.5108948", "0.51073277", "0.510459", "0.5096653", "0.5079635", "0.5067096", "0.50597733", "0.505892", "0.50549895", "0.50517917", "0.5027571", "0.50179434", "0.5009164", "0.4988635", "0.4984753", "0.4977281", "0.49739194", "0.49687636", "0.49650326", "0.49475238", "0.49445912", "0.49433902", "0.49373278", "0.4937069", "0.49311772", "0.49227613", "0.49154887", "0.4892078", "0.48891482", "0.48852205", "0.4881458", "0.4881182", "0.48767203", "0.48525178", "0.48504743", "0.48490724", "0.48484406", "0.48447782", "0.48434374", "0.4829865", "0.48256403", "0.48232833", "0.481993", "0.48180205" ]
0.66877913
0
Displays info about the application selected
def display_app_info(config, client, app_id): try: resp = client.get_app_full_info(config.username, app_id) result = resp.json() app_info = result["app_info"] title = click.style("App Name : ", fg="blue") + click.style( "{}".format(app_info["title"])) if app_info["rating_count"] == 0: rating = "Not yet rated" else: rating = "{:.1f} ({} rating".format(app_info["average_rating"], int(app_info["rating_count"])) if app_info["rating_count"] > 1: rating += "s" rating += ")" rating_row = click.style("Rating : ", fg="blue") + click.style("{}".format(rating)) up_status = click.style("Status : ", fg="blue") if app_info["is_up"]: up_status += click.style("Up") else: up_status += click.style("Down") last_crawl_str = "Not yet crawled" if "last_crawl" in app_info: last_crawl_str = util.format_date(app_info["last_crawl"]) last_crawl = click.style("Last Crawl Time : ", fg="blue") + click.style( "{}".format(last_crawl_str)) version = click.style("Version : ", fg="blue") + click.style( "{}".format(app_info["version"])) last_updated_str = util.format_date(app_info["updated"]) last_update = click.style("Last Update : ", fg="blue") + click.style( "{}".format(last_updated_str)) availability = click.style("Availability : ", fg="blue") + click.style( "{:.2f}%".format(app_info["average_uptime"] * 100)) app_url = click.style("Public App URL : ", fg="blue") + click.style( "{}".format(app_info["app_url"])) original_url = click.style("Private App URL : ", fg="blue") + click.style( "{}".format(app_info["original_url"])) category = click.style("Category : ", fg="blue") + click.style( "{}".format(app_info["category"])) desc = click.style("Description : ", fg="blue") + click.style( "{}".format(app_info["description"])) price = click.style("Price Range : ", fg="blue") + click.style( "{} - {} Satoshis").format( app_info["min_price"], app_info["max_price"]) doc_url = click.style("Docs URL : ", fg="blue") + click.style( "{}".format(app_info["docs_url"])) quick_start = click.style("Quick Start\n\n", fg="blue") + click.style( app_info["quick_buy"]) usage_docs = None if "usage_docs" in app_info: usage_docs = click.style("Detailed usage\n\n", fg="blue") + click.style( app_info["usage_docs"]) page_components = [title, "\n", rating_row, up_status, availability, last_crawl, last_update, version, "\n", desc, app_url, original_url, doc_url, "\n", category, price, "\n", quick_start, "\n"] if usage_docs: page_components.append(usage_docs + "\n") final_str = "\n".join(page_components) logger.info(final_str, pager=True) except ServerRequestError as e: if e.status_code == 404: logger.info( "The specified id for the app ({}) does not match any apps in the " "marketplace.".format(app_id)) else: raise e
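A minimal usage sketch for the `display_app_info` routine above, assuming it is importable from the surrounding module; `StubResponse`, `StubClient`, and the sample payload are illustrative stand-ins inferred from the keys the routine reads, not part of the original source.

from types import SimpleNamespace

class StubResponse:
    """Hypothetical stand-in for the response object the marketplace client returns."""
    def __init__(self, payload):
        self._payload = payload

    def json(self):
        # display_app_info expects a dict of the form {"app_info": {...}}
        return self._payload

class StubClient:
    """Hypothetical client exposing the get_app_full_info call used above."""
    def get_app_full_info(self, username, app_id):
        # Payload keys mirror what display_app_info reads; values are sample data only.
        return StubResponse({"app_info": {
            "title": "Example App",
            "rating_count": 0,
            "average_rating": 0.0,
            "is_up": True,
            "version": "1.0.0",
            "updated": "2017-01-01T00:00:00Z",
            "average_uptime": 0.999,
            "app_url": "https://example.com/app",
            "original_url": "https://example.com/private",
            "category": "Utilities",
            "description": "An example application.",
            "min_price": 100,
            "max_price": 1000,
            "docs_url": "https://example.com/docs",
            "quick_buy": "curl https://example.com/app",
        }})

config = SimpleNamespace(username="alice")
# display_app_info(config, StubClient(), "app-id-123")  # assumes the routine above is in scope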
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_info():\r\n app = application.Application()\r\n\r\n app.start(r\"C:\\\\AL50022\\\\Circ\\\\bin\\\\Circ.exe\")\r\n\r\n app.Circ.menu_select(\"View\")", "def on_about(self):\n MessageBox.showinfo(\"SuperSID\", self.controller.about_app())", "def program_info():\n\n print(\n color.GREEN\n + color.UNDERLINE\n + color.BOLD\n + \"Program Info Center:\\n\"\n + color.END\n )\n print(\n color.UNDERLINE\n + color.BOLD\n + \"About The Program:\"\n + color.END\n + \" This program works with the Blockchain-19 protocols defined within it's respective project. Blockchain-19 is an adaptation of the cryptocurrency blockchain or the Blockchain game used for education purposes, instead relating the content on the Blockchain to COVID-19. Given patient information the program can calculate the hashes within the Blockchain, creating a solved ledger. The program offers users the option of creating a new ledger or importing a previously exported ledger.\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Necessary Patient Info:\"\n + color.END\n + \"\\n* Hospital \\n* Patient ID \\n* Current Status\\n\"\n )\n\n print(\n color.UNDERLINE\n + color.BOLD\n + \"Current Patient Status Key:\"\n + color.END\n + \"\\n* A = Admitted \\n* B = Stable \\n* C = Moderate \\n* D = Severe \\n* E = Discharged \\n* F = ICU\\n\\n\"\n )", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def show(ctx, appeui):\n if '.' in appeui:\n appeui = str(hexStringInt(str(appeui)))\n \n # Form the url and payload\n server = ctx.obj['server']\n payload = {'token': ctx.obj['token']}\n url = 'http://{}/api/v{}'.format(server, str(version))\n url += '/apps' if appeui == 'all' else '/app/{}'.format(appeui)\n \n # Make the request\n data = restRequest(server, url, 'get', payload, 200)\n if data is None:\n return\n \n # Single application\n if appeui != 'all':\n a = data\n indent = ' ' * 10\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('Application EUI: ' + euiString(a['appeui']))\n click.echo('{}name: {}'.format(indent, a['name']))\n click.echo('{}domain: {}'.format(indent, a['domain']))\n click.echo('{}fport: {}'.format(indent, a['fport']))\n click.echo('{}interface: {}'.format(indent, a['appinterface_id']))\n if a['appinterface_id'] != '-':\n click.echo('{}Properties:'.format(indent))\n properties = sorted(a['properties'].values(), key=lambda k: k['port'])\n for p in properties:\n click.echo('{} {} {}:{}'.format(indent, p['port'], p['name'], p['type']))\n return\n \n # All applications\n click.echo('{:14}'.format('Application') + \\\n '{:24}'.format('AppEUI') + \\\n '{:15}'.format('Domain') + \\\n '{:6}'.format('Fport') + \\\n '{:10}'.format('Interface'))\n for i,a in data.iteritems():\n if a['appinterface_id'] == 0:\n a['appinterface_id'] = '-'\n if a['domain'] is None:\n a['domain'] = '-'\n click.echo('{:13.13}'.format(a['name']) + ' ' + \\\n '{:23}'.format(euiString(a['appeui'])) + ' ' + \\\n '{:14.14}'.format(a['domain']) + ' ' + \\\n '{:5.5}'.format(str(a['fport'])) + ' ' + \\\n '{:10}'.format(str(a['appinterface_id'])))", "def app_title():\n print(\"*\" * 27)\n print(\" Stock App\")\n print(\"*\" * 27)", "def 
showInfoWindow():\n\treturn 0", "def show():\n info(str(Project))", "def view_app(request, pk):\n\n app = get_object_or_404(MacOSApp, pk=pk)\n\n context = {'app': app}\n return render(request, 'mdm/app_detail.html', context)", "def select_app():\n panel = Cocoa.NSOpenPanel.openPanel()\n panel.setCanChooseFiles_(True)\n panel.setCanChooseDirectories_(True)\n panel.setResolvesAliases_(True)\n\n if(panel.runModal() == Cocoa.NSOKButton):\n pathArray = panel.filenames()\n path = pathlib.Path(pathArray[0])\n\n plistPath = path /'Contents'/'Info.plist'\n infoFile = plistPath\n\n try:\n appSize = subprocess.check_output(['du', '-shg', str(path)]).split()[0].decode('utf-8')\n n.views['appSize'].setStringValue_(str(appSize))\n except Exception as err:\n print(err)\n\n n.views['appLocation'].setStringValue_(str(path))\n\n try:\n plist = str(infoFile)\n with open(plist, 'rb') as f:\n info = plistlib.load(f)\n\n if 'CFBundleName' in info:\n global collectedName\n collectedName = info['CFBundleName']\n n.views['appName'].setStringValue_(collectedName)\n else:\n n.views['appName'].setStringValue_('')\n\n if 'CFBundleShortVersionString' in info:\n global collectedVersion\n collectedVersion= info['CFBundleShortVersionString']\n n.views['appVersion'].setStringValue_(collectedVersion)\n else:\n n.views['appVersion'].setStringValue_('')\n\n if 'CFBundleIconFile' in info:\n global collectedIcon\n collectedIcon = pathlib.Path(plist).parent / 'Resources' / info['CFBundleIconFile']\n n.views['appIcon'].setStringValue_(str(collectedIcon))\n else:\n n.views['appIcon'].setStringValue_('')\n\n if 'CFBundleIdentifier' in info:\n global collectedIdentifier\n collectedIdentifier = info['CFBundleIdentifier']\n n.views['appIdentifier'].setStringValue_(collectedIdentifier)\n else:\n n.views['appIdentifier'].setStringValue_('')\n\n except Exception as err:\n print('An Error Occured: {0}'.format(err))", "def verbose_app_label(request):\n # import ipdb; ipdb.set_trace()\n \n # iterate through the app_list looking for a corresponding app with\n # a VERBOSE_APP_LABEL\n \n return {}", "def open_application(self):\n __index = self.ui.comboBox.currentIndex()\n # print(self.plugins.name_application[__index])\n self.plugins.application(self, __index)\n self.hide()", "def aboutmenu(self):\n tkMessageBox.showinfo(\"About This Program\", \"The project of PSIT subject in 2014.\\nThis program is unit converter program.\")", "def get_app_info(app_list, info_list):\n\n app_names = [app.__name__ for app in app_list]\n for app in info_list:\n if app in app_names:\n class_obj = next(i for i in app_list if i.__name__ == app)\n print(app)\n print(' {}'.format(class_obj.__doc__))\n print(' setup args: {}'.format(ARGS.get(app)))\n print(' setup kwargs: {}'.format(KWARGS.get(app)))\n print('')\n\n else:\n print('App {} does not exist'.format(app.__name__))", "def show(appname):\n z = Zap(appname)\n z.show()", "def show_version():\n print(\"===============================================================\")\n print(f\"Productivity App v{__VERSION__}\")\n print(f\"Made by {__AUTHOR__} (with the advices of {__ADVISOR__})\")\n print(\"Source : https://github.com/Ilade-s/productivite-app-TkVer\")\n print(\"Server (optionnal) : https://github.com/Tifiloow/productivite-app\")\n print(\"Assets : https://feathericons.com/\")\n print(\"===============================================================\")", "def show_info(self):\n txt = \"Brand: %s\\nModel: %s\\nHostname: %s\\n\"%(self.brand, self.model, self.hostname)\n return txt", "def displayShow(app, 
*options):\n\n inNb = app.inNb\n _browse = app._browse\n display = app.display\n display.setup()\n data = display.current\n return showDict(\"<b>current display options</b>\", data, _browse, inNb, *options)", "def show_about(self):\n\n msg = f\"Program: {__program__}\\nVersion: {__version__}\\nDate: {__date__}\"\n self._message_information(\"About\", msg)", "def about_developer(self):\r\n self.pop_window(title=\"About\", \r\n msg=\"ChikonEye Version: 2.0.1 \\nDeveloper Info:\\nName : Ashraf Minhaj \\nEmail : ashraf_minhaj@yahoo.com \\nsite : ashrafminhajfb.blogspot.com \\nyouTube : fusebatti\")", "def process_app_info(self):\n pass", "def applicationsdetails():\n appdicts = db.hgetall('applications')\n finaldict = OrderedDict()\n for appname in sorted(appdicts):\n instances = json.loads(appdicts.get(appname))\n instance_map = OrderedDict()\n for key in sorted(instances):\n instance_map.__setitem__(key,instances.get(key))\n finaldict.__setitem__(appname,instance_map)\n return render_template('robots.html', appdicts=finaldict)", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def print_app_data(self):\n print(\"===================================\")\n print(\"== RESULTS: ==\")\n print(\"===================================\")\n\n # Analog application results\n print(\"--------------------------\")\n print(\"-- Analog applications --\")\n print(\"--------------------------\")\n print(\"Number of analog application processed: {}\".format(len(self.analog_apps)))\n if (self.verbose):\n for app in self.analog_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Bay number : {}\".format(device[\"bay_number\"]))\n print(\" - Channel number : {}\".format(device[\"channel_number\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of faults : {}\".format(len(device[\"faults\"])))\n for fault_id,fault_data in device[\"faults\"].items():\n print(\" Fault data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}_T{}\".format(fault_data[\"name\"], fault_data[\"bit_positions\"][0]))\n print(\" - ID : {}\".format(fault_id))\n print(\" - Name : {}\".format(fault_data[\"name\"]))\n print(\" - Description : {}\".format(fault_data[\"description\"]))\n print(\" - Bit positions : {}\".format(fault_data[\"bit_positions\"]))\n print(\" . . . . . . . 
. . . . . \")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"--------------------------\")\n\n # Digital application result\n print(\"----------------------------\")\n print(\"-- Digital applications --\")\n print(\"----------------------------\")\n print(\"Number of digital application processed: {}\".format(len(self.digital_apps)))\n if (self.verbose):\n for app in self.digital_apps:\n print(\" Application data:\")\n print(\" - - - - - - - - - - - - -\")\n print(' - EPICS PREFIX: MPLN:{}:{}:{}'.format(app[\"link_node_area\"].upper(), app[\"link_node_location\"].upper(), app[\"card_index\"]))\n print(\" - App ID : {}\".format(app[\"app_id\"]))\n print(\" - Cpu name : {}\".format(app[\"cpu_name\"]))\n print(\" - Crate ID : {}\".format(app[\"crate_id\"]))\n print(\" - Slot number : {}\".format(app[\"slot_number\"]))\n print(\" - Link node name : {}\".format(app[\"link_node_name\"]))\n print(\" - Link node area : {}\".format(app[\"link_node_area\"]))\n print(\" - Link node location : {}\".format(app[\"link_node_location\"]))\n print(\" - Card index : {}\".format(app[\"card_index\"]))\n print(\" - Number of devices : {}\".format(len(app[\"devices\"])))\n for device in app[\"devices\"]:\n print(\" Device data:\")\n print(\" .....................\")\n print(\" - EPICS PREFIX: {}:{}:{}\".format(device[\"type_name\"], device[\"area\"], device[\"position\"]))\n print(\" - Type name : {}\".format(device[\"type_name\"]))\n print(\" - Area : {}\".format(device[\"area\"]))\n print(\" - Position : {}\".format(device[\"position\"]))\n print(\" - Number of inputs : {}\".format(len(device[\"inputs\"])))\n for input in device[\"inputs\"]:\n print(\" Input data:\")\n print(\" . . . . . . . . . . . . \")\n print(\" - EPICS PREFIX: {}\".format(input[\"name\"]))\n print(\" - Name : {}\".format(input[\"name\"]))\n print(\" - Bit position : {}\".format(input[\"bit_position\"]))\n print(\" - Zero name : {}\".format(input[\"zero_name\"]))\n print(\" - One name : {}\".format(input[\"one_name\"]))\n print(\" - Alarm state : {}\".format(input[\"alarm_state\"]))\n print(\" - Debounce : {}\".format(input[\"debounce\"]))\n print(\" . . . . . . . . . . . . 
\")\n print(\" .....................\")\n print(\" - - - - - - - - - - - - -\")\n print(\"\")\n print(\"----------------------------\")\n\n\n print(\"===================================\")\n\n print('Found {} link nodes:'.format(len(self.link_nodes)))\n for k,v in self.link_nodes.items():\n print('{}: {}'.format(k, v['type']))", "def show_about():\r\n\tmsg = messagebox\r\n\tmsg.showinfo(\"\", '''Creator: Ellis, Kevin\r\nOrganization: n/a\r\nDescription: Retrieve the network information from a database\r\nDate: 2020208\r\nVersion: 1.4''')", "def show_home(self):\n print(self.home.name)", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def show_about():\n messagebox.showinfo(\n title='About', message=\"PyLNP - Lazy Newb Pack Python Edition\\n\\n\"\n \"Port by Pidgeot\\n\\nOriginal program: LucasUP, TolyK/aTolyK\")", "def show_help():\n messagebox.showinfo(title='How to Use', message=\"It's really easy.\")", "def ls():\n cfgmgr = ConfigManager()\n apps = cfgmgr['apps']\n for i in apps:\n print(fc(\"- {g}{appname}{rst}\", appname=i))", "def info(k=None):\n global program\n if program is None:\n print \"no program is loaded\"\n return\n infos = program.info()\n if k is None:\n for k in infos.keys():\n val = infos[k]\n if isinstance(val, int):\n print \"{:20} : 0x{:x}({})\".format(k, val, val)\n else:\n print \"{:20} : {}\".format(k, val)\n elif k in infos:\n print \"{:20} : {}\".format(k, infos[k])\n else:\n print \"no such entry\"", "def open_info_dialog(self):\n info_dialog = InfoDialog()\n info_dialog.exec_()", "def info ():\n\n info = {\n 'name' : app.config['APPLICATION_NAME'],\n 'short_name' : app.config['APPLICATION_SHORT_NAME'],\n 'main_page_url' : app.config['APPLICATION_MAIN_URL'],\n # 'css_url' : app.config.get ('APPLICATION_CSS_URL', ''),\n 'css' : 'span.smalltext { font-size: smaller }',\n 'supported_langs_query' : [ LANG ],\n }\n return make_json_response (info)", "def get_app_info(self, name):\n with hide(\"output\", \"running\"):\n result = local(\"redis-cli -h {host} -p 6379 -n {db} hgetall {name}\".format(\n host=self.host, name=name, db=REDIS_APPLICATION_DB_NUM), capture=True)\n\n if len(result.stdout) > 0:\n splits = result.stdout.split(\"\\n\")\n fmt_result = dict([(splits[i], splits[i+1])\n for i in range(0, len(splits), 2)])\n pp = pprint.PrettyPrinter(indent=2)\n pp.pprint(fmt_result)\n return fmt_result\n else:\n warn(\"Application \\\"%s\\\" not found\" % name)\n return None", "def about():\n return render_template(\n 'about.html',\n title='Tools',\n year=datetime.now().year,\n message='Your application description page.'\n )", "def view_system():\n\n pass", "def version_option():\n print \"%s %s <%s>\" % (PROGRAM_NAME,PROGRAM_VERSION,PROGRAM_SOURCE)", "def app_details(request, object_id):\n app = Application.objects.get(pk=object_id)\n app_class = BOOTSTRAP_LABEL.get(app.app_status.all()[0].name, '') # all()[0] for bogus M2M\n rels = Application.objects.filter(acronym=app.acronym).values('id', 'release', 'app_status__name').order_by('release').distinct() # worthless 'distinct'\n 
releases = []\n # Is there a away to do this to 'rels' in place, or with a comprehension?\n for rel in rels:\n rel.update({'app_class': BOOTSTRAP_LABEL.get(rel.pop('app_status__name'))})\n releases.append(rel)\n return render_to_response('application/application_details.html',\n {'app': app,\n 'app_class': app_class,\n 'releases': releases,\n 'bootstrap_label': BOOTSTRAP_LABEL,\n 'search_suggestions': _search_suggestions(),\n },\n context_instance=RequestContext(request));", "def __setDetails(self):\n self.MainWindow.setWindowTitle(\"{0} {1}\".format(\n const.APP_NAME, const.VERSION))\n return True", "def update_app_info(request, pk):\n context = {}\n\n app = get_object_or_404(MacOSApp, pk=pk)\n\n if request.method == 'POST':\n form = UpdateAppForm(request.POST, instance=app)\n if form.is_valid():\n if request.POST['save'] == \"Save Changes\":\n form.save()\n messages.success(request, \"Application info updated successfully\")\n elif request.POST['save'] == \"Merge\":\n form.save()\n return HttpResponseRedirect(reverse(\"mdm:merge-app\", args=[app.pk]))\n else:\n app = form.instance\n return HttpResponseRedirect(reverse(\"mdm:remove-app\", args=[app.pk]))\n return HttpResponseRedirect(reverse(\"mdm:apps\"))\n else:\n form = UpdateAppForm(instance=app)\n context['form'] = form\n context['msg'] = \"Application Info\"\n return render(request, 'form_crispy.html', context)", "def app_list(request):\n return render(request, 'mdm/app_list.html', {})", "def print_mini_help(app_name):\n print \"\\nExecute the script with either '-h' or '--help' to obtain detailed help on how to run the script:\"\n print 'python {0} -h'.format(app_name)\n print \"or\"\n print 'python {0} --help\\n'.format(app_name)", "def pr_info(self):\n process = self.backend.get_process(str(self.processBox.currentText()))\n\n if not process:\n return\n\n self.infoWindow2 = QDialog(parent=self)\n hbox2 = QHBoxLayout()\n info_box = QTextEdit()\n\n if process.returns:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description) + \"\\n\\n Returns: \\n\" +\n str(process.get_return_type()) + \"\\n\" + process.returns[\"description\"]))\n else:\n info_box.setText(\n str(str(process.id) + ': ' + str(process.description)))\n\n info_box.setReadOnly(True)\n info_box.setMinimumWidth(500)\n info_box.setMinimumHeight(500)\n hbox2.addWidget(info_box)\n self.infoWindow2.setLayout(hbox2)\n self.infoWindow2.setWindowTitle('Process Information')\n self.infoWindow2.show()", "def print_application_name_and_id_list(self, application_list):\n if not application_list:\n print \"Application list is empty\"\n return\n\n for item in application_list.items:\n print \"{} => {}\".format(item.application_id, item.application_name)", "def about():\r\n url = 'https://engineering.tau.ac.il/tauengalumni'\r\n source_url = 'https://github.com/EranPer/tauengalumni'\r\n\r\n layout = [[sg.Text('TAU Engineering Alumni Registering and Sticker Printing System.')],\r\n [sg.Text('Made by Eran Perelman. 
2021')],\r\n [sg.Text('TAU Engineering Alumni Website',\r\n enable_events=True, key='-LINK-', font=('Arial underline', 11))],\r\n [sg.Text('Source Code',\r\n enable_events=True, key='-SOURCE_CODE-', font=('Arial underline', 11))],\r\n [sg.B('Ok')]]\r\n\r\n window = sg.Window(\"TAU Engineering Alumni Registering and Sticker Printing System\", layout)\r\n\r\n while True:\r\n event, values = window.read()\r\n if event == '-LINK-':\r\n webbrowser.open(url)\r\n if event == '-SOURCE_CODE-':\r\n webbrowser.open(source_url)\r\n if event == 'Ok':\r\n break\r\n\r\n window.close()", "def prog_info( self ):\r\n fll = AppGlobal.force_log_level\r\n logger = self.logger\r\n\r\n a_str = \"\"\r\n if ( self.no_restarts == 0 ) :\r\n\r\n a_str = f\"{a_str}\\n\"\r\n a_str = f\"{a_str}\\n\"\r\n a_str = f\"{a_str}\\n============================\"\r\n a_str = f\"{a_str}\\n\"\r\n\r\n a_str = f\"{a_str}\\nRunning {self.app_name} version = {self.version} mode = {self.parameters.mode}\"\r\n a_str = f\"{a_str}\\n\"\r\n #logger.log( fll, a_str )\r\n\r\n # ================================\r\n # logger.log( fll, \"\" ) # not really critical but want to show up would a number be better ?\r\n # logger.log( fll, \"\" )\r\n # logger.log( fll, \"============================\" )\r\n # logger.log( fll, \"\" )\r\n\r\n # logger.log( fll, \"Running \" + self.app_name + \" version = \" + self.version + \" mode = \" + self.parameters.mode )\r\n # logger.log( fll, \"\" )\r\n\r\n else:\r\n #a_str = \"\"\r\n a_str = f\"{a_str}\\n======\"\r\n a_str = f\"{a_str}\\nRestarting {self.app_name} version = {self.version} mode = {self.parameters.mode}\"\r\n a_str = f\"{a_str}\\n======\"\r\n\r\n if len( sys.argv ) == 0:\r\n a_str = f\"{a_str}\\nno command line arg \"\r\n else:\r\n for ix_arg, i_arg in enumerate( sys.argv ):\r\n a_str = f\"{a_str}\\ncommand line arg { ix_arg } = {sys.argv[ix_arg]}\"\r\n\r\n a_str = f\"{a_str}\\ncurrent directory {os.getcwd()}\"\r\n # a_str = f\"{a_str}\\nCOMPUTERNAME {os.getenv( 'COMPUTERNAME' )}\" # may not exist in now in running on\r\n logger.log( fll, a_str )\r\n\r\n logger.log( fll, f\"{self.parameters}\" )\r\n\r\n a_str = self.parameters.running_on.get_str()\r\n logger.log( fll, a_str )\r\n # next may not be best way or place\r\n # self.parameters.running_on.log_me( logger, logger_level = AppGlobal.force_log_level, print_flag = True )\r\n\r\n start_ts = time.time()\r\n dt_obj = datetime.datetime.utcfromtimestamp( start_ts )\r\n string_rep = dt_obj.strftime('%Y-%m-%d %H:%M:%S')\r\n logger.log( fll, f\"Time now: {string_rep}\" ) # but logging includes this in some format\r\n\r\n return", "def showinfo(self, msg):\n tkinter.messagebox.showinfo('Information', msg)", "async def info(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n appinfo = await self.client.application_info()\n membercount = sum(1 for x in self.client.get_all_members())\n info_embed = discord.Embed(title=f\"Miso Bot | version {main.version}\",\n description=f\"Created by {appinfo.owner.mention}\\n\\n\"\n f\"Use `{self.client.command_prefix}help` to get the list of commands, \"\n f\"or visit the documention website for more help.\"\n f\"\\n\\nCurrently active in **{len(self.client.guilds)}** \"\n f\"servers totaling **{membercount}** unique users\",\n colour=discord.Colour.red())\n\n # info_embed.set_footer(text=f'version 2.0')\n info_embed.set_thumbnail(url=self.client.user.avatar_url)\n info_embed.add_field(name='Github', value='https://github.com/joinemm/miso-bot', inline=False)\n info_embed.add_field(name='Documentation', 
value=\"http://joinemm.me/misobot\", inline=False)\n info_embed.add_field(name='Patreon', value=\"https://www.patreon.com/joinemm\", inline=False)\n await ctx.send(embed=info_embed)", "def open_app(device, package_name):\n\n device.shell('am start -n ' + package_name + '/' + package_name + \".modules.overview.screen\" +\n '.OverviewActivity')", "def manage_info():", "def info():\n print(\"Made using the OOP RPG game creator (c) Claire.\\n\")", "def show(self):\n pass", "def info(msg):\n click.secho(msg, fg='blue')", "def process_info(process):\n\thelp(process)", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def show(self):\n\n pass", "def show_info(title, message):\n\n pass", "def show(self) -> None:", "def info_cmd(args):\n livebox_info()", "def about(display=True):\n\n ABOUT_TEXT = \"\"\"\nPre-release version %s (%s) of Topographica; an updated\nversion may be available from topographica.org.\n\nThis program is free, open-source software available under the BSD\nlicense (http://www.opensource.org/licenses/bsd-license.php).\n\"\"\"%(release,version)\n if display:\n print ABOUT_TEXT\n else:\n return ABOUT_TEXT", "def about(self):\t\t\n\t\tQMessageBox.about(self,self.tr(\"Sobre o programa\"),\n\t\t\tself.tr(\"<center>O programa foi feito como trabalho da disciplina de CG em 2009 por Le&ocirc;nidas S. Barbosa(kirotawa)</center>\"))", "def about():\n\n\treturn render_template('about.html', title='About',\n\t\t\t\t\t\t year=datetime.now().year,\n\t\t\t\t\t\t message='Your application description page.')", "def appconfig():\r\n print('''\\n%s at %s acting as user %s\r\n\\nApp Configuration Menu''' % (PACKETMASTER.model, ADDRESS, USERNAME))\r\n choice = moves.input('''\r\n 1 - List Apps\r\n 2 - List Running Apps\r\n 3 - Start an App instance\r\n 4 - Modify an App instance\r\n 5 - Kill an App instance\r\n 6 - Call a custom App action\r\n 7 - Back\r\n 8 - Quit \\n\r\n Enter selection number: ''')\r\n try:\r\n choice = int(choice)\r\n except ValueError as reason:\r\n print(\"That is not a valid selection.\", reason)\r\n appconfig()\r\n execute = {1: PACKETMASTER.device_apps,\r\n 2: PACKETMASTER.apps_active,\r\n 3: PACKETMASTER.start_app_guided,\r\n 4: PACKETMASTER.mod_app_guided,\r\n 5: PACKETMASTER.kill_app_guided,\r\n 6: PACKETMASTER.call_app_action_guided,\r\n 7: manage,\r\n 8: exit}\r\n if choice in execute:\r\n try:\r\n select = execute[choice]\r\n run = select()\r\n print(run)\r\n appconfig()\r\n except KeyError as reason:\r\n print(reason)\r\n else:\r\n print(\"That is not a valid selection.\")\r\n appconfig()", "def showGUI(self,**kwargs):\n self.baxter.menu.select(self.modes[0])", "def info():\n print 'Loading info page'\n\n team_list = datastore.get_all_teams(engine)\n\n return render_template('info.html', rows=team_list)", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time 
since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "def main():\n\n root = tk.Tk()\n root.title(\"Exploring US Bikeshare Data\")\n app = Application(master=root)\n print(\"Application loaded! Please use the GUI window to continue...\")\n app.mainloop()", "def appointment():\r\n return render_template(\r\n 'about.html',\r\n title='About',\r\n year=datetime.now().year,\r\n message='Your application description page.'\r\n )", "def about(args):\n print(ABOUT)", "def usage_header():\n print \"\"\"%(app_title)s v.%(app_version)s - %(app_description)s.\n\"\"\" % {\n 'app_title': constants.App.TITLE,\n 'app_version': constants.App.VERSION,\n 'app_description': constants.App.DESCRIPTION,\n }", "def homescreen(self):\r\n self.root.ids.items_box.clear_widgets()\r\n homescreen_information = Label(\r\n text='Welcome.\\n\\n\\nThis was designed by Charles to calculate the hours different\\n'\r\n 'workgroups work at CIPS and WPS.\\n\\nTo begin, press the Load Data button on'\r\n ' the \\nleft hand side of this window.', font_size=20, halign='center')\r\n self.root.ids.items_box.add_widget(homescreen_information)", "def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)", "def showInfo(parent,message,title=_('Information')):\r\n return askStyled(parent,message,title,wx.OK|wx.ICON_INFORMATION)", "def about(self):\n self.main_window.message(\n width=200, aspect=100, justify=tkinter.CENTER,\n text=\"Jeu de Ping\\n\\n\"\n \"(C) Maximin Duvillard, August 2022.\\nLicence = GPL\")", "def backend_info(self):\n backend_info = self.backend.get_metadata()\n\n if \"description\" in backend_info:\n self.backendInfo.setText(str(backend_info[\"description\"]))", "def back_to_menu_info(cls):\n print(\n \"\"\"\n ________________________________________________\n\n HABITSBOX\n ________________________________________________\n Hint: Press 0 (zero) to return to the main menu\n ------------------------------------------------\"\"\")", "def info_view():\n output = {\n 'info': 'GET /api/v1',\n 'devices': 'GET /api/v1/devices',\n 'get device': 'GET /api/v1/devices/<device>',\n 'edit device': 'PUT /api/v1/devices/<device>',\n 'color schemes': 'GET /api/v1/schemes',\n 'get color scheme': 'GET /api/v1/schemes/<scheme>',\n 'edit color scheme': 'PUT /api/v1/schemes/<scheme>',\n 'delete color scheme': 'DELETE /api/v1/schemes/<scheme>'\n }\n return jsonify(output)", "def list_apps(request, pk=0):\n context = {'items': [], 'resource_type': 'App'}\n\n if pk == 0:\n context['h2'] = \"Managed Applications\"\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n refresh_managed_software_status()\n apps = MacOSApp.objects.filter(merged_into__isnull=True).reverse()\n if not request.user.has_perm('devices.manage_apps'):\n apps = apps.filter(managed=True).exclude(installed__isnull=True, pending_install__isnull=True)\n for app in apps:\n assignment_count 
= app.pending_install.count()\n installed_on = app.installed.all()\n data = {'meta': app, 'assignment_count': assignment_count, 'installed': installed_on}\n context['items'].append(data)\n else:\n if not request.user.has_perm('devices.manage_apps'):\n raise PermissionDenied\n\n device = get_object_or_404(Laptop, pk=pk)\n context['h2'] = \"Applications on {}\".format(device.name)\n context['header_1'] = \"Developer\"\n context['header_2'] = \"Version\"\n context['device_view'] = True\n context['device_id'] = pk\n apps = MacOSApp.objects.filter(pending_install__in=[device])\n apps |= MacOSApp.objects.filter(installed__in=[device])\n for app in apps:\n status = 'Not assigned'\n for entry in app.installed.all():\n if entry == device:\n status = 'Installed'\n for entry in app.pending_install.all():\n if entry == device:\n status = 'Assigned'\n data = {'meta': app, 'status': status}\n context['items'].append(data)\n\n return render(request, 'mdm/resource_list.html', context)", "def about():\n return render_template(\n 'about.html',\n title='About',\n year=datetime.now().year,\n message='My application description page.'\n )", "def OnAbout(self, event):\n description = \"\"\"%s is a pet project written by Josh VanderLinden using\nPython, wxPython, and MetaKit. Josh created this program\nmostly just for fun, but also to learn more about the three\nthings mentioned in the last sentence.\"\"\" % APP_TITLE\n try:\n about = wx.AboutDialogInfo()\n except AttributeError, err:\n # older version of python\n wx.MessageBox(description,\n 'About This Program',\n wx.OK|wx.ICON_INFORMATION)\n else:\n about.SetIcon(utils.get_icon('tray'))\n about.SetName(APP_TITLE)\n about.SetVersion(APP_VERSION)\n about.SetDescription(description)\n about.SetCopyright(\"Copyright 2008 Josh VanderLinden\")\n about.SetDevelopers([\"Josh VanderLinden\"])\n about.SetWebSite('http://code.google.com/p/py-todo/')\n wx.AboutBox(about)", "def help(self, *args):\n\n\t\tif self.available_cmds:\n\t\t\tdir_text = \"Enter commands in the format 'cmd [args]'. Available commands: \\n\"\n\t\t\tfor cmd in self.available_cmds.keys():\n\t\t\t\tdir_text += \" -\" + cmd + \"\\n\"\n\t\telse:\n\t\t\tdir_text = \"No commands available.\"\n\n\t\tif self.available_apps:\n\t\t\tapp_txt = \"Available applications to run: \\n\"\n\t\t\tfor app in self.available_apps.keys():\n\t\t\t\tapp_txt += \" -\" + app + \"\\n\"\n\t\telse:\n\t\t\tapp_txt = \"No applications available.\"\n\n\t\tprint(dir_text + \"\\n\" + app_txt + \"\\n\")", "def main_menu(self):\n welcome = \"\"\"\n ************************\n * WELCOME TO CARSHARE! 
*\n ************************\n \"\"\"\n intro = \"Are you a USER or an ENGINEER?\"\n option1 = \"[1] USER\"\n option2 = \"[2] ENGINEER\"\n print(welcome, intro, option1, option2, sep='\\n')", "def current_app(x):\n try:\n display = Xlib.display.Display()\n window = display.get_input_focus().focus\n wmclass = window.get_wm_class()\n\n if wmclass is None:\n window = window.query_tree().parent\n wmclass = window.get_wm_class()\n\n display.close()\n del display\n\n if wmclass:\n return(wmclass[1])\n else:\n return('UNKNOWN')\n except:\n return('ERROR!')", "def get_application_info( tree ):\n application_name = None\n # most machines store the machine name string in the tag 'ApplicationName'\n for application_name in tree.getroot().iter( 'ApplicationName' ):\n application_name = application_name.text\n break\n # NovaSeq stores the machine name string in the tag 'Application'\n if( application_name == None ):\n for application_name in tree.getroot().iter( 'Application' ):\n application_name = application_name.text\n break\n if( application_name == None ):\n raise ValueError( 'Unable to find Application* element in BCL RunParameters.xml' )\n\n application_version = None\n for application_version in tree.getroot().iter( 'ApplicationVersion' ):\n application_version = application_version.text\n break\n if( application_version == None ):\n raise ValueError( 'ApplicationVersion element missing in BCL RunParameters.xml' )\n\n re_models = '|'.join( application_name_dict.keys() )\n re_pattern = '(%s)' % re_models\n mobj = re.match( re_pattern, application_name )\n if( mobj == None ):\n raise ValueError( 'unrecognized ApplicationName in RunParameters.xml file' )\n instrument_model = application_name_dict[mobj.group( 1 )]\n\n # Distinguish between HiSeq models 3000 and 4000 using Andrew's(?) 
method.\n # Note: the p5 index orientations differ between these two models.\n if( instrument_model == 'HiSeq' ):\n application_major_version = int(application_version.split('.')[0])\n if application_major_version > 2:\n instrument_model = 'HiSeq4000'\n else:\n instrument_model = 'HiSeq3000'\n\n return( instrument_model, application_version )", "def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display", "def _get_app_info_Primary(self):\n return self._Primary_app_info", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <amayomordecai@gmail.com>\", fg='magenta')", "def apps():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 10058400\r\n section.page_height = 7772400\r\n document.add_heading('Applications', level=1)\r\n apps = get_qlik_sense.get_apps()\r\n num_of_apps = len(apps)\r\n table = document.add_table(rows=num_of_apps+1, cols=7)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'App name'\r\n row.cells[1].text = 'App description'\r\n row.cells[2].text = 'Publish time'\r\n row.cells[3].text = 'Stream'\r\n row.cells[4].text = 'File size'\r\n row.cells[5].text = 'Owner userId'\r\n row.cells[6].text = 'Owner userName'\r\n for app in range(num_of_apps):\r\n row = table.rows[app+1]\r\n row.cells[0].text = str(apps[app][0])\r\n row.cells[1].text = str(apps[app][1])\r\n row.cells[2].text = str(apps[app][2])\r\n row.cells[3].text = str(apps[app][3])\r\n row.cells[4].text = str(apps[app][4])\r\n row.cells[5].text = str(apps[app][5])\r\n row.cells[6].text = str(apps[app][6])\r\n document.add_page_break()", "def help():\n print(UI.HELP)", "def display_library_info():\n print \"in display library info \\n\"\n library_list = model.get_libraries_info(model.db_session, session)\n return render_template('library.html', libraries=library_list)", "def getAppInfo(self):\n data = self._client.Application.find(self.app_id)\n return data", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def aboutAction(self):\n about = UI_about.UI_about()\n about.exec_()\n\n return", "def on_choose_program(self, widget, data = None):\n\t\tdialog = gtk.FileChooserDialog(_(\"Choose a Program\"), action = gtk.FILE_CHOOSER_ACTION_OPEN, buttons = (gtk.STOCK_OPEN, gtk.RESPONSE_ACCEPT, gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL))\n\n\t\tif dialog.run() == gtk.RESPONSE_ACCEPT:\n\t\t\tself.pm_cmd.set_text(dialog.get_filename())\n\t\tdialog.destroy()", "def about ():\n easygui.textbox(title='About olebrowse', text=__doc__)", "def display_menu():\n print(\"\"\"\\nChoose 
option:\n (1) List statistics\n (2) Display 3 cities with longest names\n (3) Display county's name with the largest number of communities\n (4) Display locations, that belong to more than one category\n (5) Advanced search\n (0) Exit program\"\"\")", "def do_info(self, args):\n if self.exploit is None:\n eprint(colorize('No exploit set; nothing to describe. Select an exploit with the \\'use\\' command',\n 'cyan'))\n else:\n eprint(colorize('\\n ' + self.exploit.DESCRIPTION + '\\n', 'green'))", "def About(referenceid,window):\n try:\n content = []\n appbuttons = getAppButtons(window)\n atomacclick(appbuttons[2])\n newappbuttons = getAppButtons(window)\n time.sleep(5)\n atomacclick(newappbuttons[11])\n childwindow = getChildwindows(referenceid)\n staticobjects = childwindow.staticTextsR()\n time.sleep(5)\n for i in staticobjects:\n content.append(i.AXValue)\n except Exception as er:\n return False\n return content", "def displayhelp(self):\n helper = HelpView(self)\n helper.activateWindow()\n helper.exec()\n self.activateWindow()" ]
[ "0.7198173", "0.6656836", "0.66274256", "0.6559104", "0.6542044", "0.6500156", "0.6425056", "0.6402444", "0.63951284", "0.6386804", "0.6373861", "0.63630366", "0.6341547", "0.63411224", "0.63381755", "0.63182807", "0.63173515", "0.62924206", "0.62774503", "0.6272408", "0.6257944", "0.62225115", "0.61792505", "0.6134508", "0.6107996", "0.6103502", "0.6067807", "0.6031984", "0.59904546", "0.59788424", "0.5975205", "0.5972346", "0.5965502", "0.5940332", "0.59024", "0.5880955", "0.58728576", "0.58717996", "0.5859943", "0.5842941", "0.58417314", "0.58330065", "0.5832867", "0.5830307", "0.5829835", "0.58289933", "0.5827453", "0.58204985", "0.5811049", "0.58072656", "0.57984203", "0.5798305", "0.57887924", "0.5783499", "0.57756734", "0.5768735", "0.5755907", "0.5752315", "0.57463264", "0.57425827", "0.57322276", "0.57298255", "0.572616", "0.5717025", "0.5707395", "0.57005316", "0.57002276", "0.5696272", "0.56936383", "0.5691329", "0.56892735", "0.5688501", "0.56848574", "0.56795865", "0.5679514", "0.5674225", "0.5673605", "0.5648773", "0.5647746", "0.56460726", "0.5645602", "0.5643516", "0.5635332", "0.5626934", "0.56234306", "0.56214553", "0.5616894", "0.5616515", "0.56136346", "0.5604518", "0.55999684", "0.55892545", "0.5586866", "0.55860794", "0.55846065", "0.55825675", "0.55820286", "0.5572534", "0.5564684", "0.55614966" ]
0.66377944
2
Runs validate_manifest and handles any errors that could occur
def check_app_manifest(api_docs_path, overrides, marketplace):
    if not os.path.exists(api_docs_path):
        raise exceptions.ValidationError(
            click.style("Could not find the manifest file at {}.", fg="red").format(api_docs_path))

    if os.path.isdir(api_docs_path):
        raise exceptions.ValidationError(
            click.style("{} is a directory. Please enter the direct path to the manifest file.",
                        fg="red").format(api_docs_path))

    file_size = os.path.getsize(api_docs_path) / 1e6
    if file_size > 2:
        raise exceptions.ValidationError(
            click.style("The size of the manifest file at {} exceeds the maximum limit of 2MB.", fg="red")
            .format(api_docs_path))

    try:
        with open(api_docs_path, "r") as f:
            original_manifest_dict = yaml.load(f.read())

        manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)

        # write back the manifest in case some cleanup or overriding has happened
        with open(api_docs_path, "w") as f:
            yaml.dump(manifest_dict, f)

        return manifest_dict
    except (YAMLError, ValueError):
        raise exceptions.ValidationError(
            click.style("Your manifest file at {} is not valid YAML.", fg="red")
            .format(api_docs_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def validate_manifest(manifest_json):\n manifest_json = copy.deepcopy(manifest_json)\n for field in [\"schemes\", \"host\", \"basePath\", \"info\"]:\n if field not in manifest_json:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file.\", fg=\"red\").format(field),\n json=manifest_json)\n\n for field in [\"contact\", \"title\", \"description\", \"x-21-total-price\", \"x-21-quick-buy\", \"x-21-category\"]:\n if field not in manifest_json[\"info\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'info' section.\",\n fg=\"red\").format(field),\n json=manifest_json)\n\n for field in {\"name\", \"email\"}:\n if field not in manifest_json[\"info\"][\"contact\"]:\n raise exceptions.ValidationError(\n click.style(\n \"Field '{}' is missing from the manifest file under the 'contact' section.\", fg=\"red\")\n .format(field),\n json=manifest_json)\n\n for field in [\"min\", \"max\"]:\n if field not in manifest_json[\"info\"][\"x-21-total-price\"]:\n raise exceptions.ValidationError(\n click.style(\"Field '{}' is missing from the manifest file under the \"\n \"'x-21-total-price' section.\",\n fg=\"red\"),\n json=manifest_json)\n\n if len(manifest_json[\"schemes\"]) == 0:\n raise exceptions.ValidationError(\n click.style(\n \"You have to specify either HTTP or HTTPS for your endpoint under the \"\n \"`schemes` section.\",\n fg=\"red\"),\n json=manifest_json)\n\n valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}\n if manifest_json[\"info\"][\"x-21-category\"].lower() not in valid_app_categories:\n valid_categories = \", \".join(valid_app_categories)\n raise exceptions.ValidationError(\n click.style(\"'{}' is not a valid category for the 21 marketplace. 
Valid categories are {}.\",\n fg=\"red\").format(\n manifest_json[\"info\"][\"x-21-category\"], valid_categories),\n json=manifest_json)", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def test_theme_manifest(err, xpi_package=None):\n\n # Don't even both with the test(s) if there's no chrome.manifest.\n chrome = err.get_resource('chrome.manifest')\n if not chrome:\n return\n\n for triple in chrome.triples:\n subject = triple['subject']\n # Test to make sure that the triple's subject is valid\n if subject not in ('skin', 'style'):\n err.warning(\n err_id=('themes', 'test_theme_manifest',\n 'invalid_chrome_manifest_subject'),\n warning='Invalid chrome.manifest subject',\n 
description=('chrome.manifest files for full themes are only '\n \"allowed to have 'skin' and 'style' items. \"\n 'Other types of items are disallowed for '\n 'security reasons.',\n 'Invalid subject: %s' % subject),\n filename=triple['filename'],\n line=triple['line'],\n context=triple['context'])", "def process_manifest(vb, options):\n if not options.manifest:\n return\n\n vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,\n options.manifest_release_version)", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def hxlvalidate():\n run_script(hxlvalidate_main)", "def read_manifest(self): # -> None:\n ...", "def supports_manifest(manifest):\n pass", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def validate(ctx):\n handler = ValidateCommandHandler(ctx.obj['qa_dir'])\n exit(0 if handler.validate() else 1)", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def validate(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"] and not(settings.skip_permissions):\n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run `chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(problem|usedin|version|authors?|year|topics?|types?|param|deps?|dependency|dependencies|body|solution|rubric|resource))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n if len(string.rstrip(line)) > 80:\n print_warning(\"Line {} longer than 80 characters (has {})\".format(num+1, len(string.rstrip(line))))\n failed = True\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tA literal < can be escaped using \\\"&lt;\\\" instead.\", \n 
color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed.\".format(settings.filename))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n if tree.getroot().tag == 'assignment':\n print_error(\"This looks like an assignment xml file. Did you mean 22edit validate_doc?\")\n exit(1)\n try:\n problem = Problem(settings.filename)\n problem.parse_tree(tree, False)\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n firstProblem = True\n for version in problem.get_versions():\n if not version.standalone and not firstProblem:\n continue\n firstProblem = False\n \n print color(\"\\n\\nVERSION {}:\\n\".format(version.vid),\n color_code(BLUE))\n validate_version(version, failed)", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def _cli_validate(self, settings, remaining_argv):\n return None", "def validate():\n if not os.path.exists(os.path.join(ROOT, APP, '__init__.py')):\n message = ansi.error() + ' Python module not found.'\n if os.environ.get('LORE_APP') is None:\n message += ' $LORE_APP is not set. Should it be different than \"%s\"?' % APP\n else:\n message += ' $LORE_APP is set to \"%s\". Should it be different?' 
% APP\n sys.exit(message)\n\n if exists():\n return\n\n if len(sys.argv) > 1:\n command = sys.argv[1]\n else:\n command = 'lore'\n sys.exit(\n ansi.error() + ' %s is only available in lore '\n 'app directories (missing %s)' % (\n ansi.bold(command),\n ansi.underline(VERSION_PATH)\n )\n )", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_upload_invalid_manifest(cidc_api, some_file, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n\n mocks = UploadMocks(monkeypatch)\n\n mocks.iter_errors.return_value = [\"bad, bad error\"]\n\n grant_upload_permission(user_id, \"pbmc\", cidc_api)\n\n client = cidc_api.test_client()\n\n res = client.post(MANIFEST_UPLOAD, data=form_data(\"pbmc.xlsx\", some_file, \"pbmc\"))\n assert res.status_code == 400\n\n assert len(res.json[\"_error\"][\"message\"][\"errors\"]) > 0\n\n # Check that we tried to upload the excel file\n mocks.upload_xlsx.assert_not_called()", "def _bids_validate():\n vadlidator_args = ['--config.error=41']\n exe = os.getenv('VALIDATOR_EXECUTABLE', 'bids-validator')\n\n if platform.system() == 'Windows':\n shell = True\n else:\n shell = False\n\n bids_validator_exe = [exe, *vadlidator_args]\n\n def _validate(bids_root):\n cmd = [*bids_validator_exe, bids_root]\n run_subprocess(cmd, shell=shell)\n\n return _validate", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def manifest_with_many_types_of_errors_helper(error_log):\n assert '\"invalid_authz\"' in error_log\n assert '\"invalid_int\"' in error_log\n assert '\"invalid_md5\"' in error_log\n assert '\"invalid_url\"' in error_log", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def CheckPrereqs():\n logging.info('entering ...')\n\n if platform.system() != 'Linux' and platform.system() != 'Darwin':\n Die('Sorry, this script assumes Linux or Mac OS X thus far. '\n 'Please feel free to edit the source and fix it to your needs.')\n\n # Ensure source files are available.\n for f in [\n 'validator-main.protoascii', 'validator.proto', 'validator_gen_js.py',\n 'package.json', 'js/engine/validator.js', 'js/engine/validator_test.js',\n 'js/engine/validator-in-browser.js', 'js/engine/tokenize-css.js',\n 'js/engine/definitions.js', 'js/engine/parse-css.js',\n 'js/engine/parse-srcset.js', 'js/engine/parse-url.js'\n ]:\n if not os.path.exists(f):\n Die('%s not found. Must run in amp_validator source directory.' % f)\n\n # Ensure protoc is available.\n try:\n libprotoc_version = subprocess.check_output(['protoc', '--version'])\n except (subprocess.CalledProcessError, OSError):\n Die('Protobuf compiler not found. 
Try \"apt-get install protobuf-compiler\" '\n 'or follow the install instructions at '\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation.'\n )\n\n # Ensure 'libprotoc 2.5.0' or newer.\n m = re.search(b'^(\\\\w+) (\\\\d+)\\\\.(\\\\d+)\\\\.(\\\\d+)', libprotoc_version)\n if (m.group(1) != b'libprotoc' or\n (int(m.group(2)), int(m.group(3)), int(m.group(4))) < (2, 5, 0)):\n Die('Expected libprotoc 2.5.0 or newer, saw: %s' % libprotoc_version)\n\n # Ensure that the Python protobuf package is installed.\n for m in ['descriptor', 'text_format', 'json_format']:\n module = 'google.protobuf.%s' % m\n try:\n __import__(module)\n except ImportError:\n # Python3 needs pip3. Python 2 needs pip.\n if sys.version_info < (3, 0):\n Die('%s not found. Try \"pip install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n else:\n Die('%s not found. Try \"pip3 install protobuf\" or follow the install '\n 'instructions at https://github.com/ampproject/amphtml/blob/main/'\n 'validator/README.md#installation' % module)\n\n # Ensure JVM installed. TODO: Check for version?\n try:\n subprocess.check_output(['java', '-version'], stderr=subprocess.STDOUT)\n except (subprocess.CalledProcessError, OSError):\n Die('Java missing. Try \"apt-get install openjdk-7-jre\" or follow the'\n 'install instructions at'\n 'https://github.com/ampproject/amphtml/blob/main/validator/README.md#installation'\n )\n logging.info('... done')", "def test_is_valid_manifest_format_with_invalid_urls(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv\"\n )\n error_log = caplog.text\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert '\"test/test.txt\"' in error_log\n assert '\"testaws/aws/test.txt\"' in error_log\n assert '\"://test_bucket/test.txt\"' in error_log\n assert '\"s3://\"' in error_log\n assert '\"gs://\"' in error_log\n assert '\"s3://bucket_without_object\"' in error_log\n assert '\"s3://bucket_without_object/\"' in error_log\n assert '\"test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:/test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:test_bucket/aws/test.txt\"' in error_log\n assert '\"://test_bucket/aws/test.txt\"' in error_log\n assert '\"s3test_bucket/aws/test.txt\"' in error_log\n assert '\"https://www.uchicago.edu\"' in error_log\n assert '\"https://www.uchicago.edu/about\"' in error_log\n assert '\"google.com/path\"' in error_log\n assert '\"\"\"\"' in error_log\n assert \"\\\"''\\\"\" in error_log\n assert '\"[]\"' in error_log\n assert \"\\\"['']\\\"\" in error_log\n assert '\"[\"\"]\"' in error_log\n assert '\"[\"\", \"\"]\"' in error_log\n assert '\"[\"\", \\'\\']\"' in error_log\n assert result == False", "def validate():", "def validate_document(settings):\n if not settings.filename.endswith(\".xml\"):\n print_error(\"{} must have a .xml extension to interoperate with build tool\".format(settings.filename))\n exit(1)\n \n failed = False\n \n print color(\"Validating: \", color_code(BLUE)), settings.filename\n if platform in [\"linux\", \"linux2\"]: \n stat_info = os.stat(settings.filename)\n gid = stat_info.st_gid\n mode = stat_info.st_mode & 0777\n group = getgrgid(gid)[0]\n if group != \"cs0220ta\":\n print_error(\"Wrong group, you MUST run `chgrp cs0220ta {}'\".format(settings.filename))\n failed = True\n if mode ^ 0660 != 0000:\n print_error(\"Wrong permissions, you MUST run 
`chmod 660 {}'\".format(settings.filename))\n failed = True\n \n invalid_lt = re.compile(\"<(?!/?(assignment|problem|year|title|name|blurb|due))\")\n invalid_amp = re.compile(r\"&(?!\\w{1,10};)\")\n invalid_char = re.compile(r\"[^\\x00-\\x7f]\")\n \n # Some more manual checking \n with open(settings.filename) as f:\n for num, line in enumerate(f):\n problem_lt = re.search(invalid_lt, line)\n if problem_lt:\n print_error(\"Invalid < character on line {} at character {}\".format(num+1, problem_lt.start()))\n print color(\"\\tMake sure the tags you are using are correct.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_amp = re.search(invalid_amp, line)\n if problem_amp:\n print_error(\"Invalid raw & character on line {} at character {}\".format(num+1, problem_amp.start()))\n print color(\"\\tA literal & can be escaped by using \\\"&amp;\\\" instead.\", \n color_code(YELLOW, foreground=False) + color_code(BLACK))\n failed = True\n problem_char = re.search(invalid_char, line)\n if problem_char:\n print_error(\"Invalid non-ASCII character on line {} at character {}\".format(num+1, problem_char.start()))\n failed = True\n \n try:\n tree = ET.parse(settings.filename)\n except Exception:\n print_error(\"XML in {} could not be parsed at all.\".format(settings.filename))\n print color(\"\\tAre you sure all tags are closed?\", color_code(YELLOW))\n print color(\"\\nPlease rerun validation once XML is fixed\", color_code(CYAN))\n exit(1)\n try:\n document = Document(settings.filename)\n document.parse_tree(tree)\n document.validate()\n except ImproperXmlException as e:\n print_error(e.args[0])\n print color(\"\\nPlease rerun validation after fixing\", color_code(CYAN))\n exit(1)\n \n for i, version in enumerate(document.versions):\n print color(\"\\n\\nProblem {}: {}\\n\".format(i+1, version.filename),\n color_code(BLUE))\n validate_version(version, failed)", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def validate():\n description = f\"Validate XML metadata.\"\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n help = \"XML file or URL\"\n parser.add_argument('infile', help=help)\n\n help = (\n \"Format ID for metadata standard. If this argument is supplied, \"\n \"only that format ID will be checked. 
If not, all format IDs will be \"\n \"checked.\"\n )\n parser.add_argument('--format-id',\n help=help,\n choices=d1_scimeta.util.get_supported_format_id_list())\n\n help = \"Verbosity of log messages.\"\n choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n parser.add_argument('-v', '--verbosity', help=help, choices=choices,\n default='INFO')\n\n args = parser.parse_args()\n\n validator = XMLValidator(verbosity=args.verbosity)\n validator.validate(args.infile, format_id=args.format_id)", "def require_manifest(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n # Assume the manifest is in the current directory\n try:\n # If we are in a repository, we want to look in\n # the root of that repository for the manifest\n current_repo = vcs_git.RepoTool(Path.cwd(), search_parent=True)\n root_path = current_repo.get_root_path()\n except vcs_git.InvalidRepository:\n # Since we are not in a repository we will look\n # for the manifest in the current directory\n root_path = Path.cwd()\n\n manifest_path = root_path / manifest.MANIFEST_NAME\n\n try:\n loaded_manifest = manifest.load_manifest(manifest_path)\n return func(loaded_manifest, root_path, *args, **kwargs)\n except manifest.NotFound:\n ui.error(f\"Unable to load manifest: Not found: {str(manifest_path)}\")\n sys.exit(1)\n except manifest.ValidationFailed as exc:\n ui.error(f\"Unable to load manifest: Validation failed\")\n ui.error(str(exc))\n sys.exit(1)\n\n return wrapper", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % 
(self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "def main(args):\n p = OptionParser()\n p.add_option('-d', '--debug',\n action='store_true', default=False, dest='debug',\n help='debug')\n p.add_option('-w', '--w3c',\n action='store_true', default=False, dest='w3c',\n help='send file to validator.w3.org')\n p.add_option('-r', '--rm',\n action='store_true', default=False, dest='passrm',\n help='rm validation output on pass')\n p.add_option('-v', '--verbose',\n action='store_true', default=False, dest='verbose',\n help='more output')\n (o, a) = p.parse_args(args)\n \n if o.debug: pdb.set_trace()\n\n verbose(o.verbose)\n \n if 1 < len(a):\n flist = a[1:]\n else:\n flist = glob.glob(\"*.html\")\n\n for filename in flist:\n if verbose(): print filename\n if o.w3c:\n w3c_validate(filename)\n else:\n check_file(filename)\n\n sys.exit(exit_value())", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "async def validate(self):\n pass", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n 
raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", "def check_errors(self) -> None:", "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), 
len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions", "def split_manifest(root_path, manifest_file_path):\n\n train_manifest = open(os.path.join(root_path,\"dataset\", \"train_manifest.txt\"), \"w+\")\n test_manifest = open(os.path.join(root_path, \"dataset\",\"test_manifest.txt\"), \"w+\")\n val_manifest = open(os.path.join(root_path,\"dataset\" ,\"valid_manifest.txt\"), \"w+\")\n with open(os.path.join(root_path, manifest_file_path), 'r') as f:\n data_manifest = f.read().strip().split('\\n')\n data_len = len(data_manifest)\n k = 0\n for i in data_manifest:\n if k == 0:\n k = k+1\n continue\n elif k == 1:\n train_manifest.write(i+'\\n')\n test_manifest.write(i+'\\n')\n val_manifest.write(i+'\\n')\n elif k <= data_len*0.6: # 60% on train set\n train_manifest.write(i+'\\n')\n elif k > data_len*0.6 and k <= data_len*0.8: # 20 % on test\n test_manifest.write(i+'\\n')\n else: #20 % on test\n val_manifest.write(i+'\\n')\n k = k+1\n print(\"Spliting attritutes Done!\")", "def validate(self, module, config):\n from clarity_ext.extensions import ExtensionService\n extension_svc = ExtensionService(lambda _: None)\n config_obj = ConfigFromConventionProvider.get_extension_config(module)\n exception_count = 0\n\n for entry in config_obj:\n module = entry[\"module\"]\n try:\n extension_svc.run_test(config, None, module, False, True, True)\n print(\"- {}: SUCCESS\".format(module))\n except NoTestsFoundException:\n print(\"- {}: WARNING - No tests were found\".format(module))\n except Exception as e:\n # It's OK to use a catch-all exception handler here since this is only used while\n # running tests, so we want to be optimistic and try to run all tests:\n print(\"- {}: ERROR - {}\".format(module, e))\n print(\" Fresh run: clarity-ext extension {} test-fresh\".format(module))\n print(\" Review, then: clarity-ext extension {} freeze\".format(module))\n exception_count += 1\n\n return exception_count", "def state_failsafe_validate(cfg, app, win, events):", "def test_is_valid_manifest_with_missing_url_column(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == True", "def __validate():\n # TODO: implement", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def _ProcessManifest(manifest_path):\n doc = minidom.parse(manifest_path)\n manifests = doc.getElementsByTagName('manifest')\n assert len(manifests) == 1\n manifest = manifests[0]\n package = manifest.getAttribute('package')\n\n manifest.setAttribute('xmlns:%s' % _TOOLS_NAMESPACE_PREFIX, _TOOLS_NAMESPACE)\n\n tmp_prefix = os.path.basename(manifest_path)\n with tempfile.NamedTemporaryFile(prefix=tmp_prefix) as patched_manifest:\n doc.writexml(patched_manifest)\n patched_manifest.flush()\n yield patched_manifest.name, package", "def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not 
self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def load_app_manifests(self):\n self.app_manifests = []\n apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . Doesn't match naming convention .\" % app_dir)", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs,\n fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori,\n fail_bad_nki, fail_bad_nri, fail_old_kernel_fs_size,\n fail_old_rootfs_fs_size, fail_new_kernel_fs_size,\n fail_new_rootfs_fs_size):\n # Generate a test payload. 
For this test, we only care about the manifest\n # and don't need any data blobs, hence we can use a plain paylaod generator\n # (which also gives us more control on things that can be screwed up).\n payload_gen = test_utils.PayloadGenerator()\n\n # Tamper with block size, if required.\n if fail_mismatched_block_size:\n payload_gen.SetBlockSize(test_utils.KiB(1))\n else:\n payload_gen.SetBlockSize(test_utils.KiB(4))\n\n # Add some operations.\n payload_gen.AddOperation(False, common.OpType.MOVE,\n src_extents=[(0, 16), (16, 497)],\n dst_extents=[(16, 496), (0, 16)])\n payload_gen.AddOperation(True, common.OpType.MOVE,\n src_extents=[(0, 8), (8, 8)],\n dst_extents=[(8, 8), (0, 8)])\n\n # Set an invalid signatures block (offset but no size), if required.\n if fail_bad_sigs:\n payload_gen.SetSignatures(32, None)\n\n # Set partition / filesystem sizes.\n rootfs_part_size = test_utils.MiB(8)\n kernel_part_size = test_utils.KiB(512)\n old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size\n old_kernel_fs_size = new_kernel_fs_size = kernel_part_size\n if fail_old_kernel_fs_size:\n old_kernel_fs_size += 100\n if fail_old_rootfs_fs_size:\n old_rootfs_fs_size += 100\n if fail_new_kernel_fs_size:\n new_kernel_fs_size += 100\n if fail_new_rootfs_fs_size:\n new_rootfs_fs_size += 100\n\n # Add old kernel/rootfs partition info, as required.\n if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki:\n oki_hash = (None if fail_bad_oki\n else hashlib.sha256('fake-oki-content').digest())\n payload_gen.SetPartInfo(True, False, old_kernel_fs_size, oki_hash)\n if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or\n fail_bad_ori):\n ori_hash = (None if fail_bad_ori\n else hashlib.sha256('fake-ori-content').digest())\n payload_gen.SetPartInfo(False, False, old_rootfs_fs_size, ori_hash)\n\n # Add new kernel/rootfs partition info.\n payload_gen.SetPartInfo(\n True, True, new_kernel_fs_size,\n None if fail_bad_nki else hashlib.sha256('fake-nki-content').digest())\n payload_gen.SetPartInfo(\n False, True, new_rootfs_fs_size,\n None if fail_bad_nri else hashlib.sha256('fake-nri-content').digest())\n\n # Set the minor version.\n payload_gen.SetMinorVersion(0)\n\n # Create the test object.\n payload_checker = _GetPayloadChecker(payload_gen.WriteToFile)\n report = checker._PayloadReport()\n\n should_fail = (fail_mismatched_block_size or fail_bad_sigs or\n fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or\n fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or\n fail_old_rootfs_fs_size or fail_new_kernel_fs_size or\n fail_new_rootfs_fs_size)\n part_sizes = {\n common.ROOTFS: rootfs_part_size,\n common.KERNEL: kernel_part_size\n }\n\n if should_fail:\n self.assertRaises(PayloadError, payload_checker._CheckManifest, report,\n part_sizes)\n else:\n self.assertIsNone(payload_checker._CheckManifest(report, part_sizes))", "def test_is_valid_manifest_format_allowing_base64_encoded_md5(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\",\n allow_base64_encoded_md5=True,\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 not in error_log\n assert result == False", "def validateProcess(process):\n \n schedule=process.schedule_()\n paths=process.paths_()\n endpaths=process.endpaths_()\n \n # check output mods are in paths and have appropriate settings\n for outputModName in 
process.outputModules_().keys():\n outputMod = getattr(process, outputModName)\n if not hasattr(outputMod, 'dataset'):\n msg = \"Process contains output module without dataset PSET: %s \\n\" % outputModName\n msg += \" You need to add this PSET to this module to set dataTier and filterName\\n\"\n raise RuntimeError(msg)\n ds=getattr(outputMod,'dataset')\n if not hasattr(ds, \"dataTier\"):\n msg = \"Process contains output module without dataTier parameter: %s \\n\" % outputModName\n msg += \" You need to add an untracked parameter to the dataset PSET of this module to set dataTier\\n\"\n raise RuntimeError(msg)\n\n # check module in path or whatever (not sure of exact syntax for endpath)\n omRun=False\n\n if schedule==None:\n for path in paths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n for path in endpaths:\n if outputModName in getattr(process,path).moduleNames():\n omRun=True\n else:\n for path in schedule:\n if outputModName in path.moduleNames():\n omRun=True\n if omRun==False:\n msg = \"Output Module %s not in endPath\" % outputModName\n raise RuntimeError(msg)", "def main(manifest_path, dest_path):\n print(\"Reading %s...\" % (manifest_path,))\n os.makedirs(dest_path, exist_ok=True)\n with open(manifest_path) as f:\n manifest = strictyaml.load(f.read(), SCHEMA_MANIFEST, label=manifest_path)\n print(\"Total of %i lsrules to generate.\" % (len(manifest),))\n for src in manifest:\n data = src.data\n dest_file = os.path.join(dest_path, \"%s.lsrules\" % (data[\"src\"],))\n sys.stdout.write(\"Generating %s -> %s... \" % (data[\"src\"], dest_file))\n sys.stdout.flush()\n out = generate(data[\"src\"], data[\"name\"], data[\"description\"])\n with open(dest_file, \"wb\") as f:\n f.write(out.encode(\"utf-8\"))\n print(\"Done.\")", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n 
raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def test_upload_manifest_on_validation_multierror(\n cidc_api, some_file, clean_db, monkeypatch\n):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n\n UploadMocks(monkeypatch)\n\n client = cidc_api.test_client()\n\n patch_manifest = MagicMock()\n patch_manifest.side_effect = ValidationMultiError([\"one error\", \"another error\"])\n monkeypatch.setattr(\n \"cidc_api.resources.upload_jobs.TrialMetadata.patch_manifest\",\n staticmethod(patch_manifest),\n )\n\n res = client.post(MANIFEST_UPLOAD, data=form_data(\"pbmc.xlsx\", some_file, \"pbmc\"))\n assert res.status_code == 400\n assert res.json[\"_error\"][\"message\"] == {\"errors\": [\"one error\", \"another error\"]}", "def validate(entry: _LexiconEntry) -> None:\n _entry_has_required_fields(entry)\n _entry_field_values_are_not_empty(entry)\n _entry_field_values_does_not_contain_infix_whitespace(entry)\n _entry_tag_is_valid(entry)\n _entry_compound_annotation_is_valid(entry)\n _entry_morphophonemics_annotation_is_valid(entry)\n _entry_features_annotation_is_valid(entry)\n _entry_has_required_features(entry)\n _entry_required_features_are_valid(entry)\n _entry_optional_features_are_valid(entry)\n _entry_features_are_not_redundant(entry)", "def _validate(self):\n pass", "def validate(self, spec):\n d = spec.directory\n for file_name in os.listdir(d):\n if file_name.endswith(\".icon\"):\n if \" \" in file_name:\n raise ValidationException(f\"The .icon file name was '{file_name}'.\\n \"\n \".icon file may not contain spaces use a '_' instead.\")", "def Validate(self, relative_file, contents):\n pass", "def _perform_validate(syn, args):\n\n # Check parentid argparse\n _check_parentid_permission_container(syn=syn, parentid=args.parentid)\n genie_config = extract.get_genie_config(syn=syn, project_id=args.project_id)\n # HACK: Modify oncotree link config\n # TODO: Remove oncotree_link parameter from this function\n genie_config[\"oncotreeLink\"] = extract._get_oncotreelink(\n syn=syn, genie_config=genie_config, oncotree_link=args.oncotree_link\n )\n # Check center argparse\n _check_center_input(args.center, list(genie_config[\"center_config\"].keys()))\n\n format_registry = config.collect_format_types(args.format_registry_packages)\n logger.debug(f\"Using {format_registry} file formats.\")\n entity_list = [\n synapseclient.File(name=filepath, path=filepath, parentId=None)\n for filepath in args.filepath\n ]\n\n validator = GenieValidationHelper(\n syn=syn,\n project_id=args.project_id,\n center=args.center,\n entitylist=entity_list,\n format_registry=format_registry,\n file_type=args.filetype,\n genie_config=genie_config,\n )\n mykwargs = dict(\n nosymbol_check=args.nosymbol_check,\n project_id=args.project_id,\n )\n valid, message = validator.validate_single_file(**mykwargs)\n\n # Upload to synapse if parentid is specified and valid\n if valid and args.parentid is not None:\n logger.info(f\"Uploading files to {args.parentid}\")\n load.store_files(syn=syn, filepaths=args.filepath, parentid=args.parentid)", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def test_index_manifest_packages_failure(data, gen3_index, gen3_auth, 
logfile):\n with patch(\n \"gen3.tools.indexing.index_manifest.Gen3Metadata.create\", MagicMock()\n ) as mock_mds_create:\n index_object_manifest(\n manifest_file=f\"{CURRENT_DIR}/test_data/{data['manifest']}\",\n auth=gen3_auth,\n commons_url=gen3_index.client.url,\n thread_num=1,\n replace_urls=False,\n submit_additional_metadata_columns=True,\n )\n mds_records = {\n kwargs[\"guid\"]: kwargs[\"metadata\"]\n for (_, kwargs) in mock_mds_create.call_args_list\n }\n assert len(mds_records) == 0\n\n indexd_records = {r[\"did\"]: r for r in gen3_index.get_all_records()}\n assert len(indexd_records) == 0\n\n for error in data[\"expected_error_msgs\"]:\n assert error in logfile.read()", "def data_validation(self):\n print \"Starting basic data validation ...\"\n allattr = dir(bdefile)\n idx = [ii for ii, attr in enumerate(allattr) if \"validate_oee_error_\" in attr]\n vfunclist = []\n for ii in idx:\n vfunclist += [allattr[ii]]\n\n errorcodes = []\n for vfunc in vfunclist:\n errorcodes += [int(vfunc.split('_')[3])]\n\n errorcodes.sort()\n\n for code in errorcodes:\n sys.stdout.write(\"Checking validation rule %d ... \" % code)\n success, lines = (eval('self.validate_oee_error_'+str(code)))()\n if success:\n print \"PASSED\"\n else:\n self.report_error(code, lines)\n return False\n \n print \"Basic data validation succeeded.\\n\"\n return True", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def test_upload_manifest(cidc_api, clean_db, monkeypatch, caplog):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n mocks = UploadMocks(\n monkeypatch,\n prismify_extra=PBMC_PATCH,\n )\n\n client = cidc_api.test_client()\n\n # NCI users can upload manifests without explicit permission\n make_nci_biobank_user(user_id, cidc_api)\n with caplog.at_level(logging.DEBUG):\n res = client.post(\n MANIFEST_UPLOAD,\n data=form_data(\n \"pbmc.xlsx\",\n io.BytesIO(b\"a\"),\n \"pbmc\",\n ),\n )\n assert res.status_code == 200\n\n # Check that upload alert email was \"sent\"\n assert \"Would send email with subject '[UPLOAD SUCCESS]\" in caplog.text\n\n # Check that we tried to publish a patient/sample update\n mocks.publish_patient_sample_update.assert_called_once()\n\n # Check that we tried to upload the excel file\n mocks.make_all_assertions()", "def submit_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ManifestSubmitResponse:\n manifest, validation = _validate_manifest(request, schema)\n if not manifest or validation.status == ResponseStatus.FAIL:\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=validation.details\n )\n\n try:\n with get_repository(get_client_id(), DataCollection.MANIFEST) as repository:\n manifest_hash = manifest.crypto_hash().to_hex()\n _ = repository.set(\n {\"manifest_hash\": manifest_hash, \"manifest\": manifest.to_json_object()}\n )\n return ManifestSubmitResponse(manifest_hash=manifest_hash)\n except Exception as error:\n print(sys.exc_info())\n raise HTTPException(\n status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,\n detail=\"Submit manifest failed\",\n ) from error", "def __check_errors(self):\n if not(\"input\" in self.passedArgs or \"source\" in self.passedArgs):\n raise ArgError(\"Program did not receive any of mandatory arguments! 
(--source=file, --input=file)\")", "def checkAttributes(self):\n if len(self.lSteps) == 0:\n msg = \"ERROR: missing compulsory option --step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if len(self.lSteps) > 1:\n msg = \"ERROR: --step takes a single step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.lSteps[0] not in [\"1\",\"2\",\"3\",\"4\",\"5\",\"6\",\"7\",\"8\",\"9\"]:\n msg = \"ERROR: unknown --step %s\" % self.lSteps[0]\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"1\" in self.lSteps or \"2\" in self.lSteps or \"3\" in self.lSteps \\\n or \"4\" in self.lSteps:\n if not self.project1Id:\n msg = \"ERROR: missing compulsory option --proj1\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps \\\n or \"7\" in self.lSteps or \"8\" in self.lSteps:\n if not self.project2Id:\n msg = \"ERROR: missing compulsory option --proj2\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.project1Id and \"_\" in self.project1Id:\n msg = \"ERROR: forbidden underscore '_' in project identifier '%s'\" \\\n % self.project1Id\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.project2Id and \"_\" in self.project2Id:\n msg = \"ERROR: forbidden underscore '_' in project identifier '%s'\" \\\n % self.project2Id\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.samplesFile:\n msg = \"ERROR: missing compulsory option --samples\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.samplesFile):\n msg = \"ERROR: can't find file %s\" % self.samplesFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.scheduler:\n msg = \"ERROR: missing compulsory option --schdlr\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.scheduler == \"OGE\":\n self.scheduler = \"SGE\"\n if not self.queue:\n msg = \"ERROR: missing compulsory option --queue\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.lSteps == []:\n msg = \"ERROR: missing compulsory option --step\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"1\" in self.lSteps:\n if not Utils.isProgramInPath(\"fastqc\"):\n msg = \"ERROR: can't find 'fastqc' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"2\" in self.lSteps:\n if not Utils.isProgramInPath(\"demultiplex.py\"):\n msg = \"ERROR: can't find 'demultiplex.py' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersion(\"demultiplex.py\")\n if not (obsMajVer == 1 and obsMinVer >= 14):\n msg = \"ERROR: 'demultiplex.py' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= 1.14.0\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"3\" in self.lSteps:\n if not Utils.isProgramInPath(\"cutadapt\"):\n msg = \"ERROR: can't find 'cutadapt' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.adpFile:\n msg = \"ERROR: missing compulsory option --adp\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.adpFile):\n msg = \"ERROR: can't find file %s\" % self.adpFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.maxNPerc < 0 or 
self.maxNPerc > 1:\n msg = \"ERROR: --maxNp %f should be between 0 and 1\" \\\n % self.maxNPerc\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps:\n if not Utils.isProgramInPath(\"bwa\"):\n msg = \"ERROR: can't find 'bwa' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not Utils.isProgramInPath(\"samtools\"):\n msg = \"ERROR: can't find 'samtools' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not Utils.isProgramInPath(\"picard.jar\"):\n msg = \"ERROR: can't find 'picard.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not self.dictFile:\n msg = \"ERROR: missing compulsory option --dict\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(self.dictFile):\n msg = \"ERROR: can't find file %s\" % self.dictFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if os.path.dirname(self.dictFile) == '':\n self.dictFile = \"%s/%s\" % (os.getcwd(), self.dictFile)\n if not self.queue2:\n msg = \"ERROR: missing compulsory option --queue2\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"5\" in self.lSteps:\n if not Utils.isProgramInPath(\"GenomeAnalysisTK.jar\"):\n msg = \"ERROR: can't find 'GenomeAnalysisTK.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersionGatk()\n expMajVer = 3\n expMinVer = 5\n if not (obsMajVer == expMajVer and obsMinVer >= expMinVer):\n msg = \"ERROR: 'GATK' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= %i.%i\" % (expMajVer, expMinVer)\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.knownIndelsFile and not os.path.exists(self.knownIndelsFile):\n msg = \"ERROR: can't find file %s\" % self.knownIndelsFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"6\" in self.lSteps or \"7\" in self.lSteps or \"8\" in self.lSteps or \\\n \"9\" in self.lSteps:\n if not Utils.isProgramInPath(\"GenomeAnalysisTK.jar\"):\n msg = \"ERROR: can't find 'GenomeAnalysisTK.jar' in PATH\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n obsMajVer, obsMinVer = ProgVersion.getVersionGatk()\n if not (obsMajVer == 3 and obsMinVer >= 5):\n msg = \"ERROR: 'GATK' is in version %s.%s\" % \\\n (obsMajVer, obsMinVer)\n msg += \" instead of >= 3.5\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if \"4\" in self.lSteps or \"5\" in self.lSteps or \"6\" in self.lSteps or \\\n \"7\" in self.lSteps or \"8\" in self.lSteps or \"9\" in self.lSteps:\n if not self.pathToPrefixRefGenome:\n msg = \"ERROR: missing compulsory option --ref\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(\"%s.bwt\" % self.pathToPrefixRefGenome):\n msg = \"ERROR: can't find file %s.bwt\" % self.pathToPrefixRefGenome\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if not os.path.exists(\"%s.fa.fai\" % self.pathToPrefixRefGenome):\n msg = \"ERROR: can't find file %s.fa.fai\" % self.pathToPrefixRefGenome\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if os.path.dirname(self.pathToPrefixRefGenome) == \"\":\n self.pathToPrefixRefGenome = \"%s/%s\" % (os.getcwd(),\n self.pathToPrefixRefGenome)\n if \"8\" in self.lSteps or \"9\" in self.lSteps:\n if not self.jointGenoId:\n msg = \"ERROR: missing 
compulsory option --jgid\"\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n \n if \"9\" in self.lSteps:\n if self.restrictAllelesTo not in [\"ALL\", \"BIALLELIC\",\n \"MULTIALLELIC\"]:\n msg = \"ERROR: unknown option --rat %s\" % self.restrictAllelesTo\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.famFile:\n if not os.path.exists(self.famFile):\n msg = \"ERROR: can't find file %s\" % self.famFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)\n if self.excludeSampleFile:\n if not os.path.exists(self.excludeSampleFile):\n msg = \"ERROR: can't find file %s\" % self.excludeSampleFile\n sys.stderr.write(\"%s\\n\\n\" % msg)\n # self.help()\n sys.exit(1)", "def test_validate_invalid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n with self.assertRaises(jsonschema.ValidationError):\n resumeschema.validate(self.invalid_resume)", "def test_is_valid_manifest_with_wide_row(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_wide_row.tsv\",\n )\n wide_warning = f\"line 3, number of fields (6) in row is unequal to number of column names in manifest (5)\"\n assert wide_warning in caplog.text\n assert result == True", "def validate(self, namespace):\n pass", "def __init__(self, settings, load = True):\n\t\tself.version = 2.0\n\t\tself.data = {'@meta':{'version': 0}}# Default to no version, which will be converted.\n\t\tself.file = stringutil.normalize_file(settings.save_base() + '/Manifest.json.gz')\n\t\tself._completed = []\n\t\tself._failed = []\n\t\tif load and os.path.isfile(self.file): #!cover\n\t\t\ttry:\n\t\t\t\twith gzip.GzipFile(self.file, 'rb') as data_file:\n\t\t\t\t\tself.data = json.loads(data_file.read().decode('utf8'))\n\t\t\texcept:\n\t\t\t\tstringutil.error('Failed to load Manifest at [%s]. Probably corrupt. Try removing the file.' 
% self.file)\n\t\t\t\traise\n\t\tchange, self.data = self._adapt(self.data)\n\t\twhile change:\n\t\t\tchange, self.data = self._adapt(self.data)\n\t\t#\n\t\tassert 'elements' in self.data\n\t\tassert 'completed' in self.data['elements']\n\t\tassert 'failed' in self.data['elements']\n\t\tself.og_count = len(self.data['elements']['completed']+ self.data['elements']['failed'])", "def test_verify_manifest(mock_index):\n mock_index.return_value.async_get_record.side_effect = _async_mock_get_guid\n\n loop = get_or_create_event_loop_for_thread()\n loop.run_until_complete(\n async_verify_object_manifest(\n \"http://localhost\",\n manifest_file=CURRENT_DIR + \"/test_data/test_manifest.csv\",\n max_concurrent_requests=3,\n output_filename=\"test.log\",\n )\n )\n\n logs = {}\n try:\n with open(\"test.log\") as file:\n for line in file:\n guid, error, expected, actual = line.strip(\"\\n\").split(\"|\")\n logs.setdefault(guid, {})[error] = {\n \"expected\": expected.split(\"expected \")[1],\n \"actual\": actual.split(\"actual \")[1],\n }\n except Exception as exc:\n # unexpected file format, fail test\n assert False\n\n # everything in indexd is mocked to be correct for this one\n assert \"dg.TEST/f2a39f98-6ae1-48a5-8d48-825a0c52a22b\" not in logs\n\n assert \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\" in logs\n assert \"dg.TEST/9c205cd7-c399-4503-9f49-5647188bde66\" in logs\n\n # ensure logs exist for fields that are mocked to be incorrect in indexd\n assert \"/programs/DEV/projects/test2\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"authz\", {}).get(\"expected\")\n assert \"DEV\" in logs[\"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"].get(\n \"acl\", {}\n ).get(\"expected\")\n assert \"235\" in logs[\"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"].get(\n \"file_size\", {}\n ).get(\"expected\")\n assert \"c1234567891234567890123456789012\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"md5\", {}).get(\"expected\")\n assert \"gs://test/test 3.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n assert \"s3://testaws/file space.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n assert \"s3://testaws/aws/file,with,comma.txt\" in logs[\n \"dg.TEST/1e9d3103-cbe2-4c39-917c-b3abad4750d2\"\n ].get(\"urls\", {}).get(\"expected\")\n\n # make sure error exists when record doesnt exist in indexd\n assert \"no_record\" in logs[\"dg.TEST/9c205cd7-c399-4503-9f49-5647188bde66\"]", "def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)", "def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")", "def main():\n check_slugs()\n check_identifiers()", "def state_finish_validate(cfg, app, win, events):", "def 
_validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def validate_dependencies(self, session, entry):", "def test_launch_entry_on_rebuild_server_response(self):\n self.validate_attributes_in_launch_response(num_of_launch_entry=2)", "def _validate(self):\n config = self.config\n\n # Reject unknown sections.\n valid_sections = set((\n self.CUSTOM_HOOKS_SECTION,\n self.BUILTIN_HOOKS_SECTION,\n self.BUILTIN_HOOKS_OPTIONS_SECTION,\n self.TOOL_PATHS_SECTION,\n self.OPTIONS_SECTION,\n ))\n bad_sections = set(config.sections()) - valid_sections\n if bad_sections:\n raise ValidationError('%s: unknown sections: %s' %\n (self.paths, bad_sections))\n\n # Reject blank custom hooks.\n for hook in self.custom_hooks:\n if not config.get(self.CUSTOM_HOOKS_SECTION, hook):\n raise ValidationError('%s: custom hook \"%s\" cannot be blank' %\n (self.paths, hook))\n\n # Reject unknown builtin hooks.\n valid_builtin_hooks = set(rh.hooks.BUILTIN_HOOKS.keys())\n if config.has_section(self.BUILTIN_HOOKS_SECTION):\n hooks = set(config.options(self.BUILTIN_HOOKS_SECTION))\n bad_hooks = hooks - valid_builtin_hooks\n if bad_hooks:\n raise ValidationError('%s: unknown builtin hooks: %s' %\n (self.paths, bad_hooks))\n elif config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):\n raise ValidationError('Builtin hook options specified, but missing '\n 'builtin hook settings')\n\n if config.has_section(self.BUILTIN_HOOKS_OPTIONS_SECTION):\n hooks = set(config.options(self.BUILTIN_HOOKS_OPTIONS_SECTION))\n bad_hooks = hooks - valid_builtin_hooks\n if bad_hooks:\n raise ValidationError('%s: unknown builtin hook options: %s' %\n (self.paths, bad_hooks))\n\n # Verify hooks are valid shell strings.\n for hook in self.custom_hooks:\n try:\n self.custom_hook(hook)\n except ValueError as e:\n raise ValidationError('%s: hook \"%s\" command line is invalid: '\n '%s' % (self.paths, hook, e))\n\n # Verify hook options are valid shell strings.\n for hook in self.builtin_hooks:\n try:\n self.builtin_hook_option(hook)\n except ValueError as e:\n raise ValidationError('%s: hook options \"%s\" are invalid: %s' %\n (self.paths, hook, e))\n\n # Reject unknown tools.\n valid_tools = set(rh.hooks.TOOL_PATHS.keys())\n if config.has_section(self.TOOL_PATHS_SECTION):\n tools = set(config.options(self.TOOL_PATHS_SECTION))\n bad_tools = tools - valid_tools\n if bad_tools:\n raise ValidationError('%s: unknown tools: %s' %\n (self.paths, bad_tools))\n\n # Reject unknown options.\n valid_options = set(self.VALID_OPTIONS)\n if config.has_section(self.OPTIONS_SECTION):\n options = set(config.options(self.OPTIONS_SECTION))\n bad_options = options - valid_options\n if bad_options:\n raise ValidationError('%s: unknown options: %s' %\n (self.paths, bad_options))", "def validate(self):\n ...", "def _validate_scripts(self):\n if \"scripts\" in self.params:\n self.params[\"scripts\"] = Path(self.workflow_path) / self.params[\"scripts\"]\n else:\n self.params[\"scripts\"] = 
self.workflow_path / \"scripts\"\n if not self.params[\"scripts\"].exists():\n raise Exception(f\"{self.params['scripts']} doesnt exist\")", "def validate(self):\n errors = []\n if self.package_format:\n if not re.match('^[1-9][0-9]*$', str(self.package_format)):\n errors.append(\"The 'format' attribute of the package must \"\n 'contain a positive integer if present')\n\n if not self.name:\n errors.append('Package name must not be empty')\n # Must start with a lower case alphabetic character.\n # Allow lower case alphanummeric characters and underscores in\n # keymint packages.\n valid_package_name_regexp = '([^/ ]+/*)+(?<!/)'\n build_type = self.get_build_type()\n if not build_type.startswith('keymint'):\n # Dashes are allowed for other build_types.\n valid_package_name_regexp = '^[a-z][a-z0-9_-]*$'\n if not re.match(valid_package_name_regexp, self.name):\n errors.append(\"Package name '%s' does not follow naming \"\n 'conventions' % self.name)\n\n if self.version:\n if not re.match('^[0-9]+\\.[0-9_]+\\.[0-9_]+$', self.version):\n errors.append(\"Package version '%s' does not follow version \"\n 'conventions' % self.version)\n\n if self.maintainers is not None:\n # if not self.maintainers:\n # errors.append('Package must declare at least one maintainer')\n for maintainer in self.maintainers:\n try:\n maintainer.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n if not maintainer.email:\n errors.append('Maintainers must have an email address')\n\n if self.authors is not None:\n for author in self.authors:\n try:\n author.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n\n if errors:\n raise InvalidPackage('\\n'.join(errors))", "def __validate(self):\n self.report(self.name).receive_info_from_gui(self.tournament.get())\n msg.showinfo(title=None, message=self.message_path_to_folder)\n self.master.master.launch()\n self.master.destroy()", "def _check_manifest_resources(self, documents: list) -> str:\n for doc in documents:\n kind = doc.get(\"kind\")\n\n # If this kind defines a job template, pull it out\n if kind in JOB_TEMPLATE_RESOURCES:\n doc = doc.get(\"spec\").get(\"jobTemplate\")\n if doc is None:\n return f\"{kind} resources MUST specify a job template!\"\n\n if kind in POD_TEMPLATE_RESOURCES:\n pod_template = doc.get(\"spec\").get(\"template\")\n if pod_template is None:\n return f\"{kind} resources MUST specify a pod template!\"\n\n pod_spec = pod_template.get(\"spec\")\n if pod_spec is None:\n return f\"{kind} resources MUST specify a pod spec!\"\n\n containers = pod_spec.get(\"containers\")\n if not containers:\n return f\"{kind} resources MUST specify at least one container!\"\n\n init_containers = pod_spec.get(\"initContainers\")\n if init_containers:\n containers = containers + init_containers\n\n missing_resources_msg = (\n f\"All containers and initContainers in a {kind}\"\n \"must define resource constraints!\"\n )\n for cont in containers:\n resources = cont.get(\"resources\")\n if not resources:\n return missing_resources_msg\n\n limits = resources.get(\"limits\")\n if not limits or not limits.get(\"cpu\") or not limits.get(\"memory\"):\n return missing_resources_msg\n\n requests = resources.get(\"requests\")\n if (\n not requests\n or not requests.get(\"cpu\")\n or not requests.get(\"memory\")\n ):\n return missing_resources_msg" ]
[ "0.69982684", "0.67911905", "0.6616815", "0.65430355", "0.63983166", "0.6289555", "0.62729937", "0.6270612", "0.61467505", "0.6103869", "0.6071787", "0.5995637", "0.59809095", "0.5950012", "0.5940859", "0.5853178", "0.57848716", "0.5735434", "0.5723771", "0.56745976", "0.56583005", "0.5651255", "0.5629011", "0.5585329", "0.5579808", "0.5576735", "0.55718356", "0.5558085", "0.55396247", "0.5531804", "0.5529372", "0.551285", "0.5507859", "0.5503039", "0.5486223", "0.5472184", "0.5469153", "0.54593307", "0.54518616", "0.5441693", "0.5421909", "0.5420688", "0.5401979", "0.5397935", "0.5396471", "0.5396343", "0.53356326", "0.53154755", "0.52801496", "0.52794194", "0.5274558", "0.5263905", "0.5262064", "0.5217861", "0.52172077", "0.5213123", "0.52080977", "0.5204567", "0.52036947", "0.5202049", "0.51793444", "0.51774335", "0.51732874", "0.51633", "0.5139934", "0.51356065", "0.51347095", "0.51234424", "0.511663", "0.5112128", "0.51052576", "0.5089123", "0.5078911", "0.5076154", "0.50760496", "0.50629824", "0.50563836", "0.5049212", "0.5048564", "0.504514", "0.50423986", "0.5031414", "0.5029713", "0.5027878", "0.5024223", "0.5023497", "0.50202847", "0.49985725", "0.49965715", "0.498744", "0.4984063", "0.49755788", "0.49743262", "0.49487585", "0.4946928", "0.49453685", "0.49369842", "0.49364704", "0.49336776", "0.49293166" ]
0.61277246
9
cleans up possible errors in the user manifest.
def clean_manifest(manifest_json):
    manifest_json = copy.deepcopy(manifest_json)
    host = manifest_json["host"]
    host = host.strip("/").lstrip("http://").lstrip("https://")
    manifest_json["host"] = host
    return manifest_json
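A brief usage sketch of the document above (an editorial addition, not part of the dataset entry): the manifest shape is assumed, and `copy` is assumed to be imported at module scope.

    # Hypothetical input; only the "host" key matters to clean_manifest.
    manifest = {"host": "https://example.com/"}
    cleaned = clean_manifest(manifest)
    # cleaned["host"] -> "example.com"
    # Caveat: str.lstrip strips a *set of characters*, not a literal prefix, so a
    # host such as "https://httpbin.org" would be over-trimmed to "bin.org";
    # str.removeprefix (Python 3.9+) is the prefix-safe alternative.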
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n user_init.clean_setup()", "def clear_errors(self) -> None:", "def clear_errors(self) -> None:", "def clean_up(self):\n\t\tpass", "def cleanUp(self):\r\n # All intermediates should be removed by app controller\r\n pass", "def clean_up(self):\n pass", "def clean_up(self):\n pass", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def _clean_up(self):", "def fix(self):\n\n cmds.lockNode(self.errorNodes, l=False)\n cmds.delete(self.errorNodes)\n cmds.flushUndo()\n for plugin in self.errorPlugins:\n cmds.unloadPlugin(plugin)\n\n self.run()", "def clean_up(self):\n # TODO: Implement if needed\n pass", "def cleanup(self):\n self.log.debug('upm - in upm cleanup()')\n # Add resource setup code here", "def cleanUp(self):\r\n pass", "def cleanup(self,context,result):\n if self.do_cleanup:\n try:\n return_code, stdout, stderr= runProgram([context.gsec_path,\n \"-user\", context.user_name,\n \"-password\", context.user_password,\n \"-delete\", self.user_name],[])\n except:\n result.note_exception(cause=\"Resource cleanup: Can't remove user.\")\n result[\"user_name\"] = self.user_name\n return\n else:\n if return_code != 0:\n self.fail_and_annotate_streams(result, Result.ERROR,'GSEC','Delete user',\n stdout,stderr)", "def fix(self):\n\n pm.delete(self.errorNodes)\n\n self.run()", "def clean_up(self):\n self.fname = None\n self.failed_files = []\n self.custom_failed = []\n self.results = None", "def remove_stuff_post_error(self):\n os.system('rm %s' % self.destination)", "def fix(self):\n exceptionError = ''\n for each in self.errorNodes:\n try:\n pm.delete(each)\n except exceptionError:\n print exceptionError", "def cleanUp(self):\n import evoware.fileutil as F\n F.tryRemove(self.f_project, verbose=(self.VERBOSITY>1), tree=1)", "def cleanUp():\n pass", "def cleanup():", "def CleanUp(self):\n self.cmd.CleanUp()", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n pass", "def cleanup(self):\n\n pass", "def cleanup(self):\n\t\tself.pb.cleanup()\n\t\tsys.exit()", "def cleanup (self):\n pass", "def clean_error(self):\r\n return self._arm.clean_error()", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "async def clean_up(self) -> None:", "def _final_cleanup(self):\n # Clean up and remove the temporary gisdbase\n self._cleanup()\n # Remove resource directories\n if \"error\" in self.run_state or \"terminated\" in self.run_state:\n self.storage_interface.remove_resources()", "def cleanup(self):\r\n pass", "def cleanup(self):\r\n pass", "def env_cleanup(self):\n pass", "def post_cleanup(self):\n pass", "def clean_up():\n for action in reversed(undo_actions):\n try:\n action()\n except Exception, exc:\n sys.stderr.write(\"BAD CLEANUP: Call to %s failed\\n\"\n % action.func_name)\n sys.stderr.write(\" %s\\n\" % exc)", "def cleanup():\n management.call_command('cleanup')", "def cleanup_step(self):\n self.clean_home_subdir()\n\n super(IntelBase, self).cleanup_step()", "def clean_user_tokens() -> None:\n asyncio.run(clean_old_user_tokens())", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" 
%(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):", "def cleanup(self):\n\n print \"Cleaning up...\",\n sys.stdout.flush()\n\n builddir = os.path.join(self.build)\n\n comm = 'rm -rf '+builddir\n #+' '+libdir+' '+logdir\n (output, error, retz) = runShellCommand(comm)\n\n print \"done.\"", "def cleanup(self):\r\n pass", "def clear_error(self):\n self.got_error = False", "def fix_nonerrors(self):\n if not self.only_error:\n return\n self.line = None\n self.filename = None", "def delete_error():\r\n item = core.get_all_items()\r\n for i in item:\r\n if \"Error\" in i or \"Warning\" in i:\r\n if core.does_item_exist(i):\r\n reset_error(i)", "def CleanUp(self):\n if (not self.keep_epi_raw or not self.keep_epi_mot) \\\n and not self.opts.debug_tmp:\n self.tmp.Clean()\n overall_msg = self.SummaryErrorMessage()\n if self.tmplt and not self.no_email:\n EmailResults(self.tmplt['email'], overall_msg, \\\n self.topdir, self.dumpfile, self.logfile, self.motcor_summary)\n\n# Write the error message to the log file.\n if self.f_log is None:\n# Log file not opened yet, do it now.\n if self.logdir is not None:\n logfile = '%s/preprocess.log' % self.logdir\n f_log = open(logfile,'w')\n f_log.write('\\n%s\\n' % overall_msg)\n f_log.close()\n else:\n self.f_log.write('\\n%s\\n' % overall_msg)\n sys.exit()", "def _cleanup(self):\n # delete stdout/stderr\n if os.path.isfile(self.stdout):\n os.unlink(self.stdout)", "def cleanup(self):\r\n print(\"Cleanup not implemented\")", "def fix(self):\n print 'Can\\'t be auto fixed, please select to check and fix it manually.'\n # pm.delete(self.errorNodes)", "def cleanup(self):\r\n logging.info(\"entered the cleanup\")", "def cleanup(self):\n\n # uninstall sourcedata\n if self.conversion.install_dataset_path.exists():\n # without the ChangeWorkingDir the command does not operate inside\n # of dataset_path\n with utils.ChangeWorkingDir(self.dataset_path):\n datalad.uninstall(\n path=self.conversion.install_dataset_name,\n dataset=self.dataset_path,\n recursive=True\n )\n\n # remove bids conversion\n bids_dir = self._get_bids_dir()\n if bids_dir.exists():\n self.log.info(\"Remove %s\", bids_dir)\n shutil.rmtree(bids_dir)", "def clean_up_data(self):\n pass", "def cleanup(self):\n self.exit_config_mode()", "def clean(self):\n self.clean_rally_conf()\n rally.RallyBase.clean_rally_logs()\n if self.image_alt:\n self.cloud.delete_image(self.image_alt)\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def clean_up(self):\n os.system(f'rm -r {self.submission_folder_path}')\n\n return", "def clean():\n clean_flatbuffer_binaries()\n clean_webp_textures()", "def cleanup(self):\n self.qemu.clean_run_files()\n for tmp in glob.glob(self.configfile + \"?*\"):\n os.unlink(tmp)", "def clean_errors(self):\n self._vim.eval('clearmatches()')\n self._errors = []\n self._matches = []\n # Reset Syntastic notes - TODO: bufdo?\n self._vim.current.buffer.vars['ensime_notes'] = []", "def cleanup() -> None:\n\n for fname in glob(os.path.join(tdir, 'alexandria.*')):\n if os.path.splitext(fname)[1] not in {'.c', '.h'}:\n os.unlink(fname)", "def _cleanup():\n if os.path.exists(WEBPROPERTIES_PATH):\n os.remove(WEBPROPERTIES_PATH)\n if os.path.exists(PROFILES_PATH):\n os.remove(PROFILES_PATH)", "def _cleanup(self):\n pass", "def cleanup(self): \n if os.path.exists(self.inpms):\n shutil.rmtree(self.inpms)", "def clean(self):\n 
self.clean_rally_conf()\n self.clean_rally_logs()\n if self.flavor_alt:\n self.orig_cloud.delete_flavor(self.flavor_alt.id)\n super().clean()", "def horde_cleanup(self):", "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")\n #logout of application\n self.logout()", "def cleanup(self, *args, **kwargs):", "def fixup(self):\n raise Exception(\"Fixup not implemented yet!\")", "def finalize_error():\n print('')\n exit(-1)", "def cleanupInstall(self):\n\n os.chdir( os.path.dirname(self.installPath) )\n tryunlink( self.download.tarball )", "def tearDown(self):\n\n self.app = None\n users.clear()", "def preShutdown(self):\r\n for user in self._users.values():\r\n user.destroy()", "def clean_up(user, fname, tango_output):\n time.sleep(1)\n run(['rm', fname])\n time.sleep(1)\n path = tango_output + user + '.out'\n run(['rm', path])", "def reset_error(self):\n\t\t\n\t\tself.error = None", "def cleanup_resources(self, restart=False):", "def office_clean_failed(parser, args, params):\n parser.parse_known_args(args)\n control.clean_failed(params)", "def cleanUp(self):\r\n remove_files(self._db_files_to_remove, error_on_missing=False)", "def cleanup(self,result):\n pass", "def clean():\n sudo(\"rm -rf %(admin_webroot)s\" % env)", "def cleanupResources():\n None", "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")", "def clean(self) -> None:\n # remove all *.py and *.pyi files in the folder\n for wc in [\"*.py\", \"*.pyi\", \"modules.json\"]:\n for f in (self.package_path).rglob(wc):\n f.unlink()", "def _clean(base_dir):\n # remove the snakemake cache\n shutil.rmtree(os.path.join(base_dir, \".snakemake\"), ignore_errors=True)\n\n # remove seq2science caches\n shutil.rmtree(os.path.expanduser(os.path.join(xdg.XDG_CACHE_HOME, \"seq2science\")), ignore_errors=True)\n\n # remove historic seq2science cache location\n shutil.rmtree(os.path.expanduser(f\"~/.config/seq2science/\"), ignore_errors=True)\n\n print(\"All cleaned up!\")", "def cleanup(self):\n self.GP.cleanup()", "def _clean_up():\n from tests.util import report\n report.update()\n if MAIN_RUNNER is not None:\n MAIN_RUNNER.on_exit()\n from tests.util.services import get_running_services\n for service in get_running_services():\n sys.stderr.write(\"Stopping service \")\n for c in service.cmd:\n sys.stderr.write(c + \" \")\n sys.stderr.write(\"...\\n\\r\")\n service.stop()", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def finalize(self):\n print('Cleaning up...')", "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")\n self.logout()", "def postRunCleanup(self):\n self.logDesc(\"Post Run Cleanup\")\n self.logout()", "def clean_up():\n if is_excel_running():\n # Prevents Excel from reopening\n # if it has been closed manually or never been opened\n for app in Apps():\n try:\n app.xl.run_VB_macro(\"CleanUp\")\n except (CommandError, AttributeError, aem.aemsend.EventError):\n # Excel files initiated from Python don't have the xlwings VBA module\n pass" ]
[ "0.6784651", "0.64837694", "0.64837694", "0.64458686", "0.6421459", "0.6405516", "0.6405516", "0.63899606", "0.6353433", "0.63306385", "0.63012415", "0.62639797", "0.6250906", "0.6223817", "0.619754", "0.6186283", "0.61667925", "0.6079713", "0.6040983", "0.60129434", "0.5937849", "0.5927334", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.5913217", "0.59085435", "0.5891194", "0.5863979", "0.5828515", "0.58193475", "0.58134085", "0.5813404", "0.5787642", "0.5787642", "0.57678527", "0.5754178", "0.57534", "0.573773", "0.5709303", "0.5693683", "0.56921697", "0.5688169", "0.5688169", "0.5688169", "0.5676158", "0.5672254", "0.56705135", "0.5669329", "0.5663547", "0.56198317", "0.5616101", "0.56138283", "0.5604237", "0.5593644", "0.559202", "0.557855", "0.5569848", "0.55523425", "0.55441374", "0.55325943", "0.55302", "0.55255234", "0.55181503", "0.55168134", "0.5512471", "0.5511128", "0.5504753", "0.55033", "0.550296", "0.55019134", "0.54982036", "0.5496826", "0.54947084", "0.54819256", "0.54549474", "0.5447446", "0.54471326", "0.544177", "0.54395676", "0.5432968", "0.54297864", "0.53984714", "0.5392274", "0.53920096", "0.5386576", "0.5383707", "0.5383099", "0.5367077", "0.5351705", "0.5349348", "0.5344423", "0.5344423", "0.534121" ]
0.0
-1
Overrides fields in the manifest file.
def apply_overrides(manifest_json, overrides, marketplace):
    manifest_json = copy.deepcopy(manifest_json)
    if overrides is None:
        return manifest_json

    if "title" in overrides:
        manifest_json["info"]["title"] = overrides["title"]

    if "description" in overrides:
        manifest_json["info"]["description"] = overrides["description"]

    if "price" in overrides:
        invalid_price_format = "Price should be a non-negative integer."
        try:
            price = int(overrides["price"])
            manifest_json["info"]["x-21-total-price"]["min"] = price
            manifest_json["info"]["x-21-total-price"]["max"] = price
            if price < 0:
                raise exceptions.ValidationError(invalid_price_format)
        except ValueError:
            raise exceptions.ValidationError(invalid_price_format)

    if "name" in overrides:
        manifest_json["info"]["contact"]["name"] = overrides["name"]

    if "email" in overrides:
        manifest_json["info"]["contact"]["email"] = overrides["email"]

    if "host" in overrides:
        manifest_json["host"] = overrides["host"]

    if "port" in overrides:
        host = manifest_json["host"]
        # if the host is in the form of https://x.com/ remove the trailing slash
        host = host.strip("/")
        invalid_port_format = "Port should be an integer between 0 and 65536."
        try:
            port = int(overrides["port"])
            if port <= 0 or port > 65536:
                raise exceptions.ValidationError(invalid_port_format)
        except ValueError:
            raise exceptions.ValidationError(invalid_port_format)
        host += ":{}".format(port)
        manifest_json["host"] = host

    if "basePath" in overrides:
        manifest_json["basePath"] = overrides["basePath"]

    return manifest_json
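A minimal usage sketch for the document above (an editorial addition, not dataset content). It assumes the manifest already carries the nested "info" / "x-21-total-price" / "contact" keys the function indexes into, and that `copy` and `exceptions` are imported at module scope; note the `marketplace` argument is never referenced in the body.

    # Hypothetical override values; keys mirror those handled above.
    overrides = {"title": "My API", "price": "10", "port": "8080"}
    patched = apply_overrides(manifest_json, overrides, marketplace=None)
    # patched["info"]["title"] == "My API"
    # patched["info"]["x-21-total-price"] == {"min": 10, "max": 10}
    # patched["host"] now ends with ":8080"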
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _set_manifest(self, manifest: Dict) -> None:\n if \"metadata\" not in manifest:\n manifest[\"metadata\"] = {}\n\n if \"files\" not in manifest:\n manifest[\"files\"] = {\n \"includes\": [],\n \"excludes\": [],\n }\n\n with open(self._manifest_path, \"w\", encoding=\"utf-8\") as file:\n # TODO: Exception handling\n self._yaml.dump(manifest, file)", "def _derive_extra_metadata(self, extra_metadata):\n extra_metadata['platform']['Family'] = extra_metadata['platform']['Platform Family Name']\n\n # Add platform number if derivable from file\n if self.__class__ is not SAFESentinel1:\n extra_metadata['platform']['Family'] += \"-%s\" % extra_metadata['platform']['Platform Number']", "def read_manifest(self): # -> None:\n ...", "def update_manifest(self, filename: Optional[str] = None, manifest: Optional[Dict[str, str]] = None) -> None:\n filename = filename or self.manifest_filename\n manifest = manifest or {}\n self.log.debug(f\"Updating manifest '{manifest}' to file '{filename}'\")\n with open(filename, \"w\") as f:\n json.dump(manifest, f, indent=2)", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def post_backup(self, backup, manifest_file):\n pass", "def _update_extra_metadata(self, extra_metadata):\n self._add_filename_metadata(extra_metadata)\n self._derive_extra_metadata(extra_metadata)\n \n if type(self) == SAFESentinel3:\n self._extract_metadata_from_zipfile(extra_metadata)", "def duplicateSettings(self, otherField):\n self.name = otherField.name\n self.enName = otherField.enName\n self.format = otherField.format\n self.prefix = otherField.prefix\n self.suffix = otherField.suffix\n self.html = otherField.html\n self.isRequired = otherField.isRequired\n self.hidden = otherField.hidden\n self.numLines = otherField.numLines\n self.initDefault = otherField.initDefault\n self.linkAltField = otherField.linkAltField\n self.parentLevel = otherField.parentLevel\n self.useFileInfo = otherField.useFileInfo\n self.showInDialog = otherField.showInDialog", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def build_extra_vars_file(self, instance, private_data_dir):", "def 
setup_class(cls):\n super().setup_class()\n\n # this is an old version of the package, just to trigger an upgrade.\n cls.add_item(\"protocol\", \"fetchai/default:0.12.0\", local=False)\n\n # change aea version of the AEA project\n agent_config = cls.load_agent_config(cls.current_agent_context)\n cls._update_aea_version(agent_config)\n cls.nested_set_config(\"agent.aea_version\", cls.AEA_VERSION_SPECIFIER)\n cls.nested_set_config(\"agent.author\", \"wrong_author\")", "def __attrs_post_init__(self):\n if not self.path:\n self.path = Path.cwd() / CONFIG['meta_yaml_path']\n if not self.path.exists():\n raise AttributeError(f'Path {self.path} doesn\\'t exist.')\n self.update()\n try:\n validators.SMetaYaml(strict=True).load(self.get_content())\n except ValidationError as err:\n inform.error('meta.yaml has incorrect content.')\n inform.error('Invalid value for following params:')\n for key, value in err.messages.items():\n inform.error(f'{key}: {value}')\n inform.critical()", "def _store_package_metadata(self):", "def update_manifest(self, dst):\n # Read the current manifest into memory\n mpath = os.path.join(os.path.dirname(dst), \"manifest.json\")\n try:\n with open(mpath, 'r') as f:\n manifest = json.load(f)\n except IOError:\n manifest = {}\n\n name, _ = os.path.splitext(os.path.basename(dst))\n # Update the manifest record\n manifest[name] = {\n \"url\": os.path.basename(dst),\n \"signature\": sha256sum(dst),\n }\n\n # Write the manifest back to disk\n with open(mpath, 'w') as f:\n json.dump(manifest, f, indent=2)", "def __init__(self, app_name, app_owner):\n self.app_name = app_name\n self.app_owner = app_owner\n super().__init__(xml_tags.Elements.SECURE_APP_APPLICATION)", "def update_manifest(builder):\r\n\r\n manifest_path = join(builder.Config.SourceRootPath, builder.Config.WMAppManifest)\r\n dom = parse(manifest_path)\r\n\r\n #import pdb;pdb.set_trace()\r\n #version = make_version_string(builder)\r\n version = builder.AppVersion\r\n\r\n update_manifest_with_values(dom,\r\n Title = builder.CustomCfg.Title,\r\n #ProductID = builder.CustomCfg.ProductID,\r\n #PublisherID = builder.Config.PublisherID,\r\n Version = version,\r\n Languages = getattr(builder.CustomCfg, \"Languages\", None ) )\r\n\r\n with open(manifest_path, 'wb') as f:\r\n data = dom.toprettyxml(indent = \" \")\r\n # toprettyxml adds extra new lines\r\n lines = [ x for x in data.split(\"\\n\") if len(x.strip()) > 0]\r\n data = \"\\n\".join(lines)\r\n f.write(data)\r\n\r\n return True", "def _set_attributes(self):", "def extra_object_files(self):", "def update_manifest(self, language='en'):\n self._manifest.update_manifest(language)", "def map_from_app(self, app):\n # Store app etag in form\n self.etag.data = app.get('_etag', '')\n\n # Keep the use_custom_identity checked if it was\n if app.get('assumed_account_id', None) and app.get('assumed_role_name', None):\n self.use_custom_identity.data = True\n\n super(EditAppForm, self).map_from_app(app)\n\n self.env_ro.data = self.env.data\n self.role_ro.data = self.role.data", "def apply_extra_fields(self, om_context, f):\n if om_context.is_device_component:\n f.data[ZFact.MetadataKeys.ZEN_SCHEMA_TAGS_KEY] = \"DeviceComponent\"\n elif om_context.is_device:\n f.data[ZFact.MetadataKeys.ZEN_SCHEMA_TAGS_KEY] = \"Device\"\n if om_context.mem_capacity is not None:\n f.data[\n ZFact.MetadataKeys.MEM_CAPACITY_KEY\n ] = om_context.mem_capacity\n\n if om_context.dimensions:\n f.metadata.update(om_context.dimensions)\n\n if om_context.metadata:\n f.data.update(om_context.metadata)", "def 
__update_custom_field_settings(self,\n eachfield, #field etree\n resourcetablename,\n fieldname\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n unikey = \"%s__%s\" % (resourcetablename, fieldname)\n field_property = self.custom_field_properties.get(unikey, {})\n\n cust_fieldtype = field_property.get(\"fieldtype\", None)\n cust_readable = field_property.get(\"readable\", None)\n cust_writable = field_property.get(\"writable\", None)\n cust_label = field_property.get(\"label\", None)\n cust_hint = field_property.get(\"hint\", None)\n cust_default = field_property.get(\"default\", None)\n cust_lines = field_property.get(\"lines\", None)\n cust_boxes = field_property.get(\"boxes\", None)\n cust_has_options = field_property.get(\"has_options\", None)\n cust_options = field_property.get(\"options\", None)\n\n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def writeManifestEntry(context, key, value):\n GenericMetadata.writeEntryToSection(context, GenericMetadata.MANIFEST_SECTION, key, value)", "def _WriteChromeVersionToMetadata(self):\n self._run.attrs.metadata.UpdateKeyDictWithDict(\n 'version',\n {'chrome': self._run.attrs.chrome_version})\n self.UploadMetadata(filename=constants.PARTIAL_METADATA_JSON)", "def updateDictFile(self):\n if self.dictFile.vdata.get('version',0): return\n #--Update to version 1\n for name in self.data.keys():\n installer = self.data[name]\n if isinstance(installer,Installer):\n self.data[name] = installer.__copy__(InstallerArchive)\n self.dictFile.vdata['version'] = 1", "def test_python_custom_runtime_field(self):\n self.write_file('test.py', 'test file')\n config = testutil.AppInfoFake(runtime='custom',\n entrypoint='my_entrypoint')\n self.assertTrue(self.generate_configs(appinfo=config, deploy=True))", "def PopulateModuleMetadata(self, mod, mojom_file):\n mod.name = os.path.basename(mojom_file.file_name)\n mod.path = mojom_file.file_name\n mod.namespace = mojom_file.module_namespace\n if mojom_file.attributes:\n mod.attributes = {attr.key: attr.value for attr in mojom_file.attributes}", "def create_manifest():\n dirpath = os.getcwd()\n file_path_ori = dirpath + \"/manifest.json\"\n file_path_new = dirpath + \"/manifests3.json\"\n\n with open(file_path_ori, \"rt\") as fin:\n with open(file_path_new, \"wt\") as fout:\n for line in fin:\n fout.write(line.replace('bucket-name', bucketName))", "def set_specific_fields(self):\n raise NotImplementedError(\"Must be defined by subclass!\")", "def update(self):\n self.application['email'] = 'test@example.spinnaker'", "def test_MergeManifests_file_attributes():\n 
d1 = dpack_pb2.DataPackage()\n f1 = d1.file.add()\n f1.relative_path = \"a\"\n f1.size_in_bytes = 1\n f1.checksum_hash = dpack_pb2.SHA1\n f1.checksum = \"abc\"\n d2 = dpack_pb2.DataPackage()\n f2 = d2.file.add()\n f2.relative_path = \"a\"\n f2.size_in_bytes = 2\n f2.checksum_hash = dpack_pb2.MD5\n f2.checksum = \"def\"\n dpack.MergeManifests(d1, d2)\n assert d1.file[0].size_in_bytes == 1\n assert d1.file[0].checksum_hash == dpack_pb2.SHA1\n assert d1.file[0].checksum == \"abc\"", "def _post_process(self):\n # merge extendedMetadata into metadata\n if 'instance' in self._metadata and self._metadata['instance'] is not None:\n if 'metadata' in self._metadata['instance']:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata['instance'].pop('extendedMetadata')\n self._metadata['instance']['metadata'].update(v)\n else:\n if 'extendedMetadata' in self._metadata['instance']:\n v = self._metadata.pop('extendedMetadata')\n self._metadata['metadata'] = v\n\n # change vnic's id to vnicId\n if 'vnics' in self._metadata:\n for i in range(len(self._metadata['vnics'])):\n v = self._metadata['vnics'][i].pop('id')\n self._metadata['vnics'][i]['vnicId'] = v", "def _add_filename_metadata(self, extra_metadata): \n \n # Make sure product_info section exists\n extra_metadata.setdefault('product_info', {})\n \n file_name = os.path.basename(self.fname)\n fn_comps = file_name.split(\"_\")\n \n if self.__class__ == SAFESentinel1:\n component = fn_comps[2]\n if len(component) < 4: \n resolution = 'N/A'\n else:\n resolution = component[-1]\n \n extra_metadata['product_info']['Resolution'] = resolution\n \n # Add file/scan name \n extra_metadata['product_info']['Name'] = os.path.splitext(file_name)[0]\n \n # Add Satellite and Mission from the file path\n comp_1 = fn_comps[0].upper()\n extra_metadata['platform']['Mission'] = \"Sentinel-%s\" % comp_1[1]\n extra_metadata['platform']['Satellite'] = \"Sentinel-%s\" % comp_1[1:]", "def default_settings_user_attributes():\n return ['fullname', 'email', 'groups']", "def update(self):\n self.content = self.get_content()\n self.dependencies = self.content['requirements']['run']\n self.pythonversion = self.content['extra']['pythonversion']\n self.package_name = self.content['package']['name']", "def extra(self) -> Dict[str, Any]:\n extra = self.extras.copy()\n if isinstance(self.author, str):\n extra['Author'] = self.author\n if isinstance(self.email, str):\n extra['Email'] = self.email\n if isinstance(self.description, str):\n extra['Description'] = self.description\n return extra", "def hide_fields_in_newer_versions(obj):\n if not api_utils.allow_start_end_audit_time():\n obj.start_time = wtypes.Unset\n obj.end_time = wtypes.Unset\n if not api_utils.allow_force():\n obj.force = wtypes.Unset", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n ignore_fields = (\n 'about_me',\n 'romanized_first_name',\n 'romanized_last_name',\n 'postal_code',\n )\n set_fields_to_required(self, ignore_fields=ignore_fields)", "def _metadata(self):\n meta = super()._metadata\n meta.update({\n \"name\": self.name,\n \"lead_in_time\": self.lead_in_time,\n \"amplification\": self.amplification,\n \"amplifier_clipping\": self.amplifier_clipping,\n \"power_threshold\": self.power_threshold,\n })\n return meta", "def inherit_metadata(descriptor, inherited_data):\r\n try:\r\n descriptor.xblock_kvs.inherited_settings = inherited_data\r\n except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module\r\n pass", "def 
platform_mode_manifest_updates(self, dbapi, mode):\n pass", "def test_good_practice_attrs(self):\n # FormOverrideMixIn.good_practice_attrs\n pass", "def _add_to_schema(self, new: dict):\n self._defaults.update(new)\n self._migrate()", "def no_resize_override(self, empty=False, names='all'):\n if names == 'all':\n names = list(self.form.fields.keys())\n overrides = {} if empty else getattr(self.form, 'formfield_attrs_overrides', {})\n add_skip = {'no_size_override': True}\n for name in names:\n overrides[name] = overrides.get(name, {})\n overrides[name].update(add_skip)\n return overrides", "def initMetadata(self):\n\n if not 'flags' in self.metadata:\n\n self.metadata['flags'] = {}\n\n if not 'uidvalidity' in self.metadata:\n\n\n self.metadata['uidvalidity'] = random.randint(1000000, 9999999)\n\n if not 'uids' in self.metadata:\n\n self.metadata['uids'] = {}\n\n if not 'uidnext' in self.metadata:\n\n self.metadata['uidnext'] = 1", "def process_manifest(vb, options):\n if not options.manifest:\n return\n\n vb.add_manifest(options.manifest_id, options.manifest_service, options.manifest_version, options.manifest_version_id,\n options.manifest_release_version)", "def fields(self, fields):\n self._fields = yaml.safe_dump(fields)", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def tweaks(self) -> None:\n pass", "def __init__(self, settings, load = True):\n\t\tself.version = 2.0\n\t\tself.data = {'@meta':{'version': 0}}# Default to no version, which will be converted.\n\t\tself.file = stringutil.normalize_file(settings.save_base() + '/Manifest.json.gz')\n\t\tself._completed = []\n\t\tself._failed = []\n\t\tif load and os.path.isfile(self.file): #!cover\n\t\t\ttry:\n\t\t\t\twith gzip.GzipFile(self.file, 'rb') as data_file:\n\t\t\t\t\tself.data = json.loads(data_file.read().decode('utf8'))\n\t\t\texcept:\n\t\t\t\tstringutil.error('Failed to load Manifest at [%s]. Probably corrupt. Try removing the file.' 
% self.file)\n\t\t\t\traise\n\t\tchange, self.data = self._adapt(self.data)\n\t\twhile change:\n\t\t\tchange, self.data = self._adapt(self.data)\n\t\t#\n\t\tassert 'elements' in self.data\n\t\tassert 'completed' in self.data['elements']\n\t\tassert 'failed' in self.data['elements']\n\t\tself.og_count = len(self.data['elements']['completed']+ self.data['elements']['failed'])", "def __init__(self, first_name, last_name, location, job_title):\n \"\"\"Then initialize attributes of the child class.\"\"\"\n\n super().__init__(first_name, last_name, location, job_title)\n self.permissions = 'root'", "def log_manifest(self):\n self._audit_log.log_json(\"manifest\", self.get_manifest_json())", "def mergeManifest(channel, targetManifest, sdkManifest):\n\n if not os.path.exists(targetManifest) or not os.path.exists(sdkManifest):\n utils_log.error(\"the manifest file is not exists.targetManifest:%s;sdkManifest:%s\", targetManifest, sdkManifest)\n return False\n\n ET.register_namespace('android', androidNS)\n targetTree = ET.parse(targetManifest)\n targetRoot = targetTree.getroot()\n\n ET.register_namespace('android', androidNS)\n sdkTree = ET.parse(sdkManifest)\n sdkRoot = sdkTree.getroot()\n\n f = open(targetManifest)\n targetContent = f.read()\n f.close()\n\n permissionConfigNode = sdkRoot.find('permissionConfig')\n if permissionConfigNode != None and len(permissionConfigNode) > 0:\n for child in list(permissionConfigNode):\n key = '{' + androidNS + '}name'\n val = child.get(key)\n if val != None and len(val) > 0:\n attrIndex = targetContent.find(val)\n if -1 == attrIndex:\n targetRoot.append(child)\n\n appConfigNode = sdkRoot.find('applicationConfig')\n appNode = targetRoot.find('application')\n\n if appConfigNode != None:\n\n proxyApplicationName = appConfigNode.get('proxyApplication')\n if proxyApplicationName != None and len(proxyApplicationName) > 0:\n\n if 'PYW_APPLICATION_PROXY_NAME' in channel:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = channel[\n 'PYW_APPLICATION_PROXY_NAME'] + ',' + proxyApplicationName\n else:\n\n channel['PYW_APPLICATION_PROXY_NAME'] = proxyApplicationName\n\n # 获取渠道闪屏名称\n launcherName = appConfigNode.get('channelLauncherName')\n # appKeyWord = appConfigNode.get('keyword')\n\n # exists = appKeyWord != None and len(appKeyWord.strip()) > 0 and targetContent.find(appKeyWord) != -1\n\n # if not exists:\n # remove keyword check...\n for child in list(appConfigNode):\n targetRoot.find('application').append(child)\n\n targetTree.write(targetManifest, 'UTF-8')\n # 修改闪屏 如果渠道 需要闪屏文件则增加此方法 不要则注释掉\n if launcherName != None and len(launcherName) > 0:\n mergeLauncher(launcherName, targetManifest)\n\n return True", "def save_info(base):\r\n _info = open(base.info_name,'wb')\r\n fields = []\r\n for k in base.field_names:\r\n if isinstance(base.fields[k],base.__class__):\r\n fields.append((k,'<base>'+urllib.quote(base.fields[k].name)))\r\n else:\r\n fields.append((k,base.fields[k].__name__))\r\n _info.write(' '.join(['%s:%s' %(k,v) for (k,v) in fields]))\r\n _info.close()\r\n out = open(os.path.join(base.name,\"__defaults__\"),\"wb\")\r\n for field_name,default_value in base.defaults.iteritems():\r\n if field_name in [\"__id__\",\"__version__\"]:\r\n continue\r\n value = base._file[field_name].to_block(default_value)\r\n out.write(\"%s %s\" %(field_name,value))\r\n out.close()", "def set_generic_fields(self):\n self.constant_fields[\"admver\"] = 9.1\n self.constant_fields[\"datatype\"] = 'raw'\n self.constant_fields[\"dfo\"] = '//'\n self.constant_fields[\"enterdate\"] = 
time.strftime(\"%m/%d/%Y\")", "def updateNameAndDescription(self, name, desc):\n self.magneticfield.name = name\n self.magneticfield.description = desc\n\n self.magneticfield.writeFile()", "def update_json(self):\n self.set_version_to_default()\n self.remove_null_fields()\n self.remove_unnecessary_keys()\n self.set_fromVersion(from_version=self.from_version)", "def test_create_from_entrypoint_with_custom_metadata(self):\n package_name = 'DummyExtension'\n module_name = 'test_extension.dummy.submodule'\n extension_id = '%s:DummyExtension' % module_name\n\n class TestExtension(Extension):\n __module__ = module_name\n id = extension_id\n metadata = {\n 'Name': 'OverrideName',\n 'Version': '3.14159',\n 'Summary': 'Lorem ipsum dolor sit amet.',\n 'Description': 'Tempus fugit.',\n 'License': 'None',\n 'Home-page': 'http://127.0.0.1/',\n }\n\n entrypoint = FakeEntryPoint(TestExtension, project_name=package_name)\n extension_info = ExtensionInfo.create_from_entrypoint(entrypoint,\n TestExtension)\n\n expected_metadata = entrypoint.dist.metadata.copy()\n expected_metadata.update(TestExtension.metadata)\n\n self._check_extension_info(extension_info=extension_info,\n app_name='test_extension.dummy',\n package_name=package_name,\n extension_id=extension_id,\n metadata=expected_metadata)", "def setUpFormData(self):\n super(InconsistentNameCSID, self).setUpFormData()\n self.formData['name'] = 'Pyrazine'", "def file_override(filename, *args, **kwargs):\n return self.new_file_override()", "def update_ev_whitelist(self, compressed_ev_whitelist):\n manifest = self._read_manifest_json()\n manifest[\"version\"] = str(self.manifest_version() + 1)\n self._write_manifest_json(manifest)\n with open(\n os.path.join(\n self._crx_dir,\n \"_platform_specific\",\n \"all\",\n \"ev_hashes_whitelist.bin\"),\n \"wb\") as hashes_file:\n hashes_file.write(compressed_ev_whitelist)", "def _write_attrs(self, title):\n # XXX: Should probably all be defined in some header file.\n self._f.attrs['api_version'] = np.float32([6.30000019])\n self._f.attrs['version'] = np.float32([6.30000019])\n self._f.attrs['floating_point_word_size'] = \\\n np.array([self.__f_word_size], dtype=np.int32)\n self._f.attrs['file_size'] = np.array([1], dtype=np.int32)\n self._f.attrs['maximum_name_length'] = np.array([32],\n dtype=np.int32)\n self._f.attrs['int64_status'] = np.array([0], dtype=np.int32)\n self._f.attrs['title'] = np.string_(title)", "def update_attributes_map(klass):\n\n return {\n 'name': '',\n 'default_locale': ''\n }", "def set_default_values_as_needed(self):\n if self.verbose:\n click.echo('Updating required default values')\n for field in ARGUMENTS_DEFAULT_VALUES:\n if self.__class__.__name__ in ARGUMENTS_DEFAULT_VALUES[field][1]:\n self.data[field] = ARGUMENTS_DEFAULT_VALUES[field][0]", "def override_from_folder(self, other: ItemVariant) -> None:\n self.authors.extend(other.authors)\n self.tags.extend(self.tags)\n self.vbsp_config = lazy_conf.concat(self.vbsp_config, other.vbsp_config)\n self.desc = tkMarkdown.join(self.desc, other.desc)", "def manifest_file(self, manifest_file):\n if manifest_file is None:\n raise ValueError(\"Invalid value for `manifest_file`, must not be `None`\")\n\n self._manifest_file = manifest_file", "def OnAttributesUpdated():\n pass", "def build_manifest(self, build_manifest):\n\n self._build_manifest = build_manifest", "def overwrite_attrs(self, src_detail, attrs):\n case_tile_configuration_list = [\n 'case_tile_template',\n 'persist_tile_on_forms',\n 'persistent_case_tile_from_module',\n 
'pull_down_tile',\n 'persist_case_context',\n 'persistent_case_context_xml',\n 'case_tile_group',\n ]\n for attr in attrs:\n if attr == \"case_tile_configuration\":\n for ele in case_tile_configuration_list:\n setattr(self, ele, getattr(src_detail, ele))\n else:\n setattr(self, attr, getattr(src_detail, attr))", "def defaults():\n\n return {\"cr_shelter_flag_id\": S3ReusableField.dummy(\"flag_id\"),\n }", "def modify_device_fields(self, data):\n data = clean(data, self.fields_parameters)\n return self.put(\"/device/fields\", data)", "def initSlotObjectDict(cls):\n restslotattributedict.update(dict({extension_tunnel: \"name\"}))\n restslotattributedict.update(dict({extension_circuit: \"name\"}))\n restslotattributedict.update(dict({extension_ip_interface: \"name\"}))\n restslotattributedict.update(dict({extension_ip_route: \"name\"}))\n restslotattributedict.update(dict({gigabitethernet: \"name\"}))\n restslotattributedict.update(dict({blade: \"slot_number\"}))", "def __init__(self, *args, **kw):\n super(SignupFormExtra, self).__init__(*args, **kw)\n # Put the first and last name at the top\n new_order = self.fields.keyOrder[:-2]\n new_order.insert(0, 'first_name')\n new_order.insert(1, 'last_name')\n self.fields.keyOrder = new_order", "def update_manifest(explicit=False):\n if not os.path.exists(MANIFEST_FILENAME):\n return\n\n manifest_file = open(MANIFEST_FILENAME, 'r')\n parts = manifest_file.read().partition('\\n' + AUTOGEN_LINE)\n manifest_file.close()\n if parts[1] == '':\n if explicit:\n print \"%s has no AUTOGENERATE section\" % MANIFEST_FILENAME\n return\n\n commands = [line for line in parts[2].split('\\n') if line.startswith('#!')]\n excludes = []\n for command in commands:\n match = re.match(r'#!\\s*EXCLUDE:\\s*(.*)\\s*$', command)\n if options.verbose:\n print \"Excluding paths beginning with '%s'\" % match.group(1)\n if match:\n excludes.extend(re.split(r\",\\s*\", match.group(1)))\n\n cached_files = []\n hash_lines = []\n\n paths = options.local_listing.keys()\n paths.sort()\n size = 0\n for path in paths:\n info = options.local_listing[path]\n if path == MANIFEST_FILENAME or path == META_FILENAME or \\\n info['size'] > MAX_FILE_SIZE or \\\n is_data_path(path) or \\\n prefix_match(excludes, path):\n continue\n cached_files.append(path)\n hash_lines.append(\"%s=%s\" % (path, info['sha1']))\n size += info['size']\n\n manifest_lines = [parts[0], AUTOGEN_LINE, AUTOGEN_EXPLAIN]\n manifest_lines.extend(commands)\n manifest_lines.extend((\n \"# TOTAL FILES: %s (%s bytes)\" % (intcomma(len(cached_files)), intcomma(size)),\n \"# SIGNATURE: %s\" % hashlib.sha1('\\n'.join(hash_lines)).hexdigest(),\n \"CACHE:\",\n ))\n manifest_lines.extend(cached_files)\n\n manifest_file = open(MANIFEST_FILENAME, 'w')\n manifest_file.write('\\n'.join(manifest_lines) + '\\n')\n manifest_file.close()\n\n # Make sure the listing for the manifest file is up to date\n # so it will be uploaded if changed.\n update_local_listing(MANIFEST_FILENAME)", "def update_overrides(self, app, name, namespace,\n flag='reset', override_values=None):\n if override_values is None:\n override_values = {}\n body = {'flag': flag, 'values': override_values, 'attributes': {}}\n return self._update(self._path(app) +\n '?name=' + name +\n '&namespace=' + namespace, body)", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = 
\"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def add_settings_early(self):\n pass", "def enable_metadata(self):\r\n if not self.metadata:\r\n self._set_subclient_properties(\"_subclient_properties['cloudAppsSubClientProp']\\\r\n ['salesforceSubclient']['backupSFMetadata']\", True)", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- 
we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def _store_package_metadata(self):\n\n context = self._config.context\n log.debug('processing chef_json file {0} for package metadata'.format(self._get_chef_json_full_path()))\n with open(self._get_chef_json_full_path()) as chef_json_file:\n chef_json = json.load(chef_json_file)\n log.debug(chef_json.dump)\n\n context.package.attributes = {}\n for x in self._config.pkg_attributes:\n context.package.attributes[x] = chef_json.get(x, None)", "def preCommitFixup(self):\n log_method_call(self, self.name)\n # UEFI firmware/bootloader cannot read 1.1 or 1.2 metadata arrays\n if getattr(self.format, \"mountpoint\", None) == \"/boot/efi\":\n self.metadataVersion = \"1.0\"", "def _update_inputs_file(self, additional_inputs):\n # Retrieve inputs from cli vm\n inputs_file = StringIO()\n fab.get(self.remote_bootstrap_inputs_path, inputs_file)\n inputs = yaml.load(inputs_file.getvalue())\n\n # Update the inputs to include the additional inputs\n inputs.update(additional_inputs)\n inputs_file.seek(0)\n json.dump(inputs, inputs_file)\n inputs_file.truncate()\n\n # Write the inputs back to the cli vm.\n fab.put(inputs_file, self.remote_bootstrap_inputs_path)", "def add_new_attributes(self):\n self.task = None\n 
self.reset_shadow()", "def __init__(self):\n super().__init__(interface.Metadata, DEFAULT_PRIORITIES)", "def extend_info(self, extend_info):\n self._extend_info = extend_info", "def update_plugin_data(self, entry):", "def write_manifest(self):\n import time\n import sys\n with open('bake-manifest-' + time.strftime('%Y-%m-%d-%H:%M:%S') + \n '.txt', 'w') as hout:\n hout.write(' '.join(sys.argv) + '\\n')\n for k, v in self.table.items():\n hout.write(';'.join([k] + v) + '\\n')", "def generate_manifest_dict(self):\n\n annotations = dict()\n\n for build_project in self.projects.get('build', []):\n for annotation in build_project.get('annotation', []):\n annotations[annotation['name']] = annotation['value']\n\n product = annotations.get('PRODUCT', 'unknown')\n version = annotations.get('VERSION', 'unknown')\n bld_num = annotations.get('BLD_NUM', '9999')\n manifest_name = '{}-{}-{}'.format(product, version, bld_num)\n\n return {\n manifest_name: {\n 'remotes': self.remotes,\n 'defaults': self.defaults,\n 'projects': self.projects\n }\n }", "def edit_files(project_name, app_name):\n SETTINGS = f'{project_name}/backend/backend/settings.py'\n PACKAGE_JSON = f'{project_name}/frontend/package.json'\n\n\n c1 = f\"\\n \\t'corsheaders', \\n\\t'rest_framework', \\n\\t'{app_name}',\\n\"\n add_to_line(SETTINGS, 32, c1 )\n\n c2 = f\"\\n \\t'corsheaders.middleware.CorsMidleware',\\n\"\n add_to_line(SETTINGS, 44, c2 )\n \n with open(SETTINGS, 'a+') as f:\n f.write(\"\\nCORS_ORIGIN_WHITELIST = ['localhost:3000/']\")\n\n c3 = '\\n\\t\"proxy\": \"http://localhost:8000\",\\n'\n add_to_line(PACKAGE_JSON, 3, c3)", "def set_version(self, bundle, ctx, filename, version):", "def put(self, **kwargs):\n logging.debug(\"In put() for FTDDeviceHAPairs class.\")\n # Attempting to \"Deploy\" during Device registration causes issues.\n self.fmc.autodeploy = False\n return super().put(**kwargs)", "def your_reservation_defaults(self, defaults):\n\n default_email = self.email()\n if default_email:\n defaults['email'] = self.email()\n\n data = self.additional_data()\n\n if not data:\n return defaults\n\n for form in data:\n if form in self.context.formsets:\n for field in data[form]['values']:\n defaults[\"%s.%s\" % (form, field['key'])] = field['value']\n\n return defaults", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()", "def disable_metadata(self):\r\n if self.metadata:\r\n self._set_subclient_properties(\"_subclient_properties['cloudAppsSubClientProp']\\\r\n ['salesforceSubclient']['backupSFMetadata']\", False)", "def attach_puppet_resource(self):\n\t\tfilename = '/etc/puppet/manifests/cpanel.pp'\n\t\tfileobj = open(filename, 'w')\n\t\tfileobj.write(self.title)\n\t\tfileobj.write(\"\"\"\\\n# THIS PUPPET MANIFEST SHOULD NOT BE MANUALLY EDITTED.\n# POSTKILLACCT SCRIPT AUTO GENERATED THESE PUPPET RESOUCES. 
\n\"\"\");\n\t\tfileobj.write(self.puppet_resource)\n\t\tfileobj.close()\n\t\tprint \"[%s] Added puppet resource entry in '%s'\" % (ctime(), filename)", "def convert_manifest(\n self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever\n ):\n pass", "def convert_manifest(\n self, allowed_mediatypes, namespace_name, repo_name, tag_name, content_retriever\n ):\n pass", "def add_file_metadata(self):\n metadata = self.__file.require_group(METADATA)\n self.__write_value(metadata, DATE_CREATED, date.today().strftime(\"%Y-%m-%d\"))\n self.__write_value(metadata, SDK_VERSION, __version__)", "def info_from_entry(self, entry):\n info = super().info_from_entry(entry)\n info[ATTR_NAME] = info[ATTR_PROPERTIES]['Name'].replace('\\xa0', ' ')\n return info" ]
[ "0.61127824", "0.57904774", "0.57481503", "0.54889697", "0.54140353", "0.5328529", "0.5295124", "0.52817696", "0.5250707", "0.52453095", "0.5233346", "0.5206509", "0.5180013", "0.5170564", "0.51529664", "0.51508546", "0.5138381", "0.51179296", "0.5108538", "0.50936186", "0.5086031", "0.5084446", "0.5044203", "0.50303376", "0.50240767", "0.50217056", "0.50019425", "0.49900642", "0.49774122", "0.49656487", "0.4943151", "0.49384776", "0.49376053", "0.49370047", "0.49302477", "0.49219552", "0.49117154", "0.49080315", "0.49050778", "0.49004924", "0.48988292", "0.48946238", "0.4894311", "0.48914033", "0.48906305", "0.4889014", "0.4886727", "0.48844016", "0.48786366", "0.48696256", "0.48637006", "0.48598847", "0.48378816", "0.48336887", "0.4818053", "0.48167473", "0.48160806", "0.48107216", "0.48013794", "0.47998592", "0.47993502", "0.47842458", "0.47826913", "0.47799703", "0.47750163", "0.47719327", "0.47635522", "0.47633335", "0.4760744", "0.4759674", "0.47532004", "0.47476026", "0.47437748", "0.47300214", "0.47232568", "0.47232243", "0.47214028", "0.47179708", "0.4715389", "0.4714207", "0.47101885", "0.47067177", "0.47059858", "0.4693789", "0.46922708", "0.4690243", "0.46892497", "0.4688522", "0.4685167", "0.46841192", "0.4679832", "0.4677229", "0.46766356", "0.46728787", "0.46708557", "0.4662549", "0.46614683", "0.46614683", "0.46602088", "0.46601883" ]
0.53681576
5
Replace "AUTO" in the host and quickbuy with the ZeroTier IP. The server subsequently replaces, in the displayed quickbuy, instances of the manifest host value with a mkt.21.co address.
def replace_auto(manifest_dict, marketplace):
    manifest_dict = copy.deepcopy(manifest_dict)

    def get_formatted_zerotier_address(marketplace):
        host = get_zerotier_address(marketplace)
        if "." not in host:
            return "[{}]".format(host)
        else:
            return host

    if 'AUTO' in manifest_dict['host']:
        manifest_dict['host'] = manifest_dict['host'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    if 'AUTO' in manifest_dict['info']['x-21-quick-buy']:
        manifest_dict['info']['x-21-quick-buy'] = manifest_dict['info']['x-21-quick-buy'].replace(
            'AUTO', get_formatted_zerotier_address(marketplace))
    return manifest_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _post_task_update_advertise_address():\n default_network_interface = None\n\n with open(KUBE_APISERVER_CONFIG) as f:\n lines = f.read()\n m = re.search(REGEXPR_ADVERTISE_ADDRESS, lines)\n if m:\n default_network_interface = m.group(1)\n LOG.debug(' default_network_interface = %s', default_network_interface)\n\n if advertise_address and default_network_interface \\\n and advertise_address != default_network_interface:\n cmd = [\"sed\", \"-i\", \"/oidc-issuer-url/! s/{}/{}/g\".format(default_network_interface, advertise_address),\n KUBE_APISERVER_CONFIG]\n _ = _exec_cmd(cmd)", "def test_replace_host_subnet(self):\n pass", "def configure_host_ips(h3, h4, ip_address_hs):\n\n h3.libs.ip.flush_ip('eth1')\n h3.libs.ip.interface('eth1', up=False)\n\n h4.libs.ip.flush_ip('eth1')\n h4.libs.ip.interface('eth1', up=False)\n\n h3.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[0], MASK), up=True)\n h4.libs.ip.interface(portlbl='eth1', addr=\"{}/{}\".format(\n ip_address_hs[1], MASK), up=True)", "def set_host_ip(self, host, host_ip):\n host.setIP(str(host_ip.ip), prefixLen=self.NETPREFIX)", "def update_host(hostname, cpu_mhz, cpu_cores, ram):\n return update_host(hostname, cpu_mhz, cpu_cores, ram)", "def test_patch_host_subnet(self):\n pass", "def softupdate_ip(request, ipaddress):\n\n softupdate_key = settings.SOFTUPDATE_KEY\n if request.POST.get(\"key\", \"invalid_key\") != softupdate_key:\n raise PermissionDenied()\n\n # LC: UGGLY and not \"portable\"\n STATUS_EN_SERVICE = 'En service'\n\n def noanswer(reason=\"\"):\n message = \"\"\"Modification impossible.\\n\"\"\"\n if reason and settings.DEBUG:\n message += \"\"\"%s\\n\"\"\" % (reason,)\n return HttpResponse(message, content_type=\"plain/text\")\n\n serial = request.POST.get(\"serial\", None)\n hostname = request.POST.get(\"hostname\", None)\n\n host = None\n errmsgs = []\n\n if serial:\n hosts = Host.objects.filter(serial=serial)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n if not host:\n errmsgs.append(\"Le host serial=%s est introuvable.\" % (serial,))\n\n if hostname and not host:\n hosts = Host.objects.filter(hostname=hostname,\n status__description=STATUS_EN_SERVICE)\n if len(hosts) == 1:\n host = hosts[0]\n elif len(hosts) > 1:\n for h in hosts:\n if h.ip == ipaddress:\n host = h\n break\n\n # Get the last log entry\n hostlogs = HostIPLog.objects.filter(host=host, log_ip=ipaddress) \\\n .order_by(\"-date\")\n if hostlogs:\n hostlog = hostlogs[0]\n else:\n hostlog = HostIPLog(host=host, log_ip=ipaddress)\n \n hostlog.log_queryfrom = get_request_remote_addr(request)\n hostlog.log_hostname = request.POST.get('hostname', 'unknown')\n hostlog.save()\n\n return HttpResponse('ok.', content_type='plain/text')", "def set_static_ip_address(self, payload):\n\n # This request is received from CLI for setting ip address of an\n # instance.\n macaddr = payload.get('mac')\n ipaddr = payload.get('ip')\n\n # Find the entry associated with the mac in the database.\n req = dict(mac=macaddr)\n instances = self.get_vms_for_this_req(**req)\n for vm in instances:\n LOG.info(_LI('Updating IP address: %(ip)s %(mac)s.'),\n {'ip': ipaddr, 'mac': macaddr})\n # Send request to update the rule.\n try:\n rule_info = dict(ip=ipaddr, mac=macaddr,\n port=vm.port_id,\n status='up')\n self.neutron_event.update_ip_rule(str(vm.host),\n str(rule_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE(\"RPC error: Failed to 
update rules.\"))\n else:\n # Update the database.\n params = dict(columns=dict(ip=ipaddr))\n self.update_vm_db(vm.port_id, **params)\n\n # Send update to the agent.\n vm_info = dict(status=vm.status, vm_mac=vm.mac,\n segmentation_id=vm.segmentation_id,\n host=vm.host, port_uuid=vm.port_id,\n net_uuid=vm.network_id,\n oui=dict(ip_addr=ipaddr,\n vm_name=vm.name,\n vm_uuid=vm.instance_id,\n gw_mac=vm.gw_mac,\n fwd_mod=vm.fwd_mod,\n oui_id='cisco'))\n try:\n self.neutron_event.send_vm_info(vm.host,\n str(vm_info))\n except (rpc.MessagingTimeout, rpc.RPCException,\n rpc.RemoteError):\n LOG.error(_LE('Failed to send VM info to agent.'))", "def set_ip_adresses(self):\n # unfold a config tree for the current suffix, if any\n for interface, details in self.interfaces.items():\n for k, v in details.items():\n if k == 'address':\n ip, prefix = address_to_ip_prefix(v)\n self.interfaces[interface]['ip_address'] = ip\n self.interfaces[interface]['ip_prefix'] = prefix\n break\n if interface == 'wan':\n self.ip_address = ip\n if interface == 'ha_sync':\n self.ha_sync_ip_address = ip", "def change_IP(self,server_IP,MAC):\n content = {'server_IP':server_IP,'MAC_address':MAC}\n content = json.dumps(content)\n headers = {\"Content-Type\":\"application/json\"}\n #address will be given by the api\n r = requests.post(f\"http://{self.webserver_address}/api/camera/update_ip\", data = content,headers = headers,verify=False)\n if(r.status_code == 200):\n return True\n return False", "def get_externalip(self):\n\n myip = \"\"\n for i in range(5):\n myip = self.fetch(random.choice(self.server_list))\n if myip != \"\":\n return myip\n else:\n continue\n return \"\"", "def set_ip(self, ip: str, host_addr: str) -> None:\n self.config[\"linkIp\"] = ip\n self.config[\"ngapIp\"] = ip\n self.config[\"gtpIp\"] = ip", "def format_host(host):\n\n host = strip_suffix(host, \".lan.urlab.be\")\n host = strip_suffix(host, \".lan\")\n host = strip_suffix(host, \".local\")\n host = strip_suffix(host, \"iPodtouch\")\n host = strip_suffix(host, \"-PC\")\n host = strip_suffix(host, \"-pc\")\n\n host = strip_prefix(host, \"pc-\")\n host = strip_prefix(host, \"PC-\")\n host = strip_prefix(host, \"DESKTOP-\")\n host = strip_prefix(host, \"LAPTOP-\")\n host = strip_prefix(host, \"iPod-de-\")\n host = strip_prefix(host, \"iPadde\")\n\n return host", "def calculate_trima_address(testMachine):\r\n _machineBase = int(testMachine/256)\r\n _machineRemainder = int(testMachine-(_machineBase*256))\r\n _machineBase = str(_machineBase)\r\n _machineRemainder = str(_machineRemainder)\r\n _address = \"172.21.\"+_machineBase+\".\"+_machineRemainder\r\n \r\n return _address", "def real_ip(self):\n if not hasattr(self, \"_real_ip\"):\n response = get(ICANHAZIP)\n self._real_ip = self._get_response_text(response)\n\n return self._real_ip", "def setIpaddr(self):\n\t\tself.ipaddr = self.settings.getKeyValue('ipaddr')\n\t\tself.socket.send('setenv ipaddr ' + self.ipaddr+'\\r', 1)\t\t\n\t\treturn None", "def get_local_host_ip(self) -> str:", "def test_try_create_auto_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_auto_net_free.json'\n\n # Does get request\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, 
response.status_code)\n self.compare_values('10.0.1.2', response.data['ips'][0]['ip_formated'])", "def set_one(self, host_name, ip_address):\n self.hosts[host_name] = ip_address", "def set_host(self, host: str) -> None:\n _LOGGER.debug(\"Setting host to %s\", host)\n host_url = urlparse(host)\n self.scheme = host_url.scheme or \"http\"\n self.host = host_url.netloc or host_url.path\n self.base_url = f\"{self.scheme}://{self.host}\"\n self.api_url = f\"{self.base_url}/apps/api/{self.app_id}\"", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def enable_host(self, name):\n from soppa.local import aslocal\n self.guest_ip = self.guest_ip()\n self.guest_host_name = name\n # Host (remote) change\n self.file.set_setting('/etc/hosts', '{0} {1}'.format('127.0.0.1', self.guest_host_name))\n # local change\n aslocal()\n self.file.set_setting('/etc/hosts', '{0} {1}'.format(self.guest_ip, name))", "def JP_V0_addr(self, addr):\n\t\tself.IP = addr + self.V[0]", "def elReplaceStaticIP(self, ipaddress, netmask=\"255.255.255.0\", gateway=None, nameservers=None):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n # sanity check\n normalizedStaticIp = NetworkConfigurationStaticParameters.normalizeStaticIp(ipaddress, netmask, gateway, nameservers)\n commandSection = self.sectionByName(\"command\")\n # several set\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--ip[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.ipaddress + r\"\\g<2>\",\n commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--netmask[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.netmask + r\"\\g<2>\",\n commandSection.string)\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--gateway[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + normalizedStaticIp.gateway + r\"\\g<2>\",\n commandSection.string)\n if normalizedStaticIp.nameservers:\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + \",\".join(normalizedStaticIp.nameservers) + r\"\\g<2>\",\n commandSection.string)\n else:\n # remove option --nameserver\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*)--nameserver[ \\t]*(?:=|[ \\t])[ \\t]*[^\\s]+(.*)$\",\n r\"\\g<1>\" + r\"\\g<2>\",\n commandSection.string)\n return self", "def replace_helmrepo_url_with_floating_address(dbapi, helmrepository_url):\n\n parsed_helm_repo_url = urlparse(helmrepository_url)\n sc_network = \\\n dbapi.network_get_by_type(constants.NETWORK_TYPE_CLUSTER_HOST)\n sc_network_addr_pool = \\\n dbapi.address_pool_get(sc_network.pool_uuid)\n sc_float_ip = 
sc_network_addr_pool.floating_address\n if is_valid_ipv6(sc_float_ip):\n sc_float_ip = '[' + sc_float_ip + ']'\n\n return \"http://{}:{}{}\".format(\n sc_float_ip,\n get_http_port(dbapi),\n parsed_helm_repo_url.path\n )", "def configure(node):\n script = []\n script.append(Statements.exec(\"hostname %s\" % node.getName()))\n script.append(Statements.createOrOverwriteFile(\n \"/etc/hostname\", [node.getName()]))\n script.append(Statements.exec(\n \"sed -i 's/127.0.0.1/127.0.0.1\\t%s/' /etc/hosts\" % node.getName()))\n return script", "def elReplaceHostname(self, hostname):\n # see http://docs.redhat.com/docs/en-US/Red_Hat_Enterprise_Linux/6/html/Installation_Guide/s1-kickstart2-options.html\n hostname = re.escape(hostname) # precaution\n commandSection = self.sectionByName(\"command\")\n # change to hostname\n commandSection.string = re.sub(r\"(?m)^([ \\t]*network[ \\t]+.*--hostname[ \\t]*(?:=|[ \\t])[ \\t]*)[^\\s]+(.*)$\",\n r\"\\g<1>\" + hostname + r\"\\g<2>\",\n commandSection.string)\n return self", "def getHost():", "def getHost():", "def test_add_autoassigned_ipv6(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::1\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv6\")\n\n workloads[0].assert_can_ping(\"fd80:24e2:f998:72d6::3\", retries=3)\n workloads[1].assert_can_ping(\"fd80:24e2:f998:72d6::2\", retries=3)", "def _set_static_ip(name, session, vm_):\n ipv4_cidr = \"\"\n ipv4_gw = \"\"\n if \"ipv4_gw\" in vm_.keys():\n log.debug(\"ipv4_gw is found in keys\")\n ipv4_gw = vm_[\"ipv4_gw\"]\n if \"ipv4_cidr\" in vm_.keys():\n log.debug(\"ipv4_cidr is found in keys\")\n ipv4_cidr = vm_[\"ipv4_cidr\"]\n log.debug(\"attempting to set IP in instance\")\n set_vm_ip(name, ipv4_cidr, ipv4_gw, session, None)", "def test_ipam_ip_addresses_update(self):\n pass", "def test_get_internal_host(matrix):\n matrix.charm_config[\"prefer-internal-ip\"] = True\n matrix.charm_config[\"prefer-internal-host\"] = True\n assert matrix.get_internal_host() == \"10.10.10.10\"\n matrix.charm_config[\"prefer-internal-ip\"] = False\n assert matrix.get_internal_host() == \"mock.fqdn\"", "def address_string(self):\n\n if self.server.log_ip_activated:\n host = self.client_address[0]\n else:\n host = '127.0.0.1'\n if self.server.resolve_clients:\n return socket.getfqdn(host)\n else:\n return host", "def get_host_ip_addr():\n return nova_conf.my_ip", "def _change_server_address():\n\n # Changes the server address\n path = (f\"{RPKI_Validator_Wrapper.rpki_package_path}conf\"\n \"/application.properties\")\n prepend = \"server.address=\"\n replace = \"localhost\"\n replace_with = \"0.0.0.0\"\n utils.replace_line(path, prepend, replace, replace_with)\n return path", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def set_host(host_index):\n env.hosts = [public_dns_names[int(host_index)]]\n env.password = [public_pwds[int(host_index)]]", "def set_service_host(self, host):\n self._api_host = f\"https://{host}\"", "def fill_host(self, data):\n 
check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def fill_host(self, data):\n check_input_params(data, self.HOST)\n self.host = data[self.HOST]", "def change_host(self, str_cef, address):\r\n tmp = str_cef\r\n if str_cef.find(\"CEF:\",0,len(str_cef)) != -1:\r\n tmp = str_cef[:21] + str(address[0]) + str_cef[25:]\r\n\r\n elif str_cef.find(\"|\",0,len(str_cef)) != -1:\r\n st = str_cef.find(\"|\",0,len(str_cef))\r\n st = str_cef.find(\"|\",st+1,len(str_cef))\r\n tmp = str_cef[:st+1]+str(address[0])+\"|\"+str_cef[st+1:]\r\n return tmp", "def set_host_ipaddress(self, sHostIPAddress):\n\t\tcall_sdk_function('PrlVirtNet_SetHostIPAddress', self.handle, sHostIPAddress)", "def gen_ip(self):\n\n try:\n self.ip = self.auth_url.split(\":\")[1].strip(\"//\")\n except Exception:\n self.ip = socket.gethostbyname(socket.gethostname())\n print \"\\t! Error obtaining ip address from cred file. Using %s\" % (self.ip)", "def mainfunc(portdata, netprotc):\n print(\" * Starting Fedora Easyfix...\")\n print(\" * Port number : \" + str(portdata))\n netpdata = \"\"\n if netprotc == \"ipprotv6\":\n print(\" * IP version : 6\")\n netpdata = \"::\"\n elif netprotc == \"ipprotv4\":\n print(\" * IP version : 4\")\n netpdata = \"0.0.0.0\"\n main.config[\"TEMPLATES_AUTO_RELOAD\"] = True\n main.run(port=portdata, host=netpdata)", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def configure_remote_hostname_override(target='prod'):\n config = setup_env('etc/ep.remote.cfg')\n env.update(config._sections['energyportal_%s' % target])\n\n upload_template('remote.template.py',\n '/home/ubuntu/ep_site/settings/components/env/%s.py' % target,\n use_sudo=True, template_dir='fabfile/templates', use_jinja=True, context=env)", "def process_proxy_host():\n\n with settings(warn_only=True):\n run(\"sudo iptables-restore < /etc/iptables.rules\")\n run(\"sudo squid3 -f /etc/squid3/squid.conf\")", "def host2ip(self, irc, msg, args, hostname):\n \n try:\n ip = socket.gethostbyname(hostname)\n if ip:\n record = self._record_by_addr(ip)\n if record:\n reply = u'%s (%s)' % (ip, self._geoip_city_check(record))\n else:\n reply = u'geoIP Fehler!'\n \n except:\n reply = u'gethostbyname() Error'\n \n irc.reply(reply.encode('utf-8'))", "def set_host_addr(self, addr: str) -> None:\n self.config[\"host_addr\"] = addr", "def set_hostname(hostname=None, deploy=False):\n\n if not hostname:\n raise CommandExecutionError(\"Hostname option must not be none.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system\"\n ),\n \"element\": \"<hostname>{}</hostname>\".format(hostname),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def _update(self, host):\n pass", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def set_static_ip_address(self, context, msg):\n args = jsonutils.loads(msg)\n macaddr = args.get('mac')\n ipaddr = args.get('ip')\n LOG.debug('set_static_ip_address received: %(mac)s %(ip)s', (\n {'mac': macaddr, 'ip': ipaddr}))\n\n # Add the request into queue for processing.\n event_type = 'cli.static_ip.set'\n payload = {'mac': macaddr, 'ip': ipaddr}\n timestamp = time.ctime()\n data = (event_type, payload)\n pri = self.obj.PRI_LOW_START\n 
self.obj.pqueue.put((pri, timestamp, data))\n LOG.debug('Added request to add static ip into queue.')\n\n return 0", "def host_ip_address(self, host_index, vlan_index):\n if isinstance(vlan_index, tuple):\n vlan_index = vlan_index[0]\n return '10.%u.0.%u/%u' % (vlan_index+1, host_index+1, self.NETPREFIX)", "def host_ip(self, host_ip):\n\n self._host_ip = host_ip", "def set_hostname(dut, host_name):\n cmd = \"sudo hostname {}\".format(host_name)\n st.config(dut, cmd)\n return", "def on_the_network_global_configuration_page_change_the_first_nameserver_to_nameserver1(driver, nameserver1):\n global nameserver_1\n nameserver_1 = nameserver1\n assert wait_on_element(driver, 7, '//h4[contains(.,\"Hostname and Domain\")]')\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Nameserver 1\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Nameserver 1\"]').send_keys(nameserver1)", "def resolve_hostname(request, hostname):\n try:\n ipaddress = usm_wrapper_utils.resolve_hostname(hostname)\n except Exception, e:\n log.exception(e)\n return Response(\n {'message': 'Error while resolving hostname'}, status=417)\n\n return Response({'IP_Address': ipaddress}, status=200)", "def get_hostname(self):\n # We set a default in install.py in case it isn't preseeded but when we\n # preseed, we are looking for None anyhow.\n return ''", "def set_deafult_gw(self, args):\n\n gw_ip = ip_address(args.ip)\n gw_info = UplinkGatewayInfo()\n gw_info.update_ip(str(gw_ip))\n print(\"set Default gw IP to %s\" % gw_info.get_gw_ip())", "def add_host_to_checkmk(hostname, hostlabels):\n\n logging.debug('going to add %s with hostlabels %s' % (hostname, hostlabels))\n\n checkmk_api_url = config['checkmk_api_url']\n checkmk_api_username = config['checkmk_api_username']\n checkmk_api_secret = config['checkmk_api_secret']\n checkmk_default_folder = config['checkmk_default_folder']\n checkmk_default_location = config['checkmk_default_location']\n checkmk_puppetdb_label = config['checkmk_puppetdb_label']\n\n hostlabels['from_puppetdb'] = checkmk_puppetdb_label\n\n # Determine if host is dual stacked v4/v6 and include ip-v4v6\n # address_family if so, else leave address_family off to use default\n try:\n d = dns.resolver.resolve(hostname, 'AAAA')\n logging.debug('-- host appears dual stacked, adding ip-v4v6')\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'folder': checkmk_default_folder,\n 'attributes': {\n 'tag_location': checkmk_default_location,\n 'tag_address_family': 'ip-v4v6',\n 'labels': hostlabels\n }\n })}\n except Exception as e:\n logging.debug('-- host not dual stacked')\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'folder': checkmk_default_folder,\n 'attributes': {\n 'tag_location': checkmk_default_location,\n 'labels': hostlabels\n }\n })}\n\n logging.debug('-- adding host %s', hostname)\n r = requests.post(\"%s?action=add_host&_username=%s&_secret=%s\" % (checkmk_api_url, checkmk_api_username, checkmk_api_secret), data=payload)\n logging.debug('-- got resp code = %d' % r.status_code)\n logging.debug('-- got resp text = %s' % r.text)\n r_json = json.loads(r.text)\n\n # Successful add_host gives response of {\"result\": null, \"result_code\": 0}\n if r_json['result_code'] == 0 and r_json['result'] is None:\n logging.info('added host %s successfully', hostname)\n else:\n logging.warn('failed to add host %s', r_json['result'])", "def _generate_new_address(self) -> str:\n 
while True:\n address = \"0x\" + \"\".join([str(hex(randint(0, 16)))[-1] for _ in range(20)])\n if address not in self.accounts.keys():\n return address", "def set_dns(self, pardus_profile):\n\n if pardus_profile.get_name_mode() == \"default\":\n default_nameservers = \";\".join( get_default_nameservers())\n default_nameservers = default_nameservers + \";\" # Make sure addresses end with ';'\n self.ignore_auto_dns = \"true\"\n return str(default_nameservers)\n elif pardus_profile.get_name_mode() == \"custom\":\n name_server = str(pardus_profile.get_name_server())\n name_server = name_server + \";\"\n self.ignore_auto_dns = \"true\"\n return str(name_server)\n else:\n # Nothing done in auto option\n return \"none\"", "def _create_server(self):\n server = super()._create_server(networks='none')\n source_host = server['OS-EXT-SRV-ATTR:host']\n target_host = 'host2' if source_host == 'host1' else 'host1'\n return server, source_host, target_host", "def __init__(self, identity, fqdn, primary_ip, is_online, memory, storage,\n bandwidth, ip_addresses):\n super(Host, self).__init__(identity.name, identity.key, identity.hash,\n identity.vendor)\n self.fqdn = fqdn\n self.primary_ip = primary_ip\n self.is_online = is_online\n self.memory = memory\n self.storage = storage\n self.bandwidth = bandwidth\n self.ip_addresses = ip_addresses", "def add_fixed_ip_to_instance(self, context, instance_id, host, network_id):\n args = {'instance_id': instance_id,\n 'host': host,\n 'network_id': network_id}\n rpc.cast(context, FLAGS.network_topic,\n {'method': 'add_fixed_ip_to_instance',\n 'args': args})", "def test_correct_sheme_host_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n self.assertIn(self.httpbin.client['host'], urlparse(req.prepared_request.url).netloc)\n self.assertIn(self.httpbin.client['scheme'], urlparse(req.prepared_request.url).scheme)\n self.assertIn(self.httpbin.client['get_my_ip']['path'], urlparse(req.prepared_request.url).path)", "def set_permitted_ip(address=None, deploy=False):\n\n if not address:\n raise CommandExecutionError(\"Address option must not be empty.\")\n\n ret = {}\n\n query = {\n \"type\": \"config\",\n \"action\": \"set\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/permitted-ip\",\n \"element\": \"<entry name='{}'></entry>\".format(address),\n }\n\n ret.update(__proxy__[\"panos.call\"](query))\n\n if deploy is True:\n ret.update(commit())\n\n return ret", "def generateIPAddress(base, subnet, host, mask):\n\n addr = str(base)+'.'+str(subnet)+'.' 
+ str(host)\n if mask != None:\n addr = addr + '/' + str(mask)\n return addr", "def setServerip(self):\n\t\tself.serverip = self.settings.getKeyValue('serverip')\n\t\tself.socket.send('setenv serverip ' + self.serverip+'\\r', 1)\n\t\treturn None", "async def test_update_address(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert device.api.config.host == \"1.2.3.4\"\n\n with patch(\n \"homeassistant.components.axis.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, respx.mock:\n mock_default_vapix_requests(respx, \"2.3.4.5\")\n await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n \"host\": \"2.3.4.5\",\n \"port\": 80,\n \"name\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": SOURCE_ZEROCONF},\n )\n await hass.async_block_till_done()\n\n assert device.api.config.host == \"2.3.4.5\"\n assert len(mock_setup_entry.mock_calls) == 1", "def ipv4_interface_setup(self):\n\n for i in self._nodes.items():\n node = i[1]\n\n # Show the current interfaces with IP addresses\n current_ints = VPPUtil.get_int_ip(node)\n if current_ints != {}:\n print(\"\\nThese are the current interfaces with IP addresses:\")\n for items in sorted(current_ints.items()):\n name = items[0]\n value = items[1]\n if \"address\" not in value:\n address = \"Not Set\"\n else:\n address = value[\"address\"]\n print(\"{:30} {:20} {:10}\".format(name, address, value[\"state\"]))\n question = \"\\nWould you like to keep this configuration \" \"[Y/n]? \"\n answer = self._ask_user_yn(question, \"y\")\n if answer == \"y\":\n continue\n else:\n print(\"\\nThere are currently no interfaces with IP \" \"addresses.\")\n\n # Create a script that add the ip addresses to the interfaces\n # and brings the interfaces up\n ints_with_addrs = self._ipv4_interface_setup_questions(node)\n content = \"\"\n for ints in ints_with_addrs:\n name = ints[\"name\"]\n addr = ints[\"addr\"]\n setipstr = \"set int ip address {} {}\\n\".format(name, addr)\n setintupstr = \"set int state {} up\\n\".format(name)\n content += setipstr + setintupstr\n\n # Write the content to the script\n rootdir = node[\"rootdir\"]\n filename = rootdir + \"/vpp/vpp-config/scripts/set_int_ipv4_and_up\"\n with open(filename, \"w+\") as sfile:\n sfile.write(content)\n\n # Execute the script\n cmd = \"vppctl exec {}\".format(filename)\n (ret, stdout, stderr) = VPPUtil.exec_command(cmd)\n if ret != 0:\n logging.debug(stderr)\n\n print(\"\\nA script as been created at {}\".format(filename))\n print(\"This script can be run using the following:\")\n print(\"vppctl exec {}\\n\".format(filename))", "def set_host_aliases():\n with open('/tmp/hosts', 'w') as f:\n uname = os.uname()\n f.write(f'{uname.nodename} localhost\\n')\n os.environ['HOSTALIASES'] = '/tmp/hosts'", "def setup_gateway(self, args):\n if args.preponly:\n return\n\n # edit the gateway properties file and restart the gateway\n # mdm.ip.addresses = <addresses of node0,node1>\n # security.bypass_certificate_check = true\n _config = '/opt/emc/scaleio/gateway/webapps/ROOT/WEB-INF/classes/gatewayUser.properties'\n _commands = []\n #_commands.append(\"sed -i 's|^mdm.ip.addresses.*|mdm.ip.addresses={},{}|' {}\".format(args.IP[0], args.IP[1], _config))\n #_commands.append(\"sed -i 's|^security.bypass_certificate_check.*|security.bypass_certificate_check=true|' {}\".format( _config))\n _commands.append(\"systemctl restart scaleio-gateway\")\n self.node_execute_multiple(args.IP[2], args.USERNAME, 
args.PASSWORD, _commands)\n return", "def public_address() -> str:\n check_timeout = float(CONFIG['network']['check_timeout'])\n check_host_list = CONFIG.get_list('network', 'check_host_list')\n try:\n for check_url in check_host_list:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n return None\n except Exception as error:\n return None", "def show_hostname(self):\n if self.hostname is None:\n self.get_version()\n print self.hostname", "def configureDHCP():\n dhcpStart = config.get(\"hotspot\", \"dhcpstart\")\n dhcpEnd = config.get(\"hotspot\", \"dhcpend\")\n dnsmasqConfig = f\"\"\"#PI Hotspot config\ndomain-needed\nbogus-priv\ndhcp-option=option:dns-server\ndhcp-authoritative\ndhcp-range={dhcpStart},{dhcpEnd},1h\n\"\"\"\n confFile = open(\"/etc/dnsmasq.conf\", \"w\")\n confFile.write(dnsmasqConfig)\n confFile.close()", "def cleanup(self):\n all_aps_info = self.zd.get_all_ap_info()\n all_aps_ins = self.testbed.components['AP']\n for ap_ins in all_aps_ins:\n for ap_info in all_aps_info:\n if ap_ins.base_mac_addr.upper() == ap_info.get('mac').upper() and ap_info.get('ip_addr') != '':\n ap_ins.ip_addr = ap_info.get('ip_addr')", "def set_hostname(self, userid, hostname, os_version):\n tmp_path = self._pathutils.get_guest_temp_path(userid)\n if not os.path.exists(tmp_path):\n os.makedirs(tmp_path)\n tmp_file = tmp_path + '/hostname.sh'\n\n lnxdist = self._dist_manager.get_linux_dist(os_version)()\n lines = lnxdist.generate_set_hostname_script(hostname)\n with open(tmp_file, 'w') as f:\n f.writelines(lines)\n\n requestData = \"ChangeVM \" + userid + \" punchfile \" + \\\n tmp_file + \" --class x\"\n LOG.debug(\"Punch script to guest %s to set hostname\" % userid)\n\n try:\n self._smtclient._request(requestData)\n except exception.SDKSMTRequestFailed as err:\n msg = (\"Failed to punch set_hostname script to userid '%s'. 
SMT \"\n \"error: %s\" % (userid, err.format_message()))\n LOG.error(msg)\n raise exception.SDKSMTRequestFailed(err.results, msg)\n finally:\n self._pathutils.clean_temp_folder(tmp_path)", "def freeze_host(self, **kwargs):\n put_body = json.dumps(kwargs)\n resp, body = self.put('os-services/freeze', put_body)\n self.validate_response(schema.freeze_host, resp, body)\n return rest_client.ResponseBody(resp)", "def dvs_remote_ip_prefix(self):\n self.show_step(1)\n self.env.revert_snapshot(\"dvs_vcenter_systest_setup\")\n cluster_id = self.fuel_web.get_last_created_cluster()\n\n os_ip = self.fuel_web.get_public_vip(cluster_id)\n os_conn = os_actions.OpenStackActions(\n os_ip, SERVTEST_USERNAME,\n SERVTEST_PASSWORD,\n SERVTEST_TENANT)\n\n tenant = os_conn.get_tenant(SERVTEST_TENANT)\n\n # Create security group with rules for ssh and ping\n security_group = os_conn.create_sec_group_for_ssh()\n\n self.show_step(2)\n net_1 = os_conn.create_network(network_name=self.net_data[0].keys()[0],\n tenant_id=tenant.id)['network']\n\n subnet = os_conn.create_subnet(\n subnet_name=net_1['name'],\n network_id=net_1['id'],\n cidr=self.net_data[0][self.net_data[0].keys()[0]],\n ip_version=4)\n\n # Check that network are created\n assert_true(os_conn.get_network(net_1['name'])['id'] == net_1['id'])\n\n # Create Router_01, set gateway and add interface to external network.\n router_1 = os_conn.create_router('router_1', tenant=tenant)\n\n # Add net_1 to router_1\n os_conn.add_router_interface(router_id=router_1[\"id\"],\n subnet_id=subnet[\"id\"])\n\n self.show_step(3)\n self.show_step(4)\n self.show_step(5)\n sg1 = os_conn.nova.security_groups.create('SG1', \"descr\")\n sg2 = os_conn.nova.security_groups.create('SG2', \"descr\")\n\n access_point_1, access_point_ip_1 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg1.name])\n\n access_point_2, access_point_ip_2 = openstack.create_access_point(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n security_groups=[security_group.name, sg2.name])\n\n self.show_step(6)\n _sg_rules = os_conn.neutron.list_security_group_rules()\n sg_rules = [sg_rule for sg_rule in _sg_rules['security_group_rules']\n if sg_rule['security_group_id'] in [sg1.id, sg2.id]]\n for rule in sg_rules:\n os_conn.neutron.delete_security_group_rule(rule['id'])\n\n self.show_step(7)\n for rule in [self.icmp, self.tcp]:\n rule[\"security_group_rule\"][\"security_group_id\"] = sg1.id\n rule[\"security_group_rule\"][\"remote_ip_prefix\"] = access_point_ip_1\n rule[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(rule)\n rule[\"security_group_rule\"][\"direction\"] = \"egress\"\n os_conn.neutron.create_security_group_rule(rule)\n\n # Get private ip of access_point_2\n private_ip = os_conn.get_nova_instance_ip(access_point_2,\n net_name=net_1['name'])\n\n self.show_step(8)\n self.tcp[\"security_group_rule\"][\"security_group_id\"] = sg2.id\n self.tcp[\"security_group_rule\"][\"remote_ip_prefix\"] = private_ip\n os_conn.neutron.create_security_group_rule(self.tcp)\n self.tcp[\"security_group_rule\"][\"direction\"] = \"ingress\"\n os_conn.neutron.create_security_group_rule(self.tcp)\n\n self.show_step(9)\n istances_sg1 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n security_groups=[sg1.name])\n\n istances_sg2 = openstack.create_instances(\n os_conn=os_conn,\n nics=[{'net-id': net_1['id']}],\n vm_count=1,\n 
security_groups=[sg2.name])\n openstack.verify_instance_state(os_conn)\n\n # Get private ips of instances\n ips = {\n 'SG1': [os_conn.assign_floating_ip(i).ip for i in istances_sg1],\n 'SG2': [os_conn.get_nova_instance_ip(i, net_name=net_1['name'])\n for i in istances_sg2]\n }\n\n self.show_step(10)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [access_point_ip_1]\n openstack.check_connection_through_host(access_point_ip_1,\n ip_pair,\n timeout=60 * 4)\n\n self.show_step(11)\n ip_pair = dict.fromkeys(ips['SG1'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG1'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_1, ip_pair, result_of_command=1)\n\n self.show_step(12)\n self.show_step(13)\n ip_pair = dict.fromkeys(ips['SG2'])\n for key in ip_pair:\n ip_pair[key] = [value for value in ips['SG2'] if key != value]\n openstack.check_connection_through_host(\n access_point_ip_2, ip_pair, result_of_command=1)", "def reflect(host, port):\n global URL\n URL = URL.replace('<ip>', host).replace('<port>', str(port))", "def _set_nameserver(self, instance):\n ctxt = context.get_admin_context()\n ip = db.instance_get_fixed_address(ctxt, instance['id'])\n network = db.fixed_ip_get_network(ctxt, ip)\n\n try:\n _, err = utils.execute('sudo', 'vzctl', 'set', instance['id'],\n '--save', '--nameserver', network['dns'])\n if err:\n LOG.error(err)\n except Exception as err:\n LOG.error(err)\n raise exception.Error('Unable to set nameserver for %s' %\n instance['id'])", "def topo_conf():\n for k in switches.keys():\n switches_ip[k] = IPAddr((192<<24)+int(k))\n switches_mac[k] = EthAddr(\"aa\"+ \"%010d\"%(k))", "def update(call: ServiceCall) -> None:\n called_host = call.data[ATTR_HOST]\n if called_host in hass.data[DOMAIN]:\n hass.data[DOMAIN][called_host].update()\n else:\n for iperf3_host in hass.data[DOMAIN].values():\n iperf3_host.update()", "def set_deafult_gw_mac(self, args):\n\n gw_mac = ip_address(args.ip)\n gw_info = UplinkGatewayInfo()\n gw_info.update_mac(str(gw_mac))\n\n print(\"set Default gw mac to %s\" % gw_info.get_gw_mac())", "def setautoupdate(self, auto_update=1):\n # (net_bn* net, int auto_update)\n cnetica.SetNetAutoUpdate_bn.argtypes = [c_void_p, c_int]\n cnetica.SetNetAutoUpdate_bn.restype = None\n cnetica.SetNetAutoUpdate_bn(self.net, auto_update)", "def eff_request_host(request):\n erhn = req_host = request_host(request)\n if req_host.find(\".\") == -1 and not cookiejar.IPV4_RE.search(req_host):\n erhn = req_host + \".local\"\n return req_host, erhn", "def get_internal_host(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return ip\n return fqdn", "def get_internal_url(self):\n prefer_internal_ip = self.charm_config.get(\"prefer-internal-ip\")\n fqdn = socket.getfqdn()\n ip = socket.gethostbyname(fqdn)\n if prefer_internal_ip:\n return \"http://{}:8008\".format(ip)\n return \"http://{}:8008\".format(fqdn)", "def change_address(\n vm_hostname, new_address,\n offline=False, migrate=False, allow_reserved_hv=False,\n offline_transport='drbd',\n):\n\n if not offline:\n raise IGVMError('IP address change can be only performed offline')\n\n with _get_vm(vm_hostname) as vm:\n if vm.dataset_obj['datacenter_type'] != 'kvm.dct':\n raise NotImplementedError(\n 'This operation is not yet supported for {}'.format(\n vm.dataset_obj['datacenter_type'])\n )\n\n new_address = ip_address(new_address)\n\n if 
vm.dataset_obj['intern_ip'] == new_address:\n raise ConfigError('New IP address is the same as the old one!')\n\n if not vm.hypervisor.get_vlan_network(new_address) and not migrate:\n err = 'Current hypervisor does not support new subnet!'\n raise ConfigError(err)\n\n new_network = Query(\n {\n 'servertype': 'route_network',\n 'state': 'online',\n 'network_type': 'internal',\n 'intern_ip': Contains(new_address),\n }\n ).get()['hostname']\n\n vm_was_running = vm.is_running()\n\n with Transaction() as transaction:\n if vm_was_running:\n vm.shutdown(\n transaction=transaction,\n check_vm_up_on_transaction=False,\n )\n vm.change_address(\n new_address, new_network, transaction=transaction,\n )\n\n if migrate:\n vm_migrate(\n vm_object=vm,\n run_puppet=True,\n offline=True,\n no_shutdown=True,\n allow_reserved_hv=allow_reserved_hv,\n offline_transport=offline_transport,\n )\n else:\n vm.hypervisor.mount_vm_storage(vm, transaction=transaction)\n vm.run_puppet()\n vm.hypervisor.redefine_vm(vm)\n vm.hypervisor.umount_vm_storage(vm)\n\n if vm_was_running:\n vm.start()", "def modifyResolve_6_0(msw):\n hostip = socket.gethostbyname('mygen')\n name = 'nameserver ' + hostip+'\\n'\n name1 = 'search e164.com'+'\\n'\n newFileContents = [name,name1]\n name_msw='nameserver 127.0.0.1\\n'\n\n try:\n # Back up the original file on local host\n if (os.path.isfile('/etc/resolv.conf.bkup') == False):\n os.system('sudo cp /etc/resolv.conf /etc/resolv.conf.bkup')\n rconfile = open('/etc/resolv.conf',\"w\")\n rconfile.writelines(newFileContents)\n rconfile.close()\n\n rconfile = open('/tmp/resolv_msw.conf',\"w\")\n rconfile.writelines(name_msw)\n rconfile.close()\n\n ##31291 Taking the bkup of resolv.conf file from MSW to the\n ##/tmp directory of msw\n resultString = msw.filter('ls -lrt /tmp/resolv.conf.enum.bkup')\n if (resultString.find('No such file or directory') !=-1):\n msw.assertCommand('cp /etc/resolv.conf /tmp/resolv.conf.enum.bkup')\n\n # Copy the new File to the MSW\n os.system(\"scp -q /tmp/resolv_msw.conf root@mymsw:/etc/resolv.conf\")\n log.debug(\"Copying /tmp/resolv_msw.conf on msw\")\n\n if (msw.context['nextest.scm_configuration'] == 'ON'):\n\n os.system(\"scp -q /tmp/resolv_msw.conf root@bkupmsw:/etc/resolv.conf\")\n log.debug(\"Copying /tmp/resolv_msw.conf on backup msw\")\n\n\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #32363 Modified to resolve string formatting error\n log.error('File resolv.conf does not exist %s' %str(msg))", "def host_config(keep_defaults=False, splittx=False, splitrx=False, rss=False):\n print(f'''\\n{'-'*60}\\n\\t\\t\\t\\tHost Optimization\\n{'-'*60}\\n''')\n hosts = settings.getValue('HOST_DETAILS')\n # Create object of HostConfig() to access functions\n config_host = Host.HostConfig()\n for host in hosts:\n print(f' - Getting the Host current Status')\n get_host_config(header=f\"Current Host Status ({host['HOST']})\")\n client = HostSession.HostSession().connect(host['HOST'], host['USER'], host['PASSWORD'], False)\n _NICS = host['NICS'].split(',')\n logger.info(f'Getting ESXi version details')\n ver = config_host.get_host_version(client)\n logger.info(f'{ver}')\n logger.info('Start applying optimizations for the specific Esxi version')\n print(f' - Getting configuration settings from host.json')\n print(f' - Applying the host Optimization.')\n if ver.find('6.5') > -1:\n logger.info('Getting configuration settings from host.json')\n params = settings.getValue('ESXI65')\n logger.info(f'Configuring NICS {_NICS} for driver module: 
{params[\"NIC_DRIVER\"]}')\n for nic in _NICS:\n logger.info(f'Configuring {nic}')\n success, message = config_host.config_nic_driver(client, nic, params['NIC_DRIVER']['NAME'])\n if eval(success):\n logger.info(f'{nic} driver configuration success: {eval(success)}')\n else:\n logger.error(f'{nic} driver configuration success: {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n if rss:\n logger.info(f'RSS to be enabled: {params[\"ENABLE_RSS\"]}')\n logger.info('Configuring RSS')\n success, message = config_host.config_rss(client, params[\"NIC_DRIVER\"]['NAME'], enable=params[\"ENABLE_RSS\"])\n if eval(success):\n logger.info(f'RSS configuration success : {eval(success)}')\n else:\n logger.info(f'RSS configuration success : {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n else:\n success, message = config_host.config_rss(client, params[\"NIC_DRIVER\"]['NAME'], enable=False)\n if eval(success):\n logger.info(f'RSS configuration success : {eval(success)}')\n else:\n logger.info(f'RSS configuration success : {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n\n for nic in _NICS:\n logger.info(f'Configuring Physical NIC Ring Size as {params[\"NIC_BUFFER_SIZE\"]} on {nic}')\n success, message = config_host.config_nic_ring_size(client, nic, params[\"NIC_BUFFER_SIZE\"][\"RX\"], params[\"NIC_BUFFER_SIZE\"][\"TX\"])\n if eval(success):\n logger.info(f'{nic} ring size configuration success: {eval(success)}')\n else:\n logger.error(f'{nic} ring size configuration success: {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n\n logger.info(f'Configuring SW TX queue length as {params[\"SW_TX_QUEUE\"]}')\n success, message = config_host.config_sw_tx_queue_size(client, params[\"SW_TX_QUEUE\"])\n if eval(success):\n logger.info(f'Software TX queue configuration success: {eval(success)}')\n else:\n logger.error(f'Software TX queue configuration success: {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n\n logger.info(f'Queue Pairing to be enabled: {params[\"ENABLE_QUEUE_PAIRING\"]}')\n success, message = config_host.config_queue_pairing(client, enable=params[\"ENABLE_QUEUE_PAIRING\"])\n if eval(success):\n logger.info(f'Queue Pairing configuration success: {eval(success)}')\n else:\n logger.info(f'Queue Pairing configuration success: {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n if splittx:\n logger.info(f'Split TX mode to be enabled: {params[\"ENABLE_TX_SPLIT\"]}')\n for nic in _NICS:\n logger.info(f'Configuring Split TX on {nic}')\n success, message = config_host.config_tx_split(client, nic, enable=params[\"ENABLE_TX_SPLIT\"])\n if eval(success):\n logger.info(\n f'Split TX configuration success: {eval(success)}')\n else:\n logger.error(\n f'Split TX configuration success: {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n else:\n logger.info('Getting configuration settings from host.json')\n params = settings.getValue('ESXI60U2')\n logger.info(ver)\n logger.info(f'Hyper threading enabled: {config_host.is_hyperthreading_enabled(client)}')\n logger.info(\n f'Configure Hyper threading: 
enable={params[\"ENABLE_HYPERTHREADING\"]} config success={config_host.config_hyperthreading(client, enable=params[\"ENABLE_HYPERTHREADING\"])}')\n\n logger.info(f'NICS {_NICS}')\n logger.info(f'Configuring {params[\"NIC_DRIVER\"]}')\n for nic in _NICS:\n logger.info(\"{} : config={}\".format(nic, config_host.config_nic_driver(client, nic, params['NIC_DRIVER']['NAME'])))\n if rss:\n logger.info(f'RSS enabled: {config_host.verify_rss(client, params[\"NIC_DRIVER\"])}')\n logger.info(f'Configuring RSS: enable = {params[\"ENABLE_RSS\"]} success=\\\n {config_host.config_rss(client, params[\"NIC_DRIVER\"][\"NAME\"], enable=params[\"ENABLE_RSS\"])}')\n else:\n success, message = config_host.config_rss(client, params[\"NIC_DRIVER\"]['NAME'], enable=False)\n if eval(success):\n logger.info(f'RSS configuration success : {eval(success)}')\n else:\n logger.info(f'RSS configuration success : {eval(success)}')\n logger.info(f'host output: {message}')\n HostSession.HostSession().disconnect(client)\n return False\n\n for nic in _NICS:\n logger.info(f'Configuring Physical NIC Ring Size as {params[\"NIC_BUFFER_SIZE\"]} on {nic} : \\\n isSet = {config_host.verify_nic_ring_size(client, nic, params[\"NIC_BUFFER_SIZE\"][\"RX\"], params[\"NIC_BUFFER_SIZE\"][\"TX\"])} : \\\n config success={config_host.config_nic_ring_size(client, nic, params[\"NIC_BUFFER_SIZE\"][\"RX\"], params[\"NIC_BUFFER_SIZE\"][\"TX\"])} ')\n\n logger.info(f'Configuring SW TX queue length as {params[\"SW_TX_QUEUE\"]}: isSet={config_host.verify_sw_tx_queue_size(client, params[\"SW_TX_QUEUE\"])} \\\n : config success = {config_host.config_sw_tx_queue_size(client, params[\"SW_TX_QUEUE\"])}')\n\n logger.info(f'Configuring Queue Pairing enabled ={params[\"ENABLE_QUEUE_PAIRING\"]}: isEnabled={config_host.is_queue_pairing_enabled(client)} \\\n : config success = {config_host.config_queue_pairing(client, enable=params[\"ENABLE_QUEUE_PAIRING\"])}')\n if splittx:\n for nic in _NICS:\n logger.info(f'Configuring Split TX mode ={params[\"ENABLE_TX_SPLIT\"]}: isEnabled={config_host.is_tx_split_enabled(client, nic)}\\\n : config success = {config_host.config_tx_split(client, nic, enable=params[\"ENABLE_TX_SPLIT\"])}')\n print(f' - Getting the Host Status after Optimizations')\n get_host_config(header=f\"Post Host Optimization ({host['HOST']})\")\n HostSession.HostSession().disconnect(client)\n return True", "def pre_instance_ip_create(self, resource_dict):\n pass", "def setIP(self, idx, ip):\n self.ip[int(idx)-1] = ip", "def checkHost(host):\n if \"192.168.\" in host:\n return False\n elif \"169.254.\" in host: #APIPA (Automatic Private Internet Protocol Addressing)\n return False\n elif re.match(\"^(127\\.)\",host):\n return False\n elif re.match(\"^(10\\.)\",host):\n return False\n elif re.match(\"^(172\\.1[6-9]\\.)|(172\\.2[0-9]\\.)|(172\\.3[0-1]\\.)\",host):\n return False\n else:\n return True", "def set(isamAppliance, primaryServer=None, secondaryServer=None, tertiaryServer=None, searchDomains=None, auto=True,\n autoFromInterface=None, check_mode=False, force=False):\n\n if isinstance(auto, basestring):\n if auto.lower() == 'true':\n auto = True\n else:\n auto = False\n\n # check autoFromInterface. 
If it is a label replace it with corresponding interface UUID, else leave it untouched (treat as UUID)\n if autoFromInterface is not None:\n ret_obj = ibmsecurity.isam.base.network.interfaces.get_all(isamAppliance)\n for intfc in ret_obj['data']['interfaces']:\n if intfc['label'] == autoFromInterface:\n autoFromInterface = intfc['uuid']\n\n check_value,warnings = _check(isamAppliance, primaryServer, secondaryServer, tertiaryServer, searchDomains, auto,\n autoFromInterface)\n if force is True or check_value is False:\n if check_mode is True:\n return isamAppliance.create_return_object(changed=True, warnings=warnings)\n else:\n return isamAppliance.invoke_put(\n \"Updating the DNS configuration\",\n \"/net/dns\",\n {\n 'auto': auto,\n 'autoFromInterface': autoFromInterface,\n 'primaryServer': primaryServer,\n 'secondaryServer': secondaryServer,\n 'tertiaryServer': tertiaryServer,\n 'searchDomains': searchDomains\n }, requires_model=requires_model)\n\n return isamAppliance.create_return_object(warnings=warnings)", "def purchase_ip(self, debug=False):\n json_scheme = self.gen_def_json_scheme('SetPurchaseIpAddress')\n json_obj = self.call_method_post(method='SetPurchaseIpAddress', json_scheme=json_scheme, debug=debug)\n try:\n ip = Ip()\n ip.ip_addr = json_obj['Value']['Value']\n ip.resid = json_obj['Value']['ResourceId']\n return ip\n except:\n raise Exception('Unknown error retrieving IP.')" ]
[ "0.60582787", "0.6001809", "0.57579035", "0.5625545", "0.5491891", "0.5420629", "0.5379404", "0.5277771", "0.52422714", "0.52219576", "0.52120817", "0.52100813", "0.52078915", "0.5189471", "0.5156395", "0.51266915", "0.5107505", "0.5092903", "0.5082044", "0.5071935", "0.50623417", "0.5057758", "0.5056907", "0.50416505", "0.5035362", "0.50150806", "0.5003009", "0.5002411", "0.5002411", "0.4999194", "0.49990964", "0.4983992", "0.4978427", "0.49769217", "0.49602497", "0.49576798", "0.49557167", "0.49547702", "0.4948383", "0.4945151", "0.4945151", "0.4941011", "0.49364153", "0.49338692", "0.49298584", "0.49066988", "0.4903462", "0.48895213", "0.48854598", "0.4877532", "0.48593426", "0.48579863", "0.48567677", "0.48530003", "0.48411828", "0.48329514", "0.4828146", "0.4827641", "0.48258296", "0.48118073", "0.480165", "0.48005134", "0.47931355", "0.47902942", "0.47898525", "0.4782605", "0.4776085", "0.47608435", "0.4755216", "0.47327572", "0.47256902", "0.47243977", "0.4719246", "0.47057348", "0.47014672", "0.46987528", "0.46970594", "0.46904424", "0.46874398", "0.46871364", "0.46870685", "0.4686027", "0.46838972", "0.46791387", "0.4678603", "0.46756345", "0.467545", "0.4672663", "0.46711928", "0.46693018", "0.46688116", "0.46654218", "0.46578866", "0.46556374", "0.46499056", "0.46473074", "0.46416947", "0.46405628", "0.46384194", "0.4635389" ]
0.7036063
0
Validates the manifest file. Ensures that the required fields in the manifest are present and valid
def validate_manifest(manifest_json):
    manifest_json = copy.deepcopy(manifest_json)

    for field in ["schemes", "host", "basePath", "info"]:
        if field not in manifest_json:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file.", fg="red").format(field),
                json=manifest_json)

    for field in ["contact", "title", "description", "x-21-total-price", "x-21-quick-buy", "x-21-category"]:
        if field not in manifest_json["info"]:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file under the 'info' section.",
                            fg="red").format(field),
                json=manifest_json)

    for field in {"name", "email"}:
        if field not in manifest_json["info"]["contact"]:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file under the 'contact' section.",
                            fg="red").format(field),
                json=manifest_json)

    for field in ["min", "max"]:
        if field not in manifest_json["info"]["x-21-total-price"]:
            raise exceptions.ValidationError(
                click.style("Field '{}' is missing from the manifest file under the "
                            "'x-21-total-price' section.", fg="red").format(field),
                json=manifest_json)

    if len(manifest_json["schemes"]) == 0:
        raise exceptions.ValidationError(
            click.style("You have to specify either HTTP or HTTPS for your endpoint under the "
                        "`schemes` section.", fg="red"),
            json=manifest_json)

    valid_app_categories = {'blockchain', 'entertainment', 'social', 'markets', 'utilities', 'iot'}
    if manifest_json["info"]["x-21-category"].lower() not in valid_app_categories:
        valid_categories = ", ".join(valid_app_categories)
        raise exceptions.ValidationError(
            click.style("'{}' is not a valid category for the 21 marketplace. Valid categories are {}.",
                        fg="red").format(manifest_json["info"]["x-21-category"], valid_categories),
            json=manifest_json)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_manifest(parser, options):\n if not options.manifest:\n return\n\n template = \"When specifying --manifest, {0} is also required\"\n\n if not options.manifest_id:\n parser.error(template.format(\"--manifest-id\"))\n \n if not options.manifest_service:\n parser.error(template.format(\"--manifest-service\"))\n\n if not options.manifest_version:\n parser.error(template.format(\"--manifest-version\"))", "def _validate(self):\n if not self._contents.has_key('type'):\n raise ValidationFailed(\"Metadata file %s contains no type field\" % (self._filename))\n \n if not self._contents.has_key('version'):\n raise ValidationFailed(\"Metadata file %s contains no version field\" %\n (self._filename))", "def load_manifest(filename):\n\n data = manifest.load(filename)\n for field in manifest.validate(data):\n name = field.cfg or ''\n if name and name[-1] != '.':\n name += '>'\n name += field.name\n for msg in field.warnings:\n print('WARNING: {}@{} {}'.format(filename, name, msg))\n for msg in field.errors:\n print('CRITICAL: {}@{} {}'.format(filename, name, msg))\n return data", "def check_manifest(manifest):\n if not manifest:\n raise Exception('manifest is null')\n\n for key in ['dublin_core', 'checking', 'projects']:\n if key not in manifest:\n raise Exception('manifest missing key \"{0}\"'.format(key))\n\n # check checking\n for key in ['checking_entity', 'checking_level']:\n if key not in manifest['checking']:\n raise Exception('manifest missing checking key \"{0}\"'.format(key))\n\n if not isinstance(manifest['checking']['checking_entity'], list):\n raise Exception('manifest key checking.checking_entity must be an array')\n\n # check projects\n if not isinstance(manifest['projects'], list):\n raise Exception('manifest key projects must be an array')\n\n for key in ['categories', 'identifier', 'path', 'sort', 'title', 'versification']:\n for project in manifest['projects']:\n if key not in project:\n raise Exception('manifest missing project key \"{0}\"'.format(key))\n\n # check dublin_core\n for key in ['conformsto', 'contributor', 'creator', 'description', 'format', 'identifier', 'issued', 'language',\n 'modified', 'publisher', 'relation', 'rights', 'source', 'subject', 'title', 'type', 'version']:\n if key not in manifest['dublin_core']:\n raise Exception('manifest missing dublin_core key \"{0}\"'.format(key))\n\n expectedRCVersion = 'rc0.2'\n if manifest['dublin_core']['conformsto'].lower() != expectedRCVersion:\n raise Exception('unsupported rc version {}. 
Expected {}'.format(manifest['dublin_core']['conformsto'], expectedRCVersion))\n\n for key in ['direction', 'identifier', 'title']:\n if key not in manifest['dublin_core']['language']:\n raise Exception('manifest missing dublin_core.language key \"{0}\"'.format(key))\n\n if not isinstance(manifest['dublin_core']['source'], list):\n raise Exception('manifest key dublin_core.source must be an array')\n\n for key in ['version', 'identifier', 'language']:\n for source in manifest['dublin_core']['source']:\n if key not in source:\n raise Exception('manifest missing dublin_core.source key \"{0}\"'.format(key))", "def validate_input_manifest(self, source, **kwargs):\n return self._validate_manifest(\"input_manifest\", source, **kwargs)", "def validate_manifest(\n request: ValidateManifestRequest = Body(...),\n schema: Any = Depends(get_description_schema),\n) -> ValidateManifestResponse:\n\n _, response = _validate_manifest(request, schema)\n return response", "def test_is_valid_manifest_format_with_no_errors(caplog):\n assert (\n is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_no_errors.tsv\"\n )\n == True\n )\n assert caplog.text == \"\"", "def _validate(self):\n All = voluptuous.All\n Required = voluptuous.Required\n Length = voluptuous.Length\n Extra = voluptuous.Extra\n\n schema = voluptuous.Schema({\n Required('description'): voluptuous.All(str, Length(min=5)),\n Required('environments'): dict,\n Required('application'): {\n Required('name'): str,\n Required('scenario'): [{\n Required('driver'): str,\n Required('description'): All(str, Length(min=5)),\n Extra: object}]}})\n try:\n schema(self.marmite_tree)\n except voluptuous.MultipleInvalid as e:\n LOG.error(\"Failed to validate %s/marmite.yaml structure: %s\" %\n (self.fs_layer.base_dir, e))\n raise InvalidStructure()", "def validate(self):\n print(\"Validating \")\n sha256_test = _get_file_sha256_hash(self.file_path)\n sha256_truth = self.metadata_pkg[\"hash\"]\n if sha256_test != sha256_truth:\n raise ValueError(\n f\"Hash of modelpkg file {os.path.basename(self.file_path)} ({sha256_test}) does not match truth hash ({sha256_truth}).\")", "def readManifestFile(syn, manifestFile):\n table.test_import_pandas()\n import pandas as pd\n\n sys.stdout.write('Validation and upload of: %s\\n' % manifestFile)\n # Read manifest file into pandas dataframe\n df = pd.read_csv(manifestFile, sep='\\t')\n if 'synapseStore' not in df:\n df = df.assign(synapseStore=None)\n df.synapseStore[df['path'].apply(is_url)] = False # override synapseStore values to False when path is a url\n df.synapseStore[df['synapseStore'].isnull()] = True # remaining unset values default to True\n df.synapseStore = df.synapseStore.astype(bool)\n df = df.fillna('')\n\n sys.stdout.write('Validating columns of manifest...')\n for field in REQUIRED_FIELDS:\n sys.stdout.write('.')\n if field not in df.columns:\n sys.stdout.write('\\n')\n raise ValueError(\"Manifest must contain a column of %s\" % field)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all paths exist')\n df.path = df.path.apply(_check_path_and_normalize)\n\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that all files are unique...')\n if len(df.path) != len(set(df.path)):\n raise ValueError(\"All rows in manifest must contain a unique file to upload\")\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating provenance...')\n df = _sortAndFixProvenance(syn, df)\n sys.stdout.write('OK\\n')\n\n sys.stdout.write('Validating that parents exist and 
are containers...')\n parents = set(df.parent)\n for synId in parents:\n try:\n container = syn.get(synId, downloadFile=False)\n except SynapseHTTPError:\n sys.stdout.write('\\n%s in the parent column is not a valid Synapse Id\\n' % synId)\n raise\n if not is_container(container):\n sys.stdout.write('\\n%s in the parent column is is not a Folder or Project\\n' % synId)\n raise SynapseHTTPError\n sys.stdout.write('OK\\n')\n return df", "def metadata_validate(self):\n # Set path to `service_schema` stored in the `resources` directory from cwd of `mpe_service.py`\n current_path = Path(__file__).parent\n relative_path = '../../snet/snet_cli/resources/service_schema'\n path_to_schema = (current_path / relative_path).resolve()\n with open(path_to_schema, 'r') as f:\n schema = json.load(f)\n metadata = load_mpe_service_metadata(self.args.metadata_file)\n try:\n validate(instance=metadata.m, schema=schema)\n except Exception as e:\n docs = \"http://snet-cli-docs.singularitynet.io/service.html\"\n error_message = f\"\\nVisit {docs} for more information.\"\n if e.validator == 'required':\n raise ValidationError(e.message + error_message)\n elif e.validator == 'minLength':\n raise ValidationError(f\"`{e.path[-1]}` -> cannot be empty.\" + error_message)\n elif e.validator == 'minItems':\n raise ValidationError(f\"`{e.path[-1]}` -> minimum 1 item required.\" + error_message)\n elif e.validator == 'type':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'enum':\n raise ValidationError(f\"`{e.path[-1]}` -> {e.message}\" + error_message)\n elif e.validator == 'additionalProperties':\n if len(e.path) != 0:\n raise ValidationError(f\"{e.message} in `{e.path[-2]}`.\" + error_message)\n else:\n raise ValidationError(f\"{e.message} in main object.\" + error_message)\n else:\n exit(\"OK. Ready to publish.\")", "def test_is_valid_manifest_with_missing_md5_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_md5_column.tsv\",\n )\n missing_md5_message = (\n 'could not find a column name corresponding to required \"Columns.MD5\"'\n )\n assert missing_md5_message in caplog.text\n assert result == False", "def test_is_valid_manifest_format_with_invalid_md5_values(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\"\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 in error_log\n assert result == False", "def validate_output_manifest(self, source, **kwargs):\n return self._validate_manifest(\"output_manifest\", source, **kwargs)", "def test_is_valid_manifest_with_missing_size_column(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_size_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.SIZE\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def check_app_manifest(api_docs_path, overrides, marketplace):\n if not os.path.exists(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"Could not find the manifest file at {}.\", fg=\"red\").format(api_docs_path))\n\n if os.path.isdir(api_docs_path):\n raise exceptions.ValidationError(\n click.style(\"{} is a directory. 
Please enter the direct path to the manifest file.\",\n fg=\"red\").format(api_docs_path))\n\n file_size = os.path.getsize(api_docs_path) / 1e6\n if file_size > 2:\n raise exceptions.ValidationError(\n click.style(\"The size of the manifest file at {} exceeds the maximum limit of 2MB.\", fg=\"red\")\n .format(api_docs_path))\n\n try:\n with open(api_docs_path, \"r\") as f:\n original_manifest_dict = yaml.load(f.read())\n\n manifest_dict = transform_manifest(original_manifest_dict, overrides, marketplace)\n\n # write back the manifest in case some clean up or overriding has happend\n with open(api_docs_path, \"w\") as f:\n yaml.dump(manifest_dict, f)\n\n return manifest_dict\n except (YAMLError, ValueError):\n raise exceptions.ValidationError(\n click.style(\"Your manifest file at {} is not valid YAML.\", fg=\"red\")\n .format(api_docs_path))", "def test_invalid_manifest_filepath(self):\n load_manifest(\"./ehiiehaiehnatheita\")", "def validate_configuration_manifest(self, source, **kwargs):\n return self._validate_manifest(\"configuration_manifest\", source, **kwargs)", "def _validate_manifest(self, kind, source, cls=None, **kwargs):\n data = self._load_json(kind, source, **kwargs)\n\n # TODO elegant way of cleaning up this nasty serialisation hack to manage conversion of outbound manifests to primitive\n inbound = True\n if hasattr(data, \"to_primitive\"):\n inbound = False\n data = data.to_primitive()\n\n self._validate_against_schema(kind, data)\n self._validate_all_expected_datasets_are_present_in_manifest(manifest_kind=kind, manifest=data)\n\n if cls and inbound:\n return cls(**data)\n\n return data", "def test_is_valid_manifest_format_with_many_types_of_errors(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_many_types_of_errors.tsv\",\n )\n error_log = caplog.text\n manifest_with_many_types_of_errors_helper(error_log)\n assert result == False", "def test_is_valid_manifest_format_with_invalid_authz_resources(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_authz_resources.tsv\",\n )\n error_log = caplog.text\n assert '\"invalid_authz\"' in error_log\n assert '\"/\"' in error_log\n assert '\"//\"' in error_log\n assert '\"///\"' in error_log\n assert '\"invalid_authz2\"' in error_log\n assert result == False", "def test_is_valid_manifest_format_with_invalid_sizes(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\"\n )\n error_log = caplog.text\n assert \"-1\" in error_log\n assert \"not_an_int\" in error_log\n assert \"3.34\" in error_log\n assert \"string_with_42\" in error_log\n assert result == False", "def test_is_valid_manifest_with_missing_url_column_and_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n error_on_empty_url=True,\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == False", "def validate(self):\n errors = []\n if self.package_format:\n if not re.match('^[1-9][0-9]*$', str(self.package_format)):\n errors.append(\"The 'format' attribute of the package must \"\n 'contain a positive integer if present')\n\n if not self.name:\n errors.append('Package name must not be empty')\n # Must start with a lower case alphabetic character.\n # Allow lower case alphanummeric 
characters and underscores in\n # keymint packages.\n valid_package_name_regexp = '([^/ ]+/*)+(?<!/)'\n build_type = self.get_build_type()\n if not build_type.startswith('keymint'):\n # Dashes are allowed for other build_types.\n valid_package_name_regexp = '^[a-z][a-z0-9_-]*$'\n if not re.match(valid_package_name_regexp, self.name):\n errors.append(\"Package name '%s' does not follow naming \"\n 'conventions' % self.name)\n\n if self.version:\n if not re.match('^[0-9]+\\.[0-9_]+\\.[0-9_]+$', self.version):\n errors.append(\"Package version '%s' does not follow version \"\n 'conventions' % self.version)\n\n if self.maintainers is not None:\n # if not self.maintainers:\n # errors.append('Package must declare at least one maintainer')\n for maintainer in self.maintainers:\n try:\n maintainer.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n if not maintainer.email:\n errors.append('Maintainers must have an email address')\n\n if self.authors is not None:\n for author in self.authors:\n try:\n author.validate()\n except InvalidPackage as e:\n errors.append(str(e))\n\n if errors:\n raise InvalidPackage('\\n'.join(errors))", "def _validate_all_expected_datasets_are_present_in_manifest(self, manifest_kind, manifest):\n # This is the manifest schema included in the `twine.json` file, not the schema for `manifest.json` files.\n manifest_schema = getattr(self, manifest_kind)\n\n for expected_dataset_name, expected_dataset_schema in manifest_schema[\"datasets\"].items():\n if expected_dataset_name in manifest[\"datasets\"]:\n continue\n\n if expected_dataset_schema.get(\"optional\", False):\n continue\n\n raise exceptions.invalid_contents_map[manifest_kind](\n f\"A dataset named {expected_dataset_name!r} is expected in the {manifest_kind} but is missing.\"\n )", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def test_sa_invalid_manifest_file(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/400/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='npm', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n with pytest.raises(Exception) as exception:\n sa.post_request()\n self.assertIs(exception.type, SAInvalidInputException)", "def read_manifest(self): # -> None:\n ...", "def validate(attrs):\n print \"I GOT HERE.\"\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "def test_valid_and_empty_manifest(self):\n collector = PypiCollector()\n collector.parse_and_collect(MANIFEST_START + DEP_1, True)\n collector.parse_and_collect(None, True)\n packages = dict(collector.counter.most_common())\n assert packages == {\n 'daiquiri': 1\n }", "def test_is_valid_manifest_format_using_error_on_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n error_on_empty_url=True,\n )\n assert '\"\"' in caplog.text\n assert result == False", "def test_is_valid_manifest_format_using_line_limit(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_sizes.tsv\",\n line_limit=3,\n )\n error_log = caplog.text\n assert \"line 2\" in error_log\n assert \"line 3\" in 
error_log\n assert \"line 4\" not in error_log\n assert \"line 5\" not in error_log\n assert result == False", "def validate(attrs):\n try:\n #required_attributes = ('qquuid', 'qqfilename')\n #[attrs.get(k) for k,v in attrs.items()]\n return True\n except Exception, e:\n return False", "def validate_package_metadata(filename, meta, expected_name, expected_version):\n if meta.get('name') != expected_name:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package name in %s: expected %s; got %s\"\n % (filename, expected_name, meta.get('name')))\n if meta.get('version') != expected_version:\n raise distutils.errors.DistutilsSetupError(\n \"unexpected JS package version in %s: expected %s; got %s\"\n % (filename, expected_version, meta.get('version')))\n if meta.get('dependencies') and not isinstance(meta['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"dependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('peerDependencies') and not isinstance(meta['peerDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"peerDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('devDependencies') and not isinstance(meta['devDependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"devDependencies\\\" key should be a JSON object in %s\"\n % filename)\n if meta.get('rex'):\n if not isinstance(meta['rex'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex\\\" key should be a JSON object in %s\"\n % filename)\n if meta['rex'].get('dependencies') and not isinstance(meta['rex']['dependencies'], dict):\n raise distutils.errors.DistutilsSetupError(\n \"\\\"rex.dependencies\\\" key should be a JSON object in %s\"\n % filename)", "def check_attributes(self):\n for key in self.json_parsed_file.keys():\n if key not in self.HARDCODED_REQUIRED_JSON_FIELDS:\n print(key)\n self.output_message += \"All JSON attribute key are not correct\\n\"\n self.is_parsed_pdf_valid = False\n\n for key in self.HARDCODED_REQUIRED_JSON_FIELDS:\n if key not in self.json_parsed_file.keys():\n self.output_message += \"All required attribute keys are not in the parsed information\\n\"\n self.is_parsed_pdf_valid = False", "def Validate(self, relative_file, contents):\n pass", "def test_is_valid_manifest_format_allowing_base64_encoded_md5(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_md5_values.tsv\",\n allow_base64_encoded_md5=True,\n )\n\n error_log = caplog.text\n manifest_with_invalid_md5_values_helper(error_log)\n base64_encoded_md5 = '\"jd2L5LF5pSmvpfL/rkuYWA==\"'\n assert base64_encoded_md5 not in error_log\n assert result == False", "def test_is_valid_manifest_format_with_empty_url(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_empty_url.tsv\",\n )\n assert caplog.text == \"\"\n assert result == True", "def test_is_valid_manifest_with_missing_url_column(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_missing_url_column.tsv\",\n )\n missing_size_message = (\n 'could not find a column name corresponding to required \"Columns.URL\"'\n )\n assert missing_size_message in caplog.text\n assert result == True", "def test_schema_valid(path, name, data):\n schemas = metaschemas()\n if name in ('release-schema.json', 'release-package-schema.json'):\n metaschema = 
schemas['release_package_metaschema']\n elif name == 'record-package-schema.json':\n metaschema = schemas['record_package_metaschema']\n elif name in ('project-schema.json', 'project-package-schema.json'):\n metaschema = schemas['project_package_metaschema']\n else:\n metaschema = schemas['metaschema']\n\n validate_json_schema(path, name, data, metaschema)", "def test_required_fields_schema_version(self):\n\n del self.validator.adata.uns[\"schema_version\"]\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: adata has no schema definition in 'adata.uns'. \"\n \"Validation cannot be performed.\"\n ],\n )", "def validate_entries(self):\n message = ''\n if self.class_name.text() == '' or \\\n self.title.text() == '' or \\\n self.description.text() == '' or \\\n self.module_name.text() == '' or \\\n self.plugin_version.text() == '' or \\\n self.qgis_minimum_version.text() == '' or \\\n self.author.text() == '' or \\\n self.email_address.text() == '':\n message = (\n 'Some required fields are missing. '\n 'Please complete the form.\\n')\n try:\n # Assigning to _ is python sugar for a variable that will be unused\n _ = float(str(self.plugin_version.text()))\n _ = float(str(self.qgis_minimum_version.text()))\n except ValueError:\n message += 'Version numbers must be numeric.\\n'\n # validate plugin name\n # check that we have only ascii char in class name\n try:\n unicode(self.class_name.text()).decode('ascii')\n except UnicodeEncodeError:\n self.class_name.setText(\n unicode(\n self.class_name.text()).encode('ascii', 'ignore'))\n message += (\n 'The Class name must be ASCII characters only, '\n 'the name has been modified for you. \\n')\n # check space and force CamelCase\n if str(self.class_name.text()).find(' ') > -1:\n class_name = capwords(str(self.class_name.text()))\n self.class_name.setText(class_name.replace(' ', ''))\n message += (\n 'The Class name must use CamelCase. 
'\n 'No spaces are allowed; the name has been modified for you.')\n # noinspection PyArgumentList\n if message != '':\n QMessageBox.warning(\n self, 'Information missing or invalid', message)\n else:\n return True", "def test_sa_mismatch_manifest_file_and_ecosystem(self):\n with open(str(Path(__file__).parent.parent.parent) +\n '/data/manifests/202/npmlist.json', 'rb') as fp:\n fs = FileStorage(stream=fp, filename='npmlist.json')\n with pytest.raises(Exception) as exception:\n sa_post_request = StackAnalysesPostRequest(manifest=fs, file_path='/tmp/bin',\n ecosystem='pypi', show_transitive=True)\n sa = StackAnalyses(sa_post_request)\n sa.post_request()\n self.assertIs(exception.type, ValidationError)", "def test_metadata_schema_json_invalid(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(files=files)\n assert not metadata_validation_form.is_valid()", "def validate_data(self, data):\n # TODO use schema\n assert \"file_contents\" in data, data\n assert \"type\" in data, data", "def test_upload_invalid_manifest(cidc_api, some_file, clean_db, monkeypatch):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n\n mocks = UploadMocks(monkeypatch)\n\n mocks.iter_errors.return_value = [\"bad, bad error\"]\n\n grant_upload_permission(user_id, \"pbmc\", cidc_api)\n\n client = cidc_api.test_client()\n\n res = client.post(MANIFEST_UPLOAD, data=form_data(\"pbmc.xlsx\", some_file, \"pbmc\"))\n assert res.status_code == 400\n\n assert len(res.json[\"_error\"][\"message\"][\"errors\"]) > 0\n\n # Check that we tried to upload the excel file\n mocks.upload_xlsx.assert_not_called()", "def validate(self):\n Component.validate(self)\n kinds = (\"lib\", \"exe\")\n if self.kind not in kinds:\n raise Invalid(\"kind must be one of %s for component %s\" % (kinds,self.name))\n\n if self.kind == \"exe\" :\n if not self.exe_path:\n raise Invalid(\"exe_path must be defined for component %s\" % self.name)", "def test_check_presence_only(self):\n schema = yaml.load(self.yaml_presence_check, Loader=yaml.FullLoader)\n val = DwcaValidator(schema, allow_unknown=True,\n error_handler=WhipErrorHandler)\n\n document = {'abundance': 'many'}\n self.assertTrue(val.validate(document))\n document = {'abundance': ''}\n self.assertTrue(val.validate(document))\n document = {'eventDate': ''}\n val.validate(document)\n self.assertEqual(val.errors, {})", "def validate_file_contents(cube, metadata):\n _check_start_end_times(cube, metadata)\n _check_contiguity(cube, metadata)\n _check_data_point(cube, metadata)", "def parse(manifest_filename):\n manifest = {}\n with io.open(manifest_filename, 'rt', encoding='utf8') as f:\n lineno = 0\n for line in f:\n # Split line into fields\n lineno += 1\n line = line.rstrip('\\n')\n fields = line.split(' ')\n\n # Parse fields\n stored_path = fields[0]\n if len(fields) == 1 or (len(fields) == 2 and not fields[1]):\n # line like 'foo\\n' or 'foo \\n'\n local_path = None\n elif len(fields) == 2 and fields[1]:\n # line like 'foo bar\\n'\n local_path = fields[1]\n else:\n raise error.Error('Syntax error at line %d in [%s]: %s' %\n (lineno, manifest_filename, repr(line)))\n\n # Ensure no collisions\n if 
stored_path in manifest:\n raise error.Error(\n ('Configuration error at line %d in [%s]: file [%s] '\n 'specified more than once') %\n (lineno, manifest_filename, stored_path))\n manifest[stored_path] = local_path\n return manifest", "def validate_config(self):\n pass", "def validate_config(self):\n pass", "def validation(nameFile, fileContent):\n\n\n dayNameFile = nameFile[-5:-4]\n monthNameFile = nameFile[-8:-6]\n yearNameFile = nameFile[-13:-9]\n hourNameFile = nameFile[-19:-14]\n hourNameFile = hourNameFile.replace(\"h\", \"\")\n \n\n if nameFile[0:6] == \"drones\":\n scopeNameFile = nameFile[0:6]\n elif nameFile[0:7] == \"parcels\":\n scopeNameFile = nameFile[0:7]\n\n headerFileContent = fileContent[constants.header]\n dateFile = headerFileContent[constants.headerTime]\n dayFile = dateFile[0:1]\n monthFile = dateFile[2:4]\n yearFile = dateFile[5:9]\n hourFile = headerFileContent[1]\n hourFile = hourFile.replace(\"h\", \"\")\n scopeFile = headerFileContent[constants.scope]\n\n\n return hourNameFile == hourFile and dayNameFile == dayFile and monthNameFile == monthFile and yearNameFile == yearFile and scopeNameFile == scopeFile", "def test_is_valid_manifest_format_with_csv(caplog):\n assert is_valid_manifest_format(\"tests/test_manifest.csv\") == True\n assert caplog.text == \"\"", "def app_validate(data):\n\n schema = json.load(open('schemas/app_description_schema.json', 'r'))\n try:\n jsonschema.validate(data, schema)\n except jsonschema.ValidationError as e:\n raise InvalidApplicationDescription(str(e))\n except jsonschema.SchemaError:\n log.exception('BUG: invalid schema for application descriptions')\n raise ZoeLibException('BUG: invalid schema for application descriptions')\n\n # Start non-schema, semantic checks\n if data['version'] != zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION:\n raise InvalidApplicationDescription('Application description version mismatch (expected: {}, found: {}'.format(zoe_lib.version.ZOE_APPLICATION_FORMAT_VERSION, data['version']))\n\n found_monitor = False\n for service in data['services']:\n if service['monitor']:\n found_monitor = True\n\n service['resources']['memory']['max'] = zoe_lib.config.get_conf().max_memory_limit * (1024 ** 3)\n if service['resources']['memory']['min'] is not None and service['resources']['memory']['min'] > service['resources']['memory']['max']:\n raise InvalidApplicationDescription(msg='service {} tries to reserve more memory than the administrative limit'.format(service['name']))\n\n if service['resources']['cores']['min'] is None:\n service['resources']['cores']['min'] = 0.1\n\n if not found_monitor:\n raise InvalidApplicationDescription(msg=\"at least one process should have the monitor property set to true\")", "def _load_manifest(self, filename: Optional[str] = None) -> Dict[str, str]:\n filename = filename or self.manifest_filename\n if not os.path.isfile(filename):\n self.log.debug(f\"Manifest file '{filename}' doesn't exist and will be created.\")\n return {}\n with open(filename, \"r\") as f:\n manifest: Dict[str, str] = json.load(f)\n self.log.debug(f\"Reading manifest '{manifest}' from file '{filename}'\")\n return manifest", "def validate():", "def __validate():\n # TODO: implement", "def supports_manifest(manifest):\n pass", "def __checkFile(self, filename):\n \n try:\n with open(filename, 'r') as f:\n first_line = f.readline()\n \n if not len(first_line.split(\"\\t\")) == 19:\n raise BadProteomeScoutFile(\"N/A\")\n \n \n except:\n BadProteomeScoutFile(\"Invalid ProteomeScout flat file %s.\\nFile is invalid or 
corrupted\" % str(filename))", "def _validate_config(self):\n pass", "def validate_bagit_file(bagit_path):\n _assert_zip_file(bagit_path)\n bagit_zip = zipfile.ZipFile(bagit_path)\n manifest_info_list = _get_manifest_info_list(bagit_zip)\n _validate_checksums(bagit_zip, manifest_info_list)\n return True", "def validate(self):\n with open(os.path.join(settings.MEDIA_ROOT, self.file.name)) as file:\n lines = file.readlines()\n validators = ['os.', 'from os', 'io.', 'from io', 'open(', 'system(']\n for line in lines:\n for validator in validators:\n if validator in line:\n return False\n return True", "def validatePackage(filename, propFilename = None):\n\n if (propFilename == None):\n propFilename = filename + '.prop'\n\n if (not PackageUtil.validateProp(propFilename)):\n return False\n\n try:\n # check that the file exists\n if (not os.path.exists(filename)):\n LOG.warning('Package (%s) does not exists' % (filename))\n return False\n\n # load in the prop file\n propFile = open(propFilename, 'r')\n prop = json.load(propFile)\n propFile.close()\n\n size = os.path.getsize(filename)\n if (size != int(prop['size'])):\n LOG.warning('package size = %s : %s' % (str(size), str(prop['size'])))\n return False\n\n md5Sum = md5sum(filename)\n propmd5 = prop['md5']\n if (md5Sum != propmd5):\n LOG.warning('package md5 = %s : %s' % (md5Sum, prop['md5']))\n return False\n\n # make sure the tar file has the expected structure\n # TPY to do after we fix the cronus-deploy\n\n except Exception, excep:\n LOG.error('validatePackage exception %s' % excep)\n return False\n\n return True", "def test_versioned_release_schema():\n path = 'versioned-release-validation-schema.json'\n if os.path.exists(path):\n warn_and_assert([path], '{0} is present, run: rm {0}',\n 'Versioned release schema files are present. 
See warnings below.')", "def semantic_validate(instance):\n unknown_templates = {}\n for name, requires in instance[\"application\"][\"requires\"].items():\n if name in instance[\"application\"][\"services\"]:\n raise ValidationError(errors=[\n \"/application/requires/{}: the name {} conflicts with service\"\n \" /application/services/{}\".format(name,\n repr(name),\n name),\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\"/application/requires/{}/template\".format(\n name)] = requires[\"template\"]\n for service_name, service in instance[\"application\"][\"services\"].items():\n for name, requires in service[\"requires\"].items():\n if name in instance[\"application\"][\"requires\"]:\n raise ValidationError(errors=[\n \"/application/services/{}/requires/{}: the name {}\"\n \" conflicts with /application/requires/{}\".format(\n service_name, name, repr(name), name)\n ])\n if requires[\"template\"] not in instance[\"local\"][\"templates\"]:\n unknown_templates[\n \"/application/services/{}/requires/{}/template\".\n format(service_name, name)] = requires[\"template\"]\n if unknown_templates:\n raise ValidationError(errors=[\n \"{}: the template {} does not exist \"\n \"in /local/templates\".format(path, repr(name))\n for (path, name) in unknown_templates.items()\n ])", "def test_metadata_invalid(aquarius_instance):\n result, errors = aquarius_instance.validate_metadata(\n {\"some_dict\": \"that is invalid\"}\n )\n assert result is False\n assert errors[0][\"message\"] == \"'main' is a required property\"", "def check_image_manifest(self,\n idf,\n cids,\n cols = ['md5sum',\n 'storage_urls',\n 'file_size',\n 'case_ids',\n 'study_uid',\n 'series_uid',\n 'file_name']):\n errors = []\n for col in cols:\n missing = len(idf[idf[col].isnull()])\n if missing > 0:\n error = \"'{}' values issing for image manifest column '{}'.\".format(len(missing),col)\n print(error)\n errors.append(error)\n if \"case_ids\" in idf:\n icids = list(set(idf[\"case_ids\"]))\n extra_cids = list(set(icids).difference(cids))\n if len(extra_cids) > 0:\n error = \"The image manifest TSV contains {} case IDs that are not present in the case TSV!\".format(len(extra_cids))\n print(error)\n errors.append(error)\n else:\n error = \"'case_ids' column missing from image manifest!\"\n print(error)\n errors.append(error)\n return errors", "def check_requirements(self):\n if not os.path.isfile(self.file_path):\n _logger.error(\"File not found\")\n _logger.error(ex)\n raise\n _logger.info(\"File notifier check passed\")", "def parse_manifest(manifest_contents):\n manifest = {}\n for line in manifest_contents.split('\\n'):\n line_unpacked = line.split()\n try:\n # Check that the line isn't empty or a comment\n if not line_unpacked or line.strip().startswith('#'):\n continue\n\n target, repo_hash, url, sha256_hash = line_unpacked\n manifest[target] = {\"repo_hash\": repo_hash,\n \"url\": url,\n \"sha256_hash\": sha256_hash,\n }\n except ValueError:\n log(\"WARN\", \"Warning: Invalid line in manifest file:\\n\"\n \" {}\".format(line))\n continue\n return manifest", "def get_manifest(self):\r\n if os.path.exists(self.manifestfile):\r\n return Manifest(json.loads(file(self.manifestfile).read()))\r\n return Manifest({})", "def test_is_valid_manifest_with_wide_row(caplog):\n logging.getLogger().setLevel(logging.WARNING)\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_wide_row.tsv\",\n )\n wide_warning = f\"line 3, number of fields (6) in row is 
unequal to number of column names in manifest (5)\"\n assert wide_warning in caplog.text\n assert result == True", "def readManifest(fn):\n fp = open(fn)\n manifest = []\n for line in fp.readlines():\n work = line.strip()\n if work.startswith(\"#\"): continue\n if not work: continue\n if work.startswith('\"'):\n source = work.split('\"')[1]\n work = work[len(source)+2:].strip()\n else:\n source = work.split()[0]\n work = work[len(source):].strip()\n pass #end-else\n if not work or work.startswith(\"#\"):\n target = None\n elif work.startswith('\"'):\n target = work.split('\"')[1]\n work = work[len(target)+2:].strip()\n else:\n target = work.split()[0]\n work = work[len(target):].strip()\n pass #end-if\n if not work.startswith(\"#\") and work:\n raise Exception(\"Bad format line %s\" % line.strip())\n manifest.append((source,target))\n pass #end-for\n return manifest", "def test_is_valid_manifest_format_with_invalid_urls(caplog):\n result = is_valid_manifest_format(\n \"tests/validate_manifest_format/manifests/manifest_with_invalid_urls.tsv\"\n )\n error_log = caplog.text\n assert '\"wrong_protocol://test_bucket/test.txt\"' in error_log\n assert '\"test/test.txt\"' in error_log\n assert '\"testaws/aws/test.txt\"' in error_log\n assert '\"://test_bucket/test.txt\"' in error_log\n assert '\"s3://\"' in error_log\n assert '\"gs://\"' in error_log\n assert '\"s3://bucket_without_object\"' in error_log\n assert '\"s3://bucket_without_object/\"' in error_log\n assert '\"test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:/test_bucket/aws/test.txt\"' in error_log\n assert '\"s3:test_bucket/aws/test.txt\"' in error_log\n assert '\"://test_bucket/aws/test.txt\"' in error_log\n assert '\"s3test_bucket/aws/test.txt\"' in error_log\n assert '\"https://www.uchicago.edu\"' in error_log\n assert '\"https://www.uchicago.edu/about\"' in error_log\n assert '\"google.com/path\"' in error_log\n assert '\"\"\"\"' in error_log\n assert \"\\\"''\\\"\" in error_log\n assert '\"[]\"' in error_log\n assert \"\\\"['']\\\"\" in error_log\n assert '\"[\"\"]\"' in error_log\n assert '\"[\"\", \"\"]\"' in error_log\n assert '\"[\"\", \\'\\']\"' in error_log\n assert result == False", "def validate_metadata(self, metadata: Dict[str, dict]):\n encoder = NWBMetaDataEncoder()\n # The encoder produces a serialiazed object so we de serialized it for comparison\n serialized_metadata = encoder.encode(metadata)\n decoded_metadata = json.loads(serialized_metadata)\n validate(instance=decoded_metadata, schema=self.get_metadata_schema())\n if self.verbose:\n print(\"Metadata is valid!\")", "def test_manifest(self):\n self.parse_manifest()\n\n ids = {}\n errors = []\n collisions = []\n manifest = self.cryptomattes[self.selection][\"names_to_IDs\"]\n for name, idvalue in manifest.iteritems():\n if mm3hash_float(name) != idvalue:\n errors.append(\"computed ID doesn't match manifest ID: (%s, %s)\" % (idvalue, mm3hash_float(name)))\n else:\n if idvalue in ids:\n collisions.append(\"colliding: %s %s\" % (ids[idvalue], name))\n ids[idvalue] = name\n\n print \"Tested %s, %s names\" % (self.nuke_node.name(), len(manifest))\n print \" \", len(errors), \"non-matching IDs between python and c++.\"\n print \" \", len(collisions), \"hash collisions in manifest.\"\n\n return errors, collisions", "def validate():\n description = f\"Validate XML metadata.\"\n parser = argparse.ArgumentParser(\n description=description,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n help = \"XML file or URL\"\n parser.add_argument('infile', 
help=help)\n\n help = (\n \"Format ID for metadata standard. If this argument is supplied, \"\n \"only that format ID will be checked. If not, all format IDs will be \"\n \"checked.\"\n )\n parser.add_argument('--format-id',\n help=help,\n choices=d1_scimeta.util.get_supported_format_id_list())\n\n help = \"Verbosity of log messages.\"\n choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']\n parser.add_argument('-v', '--verbosity', help=help, choices=choices,\n default='INFO')\n\n args = parser.parse_args()\n\n validator = XMLValidator(verbosity=args.verbosity)\n validator.validate(args.infile, format_id=args.format_id)", "def check_missing_files(self):\n files = [getattr(self, attr) for attr in self._required]\n try:\n utilities.check_missing_files(files)\n except utilities.MissingConstraintError as err:\n err.message += \"\\nSkipping {}\\n\".format(self.__class__.__name__)\n raise err", "def test_DataPackageFileAttributesAreValid_missing_file(tempdir: pathlib.Path):\n df = dpack_pb2.DataPackageFile()\n df.relative_path = \"a\"\n assert not dpack.DataPackageFileAttributesAreValid(tempdir, df)", "def test_edit_manifest(self):\n \n manifest = copy.deepcopy(self.manifest)\n manifest['job']['interface']['command'] = ''\n \n json_data = {\n 'manifest': manifest,\n 'auto_update': False\n }\n\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n \n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertDictEqual(results, {u'errors': [], u'is_valid': True, u'warnings': []})\n \n # mismatch name\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['name'] = 'new-name'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)\n \n # mismatch version\n manifest = copy.deepcopy(self.manifest)\n manifest['job']['jobVersion'] = '1.2.3'\n json_data = {\n 'manifest': manifest,\n }\n url = '/%s/job-types/%s/%s/' % (self.api, self.job_type.name, self.job_type.version)\n response = self.client.generic('PATCH', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, response.content)", "def validateBoot (self):\n self.mountBootPartition()\n stateDictionary = self._createBootInstallationDictionary()\n self._writeDictionaryAsJson(stateDictionary, self._getBootInstallationFilePath())\n self._log(\"validate-boot\").notice(\"boot partition is validated\")", "def test_validate_valid_resume(self):\n # DEV: `validate` will raise an exception if it could not validate\n self.assertIsNone(resumeschema.validate(self.valid_resume))", "def init_check(self):\n for required_file in self._required_files:\n # Check if required files are there\n # FIXME Sometimes it doesn't work :?\n if required_file not in self.files:\n self.valid = False", "def cross_validate(self, contents, required=None, forbidden=None):\n if required:\n for item in required:\n self.assertTrue(\n item in contents,\n \"Required entry [{item}] not found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )\n if forbidden:\n for item in forbidden:\n self.assertTrue(\n item not in contents,\n 
\"Forbidden entry [{item}] found in:\\n{contents}\".format(\n item=item, contents=contents\n )\n )", "def validate(self):\n if os.path.exists(self.filename):\n with NWBHDF5IO(self.filename, mode='r') as io:\n errors = pynwb_validate(io)\n if errors:\n for err in errors:\n raise Exception(err)", "def validate_workbook(self):\n\n valid = True\n\n #Check for a sheet that should have preservation metadata data\n try:\n self.check_presSheetExists()\n except AMIExcelError as e:\n print(\"Error in workbook sheets: \", e.value)\n valid = False\n\n #Check that preservation sheet contains required headers\n for i in range(0, 3):\n try:\n expected = set([item[i] for item in ami_md_constants.MEDIAINGEST_EXPECTED_HEADERS if item[i]])\n found = self.get_headerRow(self.pres_sheetname, i)\n self.check_headerRow(expected, found)\n except AMIExcelError as e:\n print(\"Error in preservation header row {}: {}\"\n .format(i + 1, e.value))\n valid = False\n\n #Check that preservation sheet headers have the correct heirarchy\n try:\n header_entries = set(self.get_headerEntries(self.pres_sheetname))\n self.check_headerEntries(set(ami_md_constants.MEDIAINGEST_EXPECTED_HEADERS), header_entries)\n except AMIExcelError as e:\n print(\"Error in header entries: \", e.value)\n valid = False\n\n #Check that the preservation sheet does not contain equations\n try:\n self.check_noequations(self.pres_sheetname)\n except AMIExcelError as e:\n print(\"Error in cell values: \", e.value)\n valid = False\n\n return valid", "def checkRequiredConfigs(self):\n containmentFolder = self.getAbsContainmentFolder()\n rootFileName = self.app.config.exhale_args[\"rootFileName\"]\n rootFileTitle = self.app.config.exhale_args[\"rootFileTitle\"]\n doxygenStripFromPath = self.app.config.exhale_args[\"doxygenStripFromPath\"]\n\n # validate that the containmentFolder was created\n assert os.path.isdir(containmentFolder)\n # validate that {containmentFolder}/{rootFileName} was created\n assert os.path.isfile(os.path.join(containmentFolder, rootFileName))\n # validate that the title was included\n with open(os.path.join(containmentFolder, rootFileName), \"r\") as root:\n root_contents = root.read()\n root_heading = \"{0}\\n{1}\".format(\n rootFileTitle,\n exhale.utils.heading_mark(rootFileTitle, exhale.configs.SECTION_HEADING_CHAR)\n )\n assert root_heading in root_contents\n\n # TODO: validate doxygenStripFromPath\n if doxygenStripFromPath: # this is only here to avoid a flake8 fail on a todo\n pass", "def _validate_yaml(self):\n\n # verify the format is correct\n if self.validater == 'yamale':\n\n import yamale\n\n print('Validating yaml file with yamale.')\n cwd = Path(os.path.dirname(__file__))\n schema_path = str(cwd.parent / 'schema') + '/generic_schema.yaml'\n schema = yamale.make_schema(schema_path)\n data = yamale.make_data(self.yaml_path)\n try:\n yamale.validate(schema, data, strict=False)\n print('Validation success! 
👍')\n return True\n except ValueError as e:\n print(\n 'Yamale found that your file, '\n + self.yaml_path\n + ' is not formatted correctly.'\n )\n print(e)\n return False\n else:\n print('Did not validate yaml.')\n print('If unexpected results occur, try installing yamale and rerun.')\n return True", "def test_incorrect_dependency(self):\n load_manifest(StringIO(manifest_incorrect_dependency))", "def validate_file_content(metadata_record_dict, mandatory_record_content):\n\n\tmetadata_record_file_errors = []\n\tinput_fields = list(metadata_record_dict.keys())\n\n\tfor required_field in mandatory_record_content:\n\t\tif required_field in input_fields:\n\t\t\tpass\n\t\telse:\n\t\t\tmessage = (\"Required field '{0}' not found in \")\n\t\t\tmetadata_record_file_errors.append(message)\n\n\treturn metadata_record_file_errors", "def validate(self, spec):\n d = spec.directory\n for file_name in os.listdir(d):\n if file_name.endswith(\".icon\"):\n if \" \" in file_name:\n raise ValidationException(f\"The .icon file name was '{file_name}'.\\n \"\n \".icon file may not contain spaces use a '_' instead.\")", "def test_metadata_schema_json_valid_file_upload(mock_irods):\n\n schema_file_path = 'pytest/assets/mi_schema.json'\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert metadata_validation_form.is_valid()\n assert len(metadata_validation_form.cleaned_data['mi_json_schema_file']) > 0", "def test_metadata_schema_json_invalid_file_upload(invalid_schema_file, mock_irods):\n\n schema_file_path = 'pytest/assets/{}'.format(invalid_schema_file)\n file_size = os.stat(schema_file_path).st_size\n assert file_size > 0\n file_to_upload = UploadedFile(file=open(schema_file_path, 'rb'),\n name=os.path.basename(schema_file_path), size=file_size)\n\n form_data = {\"mp_program_type\": \"Test Model Program\"}\n files = {\"mi_json_schema_file\": file_to_upload}\n metadata_validation_form = ModelProgramMetadataValidationForm(data=form_data, files=files)\n assert not metadata_validation_form.is_valid()", "def parse_manifest(manifest_path):\n with open(manifest_path, 'r') as f:\n data = f.read()\n if data:\n return json.loads(data)\n else:\n return {}", "def _validate_main_config(self):\n # check for required top-level parameters in main config\n required_params = {\"name\": str, \"version\": str, \"datasets\": list}\n\n for param, expected_type in required_params.items():\n if param not in self.config:\n msg = (\n \"[ERROR] Config error: missing required configuration parameter in {}: '{}'\"\n )\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param))\n elif not isinstance(self.config[param], expected_type):\n msg = \"[ERROR] Config error: parameter is of unexpected type {}: '{}' (expected: '{}')\"\n config_file = os.path.basename(self.config[\"config_file\"])\n sys.exit(msg.format(config_file, param, expected_type))", "def validate_metadata(self):\n metadata = self.get_client_metadata()\n\n return True", "def _validate(self):\n schema_version = util.schemas[self.schema_name]\n stored_schemas = util.stored_schemas\n\n try:\n schema_obj = stored_schemas[\n \"http://redfish.dmtf.org/schemas/v1/\" + schema_version]\n 
except KeyError:\n raise OneViewRedfishError(\"{} not found\".format(schema_version))\n\n resolver = jsonschema.RefResolver('', schema_obj, store=stored_schemas)\n jsonschema.validate(self.redfish, schema_obj, resolver=resolver)", "def validate_settings(self):\n\t\t# Check all attributes exist\n\t\tfor key, value in vars(self).items():\n\t\t if hasattr(self, key) == False:\n\t\t\t\tUtility.report_error(1, '%s: Missing attribute \"%s\"' % (self._file_path, key))\n\n\t\t# Check mandatory attributes\n\t\tif self.is_valid_status(self.status) == False:\n\t\t\tUtility.report_error(1, '%s: Status \"%s\" is not valid' % (self._file_path, self.status))\n\n\t\tif self.definition == '' or self.definition == None:\n\t\t\tUtility.report_error(1, '%s: Definition field is empty or missing' % (self._file_path))\n\t\t\n\t\tif self.term == '' or self.term == None:\n\t\t\tUtility.report_error(1, '%s: Term field is empty or missing' % (self._file_path))\n\n\t\t# If status is neither approved or elaboration reject reason must be stated\n\t\tif (self.status == 'rejected' or self.status == 'replaced') and (self.status_reason == '' or self.status_reason == None):\n\t\t\tUtility.report_error(1, '%s: \"Status reason\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is rejected a rejected by user must be specified\n\t\tif self.status == 'rejected' and (self.rejected_by == '' or self.rejected_by == None):\n\t\t\tUtility.report_error(1, '%s: \"Rejected by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\t# If status is replaced then Replaced by must be specified\n\t\tif self.status == 'replaced' and (self.replaced_by == None or self.replaced == ''):\n\t\t\tUtility.report_error(1, '%s: \"Replaced by\" is missing, this is not allowed when status is \"%s\"' % (self._file_path, self.status))\n\n\t\tself.created_by = self.make_link_list('stakeholders', 'Created by', self.created_by, False)\n\t\tself.rejected_by = self.make_link_list('stakeholders', 'Rejected by', self.rejected_by, False)\n\t\tself.replaced_by = self.make_link_list('glossary', 'Replaced by', self.replaced_by)\n\n\t\tif self.is_string_date(self.created_on) == False:\n\t\t\tUtility.report_error(1, '%s: Created on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.created_on))\n\n\t\tif self.is_string_date(self.rejected_on) == False:\n\t\t\tUtility.report_error(1, '%s: Rejected on field has value \"%s\", but it must be date in YYYY-MM-DD format' % (self._file_path, self.rejected_on))", "def validate(self):\n import os\n\n if self.kind == KDM.INTEROP:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'interop.xsd'), 'r') as f:\n schema = f.read()\n elif self.kind == KDM.SMPTE:\n with open(os.path.join(os.path.dirname(__file__), 'xsd', 'smpte.xsd'), 'r') as f:\n schema = f.read()\n\n base_dir = os.getcwd()\n os.chdir(os.path.join(os.path.dirname(__file__), 'xsd'))\n try:\n schema = ET.XMLSchema(ET.XML(schema))\n xmlparser = ET.XMLParser(schema=schema)\n ET.fromstring(self.raw, xmlparser)\n finally:\n os.chdir(base_dir)", "def test_DataPackageFileAttributesAreValid_match(tempdir: pathlib.Path):\n df = dpack_pb2.DataPackageFile()\n df.relative_path = \"a\"\n df.checksum_hash = dpack_pb2.SHA256\n df.checksum = SHA256_EMPTY_FILE\n (tempdir / \"a\").touch()\n assert dpack.DataPackageFileAttributesAreValid(tempdir, df)" ]
[ "0.73052067", "0.6979145", "0.69215554", "0.68080264", "0.6786265", "0.6757262", "0.6450811", "0.6440165", "0.640244", "0.6351523", "0.63133943", "0.62944144", "0.62856257", "0.62499046", "0.62444276", "0.62436587", "0.6221382", "0.61910504", "0.6152198", "0.60909945", "0.60679615", "0.60588336", "0.60163915", "0.6001264", "0.5996636", "0.59722024", "0.59587604", "0.5956518", "0.59325206", "0.5914116", "0.5912054", "0.58673203", "0.5862588", "0.58538914", "0.5826268", "0.5815112", "0.5793097", "0.57824695", "0.57600623", "0.5753106", "0.574377", "0.5709536", "0.568913", "0.5685849", "0.5668629", "0.5666433", "0.56651276", "0.565961", "0.56305736", "0.5615993", "0.55980617", "0.55980617", "0.5587431", "0.556856", "0.5554154", "0.55504185", "0.5546379", "0.5537599", "0.5536559", "0.5536392", "0.5512272", "0.5509106", "0.55089366", "0.5507787", "0.5500205", "0.54960567", "0.5466127", "0.5456946", "0.54560435", "0.54550457", "0.5450761", "0.5449078", "0.54481995", "0.5445398", "0.54441524", "0.54293704", "0.5425856", "0.54184604", "0.5412102", "0.54054147", "0.54053724", "0.5403968", "0.53981405", "0.5390232", "0.5389681", "0.53871685", "0.53865576", "0.5380506", "0.5378905", "0.53764087", "0.53757054", "0.53714496", "0.53704464", "0.5368489", "0.5368367", "0.53604114", "0.5355798", "0.5349811", "0.5348181", "0.53461075" ]
0.741697
0
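The positive and negative snippets in the record above all revolve around the same required-field validation pattern (compare `validate_file_content` and `_validate_main_config`, which both iterate over a dict of expected fields and types). The sketch below is a minimal, self-contained version of that pattern for illustration only; `REQUIRED_FIELDS` and `validate_record` are hypothetical names, not identifiers from this dataset, though the field/type mapping mirrors the one in `_validate_main_config`.

```python
# Minimal sketch of the required-field validation pattern seen in the
# snippets above. All names here are illustrative, not from the dataset;
# the field/type mapping is borrowed from the _validate_main_config negative.

REQUIRED_FIELDS = {"name": str, "version": str, "datasets": list}


def validate_record(record: dict) -> list:
    """Return a list of error messages; an empty list means the record is valid."""
    errors = []
    for field, expected_type in REQUIRED_FIELDS.items():
        if field not in record:
            errors.append(f"missing required field '{field}'")
        elif not isinstance(record[field], expected_type):
            errors.append(
                f"field '{field}' has type {type(record[field]).__name__}, "
                f"expected {expected_type.__name__}"
            )
    return errors


if __name__ == "__main__":
    print(validate_record({"name": "demo", "version": "1.0", "datasets": []}))  # []
    print(validate_record({"name": "demo"}))  # two errors reported
```

Collecting error messages rather than raising on the first failure, as several of the snippets above also do, lets callers report every problem in one pass instead of fixing fields one at a time.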
Gets the zerotier IP address from the given marketplace name
def get_zerotier_address(marketplace): logger.info("You might need to enter your superuser password.") address = zerotier.get_address(marketplace) if not address: join_cmd = click.style("21 join", bold=True, reset=False) no_zt_network = click.style( "You are not part of the {}. Use {} to join the market.", fg="red") raise UnloggedException(no_zt_network.format(marketplace, join_cmd)) return address
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vip_address(self, vip_name):\n networks = self.nailgun_client.get_networks(self.cluster_id)\n vip = networks.get('vips').get(vip_name, {}).get('ipaddr', None)\n asserts.assert_is_not_none(\n vip, \"Failed to get the IP of {} server\".format(vip_name))\n\n logger.debug(\"VIP '{0}': {1}\".format(vip_name, vip))\n return vip", "def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer", "def address(self, name):\n return self.query(name).response.answer[0].items[0].address", "def reverse_lookup_zone(ipaddress):\n return reverse_dotted_decimals(ipaddress) + '.in-addr.arpa'", "def getIp(name):\n tmp = []\n ips = socket.getaddrinfo(socket.gethostbyname(name), None)\n for x in ips:\n tmp.append(x[4][0])\n\n return tmp", "def get_global_ip() -> str:\n return urllib.request.urlopen(\"https://icanhazip.com\").read().decode().strip()", "def get_ip(pc_name):\n pc_ip = '' \n try: \n pc_ip = socket.gethostbyname(pc_name) \n except Exception, e:\n initlog('failed to get PC ip; %s' % str(e)) \n return pc_ip", "def getIP():\n data = _get_page(\"http://myip.cz\")\n data = data.split(\"Your IP Address is: <b>\")[-1].split(\"</b>\")[0]\n return data.strip()", "def get_ip_string():\n return netifaces.ifaddresses('br0')[netifaces.AF_INET][0]['addr']", "def get_address(machine: Machine) -> str:\n default_route, _ = machine.run(\"ip route get 8.8.8.8\")\n return re.search(\" src ([0-9.]+) \", default_route).group(1)", "def get_public_ip(self, name=None):\n raise NotImplementedError", "def get_IP(): \n \n return socket.gethostbyname(socket.gethostname())", "def get_ip():\n with hide(\"everything\"):\n ip_addresses = run('hostname -I').split(' ')\n return ip_addresses[0]", "def getLocalIpAddress() :\n \n if (platform.system() == 'Linux') :\n cmd = \"ifconfig wlan0 | grep 'inet addr:' | cut -d: -f2 | awk '{print $1}'\"\n return subprocess.check_output(cmd, shell=True) \n else : # Darwin\n return socket.gethostbyname(socket.gethostname())", "def internet_address(self) -> str:\n return pulumi.get(self, \"internet_address\")", "def get_transport_address_by_name(transport_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from transport where name = '{}';\".format(transport_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def get_ip_address(self): # type: () -> t.Optional[str]\n if self.networks:\n network_name = get_docker_preferred_network_name(self.args)\n\n if not network_name:\n # Sort networks and use the first available.\n # This assumes all containers will have access to the same networks.\n network_name = sorted(self.networks.keys()).pop(0)\n\n ipaddress = self.networks[network_name]['IPAddress']\n else:\n ipaddress = self.network_settings['IPAddress']\n\n if not ipaddress:\n return None\n\n return ipaddress", "def _get_ip_address(ifname):\n cmd = (\"ifconfig %s| grep 'inet ' | awk -F: '{print $1}' | awk '{print $2}'\" %str(ifname))\n ip = os.popen(cmd).read().replace(\"\\n\",\"\")\n\n return ip", "def get_local_host_ip(self) -> str:", "def get_ip_address(device):\n try:\n capwap_client_rcb = device.parse('show capwap client rcb')\n except SchemaEmptyParserError as e:\n log.error(e)\n return ''\n\n return 
capwap_client_rcb.get('mwar_ap_mgr_ip', '')", "def get_IP():\n\n return socket.gethostbyname(socket.gethostname())", "def GetIPAddr():\n cmd = \"ifconfig | awk '/192/ {print $2}'\"\n res = Run(cmd).replace(\"\\n\", \"\") # remove end of line char\n return res.replace(\"addr:\", \"\") # remove \"addr:\" prefix", "def get_my_ip():\r\n try:\r\n return [x[4] for x in conf.route.routes if x[2] != '0.0.0.0'][0]\r\n except IndexError:\r\n return '127.0.0.1'", "def get_node_ip(\n self,\n name,\n ):\n pass", "def get_ip_address(self):\n raise NotImplementedError", "def get_host_ip_addr():\n return nova_conf.my_ip", "def get_ip(self):", "def obtain_public_ip():\n from urllib2 import urlopen\n my_ip = urlopen('http://ip.42.pl/raw').read()\n logger.debug('The public ip is: %s' % my_ip)\n return str(my_ip)", "def get_ip_address():\n try:\n return socket.gethostbyname(socket.getfqdn())\n except socket.gaierror as error:\n logger.warn(error)\n return socket.gethostbyname(\"\")", "def get_address(project, zone, instance):\n return gcloud(\n project,\n 'addresses',\n 'describe',\n '%s-ip' % instance,\n '--region=%s' % get_region(zone),\n '--format=value(address)',\n )", "def urlToIp(self, url):\n return str(socket.gethostbyname(url))", "def get_public_ip():\n public_ip = get('https://api.ipify.org').text\n return public_ip", "def real_ip(self):\n if not hasattr(self, \"_real_ip\"):\n response = get(ICANHAZIP)\n self._real_ip = self._get_response_text(response)\n\n return self._real_ip", "def address(self):\n \n return self.__ip", "def ip_address(self) -> str:\n return pulumi.get(self, \"ip_address\")", "def __lookup_public_ip(self):\n\n response = requests.get('https://api.ipify.org?format=json', timeout=self.timeout)\n\n if response.status_code == 200:\n ip_data = response.json()\n if 'ip' not in ip_data.keys():\n return 'Unable to determine IP'\n else:\n return ip_data['ip']\n else:\n return 'Unable to determine IP'", "def get_global_ip():\n network_info_providers = [\n 'http://api.ipify.org/',\n 'http://myip.dnsomatic.com',\n 'http://inet-ip.info/ip',\n 'http://v4.ident.me/',\n ]\n random.shuffle(network_info_providers)\n for url in network_info_providers:\n try:\n return requests.get(url).text.lstrip().rstrip()\n except Exception:\n continue\n else:\n log.info('cannot find global ip')\n return \"\"", "def getPublicIpAddress() :\n f = urllib.urlopen(\"http://www.canyouseeme.org/\")\n html_doc = f.read()\n f.close()\n ipAddress = re.search('(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)',html_doc)\n\n #response = urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0) + '&position=true').read()\n return urllib.urlopen('http://api.hostip.info/get_html.php?ip=' + ipAddress.group(0)).read()", "def get_IPaddress():\n config = get_ifconfig()\n return config[0]", "def get_ip_address(self, ip_address=None):\r\n svc = self.client['Network_Subnet_IpAddress']\r\n return svc.getByIpAddress(ip_address)", "def get_host_ipaddress(self):\n\t\treturn call_sdk_function('PrlVirtNet_GetHostIPAddress', self.handle)", "def ipAddress():\n \n sk = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sk.connect((\"8.8.8.8\", 80))\n ip = (sk.getsockname()[0])\n sk.close()\n return str(ip)", "def get_zone(cls, name):\n\n def get_closest(n):\n \"\"\"\n Return closest matching zone\n \"\"\"\n while n:\n try:\n return DNSZone.objects.get(name=n)\n except DNSZone.DoesNotExist:\n pass\n n = 
\".\".join(n.split(\".\")[1:])\n return None\n\n if not name:\n return None\n if is_ipv4(name):\n # IPv4 zone\n n = name.split(\".\")\n n.reverse()\n return get_closest(\"%s.in-addr.arpa\" % (\".\".join(n[1:])))\n elif is_ipv6(name):\n # IPv6 zone\n d = IPv6(name).digits\n d.reverse()\n c = \".\".join(d)\n return get_closest(\"%s.ip6.arpa\" % c) or get_closest(\"%s.ip6.int\" % c)\n else:\n return get_closest(name)", "def getIP():\n try:\n page = urlopen(\"http://www.whatismyip.com/automation/n09230945.asp\")\n IP = page.read()\n page.close()\n return IP\n except:\n return \"Could not retrieve the IP address.\"", "def getnetwork(ipaddr):\n return '192.168.1.0/24'", "def get_address(xpub):\n return xpub.to_address() # p2pkh", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def address(self) -> str:\n return pulumi.get(self, \"address\")", "def get_external_ip():\n try:\n r = requests.get(\n METADATA_NETWORK_INTERFACE_URL,\n headers={'Metadata-Flavor': 'Google'},\n timeout=2)\n return r.text\n except requests.RequestException:\n logging.info('Metadata server could not be reached, assuming local.')\n return 'localhost'", "def host_ip(host):\n return host.cmd('ip addr show {}-eth1 | awk \\'/inet / {{ print $2 }}\\' | cut -d\\'/\\' -f1'.format(host.name, host.name), stdout=sp.PIPE).strip()", "def get_primary_ip(options, index):\n\n second_octet = 160 + index\n return \"192.%s.1.1\" % second_octet", "def get_ip_address(ifname):\n # I did not write this function I give credit to this site\n # for it:\n # hpython-mysqldbttp://code.activestate.com/recipes/439094/\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(s.fileno(), 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])", "def fetch_address(cpr: str) -> str:\n\n return \"Åbogade 15, 8200 Aarhus N\"", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def address(self) -> Optional[str]:\n return pulumi.get(self, \"address\")", "def ip(self) -> Optional[str]:\n return pulumi.get(self, \"ip\")", "def public_address() -> str:\n check_timeout = float(CONFIG['network']['check_timeout'])\n check_host_list = CONFIG.get_list('network', 'check_host_list')\n try:\n for check_url in check_host_list:\n with urllib.request.urlopen(\n url=check_url, timeout=check_timeout,\n ) as response:\n return response.read().decode().strip()\n return None\n except Exception as error:\n return None", "def ip(self) -> str:\n return pulumi.get(self, \"ip\")", "def get_address(address=None, vsys=\"1\"):\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": (\n \"/config/devices/entry[@name='localhost.localdomain']/vsys/entry[@name='vsys{}']/\"\n \"address/entry[@name='{}']\".format(vsys, address)\n ),\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_ip():\n return os.getenv(\"HOST_IP\", \"127.0.0.1\")", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def ip_address(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"ip_address\")", "def 
get_ip_address(self):\n return self.adb.get_ip_address()", "def get_ip() -> str:\n for ip in socket.gethostbyname_ex(socket.gethostname())[2]:\n if not ip.startswith(\"127.\"):\n return ip\n for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]:\n s.connect((\"8.8.8.8\", 53))\n ip, port = s.getsockname()\n s.close()\n if not ip.startswith(\"127.\"):\n return ip\n raise ConnectionError(\"Can not get a suitable IP\")", "def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip", "def get_host_ip(timeout=10):\n\n return get_default_route(timeout)[2]", "def get_zone_ip(config, section):\n\n current_zone = api.domain.zone.record.list(config.get(section, \"apikey\"),\n get_zone_id(config, section), 0)\n ip = '0.0.0.0'\n\n # There may be more than one A record - we're interested in one with\n # the specific name (typically @ but could be sub domain)\n for d in current_zone:\n if d['type'] == 'A'and d['name'] == config.get(section, \"a_name\"):\n ip = d['value']\n\n return ip", "def get_gateway_ip(timeout=10):\n\n return get_default_route(timeout)[0]", "def overlay_ip(ip):\n return \"192.168.{}.{}\".format( *ip.split(\".\")[2:])", "def get_external_ip(soap_url) -> str:\n s_o_a_p = '<?xml version=\"1.0\"?>\\r\\n'\n s_o_a_p += '<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=' \\\n '\"http://schemas.xmlsoap.org/soap/encoding/\">\\r\\n'\n s_o_a_p += '<s:Body>\\r\\n'\n s_o_a_p += '<u:GetExternalIPAddress xmlns:u=\"urn:schemas-upnp-org:service:WANPPPConnection:1\">\\r\\n'\n s_o_a_p += '</u:GetExternalIPAddress>\\r\\n'\n s_o_a_p += '</s:Body>\\r\\n'\n s_o_a_p += '</s:Envelope>\\r\\n'\n\n try:\n req = Request(soap_url)\n req.add_header('Content-Type', 'text/xml; charset=\"utf-8\"')\n req.add_header('SOAPACTION', '\"urn:schemas-upnp-org:service:WANPPPConnection:1#GetExternalIPAddress\"')\n req.data = s_o_a_p.encode('utf8')\n result = xmltodict.parse(urlopen(req).read().decode())\n return result['s:Envelope']['s:Body']['u:GetExternalIPAddressResponse']['NewExternalIPAddress']\n except Exception:\n log.debug(\"get_external_ip exception\", exc_info=True)", "def get_ip_by_name ( route53_conn, dns_name ) :\n record = get_r53_record_by_name( route53_conn, dns_name )\n if record :\n return record.resource_records[ 0 ]\n\n return None", "def getIp(self):\n raise NotImplementedError", "def get_local_ip(self, system):\n if system == \"Linux\":\n # This is a bit ugly but it works\n ips = check_output(['hostname', '--all-ip-addresses']).decode(\"utf-8\")\n return ips.split(\" \")[0]\n else:\n return socket.gethostbyname(socket.gethostname())", "def get_reverse_host():\n try:\n return socket.gethostbyaddr(get_ipaddress())[0]\n except:\n return \"Unable to resolve IP address to reverse hostname\"", "def get_default_ip_address():\r\n gws = netifaces.gateways() # get all gateways\r\n default = gws['default'] # get the default gw\r\n adapter = default[2][1] # get the adapter identifier\r\n realadapter = netifaces.ifaddresses(adapter) # get the adapter\r\n addr_dict = realadapter[2][0] # get the first ipv4 address tuple\r\n return addr_dict['addr']", "def address(self, net: str, compressed: bool) -> str:\n # encode the public key into bytes and hash to get the payload\n pkb_hash = self.encode(compressed=compressed, hash160=True)\n # add version byte (0x00 for Main Network, or 0x6f for Test Network)\n version = {'main': b'\\x00', 'test': b'\\x6f'}\n ver_pkb_hash = version[net] + pkb_hash\n # 
calculate the checksum\n checksum = sha256(sha256(ver_pkb_hash))[:4]\n # append to form the full 25-byte binary Bitcoin Address\n byte_address = ver_pkb_hash + checksum\n # finally b58 encode the result\n b58check_address = b58encode(byte_address)\n return b58check_address", "def _get_my_ip():\n try:\n csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n csock.connect(('8.8.8.8', 80))\n (addr, port) = csock.getsockname()\n csock.close()\n return addr\n except socket.error:\n return \"127.0.0.1\"", "def nameToAddress(self, name):\n pass", "def get_vm_ip(vm_name):\n return ll_vms.wait_for_vm_ip(vm_name)[1]['ip']", "def _get_ip():\n cmd_netstat = ['netstat', '-nr']\n p1 = subprocess.Popen(cmd_netstat, stdout=subprocess.PIPE)\n cmd_grep = ['grep', '^0\\.0\\.0\\.0']\n p2 = subprocess.Popen(cmd_grep, stdin=p1.stdout, stdout=subprocess.PIPE)\n cmd_awk = ['awk', '{ print $2 }']\n p3 = subprocess.Popen(cmd_awk, stdin=p2.stdout, stdout=subprocess.PIPE)\n galaxy_ip = p3.stdout.read()\n log.debug('Host IP determined to be %s', galaxy_ip)\n return galaxy_ip", "def searchCountry(host):\n process = subprocess.Popen(\"geoiplookup \"+host,stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n secondPart = output.split(\"GeoIP Country Edition: \", 1)[1]\n country = secondPart.split(\"\\nGeoIP City Edition\", 1)[0]\n return country", "def get_ext_ip_addr(self, node_name):\n node = self._cloud.get_server(node_name)\n if node is None:\n raise CloudError('Cannot retrieve node/IP information. Is `node_name` set correctly?')\n return node.accessIPv4", "def get_address_output(name: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n region: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAddressResult]:\n ...", "async def get_ip():\n\turl = 'https://cheese.formice.com/api/tfm/ip'\n\tdata = await request_api(url)\n\n\tif not len(data):\n\t\t# Empty dictionary, request failed, let's use default server IP\n\t\tsuccess = True\n\telse:\n\t\tsuccess = data.pop('success', False)\n\t\terror = data.pop('error', '').capitalize()\n\t\tdescription = data.pop('description', 'No description was provided.')\n\n\tif not success:\n\t\tif error == 'Maintenance':\n\t\t\traise MaintenanceError('The game is under maintenance.')\n\n\t\tif error == 'Internal':\n\t\t\traise InternalError(description)\n\n\t\traise EndpointError(f'{error}: {description}')\n\n\treturn Keys(version=666, **data.get('server', {}))", "def ip_info():\n return str(getIP())", "def get_ip_address2(ifname):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n return socket.inet_ntoa(fcntl.ioctl(\n s.fileno(),\n 0x8915, # SIOCGIFADDR\n struct.pack('256s', ifname[:15])\n )[20:24])\n except:\n return None", "def get_ip():\n if not request.headers.getlist(\"X-Forwarded-For\"):\n return str(request.remote_addr)\n else:\n return str(request.headers.getlist(\"X-Forwarded-For\")[0])", "def get_local_ip():\n\n return os.environ[LOCAL_IP_KEY]", "def ip_address(self):\n return self.address", "def ip_addr(self):\n return self.ip_addresses[0]", "def get_persistent_address(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n try:\n client.describe_addresses(PublicIps=[instance.ip_address])\n return instance.ip_address\n except botocore.client.ClientError as exc:\n if exc.response.get('Error', {}).get('Code') != 'InvalidAddress.NotFound':\n raise\n # Address is not public\n return None\n if 
instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n try:\n return compute.addresses().get(address=instance.name, project=instance.project, region=instance.region).execute()['address']\n except errors.HttpError as exc:\n if 'was not found' in str(exc):\n return None\n raise\n raise ValueError('Unknown cloud %s' % instance.cloud)", "def api_myip():\n return request.remote_addr, 200, {'Content-Type': 'text/plain'}", "def get_free_ip(reservations,node,networkname):\n ips=[]\n iprange=''\n for reservation in sorted(reservations, key=lambda r: r.id, reverse=True):\n if reservation.next_action != \"DEPLOY\":\n continue\n rnetworks = reservation.data_reservation.networks\n for network in rnetworks:\n if network.name == networkname:\n for netres in network.network_resources:\n if netres.node_id == node:\n iprange = netres.iprange\n\n rcontainer = reservation.data_reservation.containers\n for container in rcontainer:\n if container.node_id == node:\n for netcon in container.network_connection:\n if netcon.network_id == networkname:\n ips.append(netcon.ipaddress)\n\n rkubernetes = reservation.data_reservation.kubernetes\n for kubernetes in rkubernetes:\n if kubernetes.node_id == node:\n ips.append(kubernetes.ipaddress)\n\n\n\n # asuming /24 !!\n if iprange == '':\n print(\"error: no network found for:\",networkname)\n sys.exit(1)\n nodenet = iprange[0:-4]\n #search first free IP\n i = 1\n free_ip = ''\n while i<254:\n i+=1\n free_ip = nodenet+str(i)\n if free_ip not in ips:\n break\n # todo: check if free_ip is a valid IP\n return free_ip", "def get_address_by_name(name, limit):\n request = \"{}/{}?key={}&q={}&type=json&limit={}\".format(config.GEOCODE_URL, config.GEOCODE_SEARCH_PATH, config.GEOCODE_KEY, name, limit)\n response = requests.get(request).json()\n return response", "def gethostbycondorname(name):\n\n m = htcondor_ip_name_re.match(name)\n if m is not None:\n return m.group(1).replace('-', '.')\n else:\n return socket.gethostbyname(name)" ]
[ "0.64503324", "0.63069993", "0.6197815", "0.618933", "0.61700016", "0.609886", "0.5992339", "0.5982672", "0.5972976", "0.5954624", "0.5945781", "0.5938082", "0.5936992", "0.5928978", "0.59271926", "0.59166557", "0.58961654", "0.5854242", "0.581978", "0.58141637", "0.5809872", "0.5806588", "0.5783619", "0.5768035", "0.5764843", "0.57551545", "0.5731219", "0.57216877", "0.57020044", "0.5693605", "0.5691003", "0.56872374", "0.5680021", "0.5669705", "0.56559914", "0.56485367", "0.56484413", "0.56382084", "0.5620377", "0.5613188", "0.55879486", "0.5571119", "0.55631495", "0.5535381", "0.5534715", "0.553209", "0.55097234", "0.55097234", "0.55097234", "0.550653", "0.54881114", "0.5481466", "0.5480401", "0.54795206", "0.547823", "0.547823", "0.54744625", "0.54672927", "0.545959", "0.545948", "0.54498816", "0.5445545", "0.5445545", "0.5439971", "0.5439971", "0.5439971", "0.5439971", "0.5435769", "0.54243284", "0.5414599", "0.5397294", "0.5391797", "0.5391596", "0.5390951", "0.5389545", "0.5380706", "0.53732306", "0.5368999", "0.53635085", "0.53609335", "0.53572017", "0.53543615", "0.53533363", "0.53529584", "0.5346882", "0.533461", "0.5329917", "0.53188497", "0.5312847", "0.5303731", "0.5298473", "0.5293943", "0.5291453", "0.5286361", "0.5284659", "0.5278101", "0.52766395", "0.5267742", "0.526316", "0.5260328" ]
0.6974309
0
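The positive document in the record above delegates the actual lookup to a ZeroTier helper (`zerotier.get_address(marketplace)`) whose implementation is not part of this record. As a rough, runnable stand-in, the sketch below resolves the host's outbound IPv4 address using only the standard library, in the spirit of several of the negatives (e.g. `_get_my_ip`). This is an assumption-laden substitute, not the Two1/ZeroTier implementation: `get_local_ip` and the `8.8.8.8` probe address are illustrative choices.

```python
# Hypothetical stand-in for the zerotier.get_address(...) helper used in the
# positive document above. It reports the local IPv4 address that outbound
# traffic would use; it does NOT query a ZeroTier network.
import socket


def get_local_ip(probe_host: str = "8.8.8.8", probe_port: int = 80) -> str:
    """Return the local IPv4 address used for outbound traffic.

    connect() on a UDP socket sends no packets; it only selects the local
    interface, which getsockname() then reports. Raises OSError if the host
    has no route to probe_host.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.connect((probe_host, probe_port))
        return sock.getsockname()[0]


if __name__ == "__main__":
    print(get_local_ip())
```

The UDP-connect trick is preferred over `socket.gethostbyname(socket.gethostname())` (seen in some negatives) because the latter can return `127.0.0.1` on hosts whose hostname maps to loopback.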
Checks the state of a field and its neighbors to decide whether the field should die or live in the next tick
def _should_cell_live(self, cell: Cell) -> bool: living_neighbours_count = self._count_living_neighbors(cell) # Any live cell with two or three live neighbours survives if cell.is_alive and living_neighbours_count in [2, 3]: return True # Any dead cell with three live neighbours becomes a live cell if not cell.is_alive and living_neighbours_count == 3: return True # All other live cells die in the next generation. Similarly, all other dead cells stay dead return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def live_or_die(self, x, y):\n neighbors = self.get_neighbors(x, y)\n num_neighbors = 0\n for val in neighbors:\n if val:\n num_neighbors+=1\n\n\n # cell dies if less than 2 neighbors\n if num_neighbors < 2:\n return False\n\n # cell lives on if has 2 or 3 neighbors\n if (num_neighbors == 2 or num_neighbors == 3) and self._board[x][y]:\n return True\n\n # cell dies if more than 2 neighbors\n if num_neighbors > 3:\n return False\n\n # cell is born if has 3 neighbors\n if num_neighbors == 3 and not self._board[x][y]:\n return True\n\n # for consistency\n return False", "def _check_cells(self):\n for row_number in range(self.number_cells_y):\n for col_number in range(self.number_cells_x):\n alive_neighbours = self._get_neighbours(row_number,col_number)\n \n self.to_be_updated[row_number][col_number] = False\n if self.cells[row_number][col_number].get_status():\n if alive_neighbours < 2:\n self.to_be_updated[row_number][col_number] = True\n elif alive_neighbours > 3:\n self.to_be_updated[row_number][col_number] = True\n else:\n if alive_neighbours == 3:\n self.to_be_updated[row_number][col_number] = True", "def check_time(self):\n while True:\n for name in self.neighbors:\n if not self.neighbors[name].is_killed:\n if not self.neighbors[name].update_ready and time.time() - self.neighbors[name].send_timer > self.timeout:\n self.neighbors[name].update_ready = True\n if time.time() - self.neighbors[name].kill_timer > 3 * self.timeout:\n self.neighbors[name].is_killed = True", "def just_died(self):\r\n self.dead = True", "def check_bounds(self):\n\n if self.bounds_action == self.BOUNCE:\n if self.hits_left_or_right():\n self.dx = self.dx * -1\n if self.hits_top_or_bottom():\n self.dy = self.dy * -1\n\n if self.bounds_action == self.STOP:\n if self.hits_left_or_right():\n self.dx = 0\n self.dy = 0\n if self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n\n if self.bounds_action == self.SKID:\n if self.hits_left_or_right():\n self.dx = 0\n if self.hits_top_or_bottom():\n self.dy = 0\n\n if self.bounds_action == self.DIE:\n if self.hits_left_or_right() or self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n self.visible = False", "def get_death(self):\r\n if self.dead:\r\n self.dead = False\r\n return True\r\n return False", "def test_goto_field_boss_guider(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(9, 37, 0, 0, cmdState)", "def have_i_lost(self):\n if self.life_points <= 0:\n self.running = False", "def test_goto_field_boss_slew(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n cmdState.doHartmann = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n self._goto_field_boss(3, 26, 0, 0, cmdState)", "def _check_fleet_edges(self):\n\t\tfor auto in self.autos.sprites():\n\t\t\tif auto.check_edges():\n\t\t\t\tself._change_fleet_direction()\n\t\t\t\tbreak", "def check_neighbors(self, position):\r\n x, y, z = position\r\n for dx, dy, dz in FACES:\r\n key = (x + dx, y + dy, z + dz)\r\n if key not in self.world:\r\n continue\r\n if self.exposed(key):\r\n if key not in self.shown:\r\n self.show_block(key)\r\n else:\r\n if 
key in self.shown:\r\n self.hide_block(key)", "def is_fixed_state( previous_live, live_cells ):\n fixed = False\n if previous_live[0].size == live_cells[0].size:\n if previous_live[1].size == live_cells[1].size:\n if (previous_live[0]==live_cells[0]).all():\n if (previous_live[1]==live_cells[1]).all():\n fixed = True\n return fixed", "def check_dead(cart):\n id = cart_to_loc(cart)\n return voxel_data[id] == 0", "def crates_destroyed(self, game_state: dict):\n\n bomb_position_x = game_state['self'][3][0]\n bomb_position_y = game_state['self'][3][1]\n n_crates = 0\n\n for i in range(3):\n if bomb_position_x - i - 1 >= 0:\n if game_state['field'][bomb_position_x - i - 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x - i - 1][bomb_position_y] == -1:\n break\n\n for i in range(3):\n if bomb_position_x + i + 1 <= 16:\n if game_state['field'][bomb_position_x + i + 1][bomb_position_y] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x + i + 1][bomb_position_y] == -1:\n break\n\n for i in range(3):\n if bomb_position_y - i - 1 >= 0:\n if game_state['field'][bomb_position_x][bomb_position_y - i - 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y - i - 1] == -1:\n break\n\n for i in range(3):\n if bomb_position_y + i + 1 <= 16:\n if game_state['field'][bomb_position_x][bomb_position_y + i + 1] == 1:\n n_crates += 1\n elif game_state['field'][bomb_position_x][bomb_position_y + i + 1] == -1:\n break\n\n return n_crates", "def test_goto_field_boss_hartmann(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doCalibs = False\n cmdState.arcTime = 0\n cmdState.flatTime = 0\n cmdState.doGuider = False\n self._goto_field_boss(5, 29, 0, 0, cmdState)", "def is_dead(self):\n if self.killer:\n if self.killer.stype == 'fire' and not (self.killer in self.pjs.fires):\n return True\n elif self.killer.stype == 'enemy' and self.timeout == 0:\n return True\n else:\n return False", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n # if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n if alien.check_edges():\n print(\"alien.rect BEFORE\", alien.rect) # rect = <rect(x, y, width, height)> \n print(\"direction BEFORE \", self.settings.fleet_direction)\n self._change_fleet_direction()\n print(\"direction AFTER \", self.settings.fleet_direction)\n print(\"Change in y is \", alien.rect.y)\n break", "def check_state(self):\n pass", "def life_step(state):\n\t# For every cell each live cell in any of the 8 neighbouring cells contributes 1 to the sum\n\t# Rolling matricies is periodic so this implements periodic boundary conditions\n\tnumberOfNeigbours = sum(np.roll(np.roll(state, i, axis=0), j, axis=1)\n\t\t\t\t\t\t for i in (-1,0,1) for j in (-1,0,1) if (i != 0 or j != 0))\n\n\t# Any live cell with fewer than two live neighbours dies, as if caused by under-population\n\tstate = np.where(numberOfNeigbours < 2, 0, state)\n\t# Any live cell with more than three live neighbours dies, as if by over-population\n\tstate = np.where(numberOfNeigbours > 3, 0, state)\n\t# Any dead cell with exactly three live neighbours becomes a live cell, as if by reproduction.\n\tstate = np.where(numberOfNeigbours == 3, 1, state)\n\n\treturn state", "def test_goto_field_apogee_no_slew_decenter_off(self):\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('guider', 
TestHelper.guiderState['guiderOnDecenter'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def _check_fleet_edges(self):\n\t\tfor alien in self.aliens.sprites():\n\t\t\tif alien.check_edges():\n\t\t\t\tself._change_fleet_direction()\n\t\t\t\tbreak", "def step(self):\n # gets who has fired who in this step\n blues_fire_reds = np.array([[blue.fires_(red) for red in self.red_drones] for blue in self.blue_drones])\n reds_fire_blues = np.array([[red.fires_(blue) for blue in self.blue_drones] for red in self.red_drones])\n\n # if the foe is no longer seen, the count restarts from 0\n self.blues_have_fired_reds *= blues_fire_reds\n self.reds_have_fired_blues *= reds_fire_blues\n\n # and the count is incremented for the others\n self.blues_have_fired_reds += blues_fire_reds\n self.reds_have_fired_blues += reds_fire_blues\n\n # np magic : first find the list of duos shooter/shot, keep the shots (only once)\n red_deads = np.unique(np.argwhere(self.blues_have_fired_reds >= self.blue_shots_to_kill).T[1])\n blue_deads = np.unique(np.argwhere(self.reds_have_fired_blues >= self.red_shots_to_kill).T[1])\n\n\n # tell the drones that they are dead\n for drone_id in blue_deads:\n self.blue_drones[drone_id].is_killed(is_blue=True)\n for drone_id in red_deads:\n self.red_drones[drone_id].is_killed(is_blue=False)\n\n # consider only living drones\n blue_drones = [drone for drone in self.blue_drones if drone.is_alive]\n red_drones = [drone for drone in self.red_drones if drone.is_alive]\n\n bf_obs, rf_obs = self.get_observation()\n bf_reward = rf_reward = 0\n remaining_blues, remaining_reds = len(blue_drones), len(red_drones),\n blue_shots, red_shots = len(blue_deads), len(red_deads)\n\n if blue_shots + red_shots > 0:\n print('someone is killed: {0} blues and {1} reds'.format(blue_shots, red_shots))\n\n return bf_obs, bf_reward, remaining_blues, blue_shots, rf_obs, rf_reward, remaining_reds, red_shots", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def is_dead(self):\n return self.hp <= 0", "def wall_check(x: int, y: int, state: bool) -> bool:\r\n if state:\r\n if x == 0 or x == shape-1 or y == 0 or y == shape-1:\r\n return True\r\n else:\r\n if x < 0 or x >= shape or y < 0 or y >= shape:\r\n return True\r\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def deadTest(self, board):\n if board[0] and board[4] and board[8]:\n return True\n if board[2] and board[4] and board[6]:\n return True\n for i in range(3):\n #check every row\n row = i * 3\n if board[row] and board[row+1] and board[row+2]:\n return True\n #check every column\n if board[i] and board[i+3] and board[i+6]:\n return True\n return False", "def is_dead(self):\n return self.hearts <= 0", "def test_goto_field_boss_calibs(self):\n sopTester.updateModel('mcp', 
TestHelper.mcpState['all_off'])\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n cmdState.doHartmann = False\n cmdState.doGuider = False\n self._goto_field_boss(10, 57, 0, 0, cmdState)", "def needs_rebuild(self):\n for p in self.sys.particles:\n dr = p.r - self.old_pos[p.id]\n dr.apply_periodic(self.sys.box)\n if dr.length() >= 0.5*self.pad:\n return True \n return False", "def check_change(self, state_variables):\n for control in self.__control_list:\n if control[0] != 'control':\n\t\t\t\t# sum of values of state variables of interest in the previous and the current interval of time\n sum1 = np.matmul(control[1], state_variables[:,0])\n sum2 = np.matmul(control[1], state_variables[:,1])\n\n if (np.sign(sum1 - control[2]) != np.sign(sum2 - control[2])):\n self.__active_control = control\n return True\t\n return False", "def isGameOver(self):\n for i in range(self.rows):\n for j in range(self.columns):\n if self.grid[i][j].face == 'down':\n return False\n #if here then all cards must be face up\n return True", "def check_game_over(self):\n for piece in self.pieces:\n if not piece.destroyed:\n return False\n print(\"Signal.END\")\n return True", "def checkEdges( self ):\n\t\tx, y = self.position.xy\n\t\tvx, vy = self.velocity.xy\n\t\t\n\t\t#if particle hit left or right wall\n\t\tif abs( x ) > WINDOW_X - self.r:\n\t\t\t#change vertical speed\n\t\t\tvx *= -1\n\t\t\t\n\t\t#if particle hit top or bottom wall\n\t\tif abs( y ) > WINDOW_Y - self.r:\n\t\t\t#change horizontal speed\n\t\t\tvy *= -1\n\t\t\n\t\t#enter new velocity\n\t\tself.velocity.xy = (vx, vy)", "def is_reached(self, vehicle_state) -> bool:\n return False", "def is_unhappy(self):\n #checked!#\n ###your code here###\n same=0\n for i in self.home.neighbors:\n if i.occupant!=None:\n if i.occupant.group==self.group:\n same+=1\n happniess=float(same)/len(self.home.neighbors)\n if happniess<self.happiness_threshold:\n return True\n else:\n return False", "def check_boundaries(self):\n # Checks if the enemy bar has gone of the net\n if self.rect.left <= self.settings.WINDOW_WIDTH / 2:\n self.rect.left = self.settings.WINDOW_WIDTH / 2\n self.isMovingUp = False\n\n # Checks if the enemy bar has gone out of bound to the right\n if self.rect.right >= self.settings.WINDOW_WIDTH:\n self.rect.right = self.settings.WINDOW_WIDTH\n self.isMovingUp = True", "def check_falling(self, obstacles):\n self.rect.move_ip((0, 1))\n if not pygame.sprite.spritecollideany(self, obstacles):\n if not self.climb:\n\t self.fall = True\n\n self.rect.move_ip((0, -1))", "def update(self):\n if self.left < 0 or self.right > games.screen.width:\n self.dx = -self.dx\n elif random.randrange(self.odds_change) == 0:\n self.dx = -self.dx\n self.collided = False \n self.check_collide()", "def advance_check(self):\n values = [self.die_a.value, self.die_b.value]\n if self.stage == 3:\n if not self.cheating and \"5\" in values and \"6\" in values:\n return True\n if self.stage == 2 and \"ANGRY\" in values and \"4\" in values:\n self.stage = 3\n if self.stage == 1 and \"1\" in values and \"2\" in values:\n self.stage = 2\n if self.die_a.value == self.die_b.value == \"ANGRY\":\n print(\"WOW, you're ANGRY!\")\n self.stage = 1\n return False", "def check_state(self):\n if self.dpad:\n x_state, y_state = self.handle_dpad()\n else:\n x_state, y_state = self.handle_abs()\n\n # pylint: disable=no-member\n new_state = set((\n x_state,\n y_state,\n ('Key', 0x130, int(self.microbit.button_a.is_pressed())),\n ('Key', 0x131, 
int(self.microbit.button_b.is_pressed())),\n ('Key', 0x13a, int(self.microbit.pin0.is_touched())),\n ('Key', 0x133, int(self.microbit.pin1.is_touched())),\n ('Key', 0x134, int(self.microbit.pin2.is_touched())),\n ))\n events = new_state - self.state\n self.state = new_state\n return events", "def on_death(self, state):", "def walkable(self, this_field):\n from Main import Player_list\n walkable2 = True\n for p in (Player_list):\n if (p.field == this_field and (int(math.floor(p.x)),int(math.ceil(p.y))) == self.coordinate):\n walkable2 = False\n return (self.walkable_var and walkable2)", "def goal_test(self, state):\n self.numbernodes += 1\n\n i = 0\n for box in state.boxes :\n for coord in self.board.positionGoal :\n if coord[0] == box.y and coord[1] == box.x : \n i+=1\n if i == 0 : return False\n i = 0\n return True", "def event_m20_11_x89(z42=20110480):\n \"\"\"State 0,6: Did you destroy the king?\"\"\"\n CompareEventFlag(0, 100978, 1)\n if ConditionGroup(0):\n \"\"\"State 1: Is the player living or dead?\"\"\"\n IsPlayerHollow(0, 0, 1)\n if ConditionGroup(0):\n pass\n else:\n \"\"\"State 2: The door cannot be opened or closed\"\"\"\n ChangeObjState(z42, 100)\n \"\"\"State 4: Have you examined the door? Or did you become a living person?\"\"\"\n IsObjSearched(0, z42)\n IsPlayerHollow(1, 0, 1)\n if ConditionGroup(0):\n Goto('L0')\n elif ConditionGroup(1):\n pass\n \"\"\"State 3: The door can be opened and closed\"\"\"\n ChangeObjState(z42, 10)\n \"\"\"State 5: Did you open the door?\"\"\"\n CompareObjState(0, z42, 30, 0)\n assert ConditionGroup(0)\n \"\"\"State 9: Opened the door\"\"\"\n return 0\n else:\n \"\"\"State 7: Unopenable door_2\"\"\"\n ChangeObjState(z42, 100)\n \"\"\"State 8: Have you examined the door?\"\"\"\n IsObjSearched(0, z42)\n assert ConditionGroup(0)\n \"\"\"State 10: The door does not open\"\"\"\n Label('L0')\n return 1", "def event_m20_11_7000():\n \"\"\"State 0,2: [Preset] Dead Door_SubState\"\"\"\n assert event_m20_11_x76(z39=20110485, z40=700000, z41=211000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def _is_dead_end(self, i_row, i_col, direction):\n return (((i_row, i_col) in self._ts_cells and direction == \"s\") or\n ((i_row, i_col) in self._ts_cells and direction == \"se\") or\n ((i_row, i_col) in self._ts_cells and direction == \"sw\") or\n ((i_row, i_col) in self._ls_cells and direction == \"e\") or\n ((i_row, i_col) in self._ls_cells and direction == \"ne\") or\n ((i_row, i_col) in self._ls_cells and direction == \"se\") or\n ((i_row, i_col) in self._bs_cells and direction == \"n\") or\n ((i_row, i_col) in self._bs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._bs_cells and direction == \"ne\") or\n ((i_row, i_col) in self._rs_cells and direction == \"w\") or\n ((i_row, i_col) in self._rs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._rs_cells and direction == \"sw\") or\n ((i_row, i_col) == self._tl_cell and direction == \"s\") or\n ((i_row, i_col) == self._tl_cell and direction == \"se\") or\n ((i_row, i_col) == self._tl_cell and direction == \"e\") or\n ((i_row, i_col) == self._bl_cell and direction == \"n\") or\n ((i_row, i_col) == self._bl_cell and direction == \"ne\") or\n ((i_row, i_col) == self._bl_cell and direction == \"e\") or\n ((i_row, i_col) == self._tr_cell and direction == \"w\") or\n ((i_row, i_col) == self._tr_cell and direction == \"sw\") or\n ((i_row, i_col) == self._tr_cell and direction == \"s\") or\n ((i_row, i_col) == self._br_cell and direction == \"w\") or\n ((i_row, i_col) == self._br_cell and 
direction == \"nw\") or\n ((i_row, i_col) == self._br_cell and direction == \"n\"))", "def test_dead_cell(self, alive_cells, alive):\n for positions in alive_cells:\n world = gol.World(3, 3)\n for x, y in positions:\n world.set_cell((x, y))\n world.update()\n assert world[(0, 0)] == alive", "def is_valid(field):\r\n taken_coordinates = []\r\n count_ships = [0]*4\r\n # counting ships\r\n try:\r\n for row in range(10):\r\n for cell in range(10):\r\n if (row, cell) not in taken_coordinates and\\\r\n has_ship((row, cell), field):\r\n taken_coordinates.extend(ship_coordinates((row, cell), field))\r\n count_ships[ship_size((row, cell), field) - 1] += 1\r\n except IndexError:\r\n return False\r\n # check if the amount of ship is correct and if they are not crossing\r\n if count_ships == [i for i in range(4, 0, -1)] and\\\r\n len(taken_coordinates) == len(set(taken_coordinates)):\r\n return True\r\n return False", "def checkAmountOfNeighbors(self):\n cellsToDelete = []\n for cell in self.cells:\n if(cell.numOfNeighbor > 3 or cell.numOfNeighbor < 2 or (cell.numOfNeighbor == 2 and cell.dead == True)):\n cellsToDelete.append(cell)\n elif(cell.numOfNeighbor == 3 and cell.dead == True):\n cell.makeAlive()\n cell.numOfNeighbor = 0\n\n self.removeCells(cellsToDelete)", "def is_cycle(self, state, visited):\n substate = (state.getPacmanPosition(),\n state.getGhostPositions(),\n state.getFood())\n\n if substate in visited:\n return True\n else:\n visited.append(substate)\n\n return False", "def calculate_dead_alive(board, posx, posy):\n alive = 0\n for aux in ((x, y) for x in [-1, 0, 1] for y in [-1, 0, 1]):\n if aux == (0, 0):\n continue\n pos = np.array((posx, posy)) + np.array(aux)\n if min(pos) < 0 or max(pos) >= board.shape[0]:\n continue\n alive += board[pos[0]][pos[1]]\n if board[posx][posy]:\n # alive cell\n if alive in (2, 3):\n return True\n else:\n # dead cell\n if alive == 3:\n return True\n return False", "def is_flying(self) -> bool:\n return self.proto.is_flying", "def is_valid_move(self, board, fieldy, fieldx):\n if isinstance(board[fieldy][fieldx], Piece):\n return False\n if self.posy - fieldy == self.direction and abs(self.posx - fieldx) == 1:\n return True\n else:\n return False", "def boundary_invariant(self):\n for cell in self.fire_boundary():\n if self.is_empty(cell[0], cell[1]):\n print \"Cell \" + str(cell) + \" in fire boundary is empty.\"\n return False\n return True", "def is_game_over(self):\n if self.just_cheated_a or self.just_cheated_b:\n return False\n if self.game_stage == 3:\n return (self.die_a.current_value == \"5\" and self.die_b.current_value == \"6\" or\n self.die_a.current_value == \"6\" and self.die_b.current_value == \"5\")\n else:\n return False", "def __game_is_over(self):\n return not (self.__playing and self.__bricks_total > 0 and self.__num_lives > 0)", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n piece = self.piece_type(cur_pos, board)\n\n if state == \"UNFINISHED\":\n if (new_row == cur_row + 3) and (new_col == cur_col + 2): #F5\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col + 1] is not None:\n print(\"hello 1 elephant\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"1for some reason it thinks the new pos has a color of the same piece\")\n return\n print(\"elephant moved down and right\")\n return True\n\n elif 
(new_row == cur_row - 3) and (new_col == cur_col - 2): #B1\n print(\"Hello im here\")\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col - 1] is not None:\n print(\"horse attempted to move left and up the board\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return\n print(\"e moved up and left\")\n return True\n\n elif (new_row == cur_row + 3) and (new_col == cur_col - 2): #\n # checking left and right are valid\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col - 1] is not None:\n print(\"hello e3\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"e moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col + 2): #F1\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col + 1] is not None:\n print(\"hello e4\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and left 2\")\n return True\n #---------------------------------------------------------------------------------------------------------------\n # Check if the forwards and backwards is legal\n elif (new_row == cur_row - 2) and (new_col == cur_col + 3): #G2\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col + 2] is not None:\n print(\"hello e5\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 5e\")\n return\n print(\"it worked e5\")\n return True\n\n elif (new_row == cur_row - 2) and (new_col == cur_col - 3): #A2\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello e6\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 6e\")\n return\n print(\"it worked e6\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col + 3): #G6\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello 7e\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"ebye 7\")\n return\n print(\"it worked e7\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col - 3): #A6\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row + 1][cur_col - 2] is not None:\n print(\"hello 8\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 8\")\n return\n print(\"it worked 8\")\n return True\n# else:\n # print(\"it actually never entered the if statement?\"\n #return False\n else:\n print(\"False\")\n return False", "def trackGhosts(self, gameState):\n\n # Get some values that we will use later\n myState = gameState.getAgentState(self.index)\n myPos = myState.getPosition()\n noisyDistances = gameState.getAgentDistances()\n eatenBabies = self.getEatenBabies(gameState)\n\n # Track each opponent\n opponentFound = [False] * 4\n for idx in self.getOpponents(gameState):\n pos = 
gameState.getAgentState(idx).getPosition()\n\n # If we are close to opponents (we see them), update beliefs to one point\n if pos is not None:\n self.setBeliefs(pos, idx)\n opponentFound[idx] = True\n\n # If the teammate has eaten a ghost, update belief to initial position\n elif self.updateEatenOpponents1(gameState, idx):\n opponentFound[idx] = True\n print \"Our teammate has eaten an opponent, yeah!\"\n\n # If not, update beliefs taking into account opponents possible movements\n else:\n # elapseTime (update beliefs of opponent considering they have taken an action)\n self.elapseTime(idx)\n\n # If opponent has changed from ghost to pacman or viceversa (and haven't died), we know their x coordinate\n if self.isPacman[idx] != gameState.getAgentState(idx).isPacman:\n if self.isPacman[idx]: # Was pacman, now is ghost\n for pos in self.beliefs[idx].keys():\n if pos[0] != self.ghostLand:\n self.beliefs[idx].pop(pos, None)\n else: # Was ghost, now is pacman\n for pos in self.beliefs[idx].keys():\n if pos[0] != self.pacmanLand:\n self.beliefs[idx].pop(pos, None)\n self.beliefs[idx].normalize()\n\n # Get positions of me and my teammate\n pos0 = gameState.getAgentState(self.getTeam(gameState)[0]).getPosition()\n pos1 = gameState.getAgentState(self.getTeam(gameState)[1]).getPosition()\n # Remove impossible positions\n for p in self.beliefs[idx].keys():\n # We should see the opponents from there, if we don't they are not there\n if (pos0 is not None and util.manhattanDistance(p, pos0) <= 5) or (pos1 is not None and util.manhattanDistance(p, pos1) <= 5):\n self.beliefs[idx].pop(p, None)\n # There is still a food dot there, therefore the opponent is not there\n elif self.getFoodYouAreDefending(gameState)[p[0]][p[1]]:\n self.beliefs[idx].pop(p, None)\n # Our belief says the opponent could be a ghost when it is a pacman\n elif self.isPacman[idx] and p[0] * self.going_left < self.pacmanLand * self.going_left - 1:\n self.beliefs[idx].pop(p, None)\n # Our belief says the opponent could be a pacman when it is a ghost\n elif not self.isPacman[idx] and p[0] * self.going_left > self.ghostLand * self.going_left + 1:\n self.beliefs[idx].pop(p, None)\n\n # Calculate opponents that could have eaten the missing food\n eaters = [[], []]\n for i, pos in enumerate(eatenBabies):\n eater = []\n for idx in self.getOpponents(gameState):\n if opponentFound[idx]:\n continue\n if pos in self.beliefs[idx].keys() and self.beliefs[idx][pos] > 0:\n eater.append(idx)\n eaters[i] = eater\n if i > 1:\n break\n\n for idx in self.getOpponents(gameState):\n if not opponentFound[idx]:\n # If we are not close to opponents (we don't see them), check if only one ghost can have eaten the food\n newBelief = False\n # This dirty code just changes the ghost beliefs\n if len(eaters[0]) == 1:\n newBelief = True\n if len(eaters[1]) == 0:\n if eaters[0][0] == idx:\n self.setBeliefs(eatenBabies[0], idx)\n else:\n newBelief = False\n else: #1 || 2\n if eaters[0][0] == idx:\n self.setBeliefs(eatenBabies[0], idx)\n else:\n self.setBeliefs(eatenBabies[1], idx)\n elif len(eaters[1]) == 1:\n newBelief = True\n if len(eaters[0]) == 2:\n if eaters[1][0] == idx:\n self.setBeliefs(eatenBabies[1], idx)\n else:\n self.setBeliefs(eatenBabies[0], idx)\n else: # 0\n if eaters[1][0] == idx:\n self.setBeliefs(eatenBabies[1], idx)\n else:\n newBelief = False\n\n if not newBelief:\n # If we have not figured out the exact position, use noisy distance that we have\n self.observe(noisyDistances[idx], gameState, myPos, idx)\n\n # This is to see all the possible 
positions where the opponents may be, all probabilities are turned to one\n beliefs = [util.Counter(), util.Counter(), util.Counter(), util.Counter()]\n for idx, bel in enumerate(self.beliefs):\n for p in bel:\n if bel[p] > 0:\n beliefs[idx][p] = 1\n # beliefs[idx][p] = self.beliefs[idx][p]\n\n self.displayDistributionsOverPositions(beliefs)", "def check_reached(self):\n m_x, m_y = self.destination.get_pos()\n m_radius = self.destination.radius\n distance_centre = math.sqrt((m_x - self.x)**2 + (m_y - self.y)**2)\n sum_radii = m_radius + self.radius\n if distance_centre < sum_radii:\n self.color = pygame.colordict.THECOLORS['green']\n self.has_reached = True", "def isGoalState(self, state):\n \"*** YOUR CODE HERE ***\"\n # Utilizaré el método .count del grid, de manera que me contará los trues que haya.\n # Cuando no queden trues, ya hemos acabado.\n return state[1].count() == 0\n # util.raiseNotDefined()", "def update_state(self):\n self.last_position = self.current_position\n self.last_distance = self.current_distance\n self.last_collision_time_stamp = self.current_collision_time_stamp\n self.current_kinematics = self.airsim_client.simGetGroundTruthKinematics(vehicle_name=self.drone_name)\n self.current_position = self.current_kinematics.position + self.base_offset\n self.current_collision_time_stamp = self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name).time_stamp\n # print(\"DEBUG: simGetCollisionInfo:\", self.airsim_client.simGetCollisionInfo(vehicle_name=self.drone_name))\n # self.pending_death = self.airsim_client.simIsRacerDisqualified(vehicle_name=self.drone_name)\n self.objective_status = self.current_objective.next_gate_status(self.last_position, self.current_position)\n if self.objective_status == GateStatus.CROSSED or self.objective_status == GateStatus.PASSED:\n if self.switch_to_next_objective(): # if track is finished (changes self.last_distance)\n self.track_complete = True\n self.current_distance = self.current_position.distance_to(self.current_objective.gate_pose.position)", "def test_goto_field_cartridge_mismatch(self):\n\n sopTester.updateModel('guider', TestHelper.guiderState['bossLoaded'])\n\n mcpState = TestHelper.mcpState['boss_science']\n mcpState.update({'instrumentNum': [15]})\n sopTester.updateModel('mcp', mcpState)\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n masterThread.goto_field(self.cmd, cmdState, myGlobals.actorState)\n self._check_cmd(0, 14, 0, 0, finish=True, didFail=True)", "def check_evolve(self):\n if self.team == 'white':\n if self.position[0] == 0:\n self.evolve()\n \n else:\n if self.position[0] == 7:\n self.evolve()", "def checkMissionEnd(self) -> bool:\n if getTimestamp() - self.mission['timestamp'] < self.TAKE_OFF_DELAY:\n return False\n drone: Drone\n for drone in self.dronesSet.getDrones().values():\n if drone['state'] != 'onTheGround' and drone['state'] != 'crashed':\n return False\n\n self.endMission()\n return True", "def should_be_alive(live_coords=None, coord=None, is_alive=True):\n if not live_coords or not coord:\n return False\n num_alive = alive_neighbors(live_coords, coord)\n if is_alive:\n if num_alive < 2:\n return False\n elif num_alive == 2 or num_alive == 3:\n return True\n elif num_alive > 3:\n return False\n elif num_alive == 3:\n return True\n return False", "def test_check_event(self):\n field = Field()\n\n # out of borders\n self.assertEqual(field.check_event([-1, 1]), Field.Event.OBSTACLE_HIT)\n self.assertEqual(field.check_event([40, 1]), Field.Event.OBSTACLE_HIT)\n 
self.assertEqual(field.check_event([1, -1]), Field.Event.OBSTACLE_HIT)\n self.assertEqual(field.check_event([1, 40]), Field.Event.OBSTACLE_HIT)\n\n obj_pos = [1, 1]\n field.obstacles.append(obj_pos)\n self.assertEqual(field.check_event(obj_pos), Field.Event.OBSTACLE_HIT)\n field.obstacles.pop()\n\n field.poison.append(obj_pos)\n self.assertEqual(field.check_event(obj_pos), Field.Event.POISON)\n\n field.apple = obj_pos\n self.assertEqual(field.check_event(obj_pos), Field.Event.FOOD)", "def test_goto_field_boss_hartmann_blue_fails(self):\n\n sopTester.updateModel('mcp', TestHelper.mcpState['all_off'])\n sopTester.updateModel('hartmann', TestHelper.hartmannState['blue_fails'])\n\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n\n self._goto_field_boss(12, 37, 0, 0, cmdState, didFail=True, finish=True)", "def check_fires(self):\n for fire in self.pjs.fires:\n for block in fire.rects:\n if block.overlap(self.rects[0]):\n self.killer = fire\n return\n return", "def check_keys(self):\n if arcade.key.LEFT in self.held_keys:\n self.ship.rotate_left()\n \n if arcade.key.RIGHT in self.held_keys:\n self.ship.rotate_right()\n \n if arcade.key.UP in self.held_keys:\n self.ship.increaseThrust()\n self.ship.acelerate()\n \n if arcade.key.DOWN in self.held_keys:\n self.ship.increaseThrust()\n self.ship.decelerate()\n was_up = False\n\n if arcade.key.DOWN not in self.held_keys and arcade.key.UP not in self.held_keys:\n self.ship.decreaseThrust()\n self.ship.decelerate()\n self.ship.acelerate()\n\n \n\n # Machine gun mode...\n #if arcade.key.SPACE in self.held_keys:\n # pass", "def test_goto_field_apogee_no_slew(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doSlew = False\n self._goto_feld_apogee(9, 37, 0, 0, cmdState)", "def reached_goal(state):\n return any(map(completely_removed, state['sliders']))", "def decide(self) :\n (self.futurX,self.futurY) = self.randomNextPos()\n if self.fishBreedTimeCPT == 0 :\n self.naissance = True\n self.fishBreedTimeCPT = self.fishBreedTime\n else :\n self.fishBreedTimeCPT = self.fishBreedTimeCPT - 1\n\n if self.env.grille[self.futurY][self.futurX] == None :\n self.bougera = True\n else :\n self.bougera = False\n\n self.update()", "def pre_or_post_turn(self, game_field, all_ghost_out:bool):\r\n\r\n reference_pos = self.pos[0] + self.grid_size // 2, self.pos[1] + self.grid_size // 2 #< Positon is set to center of Pac-Man so there is no difference in which direction he moves\r\n field = game_field.possible_way(reference_pos, self.last_dir)\r\n self.cnt_points(field, all_ghost_out)\r\n self.dist = reference_pos[0] % self.grid_size, reference_pos[1] % self.grid_size\r\n\r\n # Check if Pac-Man is moving to the right \r\n if self.direction == 'r':\r\n\r\n # dist to the center of the crossing less then grid_size//2 -> it's a preturn\r\n if self.dist[0] < self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n 
self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # dist to the center of the crossing greater then grid_size//2 -> it's a postturn\r\n elif self.dist[0] > self.grid_size // 2:\r\n\r\n # Check if Pac-Man wants to move up after the crossing\r\n if self.last_dir == 'u': \r\n \r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n\r\n # Check if Pac-Man wants to move down after the crossing\r\n if self.last_dir == 'd':\r\n\r\n # Check if the next field is a field Pac-Man can move to\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n # The rest of the function does the same as above, just for the other three directions \r\n\r\n elif self.direction == 'l':\r\n #Preturn left\r\n if self.dist[0] > self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= (self.pos[0] % self.grid_size)\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n #Postturn left\r\n elif self.dist[0] < self.grid_size // 2:\r\n if self.last_dir == 'u':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] -= self.speed\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'd':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += (self.grid_size - (self.pos[0] % self.grid_size))\r\n self.pos[1] += self.speed\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'u':\r\n #Preturn up\r\n if self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n #Postturn up\r\n elif self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += self.grid_size - (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n \r\n elif self.direction == 'd':\r\n #Preturn down\r\n if self.dist[1] < self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] += (self.grid_size - (self.pos[1] % self.grid_size))\r\n 
self.direction = self.last_dir[:]\r\n #Postturn down\r\n elif self.dist[1] > self.grid_size // 2:\r\n if self.last_dir == 'l':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] -= self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n if self.last_dir == 'r':\r\n if field == None or (field[0] != 'r' and field != 'os'):\r\n self.pos[0] += self.speed\r\n self.pos[1] -= (self.pos[1] % self.grid_size)\r\n self.direction = self.last_dir[:]\r\n pass", "def gameOver(self):\n\t\treturn self.lives == 0", "def any_neighbor_burning(self):\n neighbors = self.world.get_four_neighbors(self, Patch.null)\n states = [patch.state for patch in neighbors]\n return \"orange\" in states", "def event_m10_29_x23(z49=_, z40=_):\r\n \"\"\"State 0,1: Change global event flag\"\"\"\r\n SetEventFlag(105405, z49)\r\n \"\"\"State 2: Did you disappear from the hit group you were riding on?\"\"\"\r\n IsPlayerOnHitGroup(0, z40, 0)\r\n assert ConditionGroup(0)\r\n \"\"\"State 3: End state\"\"\"\r\n return 0", "def gameOfLife(self, board) :\n # mark live-->dead (-1)\n # mark live-->live (1)\n # mark dead-->live (2)\n # mark dead-->dead (0)\n\n h = len(board)\n w = len(board[0])\n\n def counter(i,j):\n c=0\n for m in range(-1,2):\n for n in range(-1,2):\n if i+m<0 or j+n <0 :\n continue\n if i+m>h-1 or j+n>w-1:\n continue\n else:\n if board[i+m][j+n]==1 or board[i+m][j+n]==-1:\n c+=1\n return c\n\n for i in range(h):\n for j in range(w):\n live=counter(i,j)\n if board[i][j] ==1:\n live=live-1\n if live<2 or live>3:\n board[i][j]=-1\n else:\n if live==3:\n board[i][j]=2\n for i in range(h):\n for j in range(w):\n if board[i][j]==2:\n board[i][j]=1\n if board[i][j]==-1:\n board[i][j]=0", "def boundary_checker(stage, player_new):\n # Go through each possible direction a player can travel\n if player_new[0] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] == 0:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[0] > stage[0]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n elif player_new[1] > stage[1]:\n valid = False\n color.write(\"You can't leave the map!\\n\",\"ERROR\")\n # Flag validity if player still within bounds of map\n else:\n valid = True\n\n return valid", "def island_deaths(self):\n for y in self.island_map:\n for cell in y:\n cell.deaths()", "def test_goto_field_apogee_no_guider(self):\n cmdState = self.actorState.gotoField\n cmdState.reinitialize(self.cmd)\n cmdState.doGuider = False\n self._goto_feld_apogee(3, 11, 0, 0, cmdState)", "def check_grid_full(self):\n for row in self.game_state:\n for e in row:\n if e is None:\n return False\n return True", "def check_lost (grid):\r\n for row in range(4):\r\n for col in range(4):\r\n if grid[row][col]==0:\r\n return False\r\n if grid[0][0]==grid[0][1] or grid[0][0]==grid[1][0]:\r\n return False \r\n if grid[0][3]==grid[0][2] or grid[0][3]==grid[1][3]:\r\n return False \r\n if grid[3][0]==grid[2][0] or grid[3][0]==grid[3][1]:\r\n return False\r\n if grid[3][3]==grid[2][3] or grid[3][3]==grid[3][2]:\r\n return False \r\n if grid[0][1]==grid[0][2] or grid[0][1]==grid[1][1]:\r\n return False \r\n if grid[0][2]==grid[1][2]:\r\n return False \r\n if grid[1][1]==grid[2][1] or grid[1][1]==grid[1][2] or grid[1][1]==grid[1][0]:\r\n return False\r\n if grid[2][1]==grid[2][0] or grid[2][1]==grid[2][2] or grid[2][1]==grid[3][1]:\r\n return False \r\n if grid[1][0]==grid[2][0]:\r\n return 
False\r\n if grid[1][2]==grid[1][3] or grid[1][2]==grid[2][2]:\r\n return False\r\n if grid[2][2]==grid[2][3] or grid[2][2]==grid[3][2]:\r\n return False\r\n if grid[3][1]==grid[3][2]:\r\n return False\r\n else:\r\n return True", "def event_m20_11_x90(z39=20110485, z41=211000010):\n \"\"\"State 0,1: Is the player living or dead?\"\"\"\n IsPlayerHollow(0, 0, 1)\n if ConditionGroup(0):\n \"\"\"State 2: The door cannot be opened or closed\"\"\"\n Label('L0')\n ChangeObjState(z39, 100)\n \"\"\"State 8: Have you examined the door?\"\"\"\n IsObjSearched(0, z39)\n assert ConditionGroup(0)\n else:\n \"\"\"State 6: Have you lifted Banshee?\"\"\"\n CompareEventFlag(0, z41, 1)\n if ConditionGroup(0):\n pass\n else:\n \"\"\"State 7: Unopenable door_2\"\"\"\n ChangeObjState(z39, 100)\n \"\"\"State 5: Have you examined the door? or Did Banshee Ascend?\"\"\"\n IsObjSearched(0, z39)\n CompareEventFlag(1, z41, 1)\n if ConditionGroup(0):\n Goto('L1')\n elif ConditionGroup(1):\n pass\n \"\"\"State 3: The door can be opened and closed\"\"\"\n ChangeObjState(z39, 10)\n \"\"\"State 4: Did you open the door? Or did you become a living person?\"\"\"\n CompareObjState(0, z39, 30, 0)\n IsPlayerHollow(1, 0, 1)\n if ConditionGroup(0):\n \"\"\"State 10: Opened the door\"\"\"\n return 1\n elif ConditionGroup(1):\n Goto('L0')\n \"\"\"State 9: The door does not open\"\"\"\n Label('L1')\n return 0", "def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True", "def check_limits(self):\n\n #Find the relative position of each leg vs. its \"zero\" position\n relpos = self.fixed_plate - self.fixed_plate_zero\n\n for leg in range(3):\n #Check that the leg is within allowable \"safe zone\"\n #Use the position of the leg (relative to 0) to find the index in the \"safe zone\" matrix\n i_x = nearest_index(self.leg_safe_xaxis, relpos[COORD_X, leg])\n i_z = nearest_index(self.leg_safe_zaxis, relpos[COORD_Z, leg])\n #Look up in the safe zone.\n self.leg_fault[leg] = (not self.leg_safe_zone[leg, i_x, i_z])\n\n if (not all(np.isreal(self.fixed_plate[:, leg]))) or any(np.isnan(self.fixed_plate[:, leg])):\n #A complex or NaN value = the angle found for the leg was invalid, meaning that the\n #leg would have to be longer to reach the desired position.\n self.leg_fault[leg] = True", "def event_m10_29_x22(z40=10, z41=20, z42=30, z43=10292020, z44=10292010, z46=10291000, z47=10291010):\r\n \"\"\"State 0,1: Which hit group are you on?\"\"\"\r\n IsPlayerOnHitGroup(0, z40, 1)\r\n IsPlayerOnHitGroup(1, z41, 1)\r\n IsPlayerOnHitGroup(2, z42, 1)\r\n if ConditionGroup(0):\r\n pass\r\n elif ConditionGroup(1):\r\n Goto('L0')\r\n elif ConditionGroup(2):\r\n Goto('L1')\r\n \"\"\"State 4: Madura side\"\"\"\r\n return 0\r\n \"\"\"State 5: Forest side of the shadow of emptiness\"\"\"\r\n Label('L0')\r\n return 1\r\n \"\"\"State 3: Was the gimmick door lever pulled?\"\"\"\r\n Label('L1')\r\n CompareObjState(0, z46, 74, 0)\r\n CompareObjState(0, z47, 74, 0)\r\n CompareObjState(0, z46, 84, 0)\r\n CompareObjState(0, z47, 84, 0)\r\n IsPlayerOnHitGroup(1, z42, 0)\r\n if ConditionGroup(0):\r\n pass\r\n elif ConditionGroup(1):\r\n Goto('L2')\r\n \"\"\"State 2: Which door started to close?\"\"\"\r\n CompareObjState(0, z43, 80, 0)\r\n CompareObjState(1, z44, 80, 0)\r\n if ConditionGroup(0):\r\n \"\"\"State 7: Inside the room (Destination: Forest of Shadow)\"\"\"\r\n return 3\r\n elif ConditionGroup(1):\r\n \"\"\"State 6: Inside the room (destination: Madura)\"\"\"\r\n return 2\r\n \"\"\"State 
8: Rerun\"\"\"\r\n Label('L2')\r\n return 4", "def test_does_not_die(self):\n self.herb.fitness = 1\n nt.assert_false(self.herb.death())", "def die(self, dt):\r\n self.dead = True", "def _is_fail(self):\n failed = False\n for obj in self.world_state.objects:\n failed = failed or obj.lost\n return failed", "def check_lighting_state_room1():\n if timer_lights_on_off_room1() == room1_lux():\n pass\n else:\n light_room1(timer_lights_on_off_room1())", "def death(self):\n if not self.death_mode and not self.attack_mode and not self.damage_mode:\n self.death_mode = True\n self.lose = True\n self.cut_frame_update = 0", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def is_destroyed(self) -> bool:\n return self._coords == self.damaged_cells", "def _ensure_is_alive(self):\n if self._hit_points == 0:\n raise UnitIsDead('Unit is dead!')", "def update(self):\n if self.killer:\n if self.killer.stype == 'fire' and not(self.killer in self.pjs.fires):\n self.die()\n elif self.killer.stype == 'enemy':\n if self.timeout == 0:\n self.die()\n else:\n self.timeout -= 1\n else:\n self.move(self.direction, is_update=True)", "def check_crash(self):\n # check if the bird is aware of the pipes\n\n assert Bird.upper_pipes is not None and Bird.lower_pipes is not None, 'Set a reference to the pipes after ' \\\n 'initializing the first bird'\n\n # player = self\n player_index = self.player_index\n player_width = IMAGES['player'][0].get_width()\n player_height = IMAGES['player'][0].get_height()\n\n pos_y = self.pos_y\n pos_x = self.pos_x\n upper_pipes = self.upper_pipes\n lower_pipes = self.lower_pipes\n # if player crashes into ground\n\n if pos_y + player_height >= BASE_Y - 1:\n self.crash_test = True, True\n return\n\n else:\n player_rect = pygame.Rect(pos_x, pos_y, player_width, player_height)\n\n pipe_width = IMAGES['pipe'][0].get_width()\n pipe_height = IMAGES['pipe'][0].get_height()\n\n for uPipe, lPipe in zip(upper_pipes, lower_pipes):\n # upper and lower pipe rects\n u_pipe_rect = pygame.Rect(uPipe['x'], uPipe['y'], pipe_width, pipe_height)\n l_pipe_rect = pygame.Rect(lPipe['x'], lPipe['y'], pipe_width, pipe_height)\n\n # player and upper/lower pipe hitmasks\n p_hit_mask = HIT_MASKS['player'][player_index]\n u_hitmask = HIT_MASKS['pipe'][0]\n l_hitmask = HIT_MASKS['pipe'][1]\n\n # if bird collided with upipe or lpipe\n u_collide = pixel_collision(player_rect, u_pipe_rect, p_hit_mask, u_hitmask)\n l_collide = pixel_collision(player_rect, l_pipe_rect, p_hit_mask, l_hitmask)\n\n if u_collide or l_collide:\n self.crash_test = True, False\n return\n\n self.crash_test = False, False\n return", "def in_fire(self):\n Fire=False\n if self.state>0 and self.state<=5:\n Fire=True\n return Fire", "def update(self, player, world, deltaTime):\r\n if self.ghostPathIndex > len(self.path) - 1 or time.time() > self.lastTracked + 15 or self.firstTickScared == True:\r\n self.ghostPathIndex = 0\r\n self.firstTickScared = False\r\n\r\n plyGridX = int((player.boundingBox.pos.getX()) // world.nodeGrid.xScale) - 1\r\n plyGridY = int((player.boundingBox.pos.getY()) // world.nodeGrid.yScale) + 1\r\n ghostGridX = int((self.boundingBox.pos.getX()) // world.nodeGrid.xScale) - 1\r\n ghostGridY = int((self.boundingBox.pos.getY()) // world.nodeGrid.yScale) + 1\r\n\r\n # Reset alive if its made it\r\n if self.alive == False:\r\n self.alive = 
True\r\n self.scared = False\r\n\r\n if self.scared == True:\r\n # Make ghost to a random point\r\n randNode = world.nodeGrid.randomNode()\r\n self.path = world.nodeGrid.pathFind(world.nodeGrid.nodeList[ghostGridX][ghostGridY], randNode)\r\n else:\r\n try:\r\n self.path = world.nodeGrid.pathFind(world.nodeGrid.nodeList[ghostGridX][ghostGridY], world.nodeGrid.nodeList[plyGridX][plyGridY])\r\n except:\r\n print(\"error\")\r\n\r\n self.lastTracked = time.time()\r\n\r\n if self.ghostPathIndex < len(self.path) or self.lastTracked == 0:\r\n notAlive = not self.alive\r\n self.moveGhost(world, Point(self.path[self.ghostPathIndex].realPosX, self.path[self.ghostPathIndex].realPosY), (835 - (240 * self.scared) + (440 * notAlive)) * deltaTime)\r\n\r\n if BoundingBox.pointWithin(self.boundingBox, BoundingBox(Point(self.path[self.ghostPathIndex].realPosX, self.path[self.ghostPathIndex].realPosY), Point(world.nodeGrid.xScale, world.nodeGrid.yScale))):\r\n self.ghostPathIndex += 1" ]
[ "0.604368", "0.5863431", "0.584722", "0.5698167", "0.567636", "0.5640735", "0.5575544", "0.5574422", "0.55377287", "0.5520766", "0.55079025", "0.5434514", "0.54343575", "0.5425291", "0.5410013", "0.5398703", "0.5397574", "0.5384121", "0.53724176", "0.5368251", "0.53623694", "0.535856", "0.53529763", "0.53529763", "0.53529763", "0.5348079", "0.5345886", "0.53445184", "0.53445184", "0.53429", "0.534277", "0.53399765", "0.53399175", "0.53160673", "0.5301212", "0.5282271", "0.52746874", "0.5273188", "0.52506906", "0.52448815", "0.5244495", "0.5242573", "0.52394384", "0.523225", "0.5230873", "0.5227822", "0.5214662", "0.52098256", "0.52034205", "0.52023137", "0.5202065", "0.51939374", "0.5186652", "0.51856667", "0.517383", "0.5163072", "0.51621294", "0.5145634", "0.5145498", "0.5138235", "0.51380676", "0.513159", "0.5123745", "0.512054", "0.5119887", "0.5116908", "0.51141834", "0.5112498", "0.51097995", "0.5108831", "0.510854", "0.51079494", "0.5106712", "0.5104947", "0.5104489", "0.51009744", "0.51004624", "0.5095747", "0.5093094", "0.509202", "0.50845253", "0.5083868", "0.50812525", "0.5080037", "0.5079004", "0.5072865", "0.5068825", "0.50644636", "0.50625753", "0.50579584", "0.5057518", "0.50446856", "0.504411", "0.50403315", "0.5038296", "0.5033259", "0.50332415", "0.5031144", "0.50284404", "0.5021028", "0.50208026" ]
0.0
-1
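For illustration only (not a dataset row): a minimal sketch of the visibility-based belief pruning used in the agent code above. The plain-dict belief representation, the `team_positions` list, and the `sight_range=5` threshold are assumptions modeled on that entry, which drops candidate squares within Manhattan distance 5 of either teammate.

def prune_visible(beliefs, team_positions, sight_range=5):
    # If a candidate square is within direct sight of a teammate and no
    # opponent was actually observed there, the opponent cannot be there.
    for pos in list(beliefs.keys()):
        if any(abs(pos[0] - tp[0]) + abs(pos[1] - tp[1]) <= sight_range
               for tp in team_positions if tp is not None):
            del beliefs[pos]

Iterating over a copy of the keys (`list(beliefs.keys())`) keeps deletion safe while scanning, the same concern the original code handles with `Counter.pop`.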
Returns a count of horizontally, vertically and diagonally adjacent living cells
def _count_living_neighbors(self, cell: Cell) -> int:
    count = 0
    # Borders of the area in which we are trying to find neighbors.
    # Assume the y axis points downward and the x axis points to the left.
    for x in range(cell.x - 1, cell.x + 2):
        for y in range(cell.y - 1, cell.y + 2):
            if cell.x == x and cell.y == y:
                continue
            if (x, y) in self.living_cells.keys():
                count += 1
    return count
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getAdjacentCount(grid, x, y, X, Y, char):\n count = 0\n try{\n if x == 0:\n\n if y == 0:\n\n if x == X-1:\n\n if y == Y-1:\n }", "def _count_adj_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n if row - 1 >= 0:\n if col - 1 >= 0:\n count += 1 if grid[row - 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row - 1][col + 1] == '#' else 0\n count += 1 if grid[row - 1][col] == '#' else 0\n if row + 1 < len(grid):\n if col - 1 >= 0:\n count += 1 if grid[row + 1][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row + 1][col + 1] == '#' else 0\n count += 1 if grid[row + 1][col] == '#' else 0\n if col - 1 >= 0:\n count += 1 if grid[row][col - 1] == '#' else 0\n if col + 1 < len(grid[0]):\n count += 1 if grid[row][col + 1] == '#' else 0\n return count", "def get_neighbors_of(cell, board):\n count = 0\n (x, y) = cell\n for cell in board:\n if cell == (x - 1, y - 1):\n count += 1\n elif cell == (x, y - 1):\n count += 1\n elif cell == (x + 1, y - 1):\n count += 1\n elif cell == (x - 1, y):\n count += 1\n elif cell == (x + 1, y):\n count += 1\n elif cell == (x - 1, y + 1):\n count += 1\n elif cell == (x, y + 1):\n count += 1\n elif cell == (x + 1, y + 1):\n count += 1\n return count", "def getAdjacentWrapCount(grid, x, y, X, Y, char):\n count = 0\n # X, % Y gets spaces that are wrapped around the grid \n # Get x coordinates for adjacent grid spaces\n for i in [(x-1) % X, x, (x+1) % X]:\n # Get y coordinates for adjacent grid \n for j in [(y-1) % Y, y, (y+1) % Y]:\n # if the grid space is present and not the center of the grid spaces\n if (i, j) != (x, y) and grid[i][j] == char:\n count += 1\n return count", "def neighbor_count(A):\n sum2 = lambda A, B: map2(add, A, B)\n neighbors = ((-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1))\n return reduce(sum2,\n map(lambda d: rotate2(A, d[0], d[1]),\n neighbors))", "def count_neighbors(self, row, col):\n neighbors = 0\n neighbors += self.get_cell_value(row - 1, col - 1)\n neighbors += self.get_cell_value(row - 1, col)\n neighbors += self.get_cell_value(row - 1, col + 1)\n neighbors += self.get_cell_value(row, col - 1)\n neighbors += self.get_cell_value(row, col + 1)\n neighbors += self.get_cell_value(row + 1, col - 1)\n neighbors += self.get_cell_value(row + 1, col)\n neighbors += self.get_cell_value(row + 1, col + 1)\n\n return neighbors", "def countNeighbors(row, col, A):\n h = len(A)\n w = len(A[0])\n count = 0\n for x in range(-1, 2, 1):\n for y in range(-1, 2, 1):\n if abs(x) + abs(y) != 0:\n count += A[row+x][col+y]\n return count", "def island_perimeter(grid):\n count = 0\n for row in grid:\n size = len(row)\n row.insert(0, 0)\n row.append(0)\n grid.insert(0, [0 for x in range(size + 2)])\n grid.append([0 for x in range(size + 2)])\n\n for e, row in enumerate(grid):\n for i, num in enumerate(row):\n if num == 1:\n if grid[e][i - 1] != 1:\n count += 1\n if grid[e][i + 1] != 1:\n count += 1\n if grid[e - 1][i] != 1:\n count += 1\n if grid[e + 1][i] != 1:\n count += 1\n return count", "def get_number_neighbours_of_cell(self, x_cell, y_cell):\n alive_neighbours = 0\n \n # neighbour indices\n x_indices = [x_cell-1, x_cell, x_cell+1]\n y_indices = [y_cell-1, y_cell, y_cell+1]\n\n\n #TODO: use functional programming ^^^^^^\n #x_indices = list(filter(lambda x: x < 0 and x > self.size[0], x_indices))\n #y_indices = list(filter(lambda y: y < 0 and y > self.size[1], y_indices))\n \n # correct indices for cell neighbours based on wrap_around_borders\n #TODO: 
this so far only works for x,y same size..\n if self.wrap_around_borders:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n indices.append(self.board_size[0] - 1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n indices.append(0)\n else:\n for indices in [x_indices, y_indices]:\n if -1 in indices:\n indices.remove(-1)\n if self.board_size[0] in indices:\n indices.remove(self.board_size[0])\n\n # check each neighbour status and add to counter\n for x in x_indices:\n for y in y_indices:\n alive_neighbours = alive_neighbours + self.board_state[x][y]\n\n # dont count own value\n alive_neighbours = alive_neighbours - self.board_state[x_cell][y_cell]\n\n return alive_neighbours", "def count_diags(rows, cols, I, J):\n total = 0\n # Count i, j s.t. i-j == I-J\n # 0 <= i < rows, and\n # 0 <= j = i - (I - J) < cols, so\n # I - J <= i < cols + I - J\n total += (\n min(rows, cols + I - J)\n - max(0, I - J)\n )\n\n # Count i, j s.t. i+j == I+J\n # 0 <= i < rows, and\n # 0 <= j = I + J - i < cols, so\n # I + J - cols < i <= I + J, or equivalently,\n # I + J - cols + 1 <= i < I + J + 1\n total += (\n min(rows, I + J + 1)\n - max(0, I + J - cols + 1)\n )\n # (I, J) was counted twice\n return total - 2", "def island_perimeter(grid):\n count = 0\n for j, r in enumerate(grid):\n for i, c in enumerate(r):\n if c == 1:\n if j == 0 or grid[j - 1][i] == 0:\n count += 1\n if i == 0 or grid[j][i - 1] == 0:\n count += 1\n if j == len(grid) - 1 or grid[j + 1][i] == 0:\n count += 1\n if i == len(r) - 1 or grid[j][i + 1] == 0:\n count += 1\n return count", "def count_alive_cells(self, x, y):\n\n # indices of surrounding cells.\n ul = max(y - 1, 0) # upper left\n ur = min(y + 2, self.f_shape[1]) # upper right\n bl = max(x - 1, 0) # bottom left\n br = min(x + 2, self.f_shape[0]) # bottom right\n\n # slice\n cells = self.cells[bl:br, ul:ur]\n n_cells = np.count_nonzero(cells)\n\n return n_cells - self.cells[x][y]", "def _count_seen_occupied(grid: List[List[str]], row: int, col: int) -> int:\n count = 0\n for dx in [-1, 0, 1]:\n for dy in [-1, 0, 1]:\n if not (dx == 0 and dy == 0):\n count += 1 if _is_occupied(grid, row, col, dx, dy) else 0\n return count", "def num_cells_for_rows(self, rows):\r\n return (rows * rows + rows) // 2", "def island_perimeter(grid):\n total = 0\n for x in range(0, len(grid)):\n for y in range(0, len(grid[0])):\n if grid[x][y] == 1:\n if x == 0 or grid[x - 1][y] == 0:\n total += 1\n if x == len(grid) - 1 or grid[x + 1][y] == 0:\n total += 1\n if y == len(grid[0]) - 1 or grid[x][y + 1] == 0:\n total += 1\n if y == 0 or grid[x][y - 1] == 0:\n total += 1\n return total", "def count_ones(self):\r\n count = 0\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n count = count + 1\r\n return count", "def countNeighbors(oldgen, x, y):\n temp = 1\n\n count = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n\n # TODO: this needs rewritin to be more understandable\n if not (i == 0 and j == 0):\n count += int(oldgen[(x + i + WID) % WID][(y + j + HGT) % HGT])\n\n for i in range(-1, 2):\n for j in range(-1, 2):\n temp += 1\n\n count -= int(oldgen[x][y])\n\n return count", "def island_perimeter(grid):\n\n counter = 0\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if (grid[i][j] == 1):\n if ((j + 1) == len(grid[i]) or (grid[i][j + 1] == 0)):\n counter += 1\n if ((j - 1) < 0 or (grid[i][j - 1] == 0)):\n counter += 1\n if ((i + 1) == len(grid) or (grid[i + 1][j] == 0)):\n counter += 1\n 
if ((i - 1) < 0 or (grid[i - 1][j] == 0)):\n counter += 1\n return counter", "def neighbor(board, x, y, n, m):\n deltas = (\n (-1, -1), (-1, 0), (-1, 1),\n (0, -1), (0, 1),\n (1, -1), (1, 0), (1, 1),\n )\n count = 0\n for dx, dy in deltas:\n xx = x + dx\n yy = y + dy\n if xx >= 0 and xx < n and yy >= 0 and yy < m and board[xx][yy] % 2 == 1:\n count += 1\n\n return count", "def island_perimeter(grid):\n c = 0\n length = len(grid) - 1\n width = len(grid[0]) - 1\n\n for i, r in enumerate(grid):\n for j, n in enumerate(r):\n if n == 1:\n if i == 0 or grid[i - 1][j] != 1:\n c += 1\n if j == 0 or grid[i][j - 1] != 1:\n c += 1\n if j == width or grid[i][j + 1] != 1:\n c += 1\n if i == length or grid[i + 1][j] != 1:\n c += 1\n return c", "def get_neighbors(self, line, col):\n neighbors = 0\n for line_shift in [-1, 0, 1]:\n for col_shift in [-1, 0, 1]:\n if line_shift == 0 and col_shift == 0:\n continue # Do not count given cell\n # % connects left/right and up/down\n i = (line + line_shift) % self.lines\n j = (col + col_shift) % self.cols\n if self[i][j] == self.cell_state['alive']:\n neighbors += 1\n return neighbors", "def island_perimeter(grid):\n total = 0\n for b in range(len(grid)):\n for a in range(len(grid[b])):\n # left corner\n if (a == 0) and (b == 0):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right corner\n elif (a == len(grid[b]) - 1) and b == 0:\n if grid[b][a] == 1:\n total = total + 2\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # lower-left corner\n elif a == 0 and b == (len(grid) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n # lower-right corner\n elif b == (len(grid) - 1) and a == (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 2\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n # top edge\n elif (b == 0 and a > 0) and a < (len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # left edge\n elif (b > 0 and b < (len(grid) - 1)) and ((a == 0) and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # right edge\n elif (b > 0 and (b < len(grid) - 1)) and (a == len(grid[b]) - 1):\n if grid[b][a] == 1:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n # bottom edge\n elif (b == len(grid) - 1) and a > 0 and a < len(grid[b]) - 1:\n if grid[b][a] == 1:\n total = total + 1\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n # cases that are neither edges nor corners\n elif (b > 0 and b < len(grid) - 1) and (a > 0 and a <\n len(grid[b]) - 1):\n if grid[b][a] == 1:\n if grid[b][a - 1] == 0:\n total = total + 1\n if grid[b][a + 1] == 0:\n total = total + 1\n if grid[b - 1][a] == 0:\n total = total + 1\n if grid[b + 1][a] == 0:\n total = total + 1\n return total", "def count(grid):\n star='@'\n c = 0\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j]==star: c += 1\n return c", "def 
countAdjacentFloorNodes(self, x, y):\n\t\treturn self.isFloor(x - 1, y) + self.isFloor(x + 1, y) + self.isFloor(x, y - 1) + self.isFloor(x, y + 1)", "def ink_offsets(self):\n if not self._rows:\n return 0, 0\n row_inked = [True in _row for _row in self._rows]\n if True not in row_inked:\n return self.width, self.height, 0, 0\n bottom = list(reversed(row_inked)).index(True)\n top = row_inked.index(True)\n col_inked = [bool(sum(_row[_i] for _row in self._rows)) for _i in range(self.width)]\n left = col_inked.index(True)\n right = list(reversed(col_inked)).index(True)\n return left, bottom, right, top", "def count_neighbors(self, x, y):\n # IMPLEMENT ME\n # HINT: You do not have to use a for-loop for this method; just\n # if-statements will suffice. Also, you do not need to indent further\n # than two levels further than this comment.\n neighbours = 0\n if x > 0 and y > 0:\n if self.board[x-1][y-1] == \"x\":\n neighbours += 1\n if x > 0:\n if self.board[x-1][y] == \"x\":\n neighbours += 1\n if x > 0 and y < self.width - 1:\n if self.board[x-1][y+1] == \"x\":\n neighbours += 1\n if y > 0:\n if self.board[x][y-1] == \"x\":\n neighbours += 1\n if y < self.width - 1:\n if self.board[x][y+1] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y > 0:\n if self.board[x+1][y-1] == \"x\":\n neighbours += 1\n if x < self.height - 1:\n if self.board[x+1][y] == \"x\":\n neighbours += 1\n if x < self.height - 1 and y < self.width - 1:\n if self.board[x+1][y+1] == \"x\":\n neighbours += 1\n return neighbours", "def main():\n row, col, island = make_matrix()\n print(count_island(row, col, island))", "def count_island(row, col, island):\n count = 0\n for i in range(row):\n for j in range(col):\n count = count + floodfill(i, j, row, col, island)\n return count", "def island_perimeter(grid):\n\n count = 0\n\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n mul = 4\n if grid[i][j] == 1:\n if j < len(grid[0]) - 1:\n if grid[i][j + 1] == 1:\n mul -= 1\n if grid[i][j - 1] == 1 and j > 0:\n mul -= 1\n if i < len(grid) - 1:\n if grid[i + 1][j] == 1:\n mul -= 1\n if grid[i - 1][j] == 1 and i > 0:\n mul -= 1\n else:\n continue\n count += mul\n return count", "def calculateEdges(i, j, matrix):\n num = 0\n if i > 0:\n if matrix[i-1][j] == 0:\n num += 1\n if j > 0:\n if matrix[i][j-1] == 0:\n num += 1\n if i < len(matrix) - 1:\n if matrix[i+1][j] == 0:\n num += 1\n if j < len(matrix[0]) - 1:\n if matrix[i][j+1] == 0:\n num += 1\n \n return num", "def get_neighbours_count(self, cell: Position) -> int:\n possible_neighbours = self.get_neighbours(cell)\n return sum(self.is_alive(n) for n in possible_neighbours)", "def island_perimeter(grid):\n cx, cy = 0, 0\n len_grid = len(grid)\n for x in range(len_grid):\n for y in range(len(grid[x])):\n if grid[x][y] == 1:\n cx += 1\n if (y != len(grid[x]) - 1 and grid[x][y + 1] == 1):\n cy += 1\n if (x != len(grid) - 1 and grid[x + 1][y] == 1):\n cy += 1\n return 4 * cx - 2 * cy", "def get_cells(self):\r\n return \\\r\n (self.nx-1 if self.nx>1 else 1)* \\\r\n (self.ny-1 if self.ny>1 else 1)* \\\r\n (self.nz-1 if self.nz>1 else 1)", "def get_count_life_neighbor(arr, x, y, max_x, max_y):\n\tres_count = 0\n\n\tif x > 0 and y > 0:\n\t\tif arr[y-1][x-1]:\n\t\t\tres_count += 1\n\n\tif y > 0:\n\t\tif arr[y-1][x]:\n\t\t\tres_count += 1\n\n\tif y > 0 and x < max_x:\n\t\tif arr[y-1][x+1]:\n\t\t\tres_count += 1\n\n\tif x > 0:\n\t\tif arr[y][x-1]:\n\t\t\tres_count += 1;\n\n\tif x < max_x:\n\t\tif arr[y][x+1]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x > 0:\n\t\tif 
arr[y+1][x-1]:\n\t\t\tres_count += 1\n\n\tif y < max_y:\n\t\tif arr[y+1][x]:\n\t\t\tres_count += 1\n\n\tif y < max_y and x < max_x:\n\t\tif arr[y+1][x+1]:\n\t\t\tres_count += 1\n\n\treturn res_count", "def numIslands(grid):\n # count to store each new island found\n count = 0\n # If the grid is empty, return 0\n if not grid:\n return count\n\n y_max = len(grid)\n x_max = len(grid[0])\n \n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] == '1':\n dfs(grid, i, j)\n count += 1\n return count", "def count_islands(matrix):\n visited = init_visited(matrix)\n num_islands = 0\n for i in range(len(matrix)):\n for j in range(len(matrix)):\n if matrix[i][j] and not visited[i][j]:\n check_neighbours(matrix, (i, j), visited)\n num_islands += 1\n # print(visited)\n return num_islands", "def neighbors(self, row, col):\n alive_around = 0\n for i in range(row -1, row + 2):\n for j in range(col - 1, col + 2):\n irow = i % self.row\n icol = j % self.col\n if (not (irow == row and icol == col)):\n if (self.now[irow, icol]):\n alive_around = alive_around + 1\n\n return alive_around", "def island_perimeter(grid):\n perimeter, connections = 0, 0\n\n for i in range(len(grid)):\n for j in range(len(grid[i])):\n if grid[i][j] == 1:\n perimeter += 4\n\n if i != 0 and grid[i-1][j] == 1:\n connections += 1\n if j != 0 and grid[i][j-1] == 1:\n connections += 1\n\n return(perimeter - (2 * connections))", "def count_to(ROW, COLUMN):\n n = 0\n\n # n = 1 + 2 + 3 + ... => n = k*(k+1) / 2\n k = ROW+COLUMN-1\n n = k * (k+1) / 2\n\n return n - (ROW - 1)", "def compute_neighbours(index, matrix):\n row, col = decode_to_matrix_cell(index, matrix)\n n1 = index + 1\n if n1 >= matrix.size or col == matrix.cols - 1:\n n1 = None\n\n n2 = index + matrix.cols\n if n2 >= matrix.size or row == matrix.rows - 1:\n n2 = None\n return n1, n2,", "def num_cells(self):\n if hasattr(self, '__num_cells__'):\n return self.__num_cells__\n if self.x is not None:\n return self.x.size(self.__cat_dim__('x', self.x))\n if self.boundary_index is not None:\n return int(self.boundary_index[1,:].max()) + 1\n assert self.upper_index is None and self.lower_index is None\n return None", "def count_islands(grid):\n grid_copy = list(grid)\n count = 0\n for i in range(0, len(grid_copy)):\n for j in range (0, len(grid_copy[0])):\n if grid[i][j] and grid_copy[i][j]:\n _dfs(grid_copy, i, j)\n count += 1\n return count", "def count_pairs(self, lines: List[int], cols: List[int]) -> int:\n mat = [[self.table[i][j] for j in cols] for i in lines]\n diff = 0\n for i in range(len(mat)):\n for j in range(len(mat[0])):\n if i + 1 < len(mat) and mat[i][j] != mat[i + 1][j]:\n diff += 1\n if j + 1 < len(mat[i]) and mat[i][j] != mat[i][j + 1]:\n diff += 1\n # if lines == [0, 1, 2, 3] and cols == [0, 1]:\n # print(\"COUNT PAIRS ON FIRST MOVE: \", diff)\n # print(mat)\n return diff", "def howManyNeigbors(board,row,col):\r\n\tneigbors = 0\r\n\tif board[row-1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row-1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row][col+1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col-1] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col] == 1:\r\n\t\tneigbors += 1\r\n\tif board[row+1][col+1] == 1:\r\n\t\tneigbors += 1\r\n\treturn neigbors", "def get_cellcount(self):\n self.cellcount += 1\n return self.cellcount - 1", "def island_perimeter(grid):\n perimeter = 0\n for x in range(len(grid)):\n for y in 
range(len(grid[x])):\n if grid[x][y] == 1:\n if x == 0:\n perimeter += 1\n elif grid[x - 1][y] == 0:\n perimeter += 1\n if y == 0:\n perimeter += 1\n elif grid[x][y - 1] == 0:\n perimeter += 1\n if x == len(grid) - 1:\n perimeter += 1\n elif grid[x + 1][y] == 0:\n perimeter += 1\n if y == len(grid[0]) - 1:\n perimeter += 1\n elif grid[x][y + 1] == 0:\n perimeter += 1\n return perimeter", "def edge_num(self,row1,col1,row2,col2):\n\n row = row1\n col = col1\n row_n = row2\n col_n = col2\n \n if row2 < row1 or col2 < col1:\n row = row2\n col = col2\n row_n = row1\n col_n = col1\n \n if not ((row == row_n and col == col_n - 1) or (row == row_n-1 and col == col_n)):\n return -1\n\n if row < 0 or row_n >= self.rows or col < 0 or col_n >= self.cols:\n return -1\n \n node1 = row*self.rows+col+1\n node2 = row_n*self.rows+col_n+1\n edge_number = self.edge2index[(node1,node2)]\n #print \"%s %s: %d\" % (str(node1),str(node2),edge_number)\n \"\"\"\n #THIS DOWN HERE WOULD WORK IF GRAPHILLION NUMBERED EDGES CORRECTLY BUT IT DOESNT\n #print \"(%d,%d) (%d,%d)\" % (row,col,row_n,col_n)\n if row + col < self.cols - 1:\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * row\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 1 + 2 * row\n #edges[edge_number] = 1\n else:\n col_dist = self.cols - col - 1\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * col_dist - 1\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 2 * col_dist\n #edges[edge_number] = 1\n \"\"\"\n\n return edge_number", "def count_neighbors(lights, r, c):\n neighbors = 0\n\n if r > 0 and c > 0: # 1\n neighbors += 1 if lights[r - 1][c - 1] == \"#\" else 0\n\n if r > 0: # 2\n neighbors += 1 if lights[r - 1][c] == \"#\" else 0\n\n if r > 0 and c < GRID_SIZE - 1: # 3\n neighbors += 1 if lights[r - 1][c + 1] == \"#\" else 0\n\n if c < GRID_SIZE - 1: # 4\n neighbors += 1 if lights[r][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c < GRID_SIZE - 1: # 5\n neighbors += 1 if lights[r + 1][c + 1] == \"#\" else 0\n\n if r < GRID_SIZE - 1: # 6\n neighbors += 1 if lights[r + 1][c] == \"#\" else 0\n\n if r < GRID_SIZE - 1 and c > 0: # 7\n neighbors += 1 if lights[r + 1][c - 1] == \"#\" else 0\n\n if c > 0: # 8\n neighbors += 1 if lights[r][c - 1] == \"#\" else 0\n\n return neighbors", "def countOccupied(data):\n\tcounter = 0\n\n\t# loop through rows and columns and\n\t# count the number of '#'s\n\tfor r in range(len(data)):\n\t\tfor c in range(len(data[r])):\n\t\t\tif data[r][c] == '#':\n\t\t\t\tcounter += 1\n\n\treturn counter", "def island_perimeter(grid):\n perimeter = 0\n for row in grid + list(map(list, zip(*grid))):\n for i, j in zip([0] + row, row + [0]):\n perimeter += int(i != j)\n return perimeter", "def island_perimeter(grid):\n perimeter = 0\n for row in range(len(grid)):\n for idx in range(len(grid[0])):\n if grid[row][idx] == 1:\n \"\"\"if 1 encountered check all sides for 0\"\"\"\n top = row - 1\n bottom = row + 1\n left = idx - 1\n right = idx + 1\n\n \"\"\"check top index value\"\"\"\n if top < 0:\n perimeter += 1\n elif grid[row - 1][idx] != 1:\n perimeter += 1\n\n \"\"\"check bottom index value\"\"\"\n if bottom >= len(grid):\n perimeter += 1\n elif grid[row + 1][idx] != 1:\n perimeter += 1\n\n 
\"\"\"check left index value\"\"\"\n if left < 0:\n perimeter += 1\n elif grid[row][idx - 1] != 1:\n perimeter += 1\n\n \"\"\"check right index value\"\"\"\n if right >= len(grid[0]):\n perimeter += 1\n elif grid[row][idx + 1] != 1:\n perimeter += 1\n return perimeter", "def check_diagonal_2(self, given_letter):\n count = 0\n avalible_pos = []\n for i in range(self.size):\n if self.positions[self.letters[self.size - 1 - i] + self.numbers[i]] == given_letter:\n count += 1\n else:\n avalible_pos.append(\n self.letters[self.size - 1 - i] + self.numbers[i])\n return count, avalible_pos", "def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0", "def num_black_neighbors(tile, tiles):\n return sum([tiles[add(tile, step)] for step in NEIGHBORS])", "def island():\n\n grid = [\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"1\", \"1\", \"0\", \"0\", \"0\"],\n [\"0\", \"0\", \"1\", \"0\", \"0\"],\n [\"0\", \"0\", \"0\", \"1\", \"1\"]\n ]\n\n def dfs():\n rows = len(grid)\n cols = len(grid[0])\n count = 0\n for i in range(0, rows):\n for j in range(0, cols):\n if grid[i][j] == '1':\n check_valid(i, j, grid)\n count = count + 1\n return count\n\n def check_valid(i, j, grid=None):\n rows = len(grid)\n cols = len(grid[0])\n\n if not 0 <= i < rows or not 0 <= j < cols or grid[i][j] != '1':\n return\n\n grid[i][j] = '0'\n\n check_valid(i + 1, j, grid)\n check_valid(i - 1, j, grid)\n check_valid(i, j + 1, grid)\n check_valid(i, j - 1, grid)\n\n return dfs()", "def numIslands3(self, grid: List[List[str]]) -> int:\n m = len(grid)\n if m > 0:\n n = len(grid[0])\n else:\n return 0\n\n def dfs(grid, i, j):\n if grid[i][j] != '0':\n grid[i][j] = '0'\n\n for direction in self.directions(grid, i, j):\n dfs(grid, direction[0], direction[1])\n\n island = 0\n for i in range(m):\n for j in range(n):\n if grid[i][j] == '1':\n island += 1 # count the number of CCs\n dfs(grid, i, j)\n return island", "def ncells(self):\n return self.izone.size", "def _adjacent_blob_size(self, pos, board, visited) -> int:\n col, row = pos[0], pos[1]\n total = 0\n total += self._undiscovered_blob_size((col - 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row - 1), board, visited)\n total += self._undiscovered_blob_size((col + 1, row), board, visited)\n total += self._undiscovered_blob_size((col, row + 1), board, visited)\n return total", "def island_perimeter(grid):\n sum = 0\n\n for line in range(len(grid)):\n for column in range(len(grid[line])):\n value = grid[line][column]\n water_borders = 4\n if value == 1:\n if line != len(grid) - 1 and grid[line + 1][column] == 1:\n water_borders -= 1\n if line != 0 and grid[line - 1][column] == 1:\n water_borders -= 1\n if column != len(grid[0]) - 1 and grid[line][column + 1] == 1:\n water_borders -= 1\n if column != 0 and grid[line][column - 1] == 1:\n water_borders -= 1\n sum += water_borders\n return sum", "def count_trees(matrix, dx, dy):\n\n # We begin in the upper left corner\n x = 0\n y = 0\n count = 0\n\n # We continue until y > [height of matrix]\n while(y < len(matrix)):\n if matrix[y][x] == '#':\n count += 1\n\n # X is special since it needs to be wrapped around\n x = (x + dx) % len(matrix[0])\n y += dy\n\n return count", "def adjacent(self):\n x, y = self.opentile\n return (x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1) # left, right, up, down", "def 
num_cells_down(self):\n if self.dim == 0:\n return None\n if hasattr(self, '__num_cells_down__'):\n return self.__num_cells_down__\n if self.lower_index is None:\n return 0\n raise ValueError('Cannot infer the number of cells in the cochain below.')", "def count_neighboors(self, x: int, y: int) -> int :\n\n cpt : int = 0\n min_x : int = max(0, x - 1)\n max_x : int = min(x + 1, self.width-1)\n min_y : int = max(0, y - 1)\n max_y : int = min(y + 1, self.height-1)\n\n x_tmp : int\n y_tmp : int\n for x_tmp in range(min_x, max_x+1):\n for y_tmp in range(min_y, max_y+1):\n if self.is_alive(x_tmp, y_tmp) and not (x_tmp == x and y_tmp == y):\n cpt += 1\n return cpt", "def num_tiles(self):\n return self.num_row_tiles * self.num_col_tiles", "def size(self):\n num_vert = 0\n num_edg = 0\n for vertex in self.vertices():\n num_vert += 1\n num_edg += len(self.neighbors(vertex))\n return (num_vert, num_edg)", "def calculate_nr_of_upslope_cells(node_conn_mat, rows, cols, traps, steepest_spill_pairs, d4):\n\n # Retrieve the expanded connectivity matrix with traps as nodes\n node_conn_mat = expand_conn_mat(node_conn_mat, len(traps))\n conn_mat = reroute_trap_connections(node_conn_mat, rows, cols, traps, steepest_spill_pairs, d4)\n\n # The flow starts in the start_cells. These are the cells without flow leading in to them\n start_nodes = calculate_flow_origins(conn_mat, traps, rows, cols)\n flow_acc, one_or_trap_size = assign_initial_flow_acc(traps, start_nodes, rows, cols)\n _, next_nodes = conn_mat[start_nodes, :].nonzero()\n next_nodes = np.unique(next_nodes)\n\n current_nodes = next_nodes\n it = 0\n\n while len(current_nodes) > 0:\n print 'Iteration: ', it\n # Current nodes cannot be assigned flow without previous nodes having flow assigned\n previous_nodes, corr_current_index = conn_mat[:, current_nodes].nonzero()\n _, flow_to_each_current = np.unique(corr_current_index, return_counts=True)\n previous_nodes_with_flow = flow_acc[previous_nodes] > 0\n remove_indices = corr_current_index[previous_nodes_with_flow == False]\n keep_indices = np.setdiff1d(np.arange(0, len(current_nodes), 1), remove_indices)\n sorting_order = np.argsort(corr_current_index)\n previous_nodes = previous_nodes[sorting_order]\n assign_flow_indices = np.setdiff1d(current_nodes, current_nodes[remove_indices])\n\n # Calculate flow to current nodes having previous nodes with assigned flow\n splits = np.cumsum(flow_to_each_current)\n nodes_to_each_current = np.split(previous_nodes, splits)[:-1]\n flow_to_each_current = np.asarray([np.sum(flow_acc[el]) for el in nodes_to_each_current])\n flow_acc[current_nodes[keep_indices]] = flow_to_each_current[keep_indices]\n\n # Add one or the trap size\n flow_acc[assign_flow_indices] += one_or_trap_size[assign_flow_indices]\n\n it += 1\n if len(assign_flow_indices) > 0:\n _, next_nodes = conn_mat[assign_flow_indices, :].nonzero()\n next_nodes = np.unique(next_nodes)\n unassigned_current_nodes = current_nodes[remove_indices]\n current_nodes = np.union1d(next_nodes, unassigned_current_nodes)\n\n else:\n current_nodes = []\n\n # Map from trap nodes back to traps\n for i in range(len(traps)):\n trap = traps[i]\n flow_acc[trap] = flow_acc[rows * cols + i]\n\n flow_acc = flow_acc[:rows * cols]\n flow_acc = flow_acc.reshape(rows, cols)\n\n return flow_acc", "def count_cells(rule, n=500):\n ca = Cell1D(rule, n)\n ca.start_single()\n\n res = []\n for i in range(1, n):\n cells = np.sum(ca.array)\n res.append((i, i**2, cells))\n ca.step()\n\n return res", "def numNeighbors(minesSet, row_index, cols_index, 
num_cols, num_rows):\n mines = 0\n for j in np.arange(max(0, cols_index-1), min(num_cols-1, cols_index+1)+1):\n for i in np.arange(max(0, row_index-1), min(num_rows-1, row_index+1)+1):\n if ((i, j) in minesSet):\n mines+=1\n return mines", "def flagser_contain(adjacency_matrix):\n N=adjacency_matrix.shape[0]\n row,col=convertCOO(adjacency_matrix,ret_data=False)\n return compute_cell_count(N, np.transpose(np.array( (row,col))))", "def row_count(self):\n return self.well_count // self.col_count", "def _count_occupied_seats(grid: List[List[str]]) -> int:\n total = 0\n for row in grid:\n total += row.count('#')\n return total", "def __get_adjacent_4c(p, row_limit, col_limit):\n rows = []\n cols = []\n\n if p[0] == 0:\n rows.append(p[0] + 1)\n elif p[0] == (row_limit - 1):\n rows.append(p[0] - 1)\n else:\n rows.extend([p[0] + 1, p[0] - 1])\n\n if p[1] == 0:\n cols.append(p[1] + 1)\n elif p[1] == (col_limit - 1):\n cols.append(p[1] - 1)\n else:\n cols.extend([p[1] + 1, p[1] - 1])\n\n adjacent = []\n adjacent.extend([(r, p[1]) for r in rows])\n adjacent.extend([(p[0], c) for c in cols])\n\n return adjacent", "def countFreeNeighbors( p, board, occupation):\n n = 0\n for m in [0, 1]:\n for d in [-1, 1]:\n pn = [p[0], p[1]]\n pn[m] += d\n j = board.grids.get( tuple(pn), None)\n if (j is None): continue # Not a board point\n if (occupation.has_key( j)): continue # Occupied\n n += 1\n return n", "def get_nr_of_misplaced_tiles(board):\n result = 0\n\n for idx, val in enumerate(board):\n if idx != val:\n result += 1\n\n return result", "def test_can_traverse_wide_grid(self):\n grid = [[\"1\", \"0\", \"1\", \"1\", \"0\", \"1\", \"0\", \"0\", \"1\", \"0\"]]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def count_accumulated_inflow(riv_dirs_section,paths_map_section):\n\n flow_to_cell = 0\n #Exact opposite across the keypad of the direction values\n inflow_values = np.array([[3, 2, 1],\n [6, 5, 4],\n [9, 8, 7]])\n for i in range(3):\n for j in range(3):\n if i == 1 and j == 1:\n flow_to_cell += 1\n #skip this iteration as flow to self is already counted\n continue\n if inflow_values[i,j] == riv_dirs_section[i,j]:\n if paths_map_section[i,j] != 0:\n flow_to_cell += paths_map_section[i,j]\n else:\n return 0\n if flow_to_cell < 1:\n raise RuntimeError('In flow less than 1')\n return flow_to_cell", "def count_subs(x,y):\n\t# Encases diagonals in square grid of size 'square'\n\tsquare = x + y - 2\n\tsubs = 0\n\t# For every point counts the number of rectagles with (a,b) as upper left corner\n\tfor a in range(square):\n\t\tfor b in range(square):\n\t\t\tif valid(a,b,x,y):\n\t\t\t\tthis_subs = subs_at_point(a,b,x,y)\n\t\t\t\tprint \"%3d \" %(this_subs),\n\t\t\tprint \"\"\n\treturn subs", "def count_pillars_and_exit(self, x, y):\r\n\r\n if not self.is_valid_room(x, y) or self.__maze[x][y].is_visited():\r\n return 0\r\n\r\n # check for exit or any pillar\r\n item_count = 0\r\n if self.__maze[x][y].get_exit():\r\n item_count = 1\r\n elif self.__maze[x][y].get_pillar_a():\r\n item_count = 1\r\n elif self.__maze[x][y].get_pillar_e():\r\n item_count = 1\r\n elif self.__maze[x][y].get_pillar_i():\r\n item_count = 1\r\n elif self.__maze[x][y].get_pillar_p():\r\n item_count = 1\r\n\r\n # not at exit so try another room: south, east, north, west\r\n self.__maze[x][y].set_visited(True)\r\n # if east_wall is not true, then we can go row +1\r\n if self.__maze[x][y].walls['E'] is False:\r\n item_count += self.count_pillars_and_exit(x + 1, y)\r\n if self.__maze[x][y].walls['S'] is False:\r\n item_count += 
self.count_pillars_and_exit(x, y + 1)\r\n if self.__maze[x][y].walls['W'] is False:\r\n item_count += self.count_pillars_and_exit(x - 1, y)\r\n if self.__maze[x][y].walls['N'] is False:\r\n item_count += self.count_pillars_and_exit(x, y - 1)\r\n\r\n return item_count", "def rectangles_in_grid(x_f, y_f):\n count = 0\n for x in range(x_f):\n for y in range(y_f):\n for i in range(x, x_f):\n for j in range(y, y_f):\n count += 1\n return count", "def count_winning_blocks(self, gameboard):\r\n count = {'red':0.1, 'blue':0.1}\r\n for x in range(gameboard.height):\r\n for y in range(gameboard.width):\r\n position = (x, y)\r\n h = gameboard.check_horizontal_state(position)\r\n v = gameboard.check_vertical_state(position)\r\n d1 = gameboard.check_diag_1_state(position)\r\n d2 = gameboard.check_diag_2_state(position)\r\n for state in [h, v, d1, d2]:\r\n if ((state.count('red') + state.count('x') == 5)\r\n and (state.count('red') > 0)):\r\n count['red'] += np.power(3, (state.count('red') - 1))\r\n elif ((state.count('blue') + state.count('x') == 5)\r\n and (state.count('blue') > 0)):\r\n count['blue'] += np.power(3, (state.count('blue') - 1))\r\n return count", "def count_mines(row, col):\r\n total = 0\r\n for r,c in ((-1,-1),(-1,0),(-1,1),(0,-1),(0,1),(1,-1),(1,0),(1,1)):\r\n try:\r\n if mines[row+r][col+c] == 1:\r\n total += 1\r\n except KeyError:\r\n pass\r\n return total", "def check_diagonal_1(self, given_letter):\n count = 0\n avalible_pos = []\n for i in range(self.size):\n if self.positions[self.letters[i] + self.numbers[i]] == given_letter:\n count += 1\n else:\n avalible_pos.append(self.letters[i] + self.numbers[i])\n return count, avalible_pos", "def island_perimeter(grid):\n LAND = 1\n WATER = 0\n perimeter = 0\n for y, row in enumerate(grid):\n for x, cell in enumerate(row):\n if cell == LAND:\n # print(\"land in [x= {:d} y= {:d}]\".format(x, y))\n # left\n if y == 0 or grid[y - 1][x] == WATER:\n perimeter += 1\n # right\n if y == len(grid) - 1 or grid[y + 1][x] == WATER:\n perimeter += 1\n # up\n if x == 0 or grid[y][x - 1] == WATER:\n perimeter += 1\n # down\n if x == len(row) - 1 or grid[y][x + 1] == WATER:\n perimeter += 1\n return perimeter", "def island_perimeter(grid):\n w = len(grid[0])\n h = len(grid)\n perimeter = 0\n\n for i, col in enumerate(grid):\n for j, row in enumerate(col):\n if row == 1:\n perimeter += 4\n if grid[i][j-1] == 1:\n perimeter -= 1\n if grid[i][(j+1) % w] == 1:\n perimeter -= 1\n if grid[(i+1) % h][j] == 1:\n perimeter -= 1\n if grid[i-1][j] == 1:\n perimeter -= 1\n return perimeter", "def island_perimeter(grid):\n perimeter = 0\n if not grid:\n return 0\n if not all(type(arr) == list for arr in grid):\n return 0\n al = len(grid[0])\n if not all(len(arr) == al for arr in grid):\n return 0\n al = al - 1\n gl = len(grid) - 1\n for cell, arr in enumerate(grid):\n for element, val in enumerate(arr):\n if (val == 1):\n if element == 0 or arr[element - 1] == 0:\n perimeter += 1\n if element == al or arr[element + 1] == 0:\n perimeter += 1\n if cell == 0 or grid[cell - 1][element] == 0:\n perimeter += 1\n if cell == gl or grid[cell + 1][element] == 0:\n perimeter += 1\n return perimeter", "def create_diags(self):\n\n num_diags = self.rows + self.cols - 2\n diag_counts = [0 for i in range(num_diags)]\n for diag_index in range(num_diags):\n first = (0,0)\n second = (0,0)\n if diag_index < self.rows - 1:\n first = (diag_index+1,0)\n elif diag_index == self.rows - 1:\n first = (diag_index,0)\n else:\n first = (self.rows-1,diag_index-self.rows+1)\n if diag_index < 
self.cols - 1:\n second = (0,diag_index+1)\n elif diag_index == self.cols - 1:\n second = (0,diag_index)\n else:\n second = (diag_index-self.cols+1,self.cols-1)\n #print str(first) + \" \" + str(second)\n diag_counts[diag_index] = dist_points(first,second) \n \n \"\"\"holds the sum of edges in diagonals previous to a given edge\"\"\"\n diag_full = [0 for i in range(num_diags + 1)]\n for i in range(1,num_diags+1):\n diag_full[i] = diag_full[i-1] + diag_counts[i-1]\n\n #print diag_counts\n #print diag_full\n return diag_full", "def test_can_traverse_tall_grid(self):\n grid = [\n [\"0\"],\n [\"1\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n [\"0\"],\n [\"1\"],\n ]\n result = num_islands(grid)\n self.assertEqual(result, 4)", "def check_diag(self):\r\n if self.grid[4][-1] != ' ':\r\n if self.grid[0][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[8][-1]:\r\n return (4, (self.grid[0], self.grid[8]))\r\n elif self.grid[2][-1] == self.grid[4][-1] and self.grid[4][-1] == self.grid[6][-1]:\r\n return (4, (self.grid[2], self.grid[6]))\r\n return (-1, None)", "def checkNumNeighbors():", "def get_num_tiles(grid_bbox, dxy): \r\n xmin, xmax, ymin, ymax = grid_bbox\r\n return (int(np.abs(ymax-ymin)/dxy), int(np.abs(xmax-xmin)/dxy))", "def inner_cells(w, h):\n a = create_board(w, h)\n\n for row in range(h):\n for col in range(w):\n if 0 < row < h - 1 and 0 < col < w - 1:\n a[row][col] = 1\n else:\n a[row][col] = 0\n\n return a", "def cell_edges(self):", "def check_neighbours(r, c, board):\n NeighboursSum = 0\n\n for x in range(r - 1, r + 1):\n for y in range(c - 1, c + 1):\n NeighboursSum = NeighboursSum * board[x][y]\n\n return NeighboursSum", "def occupied(r, c, layout):\n occupy = 0\n\n NW = (-1, -1)\n NE = (-1, 1)\n N = (-1, 0)\n W = (0, -1)\n E = (0, 1)\n SW = (1, -1)\n SE = (1, 1)\n S = (1, 0)\n\n for dirs in [NW, NE, N, W, E, SW, SE, S]:\n dr, dc = r+dirs[0], c + dirs[1]\n if (dr >= 0) and (dc >= 0) and (dr < len(layout)) and (dc < len(layout[0])):\n if layout[dr][dc] == '#':\n occupy += 1\n\n return occupy", "def original(arr):\n height = np.shape(arr)[0]\n width = np.shape(arr)[1]\n result = np.array(arr)\n\n for row in range(height):\n for col in range(width):\n neighbors = 0\n val = result[row][col]\n for i in range(-1, 2):\n for j in range(-1, 2):\n if i == 0 and j == 0: # The cell itself cannot be counted as a neighbor\n continue\n if row + i < 0 or col + j < 0 or row + i > height or col + j > width: # Out of bounds\n continue\n with suppress(IndexError):\n if arr[row + i][col + j] == 1:\n neighbors += 1\n\n if neighbors == 3 and val == 0: # Cell becomes alive\n result[row][col] = 1\n\n elif neighbors > 3 and val == 1 or neighbors < 2 and val == 1: # Cell dies\n result[row][col] = 0\n\n return result", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def get_carrot_count(matrix, pos, size):\n\n row, col = pos\n\n if on_board(pos, size):\n return (matrix[row][col], pos)\n\n return (0, (-1, -1))", "def island_perimeter(grid):\n \"\"\"island_perimeter - perimeter of the island\n Parameter\n ---------\n grid:\n list\n Return\n ------\n int\n \"\"\"\n total = 0\n\n rows = len(grid)\n columns = len(grid[0])\n\n for row in range(rows):\n for col in range(columns):\n array = grid[row][col]\n if array == 1:\n total += 4\n if row != 0 and grid[row-1][col] == 1:\n total 
-= 1\n if col != 0 and grid[row][col-1] == 1:\n total -= 1\n if row + 1 != rows and grid[row + 1][col] == 1:\n total -= 1\n if col + 1 != columns and grid[row][col + 1] == 1:\n total -= 1\n\n return total", "def __get_total_neighbors(shape):\n from .util import prod\n\n ndim = len(shape)\n\n # Count the bulk of the pixels in the core\n core_n_pixels = prod(x-2 for x in shape)\n core_n_neighbors = 3**ndim-1\n count = core_n_pixels * core_n_neighbors\n\n # Go through pixels that are along planes/edges/corners\n # The number of neighbors is missing n_axes+1 axes\n n_axes = arange(ndim)\n n_neighbors = core_n_neighbors - ((1<<n_axes) * 3**(ndim-n_axes-1)).cumsum()\n for inds in axes_combinations(ndim):\n n_pixels = core_n_pixels // prod(shape[i]-2 for i in inds)\n count += (1<<len(inds)) * n_pixels * n_neighbors[len(inds)-1]\n\n return count", "def neighbours(box, kps):\n box_duplicate = box.unsqueeze(2).repeat(1, 1, len(kps.t())).transpose(0, 1)\n kps_duplicate = kps.unsqueeze(1).repeat(1, len(box), 1)\n\n xmin = kps_duplicate[0].ge(box_duplicate[0])\n ymin = kps_duplicate[1].ge(box_duplicate[1])\n xmax = kps_duplicate[0].le(box_duplicate[2])\n ymax = kps_duplicate[1].le(box_duplicate[3])\n\n nbr_onehot = torch.mul(torch.mul(xmin, ymin), torch.mul(xmax, ymax)).t()\n n_neighbours = nbr_onehot.sum(dim=1)\n\n return nbr_onehot, n_neighbours" ]
[ "0.75605994", "0.7353794", "0.7319598", "0.7206011", "0.6943233", "0.6866241", "0.68430185", "0.68314976", "0.6813723", "0.6796741", "0.67812294", "0.6732968", "0.6658989", "0.6624076", "0.65973485", "0.658886", "0.65862817", "0.65752614", "0.652818", "0.6520708", "0.645585", "0.6455639", "0.64165395", "0.6414781", "0.6356518", "0.6331627", "0.6331394", "0.63186496", "0.62763375", "0.6234949", "0.6234522", "0.61571366", "0.61343163", "0.6129878", "0.6127499", "0.6116882", "0.6115927", "0.61091954", "0.6074068", "0.6066201", "0.6065991", "0.60646176", "0.60499036", "0.60469615", "0.604427", "0.6036069", "0.6031212", "0.60272175", "0.60263574", "0.6014552", "0.60066885", "0.6003303", "0.59943", "0.5991088", "0.5984064", "0.5983468", "0.5981254", "0.5972037", "0.595864", "0.59547746", "0.59530723", "0.5922421", "0.58896923", "0.58861583", "0.5879834", "0.5873557", "0.58733183", "0.58641833", "0.58638805", "0.5862627", "0.5848168", "0.5840226", "0.583277", "0.58236825", "0.58107716", "0.5806105", "0.5805385", "0.58041954", "0.5802726", "0.5797716", "0.57948446", "0.5781291", "0.5780276", "0.57677466", "0.57544297", "0.57538265", "0.5748676", "0.57467914", "0.5743219", "0.5741379", "0.5740285", "0.5714856", "0.5712882", "0.57088053", "0.57048684", "0.5694845", "0.56890374", "0.56833375", "0.56819206", "0.5680171" ]
0.68710995
5
Generates an initial state of the game
def _create_living_cells(self, living_cells_positions: List[Tuple[int, int]]) -> None:
    for x, y in living_cells_positions:
        self.living_cells[x, y] = Cell(x, y, True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_state(self):\n state = GameState(self.size)\n return state", "def make_initial_state(self,seed,scrambles):\n seen = {}\n ns=0\n x = range(self.N*self.N)\n\n for r in range(self.N):\n for c in range(self.N):\n if x[r*self.N+c]==0:\n row,col=r,c\n self.initial = PuzzleState(x,self.N,row,col)\n R = random.Random()\n R.seed(seed)\n while ns<scrambles:\n index = R.randint(0,len(self.actions)-1)\n a = self.actions[index]\n nexts = self.initial.move(a)\n if nexts is not None:\n serial = nexts.__str__()\n if serial not in seen:\n seen[serial] = True\n self.initial = nexts\n ns += 1\n print('Problem:', self.__doc__, 'Initial state:')\n print(self.initial)\n print('==============')", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def make_initial_state(self):\n pass", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def reset(self):\n self.goal = random.randint(0, len(self.homes) - 1)\n\n curr_location = (random.randint(0, self.size-1), random.randint(0, self.size-1))\n bad_starts = self.homes + [self.store]\n\n while curr_location in bad_starts:\n curr_location = (random.randint(0, self.size - 1), random.randint(0, self.size - 1))\n\n self.curr_state = self.encode(*curr_location, 0, self.goal)\n return self.curr_state", "def initial_state() -> Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def start_state():\n return chess.Board()", "def initial_state(self):\r\n return State(self.take_card(Color.BLACK), self.take_card(Color.BLACK))", "def initial(self):\n self.update_panel_displays()\n yield 0\n #\n if self.options.initial_state:\n self.started = True\n self.nextState(getattr(self, self.options.initial_state)())\n else:\n self.nextState(self.start_screen())", "def initial_state(self):\n return 0", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def init_game_setting(self):\n self.state.state_counter_while_testing += 1", "def startState(self):\n\n n_squares_per_row = int(math.ceil(math.sqrt(self.n_snakes))**2)\n square_size = self.grid_size // int(n_squares_per_row)\n assignment = random.sample(range(n_squares_per_row ** 2), self.n_snakes)\n\n\n assert self.grid_size >= 3*n_squares_per_row\n\n snakes = {}\n for snake, assign in enumerate(assignment):\n head = (random.randint(1, square_size-2) + (assign // n_squares_per_row) * square_size,\n random.randint(1, square_size-2) + (assign % n_squares_per_row) * square_size)\n snakes[snake] = newSnake([head, utils.add(head, random.sample(DIRECTIONS, 1)[0])], snake)\n\n fruits_to_put = 2 * int(self.fruit_ratio) + 1\n start_state = State(snakes, {})\n start_state.addNRandomfruits(fruits_to_put, self.grid_size)\n return start_state", "def make_state() -> 
state.GameState:\r\n dung: world.Dungeon = worldgen.EmptyDungeonGenerator(20, 20).spawn_dungeon(0)\r\n p1x, p1y = dung.get_random_unblocked()\r\n p2x, p2y = dung.get_random_unblocked()\r\n while (p2x, p2y) == (p1x, p1y):\r\n p2x, p2y = dung.get_random_unblocked()\r\n ent1 = entities.Entity(1, 0, p1x, p1y, 10, 10, 2, 1, [], dict())\r\n ent2 = entities.Entity(2, 0, p2x, p2y, 10, 10, 2, 1, [], dict())\r\n return state.GameState(True, 1, 1, 2, world.World({0: dung}), [ent1, ent2])", "def initial_state(self):\n pass", "def _init_game(self):\n state, player_id = self.game.init_game()\n if self.record_action:\n self.action_recorder = []\n return self._extract_state(state), player_id", "def initial_step(self, state, action):\n next_state = self.state_transition(state, action)\n env_action = random.choice(list(self.action_space(next_state)[1]))\n next_state = self.state_transition(next_state, env_action)\n return next_state", "def start_of_game(self):\n pass", "def __init__( self, prevState = None ): ###PLEASE NOTE THIS THAT THE __init__ method is here and this is where GameState() starts\n if prevState != None: # Initial state\n self.data = GameStateData(prevState.data) ##This statement imports the GameStateData object from the GameStateData class in game.py. This object contains a data packet documenting the state of the game.\n\n else:\n self.data = GameStateData()\n \"\"\"\n self._foodEaten = None\n self._capsuleEaten = None\n self._agentMoved = None\n self._lose = False\n self._win = False\n self.scoreChange = 0\n \"\"\"", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def __init__(self):\n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToSleep','GoToPlay'])\n\n self.rate = rospy.Rate(1) \n self.counter = 0", "def createState(self):\n return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def _init_episode(self):\n # get states - one-hots\n self._states = np.zeros((self._size_state, self._size_state))\n\n # to_ones = np.random.permutation(self._size_state)[0:3]\n for x in xrange(self._size_state):\n # self._states[x][to_ones[x]] = 1\n self._states[x][x] = 1\n\n self._prob_transition = np.array([[.8,.2]])\n self._randomize()\n self._current_state = 0\n self._last_state = 0\n self._stage = 0\n self._since_flipped = 0", "def initial_state(self):\n\n return WorldState([[-1, -1], [1, 1], -1])", "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "def initial_state():\n board = [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]\n return board", "def generate_initial_states(env, max_steps=10000):\n\n initial_state, _ = env.reset()\n\n n_steps = 0\n seen_states = set([initial_state])\n frontier = [initial_state]\n while frontier and n_steps < max_steps:\n state = frontier.pop()\n valid_actions = sorted(list(env.action_space.all_ground_literals(state)))\n for action in valid_actions:\n env.set_state(state)\n 
next_state = env.step(action)[0]\n n_steps += 1\n if next_state not in seen_states:\n seen_states.add(next_state)\n frontier.append(next_state)\n if n_steps >= max_steps:\n break\n\n seen_states.remove(initial_state)\n # Sort states using the One True Ordering\n states = sorted(list(seen_states), key=lambda x: sorted(list(x.literals)))\n old_rng_st = random.getstate()\n random.seed(0)\n random.shuffle(states)\n random.setstate(old_rng_st)\n\n return states", "def env_start(self):\n self.current_state = {}\n self.current_state['usable_ace'] = self.random.randint(2)\n self.current_state['player_sum'] = self.random.randint(12,22)\n self.current_state['dealer_card'] = self.random.randint(1,11)\n\n self.player_ace_count = self.current_state['usable_ace']\n\n self.reward_obs_term = (0.0, self.observation(self.current_state), False)\n\n return self.reward_obs_term[1]", "def initGame(width=19):\n state = np.zeros((width, width, 2))\n available = np.zeros((width, width))\n\n return state, available", "def initial_state(particle,self):\n\n self.states[particle,:] = self.base_model.agents2state()\n\n return self.states[particle]", "def get_initial_state(self):\n return self.get_state(self.get_initial_observation())", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.Coins =Group()\n self.players = Group()\n self.player1 = Player(1525,75,2)\n self.players.add(self.player1)\n if self.playernum == 2:\n self.player2 = Player(75,825,1)\n self.players.add(self.player2)\n else:\n self.player2 = False", "def generate_random_start_state(self) -> State:\n part_states = []\n random.shuffle(self.blocks)\n placed = []\n t = 0\n\n for block in self.blocks:\n if 1 / (t + 1) >= random.random():\n part_states.append(PartState(f'on({block.arguments[0]},table)'))\n else:\n rand = random.randint(0, len(placed) - 1)\n part_states.append(PartState(f'on({block.arguments[0]},{placed[rand]})'))\n\n placed.append(block.arguments[0])\n t += 1\n\n return State(set(part_states))", "def initialise(self, grid):\n self.total_reward = 0\n\n self.next_state = self.buildState(grid)", "def startGame():\n\n\tprint(\"\\nOK! 
Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\r\n return [[EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY],\r\n [EMPTY, EMPTY, EMPTY]]", "def state_0():\n action = _Action(\"\", [])\n bar = []\n plates = Counter({20: 1, 25: 1})\n path_cost = 0\n path_used = 0\n goals = (20, 25, 0)\n goal_i = 0\n parent = None\n\n return _State(action, bar, plates, path_cost, path_used, goals, goal_i, parent)", "def initial_state(self):\n return None", "def __init__(self, game):\n # this calls the superclass constructor (does self.game = game)\n super().__init__(game) \n \n # YOUR CODE HERE\n self.gamma = 0.9\n #assign initial state values to be 0\n self.values = {s: 0 for s in game.states}\n #assign initial optimal actions to be ()\n self.policy = {s: () for s in game.states}\n #calculate state values\n self.valueIteration()\n #extract policy\n self.policyIteration()", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, 
EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]", "def GenerateInitialState(self, setup=\"zeros\"):\n\n if setup == \"zeros\":\n return np.zeros((self.num_neurons, 1))\n else:\n return np.random.uniform(setup[0],setup[1],size=(self.num_neurons, 1))", "def fill_map(self):\n\n sim = Pong(max_steps=None)\n s = sim.empty_state()\n s[DEFAULT_DIMS] = DEFUALT_VALUES\n\n # Optimization issues:\n next_state = self.next_state\n next_reward = self.next_reward\n d = self.d\n\n # Make the terminal state a self-loop\n next_state[self.n] = self.n\n\n t0 = clock()\n for i in range(0, self.n, 1000000):\n for j in range(i, min(i + 1000000, self.n)):\n s[TRAIN_DIMS] = d.index_to_state(j)\n for a in c.ACTIONS:\n sim.fast_set_and_step(s, c.A_STAY, a)\n if sim.hit == \"r\":\n next_reward[j, a] = 1\n next_state[j, a] = -1\n elif sim.miss == \"r\":\n next_reward[j, a] = -1\n next_state[j, a] = -1\n else:\n next_state[j, a] = d.state_to_index(sim.s[TRAIN_DIMS])\n print(i, clock() - t0)", "def __init__(self):\n \n smach.State.__init__(self, \n outcomes=['GoToNormal','GoToPlay'])\n \n self.rate = rospy.Rate(200) # Loop at 50 Hz", "def registerInitialState(self, gameState):\n CaptureAgent.registerInitialState(self, gameState)", "def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def setup_game(self):", "def registerInitialState(self, gameState):\r\n\r\n '''\r\n Make sure you do not delete the following line. If you would like to\r\n use Manhattan distances instead of maze distances in order to save\r\n on initialization time, please take a look at\r\n CaptureAgent.registerInitialState in captureAgents.py.\r\n '''\r\n CaptureAgent.registerInitialState(self, gameState)\r\n\r\n '''\r\n Your initialization code goes here, if you need any.\r\n '''\r\n self.totalFoodNum = float(len(self.getFood(gameState).asList()))\r\n\r\n self.needFallback = False\r\n\r\n self.mapwidth = gameState.data.layout.width\r\n self.mapheight = gameState.data.layout.height\r\n self.myBorders = self.getMyBorder(gameState)\r\n self.enemyBorders = self.getEnemyBorder(gameState)\r\n # self.oneKi,self.twoKi,self.threeKi,self.fourKi = self.pointClassification(gameState)\r\n # self.dangerPath = self.getDangerPath(gameState)\r", "def initialize_state(self, state):\n print 'state initialized'\n return state", "def __init__(self, init_state):\n\n self.PUZZLE_TYPE = len(init_state) - 1\n self.initial_state = init_state\n self.current_state = init_state\n self.goal_state = [i for i in range(0, self.PUZZLE_TYPE + 1)]\n self.explored_states = []", "def registerInitialState(self, gameState):\n \n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n MultiAgentSearchAgent.registerInitialState(self, gameState)\n\n self.setDefaultWeights()\n self.historicalActions = ['Go']\n \n '''\n Your initialization code goes here, if you need any.\n '''", "def getInitialState (states):\n length1 = len (states[0])\n length2 = len (states[1])\n\n ind1 = random.randint (0, length1 - 1)\n ind2 = random.randint (0, length2 - 1)\n return (ind1, ind2)", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def test_initialization(number: int) -> None:\n for _ in range(number):\n if random.random() < 0.5:\n size = random.randint(3, 10)\n baby_position = [random.randint(0, size - 1), random.randint(0, size - 1)]\n num_berries = random.randint(1, size)\n else:\n size = [random.randint(3, 10), random.randint(3, 10)]\n baby_position = [\n random.randint(0, size[0] - 1),\n random.randint(0, size[1] - 1),\n ]\n num_berries = random.randint(1, size[0])\n print(f\"\\n\\n\\nSize of the board {size}\")\n print(f\"Baby position: {baby_position}\")\n print(f\"Number of berries to be placed randomly: {num_berries}\")\n game = Game(size, baby_position, 0, 0, 0, 0, num_berries)\n print(f\"Here is the board:\\n{game.get_board()}\")\n print(game.get_baby())\n for b in game.get_berries():\n print(b)", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def test_init_with_existing_game(self):\n pass\n # Ensure judge is the same", "def __init__(self, max_step=-1):\n self.environment = mls.rl.common.Environment()\n self.environment.game = mls.rl.common.Game(max_step=max_step)\n self.environment.current_state = self.environment.game.init_state(self.environment)", "def initialize(self):\n self.currState = self.startState", "def new_state():\n return ''.join(random.choice(string.ascii_uppercase + string.digits)\n for x in range(32))", "def defaultInitialState():\n raise NotImplementedError", "def get_initial_state(self) -> str:\n return ''", "def get_new_gamestate(self):", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n\n '''\n Your initialization code goes here, if you need any.\n '''", "def registerInitialState(self, gameState):\n\n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n\n '''\n Your initialization code goes here, if you need any.\n '''", "def start_game(self):\n\n\t\tpass", "def reinitialize(self, random_state):\n pass", "def __init__(self,state,player=WHITE):\n if(state==None):\n self.gameState = dict()\n for x in range(0,WIDTH):\n for y in range(0,HEIGHT):\n self.gameState[x,y] = EMPTY\n for x in range(0,WIDTH):\n self.gameState[x,BSTARTROW] = BLACK#Blacks starting row\n self.gameState[x,WSTARTROW] = WHITE#Whites starting row\n #whites.append(Board.pawn(Board.pos(x,WSTARTROW),WHITE))\n #blacks.append(Board.pawn(Board.pos(x,BSTARTROW),BLACK))\n else:\n self.gameState = state\n \n self.whoseTurn = player\n self.cachedWin = False # set to True in winFor() if\n self.cachedWinner = None", "def initial_state(self):\n\n return self._state()", "def init_game():\n raise ValueError(\"init_game is removed. Please use env.reset()\")", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()" ]
[ "0.75943387", "0.7504709", "0.7235902", "0.7209142", "0.6989828", "0.6983098", "0.6977527", "0.68630457", "0.68396306", "0.68289196", "0.6774058", "0.6772746", "0.67558104", "0.67219126", "0.6709018", "0.669816", "0.6664396", "0.6647363", "0.6641459", "0.65971696", "0.64977795", "0.6495657", "0.6493677", "0.6475991", "0.64578795", "0.6449281", "0.6437187", "0.64263207", "0.6418929", "0.6413104", "0.64100564", "0.64076453", "0.6396907", "0.63939023", "0.639309", "0.6391563", "0.63815457", "0.63799495", "0.6370425", "0.6370425", "0.6360176", "0.6359084", "0.6355679", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.635383", "0.6339673", "0.63391256", "0.63285285", "0.63234675", "0.63226545", "0.63187695", "0.6318495", "0.63180166", "0.63141996", "0.62999856", "0.6296548", "0.62864554", "0.6286107", "0.6283252", "0.62765473", "0.6272301", "0.6271729", "0.6270317", "0.626734", "0.6259034", "0.6256705", "0.6246836", "0.62448674", "0.62448674", "0.62310064", "0.62155324", "0.62091535", "0.6206357", "0.6202742", "0.6198671", "0.61972076" ]
0.0
-1
Creates a game with a random initial state
def randomize(self, count, deviation: int = 5):
    cells = []
    for i in range(count):
        cells.append(
            (random.randint(-deviation, deviation - 1),
             random.randint(-deviation, deviation - 1))
        )
    self._create_living_cells(cells)
    return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initial_state(self):\n state = GameState(self.size)\n return state", "def make_state() -> state.GameState:\r\n dung: world.Dungeon = worldgen.EmptyDungeonGenerator(20, 20).spawn_dungeon(0)\r\n p1x, p1y = dung.get_random_unblocked()\r\n p2x, p2y = dung.get_random_unblocked()\r\n while (p2x, p2y) == (p1x, p1y):\r\n p2x, p2y = dung.get_random_unblocked()\r\n ent1 = entities.Entity(1, 0, p1x, p1y, 10, 10, 2, 1, [], dict())\r\n ent2 = entities.Entity(2, 0, p2x, p2y, 10, 10, 2, 1, [], dict())\r\n return state.GameState(True, 1, 1, 2, world.World({0: dung}), [ent1, ent2])", "def init_new_game(self):\n self.game = get_new_game(self.game_config)", "def new_game(self) -> \"State\":\n return State(self, self.__sim.new_game())", "def new_game(self):\n old_state = self.rstate\n del old_state\n self.rstate = self.rsimulator.new_game()", "def test_init_with_existing_game(self):\n pass\n # Ensure judge is the same", "def initGameState(self):\n print(\"Setting game state: \")\n self.playGUI = GUI()\n self.playGUI.drawBoard(self.player)", "def make_initial_state(self,seed,scrambles):\n seen = {}\n ns=0\n x = range(self.N*self.N)\n\n for r in range(self.N):\n for c in range(self.N):\n if x[r*self.N+c]==0:\n row,col=r,c\n self.initial = PuzzleState(x,self.N,row,col)\n R = random.Random()\n R.seed(seed)\n while ns<scrambles:\n index = R.randint(0,len(self.actions)-1)\n a = self.actions[index]\n nexts = self.initial.move(a)\n if nexts is not None:\n serial = nexts.__str__()\n if serial not in seen:\n seen[serial] = True\n self.initial = nexts\n ns += 1\n print('Problem:', self.__doc__, 'Initial state:')\n print(self.initial)\n print('==============')", "def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()", "def init_game():\n raise ValueError(\"init_game is removed. 
Please use env.reset()\")", "def create_new_game(self):\r\n global game_instance\r\n game_instance = game.Game()\r\n game_instance.set_word(db.get_random_word())\r\n print(\"\\n---------NEW GAME---------\")\r\n self.current_word = \"----\"", "def create_game(self):\n\n\t\tself.player_model.grid = []\n\t\tself.player_model.available_cells = []\n\n\t\tfor i in range(9):\n\t\t\tc = Cell(i, None)\n\t\t\tself.player_model.grid.append(c)\n\t\t\tself.player_model.available_cells.append(c)\n\n\t\tself.player_frame.setup_game(self.player_model.current_player.name)", "def random_state(self) -> Grid2D.State:\n return Grid2D.State(random.choice(self.empty_cell_list))", "def __init__(self):\n self.action_space = [(0,0)] + list(permutations([i for i in range(m)], 2))\n self.state_space = [(X,T,D) for X in range(m) for T in range(t) for D in range(d)]\n self.state_init = random.choice(self.state_space)\n\n # Start the first round\n self.reset()", "def make_game(self):\n game = Game(self.data['gamename'])\n self.game = game\n return game", "def startState(self):\n\n n_squares_per_row = int(math.ceil(math.sqrt(self.n_snakes))**2)\n square_size = self.grid_size // int(n_squares_per_row)\n assignment = random.sample(range(n_squares_per_row ** 2), self.n_snakes)\n\n\n assert self.grid_size >= 3*n_squares_per_row\n\n snakes = {}\n for snake, assign in enumerate(assignment):\n head = (random.randint(1, square_size-2) + (assign // n_squares_per_row) * square_size,\n random.randint(1, square_size-2) + (assign % n_squares_per_row) * square_size)\n snakes[snake] = newSnake([head, utils.add(head, random.sample(DIRECTIONS, 1)[0])], snake)\n\n fruits_to_put = 2 * int(self.fruit_ratio) + 1\n start_state = State(snakes, {})\n start_state.addNRandomfruits(fruits_to_put, self.grid_size)\n return start_state", "def _newgame(self):\n self._view.clear()\n self._game = Play(self._view)\n self._state = STATE_COUNTDOWN\n self.time = 0", "def init_game(self):\n nrows = len(self.array)\n self.game_over = False\n self.squares_left = nrows * nrows\n self.bombs_left = 0\n # clear the board\n for i in xrange(nrows):\n for j in xrange(nrows):\n self.array[i][j].reset()\n # put N random bombs\n for i in xrange(nrows):\n rand_num = random.randrange(nrows*nrows)\n if self.array[rand_num / nrows][rand_num % nrows].type \\\n != SquareType.BOMB:\n self.insert_bomb(rand_num / nrows, rand_num % nrows)\n self.squares_left -= self.bombs_left\n self.print_board()", "def init_random_state(self):\n self.current_state = self.rng.uniform(size=[1, self.num_spins])\n self.current_state = np.where(self.current_state < 0.5, -1.0, 1.0)", "def init_game():\n return BoardRenderer('LifeSim', GRID_SIZE, BLOCK_SIZE), World(GRID_SIZE, LAKE_SIZE, FOREST_WIDTH)", "def init(seed=None):\n\tglobal _game\n\n\tfrom .game import Game\n\tfrom .prompt import install_words\n\n\t_game = Game(seed)\n\tload_advent_dat(_game)\n\tinstall_words(_game)\n\t_game.start()\n\treturn _game", "def __init__(self, allow_step_back=False):\n self.allow_step_back = allow_step_back\n self.np_random = np.random.RandomState()\n \"\"\" No big/small blind\n # Some configarations of the game\n # These arguments are fixed in Leduc Hold'em Game\n # Raise amount and allowed times\n self.raise_amount = 2\n self.allowed_raise_num = 2\n self.num_players = 2\n \"\"\"\n # Some configarations of the game\n # These arguments can be specified for creating new games\n\n # Small blind and big blind\n self.small_blind = 1\n self.big_blind = 2 * self.small_blind\n\n # Raise amount and allowed times\n 
self.raise_amount = self.big_blind\n self.allowed_raise_num = 2\n\n self.num_players = 2", "def initializeGame(self):\n # Fill deck with cards and shuffle it\n self.deck.fill(104)\n self.deck.shuffle()\n #print \"Deck initialized\"\n\n # Initialize the field\n self.field.initialize(self.deck.draw(4))\n self.field.sortField()\n #self.field.printField()\n\n # Set players to initial state again\n # Distribute cards and set bulls to 0\n for p in self.players:\n p.bulls = 0\n p.setHand(self.deck.draw(10))", "def new_game(self):\n self.board = [None] * 9\n self.player = \"X\"\n self.winner = None", "def make_random_move(state: State) -> State:\n return random.choice(state.get_possible_states())", "def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def __init__(self, max_step=-1):\n self.environment = mls.rl.common.Environment()\n self.environment.game = mls.rl.common.Game(max_step=max_step)\n self.environment.current_state = self.environment.game.init_state(self.environment)", "def new_game(self):\n self.ui = UI()\n self.board.retract_board()\n self.board = Board()\n self.turn = BLUE\n self.selected_legal_moves = []\n self.selected_piece = None", "def setup_game(self):", "def new_game(self):\n self.cells = [] # Array of cells\n self.frame_count = 0\n self.database = []\n self.timer = [Consts[\"MAX_TIME\"], Consts[\"MAX_TIME\"]]\n self.result = None\n # Define the players first\n self.cells.append(Cell(0, [Consts[\"WORLD_X\"] / 4, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n self.cells.append(Cell(1, [Consts[\"WORLD_X\"] / 4 * 3, Consts[\"WORLD_Y\"] / 2], [0, 0], Consts[\"DEFAULT_RADIUS\"]))\n # Generate a bunch of random cells\n for i in range(Consts[\"CELLS_COUNT\"]):\n if i < 4:\n rad = 1.5 + (random.random() * 1.5) # Small cells\n elif i < 10:\n rad = 10 + (random.random() * 4) # Big cells\n else:\n rad = 2 + (random.random() * 9) # Everything else\n x = Consts[\"WORLD_X\"] * random.random()\n y = Consts[\"WORLD_Y\"] * random.random()\n cell = Cell(i + 2, [x, y], [(random.random() - 0.5) * 2, (random.random() - 0.5) * 2], rad)\n safe_dist = Consts[\"SAFE_DIST\"] + rad\n while min(map(cell.distance_from, self.cells[:2])) < safe_dist:\n cell.pos = [\n Consts[\"WORLD_X\"] * random.random(),\n Consts[\"WORLD_Y\"] * random.random()\n ]\n self.cells.append(cell)", "def test_initialization(number: int) -> None:\n for _ in range(number):\n if random.random() < 0.5:\n size = random.randint(3, 10)\n baby_position = [random.randint(0, size - 1), random.randint(0, size - 1)]\n num_berries = random.randint(1, size)\n else:\n size = [random.randint(3, 10), random.randint(3, 10)]\n baby_position = [\n random.randint(0, size[0] - 1),\n random.randint(0, size[1] - 1),\n ]\n num_berries = random.randint(1, size[0])\n print(f\"\\n\\n\\nSize of the board {size}\")\n print(f\"Baby position: {baby_position}\")\n print(f\"Number of berries to be placed randomly: {num_berries}\")\n game = Game(size, baby_position, 0, 0, 0, 0, num_berries)\n print(f\"Here is the board:\\n{game.get_board()}\")\n print(game.get_baby())\n for b in 
game.get_berries():\n print(b)", "def new_game(self):\n\n self.board = {}", "def __init__(self, _gameBeingPlayed, _charcter):\n self.name = \"random\"\n random.seed(2)\n self.game = _gameBeingPlayed\n self.character = _charcter\n self.neutralCharacter = self.game.neutralCharacter", "def reset(self, *args):\n self.state = GameStates.playing\n self.human = evilrps.Player('Human', self.get_player_choice)\n self.ai = evilrps.Player('AI', evilrps.create_ai())\n self.game = evilrps.Game(self.human, self.ai)", "def seed():", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK))", "def _init_game(self):\n state, player_id = self.game.init_game()\n if self.record_action:\n self.action_recorder = []\n return self._extract_state(state), player_id", "def start_new_game(self):\r\n\r\n self.initialize_game_params()\r\n self.timer = Timer(self.screen)\r\n self.mine_counter = MineCounter(self.num_of_mines, self.screen)\r\n self.reset_button = ResetButton(self.screen)\r\n self.high_score = HighScore(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.board = Board(self.rows, self.cols, self.num_of_mines, self.screen)\r\n self.play_game()", "def generate_random_start_state(self) -> State:\n part_states = []\n random.shuffle(self.blocks)\n placed = []\n t = 0\n\n for block in self.blocks:\n if 1 / (t + 1) >= random.random():\n part_states.append(PartState(f'on({block.arguments[0]},table)'))\n else:\n rand = random.randint(0, len(placed) - 1)\n part_states.append(PartState(f'on({block.arguments[0]},{placed[rand]})'))\n\n placed.append(block.arguments[0])\n t += 1\n\n return State(set(part_states))", "def __init__(self,state,player=WHITE):\n if(state==None):\n self.gameState = dict()\n for x in range(0,WIDTH):\n for y in range(0,HEIGHT):\n self.gameState[x,y] = EMPTY\n for x in range(0,WIDTH):\n self.gameState[x,BSTARTROW] = BLACK#Blacks starting row\n self.gameState[x,WSTARTROW] = WHITE#Whites starting row\n #whites.append(Board.pawn(Board.pos(x,WSTARTROW),WHITE))\n #blacks.append(Board.pawn(Board.pos(x,BSTARTROW),BLACK))\n else:\n self.gameState = state\n \n self.whoseTurn = player\n self.cachedWin = False # set to True in winFor() if\n self.cachedWinner = None", "def setup_new_game(self):\r\n self._player = Player()\r\n self._stats = GameStats(self._bb_settings)\r\n self._scoreboard = 
Scoreboard(self._bb_settings, self._screen)", "def init_game_setting(self):\n self.state.state_counter_while_testing += 1", "def test_setup_new_game(self):\n\n # Create a new game and make sure it has the correct settings\n game = Game()\n game.setup_new_game()\n self.assertTrue(game.dealer is not None, msg=\"The dealer of the game was not created.\")\n self.assertEqual(game.dealer.cards, [])\n self.assertEqual(game.state.name, \"get_number_of_packs\", msg=\"The initial game state was not correctly set.\")", "def init_game(self):\n while not self.finish:\n self.color = (randint(0, 255), randint(0, 255), randint(0, 255))\n self.position = (randint(5, 395), randint(5, 395))\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.finish = True\n\n elif event.type == pygame.MOUSEBUTTONUP:\n if event.button == 3:\n self.draw_square(self.SCREEN, self.color, self.position)\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_SPACE:\n self.draw_square(self.SCREEN, self.color, self.position)\n\n pygame.display.update()\n\n self.FPSCLOCK.tick(self.FPS)\n\n pygame.display.quit()", "def populate_games():\n global games\n games[0] = SnakeGame(lp)", "def rand(self):\n return self.State.rand()", "def initGame(self):\n self.map = {}\n self.blocks = Group()\n self.Coins =Group()\n self.players = Group()\n self.player1 = Player(1525,75,2)\n self.players.add(self.player1)\n if self.playernum == 2:\n self.player2 = Player(75,825,1)\n self.players.add(self.player2)\n else:\n self.player2 = False", "def __RandomState_ctor():\r\n return RandomState(seed=0)", "def new_game(cls, user):\n game = Game(user=user,\n game_state=\".........\",\n game_over=False)\n game.put()\n return game", "def initGame(width=19):\n state = np.zeros((width, width, 2))\n available = np.zeros((width, width))\n\n return state, available", "def __init__(self, random_state):\n self.random_state = random_state\n self.random_generator = RandomState(self.random_state)", "def create_one_game(self):\n return Game2048(task_name=self.result_path, game_mode=False)", "def random_player(env_name, render=False):\n\n # Make\n env = gym.make(env_name)\n\n n_game = 0\n\n # For each game\n while True:\n\n # Reset\n env.reset()\n done = False\n if render:\n env.render()\n\n # Until the end\n while not done:\n\n # Random agent moves\n action = env.action_space.sample()\n\n # Environment moves\n observation, reward, done, info = env.step(action)\n\n # Result\n if render:\n env.render()\n yield observation\n\n n_game += 1\n\n # Leave\n env.close()", "def main():\n g = Game(800, 600)\n g.start()", "def init_game(self):\n # Initilize a dealer that can deal cards\n self.dealer = Dealer(self.np_random)\n\n # Initilize two players to play the game\n self.players = [\n rlcard.games.leducholdem.Player(i, self.np_random)\n for i in range(self.num_players)\n ]\n\n # Initialize a judger class which will decide who wins in the end\n self.judger = Judger(self.np_random)\n\n # Prepare for the first round\n for i in range(self.num_players):\n self.dealer.shuffle()\n self.players[i].hand = self.dealer.deal_card()\n # Randomly choose a small blind and a big blind\n s = self.np_random.randint(0, self.num_players)\n b = (s + 1) % self.num_players\n self.players[b].in_chips = self.big_blind\n self.players[s].in_chips = self.small_blind\n self.public_card = None\n # The player with small blind plays the first\n self.game_pointer = s\n\n # Initilize a bidding round, in the first round, the big blind and the small blind needs to\n # be passed to the 
round for processing.\n self.round = Round(\n raise_amount=self.raise_amount,\n allowed_raise_num=self.allowed_raise_num,\n num_players=self.num_players,\n np_random=self.np_random,\n )\n\n self.round.start_new_round(\n game_pointer=self.game_pointer, raised=[p.in_chips for p in self.players]\n )\n\n # Count the round. There are 2 rounds in each game.\n self.round_counter = 0\n\n # Save the hisory for stepping back to the last state.\n self.history = []\n\n state = self.get_state(self.game_pointer)\n\n return state, self.game_pointer", "def reinitialize(self, random_state):\n pass", "def update_random_state(self):\n self.random_state = RandomState()", "def load_game(self):\n game = Game(self.w, self.h, self.screen)\n game.run()", "def game_start():\n herolist = Hero_List(hots_db)\n heroclasses = []\n for item in herolist:\n heroclasses.append(Item(item, 'hero'))\n curgame = Game(Team('home'), Team('enemy'), Team('hero_pool', heroclasses), '')\n return curgame", "def __init__(self):\n\n self.width = 10\n self.height = 10\n self.new_game()", "def start_game(self):\n\n\t\tpass", "def new_game():\n # Prints the welcome message to the terminal\n welcome_message()\n # Gets the players name\n player_name = name_input()\n # Creates the players game board\n player_board = GameBoard(player_name, 'player')\n # Creates the players guess board\n user_guess = GameBoard('GUESS', 'user guess')\n # Creates the computers board\n computer_board = GameBoard(\"COMPUTER's\", 'computer')\n # Creates the computers guess board\n computer_guess = GameBoard('COMPUTER GUESS', 'computer guess')\n # Randomly places the computers ships on their board\n computer_board.place_ships()\n # Prints the players board to the terminal for reference\n player_board.print_board()\n # Allows the player to place their ships\n player_board.place_ships()\n time.sleep(2)\n # Prints the players guess board to terminal for reference\n print(PHASE)\n print(' ')\n # Takes turns attacking until winner\n run_game(player_board, user_guess, computer_board, computer_guess)\n # Asks the player if they want to play again or quit\n play_again()", "def start_game(self):\n print(\"hi there, game started!\")\n self.draw()", "def simulate(state: GameState) -> int:\n moves = list(state.moves)\n #print(\" moves available: \", moves)\n for i in range(len(state.moves)):\n move = random.choice(moves)\n #print(\" move making: \", move)\n move_idx = moves.index(move)\n #print(\" index of move: \", move_idx)\n moves.pop(move_idx)\n #print(\" new moves available: \", moves)\n state = state.traverse(move)\n #print(\" Winner: \", state.util)\n #print(\" New Board: \", state.display)\n return state.util", "def random_state(state):\n old_state = RandomState()\n state.set_global()\n yield\n old_state.set_global()", "def do_create_game(self):\n\t\tself.nickname = self.e_nickname.text\n\n\t\tself.hide_all()\n\t\tself.show_create()\n\t\tself.renderer.color = (255, 255, 255, 0)", "def __init__(self):\n\n super().__init__()\n self.setup_janggi_game()\n self._game_state = 'UNFINISHED'\n self._player_turn = 'BLUE'", "def start_game(self) -> None:\n self.init_game()\n self.play()", "def random_v_random(n=1):\n p1_strategy = strategies.RandomStrategy()\n p2_strategy = strategies.RandomStrategy()\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_one()", "def create_game(game_ID):\n\n if r.exists(\"state:\" + game_ID) == 1:\n raise Exception(\"Game exists already\")\n\n 
new_game = {\n \"winner\": \"none\",\n \"turn\": \"blue\",\n \"action\": \"spymaster\",\n \"hint\": \"\",\n \"attemptsLeft\": 0,\n \"redPoints\": 0,\n \"bluePoints\": 0,\n }\n words = create_board()\n set_fields = r.hset(\"state:\" + game_ID, mapping=new_game)\n set_fields += r.hset(\"words:\" + game_ID, mapping=words)\n\n if set_fields == 32:\n return {\"playerState\": new_game, \"wordsState\": words}\n else:\n raise Exception(\"Could not make Game\")", "def random_state(numStates, numLetters):\n next_state = random.randrange(0, numStates)\n replace_letter = random.randrange(0, numLetters)\n movement = random.randrange(0, 2)\n return { \"next_state\": next_state, \"replace_letter\": replace_letter, \"movement\": movement }", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def reset(self):\n self.goal = random.randint(0, len(self.homes) - 1)\n\n curr_location = (random.randint(0, self.size-1), random.randint(0, self.size-1))\n bad_starts = self.homes + [self.store]\n\n while curr_location in bad_starts:\n curr_location = (random.randint(0, self.size - 1), random.randint(0, self.size - 1))\n\n self.curr_state = self.encode(*curr_location, 0, self.goal)\n return self.curr_state", "def _create_games(self):\n\n ''''''", "def init_game_setting(self):\n ##################\n # YOUR CODE HERE #\n ##################\n self.state = np.zeros((1, 80, 80))\n self.clear_action()", "def make_game():\n return ascii_art.ascii_art_to_game(\n GAME_ART, what_lies_beneath='.',\n sprites={'P': PlayerSprite})", "def __init__( self, prevState = None ): ###PLEASE NOTE THIS THAT THE __init__ method is here and this is where GameState() starts\n if prevState != None: # Initial state\n self.data = GameStateData(prevState.data) ##This statement imports the GameStateData object from the GameStateData class in game.py. 
This object contains a data packet documenting the state of the game.\n\n else:\n self.data = GameStateData()\n \"\"\"\n self._foodEaten = None\n self._capsuleEaten = None\n self._agentMoved = None\n self._lose = False\n self._win = False\n self.scoreChange = 0\n \"\"\"", "def restartGame(self):\n\t\tself.state = [[0 for x in range(3)] for y in range(3)]\n\t\tself.turn = self.whoGoesFirst()\n\t\tself.win = 0", "def new_game(self):\n self.bet_history = []\n self.in_game = [True] * len(self.agents)\n self.in_game_count = len(self.agents)\n self.bet_hist = []\n self.pot = big_blind + small_blind\n self.chips[self.starting_player] -= small_blind\n self.chips[(self.starting_player + 1) % len(self.agents)] -= big_blind\n self.all_in = [False] * len(self.agents)\n\n self.hands = None\n self.community_cards = []\n\n self.shuffle_deck()\n\n for i in range(0, len(agents)):\n self.agents[i].new_game(len(agents), i)", "def createState(self):\n return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(30))", "def restart(self):\n self.set_random_pos('starting')\n self.set_random_pos('finishing')\n self.game_loop()", "def initial_state(self):\r\n return State(self.take_card(Color.BLACK), self.take_card(Color.BLACK))", "def init_gym_pygame(game_name):\n # OpenAI gym setup \n game_name = 'gvgai-aliens-lvl0-v0'\n env = gym.make(game_name)\n env.reset()\n \n # pygame setup\n pg.init()\n height = env.observation_space.shape[0]\n width = env.observation_space.shape[1]\n screen = pg.display.set_mode((width, height))\n\n return env, screen", "def newGame(self):\n self.last_move = \"go\"\n self.values = [None for i in range(64)]\n for i in range(8):\n self.setPiece(i, 2, self.makePiece(ChessPiece.WHITE_PAWN, i, 2, \"wpawn\"+str(i)))\n self.setPiece(i, 7, self.makePiece(ChessPiece.BLACK_PAWN, i, 7, \"bpawn\"+str(i)))\n\n self.setPiece('a', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'a', 1, \"wrook0\"))\n self.setPiece('b', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'b', 1, \"wknight0\"))\n self.setPiece('c', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'c', 1, \"wbishop0\"))\n self.setPiece('d', 1, self.makePiece(ChessPiece.WHITE_QUEEN, 'd', 1, \"wqueen\"))\n self.setPiece('e', 1, self.makePiece(ChessPiece.WHITE_KING, 'e', 1, \"wking\"))\n self.setPiece('f', 1, self.makePiece(ChessPiece.WHITE_BISHOP, 'f', 1, \"wbishop1\"))\n self.setPiece('g', 1, self.makePiece(ChessPiece.WHITE_KNIGHT, 'g', 1, \"wknight1\"))\n self.setPiece('h', 1, self.makePiece(ChessPiece.WHITE_ROOK, 'h', 1, \"wrook1\"))\n\n self.setPiece('a', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'a', 8, \"brook0\"))\n self.setPiece('b', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'b', 8, \"bknight0\"))\n self.setPiece('c', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'c', 8, \"bbishop0\"))\n self.setPiece('d', 8, self.makePiece(ChessPiece.BLACK_QUEEN, 'd', 8, \"bqueen\"))\n self.setPiece('e', 8, self.makePiece(ChessPiece.BLACK_KING, 'e', 8, \"bking\"))\n self.setPiece('f', 8, self.makePiece(ChessPiece.BLACK_BISHOP, 'f', 8, \"bbishop1\"))\n self.setPiece('g', 8, self.makePiece(ChessPiece.BLACK_KNIGHT, 'g', 8, \"bknight1\"))\n self.setPiece('h', 8, self.makePiece(ChessPiece.BLACK_ROOK, 'h', 8, \"brook1\"))", "def init_game(self):\n self.view.carregar_jogadores_possiveis(self._possible_players_list())\n self.view.put_view_in_main_loop()", "def play_game(self):\n TF = self.TF\n # keep updating\n actions = collections.defaultdict(dict)\n for i in range(10):\n for j in range(self.N):\n actions[i][j] = 0\n\n sums = []\n for time in 
range(self.MAX):\n print(\"begin time epoch: \" + str(time))\n train_state_pool = collections.defaultdict(dict)\n flow_num = 0\n sum_all = 0\n for i in TF.keys():\n for j in TF[i].keys():\n for agent in self.Ns:\n actions[flow_num][agent.id] = random.randint(0, agent.n_actions - 1)\n\n # update states to ss_\n sum_all = self.update_state(flow_num, actions)\n\n flow_num += 1\n\n sums.append(sum_all)\n print('cut-random: ' + str(sum_all))\n if time % 10000 == 0 and time != 0:\n str1 = 'cut-mini-random' + str(time) + '.txt'\n file = open(str1, 'w')\n file.write(str(sums))\n file.close()", "def initial_state() -> Board:\n board = (\"rnbqkbnr\", \"pppppppp\", \"........\", \"........\", \"........\",\n \"........\", \"PPPPPPPP\", \"RNBQKBNR\")\n\n return board", "def rand(self):\n self.state = (self.a * self.state + self.c)\n return self.state", "def create_game_internal(black, white,\n sgf_or_stones=None,\n stones=None, sgf=None):\n assert sum(1 for x in [sgf_or_stones, stones, sgf]\n if x is not None) <= 1, \\\n \"can't supply more than one initial state to create_game_internal\"\n if sgf_or_stones:\n if isinstance(sgf_or_stones, str):\n assert sgf_or_stones[0] == '(', \\\n \"invalid SGF passed to create_game_internal; if you meant \" \\\n \"a text map, make it a list\"\n sgf = sgf_or_stones\n else:\n stones = sgf_or_stones\n if not sgf:\n if not stones:\n stones = []\n sgf = sgf_from_text_map(stones)\n game = Game(black=black, white=white, sgf=sgf,\n last_move_time=datetime.now())\n db.session.add(game)\n db.session.commit()\n return game", "def reset_game(self):\n self.bHasWon = False\n self.game_over = False\n self.game_active = False\n self.player_points = 0\n self.enemy_points = 0\n self.remainingBalls = 0", "def __init__(self, \n game_name: str, \n grayscale: bool = True, \n frameskip: int = 0, \n seed: Optional[int] = None, \n withstate: Optional[dict] = None):\n self.game_name = game_name\n self.frames_per_action = frameskip + 1\n self.rsimulator = Simulator(game_name)\n self.rstate = self.rsimulator.new_game()\n self.grayscale = grayscale\n if seed:\n self.set_seed(seed)\n self.new_game()\n if withstate:\n self.write_state_json(withstate)", "def seed():\n pass", "def seed():\n pass", "def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")", "def intro():\n room = Room()\n room.name = \"Intro\"\n \"\"\" Set up the game and initialize the variables. 
\"\"\"\n\n if room.is_answered is True:\n room.key = arcade.Sprite(\"images/16x16/Item__69.png\", WALL_SPRITE_SCALING)\n room.key.center_x = random.randrange(SCREEN_WIDTH)\n room.key.center_y = random.randrange(SCREEN_HEIGHT)\n room.key.draw()\n\n # Load the background image for this level.\n room.background = arcade.load_texture(\"images/classic1.png\")\n\n room.wall_list = arcade.SpriteList()\n\n return room", "def get_new_gamestate(self):", "def start(self):\n self.player = Player()\n self.dealer = Dealer()\n self.pot = 0\n self.side_bet = 0\n start_game()", "def __init__(self):\n self._game_state = \"UNFINISHED\"\n self._current_player = \"BLACK\"\n self._game_board = Board()", "def __init__(self):\n self.opening_scene = DungeonGate()\n # this list define the order of scenes in the corridor\n self.corridor_scenes = [GuardsRoom(), Cell(), Armory(), EmptyRoom(), Dormitory()]\n shuffle(self.corridor_scenes)\n self.explored_scenes = {\n \"GuardsRoom\": \"unexplored\",\n \"Cell\": \"unexplored\",\n \"Dormitory\": \"unexplored\",\n \"Armory\": \"unexplored\",\n \"EmptyRoom\": \"unexplored\",\n \"DungeonGate\": \"unexplored\"\n }", "def test_state(\n size: Union[int, tuple],\n num_berries: int,\n number_steps: int,\n state_sizes: List[int] = [3, 5],\n) -> None:\n for state_size in state_sizes:\n game = Game(\n size,\n [0, 0],\n -1,\n 5,\n -5,\n 10,\n num_berries,\n berry_movement_probabilities=[0.5] * num_berries,\n state_size=state_size,\n )\n done = False\n i = 1\n print(f\"Beginning full board\\n{game.get_state(full=True)}\")\n print(f\"And the state\\n{game.get_state(state_size)}\")\n while not done and i < number_steps:\n action = random.choice(MOVEMENTS)\n print(f\"Action taken {action}\")\n state, reward, done = game.step(action)\n print(f\"Full board\\n{game.get_state(full=True)}\")\n print(f\"The state\\n{game.get_state(state_size)}\")\n i += 1", "def run(self, GameState):\n pass" ]
[ "0.7173411", "0.7049228", "0.6956561", "0.6915384", "0.6877471", "0.67299753", "0.67241377", "0.6658237", "0.6629494", "0.66283655", "0.66206324", "0.65918475", "0.6558591", "0.6555488", "0.6528986", "0.65245974", "0.6520028", "0.64893174", "0.6486757", "0.64732444", "0.64720464", "0.6452912", "0.6444048", "0.64350516", "0.6429097", "0.64184624", "0.64183575", "0.6414381", "0.6409974", "0.6407474", "0.64073765", "0.63757074", "0.6374155", "0.6367984", "0.6353689", "0.63475806", "0.6330589", "0.63303864", "0.6322867", "0.6318919", "0.63115853", "0.6307152", "0.6305074", "0.62989515", "0.62978274", "0.6275151", "0.62734604", "0.62708163", "0.62682486", "0.6260119", "0.62566626", "0.62531835", "0.624951", "0.62406355", "0.61998606", "0.6198281", "0.6190069", "0.6188832", "0.6187552", "0.6182523", "0.61819184", "0.6178439", "0.61735666", "0.61705524", "0.61679804", "0.6165271", "0.6162554", "0.6159707", "0.6158289", "0.6156075", "0.61528176", "0.6146753", "0.61397153", "0.61377966", "0.6125831", "0.61214864", "0.61188245", "0.61151814", "0.6112898", "0.6099125", "0.6095608", "0.6091209", "0.60654956", "0.60644054", "0.6063851", "0.6057729", "0.60526216", "0.60422623", "0.60331994", "0.6028338", "0.6027717", "0.6023374", "0.6023374", "0.6016433", "0.6011373", "0.60112405", "0.6008628", "0.6001844", "0.5989496", "0.597631", "0.597105" ]
0.0
-1
Returns the next state of the game
def generate_next_state(self) -> Dict[Tuple[int, int], Cell]: next_state: Dict[Tuple[int, int], Cell] = {} for living_cell in self.living_cells.values(): for x in range(living_cell.x - 1, living_cell.x + 2): for y in range(living_cell.y - 1, living_cell.y + 2): cell = Cell(x, y) if (x, y) in self.living_cells.keys(): cell = self.living_cells[x, y] if self._should_cell_live(cell): next_state[x, y] = Cell(x, y, True) self.living_cells = next_state return self.living_cells
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next(self):\n\t\tassert(len(self.past_values) < 256**3)\n\t\twhile self.advance_state() in self.past_values:\n\t\t\tpass\n\t\tself.past_values.add(self.state)\n\t\treturn self.state", "def getNextState(self):\n if self.__mode == \"AlphaBeta\":\n next_state = self.startAlphaBeta()\n else:\n next_state = self.startMiniMax()\n self.__state = next_state\n return next_state", "def get_next_state(self, state, action):\n pass", "def getNextState(self):\n return None", "def next_state(self, debug=False):\n\n if self.current_state == 'NoObstacle':\n # First check if any obstacle is in sight\n if self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n elif self.transitions.obstacle_in_sight():\n self.current_state = 'Obstacle'\n\n elif self.current_state == 'Obstacle':\n # First check if obstacle is still in sight\n if self.transitions.no_obstacle_in_sight() and not self.transitions.obstacle_in_sight():\n self.current_state = 'NoObstacle'\n elif self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n\n elif self.current_state == 'RoomReached':\n self.current_state = 'InspectCorners'\n\n elif self.current_state == 'InspectCorners':\n if self.transitions.all_corners_inspected():\n if not self.transitions.all_rooms_visited():\n self.current_state = 'RotateToExit'\n else:\n self.current_state = 'Finished'\n\n elif self.current_state == 'RotateToExit':\n if self.transitions.aiming_to_carrot():\n self.current_state = 'NoObstacle'\n\n\n elif self.current_state == 'Finished':\n pass\n\n # DEBUG\n if debug:\n print 'Next state: %s' % self.current_state\n\n if self.current_state is not self.old_state:\n print self.current_state\n\n self.old_state = self.current_state\n\n return self.current_state", "def next(self):\n self.current_state = self.next_state\n self.next_state = self.clear_screen() # set values to 0\n for x in range(1, 101):\n for y in range(1, 101):\n # calculate the number of alive neighbours at given coordinates\n self.neighbours_alive = self.check_neighbours_alive(x, y)\n\n # assign the result value from rule sets\n self.next_state[x][y] = self.rule_sets[self.selected_rule][ # selected rule name\n str(self.current_state[x][y])][ # 0 or 1 (dead or alive)\n self.neighbours_alive] # number between 0 to 8\n return self.next_state", "def go_to_next_state(self):\n pass", "def get_next_state(self, player, current_state, action_index):\n action = self.get_action(player, action_index)\n new_state = self.game.get_next_state(current_state, action)\n return new_state", "def findNextMove(state):\n return alphabeta_search(state, 3)", "def getNextState(self,input):\n\t\tif input in self.trans:\n\t\t\treturn self.trans[input]\n\t\telse:\n\t\t\treturn None", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, -1, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 5, False, {}\n else:\n return state, -100, True, {}", "def next_step(self, state, x, y):\n my_board = state\n if not is_new_move(my_board, x, y):\n return my_board, 0, False, {}\n while True:\n state, game_over = self.get_next_state(my_board, x, y)\n if not game_over:\n if is_win(state):\n return state, 1000, True, {}\n else:\n return state, 0, False, {}\n else:\n return state, -100, True, {}", "def next_state(self, action):\n self.state = self.states[action][self.state]", "def getNextState(self, board, player, 
action):\n b = self._base_board.with_np_pieces(np_pieces=np.copy(board))\n b.add_stone(action, player)\n return b.np_pieces, -player", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def get_next_state(self):\n return self.stack.pop()", "def next_state(self, current_state):\n\t\tchoices_amount = len(self.states)\n\t\tchoice_index = np.random.choice(\n\t\t\tchoices_amount, \n\t\t\tp=[self.transition_prob[current_state][next_state] \n\t\t\t\tfor next_state in self.states]\n\t\t)\n\t\treturn self.states[choice_index]", "def computeNextState(self):\n aliveNeighbors = self.numOfLiveNeighbors()\n if aliveNeighbors < 2 or aliveNeighbors > 3:\n self.setNextToDead()\n\n if not self.isAlive() and aliveNeighbors == 3:\n self.setNextToAlive()", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def get_next_state(self, state, x, y):\n my_board = state\n game_over = False\n if is_mine(self.board, x, y):\n my_board[x, y] = MINE\n game_over = True\n else:\n my_board[x, y] = self.count_neighbour_mines(x, y)\n if my_board[x, y] == 0:\n my_board = self.open_neighbour_cells(my_board, x, y)\n self.my_board = my_board\n return my_board, game_over", "def _get_next_state(net, r0):\n\n vj = GillespieSimulator._pick_next_reaction(net, r0)\n return GillespieSimulator._apply_change_vector(net.species, vj) if vj else net.species", "def next(self):\n self.state += 1\n if self.state > 1:\n self.state = 0", "def get_new_gamestate(self):", "def get_next_turn(game):\n return game['next_turn']", "def get_current_state(self):\n return self.nextYs[-1]", "def next(self, state, turn, greedy_strategy):\n return self.agent_action", "def get_next_move(self, game_state):\n next_move = None\n encoded_game_state = self.__encode_state(game_state)\n\n self.__init_q_values(game_state)\n\n if random.random() < self.epsilon:\n next_move = self.__get_next_random_move(game_state)\n self.__update_epsilon()\n else:\n next_move = self.__get_next_greedy_move(game_state)\n\n self.game_moves_history.append((encoded_game_state, next_move))\n\n return next_move", "def get_winner(state):\n\n if", "def _run_next_state(self):\n if self.state != \"STOP\":\n self.state = self.get_state_info(\"next\")\n self._run_state()", "def next_move(\r\n self,\r\n state: TwoPlayerGameState,\r\n gui: bool = False,\r\n ) -> TwoPlayerGameState:\r\n\r\n successors = self.generate_successors(state)\r\n\r\n alpha = -np.inf\r\n beta = np.inf\r\n\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, alpha))\r\n\r\n successor_alpha_beta_value = self._min_value(\r\n successor,\r\n alpha,\r\n beta,\r\n self.max_depth_minimax,\r\n )\r\n\r\n if (successor_alpha_beta_value > alpha):\r\n alpha = 
successor_alpha_beta_value\r\n next_state = successor\r\n\r\n if self.verbose > 0:\r\n if self.verbose > 1:\r\n print('\\nGame state before move:\\n')\r\n print(state.board)\r\n print()\r\n print('Alpha value = {:.2g}'.format(alpha))\r\n\r\n return next_state", "def next_state(self, current_state):\n return np.random.choice(\n self.states, \n p=self.transition_matrix[self.index_dict[current_state], :]\n )", "def generate_next_state(self, action) :\n raise NotImplementedError", "def next_state(self, current_state):\n return np.random.choice(\n self.states,\n p=self.transition_matrix[self.index_dict[current_state], :]\n )", "def get_next_states(self):\n return self.__next_state", "def _get_next_state(self):\n self.string_level_blocks.popleft()\n self.sprite_level_blocks.popleft()\n self._generate_next_blocks()\n self.is_start = False", "def current_state():\n global current_state\n while current_state is None:\n pass\n return current_state", "def calculate_next_board_state(self):\n new_board_state = np.zeros_like(self.board_state)\n\n for x in range(self.board_size[0]):\n for y in range(self.board_size[0]):\n new_board_state[x][y] = self.next_state_of_cell(x,y)\n \n self.set_state(new_board_state)", "def getNextState(self, current_state, coordinator_action):\n \n # if robot_action == human_action:\n # return current_state\n \n robot_action = coordinator_action[1]\n human_action = self.getHumanAction(current_state, coordinator_action)\n total_action = tuple(map(add, human_action, robot_action))\n state = (tuple(map(add, current_state[0], total_action)), current_state[1])\n return state", "def user_to_move_in_game(game):\n if game.finished:\n return None\n black_or_white = go.next_color(game.sgf)\n next_in_game = {go.Color.black: game.black,\n go.Color.white: game.white}[black_or_white]\n return next_in_game", "def next_state(self):\n \n self.state = np.random.choice(['checkout', 'dairy', 'drinks', 'fruit', 'spices'], p=self.tr_array_dict[f'{self.state}'])", "def get_next_state(self, pos, action):\n new_pos = [0, 0]\n if action in [5, 0, 4]:\n new_pos[0] = pos[0]-1\n elif action in [7, 1, 6]:\n new_pos[0] = pos[0]+1\n else:\n new_pos[0] = pos[0]\n\n if action in [5, 3, 7]:\n new_pos[1] = pos[1]-1\n elif action in [4, 2, 6]:\n new_pos[1] = pos[1]+1\n else:\n new_pos[1] = pos[1]\n return tuple(new_pos)", "def handle_get_action(self, state):\n\n # This is an example player who picks random moves. 
REMOVE THIS WHEN YOU ADD YOUR OWN CODE !!\n\n #next_move = tuple(self.pick_random_free_cell(\n # state, size=int(math.sqrt(len(state)-1))))\n #############################\n #\n #\n NN_state = self.server_state_to_NN_state(state)\n predictions = self.policy_network.predict([[NN_state]])\n next_move = np.argmax(predictions)\n self.game.set_state(NN_state,1)\n legal_actions = self.game.get_legal_actions()\n if next_move not in legal_actions:\n next_move = np.random.choice(legal_actions,1)\n next_move = self.action_to_tuple_action(next_move)\n\n #\n # next_move = ???\n ##############################\n return next_move", "def next_move(self, cur_state):\n\n alpha, final_state, min_level, action_took = self.alpha_beta(cur_state, 2, 0, -math.inf, math.inf, math.inf)\n #print(\"-----------------------------------------\")\n #print(\"value = \"+str(alpha)+\", min_level = \"+str(min_level))\n #print(\"previous: top=\"+str(cur_state.top)+\", bottom=\"+str(cur_state.bottom)+\", left=\"+str(cur_state.left)+\", right=\"+str(cur_state.right))\n #print(final_state.pre_state)\n return action_took", "def get_next_state(self, u1, true_props):\n\t\t# check not one of the broken\n\t\tif (u1 < self.u_broken):\n\t\t\t# for every next possible state\n\t\t\tfor u2 in self.delta_u[u1]:\n\t\t\t\t# if validates the formula (see in reward machine utils evaluate_dnf(formula, true_props))\n\t\t\t\t# then its the next state for the agent (note this is not pruning the next possible states)\n\t\t\t\tif evaluate_dnf(self.delta_u[u1][u2], true_props):\n\t\t\t\t\treturn u2\n\t\t# if u1 is broken or none of the next states validates evaluate_dnf() then return broken\n\t\t\t#print(u1,u2,self.delta_u[u1][u2],true_props)\n\t\t\treturn self.u_broken", "def next_move(\r\n self,\r\n state: TwoPlayerGameState,\r\n gui: bool = False,\r\n ) -> TwoPlayerGameState:\r\n\r\n successors = self.generate_successors(state)\r\n\r\n minimax_value = -np.inf\r\n\r\n for successor in successors:\r\n if self.verbose > 1:\r\n print('{}: {}'.format(state.board, minimax_value))\r\n\r\n successor_minimax_value = self._min_value(\r\n successor,\r\n self.max_depth_minimax,\r\n )\r\n\r\n if (successor_minimax_value > minimax_value):\r\n minimax_value = successor_minimax_value\r\n next_state = successor\r\n\r\n if self.verbose > 0:\r\n if self.verbose > 1:\r\n print('\\nGame state before move:\\n')\r\n print(state.board)\r\n print()\r\n print('Minimax value = {:.2g}'.format(minimax_value))\r\n\r\n return next_state", "def _get_state(self, state, direction):\n row_change = [-1,1,0,0]\n col_change = [0,0,-1,1]\n row_col = seq_to_col_row(state, self.num_cols)\n row_col[0,0] += row_change[direction]\n row_col[0,1] += col_change[direction]\n\n # check for invalid states\n if self.obs_states is not None:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1) or\n np.any(np.sum(abs(self.obs_states - row_col), 1)==0)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n else:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n\n return next_state", "def next_state_generation(state, target, operators):\n current_swap_state = ''\n current_swap_res = sys.maxsize\n current_change_state = ''\n current_change_res = sys.maxsize\n # for swapping\n for i in range(0, len(state), 2):\n for j in range(2, len(state), 2):\n 
new_swap_state, swap_res = swap(i, j, state, target)\n if current_swap_res > swap_res:\n current_swap_res = swap_res\n current_swap_state = new_swap_state\n # for changing\n for i in range(1, len(state) - 1, 2):\n new_change_state, change_res = change(i, random.choice(operators), state, target)\n if current_change_res > change_res:\n current_change_res = change_res\n current_change_state = new_change_state\n # return the lowest of the 2 value and state amongst swapping and changing operations\n print(\"Swap res:\", current_swap_res)\n print(\"Change res:\", current_change_res)\n if current_swap_res < current_change_res:\n print(\"Best State \", current_swap_state)\n print(\"Distance from target: \", current_swap_res)\n print()\n return current_swap_state, current_swap_res\n else:\n print(\"Best State \", current_change_state)\n print(\"Distance from target: \", current_change_res)\n print()\n return current_change_state, current_change_res", "def get_next(self, execute: bool = True):\n if len(self.q) == 0:\n state = self.idle\n data = None\n else:\n state = self.q.popleft()\n data = self.data_q.popleft()\n if state not in self.states:\n state = self.unknown\n\n if execute:\n self.execute_state(state, data)\n else:\n return self.states[state]", "def next_state_of_cell(self, x_cell, y_cell):\n neighbours = self.get_number_neighbours_of_cell(x_cell, y_cell)\n if(self.board_state[x_cell][y_cell] == 1):\n # Any live cell with more than three live neighbours dies, \n # as if by overpopulation.\n if(neighbours > 3):\n return 0\n # Any live cell with fewer than two live neighbours dies,\n # as if by underpopulation.\n elif(neighbours < 2):\n return 0\n # Any live cell with two or three live neighbours lives\n # on to the next generation.\n else:\n return 1\n if(self.board_state[x_cell][y_cell] == 0):\n # Any dead cell with exactly three live neighbours becomes a live cell, \n # as if by reproduction.\n if(neighbours == 3):\n return 1\n else:\n return 0", "def next_state(self) -> Set[Position]:\n return self._next_state", "def nextMoveDecision(self):\n b = random.randint(1, 9) \n while (self.Occupied(b)):\n b = random.randint(1, 9) \n return b", "def next_state(s_curr, action, params):\n P_dist = params['P_dist']\n R = params['R']\n n_rows = params['n_rows']\n n_cols = params['n_cols']\n occ_grid = params['occ_grid']\n\n rnd = np.random.uniform()\n\n s_next = s_curr\n\n # Actions - ['left','right','up','down']\n\n if rnd <= P_dist:\n if action == 0:\n move = 2\n elif action == 1:\n move = 2\n elif action == 2:\n move = 1\n else:\n move = 0\n elif rnd < 2*P_dist:\n if action == 0:\n move = 3\n elif action == 1:\n move = 3\n elif action == 2:\n move = 1\n else:\n move = 1\n else:\n move = action\n\n # Move left\n if move == 0:\n row_next = s_curr[0]\n col_next = s_curr[1] - 1\n if col_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move right\n if move == 1:\n row_next = s_curr[0]\n col_next = s_curr[1] + 1\n if col_next < n_cols and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move up\n if move == 2:\n row_next = s_curr[0] - 1\n col_next = s_curr[1]\n if row_next >= 0 and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n # Move down\n if move == 3:\n row_next = s_curr[0] + 1\n col_next = s_curr[1]\n if row_next < n_rows and occ_grid[row_next, col_next] == 0:\n s_next = [row_next, col_next]\n\n r = R[s_next[0], s_next[1]]\n return s_next, r", "def advance_state_machine():\n global state_num\n\n if state_num == 
0:\n brighter_switch(jess, \"orange\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(tess, \"green\")\n state_num = 1\n\n elif state_num == 1:\n brighter_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n dimmer_switch(tess, \"green\")\n\n state_num = 2\n\n else:\n brighter_switch(tess, \"green\")\n dimmer_switch(ness, \"red\")\n dimmer_switch(jess, \"orange\")\n\n state_num = 0", "def next_move(self):\n return self.decoded_population[self.current_index]", "def get_game_state(self):\n return self._current_state", "def next_states(self):\n return self._states[1:]", "def start_state():\n return chess.Board()", "def _next_goal(self):\n current_state = self.goal_generation.current_state()\n\n return self.goal_generation.next_goal(self._random_state, current_state)", "def next_state(self):\r\n observed_state: State = self.opened.popleft()\r\n if not self.is_solvable():\r\n print(\"UNSOLVABLE\")\r\n return\r\n\r\n if np.all(observed_state == self.target_state):\r\n self.current_state = observed_state\r\n return\r\n\r\n self.closed.add(observed_state)\r\n\r\n for neighbor in observed_state.neighbors():\r\n if neighbor not in self.closed or neighbor not in self.opened:\r\n self.opened.append(neighbor)", "def next_state_func(self, state, action, Time_matrix):\n curr_loc, curr_time, curr_day = state\n pickup_loc, drop_loc = action\n \n rewards = self.reward_func(state, action, Time_matrix)\n total_time = 0\n \n if action == (0,0):\n # update time by 1 hour\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, 1)\n next_state = (curr_loc, curr_time, curr_day)\n total_time = 1\n else:\n # time from curr_loc to reach pickup_loc\n t1 = int(Time_matrix[curr_loc][pickup_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t1)\n \n # time from pickup_loc to reach drop_loc\n t2 = int(Time_matrix[pickup_loc][drop_loc][curr_time][curr_day])\n curr_time, curr_day = self.time_day_update_func(curr_time, curr_day, t2)\n \n total_time = t1 + t2\n next_state = (drop_loc, curr_time, curr_day)\n \n return next_state, rewards, total_time", "def leaderFiniteStateMachine(self):\n if(self.mode == 'simulation'):\n print(self.name + \": Actualizamos la maquina de estado del lider\")\n \n else:\n if(self.current_state == EMERGENCY or self.current_state == UNDEFINED): # No puede salir de EMERGENCIA, hay que reiniciar el robot\n self.next_state = EMERGENCY\n #1\n elif(self.current_state == STOP and (self.st_information.light_detection == sensors.HIGH_LIGHT_DETECTED or self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED or self.st_information.light_detection == sensors.UNKNOWN_LIGHT_DETECTED)):\n self.next_state = STOP\n print(1)\n #2\n elif(self.current_state == MOVING_FORWARD_MAX and (self.st_information.light_detection == sensors.NO_LIGHT_DETECTED or self.st_information.light_detection == sensors.UNKNOWN_LIGHT_DETECTED)):\n self.next_state = MOVING_FORWARD_MAX\n print(2)\n #3\n elif(self.current_state == MOVING_FORWARD_PROPORTIONAL and (self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED or self.st_information.light_detection == sensors.UNKNOWN_LIGHT_DETECTED)):\n self.next_state = MOVING_FORWARD_PROPORTIONAL\n print(3)\n #4\n elif(self.current_state == MOVING_FORWARD_MAX and self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED):\n self.next_state = MOVING_FORWARD_PROPORTIONAL\n print(4)\n #5\n elif(self.current_state == MOVING_FORWARD_PROPORTIONAL and self.st_information.light_detection == 
sensors.NO_LIGHT_DETECTED):\n self.next_state = MOVING_FORWARD_MAX\n print(5)\n #6\n elif(self.current_state == STOP and self.st_information.light_detection == sensors.NO_LIGHT_DETECTED and self.st_actions.object_picked == False and self.st_actions.grasping == False):\n self.next_state = MOVING_FORWARD_MAX\n print(6) \n #7\n elif(self.current_state == MOVING_FORWARD_MAX and self.st_information.light_detection == sensors.HIGH_LIGHT_DETECTED):\n self.next_state = STOP\n self.st_actions.grasping = True\n print(7)\n #8\n elif(self.current_state == STOP and self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED and self.st_actions.object_picked == False and self.st_actions.grasping == False):\n self.next_state = MOVING_FORWARD_PROPORTIONAL\n print(8) \n #9\n elif(self.current_state == MOVING_FORWARD_PROPORTIONAL and self.st_information.light_detection == sensors.HIGH_LIGHT_DETECTED):\n self.next_state = STOP\n self.st_actions.grasping = True\n print(9)\n #10\n elif(self.current_state == STOP and self.st_information.light_detection == sensors.NO_LIGHT_DETECTED and self.st_actions.grasping == True):\n self.next_state = PICK_PLACE_OBJECT\n self.st_actions.object_picked = not(self.st_actions.object_picked)\n print(self.st_actions.object_picked)\n print(10)\n \n #11\n #elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.finished_grasping == False):\n # self.next_state = PICK_PLACE_OBJECT\n \n #12\n elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == True):\n self.next_state = MOVING_BACKWARD_MAX\n self.st_actions.grasping = False\n print(12)\n #elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == True and self.st_information.light_detection == sensors.NO_LIGHT_DETECTED):\n # self.next_state = MOVING_BACKWARD_MAX\n # self.st_actions.grasping = False\n #13\n #elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == True and self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED):\n # self.next_state = MOVING_BACKWARD_PROPORTIONAL\n # self.st_actions.grasping = False \n #14\n elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == False):\n self.next_state = MOVING_FORWARD_MAX\n self.st_actions.grasping = False\n print(14)\n #elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == False and self.st_information.light_detection == sensors.NO_LIGHT_DETECTED):\n # self.next_state = MOVING_FORWARD_MAX\n # self.st_actions.grasping = False\n #15\n #elif(self.current_state == PICK_PLACE_OBJECT and self.st_actions.object_picked == False and self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED):\n # self.next_state = MOVING_FORWARD_PROPORTIONAL\n # self.st_actions.grasping = False\n #16\n elif(self.current_state == MOVING_BACKWARD_MAX and (self.st_information.light_detection == sensors.NO_LIGHT_DETECTED or self.st_information.light_detection == sensors.UNKNOWN_LIGHT_DETECTED)):\n self.next_state = MOVING_BACKWARD_MAX\n #17\n elif(self.current_state == MOVING_BACKWARD_PROPORTIONAL and (self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED or self.st_information.light_detection == sensors.UNKNOWN_LIGHT_DETECTED)):\n self.next_state = MOVING_BACKWARD_PROPORTIONAL\n #18\n elif(self.current_state == MOVING_BACKWARD_MAX and self.st_information.light_detection == sensors.LOW_LIGHT_DETECTED):\n self.next_state = MOVING_BACKWARD_PROPORTIONAL\n #19\n elif(self.current_state == MOVING_BACKWARD_PROPORTIONAL and 
self.st_information.light_detection == sensors.NO_LIGHT_DETECTED):\n self.next_state = MOVING_BACKWARD_MAX\n #20 \n elif(self.current_state == MOVING_BACKWARD_MAX and self.st_information.light_detection == sensors.HIGH_LIGHT_DETECTED):\n self.next_state = STOP\n self.st_actions.grasping = True\n #21\n elif(self.current_state == MOVING_BACKWARD_PROPORTIONAL and self.st_information.light_detection == sensors.HIGH_LIGHT_DETECTED):\n self.next_state = STOP\n self.st_actions.grasping = True\n \n else:\n self.next_state = UNDEFINED\n \n self.current_state = self.next_state", "def _get_next_state(self,\n single_input: np.ndarray) -> np.ndarray:\n\n # check if feedback weights matrix is not None but empty feedback\n if self.Wfb is not None and self.output_values is None:\n raise RuntimeError(\"Missing a feedback vector.\")\n\n x = self.state[1:self.N+1]\n\n # add bias\n if self.in_bias:\n u = np.hstack((1, single_input)).astype(self.typefloat)\n else:\n u = single_input\n\n # linear transformation\n x1 = self.Win @ u.reshape(-1, 1) + self.W @ x\n\n # add feedback if requested\n if self.Wfb is not None:\n x1 += self.Wfb @ self.fbfunc(self.output_values)\n\n # previous states memory leak and non-linear transformation\n x1 = (1-self.lr)*x + self.lr*np.tanh(x1)\n\n # return the next state computed\n if self.use_raw_inp:\n self.state = np.vstack((1.0, x1, u.reshape(-1, 1)))\n else:\n self.state = np.vstack((1.0, x1))\n\n return self.state.copy()", "def nextState(self, state, action):\n return state + action", "def getAction(self, gameState):\n \"*** YOUR CODE HERE ***\"\n games = 0\n begin = datetime.datetime.utcnow()\n while datetime.datetime.utcnow() - begin < self.calculation_time:\n games += 1\n self.run_simulation(gameState)\n legal = gameState.getLegalActions(0)\n successors = [(gameState.generateSuccessor(0, action), action) for action in legal]\n successors = [(successor, action) for successor, action in successors\n if successor in self.plays and self.plays[successor] != 0]\n values = list((1.0 * self.wins[successor] / self.plays[successor], action)\n for successor, action in successors)\n max_val = max(value for value in values if not math.isnan(value[0]))\n return random.choice([(successor, action) for successor, action in successors\n if max_val[0] == (1.0 * self.wins[successor] / self.plays[successor])])[1]", "def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove", "def _next_turn(self):\n return self.TURNS[self._turn is self.BLACK]", "def executeNextMove(self, gameState, currIndex):\n succScores = util.Counter()\n actions = getLegalActionsNoStop(gameState, currIndex)\n successors = [gameState.generateSuccessor(currIndex, a) for a in actions]\n for s in successors:\n succScores[s] = self.evaluateState(s, currIndex)\n chosenMoveState = max(succScores)\n return chosenMoveState", "def get_state(self):\n return ONEUP_STATES[self.state][0]", "def step(self, action):\r\n s = self.get_state()\r\n\r\n elements = np.arange(self.S)\r\n # weights = np.squeeze(self.nextStateProbability[s,action])\r\n weights = self.nextStateProbability[s, action]\r\n nexts = choices(elements, weights, k=1)[0]\r\n\r\n # p = self.nextStateProbability[s,action]\r\n # 
reward = self.rewardsTable[s,action, nexts][0]\r\n reward = self.rewardsTable[s, action, nexts]\r\n\r\n # fully observable MDP: observation is the actual state\r\n self.currentObservation = nexts\r\n\r\n gameOver = False\r\n if self.currentIteration > np.Inf:\r\n ob = self.reset()\r\n gameOver = True # game ends\r\n else:\r\n ob = self.get_state()\r\n\r\n history = {\"time\": self.currentIteration, \"state_t\": s, \"action_t\": action,\r\n \"reward_tp1\": reward, \"state_tp1\": nexts}\r\n # history version with actions and states, not their indices\r\n # history = {\"time\": self.currentIteration, \"action_t\": self.actionListGivenIndex[action],\r\n # \"reward_tp1\": reward, \"observation_tp1\": self.stateListGivenIndex[self.get_state()]}\r\n self.currentIteration += 1\r\n return ob, reward, gameOver, history", "def next_states(self):\n return self.get_next_states()", "def next_state(self):\n\n # Increases current path index\n self.current_state_index += 1\n\n # Retrieves the current state in the path and updates it\n self.status = self.path_states[self.current_state_index]", "def play_move(self,state):\n self.__engine.set_state(state)\n result = self.__engine.getNextState()\n time_elapsed = self.__engine.get_time_elapsed()\n num_nodes = self.__engine.get_num_explored()\n if self.moves == 0:\n self.average_time = time_elapsed\n self.average_nodes = num_nodes\n else:\n self.average_time = ( (self.average_time * self.moves) + time_elapsed ) / (self.moves+1)\n self.average_nodes = ( (self.average_nodes * self.moves) + num_nodes ) / (self.moves+1)\n self.moves += 1\n return result", "def next_states(self, state):\n import copy\n\n ans = []\n current_array = state.board.array\n space_pos = state.board.space\n\n up_pos = [space_pos[0] - 1, space_pos[1]]\n down_pos = [space_pos[0] + 1, space_pos[1]]\n left_pos = [space_pos[0], space_pos[1] - 1]\n right_pos = [space_pos[0], space_pos[1] + 1]\n\n # down position\n if self.__is_valid(down_pos):\n down_array = [copy.copy(row) for row in current_array]\n down_board = Board(array=down_array, space=space_pos.copy())\n down_board.swap(down_pos)\n ans.append(State(board=down_board, came_from=state, move='U'))\n\n # up position\n if self.__is_valid(up_pos):\n up_array = [copy.copy(row) for row in current_array]\n up_board = Board(array=up_array, space=space_pos.copy())\n up_board.swap(up_pos)\n ans.append(State(board=up_board, came_from=state, move='D'))\n\n # right position\n if self.__is_valid(right_pos):\n right_array = [copy.copy(row) for row in current_array]\n right_board = Board(array=right_array, space=space_pos.copy())\n right_board.swap(right_pos)\n ans.append(State(board=right_board, came_from=state, move='L'))\n\n # left position\n if self.__is_valid(left_pos):\n left_array = [copy.copy(row) for row in current_array]\n left_board = Board(array=left_array, space=space_pos.copy())\n left_board.swap(left_pos)\n ans.append(State(board=left_board, came_from=state, move='R'))\n\n return ans", "def next_turn(self):\n if self.turn == BLUE and self.ai:\n self.ai_turn = True\n self.turn = RED\n elif self.turn == BLUE:\n self.turn = RED\n else:\n self.turn = BLUE\n\n self.selected_piece = None\n self.selected_legal_moves = []\n self.check_game_over()", "def step(self, action):\n x, y = self.state_to_coord(self.current_state)\n if action == self.actions['up']:\n possible_next_state = self.coord_to_state(x - 1, y)\n if x - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n elif possible_next_state in 
self.goal_states:\n result = possible_next_state, self.goal_reward, True\n else:\n result = possible_next_state, self.step_reward, False\n elif action == self.actions['right']:\n possible_next_state = self.coord_to_state(x, y + 1)\n if y + 1 >= self.columns or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['left']:\n possible_next_state = self.coord_to_state(x, y - 1)\n if y - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['down']:\n possible_next_state = self.coord_to_state(x + 1, y)\n if x + 1 >= self.rows or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n else:\n raise ValueError('Expected action value in {}, received {} in state {}'.\n format(self.actions, action, self.state_to_coord(self.current_state)))\n\n self.current_state = result[0]\n return result", "def play_game() -> None:\n board = tuple(tuple(0 for _ in range(i, i + 16))\n for i in range(0, 64, 16))\n state = GameState(board, 1)\n while state.util is None:\n # human move\n print(state.display)\n state = state.traverse(int(input(\"Move: \")))\n if state.util is not None:\n break\n # computer move\n find_best_move(state)\n move = (state.selected if state.selected != -1\n else random.choice(state.moves))\n state = state.traverse(move)\n print(state.display)\n if state.util == 0:\n print(\"Tie Game\")\n else:\n print(f\"Player {state.util} Wins!\")", "def get_random_start_state(self) -> State:\n if len(self.blocks) <= state_enumeration_limit:\n rnd = random.randint(0, len(self.allStates) - 1)\n return self.allStates[rnd]\n else:\n return self.generate_random_start_state()", "def __get_next_greedy_move(self, game_state): \n best_move = None\n best_score = None\n for free_seat in self.__get_free_seats(game_state):\n next_game_state_score = self.__get_score(game_state, free_seat)\n if best_score is None:\n best_score = next_game_state_score\n best_move = free_seat\n continue\n if next_game_state_score > best_score:\n best_score = next_game_state_score\n best_move = free_seat\n return best_move", "def get_current_state(self):\n return self.game.get_current_state()", "def getNextState(self, obs):\n return torch.tensor(obs, device=self.device, dtype=torch.float).unsqueeze(0)", "def get_game_state(self):\n return self.game_state", "def _get_state(self):\n start = self.design.first_unassigned_site\n return self.target.padded_encoding[\n start : start + 2 * self._env_config.state_radius + 1\n ]", "def find_best_move(state: GameState) -> None:", "def current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def 
current_state(self):\n\n square_state = np.zeros((4, self.width, self.height))\n if self.states:\n moves, players = np.array(list(zip(*self.states.items())))\n move_curr = moves[players == self.current_player]\n move_oppo = moves[players != self.current_player]\n square_state[0][move_curr // self.width,\n move_curr % self.height] = 1.0\n square_state[1][move_oppo // self.width,\n move_oppo % self.height] = 1.0\n # indicate the last move location\n square_state[2][self.last_move // self.width,\n self.last_move % self.height] = 1.0\n if len(self.states) % 2 == 0:\n square_state[3][:, :] = 1.0 # indicate the colour to play\n return square_state[:, ::-1, :]", "def choose(self, state: State) -> State:", "def human_opponent(state):\n print(state)\n while True:\n inp = input(\"What is your move? \\n\")\n if inp == 'pass':\n return len(state.valid_actions) - 1\n if inp == 'random':\n return random.randint(0, len(state.valid_actions) - 1)\n\n try:\n pos = [int(x) for x in inp.split()]\n action = pos[0]*state.board_size + pos[1]\n choice = state.valid_actions.index(action)\n return choice\n except:\n print(\"Invalid move {} try again.\".format(inp))", "def get_state(self):\n return STAR_STATES[self.state][0]", "def get_state(self):\n return STAR_STATES[self.state][0]", "def determine_game_state(self):\n if self.board == BLANK_BOARD:\n return GameState.GAME_NOT_STARTED\n\n # check for three of the same symbol across or down.\n for r in range(3):\n offset = r*3\n if self.board[offset] == self.board[offset+1] == self.board[offset+2]:\n if self.board[offset] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[offset] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n if self.board[r] == self.board[3 + r] == self.board[6 + r]:\n if self.board[r] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[r] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n\n # check for diagonal wins\n if ((self.board[0] == self.board[4] == self.board[8]) or\n (self.board[2] == self.board[4] == self.board[6])):\n if self.board[4] == X_SYMBOL:\n return GameState.GAME_OVER_X_WINS\n elif self.board[4] == O_SYMBOL:\n return GameState.GAME_OVER_O_WINS\n \n # check for tie.\n if not self.board.count(EMPTY_SYMBOL):\n return GameState.GAME_OVER_DRAW\n\n return GameState.GAME_IN_PROGRESS", "def debuggerstep(self, action):\n\n\n\n #Calculate actual Next State you are supposed to reach via action\n if action == 0:\n nxtState = (self.state[0] - 1, self.state[1])\n elif action == 1:\n nxtState = (self.state[0], self.state[1] - 1)\n elif action == 2:\n nxtState = (self.state[0], self.state[1] + 1) \n elif action == 3: \n nxtState = (self.state[0] + 1, self.state[1])\n\n \n #BUT YOU CAN ONLY REACH THERE WITH 80% PROBABIBILITY\n #Stocasticity Implementation\n correctMove = random.random() \n \n #Check if nextState to reach is valid, Redundant Check (Might have to remove in future iterations)\n if self.isValid(nxtState): \n #If you have a valid next state, you reach there with 80% probability\n if correctMove <= 0.8: \n \n print(\"Ended up in correct state taking action \", end = \"\")\n if (action == 0): \n print(\"Up\")\n elif (action == 1): \n print(\"Left\")\n elif (action == 2): \n print(\"Right\")\n elif(action == 3): \n print(\"Down\")\n self.state = nxtState\n self.isEnd()\n return nxtState, self.giveReward(), self.isTerminal\n \n #Else you didn't end up in right place\n else: \n \n print(\"Ended up in wrong state. 
Had to go \", end = \"\")\n if (action == 0): \n print(\"Up \", end = \"\")\n elif (action == 1): \n print(\"Left \", end = \"\")\n elif (action == 2): \n print(\"Right \", end = \"\")\n elif(action == 3): \n print(\"Down \", end = \"\")\n \n print(\"And end up in \", end = \"\")\n print(nxtState)\n \n print(\"But ended up in: \", end = \"\")\n \n \n #Find remaining states that can be possibly reached: \n altActions =[]\n \n if action == 0:\n altActions.append(1)\n altActions.append(2)\n elif action == 1: \n altActions.append(0)\n altActions.append(3)\n elif action == 2: \n altActions.append(0)\n altActions.append(3)\n else: \n altActions.append(1)\n altActions.append(2)\n \n \n #Pick one random of all possible next states\n altAction = random.choice(altActions)\n #Check if alternate possibility is valid \n if altAction == 0:\n nxtState1 = (self.state[0] - 1, self.state[1])\n elif altAction == 1:\n nxtState1 = (self.state[0], self.state[1] - 1)\n elif altAction == 2:\n nxtState1 = (self.state[0], self.state[1] + 1) \n elif altAction == 3: \n nxtState1 = (self.state[0] + 1, self.state[1])\n \n \n \n #If alternate possibility is valid, update values\n if self.isValid(nxtState1): \n print(nxtState1)\n \n #Update Values \n self.state = nxtState1\n self.isEnd()\n return nxtState1, self.giveReward(), self.isTerminal\n \n #If alternate possibility is not valid, then you stay in place\n else: \n #Stay in place \n print(self.state)\n print(\"Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal\n else: \n #Stay in place \n print(self.state)\n print(\"Invalid action picked, Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal", "def next_round(self):\n if self.finish_game == 3:\n self.restart_game()\n return\n\n atual_color = self.atual_player.color\n if self.board.valid_moves(atual_color).__len__() > 0:\n self.board.play(self.atual_player.play(self.board.get_clone()), atual_color)\n self.view.atualizar_discos()\n self.finish_game = 0\n else:\n self.finish_game += 1\n self.atual_player = self._opponent(self.atual_player)\n\n self.view.atualizar_jogador_atual(self.atual_player.color)\n\n if self.finish_game == 2:\n self._end_game()", "def get_game_state(self):\r\n return self._game_state", "def get_action(self, state):\n self.visited = {}\n utility = -inf\n move = 'STOP'\n\n # We choose the successor with the maximum utility\n for successor in state.generatePacmanSuccessors():\n maxPlayer = True\n score = self.alphabeta(successor[0], -inf, +inf, maxPlayer)\n if utility < score:\n move = successor[1]\n utility = score\n\n # If there's no winning state, we try to to move farther from the ghost\n if utility == -inf:\n dist = -inf\n for successor in state.generatePacmanSuccessors():\n newDist = self.distanceFromGhost(successor[0])\n if not successor[0].isLose() and newDist > dist:\n move = successor[1]\n dist = newDist\n print(utility)\n return move", "def play(self, sims=1):\n #sims = min(sims, len(self.legal_moves() ))\n #update policies via network simulations\n self._simulate(sims)\n # pick a move based on improved policy\n # next_state = choice(self.children, p=self.pi[self.env.legal_moves() ])\n next_state = max(self.children, key=lambda x: x.U)\n return next_state", "def _run_one_game(self):\n sum_reward = 0\n done = False\n state = torch.tensor(self.env.reset(), device=device).view(1, -1)\n losses = list()\n\n while not done:\n\n # Choose action in function of observation and play it\n action = self._select_action(state)\n 
next_state, reward, done, _ = self.env.step(action.item())\n\n sum_reward += reward\n next_state = torch.tensor(next_state, device=device).view(1, -1)\n reward = torch.tensor([reward], device=device)\n done = torch.tensor([done], device=device)\n \n # Add transition to memory\n self._add_to_memory(state, action, next_state, reward, done)\n\n # Compute loss\n loss = self._optimize_model()\n losses += [loss]\n \n # Prepare next state\n state = next_state\n\n # Wait time_to_sleep second so the user can view the state\n sleep(self.time_to_sleep)\n \n\n return sum_reward, mean(losses)", "def greedy_next_action(self, state):\n max_val = float('-inf')\n if self.verbose:\n cells = []\n max_candidates = {}\n for i in range(3):\n for j in range(3):\n if state[i][j] == VALUES.EMPTY:\n val = self.q_value((state, (i, j)))\n if val >= max_val:\n max_val = val\n max_move = (i, j)\n max_candidates[max_move] = val\n if self.verbose:\n cells.append('{0:.3f}'.format(val).center(6))\n elif self.verbose:\n cells.append(state[i][j].center(6))\n if self.verbose:\n self.logger.info(BOARD.format(*cells))\n possible_actions = [k for k, v in max_candidates.items() if v == max_val]\n action = random.choice(possible_actions) if len(possible_actions) > 0 else None\n return action", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def get_state(self):\n return PLANET_STATES[self.state][0]", "def transition(board, player, action):\n if _ENV.is_valid((board, player), action):\n new_board, __ = _ENV.get_next_state((board, player), action)\n return new_board\n return None" ]
[ "0.81304115", "0.8000824", "0.7832929", "0.78188527", "0.7760601", "0.7588468", "0.75123686", "0.7505072", "0.7259888", "0.72233593", "0.7170211", "0.7106312", "0.70742047", "0.7022712", "0.7002653", "0.69881517", "0.6954046", "0.69376624", "0.69316417", "0.69316417", "0.6925038", "0.6904523", "0.6904161", "0.68728095", "0.6872426", "0.68350387", "0.6829118", "0.68191004", "0.6802731", "0.6784289", "0.6774061", "0.67626053", "0.67487884", "0.6747725", "0.6745463", "0.67267454", "0.672088", "0.6693717", "0.6692326", "0.6603686", "0.6595889", "0.6575592", "0.6563973", "0.65632397", "0.6560087", "0.65598845", "0.6514348", "0.6512255", "0.64995587", "0.6499321", "0.6475687", "0.64708763", "0.646382", "0.6452127", "0.64497936", "0.64459825", "0.6442276", "0.64299726", "0.64007354", "0.63807726", "0.6374784", "0.635807", "0.6345256", "0.63331246", "0.6329544", "0.6327379", "0.63264227", "0.6323907", "0.63149077", "0.63141835", "0.6289535", "0.62883425", "0.6286356", "0.62763387", "0.62762016", "0.62687206", "0.62545866", "0.6254486", "0.6250853", "0.62348026", "0.62322694", "0.6228262", "0.6223792", "0.62106174", "0.62106174", "0.6206858", "0.61904097", "0.617927", "0.617927", "0.61755", "0.6171369", "0.6156985", "0.6143115", "0.6139929", "0.61377734", "0.6133865", "0.6132301", "0.61316615", "0.61316615", "0.6130288" ]
0.6383667
59
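The record above pairs the query "Returns the next state of the game" with a sparse Conway's-Game-of-Life step that examines the 3x3 neighborhood of every live cell. Below is a minimal self-contained sketch of that same rule, offered only as an illustration under stated assumptions: the names next_generation and Coord are mine, and the record's Cell objects and _should_cell_live helper are replaced by plain (x, y) tuples with the standard survival test written inline.

from typing import Set, Tuple

Coord = Tuple[int, int]

def next_generation(living: Set[Coord]) -> Set[Coord]:
    """Return the next Game-of-Life generation for a set of live coordinates."""
    # Only cells in the 3x3 neighborhood of a live cell can change state,
    # so those are the only candidates worth examining (as in the record).
    candidates = {
        (x + dx, y + dy)
        for (x, y) in living
        for dx in (-1, 0, 1)
        for dy in (-1, 0, 1)
    }
    nxt: Set[Coord] = set()
    for cell in candidates:
        x, y = cell
        # Count live neighbors, excluding the cell itself.
        neighbors = sum(
            (x + dx, y + dy) in living
            for dx in (-1, 0, 1)
            for dy in (-1, 0, 1)
            if (dx, dy) != (0, 0)
        )
        # Standard rules: birth on exactly 3 neighbors, survival on 2 or 3.
        if neighbors == 3 or (neighbors == 2 and cell in living):
            nxt.add(cell)
    return nxt

# Usage: a "blinker" oscillates between a horizontal and a vertical bar.
blinker = {(0, 1), (1, 1), (2, 1)}
assert next_generation(blinker) == {(1, 0), (1, 1), (1, 2)}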
Prints the current state in the console
def display_cli(self) -> None: if len(self.living_cells.keys()) == 0: print('.') return min_x, min_y = math.inf, math.inf max_x, max_y = -math.inf, -math.inf for x, y in self.living_cells.keys(): min_x = min(min_x, x) min_y = min(min_y, y) max_x = max(max_x, x) max_y = max(max_y, y) for y in range(min_y, max_y + 1): chars = "" for x in range(min_x, max_x + 1): chars += '*' if (x, y) in self.living_cells.keys() else '.' print(chars) print()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_state(self):\n print('\\nthe current state is: ' + str(self.state) + '\\n')", "def display_state(self):\r\n\r\n print('\\n')\r\n print('>>CURRENT STATE')\r\n ct = 0\r\n for i in self.state:\r\n for j in i:\r\n if j == -1:\r\n val = 'X'\r\n else:\r\n val = str(ct)\r\n if len(val) == 1:\r\n print(' ' + val + ' ', end='')\r\n else:\r\n print(val + ' ', end='')\r\n ct += 1\r\n print('\\n')", "def show_state(self):\n print \"I don't know how to show_state.\"", "def show_state(self):\n print(\"I don't know how to show_state.\")", "def print_state():\n global simulator\n if simulator is None:\n print \"program is not started\"\n return\n print simulator.state()", "def print_state(self):\n\t\tprint self.time, len(self.state['s']), len(self.state['p']), len(self.state['c'])", "def print_state(self):\n raise AIError(\"Must be implemented in child class!\")", "def print_state(self):\n print(self.identifier, \n self.gender, \n self.age,\n self.sexual_activity,\n self.disease_status,\n self.time_since_infection,\n self.number_of_partners,\n self.current_partners)", "def display_state_cmd(self):\n self.__display(self.state)", "def print_state(self):\n print(self.type,\n self.persons[0].identifier,\n self.persons[1].identifier)", "def printState(self,board):\n self.printBoard(board.getBoard())\n self.printScore(board,board.getScore())", "def showState(self):\n for i in self.state[0]:\n for j in self.state[1]:\n print(self.table[i][j], end=\"\")\n print(\"\")", "def state_print_do(cfg, app, win, events):", "def print_state(state,indent=4):\n if state != False:\n for (name,val) in vars(state).items():\n if name != '__name__':\n for x in range(indent): sys.stdout.write(' ')\n sys.stdout.write(state.__name__ + '.' + name)\n print(' =', val)\n else: print('False')", "def print_state(self):\n print(\"n\\tg\\to\\ta\\tc\\ts\\ttau\\td\\tN\")\n for p in self.persons:\n p.print_state()\n print(\"type\\tpersons\")\n for ps in self.partnerships:\n ps.print_state()", "def state_print_enter(cfg, app, win):", "def display_state(self):\n # self.__display(self.state)\n self.__draw(self.state)", "def _print(self):\n self.i += 1\n np.set_printoptions(precision=3, suppress=True)\n if self.i%40 == 0:\n self.i = 0\n print self.ekf.current_state_estimate[4:7]", "def print_state(self: Q, label: str = \"\", spacer: bool = True, quiet: bool = True) -> None:\n\n print(label)\n\n print(self.__str__(quiet))\n\n if spacer:\n print(\"\")", "def print(self):\n print(\"State : {} \\nDensity : {} \\nVelocity : {} \\nPressure : {} \\nTemperature : {} \\nSound speed : {}\\n\".format(self.name, self.rho, self.velocity, self.pressure, self.T, self.c))", "def __print_node_state(self):\n if self.node is None:\n return\n node_info = {}\n\n print()\n\n print(\"State Name: {}\".format(self.getStateId()))\n\n print(\"Node: {}\".format(self.node))\n\n node_info = self.getNodeInfo(self.node)\n node_json_data = json.dumps(node_info, sort_keys=True, indent=2)\n print(node_json_data)", "def debug(self):\r\n print(\"_,.-^ DEBUG ^-.,_\")\r\n print(\"state = %s %s\"%(self.state, self.error))\r\n print(\"\".join(self.memory))\r\n print ((self.pointer*\" \")+\"^\")\r\n print(\"PROGRAM\")\r\n print(\" {:16}{:7}{:7}{:7}{:16}\".format(\"State\", \"symbol\", \"write\", \"move\", \"new_state\"))\r\n for row in self.program:\r\n if row.state == self.state and row.symbol == self.memory[self.pointer]:\r\n print(\">\", end=\"\")\r\n else:\r\n print(\" \", end=\"\")\r\n print(row)", "def printStep(self):\n\n\t\tprint '\\nConfiguracao da fita: 
',\n\n\t\tcount = 0\n\t\twhile count < len(self.tape):\n\t\t\tif count == self.currentPos:\n\t\t\t\tprint '_',\n\n\t\t\tprint self.tape[count],\n\t\t\tcount += 1\n\n\t\tprint '\\nEstado atual: ', self.currentState", "def print_out():\n pass", "def print_state(X):\n out = ''\n for coord in range(18):\n out += \"{0}\".format(STATE_VARS[coord])\n val = float(X[coord])\n out += \" {0: 2.4e}\\n\".format(val)\n\n print out", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def state(self) -> str:", "def print_state(id=None):\n data = storage.all(\"State\")\n return render_template('9-states.html', states=data, id=id)", "def status(s: str):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def fullDebug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n print(\"\\n*Current Input:\", CurrentInput)\r\n print(\"*Current State: \", CurrentState)\r\n print(\"\\n*Response Options: \", RESPONSEOPTIONS)\r\n print(\"___________________________\")", "def print_cpu_state(self):\n print(\"PC:\", hex(self.pc))\n print(\"SP:\", hex(self.sp))\n print(\"A:\", hex(self.a))\n print(\"X:\", hex(self.x))\n print(\"Y:\", hex(self.y))\n print(\"P:\", bin(self.p))", "def print(self):\n print(\" a b c d e f g h \")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n for row in range(8, 0, -1):\n pieces = \" │ \".join(self.state[row - 1])\n print(f\"{row} │ {pieces} │ {row}\")\n print(\" ┼───┼───┼───┼───┼───┼───┼───┼───┼\")\n print(\" a b c d e f g h \")", "def debug():\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n for state in RESPONSEOPTIONS:\r\n score = calcTotalScore(state, CurrentInput, CurrentState)\r\n print(state.id + \": \" + str(score) + \" ,\", end=\"\")\r\n print(\"\\n___________________________\")", "def printStatus(self, status):\n \"\"\" This probably breaks SOI \"\"\"\n\n if hasattr(self, 'show_state') and self.show_state:\n print(\"=============== \" + str(status[\"count\"]) + \" ===============\")\n print(\"Current best energy: \" + str(status[\"bestEnergy\"]) + \" from state: \" + str(status[\"bestState\"]))\n print(\"last accepted energy: \" + str(status[\"energy\"]) + \" from state: \" + str(status[\"state\"]))\n print(\"current temperature: \" + str(status[\"temperature\"]))\n else:\n print(\"=============== \" + str(status[\"count\"]) + \" ===============\")\n print(\"Current best energy: \" + str(status[\"bestEnergy\"]) )\n print(\"last accepted energy: \" + str(status[\"energy\"]) )\n print(\"current temperature: \" + str(status[\"temperature\"]))", "def print_status(self):\r\n\t\tif VERBOSE:\r\n\r\n\t\t\tprint( 'Player : ')\r\n\t\t\tfor h in self.hands:\r\n\t\t\t\tprint('\\t' + str(h))\r\n\t\t\tprint( 'Dealer:\\n\\t' + str(self.dealer))\r\n\t\t\tprint( '-----------------------')", "def __str__(self):\n # Build the string line by line. 
Join at the end.\n lines = []\n lines.append(\"Initial State: {{{}}}\".format(self.initial_state))\n lines.append(\n \"Final States: {{{}}}\".format(\n \",\".join(map(str, self.final_states))))\n\n # column headers\n lines.append(\n \"State\\t{}\".format(\"\\t\".join(self.alphabet)))\n\n # For each state, print transitions\n for state_name in range(1, len(self.transitions) + 1):\n line = \"{}\".format(state_name)\n for symbol in self.alphabet:\n line += \"\\t{{{}}}\".format(\n \",\".join(map(str, self.transitions.get(\n state_name, dict()).get(symbol, []))))\n lines.append(line)\n\n return \"\\n\".join(lines)",
"def show(self):\n\n print(self._walk(self, depth=1))",
"def display(self):\n for r in range(1, self.size+1):\n print(\"+\" + (\"-+\"*self.size))\n print(\"|\", end=\"\")\n for c in range(1, self.size+1):\n print(self.gameState[r,c], end=\"\")\n print(\"|\",end=\"\")\n print()\n print(\"+\" + (\"-+\"*self.size))",
"def status(s):\n print(\"\\033 {}\".format(s))#print(\"\\033[1m{0}\\033[0m\".format(s))",
"def debug(state, message):\n if state:\n print(message)",
"def __repr__(self):\r\n r = str(self.current_instance_state())\r\n return r",
"def __str__(self):\n return ''.join(str(e) + ' ' for e in self.state)",
"def print_state(self):\n grid = [[\".\" for _ in range(self.width)] for _ in range(self.height)]\n #icons = [\"^\", \"/\", \">\", \"\\\\\", \"|\", \"/\", \"<\", \"\\\\\"] # NON-UNICODE, uncomment if problems\n icons = [chr(0x2191), chr(0x2197), chr(0x2192), chr(0x2198), \\\n chr(0x2193), chr(0x2199), chr(0x2190), chr(0x2196)]\n for robot in self.robots:\n grid[robot[1]][robot[0]] = icons[(robot[2]+robot[3]) % 8]\n for item in self.items:\n if item[2] == 1:\n grid[item[1]][item[0]] = \"O\"\n elif item[2] == 2:\n grid[item[1]][item[0]] = \"*\"\n print(\"-\"*(self.width+2))\n for i in range(self.height):\n print(\"|\", end=\"\")\n for j in range(self.width):\n print(grid[i][j], end=\"\")\n print(\"|\")\n print(\"-\"*(self.width+2))",
"def log_state(self):\n rospy.loginfo(\"STATE: %s [%s]\" %(self.__class__.__name__, 15 - self.ros_node.get_time()))",
"def display(self):\n\t\tpointer = self.head\n\t\twhile pointer != None:\n\t\t\tprint pointer.state + \"\\t\" + pointer.info\t\n\t\t\tpointer = pointer.next",
"def debug_print(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n\n print('\\nPosition')\n print(self.tetromino.position())\n print('\\nBlock coordinates')\n print(self.tetromino.block_coordinates())\n print('\\nBoard')\n print(self.board)\n print('\\nBoard heights')\n print(self.board.get_height())\n\n if self.pause:\n print('\\nPaused')",
"def text_output(self):\n print(self.board)\n print()",
"def print_all_states(self): # pragma: no cover\n all_matches = self._get_matches()\n print 'all matches'\n for state, f in all_matches:\n print ' %s: %s' % (state, f.func_name)",
"def say(self, bot):\n bot.say(\n _(u\"Current state: {} - {}\").format(self.label, self.message))",
"def print_menu():\r\n print(\"==============================================\")\r\n print(\"What do you want to do now? \")\r\n print(\"==============================================\")\r\n print(\"Available options:\")\r\n i = 1\r\n for a in available_actions:\r\n if current_state in a[\"valid_states\"]:\r\n # Only hint about the action if the current state allows it\r\n print(\" %i) %s\" % (i, a[\"description\"]))\r\n i += 1\r\n print()",
"def report_state(self):\n text = \"Status: %d\"%self.state.num;\n if self.state.msg !=\"\":\n text += \", Msg: %s\"%self.state.msg;\n return text;",
"def print_state(self,f, turn=None):\n\n if turn != None:\n print(\"\\n------ Turn \" + str(turn) + \" ------\")\n\n self.board.print_board(f)\n print('\\n CURR_PLAYER: {} NEXT_MOVE: {} NEXT_PIECES: {} \\n'.format(self.curr_player,self.next_move,self.next_pieces,self.message),file=f)",
"def main():\n print \"Printing Sample Status\"",
"def display(self):\n print(self)",
"def displayInfo(self):\n # clear stdout for a smoother display\n # os.system('cls' if os.name=='nt' else 'clear')\n\n #print(\"=========== Status ============\")\n # print(\n # \"speed: \" + str(self.speed) +\n # \"\\nangle: \" + str(self.steering_angle) +\n # \"\\nsign: \" + str(self.detected_sign) +\n # \"\\nlane lines: \" + str(self.lane_lines) +\n # \"\\nintersection line flag: \" + str(self.intersection_line) +\n # \"\\ncurrent state label: \" + str(self.currentStateLabel) +\n # \"\\ncurrent states: \" + str(self.currentState)\n #)",
"def print_state(self: Qss, label: str = \"\", spacer: bool = True, quiet: bool = True) -> None:\n\n\n # Warn if empty.\n if self.qss is None or len(self.qss) == 0:\n raise ValueError(\"Oops, no quaternions.\")\n \n print(label)\n \n for qs in self.qss:\n qs.print_state()\n \n if spacer:\n print(\"\")",
"def __repr__(self, state):\n print ' ',\n for w in range(len(state)+2):\n print \"___\",\n print '\\n'\n for x in state:\n print \"| \", x, \" |\"\n print ' ',\n for y in range(len(state)+2):\n print \"___\",\n print '\\n'\n return state",
"def __repr__( self ):\n\n return self.__class__.__name__ + \"( \" + repr(self.state) + \")\";",
"def current_state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"current_state\")",
"def print(self):\n print(self.pretty_str())",
"def stateString(self):\n return self._mdp.stateString(self._cur_state);",
"def render (self, mode=\"human\"):\n print(\"location:\", self.state)",
"def print_status(self):\n print \"Zombie has\" + super(Zombie, self).print_status()",
"def printToTerminal(self, state):\n\n sender = self.sender()\n if state == Qt.Checked:\n print(\"{} Selecionado.\".format(sender.text()))\n else:\n print(\"{} Recusado.\".format(sender.text()))",
"def p(self):\n self.printstdout = True",
"def print_outcome(self) -> None:\n pass",
"def _print_status(self):",
"def enter_state(self):\r\n self.__log__(logging.debug)\r\n return",
"def print_moves(self):\n print self._current_moves\n self._current_moves = \"\"",
"def printBoard(self):\n\t\tkey = [' ', 'X', 'O']\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[0][0]] + ' | ' + key[self.state[0][1]] + ' | ' + key[self.state[0][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[1][0]] + ' | ' + key[self.state[1][1]] + ' | ' + key[self.state[1][2]])\n\t\tprint(' | |')\n\t\tprint('-----------')\n\t\tprint(' | |')\n\t\tprint(' ' + key[self.state[2][0]] + ' | ' + key[self.state[2][1]] + ' | ' + key[self.state[2][2]])\n\t\tprint(' | |')",
"def print_level():\n print(\"\")\n\n def show_hide_word(word):\n \"\"\"show/hide finished/unfinished words\"\"\"\n if word not in current_level.finished_words:\n return \"*\" * len(word)\n return word\n\n current_level.layout.print_layout(\n show_hide_word,\n # Print unfinished words first with '*'\n set(current_level.words) - set(current_level.finished_words),\n )\n\n # level state\n print(\"\")\n print(\"Level: %d/%d\" % (current_level_index + 1, len(all_levels)))\n if current_level.bonus_words:\n bonus_words_status = \"Bonus words: %d/%d\" % (\n len(current_level.finished_bonus_words),\n len(current_level.bonus_words)\n )\n bonus_words_status += \" %s\" % \" \".join(\n change_case(word)\n if word in current_level.finished_bonus_words\n else \"*\" * len(word)\n for word in current_level.bonus_words\n )\n print(bonus_words_status)\n\n # characters\n print(\"\")\n print(\"Chars: %s\" % \" \".join(change_case(char) for char in current_level.chars))\n print(\"\")",
"def display(self):\n print(str(self))",
"def state(self):\r\n return str(self)",
"def print_history(self):\n self.game_started = False\n for state in self.history:\n self.__draw_board(state)",
"def debug(state: bool, /) -> None:",
"def Print(self):\n print(self.__dict__)",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def state(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"state\")",
"def status(self):\n\n for index, x in enumerate(self.lot):\n print('|', end='')\n for spot, value in enumerate(x):\n if value == 1:\n print(\"|\", end='')\n if value == 2:\n print(\" |\", end='')\n if value == 3:\n print(\" |\", end='')\n if value == -1:\n print(\"X|\", end='')\n if value == -2:\n print(\"XXX|\", end='')\n if value == -3:\n print(\"XXXXX|\", end='')\n print()",
"def display(self):\n ob = self._convert_state(self._env.reset())\n done = False\n while not done:\n ac, _ = self._act(ob, stochastic=False)\n ob, rew, done, _ = self._env.step(ac)\n ob = self._convert_state(ob)\n self._env.render()\n self._env.close()",
"def menu(self):\n print(f\"{str(self)}\")",
"def printOutput(self):\n pass",
"def show(self):\n print highlight(self.current_content, self.lexer(), Formatter())",
"def print(self):\n\n print(self)" ]
[ "0.88597125", "0.8146921", "0.7986526", "0.7926085", "0.7904914", "0.7863573", "0.7799375", "0.7672214", "0.7600998", "0.7504073", "0.750387", "0.73954594", "0.7305813", "0.7303963", "0.724449", "0.71872765", "0.71179473", "0.7104623", "0.7040808", "0.70034736", "0.6951544", "0.6908845", "0.68968594", "0.6882716", "0.6876615", "0.6859662", "0.6859662", "0.6859662", "0.6859662", "0.6853672", "0.6853672", "0.6853672", "0.6853672", "0.68362886", "0.68327457", "0.6828354", "0.6818999", "0.68120074", "0.67754155", "0.6775207", "0.67687947", "0.67245847", "0.67164457", "0.6715019", "0.6688922", "0.66518265", "0.6637262", "0.66174436", "0.66055924", "0.6591886", "0.6587744", "0.65870017", "0.6577743", "0.6568766", "0.65619123", "0.65596765", "0.6543229", "0.6534844", "0.6527865", "0.65092826", "0.65058815", "0.65051097", "0.65050006", "0.6504483", "0.6497144", "0.6468855", "0.6457057", "0.6453832", "0.64305633", "0.64203787", "0.64083445", "0.6406854", "0.63960403", "0.6395997", "0.638459", "0.63714373", "0.6332569", "0.6317274", "0.6314187", "0.63102216", "0.6309679", "0.6304087", "0.6293944", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.6287116", "0.628265", "0.6271244", "0.62598056", "0.62570494", "0.6252321", "0.62505263" ]
0.0
-1
This program plays the hangman game
def main():
    # initial condition
    ans = random_word()
    remaining_guess_num = N_TURNS
    guess_word = ''
    for i in range(len(ans)):
        guess_word += '-'
    # start to play hangman game
    while (remaining_guess_num > 0) and (guess_word != ans):
        print('The word looks like: ' + str(guess_word))
        print('You have ' + str(remaining_guess_num) + ' guesses left.')
        input_ch = str(input('Your guess: '))
        # illegal format
        if not input_ch.isalpha():
            print('illegal format.')
        elif len(input_ch) != 1:
            print('illegal format.')
        # correct format
        else:
            # case-insensitive
            input_ch = input_ch.upper()
            # wrong guess
            if ans.find(input_ch) == -1:
                print('There is no ' + str(input_ch) + '\'s in the word.')
                remaining_guess_num -= 1
            # correct guess
            else:
                print('You are correct!')
                ans_slice = ans
                # replace all the correct guessed letter(s)
                while ans_slice.find(input_ch) != -1:
                    replace_loc = len(ans) - len(ans_slice) + ans_slice.find(input_ch)
                    guess_word = replace_letter(input_ch, replace_loc, guess_word)
                    ans_slice = ans_slice[ans_slice.find(input_ch)+1:]
    # win
    if guess_word == ans:
        print('You win!!')
    # lose
    else:
        print('You are completely hung : (')
    print('The word was: ' + str(ans))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n game = Hangman()\n game.play_hangman()", "def play_hangman(self):\n while self.stage < 6:\n self.display_hangman()\n guess = input(f'{Fore.YELLOW}Choose a letter: {Style.RESET_ALL}').lower().strip() # noqa\n print('\\n')\n if guess.isalpha() and len(guess) == 1:\n if guess not in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.RED}{guess} is not in the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_letters.append(guess)\n elif guess.isalpha() and guess in self.word:\n if guess in self.guessed_letters:\n print(f'You already guessed {guess}, try again')\n print('\\n')\n else:\n print(f'{Fore.GREEN}{guess} is in the word!{Style.RESET_ALL}') # noqa\n print('\\n')\n self.guessed_letters.append(guess)\n # code for replacing dashes with letters adapted from # noqa\n # https://github.com/kiteco/python-youtube-code/blob/master/build-hangman-in-python/hangman.py\n word_as_list = list(self.progress)\n indices = [i for i, letter in enumerate(self.word) if letter == guess] # noqa\n for index in indices:\n word_as_list[index] = guess\n self.progress = \"\".join(word_as_list)\n if \"-\" not in self.progress:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess == self.word:\n print(f'{Fore.GREEN}Congrats! You correctly guessed the answer: {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.games_won += 1\n break\n\n elif guess.isalpha() and guess not in self.word and guess in self.guessed_words: # noqa\n print(f'You already guessed {guess}, try again')\n print('\\n')\n\n elif guess.isalpha() and guess not in self.word and guess not in self.guessed_words: # noqa\n print(f'{Fore.RED}{guess} is not the word, try again{Style.RESET_ALL}') # noqa\n print('\\n')\n self.stage += 1\n self.guessed_words.append(guess)\n print('\\n')\n else:\n print('Invalid input \\n')\n if self.stage >= 6:\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(f'{Fore.RED}Game Over! The word was {self.word}{Style.RESET_ALL}') # noqa\n print('\\n')\n self.play_again()", "def play_hangman(self) -> None: \n tries=6\n current_word=self.get_word()\n guessed_word = False\n word_hidden_states = [current_word[indx] for indx in sample(range(0, len(current_word)-1), randint(1, len(current_word)-2))]\n word_completion_state = [letter if letter not in word_hidden_states else \"_\" for letter in current_word]\n\n while tries > 0 and not guessed_word: \n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n self.display_state(tries,word_completion_state)\n guessed_char=str(input(\"Guess a Character : \")).upper()\n\n if guessed_char in word_hidden_states :\n print(\"\\nCorrect Guess !!!!!! Updating..........\")\n for indx,_ in enumerate(word_completion_state) : \n if guessed_char == current_word[indx]:\n word_completion_state[indx]=guessed_char\n \n word_hidden_states = [char for char in word_hidden_states if char != guessed_char]\n guessed_word = False if \"_\" in word_completion_state else True\n sleep(5)\n else :\n print(\"\\nIncorrect Guess!!! 
Updating!!!!!!\")\n sleep(5)\n tries=tries-1\n \n if tries == 0 and not guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-1] + \"\\n\")\n print(f\"No Tries Remaining , YOU LOST !!!!!\")\n print(f\"CORRECT WORD was ------> {current_word}\")\n print(f\"GAME OVER\")\n \n if guessed_word:\n os.system('cls' if os.name == 'nt' else 'clear') ## Clear the terminal for new lines to be printed\n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-tries] + \"\\n\")\n print(f\"YOU GUESSED THE WORD CORRECTLY !!!\")\n print(f\"WORD was ------> {current_word}\")\n print(f\"Congratulations You win\")", "def main():\n word = random_word()\n attempt_left = N_TURNS\n ans = intro(word, attempt_left)\n while attempt_left != 0:\n hangman_figure(attempt_left)\n ans, attempt_left = hangman(word, ans, attempt_left)\n if ans == word: # if players had guess the word correctly\n print('You are correct!')\n print('You win!!')\n print('The word was: ' + word)\n break\n else:\n print('The word looks like: ' + ans)\n if attempt_left == 0: # players failed to guess the word correctly\n hangman_figure(attempt_left)\n print('You are completely hung : (')", "def hangman(l, b, i):\n def new(l, b, i):\n global game\n if game is not None:\n b.l_say('There is already a hangman game running.', i, 0)\n return True\n diff = 'medium'\n if len(i.args) > 1:\n if i.args[1] not in ['easy', 'medium', 'hard', 'boss']:\n b.l_say('Please choose easy, medium or hard difficulty.', i, 0)\n return True\n diff = i.args[1]\n game = Hangman(b, diff)\n game.print_phrase()\n\n def guess(l, b, i):\n global game\n if game is None:\n b.l_say('There is no hangman game at the moment.', i, 0)\n b.l_say('Start one with %s.hangman new' % BOLD, i, 0)\n return True\n if len(i.args) > 1:\n if len(i.args[1]) > 1:\n if game.guess_phrase(' '.join(i.args[1:])):\n game.print_phrase()\n b.l_say('%s %s wins! +%d points.' % (game.format, i.nick, game.reward), i, 2)\n i.user.add_points(game.reward)\n b.l_say('You now have %d points.' % i.user.get_points(), i, 0)\n game = None\n else:\n game.remove_life()\n b.l_say('%s %s guessed wrong! %d lives left.' % (game.format, i.nick, game.lives), i, 2)\n else:\n result = game.guess_letter(i.args[1])\n if result == 'winner':\n game.print_phrase()\n b.l_say('%s %s wins! +%d points.' % (game.format, i.nick, game.reward), i, 2)\n i.user.add_points(game.reward)\n b.l_say('You now have %d points.' % i.user.get_points(), i, 0)\n game = None\n elif result:\n game.print_phrase()\n else:\n game.remove_life()\n b.l_say('%s %s guessed wrong! %d lives left.' 
% (game.format, i.nick, game.lives), i, 2)\n\n return True\n b.l_say('You need to guess something...', i, 0)\n\n def info(l, b, i):\n global game\n if game is None:\n b.l_say('There is no hangman game at the moment.', i, 0)\n b.l_say('Start one with %s.hangman new' % BOLD, i, 0)\n return True\n game.print_phrase()\n\n try:\n exec ('%s(l, b, i)' % i.args[0]) in globals(), locals()\n except Exception, e:\n traceback.print_exc()\n info(l, b, i)\n return True", "def run_single_game(words_list):\r\n word = hangman_helper.get_random_word(words_list) #random word\r\n pattern = len(word)*'_'\r\n wrong_guess_lst= list()\r\n error_count=0\r\n msg= hangman_helper.DEFAULT_MSG\r\n ask_play=False\r\n while error_count < hangman_helper.MAX_ERRORS and '_' in pattern:\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)\r\n user_input = hangman_helper.get_input()\r\n does_letter = if_letter(user_input[1]) #if the input is letter or not\r\n if user_input[0] == hangman_helper.HINT:\r\n filter_list= filter_words_list(words_list,pattern,wrong_guess_lst)\r\n filter_1 = choose_letter(filter_list,pattern)\r\n msg = hangman_helper.HINT_MSG+filter_1\r\n else:\r\n if len(user_input[1])!=1 or does_letter==False:\r\n msg= hangman_helper.NON_VALID_MSG\r\n elif user_input[1] in wrong_guess_lst or user_input[1] in pattern:\r\n msg= hangman_helper.ALREADY_CHOSEN_MSG+user_input[1]\r\n elif user_input[1] in word:\r\n pattern = update_word_pattern(word, pattern, user_input[1])\r\n msg = hangman_helper.DEFAULT_MSG\r\n else:\r\n error_count+=1\r\n msg=hangman_helper.DEFAULT_MSG\r\n wrong_guess_lst.append(user_input[1])\r\n if '_' in pattern:\r\n ask_play = True\r\n msg = hangman_helper.LOSS_MSG + word\r\n else:\r\n ask_play = True\r\n msg = hangman_helper.WIN_MSG\r\n hangman_helper.display_state(pattern, error_count, wrong_guess_lst, msg, ask_play)", "def hangman():\n\n secret = random.choice(words) # select a random word from the words list\n while '-' in secret or ' ' in secret: # ensures that the words from the list dont have spaces or -'s\n secret = random.choice(words)\n\n wrong_guess = 8\n guesses = []\n done = False\n\n while not done:\n for letter in secret: # will print a correct guess and a - for a missing letter\n if letter.lower() in guesses:\n print(letter, end=\" \")\n else:\n print(\"-\", end=\" \")\n print(\"\")\n\n guess = input(f\"You have {wrong_guess} guesses left. Pick a letter: \")\n guesses.append(guess.lower()) # places correct letter into guess list\n if guess.lower() not in secret.lower():\n wrong_guess -= 1 # if guess is wrong lose a life\n if wrong_guess == 0:\n break\n\n done = True\n for letter in secret:\n if letter.lower() not in guesses:\n done = False\n\n if done:\n print(f\"You guessed the word. It was {secret}\")\n play_again = input(\"Would you like to play again? Enter 'y' for yes or 'n' for no\")\n if play_again == \"y\":\n hangman()\n else:\n print(\"Thanks for playing\")\n\n else:\n print(f\"You lose. The word was {secret}\")\n play_again = input(\"Would you like to play again? 
Enter 'y' for yes or 'n' for no: \")\n if play_again == \"y\":\n hangman()\n else:\n print(\"Thanks for playing\")", "def main():\n\n # variable for pictures of hangman, start from 0 to 6 used in function print_hangman\n num_of_tries = 0\n # all letters that user guesses\n old_letters_guessed = []\n # print welcome screen\n\n hangman_game_start_logo()\n\n # ask from user to enter file path with words + index to choose word\n file_path = input(\"Enter file path:\")\n index = int(input(\"Enter index (only numbers):\"))\n # will chose word from file\n secret_word = choose_word(file_path, index)\n len_word = len(secret_word)\n # create secret word of like that: _ _ _ _\n guess = (\"_ \" * len_word)\n\n print(\"\\nLet's start!\\n\")\n print_hangman(num_of_tries)\n print(guess)\n\n while check_win(secret_word, old_letters_guessed) is False:\n letter_guess = input(\"\\nGuess a letter:\")\n letter_guess = letter_guess.lower()\n\n # check if it's correct input\n if check_valid_input(letter_guess, old_letters_guessed) is False:\n try_update_letter_guessed(letter_guess, old_letters_guessed)\n\n else:\n if try_update_letter_guessed(letter_guess, old_letters_guessed) is False:\n pass\n else:\n if new_letter_incorrect(letter_guess, secret_word) is False:\n num_of_tries += 1\n print_hangman(num_of_tries)\n print(show_hidden_word(secret_word, old_letters_guessed))\n\n if MAX_TRIES == num_of_tries:\n print(\"LOSE\")\n break\n\n if check_win(secret_word, old_letters_guessed) is True:\n print(\"\\nWIN!\")\n\n # After the game is finished, ask if user want to play more\n answer = input(\"\\nplay again ? input 'yes' or 'Enter key' to Exit:\")\n if answer == \"yes\" or answer == \"y\":\n main()\n else:\n print(\"\\n*** GAME OVER *** \")\n time.sleep(3)", "def main():\n\n # after first round it will ask if want to change word list\n first_round = True\n\n # 1.\n print_game_logo()\n\n # will break out of loop when the player wouldn't want another round\n while True:\n if not first_round:\n if get_yes_no(\"Would you like to switch to a different word-list?\"):\n # 2.1.\n print_game_logo()\n word_list_path = change_word_list()\n else:\n sys_comment(\"Playing with the same word-list\")\n else:\n # 2.1.\n word_list_path = change_word_list()\n first_round = False\n\n # 2.2.\n secret_word = change_secret_word(word_list_path)\n\n # starting the game\n hangman(secret_word)\n\n # finished the game - ask if want another round\n if get_yes_no(\"Would you like to play another game?\"):\n sys_comment(\"Starting another game\")\n else:\n sys_comment(\"Quitting\")\n break\n\n return None", "def hangman(secret_word):\n HANGMAN_ASCII_ART = \"\"\" _ _ \n | | | |\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\\\\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\n __/ |\n |___/ \"\"\"\n MAX_TRIES = 6\n print(HANGMAN_ASCII_ART, \"\\n\", MAX_TRIES, \"\\n\" * 4)\n time.sleep(2) # Makes the thread to sleep\n file_path = input(\"Enter file path (the text needs to be string of words separated by spaces:\\n\")\n while not (path.isfile(file_path)): # checks for the correction of the path (if the path is valid)\n print(\"The path is wrong or not exsited, please enter new one:\")\n file_path = input()\n index = input(\"Enter the index in which the secret word is in:\\n\")\n\n file_reader(file_path, index) # Go to file_reader func. and performs the reading of the file\n secret_word = choose_word(file_path, index) # By choose_word func. 
gives us the secret word\n print(\"\\nLet's start!\\n\")\n num_of_tries = 1\n old_letters_guessed = []\n print(HANGMAN_PHOTOS[num_of_tries])\n print(show_hidden_word(secret_word, old_letters_guessed))\n\n while not (\n num_of_tries == MAX_TRIES + 1): # While the number of tries is less then 6 keep getting a letter to guess\n letter = input(\"Guess a letter: \")\n if try_update_letter_guessed(letter, old_letters_guessed) == True and not (letter in secret_word):\n num_of_tries += 1 # Add 1 to number of tries\n print(\":( \\n\\n\")\n print(HANGMAN_PHOTOS[num_of_tries]) # Print the hangman position by number of tries\n show_hidden_word(secret_word, old_letters_guessed)\n print(\"\\n\")\n if check_win(secret_word, old_letters_guessed): # If check_win returns true, print winner\n time.sleep(0.5)\n clear()\n winner()\n break\n if num_of_tries == MAX_TRIES + 1: # If the player tried 6 times worng letters print loser\n time.sleep(0.5)\n clear()\n loser()\n break", "def main():\r\n # Initialize words from specific file\r\n words_list = hangman_helper.load_words()\r\n # Run single game with given word list to choose from\r\n run_single_game(words_list)\r\n # Ask the user if he would like to play again\r\n request = hangman_helper.get_input()\r\n if request[INPUT_TYPE] == hangman_helper.PLAY_AGAIN:\r\n if request[INPUT_VALUE]:\r\n run_single_game(words_list)", "def main():\r\n words = hangman_helper.load_words(file='words.txt')\r\n run_single_game(words)\r\n type_of_input=hangman_helper.get_input()\r\n while type_of_input[1]:\r\n run_single_game(words)\r\n type_of_input = hangman_helper.get_input()", "async def hangman(self, ctx: commands.Context, category = None):\n # Check if there is an existing game\n self._existing_game(ctx)\n\n # Get hangman word from category\n choice = self.hangman_choose(category)\n self.games_info[ctx.guild.id][1] = True\n guessed = set()\n comparison = set()\n for letter in choice:\n if letter.isalpha():\n comparison.add(letter)\n \n category = category.capitalize() if category else \"Random\"\n\n def produce_embed(description, colour):\n hangman = (\n '💔💀',\n '1️⃣😭', \n '2️⃣😢', \n '3️⃣😠',\n '4️⃣😓',\n '5️⃣😅',\n '6️⃣😄'\n )\n embed = discord.Embed(title=f\"Game of Hangman ({category})\", description=description, color=colour)\n n = 6 - len(guessed - comparison) # Number of chances left\n embed.add_field(name=hangman[n] + f\" {n} chances left!\",\n value=f\"`{' '.join(guessed)}`\")\n return (embed, n)\n \n colour = {\n True: discord.Colour.green(), # True meaning correct response\n False: discord.Colour.red() # False meaning incorrect response\n }\n status = True\n description = \"Game started!\"\n\n while True:\n display_words = '\\n`'\n for letter in choice:\n if letter in guessed or not letter.isalpha():\n display_words += f\"{letter} \"\n else:\n display_words += \"_ \"\n display_words = display_words[:-1] + '`'\n\n embed, chances_left = produce_embed(description + display_words, colour[status])\n await ctx.send(embed=embed)\n\n if chances_left <= 0:\n result = 'lose'; break\n\n # Receive response\n def hangman_check(message: discord.Message):\n content = message.content.lower()\n return (message.channel.id == self.data[ctx.guild.id]['channel'] \n and (content == choice \n or len(content) == 1 \n and content.isalpha() \n and content not in guessed\n )\n )\n try:\n message = await self.bot.wait_for('message', check=hangman_check, timeout=120)\n except asyncio.TimeoutError:\n result = 'timeout'; break\n \n content = message.content.lower()\n if content == choice:\n result = 
'win'; break\n\n # If not, then the message was an unguessed character\n guessed.add(content)\n if guessed.issuperset(comparison):\n result = 'win'; break\n\n # If not, then the game has not ended\n status = content in comparison\n if status:\n description = f\"`{message.author}`'s guessed a letter `{content}`!\"\n else:\n description = f\"`{message.author}`'s guess of letter `{content}` is wrong!\"\n \n if result == \"win\":\n await ctx.send(f\"`{message.author}` guessed the word! It was `{choice}`!\")\n elif result == \"lose\":\n await ctx.send(f\"You lost! The word was `{choice}`!\")\n elif result == \"timeout\":\n await ctx.send(f\"Timeout. The word was `{choice}`!\")\n \n # Clear the database\n self.games_info[ctx.guild.id] = gamesDict()", "def run_game(ans, n):\n # transform to upper case to be case-insensitive\n ans = ans.upper()\n wrong_times = 0\n dashed = \"\"\n for i in range(len(ans)):\n dashed += '-'\n print_hangman(n, wrong_times)\n print('The word looks like: ' + dashed)\n print('You have '+str(n-wrong_times)+' guesses left.')\n while True:\n input_ch = input('Your guess: ')\n # check type of the input, just only one alphabet can be accepted\n if not (input_ch.isalpha() and (len(input_ch) == 1)):\n print('illegal format.')\n else:\n # transform to upper case to be case-insensitive\n input_ch = input_ch.upper()\n # if guessed alphabet was in the answer word\n if ans.find(input_ch) != -1:\n # check the alphabet's index in the word\n for i in range(len(ans)):\n if ans[i] == input_ch:\n # replace the guessed alphabet in the dashed string to show\n dashed = dashed[:i]+ans[i]+dashed[i+1:]\n print_hangman(n, wrong_times)\n print('You are correct!')\n # if alphabets were not all guessed, the while loop will be continued\n if not dashed.isalpha():\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses left.')\n # if all alphabets were guessed, the game is over\n else:\n print('You win!')\n print('The word was: ' + ans)\n break\n # if guessed alphabet wasn't in the answer word\n else:\n wrong_times += 1\n # if wrong times haven't reached N_TURNS, the while loop will be continued\n print_hangman(n, wrong_times)\n if wrong_times < n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('The word looks like: ' + dashed)\n print('You have ' + str(n - wrong_times) + ' guesses left.')\n # if user guessed the wrong alphabet at the last time, the game is over\n elif wrong_times == n:\n print(\"There's no \" + input_ch + \"'s in the word.\")\n print('You are completely hung :(')\n print('The word was: ' + ans)\n break", "def hangman(Computer, Graphic, Length):\n if Computer == True:\n tries = 0\n word = '_'*Length \n print(\"Hello, let's play hangman!\\nI'm choosing a word... 
thinking...\\nOk.\")\n MakeMyDictionary('american-english.txt') \n ComputerGuess(computerWordPick(Length),tries,Length,word)\n if Computer == False:\n tries = 0 \n word = '_'*Length \n print('You are going to pick this time!\\nOnce you have a word that is',Length,'letters long, hit any key')\n input('...')\n MakeMyDictionary('american-english.txt') \n alphabet = 'abcdefghijklmnopqrstuvwxyz' \n humanWordPick(tries,Length, word,alphabet)\n if Graphic == True:\n turtleDrawing()", "def print_hangman(num_of_tries):\r\n\r\n picture_1 = \"\"\"x-------x\"\"\"\r\n\r\n picture_2 = \"\"\"\r\n x-------x\r\n |\r\n |\r\n |\r\n |\r\n |\"\"\"\r\n\r\n picture_3 = \"\"\"\r\n x-------x\r\n | |\r\n | 0\r\n |\r\n |\r\n |\"\"\"\r\n\r\n picture_4 = \"\"\"\r\n x-------x\r\n | |\r\n | 0\r\n | |\r\n |\r\n |\"\"\"\r\n\r\n picture_5 = \"\"\"\r\n x-------x\r\n | |\r\n | 0\r\n | /|\\\\\r\n |\r\n |\"\"\"\r\n\r\n picture_6 = \"\"\"\r\n x-------x\r\n | |\r\n | 0\r\n | /|\\\\\r\n | /\r\n |\"\"\"\r\n\r\n picture_7 = \"\"\"\r\n x-------x\r\n | |\r\n | 0\r\n | /|\\\\\r\n | / \\\\\r\n |\"\"\"\r\n HANGMAN_PHOTOS = {0: picture_1, 1: picture_2, 2: picture_3, 3: picture_4, 4: picture_5, 5: picture_6, 6: picture_7}\r\n print(HANGMAN_PHOTOS[num_of_tries])", "def run_single_game(words_list):\r\n\r\n # Initialize parameters\r\n wrong_guess_count = 0\r\n wrong_guess_words = []\r\n already_chosed = []\r\n msg = \"\"\r\n\r\n # Get random name from the words list.\r\n random_word = hangman_helper.get_random_word(words_list)\r\n\r\n # Initialize the pattern\r\n pattern = len(random_word) * HIDDEN_SIGN\r\n\r\n # Print default message to user\r\n msg = hangman_helper.DEFAULT_MSG\r\n\r\n # the game wont stop until the pattern will be revealed or guess number\r\n # will cross the max errors available.\r\n while wrong_guess_count < hangman_helper.MAX_ERRORS and \\\r\n pattern != random_word:\r\n # display the current state in each iteration of the loop\r\n hangman_helper.display_state(pattern, wrong_guess_count,\r\n wrong_guess_words, msg)\r\n # Get input from user\r\n request = hangman_helper.get_input()\r\n\r\n # Check if the input is a guess\r\n if request[INPUT_TYPE] == hangman_helper.LETTER:\r\n\r\n # Check parameter validation\r\n if len(request[INPUT_VALUE]) != 1 or \\\r\n not request[INPUT_VALUE].islower():\r\n msg = hangman_helper.NON_VALID_MSG\r\n continue\r\n # Check if the letter already was chosen before.\r\n elif request[INPUT_VALUE] in already_chosed:\r\n msg = hangman_helper.ALREADY_CHOSEN_MSG + request[INPUT_VALUE]\r\n # If the guessed letter does exist in the word\r\n elif request[INPUT_VALUE] in random_word:\r\n # Updating the the word pattern accordingly\r\n pattern = update_word_pattern(random_word, pattern,\r\n request[INPUT_VALUE])\r\n msg = hangman_helper.DEFAULT_MSG\r\n already_chosed.append(request[INPUT_VALUE])\r\n else:\r\n wrong_guess_count += 1\r\n wrong_guess_words.append(request[INPUT_VALUE])\r\n msg = hangman_helper.DEFAULT_MSG\r\n already_chosed.append(request[INPUT_VALUE])\r\n\r\n elif request[INPUT_TYPE] == hangman_helper.HINT:\r\n # Call the filter words function\r\n sort = filter_words_list(words_list, pattern, wrong_guess_words)\r\n # Call the choose letter function\r\n chosen_letter = choose_letter(sort, pattern)\r\n # Initialize the msg variable\r\n msg = hangman_helper.HINT_MSG + chosen_letter\r\n\r\n # Initialise the display function in case winning\r\n if pattern == random_word:\r\n msg = hangman_helper.WIN_MSG\r\n # Initialise the display function in case of losing\r\n elif wrong_guess_count == 
hangman_helper.MAX_ERRORS:\r\n msg = hangman_helper.LOSS_MSG + random_word\r\n # Calling the display function\r\n hangman_helper.display_state(pattern, wrong_guess_count, wrong_guess_words,\r\n msg, ask_play=True)", "async def hangman(self, ctx, defaultWord = \"\"):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.hangman', extra={'invoker': ctx.message.author.name})\r\n if ctx.channel in self.channels_occupied_hangman:\r\n return await ctx.send(\"There is already a game going on in this channel!\")\r\n self.channels_occupied_hangman.add(ctx.channel)\r\n if defaultWord == \"\":\r\n await ctx.send(\"Awaiting DM with word...\")\r\n WORD = await ctx.bot.wait_for('message',\r\n check=lambda m: isinstance(m.channel, d.DMChannel) and m.author == ctx.message.author)\r\n else:\r\n WORD = defaultWord\r\n WORD = WORD.content.lower()\r\n letters = ['_'] * len(WORD)\r\n lowers = (\r\n 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j',\r\n 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't',\r\n 'u', 'v', 'w', 'x', 'y', 'z'\r\n )\r\n for i in range(len(WORD)):\r\n if WORD[i] not in lowers:\r\n letters[i] = WORD[i]\r\n missed = []\r\n shanpe = 0\r\n await ctx.send(DGHANGMANSHANPES[shanpe] + '\\n' + 'Missed:\\nGotten: `' + \"\".join(letters) + '`')\r\n while \"\".join(letters) != WORD and shanpe < len(DGHANGMANSHANPES) - 1:\r\n letter = (await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and m.content in lowers)).content\r\n if WORD.find(letter) != -1:\r\n for i in self.substrs(letter, WORD):\r\n letters[i] = letter\r\n else:\r\n if letter not in missed:\r\n missed.append(letter)\r\n shanpe += 1\r\n await ctx.send(DGHANGMANSHANPES[shanpe] + '\\nMissed: ' + ','.join(missed) + '\\nGotten: `' + \"\".join(letters) + '`')\r\n if \"\".join(letters) == WORD:\r\n await ctx.send('Congratulations! You have guessed the complete word!')\r\n else:\r\n await ctx.send('You lost! The word was \\\"{}\\\".'.format(WORD))\r\n self.channels_occupied.remove(ctx.channel)", "def game_loop():\n\n # init some game variables\n game_over = False\n game_index = 0\n turn_counter = 0\n challenge_word = ''\n with open(word_file, 'r') as txt:\n lines = txt.read().splitlines()\n challenge_word = codecs.decode(choice(lines), 'rot_13')\n hidden_word = []\n guessed_letters = []\n\n while game_over == False:\n\n # update hidden word\n hidden_word = []\n for letter in challenge_word:\n if letter in guessed_letters:\n hidden_word.append(letter)\n else:\n hidden_word.append('_')\n\n # display game board\n print(game_board[game_index])\n\n # check if winner\n if '_' not in hidden_word:\n print('Congratulations, you won!')\n print('You correctly guessed the word: \"{}\" in {} turns.'.format(challenge_word, turn_counter))\n game_over = True\n break\n\n # check if loser\n if game_index == len(game_board) - 1:\n print('Game over, you lost!')\n print('You incorrectly guessed the word: \"{}\" in {} turns.'.format(challenge_word, turn_counter))\n game_over = True\n break\n\n # not winner/loser yet, so play ball\n game_status = 'Word:'\n for letter in hidden_word:\n game_status = game_status + ' ' + letter\n print(game_status)\n print('')\n game_status = 'Guessed Letters:'\n for letter in guessed_letters:\n game_status = game_status + ' ' + letter\n print(game_status)\n print('')\n\n # guess letter\n guessing = True\n while guessing:\n letter = input('Guess a letter: ').lower()\n if len(letter) != 1:\n print('Only guess one letter at a time. 
Guess again!')\n elif letter in guessed_letters:\n print('\"' + letter + '\" was already used. Guess again!')\n else:\n guessing = False\n guessed_letters.append(letter)\n turn_counter += 1\n if letter not in challenge_word:\n game_index += 1\n os.system('cls||clear')", "async def localhangman(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.localhangman', extra={'invoker': ctx.message.author.name})\r\n if ctx.channel in self.channels_occupied:\r\n await ctx.send('There is already a game going on in this channel!')\r\n self.channels_occupied.add(ctx.channel)\r\n await ctx.send('Awaiting DM with word...')\r\n msg = await ctx.bot.wait_for('message',\r\n check=lambda m: isinstance(m.channel, d.DMChannel) and m.author == ctx.author)\r\n WORD = msg.content.lower()\r\n letters = ['_'] * len(WORD)\r\n lowers = (\r\n 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',\r\n 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',\r\n 'w', 'x', 'y', 'z'\r\n )\r\n for i in range(len(WORD)):\r\n if WORD[i] not in lowers:\r\n letters[i] = WORD[i]\r\n missed = []\r\n shanpe = 0\r\n status = await ctx.send(DGHANGMANSHANPES[shanpe] + '\\nMissed: ' + ', '.join(missed) + '\\nGotten: `' + \"\".join(letters) + '`')\r\n while \"\".join(letters) != WORD and shanpe < len(DGHANGMANSHANPES) - 1:\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and m.content in lowers)\r\n letter = guess.content\r\n await guess.delete()\r\n if WORD.find(letter) != -1:\r\n for i in self.substrs(letter, WORD):\r\n letters[i] = letter\r\n else:\r\n if letter not in missed:\r\n missed.append(letter)\r\n shanpe += 1\r\n await status.edit(content=(DGHANGMANSHANPES[shanpe] + '\\nMissed: ' + ', '.join(missed) + '\\nGotten: `' + \"\".join(letters) + '`'))\r\n if \"\".join(letters) == WORD:\r\n await ctx.send('Congratulations! You have guessed the complete word!')\r\n else:\r\n await ctx.send('You lost! 
The word was \\\"{}\\\".'.format(WORD))\r\n self.channels_occupied.remove(ctx.channel)", "def main():\n secret_word = get_word()\n play_game(secret_word)", "def main():\n word = random_word()\n old_ans = dashed(word)\n print('You have ' + str(N_TURNS) + ' guesses left.')\n guess(word, old_ans)", "def play_game(secret_word):\n word=secret_word\n hidden='-'\n inp=''\n no_guesses = 8\n list_word=list(word)\n \n for i in range(len(word)-1):\n hidden=hidden+'-'\n hidden_word=list(hidden)\n #print('Hidden',hidden,hidden_word)\n\n while no_guesses <= 8 :\n # print(type(word))\n print(\"The word now looks like this: \",hidden_word)\n print(\"\\n You have \"+str(no_guesses)+\" left\")\n inp=input(\"Type a single letter here, then press enter: \")\n for i in range(len(word)):\n if list_word[i]==inp:\n hidden_word[i]=inp\n # print('i=',i)\n elif list_word[i]=='-':\n hidden_word[i]=str('-')\n print('i=',i)\n else :\n hidden_word[i]=hidden_word[i]\n # print(hidden_word)\n #print(\"\\n You have \"+str(no_guesses)+\" left\")\n #inp=input(\"Type a single letter here, then press enter: \")\n\n if inp not in word:\n no_guesses = no_guesses - 1\n\n if list_word == hidden_word:\n print(\"Matches\")\n break\n\n if no_guesses == 0:\n print(\"You have reached the maximum attempts\")\n break", "def hangman_figure(attempt_left):\n if attempt_left == N_TURNS:\n print('___________')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 1:\n print('___________')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 2:\n print('___________')\n print('| |')\n print('| O')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 3:\n print('___________')\n print('| |')\n print('| O')\n print('| |')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 4:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 5:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('|')\n print('|')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 6:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| /')\n print('| |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 7:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('|')\n print('|_____')\n if attempt_left == N_TURNS - 8:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| |')\n print('|_____')\n if attempt_left == N_TURNS - 9:\n print('___________')\n print('| |')\n print('| O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 10:\n print('___________')\n print('| |')\n print('| -O')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')\n if attempt_left == N_TURNS - 11:\n print('___________')\n print('| |')\n print('| -O-')\n print('| \\\\_|_/')\n print('| |')\n print('| / \\\\')\n print('| | |')\n print('| | |')\n print('|_____')", "def 
print_hangman(num_of_tries):\n print(HANGMAN_PHOTOS[str(num_of_tries)])", "def main():\n secret_word = get_word()\n print(secret_word)\n play_game(secret_word)", "def start():\n\n word = input(\"Please enter the word to be guessed: \").lower()\n while ( len(word) < MIN_WORD_LENGTH ) or ( len(word) > MAX_WORD_LENGTH ):\n word = input(\"Error, word must be > 5 and < 11 letters. Enter again: \").lower()\n\n game_end = False\n guess_letter = \"\"\n found_list = [\"\"] * len(word) # correct guesses\n guess_list = [] # failed guesses (all other guesses)\n hang_state = 0\n while not game_end: #while game is running\n game_end = gui(word,found_list, guess_list, hang_state)\n if game_end == False:\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n while(len(guess_letter) > 1 or len(guess_letter) < 1):\n print(\"The letter must be length 1\")\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n while guess_letter in guess_list or guess_letter in found_list:\n print(\"You already guessed this letter!\")\n guess_letter = input(\"Guess the next letter: \").lower() # Guess a letter\n for charnum in range(len(word)): #for each letter in the word\n \n if guess_letter == word[charnum]:\n if found_list[charnum] == \"\":\n found_list[charnum] = guess_letter.lower()\n else:\n if charnum == (len(word) - 1) and guess_letter not in found_list:\n print(\"That letter does not exist in the word...\")\n guess_list.append(guess_letter.lower())\n hang_state += 1\n finalString = ''.join(found_list)\n if str(finalString) == str(word):\n print(\"You got it! The word was \" + word + \"!\")\n game_end = True", "def play_game():\n pass", "def main(level, word):\n hangman = Hangman(\n level=level,\n left_spaces=SPACE_STR * 10,\n middle_spaces=SPACE_STR * 6,\n left_foot=SPACE_STR * 7,\n right_foot=SPACE_STR * 3\n )\n app = QtWidgets.QApplication(sys.argv)\n login = Login(hangman)\n if login.exec_() == QtWidgets.QDialog.Accepted:\n ui = Window(login.name.text(), word, hangman)\n game = HangGame(level, word, hangman, ui)\n ui.connect_all(game.play_turn)\n ui.prepare(level)\n ui.show()\n app.exec_()", "def main():\n ans = random_word()\n run_game(ans, N_TURNS)", "def hm(l, b, i):\n return hangman(l, b, i)", "def main():\n answer = random_word().upper()\n dashed_word = ''\n for i in range(len(answer)):\n dashed_word += '-'\n guess_times = 0\n while True:\n if guess_times == N_TURNS:\n # This is the last chance to guess and user failed\n print('You are completely hung :\\'(')\n break\n print('The word looks like: ' + dashed_word + '\\nYou have ' + str(N_TURNS - guess_times) + ' guesses left.')\n guess = input('Your Guess: ')\n if len(guess) == 1 and guess.isalpha():\n # Legal format\n guess = guess.upper()\n if answer.find(guess) != -1:\n # The guess is correct and should uncover the dashed_word\n print('You are correct!')\n dashed_word = uncover_dash(guess, answer, dashed_word)\n if not dashed_word.find('-') > -1:\n # No dash left.\n print('You win!!')\n break\n else:\n # Wrong guess\n guess_times += 1\n print('There is no ' + guess + '\\'s in the word.')\n else:\n print('Illegal format')\n print('The word was: ' + answer)", "def guessing():\r\n word = random_word()\r\n word_guess = ['_' for _ in word]\r\n letters = set()\r\n guesses = 6\r\n\r\n print('Your Word:')\r\n print(' '.join(word_guess) + '\\n')\r\n\r\n while '_' in word_guess and guesses > -1:\r\n guess = input('Guess your letter: ').upper()\r\n if guess == 'EXIT':\r\n print('\\nThe game had been ended.\\n')\r\n 
sys.exit()\r\n elif guess in string.ascii_uppercase:\r\n if guess == '':\r\n print('Not a letter!\\n')\r\n elif guess in letters:\r\n print('Letter already taken!\\n')\r\n elif guess in word:\r\n for i in range(len(word)):\r\n if word[i] == guess:\r\n word_guess[i] = guess\r\n print(' '.join(word_guess) + '\\n')\r\n if ''.join(word_guess) == word:\r\n print('You win!\\n')\r\n else:\r\n guesses -= 1\r\n print('Incorrect!')\r\n if guesses == -1:\r\n make_drawing(guesses)\r\n print('\\nYou hang!')\r\n print(f'The full word was: {\" \".join(list(word))}\\n')\r\n elif guesses == 1:\r\n make_drawing(guesses)\r\n print(f'You have {guesses} wrong guess left.\\n')\r\n else:\r\n make_drawing(guesses)\r\n print(f'You have {guesses} wrong guesses left.\\n')\r\n letters.add(guess)\r\n else:\r\n print('Not a letter!\\n')", "def perform(self):\n i = 1\n attempts = 0\n\n while i < 8:\n letter = self.ask_letter()\n result = self.check_letter(letter, self.random_word)\n attempts += 1\n if result:\n print(result)\n if self.is_game_finished(result):\n print('\\nYou`re win!')\n winner = 1\n return self.send_result(attempts, winner)\n break\n else:\n self.draw_hangman(i)\n i += 1\n if i == 8:\n print('\\nYou`re lose')\n winner = 2\n return self.send_result(attempts, winner)", "def display_hangman(self):\n print(Fore.CYAN + HANGMAN_PICS[self.stage])\n print('\\n')\n print(self.progress + Style.RESET_ALL)\n print('\\n')", "def print_start_game():\n print(HANGMAN_ASCII_ART)\n print(MAX_TRIES)", "def start(self):\n self.welcome()\n while self.missed < 5 and not self.active_phrase.check_complete(self.guesses):\n print(f\"Number missed: {self.missed}\")\n self.active_phrase.display(self.guesses)\n user_guess = self.get_guess()\n if user_guess == \"INVALID\":\n continue\n self.guesses.append(user_guess)\n if not self.check_guess(user_guess):\n print(f\"Sorry, there are no {user_guess}'s in the phrase\")\n self.missed += 1\n else:\n count_correct = self.active_phrase.phrase.count(user_guess)\n if count_correct == 1:\n print(f\"Great job! There is {count_correct} '{user_guess}' in the phrase\")\n else:\n print(f\"Great job! There are {count_correct} {user_guess}'s in the phrase\")\n self.game_over()", "def play_game():\n pass", "def print_hangman(num_of_tries):\n\n HANGMAN_PHOTOS = {\n 0: \"x-------x\\n\\n\\n\\n\\n\\n\",\n 1: \"x-------x\\n\"\n \"|\\n\"\n \"|\\n\"\n \"|\\n\"\n \"|\\n\"\n \"|\\n\",\n 2: \"x-------x\\n\"\n \"| |\\n\"\n \"| 0\\n\"\n \"|\\n\"\n \"|\\n\"\n \"|\\n\",\n 3: \"x-------x\\n\"\n \"| |\\n\"\n \"| 0\\n\"\n \"| |\\n\"\n \"|\\n\"\n \"|\\n\",\n 4: \"x-------x\\n\"\n \"| |\\n\"\n \"| 0\\n\"\n \"| /|\\\\\\n\"\n \"|\\n\"\n \"|\\n\",\n 5: \"x-------x\\n\"\n \"| |\\n\"\n \"| 0\\n\"\n \"| /|\\\\\\n\"\n \"| / \\n\"\n \"|\\n\",\n 6: \"x-------x\\n\"\n \"| |\\n\"\n \"| 0\\n\"\n \"| /|\\\\\\n\"\n \"| / \\\\\\n\"\n \"|\\n\"\n }\n\n print_centered(HANGMAN_PHOTOS[num_of_tries])\n return None", "def new_game(secret_words):\n\n\tattempts=0\n\tword_index = random.randint(0,5)\n\tword_to_guess = secret_words[word_index]\n\tglobal mask\n\tmask = \" _ \" * len(secret_words[word_index])\n\tget_input(mask, word_to_guess, user_guesses, attempts, valid_characters, secret_words)\n\treturn", "def start_game(self):\n self.word_view.next_word()\n self.greeterboard.reset(msg=i18n.OUT_MSG_LUCK.format(self.player_name))\n self.keyboard.reset()\n self.init_game_metrics()", "def hangman(t, n, height):\n stump(t, n * height)\n fdbk(t, n)\n lt(t)\n bk(t, n*height)\n rt(t)", "def startGame():\n\n\tprint(\"\\nOK! 
Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()", "def play(self):\n print(\"Game is starting!!\")\n self.generate_secret_number()\n while True:\n self.get_guess_from_user()\n self.ans = self.compare_results()\n if self.ans:\n print(f\"Right Guess!! , the number is {self.secret_number}\")\n break\n else:\n print(f\"Wrong Guess!! , Please try again.\")\n return self.ans", "async def guess(self, ctx):\n server = ctx.message.server.id\n current_streak = 0\n while True:\n if current_streak > 0:\n await self.bot.say('Your current streak is {}'.format(current_streak))\n reply = guessing_game(server, ctx)\n await self.bot.send_file(\n ctx.message.channel,\n 'images/lineup/game_postgame.png',\n content='Guess a hero {} played that game. {}'.format(\n reply[1], reply[2])\n )\n\n def guess_check(m):\n return ctx.message.content\n\n guess = await self.bot.wait_for_message(\n timeout=30.0,\n check=guess_check,\n author=ctx.message.author,\n channel=ctx.message.channel\n )\n answer = reply[0]\n if guess is None:\n fmt = 'Sorry, you took too long. It was {}.\\nGame over. Your score: {}.'\n await self.bot.send_message(\n ctx.message.channel,\n fmt.format(answer, current_streak)\n )\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break\n if guess.content.lower() == answer.lower():\n\n await self.bot.say('Yay! You are right.')\n current_streak += 1\n else:\n await self.bot.say(\n 'Nope. It is actually {}.\\n Game over. Your score: {}'.format(answer, current_streak))\n if current_streak > 0:\n db.add_leaderboard_guess(\n ctx.message.server.id,\n ctx.message.author.id,\n current_streak,\n 'guess-leaderboard'\n )\n break", "def play(self):\n # create players, instances of Player and Roll\n print('\\nWELCOME TO YAHTZEE!')\n self.create_number_of_players()\n self.create_players()\n self.create_players_list()\n self.create_rolls_list()\n print(\"-\"*48)\n\n # starts games of rounds of player turns\n while True:\n\n print(f\"\\nLET'S PLAY! GAME {self.game_counter + 1}\")\n\n self.yahtzee_rounds()", "def play_a_game(strategy, word):\n guesses = []\n state_of_play = \"\"\n for i in range(len(word)):\n state_of_play = state_of_play + \" \"\n print(state_of_play)\n while state_of_play != word:\n letter = strategy.play_round(state_of_play,guesses)\n guesses.append(letter)\n print(letter)\n guess_success = False\n for i in range(len(word)):\n if letter == word[i]:\n state_of_play = state_of_play[0:i] + letter + state_of_play[i+1:]\n print(\"State of game: \" + state_of_play)\n guess_success = True\n if not guess_success:\n strategy.made_mistake()\n return strategy.mistakes", "def play_again(self):\n play = input(f'{Fore.YELLOW}Would you like to play again? (Y/N)').strip().upper() # noqa\n print('\\n')\n if play == 'Y':\n self.stage = 0\n self.guessed_letters = []\n self.guessed_words = []\n self.word = random.choice(words)\n self.progress = '-' * len(self.word)\n self.games_played += 1\n self.play_hangman()\n elif play == 'N':\n self.games_played += 1\n print('Thanks for playing! 
\\n')\n print(f'You won {self.games_won} out of {self.games_played} games')\n else:\n print('Invalid choice \\n')\n self.play_again()", "def playGameplus(wordList):\n #选择游戏模式\n global la_st2\n n = 0\n print '请选择你想进行的模式:a:单人 c:人机 e: 退出游戏'\n while True:\n order9 = raw_input('>>>').lower()\n if (order9 == 'a') or (order9 =='c'):\n moudl = True\n break\n elif order9 == 'e':\n moudl = False\n print '游戏已退出'\n print ' = ' * 20\n break\n else:\n print '命令有误,请重新输入'\n if moudl:\n print 'n:新的游戏 r:重开上局 e:退出'\n order8 = raw_input('>>>').lower() \n while True:\n if order8 == 'n':\n while True:\n n = raw_input('你想获取的字母数(大于4个):')\n while True:\n try:\n n = int(n)\n if n > 4:\n break\n except ValueError,e:\n print '输入有误!'\n if order9 == 'a':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHand(hand, wordList, n)\n elif order9 == 'c':\n hand = dealHand(n)\n la_st = copy.deepcopy(hand)\n playHandplus(hand, wordList, n)\n if order8 == 'r':\n if la_st2 and (order9 == 'a'):\n playHand(la_st, wordList, n)\n elif la_st2 and (order9 == 'c'):\n playHandplus(hand, wordList, n)\n elif not la_st2:\n print '您没有上局存档,请重新输入指令:'\n order8 = raw_input('>>>').lower()\n if order8 == 'e':\n print '游戏结束'\n break\n if not order8 in ['r','n','e'] or order8 == '':\n print '请重新输入指令:'\n order8 = raw_input('>>>').lower()", "def print_message(self):\r\n # print(\"Word : \" + game_instance.get_word())\r\n print(\"\\nCurrent guess : \"+self.current_word)\r\n print(\"\\ng:guess, t:tell me, l:letter guess, q:quit\")", "def start_game(self):\n self.code = code.get_random_num()\n self.Player1 = self.get_player(1)\n self.Player2 = self.get_player(2)\n attempt = self.Player1.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n check.check(num_guessed_list, right_answer_list)\n attempt = self.Player2.make_guess()\n guess.guess_lists(attempt, self.code)\n right_answer_list = guess.return_answer()\n num_guessed_list = guess.return_player_guess()\n output = check.check(num_guessed_list, right_answer_list)\n play = end_game.end_game(output)\n if play == True:\n self.keep_playing()", "def intro():\n print_pause(\"Welcome to the adventure mini-game!\")\n print_pause(\"Here you'll be able to explore a small part of a\"\n \" fantasy world\")\n print_pause(\"I'm smart enough to recognize commands based on\"\n \" single words.\")\n print_pause(\"So, for instance, if one of your choices is: \")\n print_pause(\"\\033[1;32m\\\"Go to the farmhouse\\\"\\033[0m\")\n print_pause(\"All you need to do is type: \")\n print_pause(\"\\033[1;32m farmhouse \\033[0m\")\n print_pause(\"or: \")\n print_pause(\"\\033[1;32m house \\033[0m\")\n print_pause(\"or even: \")\n print_pause(\"\\033[1;32m farm \\033[0m\")\n print_pause(\"\\nYou can also get a look at your inventory at any time\"\n \" just by typing\\033[1;32m inventory \\033[0m\")\n play_now = input(\"\\nPress the \\'return\\' key to start playing.\")\n for n in range(10):\n print(\"*\")\n time.sleep(.5)\n print_pause(\"You awaken with a splitting headache in the middle\"\n \" of a field.\")\n print_pause(\"Based on the large number of empty bottles lying around you\"\n \" it must have been a heck of a party.\")\n print_pause(\"You slowly get up off the ground.\")\n clearing()", "def play_game(word_list):\n # TO DO...\n print 'Welcome to the 6.00 Word Game!'\n hand = deal_hand(HAND_SIZE) \n while True:\n choice1 = raw_input('Enter \"n\" for a new hand, \"r\" to repeat the last hand, or \"e\" to exit the game: 
')\n if choice1 not in ('ren'):\n print 'invalid choice'\n continue\n if choice1 == 'e': break\n while True:\n choice2 = raw_input('Enter \"u\" to play the hand yourself or \"c\" to let the computer play the hand: ') \n if choice2 not in ('uc'):\n print 'invalid choice'\n continue\n else: break \n if choice1 == 'r':\n if choice2 == 'u': play_hand(hand, word_list)\n else: comp_play_hand(hand, word_list)\n else: \n hand= deal_hand(HAND_SIZE) \n if choice2 == 'u': play_hand(hand, word_list)\n else: comp_play_hand(hand, word_list)", "def play_game(self):\n \n# self.display_letter_prompt()\n\n if self.input_letter != None:\n if self.input_letter == self.current_prompt:\n self.correct_response()\n else:\n self.incorrect_response()\n\n self.frames_passed += 1\n\n if self.prompt_vibrated == False:\n self.vibrate_buttons()\n self.prompt_vibrated = True\n\n if self.frames_passed > (self.delay * self.fps * 0.07):\n self.vibrate_buttons()\n self.frames_passed = 0", "def play_game(word_list):\n # TO DO ...\n\n hand = deal_hand(HAND_SIZE) # random init\n\n while True:\n cmd = input('Enter n to deal a new hand, r to replay the last hand, or e to end game: ')\n\n if cmd == 'n':\n hand = deal_hand(HAND_SIZE)\n play_hand(hand.copy(), word_list)\n print()\n\n elif cmd == 'r':\n play_hand(hand.copy(), word_list)\n print()\n\n elif cmd == 'e':\n break\n\n else:\n print(\"Invalid command.\")", "def new_game(self):\n self.strikes = 0\n self.change_image()\n self.word = choice(self.WORD_LIST).upper()\n self.word_blank.set(\" _ \" * len(self.word))\n self.word_underscored = [\"_\"] * len(self.word)\n self.guessed = \"GUESSES: \"\n self.guesses.set(self.guessed)", "def check_guess(guess, secret_word):\n\n if guess == \"\" or len(guess) > 1 and len(guess) != len(secret_word) or guess.isdigit():\n message_to_player = \"YOU MUST INPUT A LETTER OR GUESS THE ENTIRE WORD.\"\n\n elif len(guess) == len(secret_word) and guess == secret_word:\n message_to_player = \"YOU GUESSED THE WORD. IT WAS \" + secret_word + \".\"\n game_won()\n\n elif secret_word != \"\" and guess in secret_word:\n message_to_player = \"THAT LETTER IS IN THE WORD! GOOD GUESS!!\"\n player.letters_guessed.append(guess)\n\n elif guess not in player.letters_guessed:\n message_to_player = \"WRONG! 
TRY AGAIN!\"\n\n player.add_body_part()\n player.letters_guessed.append(guess)\n\n else:\n message_to_player = \"YOU ALREADY GUESSED THAT LETTER!!!\\n\\n\"\n\n update(secret_word, \"Letters guessed:\\n\\t\" + str(player.letters_guessed) + \"\\n\\n\" + message_to_player)", "def start_game(self, entered_word):\n if entered_word not in self._word_list:\n print \"Not a word\"\n return\n \n strings = self._substrs(entered_word)\n sorted_strings = self._merge_sort(strings)\n all_strings = self._remove_duplicates(sorted_strings)\n self._subset_strings = self._intersect(self._word_list, all_strings)\n self._guessed_strings = [] \n for word in self._subset_strings:\n self._guessed_strings.append(\"*\" * len(word))\n self.enter_guess(entered_word)", "def play():\n\tprint(\"Welcome to TIC TAC TOE!\")\n\tboard, player_mark, message, turn_counter = initialize_game();\n\twhile player_mark != \"GG\":\n\t\tdisplay_game(board, message)\n\t\trow,col = get_coordinates()\n\t\tboard, player_mark, turn_counter = update_game(board, row, col, player_mark, turn_counter)\n\t\tplayer_mark, message = check_status(board, player_mark, turn_counter)\n\telse:\n\t\tdisplay_game(board, message)", "def __init__(self, root):\n with open(\"words.txt\") as fp:\n for word in fp:\n self.WORD_LIST.append(word.strip())\n\n self.root = root\n root.title(\"Hangman\")\n\n self.strikes = 0\n self.word = choice(self.WORD_LIST).upper()\n self.word_underscored = [\"_\"] * len(self.word)\n self.guess = \"\"\n self.guessed = \"GUESSES: \"\n self.photo = PhotoImage(file=\"images/hangman01.png\")\n\n # Canvas where image will be put.\n self.canvas = Canvas(root, width=600, height=500)\n self.canvas.grid(row=0, columnspan=3)\n self.canvas.create_image(340, 240, image=self.photo)\n\n # Displays word with underscores for characters.\n self.word_blank = StringVar()\n self.word_blank.set(\" _ \" * len(self.word))\n self.word_blank_label = Label(root, textvariable=self.word_blank)\n self.word_blank_label.grid(row=1, column=0, sticky=W+E)\n\n self.enter_letter_label = Label(root, text=\"ENTER LETTER: \")\n self.enter_letter_label.grid(row=1, column=1, sticky=W+E)\n\n # Entry field to accept letters.\n letterfield = root.register(self.validate)\n\n self.entry = Entry(root, validate=\"key\", validatecommand=(letterfield, \"%P\"))\n self.entry.grid(row=1, columnspan=4, column=2, sticky=W+E)\n\n # Displays the guesses that have been made\n self.guesses = StringVar()\n self.guesses.set(self.guessed)\n self.guesses_label = Label(root, textvariable=self.guesses)\n self.guesses_label.grid(row=2, column=3, sticky=W)\n\n # New game, with values reset.\n self.new = Button(root, text=\"New Game\", command=self.new_game)\n self.new.grid(row=2, column=0, sticky=W+E)\n\n self.add = Button(root, text=\"HINT\", state=DISABLED)\n self.add.grid(row=2, column=1, sticky=W+E)\n\n # Submit button, calls check_guess() function.\n self.submit = Button(root, text=\"Submit\", command=self.check_guess)\n self.submit.grid(row=2, column=2, sticky=W+E)", "def setup(self):\n setup = RandomWordGenerator().get()\n self.formatted_word = ConvertWord().convert_to_dict(setup)\n self.underscore_word = HangmanUnderscoreDiagram(\n setup).create_hidden_word()\n self.failed_guesses = 0\n print(\"Hello\")\n self.has_won = False\n self.start_game(True)", "def welcome_screen(self):\n print()\n print('*M*A*S*T*E*R*M*I*N*D*')\n print('Welcome to Mastermind!')\n print('The goal of this game is to guess the secret code.\\n' +\n 'You have as many guesses as you need.\\n' +\n 'After every guess you 
will see a result of that guess.\\n' +\n 'A result may look like this:\\n' +\n 'Your guess: 1,2,3,4\\n' +\n \"The result: ['1', '-', 'C', '-']\")\n print('This means the following:\\n' +\n 'The first number, 1, is in the correct position\\n' +\n 'The second number, 2, is not included in the secret code\\n' +\n 'The third number, 3,' + \n ' is in the code but is in the wrong position\\n' +\n 'The fourth number, 4, is not included in the code')\n print('When you have the correct numbers ' +\n 'in the right place, you win!\\n' +\n 'Try to beat the game in as few guesses as possible.\\n' +\n 'The first thing you will do is decide if' +\n 'you want standard or custom game.\\n' +\n 'Only the standard game can save you highscore')", "def play_game(self):\n player = Player(input(\"What is your name?\"))\n while player.health > 0:\n input(\"Press t to start another turn\")\n n = random.randint(0, 3)\n if n == 0:\n if self.monster_attack(player):\n break\n elif n == 1:\n self.find_gold(player)\n else:\n print(\"Nothing happened!\")", "def main() -> None:\n # the current game is initialized with 1, 3, 5, 7 matches on the 4 rows.\n game: List[int] = [1, 3, 5, 7]\n\n print(\"\\nGame of Nim\")\n print( \"===========\")\n display_game(game)\n start = input(\"Do you want to start? (y/n) \")\n print()\n if start==\"y\" or start==\"Y\":\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n while True:\n print(\"My turn\")\n computer_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"I WON\\n\")\n break\n print(\"Your turn\")\n user_turn(game)\n display_game(game)\n if is_finished(game):\n print(\"YOU WON\\n\")\n break", "def play(verbose, no_ai):\n if verbose and no_ai:\n click.echo(\"Verbose option has no effect when no_ai option is selected!\\n\")\n click.echo(\"Welcome to the mastermind game!\")\n if no_ai:\n return run_no_ai()\n users_input = get_users_input()\n results = run(users_input, verbose)\n click.echo(\n f\"I {'won' if results['result'] else 'lost'} this game after {singular_or_plural(results['turns'],'turn')}\"\n )", "def main():\r\n print(WELCOME_MESSAGE)\r\n\r\n playing = True\r\n while playing:\r\n\r\n # Valid inputs that the user can use\r\n move_actions = (UP, DOWN, LEFT, RIGHT)\r\n other_actions = (GIVE_UP, HELP)\r\n\r\n grid_size = int(input(BOARD_SIZE_PROMPT))\r\n\r\n # Get the puzzle and its solution\r\n solution = get_game_solution(WORDS_FILE, grid_size)\r\n puzzle = shuffle_puzzle(solution)\r\n\r\n solved = check_win(puzzle, solution)\r\n print_solution_position(solution, puzzle)\r\n\r\n # Continue to loop until the puzzle is solved or the user gives up\r\n while not solved:\r\n player_action = input(DIRECTION_PROMPT)\r\n\r\n # Player move input handler\r\n # Updates the puzzle with the new board layout, if fail alert user\r\n if player_action in move_actions:\r\n move_attempt = move(puzzle, player_action)\r\n if move_attempt:\r\n puzzle = move_attempt\r\n else:\r\n print(INVALID_MOVE_FORMAT.format(player_action))\r\n\r\n # Other inputs handler\r\n elif player_action in other_actions:\r\n if player_action == GIVE_UP:\r\n break\r\n elif player_action == HELP:\r\n print(HELP_MESSAGE)\r\n\r\n # If there is no match for input, alert the user\r\n else:\r\n print(INVALID_MESSAGE)\r\n\r\n print_solution_position(solution, puzzle)\r\n solved = check_win(puzzle, solution)\r\n\r\n # Show message depending if user won or not\r\n if solved:\r\n print(WIN_MESSAGE)\r\n else:\r\n print(GIVE_UP_MESSAGE)\r\n\r\n # Check if the user wishes to play again\r\n play_again = 
input(PLAY_AGAIN_PROMPT)\r\n if not (play_again.lower() == \"y\" or play_again == \"\"):\r\n playing = False\r\n print(BYE)", "def play_game():\n difficulty = select_game_difficulty()\n mad_lib, answers = get_mad_lib_and_answers(difficulty)\n max_guesses = find_max_guesses()\n current_blank = 1\n while current_blank <= len(answers):\n mad_lib, current_blank = ask_question(\n mad_lib, current_blank, answers[current_blank - 1], max_guesses\n )\n if mad_lib is None:\n return False\n print(mad_lib + \"\\nYou won!\\n\")\n return True", "def try_to_guess(word):\n\n # set number of tries based on word length\n if 4 < len(word) < 7:\n tries = 4\n elif 7 < len(word) < 12:\n tries = 8\n else:\n tries = 12\n \n # create placeholder word eg: ---\n placeholder = ['-' for _ in range(len(word))]\n \n # list to check if letter was already guessed\n guesses = []\n\n while tries > 0:\n print('\\n' + ''.join(placeholder))\n letter = str(input(f\"Input a letter: \"))\n\n # only one lower case alphanum character\n if len(letter) > 1:\n print(\"You should input a single letter\")\n elif not letter.isalnum() or not letter.islower():\n print(\"It is not an ASCII lowercase letter\")\n \n elif letter in guesses:\n print(\"You already typed this letter\") \n elif letter not in word:\n print(\"No such letter in the word\")\n tries -= 1\n \n # we have a good letter\n else:\n for i, v in enumerate(word):\n \n if v == letter:\n placeholder[i] = letter\n \n if ''.join(placeholder) == word:\n print()\n print(''.join(placeholder))\n print(\"You guessed the word!\\nYou survived!\")\n return\n \n guesses.append(letter)\n \n else:\n print(\"You lost!\")\n print(f\"The word was {word}\")", "def run_application():\n show_theme_message()\n keep_playing = 'y'\n health_meter = {}\n reset_health_meter(health_meter)\n show_game_mission()\n\n while keep_playing == 'y':\n reset_health_meter(health_meter)\n play_game(health_meter)\n keep_playing = input(\"\\nPlay again? Yes(y)/No(n): \")", "def guess_word():\n words = [\"tom\", \"jerry\", \"apple\", \"banana\", \"peal\", \"water\", \"ice\", \"one\", \"tw0\", \"three\"]\n word = list(words[random.randint(0, len(words)-1)])\n print(word)\n length = len(word)\n underline = [\"_ \" for i in range(length)]\n count = 5\n while count > 0:\n # 输出当前状态\n output = \"\".join(underline)\n print(output)\n guess = input(\"guess a char(left guess %d times ):\" % (count))\n if guess in word:\n index = word.index(guess)\n underline[index] = guess\n word[index] = \" \"\n else:\n count -= 1\n # 跳出循环的条件\n if word.count(\" \") == length:\n break\n if count > 0:\n print(\"yur are right!\")\n else:\n print(\"游戏结束!你没有猜对。\")", "def main():\n print('\\n\\n\\n\\nThe Mystery Word Challenge')\n print(\"\"\"\\n\\nWhat level do you want to play?\\n\n 1 - Easy 4-6 letters\n 2 - Medium 6-8 letters\n 3 - Hard 8 + letters\n 4 - QUIT\\n\"\"\")\n\n level = input(\"Enter a number 1-4 to start:\\n\")\n\n\n # word = ''\n if level is '4':\n print(\"\\nThanks for playing the game.\\n\")\n exit()\n elif level is '1':\n word = random.choice(easy_words(word_list))\n elif level is '2':\n word = random.choice(medium_words(word_list))\n elif level is '3':\n word = random.choice(hard_words(word_list))\n\n print(\"\\n Your word is {} letters long.\\n\".format(len(word)))\n\n chances = 8\n gameplay(chances, word)", "def do_updates(self):\n if self.words.check_for_dashes():\n self.console.write(\"Great job! 
We are so proud of you!\")\n self.keep_playing = False\n \n if not self.good_guess:\n del self.jumper.jumper_list[0]\n if self.jumper.jumper_list[0] == \" 0 \":\n webbrowser.open(\"https://www.youtube.com/watch?v=oHg5SJYRHA0&ab_channel=cotter548\")\n self.keep_playing = False", "def playGame(wordList: List[str]) -> None:\n hand = {}\n while True:\n user_ans = input('\\nEnter n to deal a new hand, r to replay the last'\n + 'hand, or e to end game: ')\n if user_ans == 'n':\n hand = dealHand(HAND_SIZE)\n playHand(hand, wordList, HAND_SIZE)\n\n elif user_ans == 'e':\n break\n elif user_ans == 'r':\n if len(hand) == 0:\n print('You have not played a hand yet. Please play a new hand'\n + 'first!')\n else:\n playHand(hand, wordList, HAND_SIZE) \n else:\n print('Invalid command.')", "def display_letters(word, guesses):\n pass", "def start_game(attempts,sentences,answers,difficulty):\n cycle_count = 0\n least_number_of_attempts = 0;\n while cycle_count < answers_number:\n if attempts == least_number_of_attempts:\n print \"Sorry, you lose!\"\n sys.exit()\n given_answer = raw_input(sentences[difficulty]).lower()\n while given_answer == \"\":\n print \"you cant leave this field empty please write in the right answer.\"\n given_answer = raw_input(sentences[difficulty]).lower()\n if given_answer == answers[difficulty][cycle_count]:\n sentences[difficulty] = string.replace(sentences[difficulty], \"__%d__\" %(cycle_count+1) , given_answer)\n print \"Correct answer!\"\n if cycle_count == answers_number-1 :\n print \"Congratulations you won :)\"\n cycle_count += 1\n else:\n attempts -= 1\n print \"Wrong answer! Try again! you have %d attempts left\"%attempts", "def play_game(self):\r\n\r\n print('Welcome to a game of Concentration!!')\r\n if self.who_goes_first():\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n\r\n while True:\r\n if self.match:\r\n self.user_turn()\r\n else:\r\n self.computer_turn()\r\n self.check_game_end()", "def show_current_game_state(num_of_tries, secret_word, old_letters_guessed):\n print_hangman(num_of_tries)\n # 4.\n print_show_hidden_word_box(secret_word, old_letters_guessed)\n\n return None", "def alphabet_press(user_letter_pick):\n # Checks to see if the START NEW GAME button was successfully pressed w/o error\n if lets_begin.winfo_exists() != 1:\n messagebox.showwarning(title=\"Pump the brakes!\", message=\"It appears you've gotten a bit ahead of\\n\"\n \"yourself, though your eagerness is appreciated\\n\"\n \"please follow procedure :\\\\\")\n else:\n attempt = multiple_letters_check(user_letter_pick)\n if attempt == True: # Checks if the user entered the same letter, ignores if it's the first iteration\n global first_strike_through\n first_strike_through = True\n global past_first_iteration\n global make_to_string\n if past_first_iteration == True:\n b_label = []\n blanks = return_to_string # Just giving it a name with better context\n else:\n b_label = []\n blanks = make_to_string\n for n in blanks: # Creating a list of chars from the string of blanks\n b_label += n\n conversion = [] # used to combine the original entry into a string with \" \" in between the chars\n global original_user_entry\n original_entry_length = len(original_user_entry)\n # create a string of the original entry + \" _\"\n for h in range(original_entry_length): # create a string of the original entry + \" \" to gain the length\n conversion.append(\" \")\n conversion.append(original_user_entry[h])\n remove_end_spaces(conversion) # Removes any additional spaces from the end\n 
remove_double_spaces(conversion) # Removes the space (' ') before any double spaces created by newlines (\\n)\n global game_over\n global correct_guess_count\n global blank_labels\n global new_blank_labels\n good_guess = FALSE\n j = 0\n for letters in conversion: # This loop checks if the user's pick matches any of the letters from the original entry\n if letters == ' ':\n j += 1\n continue # skips an iteration\n if letters == alphabet[user_letter_pick]: # If the user guesses the correct letter\n b_label[j] = alphabet[user_letter_pick] # Assign the letter corresponding spot in the string\n make_to_string = \"\" # resets the variable\n for c in b_label: # this conversion back to a string is to make the labels look better\n make_to_string += c\n new_blank_labels = Label(root, text=make_to_string, bg=\"gray\", fg=\"white\") # Update the Blanks label\n new_blank_labels.grid(row=65, column=9, rowspan=19, columnspan=2, sticky='NESW')\n past_first_iteration = False # Causes make_to_string to hold its value\n good_guess = TRUE # The player guessed the right letter\n correct_guess_count += 1\n if correct_guess_count == length_of_chars_only: # The User wins the game!!\n # Clears this info\n make_to_string = \"\"\n correct_guess_count = 0\n b_label.clear()\n blanks = \"\"\n conversion.clear()\n j = 0\n past_first_iteration = True\n first_strike_through = False\n strike_one.clear()\n winner() # Let's the player know they won and displays the winner graphic\n j += 1\n if good_guess == FALSE: # The player guessed the wrong letter\n game_over += 1\n if game_over >= 6:\n # Clears this info\n make_to_string = \"\"\n b_label.clear()\n blanks = \"\"\n conversion.clear()\n j = 0\n past_first_iteration = True\n first_strike_through = False\n strike_one.clear()\n game_over_remove_labels(game_over) # Removes the labels based on the parameters\n if game_over >= 6:\n game_over = 0\n correct_guess_count = 0\n strike_one.append(user_letter_pick)", "def game_intro():\n print(\"\\nWould you like to play a game of Rock, Paper, Scissors? (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n # If user wants to play game\n if answer in yes:\n time.sleep(2)\n print(\"\\nGreat!\")\n print(\"\\nDo you know the rules? (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n\n if answer in yes:\n print(\"\\nGood. Let's play one round and see how it goes.\")\n time.sleep(3)\n print(\"\\nThe first round is starting now!\")\n time.sleep(1)\n first_round()\n elif answer in no:\n print(\"\\nWe play against each other.\")\n time.sleep(2)\n print(\"\\nWe must choose one of these:\")\n time.sleep(2)\n print(\"\\nROCK\")\n print(\"\\nPAPER\")\n print(\"\\nSCISSORS\")\n time.sleep(2)\n print(\"\\nRock beats Scissors beats Paper beats Rock. Very simple!\")\n time.sleep(3)\n print(\"\\nThe first round is starting now!\")\n time.sleep(1)\n first_round()\n else:\n print(\"\\nWrong answer!\\n\")\n game_intro()\n # If user doesn't want to play game they can choose to have a story or not\n elif answer in no:\n print(\"\\nOh really? I'm a bit disappointed. We can do something else.\")\n time.sleep(2)\n print(\"\\nWould you like me to tell you a story? (Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n # If so then they move on to story_duck_language\n if answer in yes:\n story_duck_language()\n # If not, user has one last chance to change mind or else the app ends\n elif answer in no:\n print(\"\\nHmmm. Looks like you're not in the mood for anything.\")\n time.sleep(2)\n print(\"\\nIn which case let's finish this at once! 
(Y/N)\\n\")\n answer = input()\n answer = answer.lower()\n if answer in no:\n print(\"\\nGreat! Let's start again.\")\n time.sleep(2)\n game_intro()\n # A polite farewell from Duck\n elif answer in yes:\n print(\"\\nWell... It was nice meeting you.\")\n time.sleep(3)\n end()\n # At this point, Duck is a bit pissed off so doens't give user\n # a chance to replay in case of invalid input and app ends\n else:\n print(\"\\nInvalid answer. I'll take that as a no.\")\n time.sleep(3)\n end()\n # See previous comment\n else:\n print(\"\\nInvalid answer. I'll take that as a no.\")\n time.sleep(3)\n end()\n else:\n print(\"\\nTry again!\")\n time.sleep(1)\n game_intro()", "def main():\n board_state = [['_', '_', '_'],\n ['_', '_', '_'],\n ['_', '_', '_']]\n\n player_turn = int(input(\"Who goes first - select AI(0) or Human(1)? \").strip())\n human_marker = input(\"Select marker - 'X' or 'O'? \").strip()\n \n play(board_state, player_turn, human_marker, 0)", "def welcome():\n print(colored(\"Hello there \\U0001F60A, Welcome to Guess the Number game\", \"green\"))\n print(\"Game Rule -> \")\n print(\n \"In this game you have to guess a number and you will have total 5 chances to guess it correct\"\n \"\\n\"\n \"You guess should be in between 1 and 25\"\n )", "def guess(word, old_ans):\n life = N_TURNS\n while life > 0:\n guess_ch = input('Your guess: ')\n guess_ch = guess_ch.upper()\n if guess_ch.isalpha() != True or len(guess_ch) != 1:\n print('Illegal format.')\n else:\n ans = ''\n if word.find(guess_ch) == -1:\n # when user doesn't find the right character\n print('There is no ' + guess_ch + \"'s in the word.\")\n life -= 1\n life = life\n for ch in word:\n if ch == guess_ch:\n ans += ch\n else:\n ans += '-'\n else:\n # when user make a correct guess that find out the right character of the word\n print('You are correct!')\n for ch in word:\n if ch != guess_ch:\n ans += '-'\n else:\n ans += guess_ch\n new_ans = ''\n for i in range(len(old_ans)):\n # to keep the previous right guess' result\n ch = old_ans[i]\n if ch.isalpha():\n new_ans += ch\n elif ch != ans[i]:\n new_ans += guess_ch\n else:\n new_ans += ch\n old_ans = new_ans\n if old_ans.isalpha():\n # when the user find all characters of the random word ans still alive\n print('You win!!')\n print('The word was: '+word)\n break\n else:\n if life > 0:\n print('The word looks like '+old_ans)\n print('You have '+str(life)+' guesses left.')\n # when the user make wrong guesses and finish all his/her guess opportunities\n if life == 0:\n print('You are completely hung : (')\n print('The word was: '+word)", "def play_game(health_meter):\n huts = occupy_huts()\n idx = process_user_choice()\n reveal_occupants(idx, huts)\n\n if huts[idx - 1] != 'enemy':\n print_bold(\"恭喜! 你赢了!!!\")\n else:\n print_bold('发现敌人! ', end='')\n show_health(health_meter, bold=True)\n continue_attack = True\n\n # Loop that actually runs the combat if user wants to attack\n while continue_attack:\n continue_attack = input(\".......继续战斗? 
(y/n): \")\n if continue_attack == 'n':\n print_bold(\"敌我状态如下...\")\n show_health(health_meter, bold=True)\n print_bold(\"GAME OVER!\")\n break\n\n attack(health_meter)\n\n # Check if either one of the opponents is defeated\n if health_meter['enemy'] <= 0:\n print_bold(\"幸运的家伙,你赢的了胜利女神的光顾!\")\n break\n\n if health_meter['player'] <= 0:\n print_bold(\"你输了,快逃,下次继续吧\")\n break", "def bye_bye():\n\n os.system(\"clear\")\n print(\"Your opponent was too strong...\")\n time.sleep(1)\n print(\"You've should be better prepared...\")\n time.sleep(2)\n print()\n print()\n for i in range(20):\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)\n os.system(\"clear\")\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n os.system(\"clear\")\n time.sleep(0.05)\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n os.system(\"clear\")\n time.sleep(0.05)\n print(\"\"\"\n * ) ( \n ( ( ( ` ( /( )\\ ) \n )\\ ) )\\ )\\))( ( )\\())( ( ( (()/( \n (()/( ((((_)( ((_)()\\ )\\ ((_)\\ )\\ )\\ )\\ /(_)) \n /(_))_)\\ _ )\\(_()((_|(_) ((_|(_)((_|(_)(_)) \n (_)) __(_)_\\(_) \\/ | __| / _ \\ \\ / /| __| _ \\ \n | (_ |/ _ \\ | |\\/| | _| | (_) \\ V / | _|| / \n \\___/_/ \\_\\|_| |_|___| \\___/ \\_/ |___|_|_\\ \"\"\")\n time.sleep(0.05)", "def main():\r\n clean()\r\n h_choice = '2' # \r\n c_choice = '1' # \r\n first = '' # if human is the first\r\n\r\n # Human may starts first\r\n clean()\r\n while first != 'Y' and first != 'N':\r\n try:\r\n print(\" $$\\ $$\\ $$$$$$\\ $$$$$$$\\ $$$$$$$\\ $$$$$$$$\\ $$$$$$$\\ $$$$$$\\ \") \r\n print(\" $$ | $$ |$$ __$$\\ $$ __$$\\ $$ __$$\\ $$ _____|$$ __$$\\ $$ __$$\\ \")\r\n print(\" $$ | $$ |$$ / $$ |$$ | $$ |$$ | $$ |$$ | $$ | $$ |$$ / \\__|\")\r\n print(\" $$$$$$$$ |$$ | $$ |$$$$$$$ |$$$$$$$ |$$$$$\\ $$$$$$$ |\\$$$$$$\\ \")\r\n print(\" $$ __$$ |$$ | $$ |$$ ____/ $$ ____/ $$ __| $$ __$$< \\____$$\\ \")\r\n print(\" $$ | $$ |$$ | $$ |$$ | $$ | $$ | $$ | $$ |$$\\ $$ |\")\r\n print(\" $$ | $$ | $$$$$$ |$$ | $$ | $$$$$$$$\\ $$ | $$ |\\$$$$$$ |\")\r\n print(\" \\__| \\__| \\______/ \\__| \\__| \\________|\\__| \\__| \\______/ \") \r\n \r\n first = input('First to start?[y/n]: ').upper()\r\n except (EOFError, KeyboardInterrupt):\r\n print('Bye')\r\n exit()\r\n except 
(KeyError, ValueError):\r\n print('Bad choice')\r\n\r\n # Main loop of this game\r\n while len(empty_cells(board)) > 0 and not game_over(board):\r\n \r\n if first == 'N':\r\n print(\"Step\")\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n first = ''\r\n render(board, c_choice, h_choice)\r\n print(\"Hope\")\r\n xi = int (input(\"Initial row HUMAN(0-9): \"))\r\n yi = int (input(\"Initial column HUMAN(0-9): \"))\r\n human_turn(c_choice, h_choice,xi,yi)\r\n render(board, c_choice, h_choice)\r\n xi = int (input(\"Initial row COMP(0-9): \"))\r\n yi = int (input(\"Initial column COMP(0-9): \"))\r\n ai_turn(c_choice, h_choice, xi, yi)\r\n\r\n # Game over message\r\n if wins(board, HUMAN):\r\n clean()\r\n print(f'Human turn [{h_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU WIN!')\r\n elif wins(board, COMP):\r\n clean()\r\n print(f'Computer turn [{c_choice}]')\r\n render(board, c_choice, h_choice)\r\n print('YOU LOSE!')\r\n else:\r\n clean()\r\n render(board, c_choice, h_choice)\r\n print('DRAW!')\r\n\r\n exit()", "def new_game():\n # Prints the welcome message to the terminal\n welcome_message()\n # Gets the players name\n player_name = name_input()\n # Creates the players game board\n player_board = GameBoard(player_name, 'player')\n # Creates the players guess board\n user_guess = GameBoard('GUESS', 'user guess')\n # Creates the computers board\n computer_board = GameBoard(\"COMPUTER's\", 'computer')\n # Creates the computers guess board\n computer_guess = GameBoard('COMPUTER GUESS', 'computer guess')\n # Randomly places the computers ships on their board\n computer_board.place_ships()\n # Prints the players board to the terminal for reference\n player_board.print_board()\n # Allows the player to place their ships\n player_board.place_ships()\n time.sleep(2)\n # Prints the players guess board to terminal for reference\n print(PHASE)\n print(' ')\n # Takes turns attacking until winner\n run_game(player_board, user_guess, computer_board, computer_guess)\n # Asks the player if they want to play again or quit\n play_again()", "def start():\r\n introduction()\r\n score = duck_shooting1()\r\n dogs()\r\n play_again(score)", "def play_command(update,context):\n update.message.reply_text('Rkrt: Welcome on board. Let\\'s see if you are worth the challenge. To find the invite code and land on planet hackazon you will need to solve this first. Ready for a ride?!')\n time.sleep(5)\n update.message.reply_text('Mx: During intergalactical travel, time does not matter. Any enemies could be listening in at any time. This is why the crew is sometimes forced to used coded languages to exchange messages between vessels. To decrypt messages every crew member can use the key on their hardware tokens.')\n time.sleep(10)\n update.message.reply_text('Jms: Mx we are getting a distress signal from vessel Vigenere. Do you copy?')\n time.sleep(3)\n update.message.reply_text('Mx: [gasps...]')\n time.sleep(1)\n update.message.reply_text('Mx: This one is for you rookie... See you on the other side.')\n update.message.reply_text('Kyjkda kghc tir Yeevobyj: BgXfsGofrCyrDouwfh\\r\\nUsfcfqg zb dywzv lcfy ij cqff hsnal jjoa:\\r\\nCKJ{en55td2my6jse8361a427p3xf319tf12}')", "def guess_word(self):\r\n guess = input(\"# Guess the Word :\")\r\n if not guess:\r\n print(\"Please enter a valid word.\")\r\n else:\r\n if game_instance.check_word(guess):\r\n print(\"Correct! 
You did it Champ!\")\r\n game_instance.calculate_score(self.frequency)\r\n self.instances.append(game_instance)\r\n obj.create_new_game()\r\n else:\r\n print(\"Wrong Guess. Try Again!\")", "def play(self):\n self.player = Knight()\n self._occupy_huts()\n acquired_hut_counter = 0\n\n self.show_game_mission()\n self.player.show_health(bold=True)\n\n while acquired_hut_counter < 5:\n idx = self._process_user_choice()\n self.player.acquire_hut(self.huts[idx-1])\n\n if self.player.health_meter <= 0:\n print_bold(\"YOU LOSE :( Better luck next time\")\n break\n\n if self.huts[idx-1].is_acquired:\n acquired_hut_counter += 1\n\n if acquired_hut_counter == 5:\n print_bold(\"Congratulations! YOU WIN!!!\")", "def play_game():\n # let the user select her levle\n level = raw_input(\"\"\"\n Please select a game difficulty by typing it in!\n Possible choices include easy, medium, and hard.\n \"\"\")\n print \"You've chosen %s!\\n\" %(level)\n print \"You will get %s guesses per problem\\n\" %(number_of_guess)\n\n quiz_and_answer = quiz_and_answer_list[level]\n quiz, answer = quiz_and_answer[0], quiz_and_answer[1]\n\n # iterate through the blanks.\n for index, value in enumerate(answer):\n if index != len(answer) - 1:\n print \"The current paragraph reads as such:\\n\"\n print quiz\n guess = raw_input(\"What should be substituted in for __%s__?\" %(index + 1))\n quiz = guess_until_right(index, value, guess, quiz)\n if index == len(answer) - 1:\n print quiz\n print \"You won!\"\n else:\n print \"Correct!\\n\"", "async def numguess(self, ctx):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n logger.info('Games.numguess', extra={'invoker': ctx.message.author.name})\r\n guess = None\r\n limDn = 0\r\n limUp = 100\r\n tries = 7\r\n secret = random.randint(1, 100)\r\n await ctx.send(\"\"\"Arr! I'm the Dread Pirate Roberts, and I have a secret!\r\nIt's a number from {} to {}. I'll give you {} tries.\r\nSend a number to guess it.\"\"\".format(limDn, limUp, tries))\r\n while guess != secret and tries > 0:\r\n await ctx.send(\"What's yer guess, matey?\")\r\n result = ''\r\n guess = await ctx.bot.wait_for('message',\r\n check=lambda m: m.channel == ctx.channel and re.match('[0-9]+', m.content))\r\n guess = int(guess.content)\r\n if guess == secret:\r\n break\r\n elif guess < limDn or guess > limUp:\r\n result += \"Out of range, ye swab!\\n\"\r\n elif guess < secret:\r\n result += \"Too low, ye scurvy dog!\\n\"\r\n limDn = guess\r\n elif guess > secret:\r\n result += \"Too high, landlubber!\\n\"\r\n limUp = guess\r\n tries -= 1\r\n result += \"Yer range is {} to {}; ye have {} tries left.\".format(limDn, limUp, tries)\r\n await ctx.send(result)\r\n if guess == secret:\r\n await ctx.send(\"Avast! Ye got it! Found my secret, ye did! With {} tries left!\".format(tries))\r\n else:\r\n await ctx.send(\"No more tries, matey! Better luck next time! The secret number was {}.\".format(secret))", "def talk_m10_23_7420():\n \"\"\"State 0: Start state\"\"\"\n while Loop('mainloop'):\n \"\"\"State 1: Conversation: Start\"\"\"\n if IsGuest() != 0:\n break\n else:\n pass\n \"\"\"State 3: [Lib] Event: Branch_SubState\"\"\"\n call = talk_m10_23_x9(z43=103651, z44=104150, z45=123020111)\n if call.Get() == 1:\n \"\"\"State 4: [Lib] Reunion hostility_SubState\"\"\"\n # talk:74205010:\"Don't you ever give up!\"\n call = talk_m10_23_x3(text9=74205010, z30=0, z31=20, z32=80)\n if call.Done():\n \"\"\"State 8: [Lib] Hostile state_SubState\"\"\"\n Label('L0')\n # talk:74205060:\"Damn! 
This isn't right!\"\n call = talk_m10_23_x6(z41=123020112, text16=74205060, text17=74205060, z42=74205060)\n if (HpValue() > 1) != 1:\n pass\n elif KilledPlayer() != 0:\n \"\"\"State 5: [Lib] Killing state_SubState\"\"\"\n Label('L1')\n # talk:74205020:\"Next time, you think before you pick a fight!\\n\"\n talk_m10_23_x8(text18=74205020, z46=0)\n Quit()\n elif (HpValue() > 1) != 1:\n pass\n elif KilledPlayer() != 0:\n Goto('L1')\n elif call.Get() == 0:\n while True:\n \"\"\"State 10: Wandering Warrior: Conversation_SubState\"\"\"\n call = talk_m10_23_x45()\n if call.Done():\n Continue('mainloop')\n elif (HpValue() > 1) != 1:\n break\n elif KilledPlayer() != 0:\n Goto('L1')\n elif (NumberOfTimesDamaged(1) > 3) != 0:\n \"\"\"State 7: [Lib] Hostile waiting_SubState\"\"\"\n Label('L2')\n # talk:74205030:\"Whoah!\", talk:74205040:\"Hey, watch it there!\", talk:74205050:\"Why, you!\"\n call = talk_m10_23_x5(text20=74205030, text21=74205040, text22=74205050, text23=74205030,\n z48=123020115, z49=123020116)\n if call.Done():\n pass\n elif (HpValue() > 1) != 1:\n break\n elif KilledPlayer() != 0:\n Goto('L1')\n elif (NumberOfTimesDamaged(1) > 3) != 0:\n \"\"\"State 9: [Lib] First adversification_SubState\"\"\"\n # talk:74205000:\"Do you want some? Fine enough!\"\n call = talk_m10_23_x4(z50=103650, text24=74205000, z51=0, z52=103651)\n if call.Done():\n Goto('L0')\n elif (HpValue() > 1) != 1:\n break\n elif KilledPlayer() != 0:\n Goto('L1')\n elif (NumberOfTimesDamaged(1) > 2) != 0 and GetEventFlag(123020116) != 1:\n Goto('L2')\n elif (NumberOfTimesDamaged(1) > 1) != 0 and GetEventFlag(123020115) != 1:\n Goto('L2')\n \"\"\"State 6: [Lib] Death state_SubState\"\"\"\n # talk:74205070:\"Aiiieegh!\"\n talk_m10_23_x7(text19=74205070, z47=0)\n Quit()\n \"\"\"State 2: Conversation: System: End\"\"\"\n EndMachine()", "def display_state(self,tries : int,word_completion_state : list) -> None: \n print(f\"{'-' * 20}HANGMAN{ '-' * 20}\\n\\n\")\n print(self.hangman_states[-(tries+1)] + \"\\n\")\n print(f\"WORD ------> {' '.join(word_completion_state)}\")\n print(f\"Tries Remaining : {tries}\")", "def run_game(self) -> None:\n decision = 0\n if self._initial:\n self._initial = False\n while decision != 1:\n try:\n display_no_combat_init(self.hero)\n decision = get_user_input([1, 2, -1])\n if decision == -1:\n self._quit()\n elif decision == 2:\n self._show_bag()\n else:\n break\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")\n\n while not self.hero.is_dead:\n try:\n self._load_map()\n except KeyboardInterrupt:\n print(\"[!] If you want to quit, use the provided user interface\")", "def print_title():\r\n HANGMAN_ASCII_ART = \"\"\"welcome to the game hangman\r\n _ _ \r\n | | | | \r\n | |__| | __ _ _ __ __ _ _ __ ___ __ _ _ __ \r\n | __ |/ _` | '_ \\ / _` | '_ ` _ \\ / _` | '_ \\ \r\n | | | | (_| | | | | (_| | | | | | | (_| | | | |\r\n |_| |_|\\__,_|_| |_|\\__, |_| |_| |_|\\__,_|_| |_|\r\n __/ | \r\n |___/\r\n\"\"\"\r\n print(HANGMAN_ASCII_ART)", "def start_new_game(word, max_tries):\n\n # replace the pass statement with your code\n pass", "def play_game(health_meter):\n huts = occupy_huts()\n idx = process_user_choice()\n reveal_occupants(idx, huts)\n\n if huts[idx - 1] != 'neprijatelj':\n print_bold(\"Čestitke! POBIJEDIO SI!!!\")\n else:\n print_bold('neprijatelj PRIMJEĆEN! 
', end='')\n show_health(health_meter, bold=True)\n continue_attack = True\n\n # Ponavljaj dok korisnik zeli i dalje napadati\n while continue_attack:\n continue_attack = input(\".......nastavi napad? (d/n): \")\n if continue_attack == 'n':\n print_bold(\"BJEŽIM s stanjem zdravlja ...\")\n show_health(health_meter, bold=True)\n print_bold(\"KRAJ IGRE!\")\n break\n\n attack(health_meter)\n\n # provjeri je li netko boraca poginuo\n if health_meter['neprijatelj'] <= 0:\n print_bold(\"neprijatelj poražen! POBIJEDIO SI!!!\")\n break\n\n if health_meter['igrac'] <= 0:\n print_bold(\"IZGUBIO SI :( više sreće sljedeći puta\")\n break", "def welcome_screen(self):\n print()\n print('P*O*K*E*R')\n print('Welcome to a 5-card poker game,\\n' +\n 'The goal is the get a better hand than the AI.')\n print('To do this you get one chance to swap cards' +\n 'that are in your hand')\n print('You swap like this:\\n' +\n '1. Choose how many cards you want to swap\\n' +\n '2. Write the number of the card(s) you want to swap, like this:\\n' +\n 'If you want to swap card 2, type in 2.\\n' +\n 'If you want to swap card 1 and 4, type 1,4')\n print('Next both your and AI hand is shown,\\n' +\n 'and the winner is declared.')\n print('For information on what hand beats what, \\n' +\n 'and what happens when both players have an equally good hand,\\n' +\n 'please follow the link below:\\n' +\n 'https://github.com/oljung/portfolio-project-three\\n' +\n 'NOTE! Ctrl + c will terminate the app, use right click to copy')\n message = 'Would you like to play a round? Y(es) or N(o): '\n answer = InputHandler.input_bool(message)\n if answer:\n self.run_game()", "def game_start():\n # Greets user, prompts for name, and asks if they are ready\n input(\"Hello there, welcome to the number guessing game! \" \n \"What is your name? \\n \\n\") \n print(\"\\nAre you ready to play?\") \n\n # Uses .upper() so that the input is not case sensitive\n game_choice = input(\"Enter 'YES' to start the game, \" \n \"or enter 'NO' to quit. \\t\").upper() \n game_loop = True # Intialized as true to make game_loop repeat\n\n while game_loop: # Repeatedly prompts user with different game choices\n\n console_clear(1) # Clear console with 1 second delay\n\n # If user enters \"YES\", the game starts using the game_number function\n # below. If user enters \"NO\", they are prompted to system exit. If \n # \"YES\" or \"NO\" are not entered, the greeting starts over\n if game_choice == \"YES\": \n console_clear(1) \n game_number()\n\n elif game_choice == \"NO\": \n console_clear(1) \n sys.exit()\n\n else: \n game_start()" ]
[ "0.8373847", "0.8304962", "0.82357603", "0.8165628", "0.7786699", "0.7716054", "0.7695237", "0.7678341", "0.7639744", "0.7586323", "0.7568711", "0.75466156", "0.7424747", "0.7330501", "0.7276859", "0.72238797", "0.72146386", "0.71630037", "0.7157477", "0.714775", "0.7065392", "0.70633227", "0.6968037", "0.69526345", "0.68947893", "0.68546313", "0.6826209", "0.6787455", "0.67827094", "0.6752921", "0.672071", "0.6698165", "0.66955197", "0.66760534", "0.6588238", "0.65755427", "0.6572916", "0.65432835", "0.65366733", "0.65352106", "0.6441891", "0.64343715", "0.6433636", "0.6418925", "0.6364993", "0.63358927", "0.6328463", "0.6322312", "0.6308789", "0.63071036", "0.6284708", "0.62840575", "0.62771595", "0.62683696", "0.62396306", "0.62197363", "0.6213928", "0.621224", "0.62067175", "0.6204931", "0.62029314", "0.61987525", "0.61983025", "0.61928433", "0.6182538", "0.6173529", "0.617234", "0.6167275", "0.6158632", "0.6158501", "0.6155179", "0.6141798", "0.6138995", "0.6138067", "0.6105282", "0.6104817", "0.6104013", "0.6102428", "0.61019486", "0.6100786", "0.60936123", "0.60904276", "0.60831106", "0.60684305", "0.6046703", "0.6045471", "0.6039851", "0.60333455", "0.6031738", "0.6016167", "0.6013507", "0.60108984", "0.6010698", "0.6005925", "0.6005323", "0.6002641", "0.5999977", "0.5991346", "0.59862316", "0.59824735" ]
0.77418953
5
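One plausible reading of the trailing score and rank values just above, shown as a worked sketch: the rank appears to equal the number of negatives whose score exceeds the positive document's score of 0.77418953 (with -1 marking a record whose positive scores 0.0 and is effectively unranked, as in the next record). This derivation is an assumption inferred from the numbers, not documented behavior of the dataset; the score list below is truncated from the record above.

# Assumption: rank = count of negatives scoring above the positive document.
# Scores are copied (truncated) from the record above; the rule is inferred.
negative_scores = [0.8373847, 0.8304962, 0.82357603, 0.8165628,
                   0.7786699, 0.7716054, 0.7695237]
document_score = 0.77418953
rank = sum(score > document_score for score in negative_scores)
print(rank)  # -> 5, matching the rank value recorded above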
Initialize the view provider
def __init__(self, obj): obj.Proxy = self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_view(self):\n self.view_map = self.ctx.clientmap", "def initialize(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def setUp(self):\n self.theView = View()", "def initView(self):\n return {}", "def __init__( viewname, view ):", "def setup_view(self, view, request, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def viewer_setup(self):\n pass", "def viewer_setup(self):\n pass", "def __init__(self, **kwargs):\n \n # init base\n super().__init__(**kwargs)\n self.name = \"Pero\"\n \n # init view\n self._view = None", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def get_view(self, request=None, args=None, kwargs=None, **initkwargs):\n view = self.view_class(**initkwargs)\n view.setup(request, *(args or ()), **(kwargs or {}))\n return view", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def test_init(self):\n self.view.__init__()\n self.assertIsInstance(self.view.questionnaire, Questionnaire)\n self.assertEqual(self.view.questionnaire, self.questionnaire)", "def __init__(self):\n\t\tself.satchmo_variation_manager = adminviews.variation_manager\n\t\tadminviews.variation_list = self.variation_list\n\t\tadminviews.variation_manager = self.variation_manager\n\t\t# listeners.add_toolbar_context = self.add_my_toolbar_context#(listeners.add_toolbar_context)", "def __init__(self):\n self.view = GuiView(self)\n return", "def init_context_data(self):\n pass", "def __init__(self):\n self.match_views = MatchView.MatchView()", "def initialize(self, context):\r\n pass", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def initialize(self, context):\n pass", "def __init__(self):\n super(RouteLayer, self).__init__()\n\n routes = [(\"^/ping\", views.ping),\n (\"^/e(co)?(?P<eco_message>[^$]+)$\", views.echo),\n (\"^/p(iada)?\\s*$\", views.get_piada)]\n\n routes.extend(MediaViews(self).routes)\n routes.extend(StaticViews(self).routes)\n # routes.extend(GroupAdminViews(self).routes)\n\n self.views = [(re.compile(pattern), callback) for pattern, callback in routes]", "def setup_provider(self):\n pass", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def initialize(context):", "def __init__(self, parent: View):\n super().__init__(parent)", "def setup_view(view, request=None, *args, **kwargs):\n view.request = request\n view.args = args\n view.kwargs = kwargs\n return view", "def __init__(self, module_manager):\n self._module_manager = module_manager\n\n \n self._config = DefaultConfigClass()\n\n # modules should toggle this variable to True once they have\n # initialised and shown their view once.\n self.view_initialised = False", "def setup_view(view, request, *args, **kwargs):\n\n view.request = request\n view.args = 
args\n view.kwargs = kwargs\n setattr(request, \"session\", \"session\")\n messages = FallbackStorage(request)\n setattr(request, \"_messages\", messages)\n return view", "def __init__(self, render_widget: QtWidgets.QWidget, parent=None, **kwargs):\n super(DebugMenuProviderMixin, self).__init__(render_widget=render_widget, parent=parent, **kwargs)\n # Setup member variables:\n self.DebugMenuProviderMixin_on_init()\n self.DebugMenuProviderMixin_on_setup()\n self.DebugMenuProviderMixin_on_buildUI()", "def init_layout(self):\n pass", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def viewfactory(self):\n raise NotImplementedError()", "def init_widget(self):", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def __init__(self):\n self.model = self.load_model()\n self.form_html = self.create_form_html()", "def __init__(self, view):\n self.view = view\n if self.view.num_images_display == 1: # single image\n sizes = [12, 12, 12, 12]\n elif self.view.num_images_display == 2: # card\n sizes = [6, 12, 12, 12]\n else: # gallery\n sizes = [3, 6, 6, 6]\n self.div_class = ('col-lg-%s col-md-%s col-sm-%s col-xm-%s' %\n (sizes[0], sizes[1], sizes[2], sizes[3]))", "def InitView(self):\n\n self._DisablePlotterOptions()\n\n if (self.localModel.datasetCols == 2):\n self.view.Enable2DRadio()\n\n self.Radio2DClicked(True)\n\n if (self.localModel.datasetCols >= 3):\n self.view.Enable2DRadio()\n self.view.Enable3DRadio()\n self.view.Set3DSelected()\n\n self.Radio3DClicked(True)", "def __init__(self):\r\n super().__init__()\r\n self.init_ui()", "def views(self):\r\n return Views(self)", "def initialize(context):\n\n pass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def init_with_context(self, context):\n pass", "def initializeUI(self):\n self.setStyleSheet(abstyle)\n self.setGeometry(140, 100, 860, 484)\n self.setWindowTitle('Emotions Data View')\n self.setupModelView()", "def initialize(self, context):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def __init__(self, context):\n super(InitState, self).__init__(context)", "def my_view(cls):\n return cls.__my_view", "def _init(self):\n pass", "def __init__(self, auth):\n super(Socrata, self).__init__(auth)\n self.views = Views(auth)\n self.sources = Sources(auth)\n self.configs = Configs(auth)", "def __init__(self):\n self.tasks = []\n self.page_context = PageContext()", "def _connectView(self):\n self._view.select_asset = self.select_asset\n self._view.add_assets = self.add_assets\n self._view.remove_assets = self.remove_assets\n self._view.update_assets = self.update_assets\n self._view.commit = self.commit", "def _init_display(self):\n raise NotImplementedError", "def _initialize(self):\n self._frame = ttk.Frame(master=self._root)\n self._ingredients_frame = ttk.Frame(master=self._frame)\n\n self._create_header()\n self._show_ingredient_list()\n self._create_footer()\n\n self._ingredients_frame.grid(row=1, column=1, columnspan=2)\n self._frame.grid_columnconfigure(1, weight=1, minsize=250)", "def __init__(self, views=None, page_load=None, mboxes=None, metrics=None):\n\n self._views = None\n self._page_load = None\n self._mboxes = None\n self._metrics = None\n self.discriminator = None\n\n if views is not None:\n self.views = views\n if page_load is not None:\n self.page_load = 
page_load\n if mboxes is not None:\n self.mboxes = mboxes\n if metrics is not None:\n self.metrics = metrics", "def __init__(self, **kwargs):\r\n\r\n self.def_ms = modulestore()\r\n self.is_using_mongo = True\r\n if isinstance(self.def_ms, XMLModuleStore):\r\n self.is_using_mongo = False\r\n self.msg = u''\r\n self.datatable = []\r\n super(SysadminDashboardView, self).__init__(**kwargs)", "def on_load(self):\n self.__init__()", "def view_init(self, elev=None, azim=None):\n\n self.dist = 10\n\n if elev is None:\n self.elev = self.initial_elev\n else:\n self.elev = elev\n\n if azim is None:\n self.azim = self.initial_azim\n else:\n self.azim = azim", "def setUp(self) -> None:\n self.serializer = EmployeeSerializer\n self.view = EmployeeDetailView()", "def views(self, views):\n\n self._views = views", "def initialize(self, config, context):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self, params=None):\n\n rights = gci_access.GCIChecker(params)\n rights['any_access'] = ['allow']\n\n new_params = {}\n new_params['logic'] = soc.modules.gci.logic.models.student_ranking.logic\n new_params['rights'] = rights\n\n new_params['name'] = \"Student Ranking\"\n new_params['module_name'] = \"student_ranking\"\n new_params['sidebar_grouping'] = 'Student Rankings'\n\n new_params['module_package'] = 'soc.modules.gci.views.models'\n new_params['url_name'] = 'gci/student_ranking'\n\n new_params['scope_view'] = gci_program_view\n\n patterns = []\n patterns += [\n (r'^%(url_name)s/(?P<access_type>show_details)/%(key_fields)s$',\n '%(module_package)s.%(module_name)s.show_details',\n 'Show ranking details.'),\n ]\n\n new_params['extra_django_patterns'] = patterns\n\n params = dicts.merge(params, new_params, sub_merge=True)\n\n super(View, self).__init__(params=params)", "def _initialize(self, **kwargs):\n return None", "def initialise(self, **kwargs):\n pass", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self, parent):\n self.name = \"Base.View\"\n self.parent = parent\n self.Main = parent.Main", "def __init__(self):\n self._context = {}" ]
[ "0.74448436", "0.70989877", "0.69125175", "0.67591083", "0.66585654", "0.653166", "0.6484332", "0.63898385", "0.63898385", "0.6375199", "0.63615674", "0.63615674", "0.63615674", "0.6273222", "0.62673444", "0.6202396", "0.61920524", "0.6179315", "0.6178323", "0.61032915", "0.6096475", "0.6004463", "0.6004463", "0.6004463", "0.59580266", "0.5955447", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59527403", "0.59250873", "0.5919755", "0.58924294", "0.5872437", "0.5847663", "0.58424145", "0.58042", "0.5803724", "0.5779311", "0.5778812", "0.5762985", "0.5739143", "0.5728299", "0.5696558", "0.5690921", "0.5685531", "0.56771433", "0.5667639", "0.5634254", "0.56251633", "0.5623398", "0.5623398", "0.5621704", "0.5621487", "0.55967826", "0.55925274", "0.55779934", "0.5570985", "0.55707663", "0.5558035", "0.5557802", "0.554855", "0.5544782", "0.5543878", "0.5537205", "0.55299765", "0.552461", "0.55180967", "0.55180967", "0.55180967", "0.55180967", "0.55180967", "0.55180967", "0.55180967", "0.55180967", "0.55112857", "0.5509462", "0.55091536", "0.5502934", "0.5502934", "0.5502934", "0.5502934", "0.5502934", "0.5502934", "0.5500815", "0.5494013" ]
0.0
-1
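The two one-line documents in this record and the next ("obj.Proxy = self" and "self.Object = obj.Object") match FreeCAD's scripted ViewProvider pattern, where a proxy object receives display callbacks for a document object. A minimal sketch combining both snippets under that assumption; the class name, parameter name, and comments are illustrative, not taken from the dataset:

# Minimal FreeCAD-style view provider sketch (assumption: the snippets in
# these records come from that API; names here are illustrative).
class ViewProviderSketch:
    def __init__(self, vobj):
        # Register this instance as the proxy so FreeCAD delegates
        # display callbacks (attach, updateData, getIcon, ...) to it.
        vobj.Proxy = self

    def attach(self, vobj):
        # Called on object creation and on document restore; keep a
        # handle to the underlying document object for later use when
        # building the view's scene graph.
        self.Object = vobj.Object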
View provider scene graph initialization
def attach(self, obj): self.Object = obj.Object
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_view(self):\n self.view_map = self.ctx.clientmap", "def viewer_setup(self):\n pass", "def viewer_setup(self):\n pass", "def __init__(self, scene: Scene):\n self.scene = scene", "def __init__(self, scene): # type: (Scene) -> None\n self.scene = scene", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def on_show_view(self):\n self.setup()", "def __init__(self):\n super().__init__()\n # init attributes\n self.grScene: Union[GraphicsScene, None] = None\n\n self.nodes = []\n self.edges = []\n self.scene_width = 64000\n self.scene_height = 64000\n\n # custom flag used to suppress triggering onItemSelected which does a bunch of stuff\n self._silent_selection_events = False\n # flag identifying wether the current scene has been modified\n self._has_been_modified = False\n self._last_selected_items = None\n\n # initialize all listeners\n self._has_been_modified_listeners = [] # list of function to call when the scene is modified\n self._item_selected_listeners = []\n self._items_deselected_listeners = []\n\n # Store callback for retrieving the class for Nodes\n self.node_class_selector = None\n\n self.initUI()\n self.history = SceneHistory(self)\n self.clipboard = SceneClipboard(self)\n\n self.grScene.itemSelected.connect(self.onItemSelected)\n self.grScene.itemsDeselected.connect(self.onItemsDeselected)", "def __init__(self, meta: SceneDescription):\n super().__init__(meta)\n self.scenes = []\n self.nodes = []\n self.meshes = []\n self.materials = []\n self.images = []\n self.samplers = []\n self.textures = []\n\n self.path = None\n self.scene = None\n self.gltf = None", "def __init__(self, graph: SceneGraph) -> None:\n self.graph: SceneGraph = graph", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(30)\n c.elevation(30)\n s.render()", "def set_view(self, s):\n #s.scene.reset_zoom()\n s.scene.z_plus_view()\n c = s.scene.camera\n c.azimuth(-30)\n c.elevation(20)\n s.render()", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def viewAll(self):\n self._sceneviewer.viewAll()", "def __init__(self, parent):\n super(P5, self).__init__(parent)\n self.shapes = []\n self.scenes = []\n self.current_scene = 0\n self.objects = []\n self.lighting = True\n self.draw_axes = True", "def __init__(self):\n self.model = gameModel.Model()\n self.view = gameView.View()", "def create_scene(self):\n \n self.scene=soya.World()", "def _handler_default_view(self, event):\n self._mgr.LoadPerspective(\n self._perspectives['default'])", "def appInit(self):\n glMatrixMode( GL_PROJECTION )\n glLoadIdentity()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0)\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n self.set_lighting()\n glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH)\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def appInit(self):\n glClearColor(1.0, 1.0, 1.0, 1.0)\n glColor(0.0, 0.0, 0.0)\n glPointSize(4.0)\n glMatrixMode(GL_PROJECTION)\n 
glLoadIdentity()\n gluOrtho2D(0.0, WINDOW_WIDTH, 0.0, WINDOW_HEIGHT)\n\n self.scene = Scene()\n\n tri = Triangle()\n tri.set_location(10, 50)\n tri.set_color(0, 1, 1)\n self.scene.addShape(tri)\n\n tri = Triangle()\n tri.set_location(70, 50)\n tri.set_color(1, 0, 1)\n tri.set_size(2, 2)\n self.scene.addShape(tri)\n\n tri = Triangle()\n tri.set_location(300, 50)\n self.scene.addShape(tri)", "def initialize_visualization(self) -> None:\n pass", "def visualize(self):\n app = QtGui.QApplication([''])\n SceneGUI(self)\n app.exec_()", "def initViewer(self, viewer=None, open=False, loadModel=False):\n\n import meshcat\n\n self.viewer = meshcat.Visualizer() if viewer is None else viewer\n\n if open:\n self.viewer.open()\n\n if loadModel:\n self.loadViewerModel()", "def __init__(self, *args, **kwargs):\n \n super(AvatarView, self).__init__(*args, **kwargs)\n \n wm = bpy.context.window_manager\n wm.verse_avatars.add()\n wm.verse_avatars[-1].node_id = self.id\n \n # Force redraw of 3D view\n ui.update_all_views(('VIEW_3D',))\n\n self.scene_node = None\n view_initialized = False\n self.visualized = True\n self.cur_area = None\n self.cur_space = None\n\n if self.id == self.session.avatar_id:\n # Initialize default values\n self.cur_screen = bpy.context.screen\n self.__class__.__my_view = self\n\n # Try to find current 3D view \n for area in bpy.context.screen.areas.values():\n if area.type == 'VIEW_3D':\n self.cur_area = area\n for space in area.spaces.values():\n if space.type == 'VIEW_3D':\n self.cur_space = space\n break\n break\n\n if self.cur_area.type == 'VIEW_3D' and self.cur_space.type == 'VIEW_3D':\n view_initialized = True\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n # Location\n self.location = AvatarLocation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_location))\n # Rotation\n self.rotation = AvatarRotation(\n tg=self.view_tg,\n value=tuple(self.cur_space.region_3d.view_rotation))\n # Distance\n self.distance = AvatarDistance(\n tg=self.view_tg,\n value=(self.cur_space.region_3d.view_distance,))\n # Perspective/Orthogonal\n self.perspective = AvatarPerspective(\n tg=self.view_tg,\n value=(self.cur_space.region_3d.view_perspective,))\n # Width\n self.width = AvatarWidth(\n tg=self.view_tg,\n value=(self.cur_area.width,))\n # Height\n self.height = AvatarHeight(\n tg=self.view_tg,\n value=(self.cur_area.height,))\n # Lens\n self.lens = AvatarLens(\n tg=self.view_tg,\n value=(self.cur_space.lens,))\n # Get current Scene ID\n if bpy.context.scene.verse_node_id != -1:\n scene_node_id = bpy.context.scene.verse_node_id\n else:\n scene_node_id = 0\n self.scene_node_id = AvatarScene(\n tg=self.view_tg,\n value=(scene_node_id,))\n \n # TODO: check following code (may be not needed anymore)\n original_type = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n bpy.ops.view3d.verse_avatar()\n bpy.context.area.type = original_type\n else:\n # TODO: Add some assert, because this should not happen.\n pass\n else:\n self.__class__.__other_views[self.id] = self\n \n if view_initialized is False:\n # Create tag group containing information about view\n self.view_tg = vrsent.VerseTagGroup(\n node=self,\n custom_type=TG_INFO_CT)\n # Create tags with data of view to 3D view\n self.location = AvatarLocation(tg=self.view_tg)\n self.rotation = AvatarRotation(tg=self.view_tg)\n self.distance = AvatarDistance(tg=self.view_tg)\n self.perspective = 
AvatarPerspective(tg=self.view_tg)\n self.width = AvatarWidth(tg=self.view_tg)\n self.height = AvatarHeight(tg=self.view_tg)\n self.lens = AvatarLens(tg=self.view_tg)\n self.scene_node_id = AvatarScene(tg=self.view_tg)", "def __init__(self):\n self.view = GuiView(self)\n return", "def set_view(self):\n self.scene.mlab.view(azimuth=90.0, elevation=-90.0)", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def __init__( viewname, view ):", "def loadViewerModel(self, rootNodeName=\"pinocchio\", color = None):\n\n # Set viewer to use to gepetto-gui.\n self.viewerRootNodeName = rootNodeName\n\n # Load robot meshes in MeshCat\n\n # Collisions\n # self.viewerCollisionGroupName = self.viewerRootNodeName + \"/\" + \"collisions\"\n self.viewerCollisionGroupName = None # TODO: collision meshes\n\n # Visuals\n self.viewerVisualGroupName = self.viewerRootNodeName + \"/\" + \"visuals\"\n\n for visual in self.visual_model.geometryObjects:\n self.loadViewerGeometryObject(visual,pin.GeometryType.VISUAL,color)", "def __init__(self):\n super().__init__()\n self.arch = PoseArch()\n self.build_backbone()\n self.build_head()", "def initializeGL(self):\n self._graphicsInitialized = True\n if self._context:\n self._createSceneviewer()\n self.graphicsInitialized.emit()\n # initializeGL end", "def setup(self, app: VisModel):\n self.app = app", "def __init__(self, *args, **kwargs):\n super(Scene, self).__init__(*args, **kwargs)\n\n self.setVar('category', 'scene')", "def initView(self):\n return {}", "def __init__(self):\n self.match_views = MatchView.MatchView()", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def setUp(self):\n self.theView = View()", "def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:\n super().__init__(viewer=viewer)\n\n # Resetting all prior visibility control\n self.viewer.hide_all_layers()\n\n widget = self.assemble_widget()\n self.children = [widget]", "def create_scene(self, ):\n self.scene = create_scene(\n self.opt.splats_img_size, self.opt.splats_img_size, self.opt.fovy,\n self.opt.focal_length, self.opt.n_splats)", "def __init__(self, input_graph, app_display='default'):\n\n pass", "def __init__(self, input_graph, app_display='default'):\n\n pass", "def appInit(self):\n glutInitDisplayMode( GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH )\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glClearDepth(1.0 )\n glEnable( GL_DEPTH_TEST )\n glShadeModel( GL_SMOOTH )\n glEnable( GL_NORMALIZE )\n glEnable( GL_COLOR_MATERIAL )\n\n glEnable( GL_LIGHTING )\n glEnable( GL_LIGHT0 )\n\n self.set_lighting()\n\n self.make_simple_scenes()\n self.make_multi_object_scene()", "def initialize_default(self):\n self.initialize_navigation()\n self.initialize_viewport()", "def setup_scene(self):\n\n # read map\n options, landscapes, statics, dynamics, trees, hero, hare = read_map('test.map')\n self.num_of_blocks_X, self.num_of_blocks_Y = options['size']\n with self.canvas:\n # init landscapes\n block_x = 0\n for i in xrange(self.num_of_blocks_X):\n block_y = 0\n for j in xrange(self.num_of_blocks_Y):\n class_name = landscapes[i][j]\n if class_name is not None:\n clazz = eval(class_name.capitalize())\n else:\n clazz = Grass\n block = clazz(pos=(block_x, block_y),\n size=(self.block_width, self.block_height), border=(0, 0))\n self.blocks[i][j] = block\n block_y += self.block_height \n block_x += self.block_width\n\n # init dynamics\n 
for x, y, class_name in dynamics:\n if 'dynamics_as_blocks' in options and options['dynamics_as_blocks']:\n x, y = (x + 0.5) * self.block_width, (y + 0.5) * self.block_height\n eval(class_name.capitalize())(x, y)\n \n with self.canvas:\n # draw or hero\n HeroRabbit(BLOCK_SIZE[0]*(hero[0] + 0.5), BLOCK_SIZE[1]*(hero[1] + 0.5))\n Hare(BLOCK_SIZE[0]*(hare[0] + 0.5), BLOCK_SIZE[1]*(hare[1] + 0.5))\n\n # init statics\n def _is_mountain(i, j):\n return int(0 <= i < self.num_of_blocks_X and 0 <= j <= self.num_of_blocks_Y and\n statics[i][j] == 'mountain')\n\n def _get_mountain_type(i, j):\n opensides = (_is_mountain(i - 1, j), _is_mountain(i, j + 1),\n _is_mountain(i + 1, j), _is_mountain(i, j - 1)) # left, top, right, bottom\n opensides_to_type = {\n (1, 1, 1, 1): 'center',\n (1, 0, 1, 0): 'horizontal_center',\n (0, 1, 0, 1): 'vertical_center',\n (1, 0, 0, 0): 'horizontal_right',\n (0, 1, 0, 0): 'vertical_bottom',\n (0, 0, 1, 0): 'horizontal_left',\n (0, 0, 0, 1): 'vertical_top',\n }\n return opensides_to_type.get(opensides, 'horizontal_center')\n \n _mountains = []\n _bushes= []\n \n for i in xrange(self.num_of_blocks_X):\n for j in xrange(self.num_of_blocks_Y):\n class_name = statics[i][j]\n if class_name is not None:\n pos = (i + 0.5) * self.block_width, (j + 0.5) * self.block_height\n if class_name == 'bush':\n #Bush(*pos)\n _bushes.append(pos)\n elif class_name == 'mountain':\n _mountains.append((pos, _get_mountain_type(i, j)))\n #Mountain(*pos, type=_get_mountain_type(i, j))\n \n for tree_pos in trees:\n Tree(BLOCK_SIZE[0]*(tree_pos[0] + 0.5), BLOCK_SIZE[1]*(tree_pos[1] + 0.5))\n \n with self.canvas:\n for pos in _bushes:\n Bush(*pos)\n \n for pos, type in _mountains:\n Mountain(*pos, type=type)\n\n HolyCarrot(13.5*self.block_width, 7.5*self.block_height)\n # This should be called at the end\n self.reindex_graphics()", "def run():\n\n if lm.scene_object().type == 'env':\n read_layout(outFile=lm.scene_object().copy(ext='json').path_root)\n\n else:\n read_layout()", "def graphing_setup(self):\n pass", "def run(self):\n print('Running test of the markups in different views')\n\n #\n # first load the data\n #\n import SampleData\n sampleDataLogic = SampleData.SampleDataLogic()\n print(\"Getting MR Head Volume\")\n mrHeadVolume = sampleDataLogic.downloadMRHead()\n\n #\n # link the viewers\n #\n sliceLogic = slicer.app.layoutManager().sliceWidget('Red').sliceLogic()\n compositeNode = sliceLogic.GetSliceCompositeNode()\n compositeNode.SetLinkedControl(1)\n\n #\n # MR Head in the background\n #\n sliceLogic.StartSliceCompositeNodeInteraction(1)\n compositeNode.SetBackgroundVolumeID(mrHeadVolume.GetID())\n sliceLogic.EndSliceCompositeNodeInteraction()\n\n #\n # switch to conventional layout\n #\n lm = slicer.app.layoutManager()\n lm.setLayout(2)\n\n # create a fiducial list\n displayNode = slicer.vtkMRMLMarkupsDisplayNode()\n slicer.mrmlScene.AddNode(displayNode)\n fidNode = slicer.vtkMRMLMarkupsFiducialNode()\n slicer.mrmlScene.AddNode(fidNode)\n fidNode.SetAndObserveDisplayNodeID(displayNode.GetID())\n\n # make it active\n selectionNode = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSelectionNodeSingleton\")\n if (selectionNode is not None):\n selectionNode.SetReferenceActivePlaceNodeID(fidNode.GetID())\n\n # add some known points to it\n eye1 = [33.4975, 79.4042, -10.2143]\n eye2 = [-31.283, 80.9652, -16.2143]\n nose = [4.61944, 114.526, -33.2143]\n index = fidNode.AddFiducialFromArray(eye1)\n fidNode.SetNthFiducialLabel(index, \"eye-1\")\n index = fidNode.AddFiducialFromArray(eye2)\n 
fidNode.SetNthFiducialLabel(index, \"eye-2\")\n # hide the second eye as a test of visibility flags\n fidNode.SetNthFiducialVisibility(index, 0)\n index = fidNode.AddFiducialFromArray(nose)\n fidNode.SetNthFiducialLabel(index, \"nose\")\n\n self.logicDelayDisplay(\"Placed 3 fiducials\")\n\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # switch to 2 3D views layout\n #\n lm.setLayout(15)\n self.logicDelayDisplay(\"Switched to 2 3D views\")\n # self.printViewAndSliceNodes()\n\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 2\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode2\")\n self.logicDelayDisplay(\"Showing only in view 2\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n #\n # remove it so show in all\n #\n displayNode.RemoveAllViewNodeIDs()\n self.logicDelayDisplay(\"Showing in both views\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0 or self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1 and 2\")\n self.printViewNodeIDs(displayNode)\n return False\n\n #\n # show only in view 1\n #\n displayNode.AddViewNodeID(\"vtkMRMLViewNode1\")\n self.logicDelayDisplay(\"Showing only in view 1\")\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode2') == 1:\n self.logicDelayDisplay(\"Test failed: widget is not supposed to be visible in view 2\")\n # self.printViewNodeIDs(displayNode)\n return False\n if self.widgetVisible(fidNode, 'vtkMRMLViewNode1') == 0:\n self.logicDelayDisplay(\"Test failed: widget is not visible in view 1\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # switch back to conventional\n lm.setLayout(2)\n self.logicDelayDisplay(\"Switched back to conventional layout\")\n # self.printViewAndSliceNodes()\n\n # test of the visibility in slice views\n displayNode.RemoveAllViewNodeIDs()\n\n # jump to the last fiducial\n slicer.modules.markups.logic().JumpSlicesToNthPointInMarkup(fidNode.GetID(), index, 1)\n # refocus the 3D cameras as well\n slicer.modules.markups.logic().FocusCamerasOnNthPointInMarkup(fidNode.GetID(), index)\n\n # show only in red\n displayNode.AddViewNodeID('vtkMRMLSliceNodeRed')\n self.logicDelayDisplay(\"Show only in red slice\")\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed on red slice\")\n # self.printViewNodeIDs(displayNode)\n return False\n\n # remove all, add green\n # print 'before remove all, after added red'\n # self.printViewNodeIDs(displayNode)\n displayNode.RemoveAllViewNodeIDs()\n # print 'after removed all'\n # self.printViewNodeIDs(displayNode)\n displayNode.AddViewNodeID('vtkMRMLSliceNodeGreen')\n self.logicDelayDisplay('Show only in green slice')\n if self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed') != 0 or 
self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen') != 1:\n self.logicDelayDisplay(\"Test failed: widget not displayed only on green slice\")\n print '\\tred = ',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeRed')\n print '\\tgreen =',self.widgetVisibleOnSlice(fidNode,'vtkMRMLSliceNodeGreen')\n self.printViewNodeIDs(displayNode)\n return False\n\n return True", "def __init__(self, parent=None):\n super(Scene, self).__init__(parent)\n\n self.lastDragPos = QPoint()\n self.setItemIndexMethod(QGraphicsScene.NoIndex)\n\n # list of nodes in scene\n self.nodes = dict()\n #  axis of the scene for debug purpose\n # self.addLine(-100, 0, 100, 0)\n # self.addLine(0, -100, 0, 100)", "def __init__(self, parent):\n super(StageVisualizer, self).__init__(parent)\n\n self.figure = matplotlib.figure.Figure()\n self.axes = self.figure.add_subplot(111)\n self.figure.subplots_adjust(top=1, bottom=0, right=1, left=0)\n\n self.axes.get_xaxis().set_visible(False)\n self.axes.get_yaxis().set_visible(False)\n\n self.canvas = FigureCanvas(self, -1, self.figure)\n\n self.sizer = wx.BoxSizer(wx.VERTICAL)\n self.sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)\n self.SetSizer(self.sizer)\n\n self.toolbar = NavigationToolbar(self.canvas)\n self.toolbar.pan()\n self.toolbar.Hide()\n\n self.Fit()\n\n self.zoomer = self.zoom_factory(self.axes, base_scale=1.5)\n\n self.canvas.Bind(wx.EVT_PAINT, self.on_paint)", "def __init__(self, view, model):\n self.view = view\n self.view.set_controller(self)\n self.model = model", "def __init__(self, *args):\n _snap.TArtPointVisitor_swiginit(self, _snap.new_TArtPointVisitor(*args))", "def __init__(self, **kwargs):\n \n # init base\n super().__init__(**kwargs)\n self.name = \"Pero\"\n \n # init view\n self._view = None", "def __init__(self):\n # Passing the class make this Python 2 and Python 3 compatible\n super(MayaSceneLevelGeneratorUI, self).__init__(parent=maya_main_window())\n\n # Create the generators needed\n self._level_gen = level.LevelGenerator([blocks.BlockFile(\"\", blk_type) for blk_type in VALID_BLOCK_TYPES])\n self._scene_gen = MayaSceneLevelGenerator(None) # Fill in level at button press time\n\n # Window things\n self.setWindowTitle(\"Maya Scene Level Generator\")\n self.resize(500, 200)\n self.setWindowFlags(self.windowFlags() ^ PySide2.QtCore.Qt.WindowContextHelpButtonHint)\n\n # Set up for the first time\n self._create_widgets()\n self._create_layout()\n self._refresh_view()\n self._create_connections() # Order matters, since refreshing triggers connections\n\n print(self._level_gen.block_list) # TODO delete", "def _configureVisualFactory(self):\n raise NotImplementedError", "def InitView(self):\n\n self._DisablePlotterOptions()\n\n if (self.localModel.datasetCols == 2):\n self.view.Enable2DRadio()\n\n self.Radio2DClicked(True)\n\n if (self.localModel.datasetCols >= 3):\n self.view.Enable2DRadio()\n self.view.Enable3DRadio()\n self.view.Set3DSelected()\n\n self.Radio3DClicked(True)", "def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:\n super().__init__(viewer=viewer)\n\n widget = self._create_timeline_controls()\n self.children = [widget]", "def __init__(self):\n super().__init__(ShowGraphABC._UNDIRECTED, \"path\")", "def view(self):", "def load(self):\n self._web_view.load_graph(\n options=self._options, styles=self._styles, stencils=self._stencils\n )", "def _handler_voxel_view(self,event):\n self._mgr.LoadPerspective(\n\t\t\tself._perspectives['voxel_view'])", "def visualize(self):\n self.octree.updateInnerOccupancy()\n print(\"Start 
Octomap Visualization\")\n\n # define parameters\n data = imgviz.data.arc2017()\n camera_info = data['camera_info']\n K = np.array(camera_info['K']).reshape(3, 3)\n width=camera_info['width']\n height=camera_info['height']\n\n # get free and occupied grid\n occupied, _ = self.octree.extractPointCloud()\n #frontier = self.gen_frontier()\n \n print(\"load point cloud\")\n window = pyglet.window.Window(\n width=int(1280), height=int(960)\n )\n\n @window.event\n def on_key_press(symbol, modifiers):\n if modifiers == 0:\n if symbol == pyglet.window.key.Q:\n window.on_close()\n\n gui = glooey.Gui(window)\n hbox = glooey.HBox()\n hbox.set_padding(5)\n\n camera = trimesh.scene.Camera(\n resolution=(width, height), focal=(K[0, 0], K[1, 1])\n )\n\n # initial camera pose\n camera_transform = np.array(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, -5],\n [0.0, 0.0, 0.0, 1.0],\n ],\n )\n\n \n\n occupied_geom = trimesh.voxel.ops.multibox(\n occupied, pitch=self.resolution, colors=[0.0, 0.0, 0.0, 0.5]\n )\n\n # frontier_geom = trimesh.voxel.ops.multibox(\n # frontier, pitch=self.resolution, colors=[1.0, 0, 0, 0.5]\n # )\n scene = trimesh.Scene(camera=camera, geometry=[occupied_geom])#, frontier_geom])\n scene.camera_transform = camera_transform\n hbox.add(self.labeled_scene_widget(scene, label='octomap'))\n\n\n gui.add(hbox)\n pyglet.app.run()", "def appInit(self):\n self.shapes = []\n # set viewing projection\n glClearColor(1.0, 1.0, 1.0, 0.0)\n glColor(0.0, 0.0, 0.0)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0.0, WINDOW_WIDTH, 0.0, WINDOW_HEIGHT)\n\n tri = Triangle()\n tri.set_location(10, 50)\n tri.set_color(0, 1, 1)\n self.shapes.append(tri)\n\n tri = Triangle()\n tri.set_location(70, 50)\n tri.set_color(1, 0, 1)\n tri.set_size(2, 2)\n self.shapes.append(tri)\n\n tri = Triangle()\n tri.set_location(300, 50)\n self.shapes.append(tri)", "def _make_view(tabbed=False, split=False, scene_width=-1):\n view_options = VGroup(Item('headview', style='custom'), 'view_options',\n show_border=True, show_labels=False, label='View')\n\n scene = VGroup(Item('scene', show_label=False,\n editor=SceneEditor(scene_class=MayaviScene),\n dock='vertical', width=500),\n view_options)\n\n data_panel = VGroup(VGroup(Item('subject_panel', style='custom'),\n label=\"MRI Subject\", show_border=True,\n show_labels=False),\n VGroup(Item('lock_fiducials', style='custom',\n editor=EnumEditor(cols=2,\n values={False: '2:Edit',\n True: '1:Lock'}),\n enabled_when='fid_ok'),\n HGroup('hsp_always_visible',\n Label(\"Always Show Head Shape Points\"),\n show_labels=False),\n Item('fid_panel', style='custom'),\n label=\"MRI Fiducials\", show_border=True,\n show_labels=False),\n VGroup(Item('raw_src', style=\"custom\"),\n HGroup(Item('distance', show_label=True),\n 'omit_points', 'reset_omit_points',\n show_labels=False),\n Item('omitted_info', style='readonly',\n show_label=False),\n label='Head Shape Source (Raw)',\n show_border=True, show_labels=False),\n show_labels=False, label=\"Data Source\")\n\n coreg_panel = VGroup(Item('coreg_panel', style='custom'),\n label=\"Coregistration\", show_border=True,\n show_labels=False,\n enabled_when=\"fid_panel.locked\")\n\n if split:\n main_layout = 'split'\n else:\n main_layout = 'normal'\n\n if tabbed:\n main = HGroup(scene,\n Group(data_panel, coreg_panel, show_labels=False,\n layout='tabbed'),\n layout=main_layout)\n else:\n main = HGroup(data_panel, scene, coreg_panel, show_labels=False,\n layout=main_layout)\n\n view = View(main, resizable=True, 
handler=CoregFrameHandler(),\n buttons=NoButtons)\n return view", "def show(self):\n self.scene().show()", "def __init__(self,\n scene,\n smooth=True,\n flags=None,\n visible=True,\n resolution=(640, 480),\n start_loop=True,\n callback=None,\n callback_period=None,\n caption=None,\n **kwargs):\n self.scene = self._scene = scene\n self.callback = callback\n self.callback_period = callback_period\n self.scene._redraw = self._redraw\n self.reset_view(flags=flags)\n self.batch = pyglet.graphics.Batch()\n\n # store kwargs\n self.kwargs = kwargs\n\n # store a vertexlist for an axis marker\n self._axis = None\n # store scene geometry as vertex lists\n self.vertex_list = {}\n # store geometry hashes\n self.vertex_list_hash = {}\n # store geometry rendering mode\n self.vertex_list_mode = {}\n\n # name : texture\n self.textures = {}\n\n if scene.camera is not None:\n if resolution is not None and not np.allclose(\n resolution,\n scene.camera.resolution,\n rtol=0, atol=0):\n log.warning(\n 'resolution is overwritten by camera: '\n '{} -> {}'.format(resolution,\n scene.camera.resolution))\n resolution = scene.camera.resolution\n\n if 'camera' not in scene.graph:\n # if the camera hasn't been set, set it now\n scene.set_camera()\n\n try:\n # try enabling antialiasing\n # if you have a graphics card this will probably work\n conf = gl.Config(sample_buffers=1,\n samples=4,\n depth_size=24,\n double_buffer=True)\n super(SceneViewer, self).__init__(config=conf,\n visible=visible,\n resizable=True,\n width=resolution[0],\n height=resolution[1],\n caption=caption)\n except pyglet.window.NoSuchConfigException:\n conf = gl.Config(double_buffer=True)\n super(SceneViewer, self).__init__(config=conf,\n resizable=True,\n visible=visible,\n width=resolution[0],\n height=resolution[1],\n caption=caption)\n\n # add scene geometry to viewer geometry\n for name, mesh in scene.geometry.items():\n self.add_geometry(name=name,\n geometry=mesh,\n smooth=bool(smooth))\n\n # call after geometry is added\n self.init_gl()\n self.set_size(*resolution)\n self.update_flags()\n\n # someone has passed a callback to be called periodically\n if self.callback is not None:\n # if no callback period is specified set it to default\n if callback_period is None:\n callback_period = 1.0 / 100.0\n # set up a do-nothing periodic task which will\n # trigger `self.on_draw` every `callback_period`\n # seconds if someone has passed a callback\n pyglet.clock.schedule_interval(lambda x: x,\n callback_period)\n if start_loop:\n pyglet.app.run()", "def initialize_scene(self):\n if Time.now() - self.initial_time > 0.45 and self.should_initialize:\n self.should_initialize = False\n self.background_particle_controller = BackgroundParticlesController()\n self.player_controller = PlayerController()\n self.obstacle_controller_wrapper = ObstacleControllerWrapper()\n self.items_controller = ItemsControllerWrapper()\n self.score_controller = ScoreController()", "def setup_scenes(self, scene_dict, start_scene):\n self._scene_dict = scene_dict\n self._scene_name = start_scene\n self._scene = self._scene_dict[self._scene_name]", "def __init__(self, parent: QWidget, gui: 'MetalGUI', view: QTreeView):\n super().__init__(parent=parent,\n gui=gui,\n view=view,\n child='GDS renderer')", "def __init__(self, root):\n self.app=root\n self.app.geometry('800x500')\n self.app.title(\"Bryce Streeper: Asset Allocation Visual \")\n self.makeTitle()\n self.makeGraph()\n self.makeSliders()\n self.update()", "def __init__(self, parent=None):\n 
ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self.T1_LLN_Node = None\n self.T1_LLE_Node = None\n self.ECVMapNode = None\n self.LLE_Node = None\n self.LLN_Node = None\n self.ArefNode = None\n self.T1_LLE_Name = 'T1 Enhanced'\n self.T1_LLN_Name = 'T1 Native'\n self.ResetSliceViews()\n self.LinkSlices()\n self.ColorBarEnabled()\n self.setupVolumeNodeViewLayout()\n self.Warning = True", "def setUp(self):\r\n slicer.mrmlScene.Clear()", "def __init__(self):\n super(RouteLayer, self).__init__()\n\n routes = [(\"^/ping\", views.ping),\n (\"^/e(co)?(?P<eco_message>[^$]+)$\", views.echo),\n (\"^/p(iada)?\\s*$\", views.get_piada)]\n\n routes.extend(MediaViews(self).routes)\n routes.extend(StaticViews(self).routes)\n # routes.extend(GroupAdminViews(self).routes)\n\n self.views = [(re.compile(pattern), callback) for pattern, callback in routes]", "def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:\n super().__init__(viewer=viewer)\n\n widget = self._create_settings_widget()\n self.children = [widget]", "def initializeUI(self):\n self.setGeometry(100, 100, 450, 300)\n self.setWindowTitle('Model and View Example')\n\n self.setupModelView()\n\n self.show()", "def _setup_scene(self):\n\n scene = bpy.context.scene\n\n bpy.ops.object.select_all(action=\"DESELECT\")\n\n # remove non mesh objects\n for obj in scene.objects:\n obj.select = (obj.type != \"MESH\")\n bpy.ops.object.delete()\n\n # empty sequences are false by default\n if scene.objects:\n\n # unlink objects (all meshes) from parents\n bpy.ops.object.select_all()\n bpy.ops.object.parent_clear(type=\"CLEAR_KEEP_TRANSFORM\")\n\n # join all meshes in one single object\n scene.objects.active = bpy.data.objects[0]\n bpy.ops.object.join()\n bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)\n bpy.context.object.name = \"Object\"\n bpy.context.object.dimensions = bpy.context.object.dimensions / max(bpy.context.object.dimensions)\n\n # set the origin of the object to the cursor location\n scene.cursor_location = [0, 0, 0]\n bpy.ops.object.origin_set(type=\"ORIGIN_CURSOR\")\n # bpy.ops.object.origin_set(type=\"GEOMETRY_ORIGIN\", center=\"BOUNDS\")\n bpy.ops.object.origin_set(type=\"ORIGIN_CENTER_OF_MASS\", center=\"BOUNDS\")\n\n if self.add_ground_plane:\n bpy.ops.mesh.primitive_plane_add(radius=10.)\n\n bpy.ops.object.select_all(action=\"DESELECT\")", "def __init__(self, *args):\n _snap.TNGraph_swiginit(self, _snap.new_TNGraph(*args))", "def init_stage(self):\n\n self.estimators_ = []", "def init_model(self, scene):\n # The number of faces corresponding to each level of detail.\n n_sides = [8, 16, 32, 46, 68, 90]\n n_stacks = [1, 2, 4, 7, 10, 14]\n for i in range(0, 6):\n scene.cone_model[i].gl_compile_begin()\n _quadric = Quadric()\n _quadric.render_cylinder(1.0, 1.0, n_sides[i], n_stacks[i],\n top_radius=0.0)\n _quadric.render_disk(1.0, n_sides[i], n_stacks[i] * 2, -1)\n scene.cone_model[i].gl_compile_end()", "def main():\n viewer = Viewer()\n\n # paramètre de transformation des paramètres\n #sol\n ground_size = 512\n ground_offset = 20\n\n #dinosaure\n characters_offset_x = 0\n characters_offset_y = -20\n characters_offset_z = 0\n characters_scale = 15\n characters_rotate_deg = 180\n\n #forêt\n forest_offset = -15\n forest_scale = 1.5\n\n #skybox\n Skysphere_scale = 3\n\n characters = Node(transform = translate(characters_offset_x, characters_offset_y, characters_offset_z) @ 
scale(characters_scale) @ rotate(axis=(0, 1, 0), angle = characters_rotate_deg))\n characters.add(*load_skinned(\"dino/Dinosaurus_roar.dae\"))\n\n forest = Node(transform = translate(0, forest_offset, 0) @ scale(forest_scale))\n forest.add(*load_textured(\"trees9/forest.obj\"))\n\n ground = Node(transform = translate(-ground_size>>1, ground_offset, -ground_size>>1))\n ground.add(sol(ground_size))\n\n Skysphere = Node(transform = scale(Skysphere_scale))\n Skysphere.add(*load_textured(\"Skysphere/skysphere.obj\"))\n\n scene = Node(transform = identity(), children = [characters, forest, ground, Skysphere])\n\n viewer.add(scene)\n\n viewer.run()", "def populate_graph(self):", "def afterLoadSceneObject(self):\n\t\tpass", "def OnRootView( self, event ):\n self.adapter,tree = self.RootNode()\n self.squareMap.SetModel( tree, self.adapter )\n self.RecordHistory()", "def view(self):\n raise NotImplementedError", "def view(config_file):\n import open3d as o3d\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n for scene in scenes:\n # if scene['scene_name'] != \"Scene_004\":\n # continue\n scene_data = get_data_from_scene(scene)\n logger.info(\"Visualizing - %s\", scene['scene_name'])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d']))\n o3d.visualization.draw_geometries_with_editing([pcd])\n pcd = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(scene_data['points3d_segmented']))\n o3d.visualization.draw_geometries([pcd])", "def views(self):\r\n return Views(self)", "def initialize(self):\n self.Update()\n ViewportManager.updateAll()\n self.wxStep()\n ViewportManager.initializeAll()\n # Position the camera\n if base.trackball is not None:\n base.trackball.node().setPos(0, 30, 0)\n base.trackball.node().setHpr(0, 15, 0)\n\n # to make persp view as default\n self.perspViewMenuItem.Check()\n self.onViewChange(None, 3)\n\n # initializing direct\n if self.fStartDirect:\n base.startDirect(fWantTk = 0, fWantWx = 0)\n\n base.direct.disableMouseEvents()\n newMouseEvents = [\"_le_per_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.mouseEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.mouseEvents]\n base.direct.mouseEvents = newMouseEvents\n base.direct.enableMouseEvents()\n\n base.direct.disableKeyEvents()\n keyEvents = [\"_le_per_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.keyEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.keyEvents]\n base.direct.keyEvents = keyEvents\n base.direct.enableKeyEvents()\n\n base.direct.disableModifierEvents()\n modifierEvents = [\"_le_per_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_fro_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_lef_%s\"%x for x in base.direct.modifierEvents] +\\\n [\"_le_top_%s\"%x for x in base.direct.modifierEvents]\n base.direct.modifierEvents = modifierEvents\n base.direct.enableModifierEvents()\n\n base.direct.cameraControl.lockRoll = True\n base.direct.setFScaleWidgetByCam(1)\n\n unpickables = [\n \"z-guide\",\n \"y-guide\",\n \"x-guide\",\n \"x-disc-geom\",\n \"x-ring-line\",\n \"x-post-line\",\n \"y-disc-geom\",\n \"y-ring-line\",\n \"y-post-line\",\n \"z-disc-geom\",\n \"z-ring-line\",\n \"z-post-line\",\n \"centerLines\",\n \"majorLines\",\n \"minorLines\",\n \"Sphere\",]\n\n for unpickable in unpickables:\n 
base.direct.addUnpickable(unpickable)\n\n base.direct.manipulationControl.optionalSkipFlags |= SKIP_UNPICKABLE\n base.direct.manipulationControl.fAllowMarquee = 1\n base.direct.manipulationControl.supportMultiView()\n base.direct.cameraControl.useMayaCamControls = 1\n base.direct.cameraControl.perspCollPlane = self.perspView.collPlane\n base.direct.cameraControl.perspCollPlane2 = self.perspView.collPlane2\n\n for widget in base.direct.manipulationControl.widgetList:\n widget.setBin('gui-popup', 0)\n widget.setDepthTest(0)\n\n # [gjeon] to intercept messages here\n base.direct.ignore('DIRECT-delete')\n base.direct.ignore('DIRECT-select')\n base.direct.ignore('DIRECT-preDeselectAll')\n base.direct.ignore('DIRECT-toggleWidgetVis')\n base.direct.fIgnoreDirectOnlyKeyMap = 1\n\n # [gjeon] do not use the old way of finding current DR\n base.direct.drList.tryToGetCurrentDr = False\n\n else:\n base.direct=None\n #base.closeWindow(base.win)\n base.win = base.winList[3]", "def __init__(self):\n\n self.nodes = {}", "def on_load(self):\n self.__init__()", "def _initialize_trees(self):", "def __init__(\n self,\n viewer: geoviewer.GeoGraphViewer,\n layer_type: str,\n layer_subtype: str,\n layer_name: Optional[str] = None,\n link_to_current_state: bool = True,\n layout: Optional[widgets.Layout] = None,\n **kwargs,\n ) -> None:\n\n self.viewer = viewer\n\n # Setting log with handler, that allows access to log\n # via self.log_handler.show_logs()\n self.logger = logging.getLogger(type(self).__name__)\n self.logger.setLevel(self.viewer.logger.level)\n self.log_handler = self.viewer.log_handler\n self.logger.addHandler(self.log_handler)\n\n if layout is None:\n layout = widgets.Layout(height=\"auto\", width=\"auto\")\n\n super().__init__(layout=layout, **kwargs)\n\n self.add_traits(\n layer_type=traitlets.Unicode().tag(sync=True),\n layer_subtype=traitlets.Unicode().tag(sync=True),\n layer_name=traitlets.Unicode().tag(sync=True),\n )\n self.layer_subtype = layer_subtype\n self.layer_type = layer_type\n\n if layer_type == \"maps\":\n if layer_name is None:\n layer_name = self.viewer.current_map\n self.layer_name = layer_name\n\n # If current map changes the function of this button changes\n if link_to_current_state:\n widgets.dlink((self.viewer, \"current_map\"), (self, \"layer_name\"))\n\n elif layer_type == \"graphs\":\n if layer_name is None:\n layer_name = self.viewer.current_graph\n self.layer_name = layer_name\n\n if link_to_current_state:\n widgets.dlink((self.viewer, \"current_graph\"), (self, \"layer_name\"))\n\n self.observe(self._handle_view, names=[\"value\", \"layer_name\"])\n self._check_layer_exists()\n\n self.logger.info(\"Initialised.\")", "def createScene(self):\n self.scene().clear()\n self.scene().drawGrid()\n # draw nodes\n for i, edge in enumerate(self.dat.flowsheet.edges):\n n1 = self.dat.flowsheet.nodes[edge.start]\n n2 = self.dat.flowsheet.nodes[edge.end]\n self.scene().drawEdge(n1.x, n1.y, n2.x, n2.y, i, edge.curve, edge.tear)\n # draw edges\n for name, node in self.dat.flowsheet.nodes.items():\n self.scene().drawNode(node.x, node.y, name, node.modelName)\n # redraw the scene\n self.scene().update()", "def setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def setUp(self):\r\n slicer.mrmlScene.Clear(0)", "def __initialize(self):\n self.__object = None\n \n self.__mainAct = None\n self.__mainMenu = None\n \n self.__e5project = e5App().getObject(\"Project\")\n \n self.__supportedVariants = []", "def __init__(self, params=None):\n\n rights = 
access.GSoCChecker(params)\n rights['edit'] = [('checkCanEditTimeline', [program_logic])]\n\n new_params = {}\n new_params['logic'] = timeline_logic\n new_params['rights'] = rights\n\n new_params['name'] = \"GSoC Timeline\"\n new_params['module_name'] = \"timeline\"\n\n new_params['module_package'] = 'soc.modules.gsoc.views.models'\n new_params['url_name'] = 'gsoc/timeline'\n\n params = dicts.merge(params, new_params, sub_merge=True)\n\n super(View, self).__init__(params=params)", "def viewfactory(self):\n raise NotImplementedError()", "def __init__(self, parent):\n self.name = \"Base.View\"\n self.parent = parent\n self.Main = parent.Main" ]
[ "0.6847052", "0.6840577", "0.6840577", "0.66847914", "0.65465605", "0.63439626", "0.63439626", "0.63439626", "0.6235359", "0.6225565", "0.6199402", "0.61638135", "0.61520505", "0.6145105", "0.6108402", "0.6104385", "0.6099813", "0.6098735", "0.6098318", "0.6096668", "0.6069175", "0.60553604", "0.60407734", "0.6037822", "0.6023956", "0.6004981", "0.5997512", "0.5978027", "0.59493417", "0.59478426", "0.5922764", "0.5911394", "0.5881379", "0.5878458", "0.5869748", "0.5845234", "0.584168", "0.58317286", "0.58317286", "0.5818091", "0.5777864", "0.57734835", "0.5766422", "0.5766422", "0.5750868", "0.5735239", "0.5732416", "0.5723077", "0.5684532", "0.5679859", "0.56641483", "0.5659075", "0.5652853", "0.5647954", "0.5636629", "0.56282353", "0.5611702", "0.5610439", "0.5608721", "0.5592928", "0.5592142", "0.5590082", "0.55886674", "0.55794287", "0.5578069", "0.55702144", "0.55676967", "0.55671275", "0.5565658", "0.5560491", "0.55536467", "0.5537013", "0.55362177", "0.5533711", "0.55329007", "0.5529487", "0.5509523", "0.55077314", "0.55063266", "0.54956234", "0.5493405", "0.5490979", "0.54891217", "0.54880464", "0.5485411", "0.5483814", "0.54789335", "0.5473279", "0.5472924", "0.5465913", "0.5461904", "0.5459262", "0.5457286", "0.5454144", "0.5442079", "0.5442079", "0.5442079", "0.5438519", "0.5435938", "0.5433368", "0.5431724" ]
0.0
-1
Return default display mode
def getDefaultDisplayMode(self): return "Wireframe"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getCurrentDisplay():\n # Windows reports the active display as 'console', so we hardcode it\n return \"console\"", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def get_mode(self):\r\n return self._api.get_mode()", "def get_mode(self):\r\n return self.mode", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def setDisplayMode(self, mode):\n return mode", "def getmode(self):\n return self.mode", "def isDefaultMode():\n\treturn 0", "def swing_mode(self) -> str | None:\n return self._current_swing_mode", "def default_mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"default_mode\")", "def mode(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"mode\")", "def mode(self):\n return self._data.get('mode', None)", "def get_mode(self, ):\n return self.get_parameter('mode')", "def mode(self) -> Optional[str]:\n for mode in self._modes:\n if mode.active:\n return mode.name\n return None", "def mode(self):\n return self._lift(\"mode\")", "def mode(self) -> str:\r\n return self._mode", "def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)", "def getDefaultDisplayMode(self):\n return \"Shaded\"", "def config_mode(self):\n return \"\"", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def mode(self):\r\n return self._mode", "def current_swing_mode(self):\n return None", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self._mode", "def mode(self):\n return self.__mode", "def mode(self):\r\n pass", "def getMode(self):\n return self._mode", "def dev_mode(self):\r\n return self._dev_mode", "def _get_mode(self):\n raise NotImplementedError", "def game_mode(self):\n return self._get(\"game_mode\")", "def mode(self):\n return self._mode_func", "def mode(self):\n\n return self._mode", "def preset_mode(self):\n return self._preset_mode", "def get_mode(self):\n if self.vimiv.library.treeview.is_focus():\n return \"<b>-- LIBRARY --</b>\"\n elif self.vimiv.manipulate.scrolled_win.is_visible():\n return \"<b>-- MANIPULATE --</b>\"\n elif self.vimiv.thumbnail.toggled:\n return \"<b>-- THUMBNAIL --</b>\"\n else:\n return \"<b>-- IMAGE --</b>\"", "def get_window_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetWindowMode', self.handle)", "def get_default_MXNet_mode():\n return MXNET_DEFAULT_MODE", "def get_current_mode(self):\n return self.read(0xa2)", "def current_swing_mode(self):\n return self._current_swing_mode", "def preset_mode(self) -> Optional[str]:\n return self._preset", "def mode(self) -> str:\r\n ...", "def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"mode\")", "def preset_mode(self) -> str | None:\n\n if self._device.system_mode is None:\n return # unable to determine\n return PRESET_TCS_TO_HA[self._device.system_mode[CONF_SYSTEM_MODE]]", "def swing_mode(self) -> str | None:\n return _SWING_MODES.from_esphome(self._state.swing_mode)", "def auto_mode(self):\n return self._auto_mode", "def swing_mode(self):\n return self._swing_mode", "def get_current_boot_mode(self):\n boot_mode = self._get_bios_setting('BootMode')\n if boot_mode == 'LegacyBios':\n boot_mode = 'legacy'\n\n return boot_mode.upper()", "def mode(self) -> int:\n return self._mode", "def mode(self) -> 
Optional[pulumi.Input[Union[str, 'Mode']]]:\n return pulumi.get(self, \"mode\")", "def preset_mode(self) -> str:\n # NOTE: fan speeds are not really presets...the only presets LUNOS has is vent mode\n return self._vent_mode", "def get_autofeed_mode(self):\n mode = self.parent.printer.get_autofeed()\n if mode is not None:\n mode = mode[0]\n return mode", "def _get_mode():\n return context.get_context('mode')", "def read_configuration_mode(self):\n configuration_mode = self.scpi_comm('CONFIG?').strip()\n mode = 'Unknown'\n if configuration_mode == '0':\n mode = 'Voltage tracking'\n if configuration_mode == '2':\n mode = 'Dual output'\n if configuration_mode in ('3', '4'):\n mode = 'Track Voltage and Current'\n return mode", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def fan_mode(self) -> str | None:\n return self._current_fan_mode", "def get_double_mode(self):\r\n msg = struct.pack('>2B', 56, 1)\r\n response = self.query(msg)\r\n if response[1] == 254:\r\n return 'Subtractive mode selected.'\r\n elif response[1] == 1:\r\n return 'Additive mode selected.'\r\n else:\r\n raise ValueError('Mode not recognised.')", "def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def mode(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"mode\")", "def get_focus_mode(self):\n if self.active_focus_mode:\n return self.active_focus_mode.lower()", "def get_popup_mode(self) -> str:\n return POPUP_MODES.inv[self.popupMode()]", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def device_mode(self) -> str:\n raw_mode = self._device_info[\"SensorMode\"]\n if raw_mode not in DEVICE_MODE_MAP:\n LOGGER.debug(\"Unknown device mode value: %s\", raw_mode)\n return DEVICE_MODE_UNKNOWN\n return DEVICE_MODE_MAP[raw_mode]", "def mode(self) -> Mode:\n return self._mode", "def preset_mode(self) -> str | None:\n state = self._state\n return state.custom_preset or _PRESETS.from_esphome(\n state.preset_compat(self._api_version)\n )", "def getDisplayModes(self, obj):\n modes = []\n return modes", "def usb_mode() -> str:", "def opt_display(self, display):\n key = get_enum_key(display, DISPLAYS)\n if key is not None:\n self.conf[\"display\"] = key\n self.display = DISPLAYS[key]\n print(\"Set display %r\" % key)\n else:\n print(\"Unknown display %r\" % display)", "def current_option(self) -> str:\n return self.coordinator.data.settings.lamp_mode.name.lower()", "def tflite_mode(self):\n return getattr(self, \"_tflite_mode\", False)", "def preset_mode(self) -> Optional[str]:\n if self._device.fan_whoosh_mode:\n return PRESET_MODE_WHOOSH\n return None", "def 
getPanelMode(self) -> str:\r\n if self.visprotocol is not None:\r\n return self.visprotocol.getPanelMode()\r\n return \"Not Connected\"", "def get_virtual_display(self):\n pass", "def setDisplayMode(self, mode):\n return \"Wireframe\"", "def get_heatmode(self, text=False):\n if text:\n return text_heatmode[self.heatmode]\n return self.heatmode", "def preset_mode(self):\n dps_mode = self._device.get_property(PROPERTY_TO_DPS_ID[ATTR_PRESET_MODE])\n if dps_mode is not None:\n return GoldairTuyaDevice.get_key_for_value(PRESET_MODE_TO_DPS_MODE, dps_mode)\n else:\n return None", "def set_display_mode(self, mode):\n self.grid.view_mode = mode", "def mode(self):\n if self._vsanobj.id is None:\n raise VsanNotPresent(\"Vsan \" + str(self._vsanobj._id) + \" is not present on the switch.\")\n out = self.__show_zone_status()\n return out[get_key(zonekeys.MODE, self._SW_VER)]", "def preset_mode(self) -> str | None:\n\n if self._device.tcs.system_mode is None:\n return # unable to determine\n # if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in MODE_TCS_TO_HA:\n if self._device.tcs.system_mode[CONF_SYSTEM_MODE] in (\n SystemMode.AWAY,\n SystemMode.HEAT_OFF,\n ):\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n\n if self._device.mode is None:\n return # unable to determine\n if self._device.mode[CONF_MODE] == ZoneMode.SCHEDULE:\n return PRESET_TCS_TO_HA[self._device.tcs.system_mode[CONF_SYSTEM_MODE]]\n return PRESET_ZONE_TO_HA.get(self._device.mode[CONF_MODE])", "def getMode(self):\n with self.lock:\n mode = self.mode\n return mode", "def mode(self):\n return self._wepy_mode", "def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...", "def getModeString(self, mode):\n if mode == 0:\n return \"4 MHz\"\n elif mode == 1:\n return \"500 KHz\"\n else:\n raise FliError(\"FLIGetCameraModeString failed\")", "def common_mode(self):\n return self._common_mode", "def common_mode(self):\n return self._common_mode", "def get_preferred_mode(self):\n ret = self._transfer(TVGetModes())\n return ret.data if isinstance(ret, RaspiAckMsg) and ret.ack else None", "def picture_mode(self):\n if self._state != STStatus.STATE_ON:\n return None\n return self._picture_mode", "def getmodebase(mode):\r\n return ImageMode().getmode(mode).basemode", "def login_mode(self) -> str:\n return pulumi.get(self, \"login_mode\")", "def get_printc_mode(self):\n return io_tam.MODE_16", "def get_pump_mode(self):\n return self.__pump_mode", "def patch_mode(self) -> str:\n return pulumi.get(self, \"patch_mode\")", "def patch_mode(self) -> str:\n return pulumi.get(self, \"patch_mode\")", "def operating_mode(self):\n return self._read(MX_OPERATING_MODE)", "def get_mode(self):\n self.read(\":FUNC?\")" ]
[ "0.7362568", "0.72185713", "0.72185713", "0.7218308", "0.7204967", "0.7182178", "0.7182178", "0.7181349", "0.71671027", "0.7127637", "0.7036331", "0.7016391", "0.70013005", "0.6981956", "0.6971124", "0.6963615", "0.6962245", "0.6953157", "0.69366914", "0.6927507", "0.69018614", "0.6854454", "0.6854454", "0.6854454", "0.6832153", "0.68132216", "0.68132216", "0.68132216", "0.67708564", "0.6741573", "0.6728038", "0.6721376", "0.6718664", "0.66907805", "0.667138", "0.6650335", "0.6649305", "0.658622", "0.657556", "0.6571137", "0.6566471", "0.6544467", "0.65352017", "0.6518771", "0.65145254", "0.65145254", "0.6454129", "0.6443812", "0.64336455", "0.64191085", "0.6408372", "0.63864833", "0.638508", "0.63477606", "0.6336581", "0.63231844", "0.63140553", "0.6304139", "0.6292376", "0.62714076", "0.6267424", "0.6266323", "0.6256947", "0.62426794", "0.623421", "0.6227421", "0.6227421", "0.62273073", "0.62156534", "0.6192319", "0.61902136", "0.61862886", "0.6183534", "0.61772346", "0.6160473", "0.6159", "0.6157411", "0.61560756", "0.6151267", "0.61340654", "0.61106175", "0.6110386", "0.610884", "0.6106991", "0.60893536", "0.608316", "0.6072744", "0.6066017", "0.6061341", "0.6061341", "0.6058219", "0.60273427", "0.60139024", "0.6005651", "0.59960014", "0.59901667", "0.5979636", "0.5979636", "0.59755933", "0.5963538" ]
0.7002804
12
Set mode wireframe only
def setDisplayMode(self, mode): return "Wireframe"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def toggle_wireframe(self):\n self.view['wireframe'] = not self.view['wireframe']\n self.update_flags()", "def wireframe_only(self):\n return self._wireframe_only", "def setSurfaceShadingMode(mode='flat'):\n sdict = {'flat':'FLAT','smooth':'SMOOTH'}\n dislin.shdmod(sdict[mode], 'SURFACE')", "def getDefaultDisplayMode(self):\n return \"Wireframe\"", "def add_wireframe_modifier(self):\n scene = self.set_as_active()\n\n # if the user selected a material, use it\n if w_var.cb_mat_wire:\n wireframe_mat = bpy.data.materials[w_var.mat_wire_name]\n\n # else, create a new one with the color selected\n else:\n color_wire = w_var.color_wire\n\n # separating rgb and alpha\n wireframe_color_rgb = color_wire[0:3]\n wireframe_color_alpha = color_wire[-1]\n wireframe_mat = bpy.data.materials.new('wireframe')\n\n renderengine = scene.wirebomb.data_renderengine\n \n if renderengine == 'CYCLES':\n wireframe_mat.use_nodes = True\n tree = wireframe_mat.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_transparent = tree.nodes.new('ShaderNodeBsdfTransparent')\n node_transparent.location = -300, 100\n\n node_diffuse = tree.nodes.new('ShaderNodeBsdfDiffuse')\n node_diffuse.location = -300, -100\n node_diffuse.inputs[0].default_value = wireframe_color_rgb + (1.0,)\n node_diffuse.color = wireframe_color_rgb\n node_diffuse.name = 'addon_wireframe_color' # referencing to this ID in the real-time change\n\n node_mixshader = tree.nodes.new('ShaderNodeMixShader')\n node_mixshader.location = 0, 50\n node_mixshader.inputs[0].default_value = wireframe_color_alpha\n node_mixshader.name = 'addon_wireframe_alpha' # referencing to this ID in the real-time change\n\n node_output = tree.nodes.new('ShaderNodeOutputMaterial')\n node_output.location = 300, 50\n\n # connecting the nodes\n tree.links.new(node_transparent.outputs[0], node_mixshader.inputs[1])\n tree.links.new(node_diffuse.outputs[0], node_mixshader.inputs[2])\n tree.links.new(node_mixshader.outputs[0], node_output.inputs[0])\n\n for node in tree.nodes:\n node.select = False\n\n # sets the viewport color\n wireframe_mat.diffuse_color = wireframe_color_rgb\n\n elif renderengine == 'BLENDER_RENDER':\n wireframe_mat.diffuse_color = wireframe_color_rgb\n wireframe_mat.use_transparency = True\n wireframe_mat.alpha = wireframe_color_alpha\n\n self.select('SELECT', {'MESH'}, objects_excluded={'ELSE'})\n\n for obj in scene.objects:\n if obj.select:\n obj.data.materials.append(wireframe_mat)\n modifier_wireframe = obj.modifiers.new(name='Wireframe', type='WIREFRAME')\n modifier_wireframe.use_even_offset = False # Causes spikes on some models\n modifier_wireframe.use_replace = False\n modifier_wireframe.thickness = w_var.slider_wt_modifier\n\n # arbitrary high number because wire material is always added to end\n modifier_wireframe.material_offset = 12345\n\n # referencing to this ID in the real-time change\n modifier_wireframe.name = 'addon_wireframe'\n\n return wireframe_mat", "def resMode(mode): \n if mode==0:\n makeMesh(r0x, r0y)\n elif mode==1:\n makeMesh(r1x, r1y)\n elif (mode==2):\n makeMesh(r2x, r2y)", "def setSurfaceMeshing(state='off',shading=1):\n sdict = {'off':'OFF','on':'ON'}\n val = sdict[state]\n if not shading:\n val = 'ONLY'\n dislin.surmsh(val)", "def add_wireframe_freestyle(self):\n scene = self.set_as_active()\n previous_area = bpy.context.area.type\n bpy.context.area.type = 'VIEW_3D'\n previous_layers = tuple(scene.layers)\n\n # can't enter edit mode on objects on inactive layers\n scene.layers = (True,)*20\n self.select('SELECT', {'MESH'}, 
objects_excluded={'ELSE'})\n\n for obj in scene.objects:\n if obj.select:\n scene.objects.active = obj\n bpy.ops.object.mode_set(mode='EDIT')\n bpy.ops.mesh.select_all(action='SELECT')\n bpy.ops.mesh.mark_freestyle_edge()\n bpy.ops.mesh.select_all(action='DESELECT')\n bpy.ops.object.mode_set(mode='OBJECT')\n\n bpy.context.area.type = previous_area\n scene.layers = previous_layers\n\n scene.render.use_freestyle = True\n scene.render.layers.active = scene.render.layers[w_var.rlname]\n\n for n in scene.render.layers.active.freestyle_settings.linesets:\n scene.render.layers.active.freestyle_settings.linesets.remove(n)\n\n lineset = scene.render.layers.active.freestyle_settings.linesets.new('wireframe')\n lineset.select_edge_mark = True\n lineset.select_crease = False\n\n wire_color = w_var.color_wire\n wire_thickness = w_var.slider_wt_freestyle\n\n wire_color_rgb = wire_color[0:3]\n wire_color_alpha = wire_color[-1]\n\n linestyle = bpy.data.linestyles.new('wire_style')\n linestyle.color = wire_color_rgb\n linestyle.alpha = wire_color_alpha\n linestyle.thickness = wire_thickness\n\n scene.render.layers.active.freestyle_settings.linesets.active.linestyle = linestyle\n\n return linestyle", "def _sketch_mode(self):\r\n self._mode_select(1)", "def render_wireframe(self, **kwds):\n proj = self.projection()\n if self.ambient_dim()==3:\n return proj.render_wireframe_3d(**kwds)\n if self.ambient_dim()==2:\n return proj.render_outline_2d(**kwds)\n raise ValueError, \"render_wireframe is only defined for 2 and 3 dimensional polyhedra.\"", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetPlaneMode(self, *args)", "def setDrawingMode(self):\n pass", "def addWireframe(self, wireframe):\n self.wireframe = wireframe\n self.tf_wireframe = wireframe.copy()", "def SetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetPlaneMode(self, *args)", "def toggle_surface_mode(self):\n for poly in self.poly_list:\n poly.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, True)\n if poly in self.hole_list:\n poly.setBrush(QBrush(QColor(255, 255, 255)))\n else:\n poly.setBrush(QBrush(QColor(0, 0, 0, 50)))\n\n # Disable the selection of edges and hide the marker if there is one\n for edge in self.edge_list:\n edge.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable, False)\n\n if edge.childItems()[0].childItems():\n text = edge.childItems()[0].childItems()[0]\n text.setVisible(False)\n\n # Hide markers on points\n for point in self.point_marker_list:\n if point.childItems():\n point.childItems()[0].setVisible(False)", "def set_up_wireframe_modifier(self):\n scene = self.set_as_active()\n \n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clay:\n\n # adding clay material before wireframe material for material offset in wireframe modifier to be correct\n self.set_up_clay()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n # sets up renderlayer and adds wireframe modifier/material to affected meshes and saves wireframe material\n self.set_up_rlayer('wireframe')\n scene.wirebomb.data_material_wire = self.add_wireframe_modifier().name\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao:\n self.set_up_all_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def 
set_up_wireframe_freestyle(self):\n scene = self.set_as_active()\n \n # sets up renderlayer(s) (depending on 'Composited wireframing' checkbox) and freestyle wireframing\n # also saves freestyle linestyle name\n self.set_up_rlayer('wireframe', rlname_other='other')\n scene.wirebomb.data_freestyle_linestyle = self.add_wireframe_freestyle().name\n\n # updates progress bar to 50 %\n bpy.context.window_manager.progress_update(50)\n\n if w_var.cb_clear_materials and w_var.is_any_affected:\n self.clear_materials()\n\n # updates progress bar to 75 %\n bpy.context.window_manager.progress_update(75)\n\n if w_var.cb_clay:\n self.set_up_clay()\n\n # updates progress bar to 99 %\n bpy.context.window_manager.progress_update(99)\n\n if w_var.cb_ao and not w_var.cb_composited:\n self.set_up_all_ao()\n\n elif w_var.cb_composited:\n\n # sets up composition for wireframe and sets up ambient occlusion lighting if used\n self.comp_add_wireframe_freestyle()\n \n if scene.render.engine == 'CYCLES':\n scene.cycles.film_transparent = True\n\n else:\n scene.render.alpha_mode = 'TRANSPARENT'\n\n if w_var.cb_ao:\n self.set_up_world_ao()\n\n # deselects all objects as a last thing to clean up\n self.select('DESELECT', objects={'ALL'})", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def enable_texture_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"TEXTURED\"\n return", "def set_mode_point():\n global DRAW_MODE\n DRAW_MODE=\"point\"", "def setCompositionMode(self, mode):\n self.paintMode = mode\n self.update()", "def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode", "def SetEdgeMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_WireDivide_SetEdgeMode(self, *args)", "def enable_textured_solid_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"SOLID\"\n space.show_textured_solid = True\n return", "def plot_wireframe(Tfull):\n from mpl_toolkits.mplot3d import axes3d\n N = Tfull.shape[0]\n x = y = np.linspace(0, 1, N)\n X, Y = np.meshgrid(x,y)\n # Construct and return a function suitable for interactive demo\n def plot(elev=25, azim=50):\n fig = plt.figure(1, figsize=(14, 8))\n plt.clf()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_wireframe(X, Y, Tfull)\n ax.view_init(elev=elev, azim=azim)\n plt.axis('scaled')\n plt.xlabel('x (m)')\n plt.ylabel('y (m)')\n plt.title('T(x,y) on %dx%d grid' % (N,N))\n plot()\n return plot", "def setProtectSurfaces():\n dislin.shlsur()", "def _setmode(self, mode=None):\n if mode is None:\n return self._mode\n if mode not in [\"standard\", \"logo\", \"world\"]:\n return\n self._mode = mode\n if mode in [\"standard\", \"world\"]:\n self._angleOffset = 0\n self._angleOrient = 1\n else: # mode == \"logo\":\n self._angleOffset = self._fullcircle/4.\n self._angleOrient = -1", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def set_mode(self, mode):\n if mode == 'train':\n self.hidden = self._make_hidden(self.batch_size)\n elif mode == 'generate':\n self.hidden = self._make_hidden(1)", "def DualMode(self) -> bool:", "def ToggleAllGizmoLocalMode( self ):\n\n value = 
self.gizmoMgr.GetGizmoLocal( 'pos' )\n self.gizmoMgr.SetGizmoLocal( 'pos', not value )\n self.gizmoMgr.SetGizmoLocal( 'rot', not value )\n self.gizmoMgr.SetGizmoLocal( 'scl', not value )", "def setContourFilling(mode='cell'):\n mdict = {'cell':'CELL','polygon':'POLY'}\n dislin.shdmod(mdict[mode],'CONTUR')", "def _mask_mode(self):\r\n self._mode_select(0)", "def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)", "def enableRemoveDesign(self):\n if self.mode.selectedShipHull.aftQuadInfo == None:\n self.isDroneDesign = 1\n self.enableRemoveDroneDesign()\n else:\n self.isDroneDesign = 0\n self.enableRemoveShipDesign()", "def toggle_culling(self):\n self.view['cull'] = not self.view['cull']\n self.update_flags()", "def setDisplayWireframe():\n for node in nuke.allNodes():\n print node.name()\n goodGeo = [\"Group\", \"ReadGeo\",\"ReadGeo2\",\"Sphere\",\"Cube\",\"Cylinder\",\"Card\", \"Card2\"]\n if node.Class() in goodGeo:\n if node.Class() == \"Group\":\n node.begin()\n for child in nuke.allNodes():\n if child.Class() in goodGeo:\n child['display'].setValue(1)\n node.end()\n else:\n node['display'].setValue(1)", "def set_up_world_ao(self):\n scene = self.set_as_active()\n new_world = bpy.context.blend_data.worlds.new('World of Wireframe')\n scene.world = new_world\n new_world.light_settings.use_ambient_occlusion = True\n new_world.light_settings.ao_factor = 0.3\n\n renderengine = scene.wirebomb.data_renderengine\n\n if renderengine == 'CYCLES':\n new_world.use_nodes = True\n new_world.node_tree.nodes[1].inputs[0].default_value = (1, 1, 1, 1)\n\n for node in new_world.node_tree.nodes:\n node.select = False\n \n elif renderengine == 'BLENDER_RENDER':\n new_world.horizon_color = (1, 1, 1)", "def draw_car_body():\r\n\t# draw the car body\t\r\n\tglPushMatrix()\r\n\tglTranslatef(0,.5,0)\r\n\tglScalef(1,1,2)\r\n\tif wireframe:\r\n\t\tglutWireCube(1)\r\n\telse:\r\n\t\tglutSolidCube(1)\r\n\t\t# draw the wireframe outer shell\r\n\t\tglPushAttrib(GL_CURRENT_BIT)\r\n\t\tglPushAttrib(GL_LIGHTING_BIT)\r\n\t\tglDisable(GL_LIGHTING)\r\n\t\tglColor3f(0,0,0)\r\n\t\tglutWireCube(1.001)\t\r\n\t\tglPopAttrib()\r\n\t\tglPopAttrib()\r\n\tglPopMatrix()", "def SetRevolutionMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_SetRevolutionMode(self, *args)", "def comp_add_wireframe_freestyle(self):\n scene = self.set_as_active()\n scene.use_nodes = True\n tree = scene.node_tree\n tree.nodes.clear()\n\n # creating the nodes\n node_alphaover = tree.nodes.new('CompositorNodeAlphaOver')\n node_alphaover.location = -25, 50\n\n node_rlwire = tree.nodes.new('CompositorNodeRLayers')\n node_rlwire.location = -400, 250\n node_rlwire.scene = scene\n node_rlwire.layer = w_var.rlname\n\n node_rlclay = tree.nodes.new('CompositorNodeRLayers')\n node_rlclay.location = -400, -75\n node_rlclay.scene = scene\n node_rlclay.layer = w_var.rlname_other\n\n node_comp = tree.nodes.new('CompositorNodeComposite')\n node_comp.location = 400, 65\n\n node_viewer = tree.nodes.new('CompositorNodeViewer')\n node_viewer.location = 400, -125\n\n # connecting the nodes\n links = tree.links\n links.new(node_rlwire.outputs[0], node_alphaover.inputs[1])\n 
links.new(node_rlclay.outputs[0], node_alphaover.inputs[2])\n\n if w_var.cb_ao:\n node_mixcolor_wire = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor_wire.location = -125, 150\n node_mixcolor_wire.blend_type = 'MULTIPLY'\n node_mixcolor_wire.inputs[0].default_value = 0.730\n\n node_mixcolor_clay = tree.nodes.new('CompositorNodeMixRGB')\n node_mixcolor_clay.location = -125, -100\n node_mixcolor_clay.blend_type = 'MULTIPLY'\n node_mixcolor_clay.inputs[0].default_value = 0.730\n\n node_alphaover.location = 125, 75\n\n links.new(node_rlwire.outputs[0], node_mixcolor_wire.inputs[1])\n links.new(node_rlwire.outputs[10], node_mixcolor_wire.inputs[2])\n\n links.new(node_rlclay.outputs[0], node_mixcolor_clay.inputs[1])\n links.new(node_rlclay.outputs[10], node_mixcolor_clay.inputs[2])\n\n links.new(node_mixcolor_wire.outputs[0], node_alphaover.inputs[1])\n links.new(node_mixcolor_clay.outputs[0], node_alphaover.inputs[2])\n\n links.new(node_alphaover.outputs[0], node_comp.inputs[0])\n links.new(node_alphaover.outputs[0], node_viewer.inputs[0])\n\n else:\n links.new(node_alphaover.outputs[0], node_comp.inputs[0])\n links.new(node_alphaover.outputs[0], node_viewer.inputs[0])\n\n for node in tree.nodes:\n node.select = False", "def GetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ConvertSurfaceToBezierBasis_GetPlaneMode(self, *args)", "def plot_wireframe(self, line_width=None, index_row=0, index_col=0, show=True, plotter=None, title='', font_size=10,\n title_location=\"upper_edge\", font_color='black', camera=None):\n if not plotter:\n plotter = pv.Plotter()\n plotter.subplot(index_column=index_col, index_row=index_row)\n plotter.add_text(title, position=title_location, font_size=font_size, color=font_color)\n if camera is not None:\n plotter.set_position(camera[0])\n plotter.set_focus(camera[1])\n plotter.set_viewup(camera[2])\n plotter.add_mesh(self.pv_mesh, style='wireframe', line_width=line_width, show_scalar_bar=False, color=\"white\")\n if show:\n plotter.show()\n return plotter", "def setOperationMode(self):\n\n if self.ui.checkEditNone.isChecked():\n self.operationMode = 'normal'\n elif self.ui.checkEditBuildPoints.isChecked():\n self.operationMode = 'build'\n elif self.ui.checkEditHorizonMask.isChecked():\n self.operationMode = 'horizon'\n elif self.ui.checkPolarAlignment.isChecked():\n self.operationMode = 'star'\n\n self.drawHemisphere()\n\n return True", "def set_mode(self, mode=0, detection_param=0):\r\n return self._arm.set_mode(mode=mode, detection_param=detection_param)", "def GetPlaneMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_GetPlaneMode(self, *args)", "def solid(self):\r\n return not not self.model.prototype.solid", "def train(self, mode=True):\n super().train(mode)\n if mode and self.freeze_2d and self.backbone is not None:\n self._freeze(self.backbone)\n return self", "def flipNormals(self):\n self.flip = not self.flip", "def squarePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n for edges in SQUARE[\"EDGES\"]:\n for edge in edges:\n\n point = OpenMaya.MVector(edge[0], edge[1], edge[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n \n \n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get 
Color\n # self.getColor(\"polygon\")\n\n for polygons in SQUARE[\"POLYGONS\"]:\n for polygon in polygons:\n\n point = OpenMaya.MVector(polygon[0], polygon[1], polygon[2])\n point *= self.shape_size\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type == 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def set_und_surface(self):\n if (self.dimension == '3D'):\n self.part_RVE.Set(\n cells=self.part_RVE.cells.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n elif (self.dimension == '2D'):\n self.part_RVE.Set(\n faces=self.part_RVE.faces.getSequenceFromMask(mask=('[#1 ]', ), ),\n name='Set_RVE')\n else:\n print('dimension Error!')", "def addWireframe(self, name, wireframe):\n\n self.wireframes[name] = wireframe", "def SetStandbyFPMode(self):\n handler = self.get_command_object(\"SetStandbyFPMode\")\n handler()", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def wireframe(self, projection_type, canvas_dimensions):\n # Configure viewportself.screen_dimensions = {\n self.screen_dimensions = {\n \"width\": canvas_dimensions['width'],\n \"height\": canvas_dimensions['height']\n }\n\n self.projection.viewport = self.screen_dimensions\n self.projection.projection_type = projection_type\n self.projection.camera = self.cameras[0]\n self.projection.region_width = self.screen_dimensions.get('width')\n self.projection.region_height = self.screen_dimensions.get('height')\n\n # Draw polygons for each object\n projected_objects = []\n for obj in self.objects:\n print('Rendering: ', obj)\n\n world_transformation = obj.translate(\n obj.rotate(obj.scale(obj.vertices))\n )\n camera_transformation = obj.rotate(\n obj.translate(world_transformation, np.array(\n [\n -self.projection.camera.translation[0],\n -self.projection.camera.translation[1],\n -self.projection.camera.translation[2]\n ]\n )), np.array(\n [\n -self.projection.camera.rotation[0],\n -self.projection.camera.rotation[1],\n -self.projection.camera.rotation[2]\n ]\n \n )\n )\n projected_view = self.projection.project_all(camera_transformation)\n normalized_view = obj.normalize(\n projected_view, self.projection.viewport\n )\n projected_faces = []\n for face in obj.faces:\n poly = []\n for vertex_index in face:\n poly.append(\n [\n int(normalized_view[vertex_index][0]),\n int(normalized_view[vertex_index][1]),\n int(camera_transformation[vertex_index][2])\n ]\n )\n projected_faces.append(poly)\n center = list(obj.calculate_center(normalized_view))\n vertices = [ [int(p[0]), int(p[1]), int(p[2])] for p in normalized_view]\n # print('calculated_center: ', center)\n # print(''vertices)\n projected_objects.append({\n 'vertices': vertices,\n 'faces': obj.clip(self.projection.camera.translation, projected_faces),\n 'center': [ int(coord) for coord in obj.calculate_center(normalized_view) ],\n })\n print(projected_objects[0]['faces'][:20])\n return projected_objects", "def toggle_airplanes(self):\n if self.locations_map.show_airplanes:\n self.locations_map.show_airplanes = False\n else:\n if 
self.locations_map.zoom > 5:\n self.locations_map.show_airplanes = True\n self.locations_map.start_getting_locations_in_fov()\n else:\n self.btn_toggle_airplanes.state = 'normal'\n show_message_popup(\"Zoom level must be greater than 5.\")", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def setBorder3D():\n dislin.box3d()", "def enable3D(self):\r\n if(self.dataController.fileLoaded==True):\r\n self.dataController.toggleInteractiveMode()\r\n\r\n self.midsagittalView = False\r\n self.frontView = False\r\n self.topView = False\r\n self.bottomView = False\r\n self.threeDView = True", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def texture_mode_enabled():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n if space.viewport_shade == \"TEXTURED\":\n return True\n elif (space.viewport_shade == \"SOLID\" and\n space.show_textured_solid):\n return True\n return False", "def setMode(self, mode):\n if mode == 0 or mode == 1:\n with self.lock:\n self.mode = mode\n else:\n raise FliError(\"FLISetCameraMode failed\")", "def SetEdgeMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetEdgeMode(self, *args)", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def flicker_lights(self):\n if self.lighting:\n self.lighting = False\n else:\n self.lighting = True\n self.redraw()", "def switchMayaViewport(newMode):\n\n pass", "def modes_off(self):\n bm = self.fitsimage.get_bindmap()\n bm.reset_mode(self.fitsimage)", "def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)", "def setScreenMode(mode='normal'):\n screendict = {'normal':'REVERS', 'black':'NOREV'}\n dislin.scrmod(screendict[mode])", "def setPointMode(self, mode):\n for point in self.points:\n point.mode = mode", "def interaction_door(self) -> None:\n self.grid.win = True", "def set_multiplex_mode(self, c, on):\n self.binding.set_switcher_mode(on)\n return True", "def SetSurfaceSegmentMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_FaceDivide_SetSurfaceSegmentMode(self, *args)", "def set_mode(self, mode):\n if mode == self._model_modes.INFERENCE:\n self.network.eval()\n else:\n 
self.network.train()\n return self", "def SetRevolutionMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_SetRevolutionMode(self, *args)", "def terminatePlane3D():\n dislin.grffin()", "def set_is_window_transparent(self, flag):\n self.params['full_transparent'] = flag", "def modeHack(self, pin, mode, board=0):\n msg = [int(pin), int(mode)]\n return self.callModule('hackp', board, 0, 'setMode', msg)", "def flicker_lights(self):\n print 'Lights Set'", "def set_light_mode(self, is_lid):\n raise NotImplementedError()", "def setMode(self):\n if self.currentTarget != None and self.finishedAssault == 0:\n if self.isAssault == 1:\n if self.currentTarget != None:\n self.mode = 'assault'\n else:\n self.mode = 'escape'\n else:\n self.log.debug('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n ##self.myGalaxy.resultList.append('COUNT: %s: %s TARGET-> %s' % (self.myGalaxy.count, self.name, self.currentTarget.name))\n if ((len(self.activeWeapons) == 0 or (self.currentISP/self.myShipHull.maxISP) < 0.7)) and self.__module__ == 'anw.war.ship':\n self.mode = 'escape'\n else:\n range = funcs.getTargetRange(self.posX, self.posY, self.currentTarget.posX, self.currentTarget.posY)\n if range <= self.range:\n self.mode = 'engage'\n else:\n self.mode = 'close'\n else:\n self.mode == 'escape'\n if globals.serverMode == 0:\n self.shipsim.updateShipMode()", "def draw_normal_mode(mode=0, coords=None, normal_modes=None):\n fac=0.52917721067121 # bohr to A\n xyz =f\"{len(coords)}\\n\\n\"\n for i in range(len(coords)):\n atom_coords = [float(m) for m in coords[i][8:].split(' ')]\n mode_coords = [float(m) for m in normal_modes[mode][i][8:].split(' ')]\n xyz+=f\"{coords[i][0:4]} {atom_coords[0]*fac} {atom_coords[1]*fac} {atom_coords[2]*fac} {mode_coords[0]*fac} {mode_coords[1]*fac} {mode_coords[2]*fac} \\n\"\n view = py3Dmol.view(width=400, height=400)\n view.addModel(xyz, \"xyz\", {'vibrate': {'frames':10,'amplitude':1}})\n view.setStyle({'sphere':{'scale':0.30},'stick':{'radius':0.25}})\n view.setBackgroundColor('0xeeeeee')\n view.animate({'loop': 'backAndForth'})\n view.zoomTo()\n return(view.show())", "def mode(self) -> pulumi.Input[Union[str, 'ServiceMeshMode']]:\n return pulumi.get(self, \"mode\")", "def circlePrimitive(self):\n\n def drawWireframe(self):\n\n # Draw line\n glFT.glBegin(OpenMayaRender.MGL_LINE_STRIP)\n\n # Get Color\n # self.getColor(\"edge\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n \n def drawShaded(self):\n\n # Draw quad\n glFT.glBegin(OpenMayaRender.MGL_POLYGON)\n\n # Get Color\n # self.getColor(\"polygon\")\n\n def_circle = 100\n for idx in range(def_circle + 1):\n\n theta = 2 * 3.141592 * idx / def_circle\n x = self.shape_size * math.cos(theta)\n z = self.shape_size * math.sin(theta)\n\n point = OpenMaya.MVector(x, 0.0, z)\n point = self.setUpAxis(point)\n point = self.addOffsetRotation(point)\n point = self.addOffsetPosition(point)\n self.getBoundingBoxSize(point)\n\n glFT.glVertex3f(point.x, point.y, point.z)\n\n glFT.glEnd()\n\n glFT.glNewList(self.vList_id, OpenMayaRender.MGL_COMPILE)\n\n # Draw lines\n if self.draw_type == 0 or self.draw_type 
== 2:\n drawWireframe(self)\n \n # Draww Polygons\n if self.draw_type == 1 or self.draw_type == 2:\n drawShaded(self)\n\n glFT.glEndList()", "def develope_mode(self, _):\n global develope_mode\n develope_mode = not develope_mode", "def singleboolean(self, context, wall, o):\n\n # generate holes for crossing window and doors\n self.itM = wall.matrix_world.inverted()\n d = self.datablock(o)\n\n hole = None\n hole_obj = None\n # default mode defined by __init__\n self.detect_mode(context, wall)\n\n if d is not None:\n if self.mode != 'ROBUST':\n hole = d.interactive_hole(context, o)\n else:\n hole = d.robust_hole(context, o.matrix_world)\n if hole is None:\n return\n\n hole.data.materials.clear()\n for mat in wall.data.materials:\n hole.data.materials.append(mat)\n\n self.prepare_hole(hole)\n\n if self.mode == 'INTERACTIVE':\n # update / remove / add boolean modifier\n self.difference(wall, hole)\n\n elif self.mode == 'HYBRID':\n m = wall.modifiers.get('AutoMixedBoolean')\n\n if m is None:\n m = wall.modifiers.new('AutoMixedBoolean', 'BOOLEAN')\n m.operation = 'DIFFERENCE'\n\n if m.object is None:\n hole_obj = self.create_merge_basis(context, wall)\n m.object = hole_obj\n else:\n hole_obj = m.object\n self.union(hole_obj, hole)\n\n bpy.ops.object.select_all(action='DESELECT')\n\n # parenting childs to wall reference point\n if wall.parent is None:\n x, y, z = wall.bound_box[0]\n context.scene.cursor_location = wall.matrix_world @ Vector((x, y, z))\n # fix issue #9\n context.view_layer.objects.active = wall\n bpy.ops.archipack.reference_point()\n else:\n context.view_layer.objects.active = wall.parent\n\n if hole_obj is not None:\n hole_obj.select_set(state=True)\n\n wall.select_set(state=True)\n o.select_set(state=True)\n bpy.ops.archipack.parent_to_reference()\n wall.select_set(state=True)\n context.view_layer.objects.active = wall\n if \"archipack_wall2\" in wall.data:\n d = wall.data.archipack_wall2[0]\n g = d.get_generator()\n d.setup_childs(wall, g)\n d.relocate_childs(context, wall, g)\n elif \"archipack_roof\" in wall.data:\n pass\n if hole_obj is not None:\n self.prepare_hole(hole_obj)", "def setModeSelect(self):\n self.scene().mode = fsScene.MODE_SELECT", "def set_pewter(face):\n ambient = [ 0.10588, 0.058824, 0.113725, 1.0 ]\n diffuse = [ 0.427451, 0.470588, 0.541176, 1.0 ]\n specular = [ 0.3333, 0.3333, 0.521569, 1.0 ]\n shininess = 9.84615\n glMaterialfv(face, GL_AMBIENT, ambient);\n glMaterialfv(face, GL_DIFFUSE, diffuse);\n glMaterialfv(face, GL_SPECULAR, specular);\n glMaterialf(face, GL_SHININESS, shininess);", "def set_pewter(face):\n ambient = [ 0.10588, 0.058824, 0.113725, 1.0 ]\n diffuse = [ 0.427451, 0.470588, 0.541176, 1.0 ]\n specular = [ 0.3333, 0.3333, 0.521569, 1.0 ]\n shininess = 9.84615\n glMaterialfv(face, GL_AMBIENT, ambient);\n glMaterialfv(face, GL_DIFFUSE, diffuse);\n glMaterialfv(face, GL_SPECULAR, specular);\n glMaterialf(face, GL_SHININESS, shininess);", "def SetSurfaceSegmentMode(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetSurfaceSegmentMode(self, *args)", "def solid(self):\r\n return not not self.prototype.solid", "def __init__(self, model, size):\n self.model = model\n self.screen = pygame.display.set_mode(size)", "def set_light_off(self):\r\n self._light = \"OFF\"", "def set_wireframe_export_path(self, path):\n\n\t\tself.export[\"wire_mesh\"] = path", "def mode(self) -> Mode:\n ...", "def setConstructionMode(self):\n self.step = 0\n self.display = []\n # t=Trajectory.createFromTuples(self.drawing)\n # 
l=t.sampleSegments(self.sample_number)\n self.coefficients = Fourier.transform(\n self.sample, self.coefficients_number)", "def mode(self):\r\n pass", "def change_light(self):\n self._light_status = not self._light_status", "def setNoZeroColor():\n dislin.nobgd()", "def set_copper(face):\n ambient = [ 0.19125, 0.0735, 0.0225, 1.0 ]\n diffuse = [ 0.7038, 0.27048, 0.0828, 1.0 ]\n specular = [ 0.256777, 0.137622, 0.086014, 1.0 ]\n shininess = 128.0\n glMaterialfv(face, GL_AMBIENT, ambient);\n glMaterialfv(face, GL_DIFFUSE, diffuse);\n glMaterialfv(face, GL_SPECULAR, specular);\n glMaterialf(face, GL_SHININESS, shininess);" ]
[ "0.8165864", "0.7190839", "0.67805", "0.6758721", "0.65666986", "0.6441041", "0.6434427", "0.6336882", "0.6314271", "0.6215526", "0.61872584", "0.6129271", "0.60960007", "0.6085161", "0.5989606", "0.5981274", "0.59671307", "0.5965656", "0.5920257", "0.58588403", "0.5820351", "0.5812003", "0.5810211", "0.5778697", "0.5736275", "0.5732092", "0.5725577", "0.5725093", "0.5725093", "0.5684997", "0.5608193", "0.56012833", "0.5584724", "0.5579787", "0.555539", "0.55537635", "0.5549015", "0.5541358", "0.55311406", "0.55250555", "0.5524594", "0.55190754", "0.5514473", "0.5499374", "0.54956174", "0.5487067", "0.5460087", "0.5450701", "0.543527", "0.5433395", "0.54244995", "0.54046893", "0.53809077", "0.5378903", "0.5377891", "0.533795", "0.5333515", "0.5323718", "0.5297308", "0.5296734", "0.5288503", "0.5277692", "0.52687454", "0.5264125", "0.5264125", "0.5251423", "0.52377087", "0.52193725", "0.52150774", "0.52099305", "0.52032363", "0.5200163", "0.51964754", "0.5193083", "0.5191357", "0.5190641", "0.51897734", "0.5183637", "0.5182124", "0.518065", "0.5177472", "0.5175664", "0.5170974", "0.51705605", "0.51696146", "0.51643914", "0.51591694", "0.5154733", "0.5154733", "0.51503646", "0.514194", "0.5139919", "0.5134492", "0.5134093", "0.5132684", "0.51294833", "0.5128748", "0.5126248", "0.5122724", "0.51202774" ]
0.7538678
1
Handle individual property changes
def onChanged(self, vp, prop):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onPropertiesChange(self, data):\n pass", "def propertyChanged(self, p_str, Any): # real signature unknown; restored from __doc__\n pass", "def on_property_change(self, td, name):\n\n raise NotImplementedError()", "def onChanged(self, fp, prop):\n if prop == \"Length\" or prop == \"Breadth\" or prop == \"Draft\":\n pass", "def on_property_select(self, prop):\n pass", "def listener(self, proxy, changed_properties, invalidated_properties):\n metadata = changed_properties.lookup_value('Metadata')\n # do not signal if the metadata is empty\n self.process_metadata(metadata, False)", "def __process_property(self, element, property_name):\n try:\n expected_value = element.properties[property_name]\n except KeyError:\n return\n\n value_changed = self.__check_property_changed(element, property_name)\n if isinstance(expected_value, svgelements.svgelements.Color):\n if value_changed or expected_value.opacity:\n self.path_began = False\n elif not value_changed:\n return\n\n if property_name == 'fill':\n if value_changed:\n self.generator.fill_color(expected_value)\n if expected_value.opacity:\n self.generator.fill()\n elif property_name == 'linecap':\n self.generator.line_cap(expected_value)\n elif property_name == 'linejoin':\n self.generator.line_join(expected_value)\n elif property_name == 'miterlimit':\n self.generator.miter_limit(expected_value)\n elif property_name == 'stroke':\n if value_changed:\n self.generator.stroke_color(expected_value)\n if expected_value.opacity:\n self.generator.stroke()\n elif property_name == 'stroke_width':\n self.generator.stroke_width(expected_value)\n elif property_name == 'transform':\n self.__save(element)\n self.generator.transform(expected_value[0], expected_value[1],\n expected_value[2], expected_value[3],\n expected_value[4], expected_value[5])\n\n self.properties[-1][property_name] = expected_value", "def process_property(self, resources, resource, model, prop, context):\n pass", "def _propertyStateChangedSlot(self):\r\n \r\n self._updateButtonStates()", "def _changed(self, name, value):\n have_lock = self._change_lock.acquire(blocking=False)\n if have_lock:\n self._change_lock.release()\n self.changed()\n if not name.startswith('_'):\n self.PropertiesChanged(self.INTERFACE, {name: value}, [])", "def on_change(self, value):", "def _adapter_properties_changed(self, interface, changed_props, invalidated, path):\n if 'Powered' in changed_props.keys():\n if not self.is_powered:\n self.is_powered = True\n if 'DiscoverableTimeout' in changed_props.keys():\n # DiscoverableTimeout = 0 means timeout is disabled\n if self.discoverable_timeout != 0:\n self.discoverable_timeout = 0", "def _modifyProperty(self,dec):\n pass", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def on_properties_changed(self, adapter, path, dbus_iface, sig, data):\n iface = data[0]\n properties = data[1]\n if iface == DEVICE_IFACE:\n for key, value in properties.items():\n if key == \"Connected\":\n self.connected = value\n elif iface == GATT_CHRC_IFACE:\n for key, value in properties.items():\n if key == \"Value\":\n #print(\"***\" + str(key) + \", \" + str(value))\n self.value = value[0] + 256*value[1]", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def exogenous_change(self):\n pass", "def _update_property(self, field, value):\n if value is not None:\n self.properties[field] = value\n elif field in self.properties:\n del self.properties[field]", "def changed(self):\n\t\tpass", "def _on_properties_change(self, event=None):\n 
with self.layer.events.properties.blocker():\n\n with qt_signals_blocked(self.color_by_combobox):\n self.color_by_combobox.clear()\n self.color_by_combobox.addItems(self.layer.properties_to_color_by)", "def process_property(self, prop):\n NifLog.warn(f\"Unknown property block found : {prop.name}\")\n NifLog.warn(f\"This type isn't currently supported: {type(prop)}\")", "def update_object ( self, event ):\n self.value = self.control.GetValue()", "def changed(self, *args, **kwargs): # real signature unknown\n pass", "def handle_update(self, json_snippet, tmpl_diff, prop_diff):\n subscription = self._get_subscription()\n subscription.update(prop_diff)", "def _device_properties_changed(self, interface, changed_props, invalidated, path):\n if 'Connected' in changed_props.keys():\n if changed_props['Connected']:\n logger.info(\"Remote device connected\")\n else:\n logger.info(\"Remote device was disconnected\")\n for service in self._app.services:\n service.remote_disconnected()\n if callable(self.on_remote_disconnected):\n self.on_remote_disconnected()", "def before_update(self, obj, st):\n pass", "def process_IN_MODIFY(self, event):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def state_changed(self, oldstate, newstate, event, *args, **kwargs):", "def handle_actual_updated(self):\n self._actual_updated()", "def _data_updated_callback(self, attr, old, new):\n pass", "def onUpdated(self):", "def changeProperty(self, node, name, propertyName, value, setIfNotExist=False):", "def change():", "def data_changed(self):\n return", "def on_change(key):\n pass", "def updateProperties(self):\n self.manage_changeProperties(title = self.getTitle(),\n description = self.getDescription(),\n basepath = self.getPath())", "def after_update(self, obj, st):\n pass", "def subscribe_to_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if not hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n setattr(object, _LAZY_PROP_SUBSCRIBERS, defaultdict(lambda: set()))\n\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].add(on_change_func)", "def on_change(self, pvname=None, **kws):\n\n current_ctr = kws['value']\n # init on first read\n if self.current_counter is None:\n self.current_counter = current_ctr - 1 # the self.current_counter holds previous\n self.eventq.put(current_ctr)", "def _set_changed(self) -> None:\n self._changed = True", "def _itemChanged(self, event):\n if event == items.ItemChangedType.VISUALIZATION_MODE:\n item = self.sender()\n if item is not None: # This occurs with PySide/python2.7\n self.__isEnabled = item.isPropertyEnabled(self.__propertyName)\n self.__updateFlags()\n\n # Notify model\n model = self.model()\n if model is not None:\n begin = self.index(column=0)\n end = self.index(column=1)\n model.dataChanged.emit(begin, end)", "def update(self, **kwargs):\n for key, value in kwargs.items():\n try:\n is_property = isinstance(getattr(self.__class__, key), property)\n except AttributeError:\n continue\n\n if is_property:\n setattr(self, key, value)", "def notify_change(self, change):\n # Send the state to the frontend before the user-registered callbacks\n # are called.\n name = change['name']\n if self.comm is not None and getattr(self.comm, 'kernel', True) is not None:\n # Make sure this isn't information that the front-end just sent us.\n if name in self.keys and self._should_send_property(name, getattr(self, name)):\n # Send new state to front-end\n self.send_state(key=name)\n super().notify_change(change)", "def 
run_setprop(self, expanded, unexpanded) :\n\t\toptions, args = self.getopt([\"name=\", \"value=\"], expanded)\n\t\tif (options is None) and (args is None) :\n\t\t\treturn -1\t# message was already displayed in self.getopt()\n\t\tif not args :\n\t\t\treturn self.errormessage(\"Needs at least one object to change this property\")\n\t\tif not (options.has_key(\"name\") and options.has_key(\"value\")) :\n\t\t\treturn self.errormessage(\"You must supply a property name and value\")\n\t\tproperty = options[\"name\"]\n\t\tif options.has_key(\"value\") :\n\t\t\tpropvalue = options[\"value\"]\n\t\t\ttry :\n\t\t\t\t# maybe it's a list in a string, e.g. \"['e', 'f']\"\n\t\t\t\t# or something like that\n\t\t\t\tnewvalue = eval(propvalue)\n\t\t\t\tif (type(newvalue) != type(0)) and (type(newvalue) != type(0.0)) :\n\t\t\t\t\t# we mustn't convert numeric to string\n\t\t\t\t\tpropvalue = newvalue\n\t\t\texcept NameError :\n\t\t\t\tpass\t# normal string\n\t\telse :\n\t\t\tpropvalue = \"\"\n\n\t\tstatus = 0\n\t\tfor arg in args :\n\t\t\tobject = self.toObject(self.__context, arg)\n\t\t\tif object is not None :\n\t\t\t\tif not self.HasPerms(object, 'Manage properties') :\n\t\t\t\t\tstatus = status - 1\n\t\t\t\telif hasattr(object, 'hasProperty') :\n\t\t\t\t\tif not object.hasProperty(property) :\n\t\t\t\t\t\tstatus = status + self.errormessage(\"Object %s has no property %s\" % (self.ObjectPath(object), property))\n\t\t\t\t\telse :\n\t\t\t\t\t\t# in the following lines the absence of a _updateProperty\n\t\t\t\t\t\t# attribute indicates an object without properties (e.g. a method)\n\t\t\t\t\t\t# which indicates an object for which setting properties is a nonsense\n\t\t\t\t\t\tif hasattr(object, \"_updateProperty\") :\n\t\t\t\t\t\t\tobject._updateProperty(property, propvalue)\n\t\t\t\t\t\t\tself.htmlmessage(\"Object %s property %s was modified to %s\" % (self.ObjectPath(object), property, str(propvalue)))\n\t\treturn status", "def update(self):\n event = self._watcher()\n self._event_properties['Timestamp'] = event.timestamp\n self._event_properties['EventType'] = event.event_type\n if hasattr(event, 'Drive'):\n self._event_properties['Drive'] = event.Drive\n if hasattr(event, 'Path'):\n self._event_properties['Path'] = event.Path\n if hasattr(event, 'FileName'):\n self._event_properties['FileName'] = event.FileName\n if hasattr(event, 'Extension'):\n self._event_properties['Extension'] = event.Extension", "def update_properties(self, prop_dict):\n ft_dict = {ft.name: ft for ft in self.get_field_types()}\n for name, val in prop_dict.items():\n ft = ft_dict[name]\n if ft.is_parameter():\n key = \"value\"\n else:\n key = \"sample\"\n if issubclass(type(val), Sequence) and ft.array:\n self.set_field_value_array(name, None, [{key: v} for v in val])\n else:\n self.set_field_value(name, None, {key: val})", "def do_set_property(self, pspec, val):\n # FIXME: need an asynchronous API to set these properties,\n # particularly 'private'\n\n if pspec.name == 'name':\n self._name = val\n elif pspec.name == 'color':\n self._color = val\n elif pspec.name == 'tags':\n self._tags = val\n elif pspec.name == 'private':\n self._private = val\n else:\n raise ValueError('Unknown property %r', pspec.name)\n\n self._publish_properties()", "def on_selection_change(self, tree_selection):\n (model, tree_iter) = tree_selection.get_selected()\n if not tree_iter:\n return\n obj = model.get_object(tree_iter)\n self.on_property_select(obj)\n\n # Always expand multi value properties when selected\n is_multi_value = isinstance(obj, 
BaseProperty) and len(obj.values) > 1\n if is_multi_value:\n tree_selection.get_tree_view().expand_row(model.get_path(tree_iter), False)", "def modelChanged(self) -> None:\n ...", "def handleProperty(self, propOpType, propertyId, value = None):\n\n # check for property specific handler if any\n # call property specific handler if found or call\n # default property handler\n if (propertyId in self.propHandlerDict and self.propHandlerDict[propertyId] != None):\n return self.propHandlerDict[propertyId](propertyId, value, propOpType)\n else:\n return self._genericPropertyHandler(propertyId, value, propOpType)", "def properties(self):", "def properties(self):", "def properties(self):", "def _update_proxy(self, change):\n # The superclass handler implementation is sufficient.\n super(AbstractItemView, self)._update_proxy(change)", "def OnAttributesUpdated():\n pass", "def set_property_value(self, current_value, new_value):\n if self._enabled:\n current_value *= self.marker_factor\n new_value *= self.marker_factor\n if live_object_is_valid(self._parent) and current_value != new_value:\n new_param_value = max(self._absolute_range[0] * self.marker_factor, min(self._absolute_range[1] * self.marker_factor, new_value))\n setattr(self._parent, self._property_name, new_param_value)\n if self._should_zoom:\n self._parent.view.show_loop()\n self._parent.view.show_loop()", "def process_IN_ATTRIB(self, event):", "def properties(self):\n raise NotImplementedError", "def svn_fs_props_changed(*args):\r\n return _fs.svn_fs_props_changed(*args)", "def updateData(self, fp, prop):\n return", "def changed_event(self):\n return True", "def settings_changed(self, name, value):\n return", "def on_change(self, event):\n event_path = event.src_path\n observed_paths = []\n\n for watchdog_path, child_observed_paths in self._watch_dog_observed_paths.items():\n if event_path.startswith(watchdog_path):\n observed_paths += child_observed_paths\n\n if not observed_paths:\n return\n\n changed_paths = []\n for path in observed_paths:\n path_obj = Path(path)\n # The path got deleted\n if not path_obj.exists():\n self._observed_paths.pop(path, None)\n changed_paths += [path]\n else:\n new_checksum = calculate_checksum(path)\n if new_checksum != self._observed_paths.get(path, None):\n changed_paths += [path]\n self._observed_paths[path] = new_checksum\n if changed_paths:\n self._input_on_change(changed_paths)", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def on_changed(self, func):\n return self._observers.connect('changed', lambda val: func(val))", "def __set__(self, model_instance, value):\r\n raise ValueError, 'Virtual property is read-only'", "def setChanged(self,value=True):\n self.changed = value", "def setChanged(self,value=True):\n self.changed = value", "def _on_state_change(\n self, entity: Union[str, dict], attribute: str, old: str, new: str, kwargs: dict\n ) -> None:\n new_value = float(new)\n\n above = self.properties.get(CONF_ABOVE)\n below = self.properties.get(CONF_BELOW)\n\n if above and new_value >= above:\n self.toggle(state=self.properties[CONF_STATE])\n elif below and new_value < below:\n self.toggle(state=self.properties[CONF_STATE])\n else:\n self.toggle(opposite_of=self.properties[CONF_STATE])", "def Property(name):\n\n attr_name = \"__\" + name\n\n def get(self):\n try:\n return self.__dict__[attr_name]\n except KeyError:\n raise AttributeError, name\n\n def set(self, value):\n if attr_name not in self.__dict__ \\\n or self.__dict__[attr_name] != 
value:\n self.__dict__[attr_name] = value\n self.is_modified = True\n\n return property(get, set)", "def dynchanged(self):\n self._dynchanged = True", "def changeProperty(self, prop, function, conditional=lambda x:True):\r\n beam_elements = copy.deepcopy(self)\r\n for previous_beam_element in beam_elements:\r\n if conditional(previous_beam_element):\r\n current_attribute_value = getattr(previous_beam_element, prop)\r\n setattr(previous_beam_element, prop, function(current_attribute_value)) \r\n return BeamElements(beam_elements.bar_elements)", "def _properties_changed(self, interface, properties, invalidated, path):\n if self._transfer is None:\n return\n if self._transfer.path != path:\n return\n if \"Status\" not in properties:\n return\n\n status = properties[\"Status\"]\n \n # still going?\n if status in [\"queued\", \"active\"]:\n return\n\n # store and cleanup before anything can blow up\n self._transfer = None\n fname = self._transfer_file\n self._transfer_file = None\n\n # Bluez doesn't elaborate on the error :(\n if status == \"error\":\n logger.info(\"Obex session transfer from '{}' failed.\".format(\n self._destination))\n if self.on_transfer_error:\n self.on_transfer_error(\n client=self)\n\n # Bluez writes the data to a temp file so we need\n # to return all data in that file and delete it\n # NOTE: parsing is the initiators responsibility\n if status == \"complete\":\n logger.info(\"Obex session transfer from '{}' completed.\".format(\n self._destination))\n data = None\n try:\n with open(fname, 'r') as f:\n data = f.read()\n except Exception:\n logger.exception(\"Error reading transferred data in \"\n \"temporary file '{}' from '{}'.\".format(\n fname,\n self._destination))\n if self.on_transfer_error:\n self.on_transfer_error(\n client=self)\n else:\n if self.on_transfer_complete:\n self.on_transfer_complete(\n client=self,\n data=data)\n\n # delete the temporary file\n try:\n os.remove(fname)\n logger.debug(\"Temporary destination file '{}' for transfer from\"\n \" '{}' has been deleted.\".format(\n fname,\n self._destination))\n except Exception as e:\n logger.warning(\"Error deleting temporary destination file '{}' \"\n \"for transfer from '{}' - {}\".format(\n fname,\n self._destination,\n e))", "def on_entity_update(self, event):\n self.entity.on_entity_update(event)", "def update_property(self, property_info):\n SchemaValidator(self.schema_extension_only, self.full_schema_graph).validate_property_schema(property_info)\n self.schema[\"@graph\"].append(property_info)\n self.load_schema(self.schema)\n print(\"Updated the property {} successfully!\".format(property_info[\"rdfs:label\"]))", "def update_properties(self, update_dict: Dict[str, Any]):\n self.event_properties.update(update_dict)", "def Update(self, values):\n # XXX: Rewrite to validate without fragile locks\n with self._change_lock:\n for prop, value in values.items():\n if prop in self.__properties__:\n setattr(self, prop, value)\n else:\n self._props[prop] = value\n self.changed()\n self.PropertiesChanged(self.INTERFACE, values, [])", "def changed(self, event: Event):\n\n for observer in self._observers:\n observer.on_change(event)", "def _attr_updated(self, name, value):\n event = AttributeUpdateEvent(self, name, value)\n events.notify(event)", "def property(self, p_int): # real signature unknown; restored from __doc__\n pass", "def set_property(self, name, value):\n self.properties[name] = value", "def set_changed(self, parts):\r\n self.command_manager.set_changed(parts)", "def forward_callbacks(self, 
obj):\n obj.add_change_callback(self._forward_cb)", "def endpointValueChanged(self, endpoint):\n if self._printSWAP == True:\n print endpoint.name + \" in address \" + str(endpoint.getRegAddress()) + \" changed to \" + endpoint.getValueInAscii()", "def autoprops_generated_setter(self, **kwargs):\n setattr(self, private_property_name, kwargs[property_name])", "def __call__(self, change: ChangeDict) -> None:\n old = None\n new = None\n ctype = change[\"type\"]\n if ctype == \"create\":\n new = change[\"value\"]\n elif ctype == \"update\":\n old = change[\"oldvalue\"]\n new = change[\"value\"]\n elif ctype == \"delete\":\n old = change[\"value\"]\n attr = self.attr\n owner = change[\"object\"]\n handler = getattr(owner, self.funcname)\n if isinstance(old, Atom):\n old.unobserve(attr, handler)\n if isinstance(new, Atom):\n new.observe(attr, handler)\n elif new is not None:\n msg = \"cannot attach observer '%s' to non-Atom %s\"\n raise TypeError(msg % (attr, new))", "def _validate(self):\n for name, prop in self._properties.iteritems():\n value = getattr(self, name, None)\n prop._do_validate(value)", "def on_changes(self, ts, changes):\n # This is a ConfigBlock object\n b = self.config_block\n\n # Set attributes\n for name, value in changes.items():\n setattr(self, name, value)\n\n # Force regs take priority\n if b.FORCE_RST in changes:\n self.OUT = 0\n elif b.FORCE_SET in changes:\n self.OUT = 1\n elif self.inp_matches_edge(changes.get(b.RST, None), self.RST_EDGE):\n self.OUT = 0\n elif self.inp_matches_edge(changes.get(b.SET, None), self.SET_EDGE):\n self.OUT = 1", "def onChange(self, parent):\r\n pass", "def norm_real_changed(self):\n self.model.notifyObservers()", "def AddSubversionPropertyChange(filename):\r\n if self.options.emulate_svn_auto_props and IsFileNew(filename):\r\n svnprops = GetSubversionPropertyChanges(filename)\r\n if svnprops:\r\n svndiff.append(\"\\n\" + svnprops + \"\\n\")", "def update_prim_properties(self, prim_properties):\n\n _object = self.get_object_from_store(FullID = prim_properties['FullID'])\n\n if _object == None:\n #if self.settings.LOG_VERBOSE and self.settings.ENABLE_OBJECT_LOGGING: logger.debug(\"Creating a new object and storing it's attributes. LocalID = %s\" % (object_properties['LocalID']))\n _object = Object()\n _object._update_properties(prim_properties)\n self.store_object(_object)\n else:\n #if self.settings.LOG_VERBOSE and self.settings.ENABLE_OBJECT_LOGGING: logger.debug(\"Updating an object's attributes. 
LocalID = %s\" % (object_properties['LocalID']))\n _object._update_properties(prim_properties)\n if _object.UpdateFlags & 2 != 0 and self.agent != None:\n \n self.agent.events_handler.handle(AppEvent(\"ObjectSelected\",\n payload = {'object':_object}))", "def update_observable(self):\n self.scenario.update_observable()", "def change_data(self):\n\n if self.changed is not True:\n self.changed = True\n print('True')", "def onChanged(self, w, scroll_type, val, joint_name):\n if joint_name[0] in 'LR' and self._mirror_moves.get_active():\n mirrored = {'L':'R', 'R':'L'}[joint_name[0]] + joint_name[1:]\n scales = [self.scales[joint_name], self.scales[mirrored]]\n else:\n scales = [self.scales[joint_name]]\n for scale in scales:\n scale.onChanged(w=w, scroll_type=scroll_type, val=val)", "def handle_state_change(self, entity, attribute, old, new, kwargs):\n self.log(\"Sensor notify gate state change reported on {} from {} to {}\".format(self._gate, old, new))\n if self._allow_monitor_control:\n if new == \"on\":\n self._monitor.enable_function()\n elif new == \"off\":\n self._monitor.set_function_state('None')\n # if user manually turns off notify gate, then should clear squelch\n # otherwise notify gate will be turned back on with timer expires\n # self.reset_squelch()\n else:\n self.log(\"ERROR: unexpected state change to {}\".format(new))\n self._current_gate_state = new", "def _voltage_changed(self):\n if self.checkValueBool:\n self.check_status()" ]
[ "0.75981975", "0.71719784", "0.7011226", "0.69150203", "0.66239375", "0.6611883", "0.6571908", "0.65436894", "0.6386289", "0.63312656", "0.6329722", "0.632825", "0.6243396", "0.6038764", "0.6026384", "0.60087305", "0.60087305", "0.60087305", "0.596836", "0.5836696", "0.5827314", "0.5819513", "0.58160836", "0.5801498", "0.5795733", "0.5768573", "0.5763614", "0.5757387", "0.5749916", "0.5749916", "0.5708092", "0.56964666", "0.56290233", "0.56215024", "0.56116194", "0.55771506", "0.5576458", "0.55688906", "0.5555555", "0.5552263", "0.5550946", "0.5520612", "0.55131274", "0.55114347", "0.5509099", "0.5503746", "0.5501927", "0.5488135", "0.54863423", "0.54516536", "0.5447529", "0.54465806", "0.5433047", "0.5433047", "0.5433047", "0.54300505", "0.54278874", "0.54097944", "0.5405279", "0.5386507", "0.53860265", "0.53724027", "0.5366154", "0.5352368", "0.5342727", "0.5335941", "0.5335941", "0.5327517", "0.5316956", "0.5316956", "0.531113", "0.5310851", "0.5306732", "0.528308", "0.5281307", "0.5272021", "0.5271645", "0.5264776", "0.5261302", "0.52605325", "0.52585375", "0.5255699", "0.5255587", "0.5254955", "0.5254031", "0.5253877", "0.52485824", "0.52480394", "0.52354014", "0.52308017", "0.5225773", "0.52244145", "0.5223421", "0.5220835", "0.5219475", "0.52190435", "0.52176815", "0.52167493", "0.52024674" ]
0.74094045
2
Check internet connection. Raise exception if none.
def check_connection(url="http://example.com/"):
    try:
        requests.head(url)
        return True
    except requests.ConnectionError:
        spinner.warn("No internet connection 🤭")
        sys.exit(1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def internet_on(): \n try:\n urlopen('http://www.google.com', timeout=2)\n return True\n except urlopen.URLError as err: \n return False", "def check_internet_connection():\n logging.debug('Checking internet connection')\n try:\n urlopen(config.api_base_url,\n timeout=config.timeout_internet_connection)\n logging.debug('Connected to the internet')\n return True\n except URLError as err:\n logging.debug('No internet connection')\n return False", "def internet_on():\n try:\n urllib.request.urlopen('http://216.58.192.142', timeout=1)\n return True\n except urllib.error.URLError: \n return False", "def check_internet_connection(self):\n while not has_internet():\n time.sleep(5)\n self._logger.info('Internet connection is enabled')", "def is_internet_on() -> bool:\n try:\n urlopen('https://www.google.com/', timeout=5)\n return True\n except: \n return False", "def internet_availability(cls, **kwargs):\n if internet_connectivity_check():\n cls.response(\"The internet connection is ok\")\n return True\n else:\n cls.response(\"The internet is down for now\")\n return False", "def connected_internet() -> bool:\n url = \"http://www.google.com\"\n timeout = 5\n try:\n requests.get(url, timeout=timeout)\n return True\n except (requests.ConnectionError, requests.Timeout):\n print(\"\\nError: No internet connection!\\n\")\n return False", "def check_conn():\n try:\n urllib2.urlopen(\"http://www.google.com\", timeout=5)\n return True\n except urllib2.URLError:\n pass\n return False", "def check_no_network():\n try:\n socket.gethostbyname(\"www.google.com\")\n return False\n except:\n return True", "def is_internet():\n try:\n urllib.request.urlopen(URL, timeout=1)\n return True\n except urllib.error.URLError:\n return False", "def has_server_connection(driver, server_url):\r\n\r\n OK = True\r\n try:\r\n driver.get(server_url)\r\n # we aim at this element on the chrome default page on 'No internet connection' error\r\n ele = driver.find_element_by_xpath('//*[@id=\"main-message\"]/h1/span')\r\n if ele is not None and ele.text == 'No internet':\r\n printR(' No internet connection')\r\n return not OK\r\n \r\n if requests.get(server_url).status_code != 200:\r\n printR(f' Error connecting to {server_url}')\r\n return not OK\r\n except: \r\n return OK", "def test_connection(self, **kwargs):\n try:\n url = \"{0}\".format(self.base_url)\n response = requests.request(\"GET\", url)\n if response.status_code < 500:\n return True\n else:\n return False\n except KeyError:\n return False", "def check_connection(connected, config):\n\ttry:\n\t\turllib.request.urlopen(\"https://www.google.co.uk\").close()\n\texcept urllib.error.URLError:\n\t\tif connected is True:\n\t\t\tconnected = False\n\t\t\ton_disconnected()\n\t\t\tif config[\"closeOnDisconnect\"] is True or config[\"closeOnChange\"] is True:\n\t\t\t\tsys.exit()\n\telse:\n\t\tif connected is False:\n\t\t\tconnected = True\n\t\t\ton_connected()\n\t\t\tif config[\"closeOnConnect\"] is True or config[\"closeOnChange\"] is True:\n\t\t\t\tsys.exit()\n\treturn connected", "def is_able_to_connect(url: Optional[Text] = None) -> bool:\n if url is None:\n url = \"https://www.google.com\"\n try:\n _ = requests.get(url, timeout=.5)\n return True\n except (requests.ConnectionError, requests.Timeout):\n return False", "def check_connection(self):\n pass", "def has_internet() -> bool:\n if public_address():\n return True\n else:\n return False", "def net_check():\n resp = None\n host = \"https://gitlab.manjaro.org\"\n # noinspection PyBroadException\n try:\n resp = 
urllib.request.urlopen(host, timeout=2)\n except Exception:\n pass\n return bool(resp)", "def onCheckConnectionError(self):\r\n\r\n # show the error message\r\n msgBox = QMessageBox(self)\r\n msgBox.setWindowTitle(conf_parser.get(\"APP\", \"name\"))\r\n msgBox.setText(\"Internet connection not detected.\")\r\n msgBox.setStandardButtons(QMessageBox.Retry | QMessageBox.Close)\r\n msgBox.setDefaultButton(QMessageBox.Close)\r\n ret = msgBox.exec()\r\n\r\n # interact user\r\n if(ret == QMessageBox.Close):\r\n # exit program\r\n sys.exit()\r\n if(ret == QMessageBox.Retry):\r\n # retry connection\r\n self.thread = threading.Thread(target=self.checkServerThread)\r\n self.thread.setDaemon(True)\r\n self.thread.start()\r\n self.pros = 0\r\n self.check_timer.start(100)", "def _check_connection(self):\n for _ in range(3):\n try:\n r = get(f\"http://{self.ip}/student/{self.user}\")\n if r.ok:\n break \n except OSError as e:\n print(f\"Connection error:\\n{e}\")\n sleep(2)\n else:\n raise ConnectionError(f\"Can not connect to server with params ip: {self.ip}, user: {self.user}\")", "def isonline():\n\n conn = httplib.HTTPConnection(\"www.google.com\", timeout=5)\n try:\n conn.request(\"HEAD\", \"/\")\n conn.close()\n return True\n except:\n conn.close()\n return False", "def _CheckConnect(self):\n try:\n resp = requests.get(self._target_url, timeout=2)\n if resp.headers['Maximum-Bytes']:\n self._max_bytes = int(resp.headers['Maximum-Bytes'])\n return resp.status_code == 200\n except requests.exceptions.ConnectionError:\n return False\n except Exception as e:\n self.exception('Unexpected test connect failure: %s', str(e))\n return False", "def check_connection():\n status_code = urllib.request.urlopen(local_settings.DAFT_URL).getcode()\n\n if status_code == 200:\n on_or_404 = 'OK'\n else:\n on_or_404 = 'NOT OK'\n \n return on_or_404", "def check_connection(self):\n return False", "def retry_if_connection_error(exception):\r\n # return True\r\n return isinstance(exception, HttpError)", "def _checknet():\n exit_code = os.system('ping -c 1 www.baidu.com 1>/dev/null 2>&1')\n return exit_code", "def check_status(self):\n log = logging.getLogger(\"%s.%s\" % (self.log_name,\n inspect.stack()[0][3]))\n log.setLevel(self.log_level)\n\n if self.url:\n return True\n try:\n result = requests.get(self.ext_url)\n self.url = self.ext_url\n return True\n except requests.exceptions.ConnectionError:\n pass\n try:\n result = requests.get(self.local_url)\n log.warning(\"Url '%s' not connecting. 
Using local_url '%s'\" % \\\n (self.ext_url, self.local_url))\n self.url = self.local_url\n return True\n except requests.exceptions.ConnectionError:\n self.url = None\n log.warning(\"Unable to connect using urls: %s\" % set([self.ext_url,\n self.local_url]))\n return False", "def check(self):\n try:\n response = requests.head(self.url)\n except requests.exceptions.RequestException:\n return False, \"darkRed\", \"🛑 Connection Error\"\n return self._status_is_okay(response.status_code)", "def check_connection():\n r = requests.get('https://www.google.com')\n if r.status_code == 200:\n print (colored(\"Connected.\", 'green'))\n else:\n print (colored(\"Not Connected.\", 'red'))", "def is_connected():\n \n try:\n socket.create_connection((\"www.google.com\", 80))\n return True\n except OSError:\n pass\n return False", "def is_website_online(host):\n try:\n socket.gethostbyname(host)\n except socket.gaierror:\n return False\n else:\n return True", "def can_connect(test_url):\n try:\n requests.get(test_url)\n except (OSError):#connection error\n logger.warning('couldn\\'t reach server on: {test_url}')\n return False\n return True", "def get_conn(url):\n try:\n request = requests.get(url)\n if request.status_code == 200:\n res = True\n else:\n res = False\n except:\n res = False\n return res", "def _check_connect(self) -> bool:\n\n if (self._conn is None):\n if (self._exception):\n raise base_connection.ConnectException(\n \"No connection established\")\n\n else:\n return False\n\n return True", "def check_internet_scheme(self, elb_item):\n scheme = elb_item.config.get('scheme', None)\n if scheme and scheme == u\"internet-facing\":\n self.add_issue(1, 'ELB is Internet accessible.', elb_item)", "def stack_exchange_is_reachable(context):\n try:\n requests.get(context.platform_url, timeout=context.request_timeout)\n except requests.ConnectionError:\n raise requests.ConnectionError('Unable to connect to the test URL {} -'\n ' please ensure that Stack Exchange API is reachable'\n .format(context.platform_url))", "def is_connected():\n import socket\n try:\n host = socket.gethostbyname(\"www.gov.uk\")\n socket.create_connection((host, 80), 2)\n return True\n except:\n pass\n return False", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)", "def can_connect(url):\n\n log(\"Checking connection to: {0}\".format(url))\n success = True\n\n try:\n urlopen(url, timeout=1)\n log(\"... can connect\")\n except URLError:\n log(\"... 
can't connect\")\n success = False\n\n return success", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def verify_core_connection():\n if not base_url or not api_credentials:\n retrieve_connection_info()\n return", "def wifi_connectivity_verify(self):\n self.sendline(\"iw %s link\" % self.iface_wifi)\n matched = self.expect([\"Connected\", \"Not connected\", pexpect.TIMEOUT])\n if matched == 0:\n return True\n else:\n return False", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def check_cgi_connection(url):\n try:\n return urlopen(url, timeout=15).getcode() == 411\n except HTTPError as e:\n if e.code == 411:\n return True\n warn_user(f\"Connection to {url} failed with error {e}. Retrying with different url and port.\")\n return False\n except (OSError, URLError) as e:\n warn_user(f\"Connection to {url} failed with error {e}. Retrying with different url and port.\")\n return False", "def CheckIfConnecting(self):\n if self.CheckIfWiredConnecting() or self.CheckIfWirelessConnecting():\n return True\n else:\n return False", "def is_responsive(url: Any) -> Any:\n url = f\"{url}/ready\"\n try:\n response = requests.get(url)\n if response.status_code == 200:\n time.sleep(2) # sleep extra 2 sec\n return True\n except ConnectionError:\n return False", "def test_get_availability_with_error(self, m):\n url = \"https://www.cellartracker.com/xlquery.asp?User=test-username&Password=test-password&Table=Availability&Format=tab&Location=1\"\n m.register_uri(\"GET\", url, exc=requests.exceptions.ConnectTimeout)\n\n cellartracker = CellarTracker(username=\"test-username\", password=\"test-password\")\n with self.assertRaises(CannotConnect):\n cellartracker.get_availability()", "def _check_whole_network(self):\n if not self.network.check_network():\n # check_network has failed, issue error\n self._display_semantic_error(\"network\")", "def _can_ping_url(self, url, headers):\n try:\n self.http_request(url, \"GET\", \"\", headers, timeout=.75)\n return True\n except:\n return False", "def test_check_connection(self):\n self.assertIsNotNone(app.check_connection())", "def __CheckConnectStatus(self):\r\n if not self.tn:\r\n print \"Connection is down!\"\r\n return False\r\n else:\r\n print \"Connection is alive!\"\r\n return True", "def ping():\n api_online = bool(check_url(\"https://rest.ensembl.org/info/ping?\"))\n vertebrate_url_online = bool(check_url(\"http://ftp.ensembl.org\"))\n other_url_online = bool(check_url(\"http://ftp.ensemblgenomes.org\"))\n return api_online and vertebrate_url_online and other_url_online", "def check_is_alive(url):\n print(url)\n try:\n requests.get(url, allow_redirects=False, timeout=10.0)\n except requests.exceptions.ConnectionError as exc:\n print('- Website doesn\\'t exists: ', exc)\n with open('result_test.txt', 'a') as result_test: # Appending urls\n result_test.write(url + '\\n')", "def check_status(site: str, url: str):\n try:\n response = requests.get(url)\n if response.status_code == 200:\n log_status(site, url, True)\n else:\n log_status(site, url, False)\n except ConnectionError:\n log_status(site, url, False)", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def _is_redis_available(self) -> None:\n try:\n redis = 
self._conn_redis()\n redis.ping()\n except ConnectionError as err:\n raise HTTPException(status_code=500,detail=f\"REDIS CONNECTION -> {err}\")", "def check_link(url):\n try:\n\n r = requests.get(url)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n return r.text\n except:\n print('Connection Failed!!!')", "def check_heartbeat(self):\n try:\n req = request(self.values['url'].data)\n response = urllib.urlopen(req)\n the_page = response.read()\n return True\n except urllib.HTTPError as e:\n if e.code == 400:\n return True\n else:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False\n except Exception:\n logger.exception('[%s] - Exception when checking heartbeat')\n return False", "def handle_connection_lost(self, exc: Optional[Exception]) -> None:", "def get_check_url(self,url):\n r = requests.get(url).status_code\n if r==requests.codes.ok:\n return(True)\n else:\n print \"something wrong! status_code: \" + r\n return(False)", "def online_check():\n try_first_ips = [\n \"216.58.213.238\", # google\n \"8.8.8.8\", # google\n \"8.8.4.4\", # google\n \"46.228.47.115\", # yahoo\n ]\n last_resort_ips = [ # dns root servers\n \"198.41.0.4\",\n \"192.228.79.201\",\n \"192.33.4.12\",\n \"128.8.10.90\",\n \"192.203.230.10\",\n \"192.5.5.241\",\n \"192.112.36.4\",\n \"128.63.2.53\",\n \"192.36.148.17\",\n \"192.58.128.30\",\n \"193.0.14.129\",\n \"198.32.64.12\",\n \"202.12.27.33\"\n ]\n\n iplists = []\n iplists.append(try_first_ips)\n iplists.append(rand_ips(max_num=50))\n iplists.append(last_resort_ips)\n\n return any(can_ping_host(ip) for ip in chain(*iplists))", "def __check_ping(self):\n if not self.communications.ping():\n self.communications.ping(True)", "def _check_connection() -> bool:\n return bool(subprocess.check_output([\"hostname\", \"-I\"]))", "def check_status():\n try:\n return HTTPClient().fetch(\"https://api.random.org/\").code == 200\n except Exception: # pylint: disable=broad-except\n return False", "def retry_on_py4j_network(exc: Exception) -> bool:\n return isinstance(exc, Py4JNetworkError)", "def check_connectivity(self):\n return self.connected", "def _check_connection(self):\n if \"_connection\" not in self.__dict__:\n message = \"use connect method before doing operation on this database\"\n raise Exception(message)", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()", "async def check_config(self) -> None:\n try:\n await self._check_api()\n except aiohttp.ClientError as e:\n raise ConnectionError(str(e))", "def check_remote_pairing(ignore_errors):\n try:\n DeviceApi().get()\n return True\n except HTTPError as e:\n if e.response.status_code == 401:\n return False\n error = e\n except Exception as e:\n error = e\n\n LOG.warning('Could not get device info: {}'.format(repr(error)))\n\n if ignore_errors:\n return False\n\n if isinstance(error, HTTPError):\n if connected():\n raise BackendDown from error\n else:\n raise InternetDown from error\n else:\n raise error", "def check_up(addr, p):\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n result = sock.connect_ex((addr, p))\n sock.close()\n if result == 0:\n ans = True\n else:\n ans = False\n return ans", "def is_alive(self):\n conn = 
HTTPConnection(self.browser.host, self.browser.port)\n        conn.request(\"HEAD\", \"/invalid\")\n        res = conn.getresponse()\n        return res.status == 404", "def check_site_availability(url):\n\n    try:\n        conn = urllib.request.urlopen(url)\n    except urllib.error.HTTPError as e:\n        # Return code error (e.g. 404, 501, ...)\n        print('HTTPError: {}'.format(e.code))\n        logging.info('HTTPError: {}'.format(e.code))\n        return int(e.code)\n    except urllib.error.URLError as e:\n        # Not an HTTP-specific error (e.g. connection refused)\n        print('URLError: {}'.format(e.reason))\n        logging.info('URLError: {}'.format(e.reason))\n        return -7\n    except Exception as e:\n        # other reasons such as \"your connection is not secure\"\n        print(e)\n        logging.info(e)\n        return -8\n\n    # check if redirected\n    if conn.geturl() != url:\n        print(\"Redirected to {}\".format(conn.geturl()))\n        logging.info(\"Redirected to {}\".format(conn.geturl()))\n        return 302\n\n    # reaching this point means it received code 200\n    print(\"Return code 200\")\n    logging.info(\"Return code 200\")\n    return 200", "def check_connection_status(status):\n\n    if status.status_code == 200:\n        return True\n    else:\n        return False", "def is_available():", "def offline_error():\n\n    colored('No available internet connection\\n', 'red')", "def is_ok(url: str) -> bool:\n    try:\n        resp = requests.get(url)\n    except:\n        return False\n    return True if math.floor(resp.status_code / 100) == 2 else False", "def ready(self):\n\n        if not self.running:\n            return False\n\n        try:\n            response = requests.get(\n                'http://{}:{}'.format(\n                    self.running_host,\n                    self.running_port\n                )\n            )\n        except requests.ConnectionError:\n            return False\n\n        if response.status_code == 404:\n            return True\n        elif response.status_code == 500:\n            return False\n        else:\n            return False", "async def check_connection(self, hass: HomeAssistantType):\n        from elasticsearch import (\n            AuthenticationException,\n            AuthorizationException,\n            ConnectionError,\n            ElasticsearchException,\n            SSLError,\n        )\n\n        client = None\n        is_supported_version = True\n        try:\n            client = self._create_es_client()\n\n            es_version = ElasticsearchVersion(client)\n            await es_version.async_init()\n\n            is_supported_version = es_version.is_supported_version()\n        except SSLError as err:\n            raise UntrustedCertificate(err)\n        except ConnectionError as err:\n            if isinstance(\n                err.info, aiohttp.client_exceptions.ClientConnectorCertificateError\n            ):\n                raise UntrustedCertificate(err)\n            raise CannotConnect(err)\n        except AuthenticationException as err:\n            raise AuthenticationRequired(err)\n        except AuthorizationException as err:\n            raise InsufficientPrivileges(err)\n        except ElasticsearchException as err:\n            raise ElasticException(err)\n        except Exception as err:\n            raise ElasticException(err)\n        finally:\n            if client:\n                await client.close()\n                client = None\n\n        if not is_supported_version:\n            raise UnsupportedVersion()", "def checkWifi():\n    try:\n        subprocess.check_output(\"iwgetid\")\n        return True\n    except subprocess.CalledProcessError:  # if not connected\n        return False", "def pass_sanity_checks():\n    if not check_database_connection():\n        return False\n\n    # The IP address might take some time to arrive\n    have_public_ip = False\n    for i in range(10):\n        if public_ip():\n            have_public_ip = True\n            break\n        else:\n            time.sleep(5)\n\n    if not have_public_ip:\n        return False\n\n    if not access_s3():\n        return False\n\n    # If we get here we're good\n    return True", "def is_open(self):\n        result = None\n        if(self._type_connection == \"COM\"):\n            result = self._connection.is_open\n        else:\n            msg = \"99, Cannot validate the 
connection for {}.\".format(self._type_connection)\n            raise ValueError(msg)\n\n        return result", "def check_url(url, read_lines=False):\n    lines = None\n    try:\n        # Access URL\n        url_stream = urllib2.urlopen(url, timeout=2)\n\n        # Read lines\n        if read_lines is True:\n            lines = url_stream.readlines()\n    except urllib2.URLError as url_error:\n        url_stream = url_error\n    except socket.timeout:\n        return False, 'Time out. Try again!'\n\n    # Return result\n    if url_stream.code in (200, 401):\n        url_good = True\n    else:\n        url_good = False\n\n    # Close connection\n    url_stream.close()\n\n    # Return\n    if read_lines is True:\n        return url_good, lines\n    if url_good is False:\n        error_message = 'Unable to access %s. Check internet access. Code %d' % (url, url_stream.code)\n    else:\n        error_message = ''\n\n    return url_good, error_message", "def check(self, connection):\n        return True", "def connect(self, host):\n        return False", "def can_detect_offline(self):\n        raise NotImplementedError(\"Abstract method, must be overridden\")", "def is_bad_proxy(pip, url):\n    try:\n        res = requests.get(\n            url,\n            proxies={'http':pip},\n            headers={'User-agent':'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'},\n            timeout=10\n        )\n    except Exception as e:\n        return 1\n    if res.status_code == 200:\n        return 0\n\n    print(res.status_code)\n    return 1", "def is_retriable_exception(e):\n    is_retriable = e.response['ResponseMetadata']['HTTPStatusCode'] == 500 or \\\n                   e.response['ResponseMetadata']['HTTPStatusCode'] == 503 or \\\n                   e.response['Error']['Code'] == 'NoHttpResponseException' or \\\n                   e.response['Error']['Code'] == 'SocketTimeoutException'\n    return is_retriable", "def wait_for_connection(n_tries=10, seconds_wait=5, ip_address='8.8.8.8'):\n    count_flag = '-n' if config.WINDOWS else '-c'\n\n    for n_try in range(n_tries):\n        ping = subprocess.Popen(\n            ['ping', count_flag, str(1), '-w', str(1), ip_address],\n            stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n        )\n        ping.communicate()\n        if ping.wait():\n            Logger.warning('Connection not found')\n            time.sleep(seconds_wait)\n        elif n_try:\n            Logger.warning('Connection found')\n            return True\n        else:\n            return True\n    else:  # no break; really no internet connection\n        return False", "def CheckWirelessConnectingMessage(self):\n        if not self.wifi.connecting_thread == None:\n            stat = self.wifi.connecting_thread.GetStatus()\n            return stat\n        else:\n            return False", "def checkServerThread(self):\r\n\r\n        # check if the server is alive\r\n        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\n        result = 1\r\n        try:\r\n            result = sock.connect_ex((\"dealookup.com\", 80))\r\n        except:\r\n            result = 1 \r\n\r\n        # server is not live \r\n        if result != 0:\r\n            result = 1\r\n\r\n        self.checkResultSignal.emit(result)", "def raise_connection_error(api_url, headers, timeout, proxies):\n    raise requests.exceptions.ConnectionError", "def raise_connection_error(api_url, headers, timeout, proxies):\n    raise requests.exceptions.ConnectionError", "def check_url(url_link):\n    res = requests.get(url_link, allow_redirects =True)\n    if res.status_code == 200:\n        print('valid URL \\n')\n        return url_link\n    else:\n        print('Oops, there is something wrong with your URL. Run the program again!! 
')\n        return res.status_code", "def reachable(self):\n        service = build('gmail', 'v1', http=Http(timeout=1.0))\n        url = urlparse.urlparse(service._baseUrl)\n        host = url.hostname\n        port = url.port\n        try:\n            socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)\n        except (socket.herror, socket.gaierror, URLError, OSError):\n            return False\n        return True", "def url_is_good(url):\n    return website_re.match(url)\n    # possible validation of reachability of website\n    # http_response = requests.get(url)\n    # return http_response < 400:", "def check_network(config_name, urls = ''):\n\n  logging.info("calling obsolete network diagnostic. Use '-interactive' instead")\n\n  config = config_namespace.ConfigNameSpace({})\n  config.ExecFile(config_name)\n  # get relevant parameters from config file:\n  dns_servers = string.split(config.namespace['BOT_DNS_SERVERS'], ',')\n\n  if Check_Gateway(config.namespace['EXTERNAL_DEFAULT_ROUTE']) != 0:\n    return 1\n\n  good_dns_servers = 0\n  for s in dns_servers:\n    if Check_DNS(s) != 4: # all other errors are non-fatal\n      good_dns_servers = good_dns_servers + 1\n  # if no DNS servers are up, we give up:\n  if good_dns_servers == 0:\n    return 1\n\n  # First check the SMTP server\n  logging.info("testing SMTP server %s" % config.namespace['SMTP_SERVER'] )\n  Check_SMTP(config.namespace['SMTP_SERVER'],\n             config.namespace['EXTERNAL_CRAWL_IP'])\n\n  # what about NTP:\n  logging.info("testing NTP server %s" % config.namespace['NTP_SERVERS'])\n  for s in config.namespace['NTP_SERVERS']:\n    Check_NTP(s)\n\n  # SYSLOG server:\n  logging.info("testing SYSLOG server %s" % config.namespace['SYSLOG_SERVER'] )\n  Check_SYSLOG(config.namespace['SYSLOG_SERVER'])\n\n  # OK, now walk over all collections and try to get starturls\n  for u in urls:\n    check_url(u, dns_servers)\n\n  return 0", "def isConnected():", "def test_link(link):\n    r = requests.get(link)\n    if (r.status_code != 200):\n        return False\n    else:\n        return True", "def test_url():\r\n    global provided_url\r\n    global verbose_flag\r\n    # extracting url\r\n    provided_url = urlparse(provided_url).scheme+\"://\"+urlparse(provided_url).netloc\r\n    print provided_url \r\n    if verbose_flag: print \"\\t[.] Checking if connection can be established...\",# + provided_url\r\n    try:\r\n        response = urllib2.urlopen(provided_url)\r\n        \r\n    except HTTPError, e:\r\n        if verbose_flag: print \"[!] Failed\"\r\n        return 0\r\n    except URLError, e:\r\n        if verbose_flag: print \"[!] Failed\"\r\n        return 0\r\n    else:\r\n        valid_target = 1\r\n        if verbose_flag: print \"Success\"\r\n        return 1" ]
[ "0.8113887", "0.8006314", "0.798462", "0.78738654", "0.7798628", "0.7766045", "0.7750547", "0.7554359", "0.7195896", "0.7195208", "0.7083016", "0.70552427", "0.69422174", "0.69266725", "0.6923615", "0.69057584", "0.6864931", "0.67980105", "0.67922544", "0.6775535", "0.67517847", "0.6720917", "0.6711094", "0.6705836", "0.6668665", "0.6663482", "0.66062546", "0.6535993", "0.65122664", "0.6495804", "0.64487094", "0.64459825", "0.64414424", "0.6429203", "0.642376", "0.6417559", "0.639956", "0.6343078", "0.6341526", "0.6290445", "0.6290445", "0.6290062", "0.62803537", "0.62592924", "0.6220063", "0.6202108", "0.6187673", "0.6147273", "0.61198926", "0.6116262", "0.61099064", "0.6086088", "0.60790503", "0.6055135", "0.6047086", "0.603813", "0.6037035", "0.60353875", "0.60307163", "0.5990858", "0.5990667", "0.5987101", "0.5974926", "0.59745055", "0.5964913", "0.5960008", "0.5946801", "0.5945927", "0.5933036", "0.59178793", "0.58863014", "0.58836234", "0.58782583", "0.5873213", "0.587256", "0.58690727", "0.58598304", "0.5856718", "0.5824864", "0.5784405", "0.5772191", "0.5771373", "0.57513905", "0.57401496", "0.57374805", "0.5729579", "0.572704", "0.5717998", "0.5708339", "0.5705837", "0.57025117", "0.5689704", "0.5689704", "0.5688767", "0.56881", "0.568745", "0.5675751", "0.5674529", "0.5669706", "0.5667936" ]
0.7992725
2
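Editor's note: the entry closing above collects many small connectivity-check helpers with inconsistent conventions (booleans, raw status codes, sentinel integers). As a neutral reference, a minimal runnable distillation of the shared pattern; it assumes only the third-party requests package, and the probe URL is a placeholder:

# Minimal sketch of the liveness-check pattern shared by the negatives
# above; not taken from any single quoted snippet.
# Requires: pip install requests
import requests


def is_reachable(url: str, timeout: float = 5.0) -> bool:
    """Return True when `url` answers with a 2xx status code."""
    try:
        resp = requests.get(url, timeout=timeout)
    except requests.RequestException:  # DNS failure, refused, timeout, ...
        return False
    return 200 <= resp.status_code < 300


if __name__ == "__main__":
    print(is_reachable("https://example.com"))  # placeholder URL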
Creates a Flask application.
def _create_app(self, config={}): app = Flask('signac-dashboard') app.config.update({ 'SECRET_KEY': os.urandom(24), 'SEND_FILE_MAX_AGE_DEFAULT': 300, # Cache control for static files }) # Load the provided config app.config.update(config) # Enable profiling if app.config.get('PROFILE'): logger.warning("Application profiling is enabled.") from werkzeug.contrib.profiler import ProfilerMiddleware app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10]) # Set up default signac-dashboard static and template paths signac_dashboard_path = os.path.dirname(__file__) app.static_folder = signac_dashboard_path + '/static' app.template_folder = signac_dashboard_path + '/templates' # Set up custom template paths # The paths in DASHBOARD_PATHS give the preferred order of template # loading loader_list = [] for dashpath in list(app.config.get('DASHBOARD_PATHS', [])): logger.warning("Adding '{}' to dashboard paths.".format(dashpath)) loader_list.append( jinja2.FileSystemLoader(dashpath + '/templates')) # The default loader goes last and is overridden by any custom paths loader_list.append(app.jinja_loader) app.jinja_loader = jinja2.ChoiceLoader(loader_list) turbolinks(app) return app
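Editor's note: a minimal sketch of how a factory method like `_create_app` above is typically driven. The body below is a simplified stand-in, not the dashboard's real code; only the Flask and stdlib calls are standard:

# Stand-in sketch of the application-factory pattern used by _create_app
# above; configuration keys are illustrative. Requires: pip install flask
import os

from flask import Flask


def create_app(config=None):
    # A None default avoids sharing one mutable dict across calls.
    app = Flask("signac-dashboard")
    app.config.update({
        "SECRET_KEY": os.urandom(24),
        "SEND_FILE_MAX_AGE_DEFAULT": 300,  # cache control for static files
    })
    app.config.update(config or {})
    return app


if __name__ == "__main__":
    app = create_app({"PROFILE": False})
    app.run(port=8888, debug=True)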
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_app() -> Flask:\n logger.info('creating flask application')\n app = Flask(\n 'pasta',\n static_url_path='/static',\n static_folder='./static',\n template_folder='./views')\n config.flask.SECRET_KEY = os.urandom(32)\n config.flask.SERVER_NAME = None\n app.config.from_mapping(config.flask)\n return app", "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n ma.init_app(app)\n migrate = Migrate(app, db)\n\n with app.app_context():\n from . import routes\n\n # Create tables for our models\n db.create_all()\n app.logger.info(\"application started\")\n\n return app", "def create_app():\n app = Flask(\n __name__,\n instance_relative_config=False,\n )\n app.config.from_object('config.Config')\n\n with app.app_context():\n # CORS\n CORS(app)\n\n # JWT & BCRYPT\n from .utils.auth import init_auth\n init_auth(app)\n\n # DB\n from .utils.db import db\n db.init_app(app)\n\n # Mail\n from .utils.mail.service import mail\n mail.init_app(app)\n app.extensions['mail'].debug = 0 # No logging\n\n # Jobs\n from .utils.scheduler import start_jobs\n start_jobs(app)\n\n # Import routes\n from .routes import (\n admin, users, files,\n suprema,\n b_locals, b_federals)\n\n app.register_blueprint(admin.bp)\n app.register_blueprint(users.bp)\n app.register_blueprint(files.bp)\n app.register_blueprint(suprema.bp)\n app.register_blueprint(b_locals.bp)\n app.register_blueprint(b_federals.bp)\n\n return app", "def create_app() -> Flask:\r\n app = Flask(__name__.split('.')[0])\r\n init_config(app)\r\n app.register_blueprint(observer)\r\n app.teardown_appcontext(close_db)\r\n app.cli.add_command(init_db)\r\n\r\n return app", "def create_app():\n\n config = config_by_name[os.getenv('APP_SETTINGS', 'dev')]\n flask_app = Flask(__name__, static_folder=None, instance_relative_config=True)\n flask_app.config.from_object(config)\n with flask_app.app_context():\n app_manager = Manager(flask_app)\n\n from app.models import db, ma\n db.init_app(flask_app)\n Migrate(flask_app, db)\n app_manager.add_command('db', MigrateCommand)\n ma.init_app(flask_app)\n\n from app.service import mail\n mail.init_app(flask_app)\n\n from app.api import blueprint_api\n flask_app.register_blueprint(blueprint_api)\n\n json_logging.ENABLE_JSON_LOGGING = True\n json_logging.COMPONENT_NAME = 'MS-Auth'\n json_logging.COMPONENT_ID = 1\n json_logging.init(framework_name='flask')\n json_logging.init_request_instrument(flask_app)\n\n return flask_app, app_manager", "def create_app():\n app = Flask(__name__)\n\n # Load application settings\n settings = os.environ.get(\"FLASK_SETTINGS\", SETTINGS)\n if settings is not None:\n c = Config(settings)\n print(c)\n app.config.update(c.get_map('flask'))\n\n from users.views import user\n # Register the blueprints to app\n app.register_blueprint(user)\n\n db.init_app(app)\n\n return app", "def create_app(testing=False, cli=False):\n app = Flask(__name__)\n app.config.from_object(\"flask_cli.config\")\n\n if testing is True:\n app.config[\"TESTING\"] = True\n\n configure_extensions(app, cli)\n configure_apispec(app)\n register_blueprints(app)\n return app", "def create_app(config=Config):\r\n # Initialise app and configuration\r\n app = Flask(__name__)\r\n app.config.from_object(config)\r\n\r\n\r\n # Initialise flask plugins\r\n db.init_app(app)\r\n api.init_app(app)\r\n 
ma.init_app(app)\r\n login.init_app(app)\r\n migrate.init_app(app, db)\r\n register_api(api)\r\n\r\n\r\n return app", "def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app", "def create_app():\n\n # Create app\n app = Flask(__name__)\n app.config.from_object(\"nextbus.config.Config\")\n\n app.logger = logger.app_logger\n # Load logging configuration and log initial configuration\n logger.load_config(app)\n\n # Initialise SQLAlchemy and Migrate in app\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Adding app, db and model objects to flask shell\n from nextbus import models\n app.shell_context_processor(\n lambda: {\"app\": app, \"db\": db, \"models\": models}\n )\n\n from nextbus.converters import add_converters\n add_converters(app)\n\n from nextbus.views import page\n from nextbus.resources import api\n app.register_blueprint(page)\n app.register_blueprint(api)\n\n return app", "def app():\n return create_app()", "def make_app(*args, **kwargs):\n app = Flask(*args, **kwargs)\n Roots(app)\n return app", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def create_app():\n app = Flask(__name__)\n\n # Used by Flask to secure data\n app.config['SECRET_KEY'] = 'super-secret-secure-key'\n # Path to save the Database\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\n\n # Initialize the Database\n db.init_app(app)\n\n # Set up login manager\n from source.models import manage_login\n manage_login(app)\n\n # Blueprint for auth routes\n from source.auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n # Blueprint for non-auth routes\n from source.main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n return app", "def app():\n app = create_app()\n return app", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def create_app(app=None):\n #\n # Either use the existing flask provided as an argument or initialize\n # a brand new flask application.\n #\n return app or create_flask_app()", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n\n bootstrap.init_app(app)\n\n from .main 
import main\n app.register_blueprint(main)\n\n return app", "def create_app(env=\"production\"):\n app = Flask(__name__, static_url_path=\"/\")\n config_app(app, env=env)\n\n with app.app_context():\n Moment(app)\n init_db(app)\n enable_parser(app)\n register_route(app)\n register_blue(app)\n init_logger(app)\n init_scheduler(app)\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n bootstrap = Bootstrap(app) # noqa: F841\n\n with app.app_context():\n # Include our Routes\n from . import routes # noqa: F401\n\n # # Register Blueprints\n # app.register_blueprint(auth.auth_bp)\n # app.register_blueprint(admin.admin_bp)\n\n return app", "def create_app() -> Flask:\n\n flask_app = Flask('extraction_api', template_folder='./template')\n flask_app.secret_key = \"super secret key\"\n # import blueprints\n flask_app.register_blueprint(extraction_app)\n\n return flask_app", "def create_app(self):\r\n self.app = Flask(__name__, instance_relative_config=True)\r\n\r\n # Init the secret key of the app -it is a must for flask to run\r\n self.app.config.from_mapping(\r\n SECRET_KEY='!ZNeverSayNever116Z!',\r\n MONGODB_SETTINGS= {'host': 'mongodb://localhost/opc_integrity'}\r\n )\r\n initialize_db(self.app)\r\n\r\n\r\n # Init the app with core routes\r\n routes.init_app(self.app)", "def create_app():\n app = Flask(__name__)\n app.register_blueprint(playlists)\n app.register_blueprint(comments)\n return app", "def create_app():\n app = Flask(__name__)\n app.config.from_pyfile('config.py')\n\n login_manager.init_app(app) # initialize flask_login with our app\n # redirect route when @login_required fails\n login_manager.login_view = 'routes.signin'\n db.init_app(app)\n\n from .routes import routes\n app.register_blueprint(routes)\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n # create app instance\n app.config.from_object(config_by_name[config_name])\n flask_bcrypt.init_app(app)\n\n CORS(app)\n\n routes.init_routes(app)\n\n return app", "def create_app(config: dict) -> Flask:\n for key, value in config.items():\n app.config[key] = value\n db.init_app(app)\n ma.init_app(app)\n app.app_context().push()\n return app", "def create_app():\n # Creates flask object with directory for to serve static files\n app = Flask(__name__, static_url_path=C.STATIC_FILE_PATH)\n\n # Enabling CORS for the application\n CORS(app)\n\n app.debug = True\n # Registering books controller\n from app.controllers.books import mod\n app.register_blueprint(mod)\n\n # Test Route\n @app.route('/hello')\n def hello_world():\n return 'Hello World!'\n\n # Index route - serves index.html\n @mod.route('/')\n def main():\n return mod.send_static_file(\"index.html\")\n\n # serve routes from index by default\n app.add_url_rule('/', endpoint='index')\n\n return app", "def create_app(app_name: str):\n\n app = Flask(app_name)\n app.json_encoder = CustomJSONEncoder\n\n app.config.update({\n 'SQLALCHEMY_DATABASE_URI': build_db_uri(),\n 'SQLALCHEMY_TRACK_MODIFICATIONS': os.environ.get('SQLALCHEMY_TRACK_MODIFICATIONS', False),\n 'APP_CONFIG': {\n 'HOSTNAME': os.environ.get('HOSTNAME', ''),\n 'GREETING': os.environ.get('GREETING', 'Hello'),\n }\n })\n\n db.init_app(app)\n api = Api(app)\n\n with app.app_context():\n api.add_resource(Index, '/')\n api.add_resource(Config, '/config')\n api.add_resource(StudentMany, '/student')\n api.add_resource(StudentOne, '/student/<int:student_id>')\n return app", "def 
create_app(config='config.py'):\n app = Flask(__name__, static_folder=None)\n app.config.from_pyfile(config)\n\n # Initialize extensions/add-ons/plugins.\n mongo.init_app(app)\n login_manager.init_app(app)\n\n for blueprint in all_blueprints:\n import_module(blueprint.import_name)\n app.register_blueprint(blueprint)\n\n return app", "def setup_app():\n\n # 1 Create Flask application\n app = Flask(\n import_name=__name__,\n template_folder=\"templates\",\n static_folder=\"static\"\n )\n\n # 2 Update the apps configuration\n app = config_selector(app)\n register_error_handlers(app)\n\n cache.init_app(app)\n\n # 3 Set up logger\n setup_logger(app.config)\n LOGGER.info(\"Set up app & logger.\")\n\n # 4 Init clients\n init_clients(app.config)\n\n # 5 Init Daemon\n start_daemon(app.config)\n\n # 6 Register blueprints\n register_blueprints(app)\n Bootstrap(app)\n\n return app", "def create_app(config_name, log_level=logging.INFO):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n\n bootstrap.init_app(app)\n mail.init_app(app)\n moment.init_app(app)\n db.init_app(app)\n login_manager.init_app(app)\n\n Markdown(app)\n\n redis_store.init_app(app)\n\n handler = RotatingFileHandler('flask.log', maxBytes=10000, backupCount=1)\n handler.setLevel(log_level)\n app.logger.addHandler(handler)\n\n #attach routes and custom error pages here\n\n from .main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .api import api as api_blueprint\n app.register_blueprint(api_blueprint)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n settings = {\n 'DEBUG': True,\n 'WEBPACK_MANIFEST_PATH': './build/manifest.json',\n 'SECRET_KEY': 'the quick brown fox jumps over the lazy dog',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:///db.sqlite',\n 'SQLALCHEMY_COMMIT_ON_TEARDOWN': True,\n 'UPLOAD_FOLDER':UPLOAD_FOLDER,\n 'ALLOWED_EXTENSIONS': set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif','zip','log'])\n\n }\n\n app.config.update(settings)\n CORS(app)\n webpack.init_app(app)\n\n return app", "def create_app(config=None, app_name=\"todo-list\", blueprints=None):\n app = Flask(\n app_name,\n static_folder=os.path.join(os.path.dirname(__file__), '..', 'static'),\n template_folder='templates',\n )\n\n #app.config.from_object('project.config')\n app.config.from_pyfile('default.cfg', silent=False)\n if config:\n app.config.from_pyfile(config, silent=True)\n\n if blueprints is None:\n blueprints = BLUEPRINTS\n\n blueprints_fabrics(app, blueprints)\n extensions_fabrics(app)\n\n auth_util.init_auth_callbacks()\n\n return app", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web import create_app as create_web_app\n return create_app()", "def create_app(config='dev'):\n if config == 'dev':\n from .conf.config import DevelopmentConfig as dev_config\n app = configure_app(Flask(__name__), dev_config)\n else:\n from .conf.config import ProdConfig\n app = configure_app(Flask(__name__), ProdConfig)\n\n # setup flask blueprints\n configure_blueprints(app)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n # app.config.from_object('config.Config')\n file_path = os.path.abspath(os.getcwd())+\"/mpulse.db\"\n app.config.from_mapping(\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI = 'sqlite:///'+file_path,\n SCHEMA=os.path.join(os.path.dirname(__file__), 'schema.sql'),\n SQLALCHEMY_TRACK_MODIFICATIONS = False,\n JSON_SORT_KEYS=False\n )\n\n # ensure the instance folder 
exists\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n    \n    # init database\n    db.init_app(app)\n    \n    with app.app_context():\n        \n        # Create tables if they don't exist\n        db.create_all() \n        \n        # Include our api Routes for members\n        from . import members\n        # Register Blueprints\n        app.register_blueprint(members.bp)\n\n    return app", "def create_app(test_config=None):\n\n    app = Flask(__name__, instance_relative_config=True)\n\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    if test_config is None:\n        app.config.from_pyfile('config.py', silent=False)\n    else:\n        app.config.from_mapping(test_config)\n\n    # Instantiate the Database.\n    db.init_app(app=app)\n    migrate = Migrate(app=app, db=db)\n\n    # Initialize WTForms to handle JSON data.\n    wtforms_json.init()\n\n    # Routing starts from here.\n    app.add_url_rule(\"/\", view_func=Home.as_view(\"home\"))\n    app.register_blueprint(api)\n\n    return app", "def create_app(config: str) -> Flask:\n    api = FlaskApp(__name__, specification_dir=Path() / \"swagger\")\n    api.add_api(\"swagger.yml\")\n\n    # Get `Flask` object\n    app = api.app\n\n    app.config.from_object(config)\n    app.register_blueprint(site.mod)\n\n    db.init_app(app)\n\n    return app", "def create_app(app_name=PKG_NAME, config=None, **kwargs):\n    app = Flask(app_name, static_url_path='/flask-static')\n\n    # Update the app configuration.\n    app.config.from_object(config)\n\n    # Suppress flask_sqlalchemy warning.\n    app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n    # For CSRF and flash\n    app.secret_key = \"42d2a9e832245e0e56bb929d46393c4a467322cc21b53bc61a181004\"\n\n    if kwargs.get(\"celery\"):\n        init_celery(kwargs.get(\"celery\"), app)\n\n    initialize_app(app)\n\n    return app", "def create_app(config='catalog.config.ProductionConfig'):\n    # Create app\n    app = Flask(__name__)\n    app.config.from_object(config)\n\n    # Register blueprints\n    reg_bps(app)\n\n    # Import models (for migration purposes)\n    from . import Category, Item, AppUser  # noqa: F401\n\n    # Initialize extensions\n    db.init_app(app)\n    migrate.init_app(app, db)\n\n    return app", "def create_app():\n    app = Flask(__name__, instance_relative_config=False)\n    app.register_blueprint(auth_bp, url_prefix='/auth')\n    app.register_blueprint(errors_bp, url_prefix='/error')\n    app.config.from_object('config.Config')\n\n    db.init_app(app)\n    store.bind(db)\n    login_manager.init_app(app)\n    Session(app)\n    captcha = FlaskSessionCaptcha(app)\n    captcha.init_app(app)\n\n\n    with app.app_context():\n        from . 
import routes # Import routes\n db.create_all() # Create sql tables for our data models\n\n return app", "def create_app(test_config: dict = None) -> Flask:\n\n # create and configure the app\n app = Flask(__name__, instance_relative_config=True)\n\n # Load in the configuration\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile(os.path.join(os.path.dirname(__file__), 'config.py'), silent=True)\n print(app.config)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n # Make the initial status page\n start_time = datetime.now().isoformat()\n\n @app.route('/')\n def home():\n return render_template('home.html',\n message=f'Running on {platform.node()} since {start_time}')\n\n # Load the blueprints\n from .views import ingest\n app.register_blueprint(ingest.bp)\n\n return app", "def create_app(test_config=None):\n app = Flask(__name__,\n instance_relative_config=True,\n static_folder=\"resources\")\n app.config.from_mapping(\n SECRET_KEY=\"dev\",\n SQLALCHEMY_DATABASE_URI=\"sqlite:///\" + os.path.join(\n app.instance_path, \"database.sqlite\"),\n SQLALCHEMY_TRACK_MODIFICATIONS=False,\n API_DOMAIN=\"http://localhost:5000\"\n )\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile(\"config.py\", silent=True)\n else:\n # load the test config if passed in\n app.config.from_mapping(test_config)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # Initialize DB\n db.init_app(app)\n db.create_all(app=app)\n\n # Initialize Marshmallow Serialization\n ma.init_app(app)\n\n # Initialize Blueprints\n from . import local\n app.register_blueprint(local.bp)\n\n @app.route(\"/\", defaults={\"path\": \"\"})\n @app.route(\"/<path:path>\")\n def index(path):\n return render_template(\n \"index.html\", api_domain=app.config.get(\"API_DOMAIN\")\n )\n\n return app", "def create_app(config='dev'):\n config_object = {'dev': DevConfig, 'test': TestConfig}[config]\n\n app = Flask(__name__)\n app.config.from_object(config_object)\n\n if app.config.get('PROFILE'):\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n configure_log(app)\n configure_database(app)\n configure_json(app)\n configure_converters(app)\n\n register_extensions(app)\n register_blueprints(app)\n\n log.info(\"%s loaded with %s configuration\", bright(\"ups\"), bright(config))\n\n return app", "def new_app(project_name,app_name ):\n from flask_create_app.core.commands.cmd_newapp import create_new_app\n proj_dir = os.getcwd()\n create_new_app(app_name, proj_dir,project_name)", "def create_app(config_overrides={}):\n # create app; load config\n app = Flask(__name__)\n app.config.from_object('config')\n app.config.update(**config_overrides)\n\n db.init_app(app)\n\n # flask-debug-toolbar\n DebugToolbarExtension(app)\n\n if app.config.get('DEBUG_TOOLBAR'):\n toolbar = DebugToolbarExtension(app)\n\n # error page handlers\n @app.errorhandler(404)\n def not_found(error):\n return render_template('404.html'), 404\n\n @app.errorhandler(500)\n def server_error(error):\n return render_template('500.html'), 500\n\n # register blueprints\n from app.main.views import mod as main_module\n\n app.register_blueprint(main_module)\n\n # load models\n from app.players.models import Player\n\n return app", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - 
part 2 and beyond!'\n\n return app", "def create_app():\n # pylint: disable=C0415\n # note: Ignoring 'Import outside toplevel' to avoid import while init\n\n from todo_app.config.swagger import SwaggerConfig\n from todo_app.routes.users import user_management_process\n from todo_app.routes.todo_item import todo_item_management_process\n from todo_app.routes.user_todo_list import todo_list_management_process\n\n # Define the WSGI application object\n app = Flask(__name__)\n\n # DEBUG ONLY!\n app.config[\"WTF_CSRF_ENABLED\"] = CommonConfig.wtf_csrf\n app.config[\"SECRET_KEY\"] = CommonConfig.app_secret_key\n\n @app.route(\"/\")\n def home():\n return \"We are working for new feature development! Please come back later!\"\n\n @app.route('/favicon.ico')\n def favicon():\n # To avoid 404 error\n return {}, 200\n\n\n\n # pylint: disable=W0613\n # note: Ignoring Unused argument 'resp_or_exc' as it's related to app\n # @app.teardown_appcontext\n # def cleanup(resp_or_exc):\n # handler.db_session.remove()\n\n # Register the blueprint here\n \n app.register_blueprint(\n SwaggerConfig.SWAGGERUI_BLUEPRINT, url_prefix=SwaggerConfig.SWAGGER_URL\n )\n app.register_blueprint(user_management_process, url_prefix=\"/api/v1/users\")\n app.register_blueprint(todo_item_management_process, url_prefix=\"/api/v1/todo/item\")\n app.register_blueprint(todo_list_management_process, url_prefix=\"/api/v1/todo/list\")\n \n\n return app", "def create_app(config_mapping=None):\n logging.basicConfig(level=REANA_LOG_LEVEL, format=REANA_LOG_FORMAT)\n app = Flask(__name__)\n app.config.from_object(\"reana_workflow_controller.config\")\n if config_mapping:\n app.config.from_mapping(config_mapping)\n\n app.secret_key = \"super secret key\"\n # Register API routes\n from reana_workflow_controller.rest import (\n workflows_session,\n workflows_status,\n workflows_workspace,\n workflows,\n ) # noqa\n\n app.register_blueprint(workflows_session.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows_status.blueprint, url_prefix=\"/api\")\n app.register_blueprint(workflows_workspace.blueprint, url_prefix=\"/api\")\n\n app.register_error_handler(UnprocessableEntity, handle_args_validation_error)\n\n app.session = Session\n return app", "def create_app(configobj=ProdConfig):\n\n app = Flask(__name__)\n app.config.from_object(configobj)\n configure_blueprints(app)\n configure_extensions(app)\n configure_callbacks(app)\n configure_filters(app)\n configure_error_handlers(app)\n return app", "def create_app():\n\n app = Flask(__name__, instance_relative_config=True)\n\n # Load common settings\n app.config.from_object('app.settings')\n # Load environment specific settings\n app.config.from_object('app.local_settings')\n\n register_extensions(app)\n register_blueprints(app)\n # add the init_db command to flask cli\n app.cli.add_command(init_db)\n\n # Signal for giving users who register the 'user' role\n @user_registered.connect_via(app)\n def after_register_hook(sender, user, **extra):\n\n role = Role.query.filter_by(name=\"user\").first()\n\n if role is None:\n role = Role(name=\"user\")\n db.session.add(role)\n db.session.commit()\n\n user_role = UsersRoles(user_id=user.id, role_id=role.id)\n db.session.add(user_role)\n db.session.commit()\n\n # Setup an error-logger to send emails to app.config.ADMINS\n init_email_error_handler(app)\n\n # Define bootstrap_is_hidden_field for flask-bootstrap's bootstrap_wtf.html\n from wtforms.fields import HiddenField\n\n def 
is_hidden_field_filter(field):\n return isinstance(field, HiddenField)\n\n app.jinja_env.globals['bootstrap_is_hidden_field'] = is_hidden_field_filter\n\n return app", "def create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n # a default secret that should be overridden by instance config\n SECRET_KEY=\"dev\",\n # store the database in the instance folder\n DATABASE=os.path.join(app.instance_path, \"sqlite\"),\n )\n\n if test_config is None:\n # load the instance config, if it exists, when not testing\n app.config.from_pyfile(\"config.py\", silent=True)\n else:\n # load the test config if passed in\n app.config.update(test_config)\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n @app.route(\"/hello\")\n def hello():\n return \"Hello, World!\"\n\n # register the database commands\n from music_recommender import db\n\n db.init_app(app)\n\n # apply the blueprints to the app\n from music_recommender import auth\n\n app.register_blueprint(auth.bp)\n\n # make url_for('index') == url_for('body.index')\n # in another app, you might define a separate main index here with\n # app.route, while giving the body blueprint a url_prefix, but for\n # the tutorial the body will be the main index\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def make_server() -> Flask:\n app: Flask = Flask(__name__)\n return app", "def create_app(mode=os.environ.get('FLASK_MODE', 'app.config.Development')):\n app = APIFlask(__name__)\n # add configurations\n app_config = config.get(mode)\n app.config.from_object(app_config)\n app_config().init_app(app)\n\n # initialize all extensions\n init_extensions(app)\n\n # register blueprints\n # add blueprint registration statements here\n from app.users import users\n app.register_blueprint(users)\n\n # register error handlers\n app.register_error_handler(400, bad_request)\n app.register_error_handler(Forbidden, forbidden)\n app.register_error_handler(404, not_found)\n app.register_error_handler(405, method_not_supported)\n app.register_error_handler(APIException, conflict)\n\n return app", "def create_app():\n database_url = os.environ.get('DATABASE_URL') or 'postgres://{}:{}@{}:{}/{}'.format(\n config.get('database.user'),\n config.get('database.password'),\n config.get('database.host'),\n config.get('database.port'),\n config.get('database.name'),\n )\n app = FlaskJSON(__name__)\n app.secret_key = config.get('secret_key')\n app.config['SQLALCHEMY_DATABASE_URI'] = database_url\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n # Set up the database\n db.init_app(app)\n\n # register blueprints\n app.register_blueprint(health)\n\n return app", "def create_app(config_object=Config):\n app = Flask(__name__.split('.')[0], static_folder='../client/build/static', template_folder=\"../client/build\")\n\n app.url_map.strict_slashes = False\n app.config.from_object(config_object)\n db.init_app(app)\n cache.init_app(app)\n register_blueprints(app)\n register_error_handler(app)\n \n\n return app", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config_by_name[config_name])\n CORS(app)\n mongo.init_app(app)\n app.register_blueprint(check_bp)\n\n return app", "def create_app() -> Flask:\n app = Flask('preview')\n app.json_encoder = PreviewEncoder\n app.config.from_pyfile('config.py')\n\n Base(app)\n auth.Auth(app)\n\n # Set up the API.\n app.register_blueprint(routes.api)\n register_error_handlers(app)\n\n # Add WSGI 
middlewares.\n middleware = [request_logs.ClassicLogsMiddleware,\n auth.middleware.AuthMiddleware]\n if app.config['VAULT_ENABLED']:\n middleware.insert(0, vault.middleware.VaultMiddleware)\n wrap(app, middleware)\n\n # Make sure that we have all of the secrets that we need to run.\n if app.config['VAULT_ENABLED']:\n app.middlewares['VaultMiddleware'].update_secrets({})\n\n # Initialize upstream services.\n PreviewStore.init_app(app)\n if app.config['WAIT_FOR_SERVICES']:\n with app.app_context(): # type: ignore\n PreviewStore.current_session().initialize()\n return app", "def create_app(config):\n\n # Initialize app. Flatten config_obj to dictionary (resolve properties).\n app = Flask(__name__)\n config_dict = dict(\n [(k, getattr(config, k)) for k in dir(config) if\n not k.startswith('_')])\n\n app.config.update(config_dict)\n\n for bp in all_blueprints:\n import_module(bp.import_name)\n app.register_blueprint(bp)\n\n pipeline.set_enforce_auth(False)\n\n # Return the application instance.\n return app", "def generate(self) -> Flask:\n app = Flask(self.name, *self.args, **self.kwargs)\n app = self.setup_app_config(app)\n app = self.add_app_headers(app)\n app = self.add_xsrf_error_handler(app)\n\n return app", "def create_app(test_config=None):\n app = Flask(__name__)\n\n # apply the blueprints to the app\n from app import common\n\n app.register_blueprint(common.bp)\n\n # default url for site\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY=os.environ.get('FLASK_SECRET_KEY', 'dev'),\n # SQLALCHEMY_DATABASE_URI='sqlite:////' + os.path.join(app.instance_path, 'app.sqlite'),\n SQLALCHEMY_DATABASE_URI=os.environ.get('FLASK_SQLALCHEMY_DATABASE_URI'),\n SQLALCHEMY_TRACK_MODIFICATIONS=False,\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # Set custom json encoder\n app.json_encoder = JSONEncoder\n\n # SQLAlchemy\n from tuinbouwer_server_api.models import db, migrate\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Apscheduler\n from tuinbouwer_server_api.scheduler import scheduler, start_jobs\n scheduler.init_app(app)\n scheduler.start()\n start_jobs()\n \n # CORS\n CORS(app, resources={r'/*': {'origins': '*'}})\n\n # Website\n app.register_blueprint(website.frontend.blueprint)\n \n # API\n app.register_blueprint(api.sensor.blueprint)\n app.register_blueprint(api.frontend.blueprint)\n\n\n return app", "def create_app(config_path: str):\n\n if not os.path.exists(config_path):\n raise OSError(f\"Configuration file {config_path} does not exist\")\n\n # create flask app\n app = Flask(__name__)\n\n # add app configration \n app.config.from_pyfile(config_path)\n\n # initialize database \n db.init_app(app)\n logger.info(f\"Initializing app with database from {app.config['SQLALCHEMY_DATABASE_URI']}\")\n\n # initialize api enpoints\n from deekoo_auth.endpoints import api_endpoints\n app.register_blueprint(api_endpoints)\n\n return app", "def create_app(register_blueprints=True):\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('app.default_config') # default config\n # app.config.from_pyfile('application.cfg.py') # server config file, do not include in versioning\n\n db.init_app(app)\n api = Api(app)\n api.add_resource(UserList, '/api/users')\n\n if register_blueprints:\n 
register_blueprints_on_app(app)\n\n return app", "def create_app(config_class=DevConfig):\n\n app = Flask(__name__)\n app.config.from_object(config_class)\n\n # Register Blueprints\n from routes import bp_main\n app.register_blueprint(bp_main)\n\n return app", "def create_app(config_name=\"development\"):\n # return app with config file on config folder\n app = Flask(__name__)\n\n # get default settings for app\n app.config.from_object(\"app_name.settings\")\n\n # load according config object\n app.config.from_object(app_config.config[config_name])\n\n # run classmethod to init app with Flask-DotEnv\n app_config.config[config_name].init_app(app)\n\n # register blueprints\n app.register_blueprint(api_mod, url_prefix=\"/api\")\n app.register_blueprint(mock_module, url_prefix=\"/api\")\n app.register_blueprint(support_ticket_module, url_prefix=\"/api\")\n \n # enable cors\n CORS(app)\n\n with app.app_context():\n # if config_name != \"testing\":\n # init db instance\n db.init_app(app)\n\n # migrate for Flask-Migrate\n migrate.init_app(app, db)\n\n return app", "def create_app(config_filename=None, config_object=None):\n app = Flask(__name__)\n\n app.config.from_object('psephology.config.default')\n if config_filename is not None:\n app.config.from_pyfile(config_filename)\n if config_object is not None:\n app.config.from_object(config_object)\n\n db.init_app(app)\n migrate.init_app(app, db, render_as_batch=True)\n\n app.register_blueprint(ui)\n app.register_blueprint(api, url_prefix='/api')\n app.cli.add_command(cli)\n\n # Things which should only be present in DEBUG-enabled apps\n app.debug = app.config.get('DEBUG', False)\n if app.debug:\n from flask_debugtoolbar import DebugToolbarExtension\n toolbar = DebugToolbarExtension()\n toolbar.init_app(app)\n\n return app", "def create_app(config_name='development'):\n\tapp = Flask(__name__,instance_relative_config=True)\n\tapp.config.from_object(APP_CONFIG[config_name])\n\n\turl = app.config.get('DATABASE_URL')\n\turl = app.config.get('DATABASE_URL')\n\tCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\tapp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\tcreate_tables(url)\n\tpublic_id = str(uuid.uuid4())\n\tif config_name == 'testing':\n\t\tpublic_id = \"f3b8a1c3-f775-49e1-991c-5bfb963eb419\"\n\tcreate_super_user(url, public_id)\n\n\tapp.register_error_handler(404, url_not_found)\n\tapp.url_map.strict_slashes = False\n\n\n\tapp.register_blueprint(v1)\n\tapp.register_blueprint(v2)\n\treturn app", "def CreateApp(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n\n if test_config:\n pass\n\n @app.route('/validate', methods=['POST'])\n def Validate(): # pylint: disable=unused-variable\n # TODO(dberris): Implement this!\n return ''\n\n @app.route('/service-metadata')\n def ServiceMetadata(): # pylint: disable=unused-variable\n # TODO(dberris): Implement this!\n return ''\n\n return app", "def create_app(config=DevConfig, **kwargs):\n app = Flask(__name__, **kwargs)\n app.config.from_object(config)\n\n # flask-restplus seem to use standard json lib and not the flask one\n # so we patch it here so it can handle UUIDs\n JSONEncoder.default = JSONEncoder_newdefault\n\n extensions.init_app(app)\n modules.init_app(app)\n\n return app", "def app():\n os.environ[\"FLASK_ENV\"] = \"test\"\n return create_app()", "def create_app(settings_override=None):\n\n app = Flask(__name__, instance_relative_config=True)\n\n app.config.from_object('config.settings')\n app.config.from_pyfile('settings.py', silent=True)\n\n if 
settings_override:\n app.config.update(settings_override)\n\n app.register_blueprint(course)\n app.register_blueprint(user)\n\n extensions(app)\n\n return app", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def create_app(settings_override=None):\n app = Flask(__name__, static_folder='static')\n\n params = {\n 'DEBUG': True,\n 'WEBPACK_MANIFEST_PATH': '../build/manifest.json'\n }\n\n app.config.update(params)\n\n if settings_override:\n app.config.update(settings_override)\n\n webpack.init_app(app)\n\n return app", "def create_app(config_class):\n # create a Flask application instance\n app = Flask(__name__)\n\n # load configs\n app.config.from_object(config_class)\n\n register_extensions(app)\n register_blueprints(app)\n register_error_handlers(app)\n register_shell_context(app)\n register_middleware(app)\n\n return app", "def create_app(test_config=\"test_config.py\"):\n app = Flask(__name__, instance_relative_config=True)\n\n # set common config values\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # if not testing, config is loaded from config.py in the instance folder\n if test_config is None:\n app.config.from_pyfile(\"config.py\")\n else:\n # whichever config file name you pass in also has to be in the instance folder\n app.config.from_pyfile(test_config)\n\n db.init_app(app)\n login_manager.init_app(app)\n bootstrap.init_app(app)\n migrate.init_app(app, db)\n mail.init_app(app)\n app.redis = Redis.from_url(REDIS_URL)\n app.task_queue = rq.Queue(\"yamz-tasks\", connection=app.redis)\n app.elasticsearch = Elasticsearch(ELASTICSEARCH_URL)\n\n # apply the blueprints to the app\n from .main import main as main_blueprint\n\n app.register_blueprint(main_blueprint)\n\n from .auth import auth as auth_blueprint\n\n app.register_blueprint(auth_blueprint)\n\n from .term import term as term_blueprint\n\n app.register_blueprint(term_blueprint, url_prefix=\"/term\")\n\n from .graph import graph as graph_blueprint\n\n app.register_blueprint(graph_blueprint, url_prefix=\"/graph\")\n\n # register command line functions\n @app.cli.command()\n def test():\n \"\"\"Run the unit tests.\"\"\"\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n # app.secret_key = os.urandom(12)\n # jwt_manager = JWTManager()\n # jwt_manager.init_app(app)\n\n CORS(app)\n\n app.register_blueprint(redflag_blueprint, url_prefix=\"/api/v1/red-flags\")\n app.register_blueprint(user_blueprint, url_prefix=\"/api/v1/users\")\n app.register_blueprint(intervention_blueprint, url_prefix=\"/api/v1/interventions\")\n app.register_blueprint(auth_blueprint, url_prefix=\"/api/v1/auth\")\n app.register_blueprint(index_blueprint, url_prefix=\"/api/v1\")\n app.register_blueprint(base_url_blueprint, url_prefix=\"/\")\n app.register_blueprint(media_blueprint, url_prefix=\"/api/v1/files/uploads\")\n # app.register_blueprint(media_edit_blueprint, url_prefix=\"/api/v1/\")\n\n app.register_error_handler(400, bad_request_error)\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(405, method_not_allowed)\n app.register_error_handler(500, internal_server_error)\n\n swagger_ui_blueprint = get_swaggerui_blueprint(SWAGGER_UI_URL, API_URL)\n app.register_blueprint(swagger_ui_blueprint, url_prefix=SWAGGER_UI_URL)\n\n return app", "def create_app(self):\n app = 
Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def create_and_run():\n\n app = App()\n app.run()", "def create_app():\n\n # --------------------- #\n # Initial configuration #\n # --------------------- #\n\n instance_path = config.INSTANCE_PATH\n\n # Creates the instance path if it doesn't exist\n if not os.path.exists(instance_path):\n os.makedirs(instance_path)\n\n app = Flask(__name__, instance_path=instance_path)\n\n # Logging utility setup\n if app.config['ENV'] == 'development' or app.config['DEBUG'] is True:\n log_level = logging.DEBUG\n else:\n if hasattr(logging, config.DEBUG_LOG_LEVEL):\n log_level = getattr(logging, config.DEBUG_LOG_LEVEL)\n else:\n print(\n \"WARNING: log level value from config file is not a valid attribute: {}\".format(config.DEBUG_LOG_LEVEL))\n print(f\"Defaulting to '{logging.WARNING}\")\n log_level = logging.WARNING\n\n logging.basicConfig(\n format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n level=log_level,\n filename=config.LOG_PATH\n )\n\n # Intercepts generic server errors and logs them\n @app.errorhandler(werkzeug.exceptions.HTTPException)\n def handle_errors(e):\n logging.error(str(e))\n return str(e), 500\n\n # Handles correct favicon\n @app.route('/favicon.ico')\n def favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n # --------- #\n # Web pages #\n # --------- #\n\n # Root page\n @app.route('/')\n def index():\n \"\"\"Simple root page.\n\n The \"@app.route('/')\" decorator assigns this function\n to the '/' address, so that when you visit '/', a\n request is sent to the server, which will call this function.\n\n Once this function is called it returns an html page\n produced from the 'index.html' file.\n\n Returns\n -------\n html page\n \"\"\"\n return render_template('index.html')\n\n @app.route('/test_drawing')\n def test_drawing_page():\n return render_template('test_drawing.html')\n\n @app.route('/log', methods=['GET'])\n def view_log():\n \"\"\"Display the log\"\"\"\n if request.values.get(\"clear\") == \"True\":\n with open(config.LOG_PATH, \"w\") as log_file:\n log_file.write(\"\")\n logging.info(\"Log file cleared from browser.\")\n print(request.values.get(\"clear\"))\n with open(config.LOG_PATH) as log_file:\n log_text = log_file.read()\n return render_template('log.html', log_text=log_text)\n\n @app.route('/study_legacy')\n def study_legacy():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study-legacy.html')\n\n @app.route('/study')\n def study():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study.html')\n\n # --------------------- #\n # API-related functions #\n # --------------------- #\n\n @app.route('/api_initialise_gp_and_sample', methods=['GET', 'POST'])\n def 
api_initialise_gp_and_sample():\n \"\"\"Initialises a GP based on the given parameters.\n\n The parameters are retrieved from the settings file. After initialising\n the GP it samples a function from it to be the true function. Finally\n it chooses a query point uniformly at random.\n\n All the data is sent to the frontend as a JSON object to be used by the frontend.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n\n # Retrieves the data from the request object\n interface_settings = utils.get_response_and_log(request)\n logging.debug(\"Interface settings: {}\".format(str(interface_settings)))\n\n # Loads the settings file\n settings_file_name = interface_settings[\n 'settings_name'] # if 'settings_name' in interface_settings else 'default'\n settings = io.load_settings(settings_file_name)\n logging.debug(\"File settings: {}\".format(str(settings)))\n\n # Integrate the settings with those provided by the interface, if any\n for key in interface_settings.keys():\n if key not in settings:\n settings[key] = interface_settings[key]\n\n # Fail early and provide some error message when crucial data is missing.\n try:\n utils.assert_required_data(settings, ['x_limits', 'n_points', 'noise'])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(settings.keys()))\n return str(e), 400 # BAD_REQUEST\n\n # Generate user and session IDs if not provided\n user_id: int = settings['user_id'] if 'user_id' in settings else io.get_new_user_id(\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n\n # Ensure save dir exists\n if not (\"save\" in settings and settings[\"save\"] == False):\n io.ensure_savedir_exists(study_name=settings_file_name, sub_path=str(user_id))\n\n session_id: int = settings['session_id'] if 'session_id' in settings else io.get_new_session_id(user_id,\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n settings['session_id'] = str(session_id)\n\n # Call GP data_gp_initialisation function\n x, y_true, query_index, mean_vector, confidence_up, confidence_down = user_study_gp.data_gp_initialisation(\n settings['x_limits'][0],\n settings['x_limits'][1],\n settings['n_points'],\n settings['kernel'],\n settings['kernel_args'],\n settings['noise']\n )\n\n # Convert the data to JSON\n data = {\n \"settings\": settings,\n \"iteration\": 0,\n \"new_point_index\": query_index, # index of new point to be queried\n \"new_point_x\": x[query_index], # new point to be queried\n 'x_data': [], # queried data points (initially empty)\n 'y_data': [], # values given by the user for the queried points (initially empty)\n 'y_data_actual': [], # actual value of f(queried point)\n 'x_limits': settings['x_limits'],\n 'n_points': settings['n_points'],\n \"x\": x, # x points in the interval (linspace)\n \"y\": y_true, # f(x) true values in the x points\n \"mean\": mean_vector,\n \"std\": confidence_up + confidence_down, # list concatenation\n }\n\n # Update session_id to match session, when running a full user study\n if \"max_sessions\" in settings:\n if \"update_session\" in interface_settings and interface_settings[\"update_session\"] == True:\n data[\"session\"] = interface_settings[\"session\"] + 1\n else:\n data[\"session\"] = 0\n session_id = data[\"session\"]\n\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n io.save_data(data,\n study_name=settings_file_name,\n user_id=user_id,\n session_id=session_id,\n 
incremental=settings['save_split'])\n\n return utils.remove_nan(json.dumps(data))\n\n @app.route('/api_update_gp', methods=['GET', 'POST'])\n def api_update_gp():\n \"\"\"Updates a GP based on the given parameters.\n\n The parameters are retrieved from the request object. It updates the GP with the new points. Finally it chooses\n a new query point.\n\n All the data is sent to the frontend as a JSON object.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n logging.info(\"Called: api_update_gp\")\n data = utils.get_response_and_log(request)\n try:\n utils.assert_required_data(data,\n [\n 'settings', # settings of the user study\n 'x_data', # queried data points\n 'y_data', # values by the user for the queried points\n \"x_limits\", # beginning and end of the interval\n \"x\", # x points\n \"iteration\" # current iteration\n ])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(data.keys()))\n return str(e), 400 # BAD_REQUEST\n\n if (\"x_data\" in data and \"y_data\" in data) and (len(data[\"x_data\"]) >= 1 and len(data[\"y_data\"]) >= 1):\n logging.info(\"Received new data point: ({}, {}), updating..\".format(\n data[\"x_data\"][-1],\n data[\"y_data\"][-1])\n )\n\n settings = data['settings']\n\n # Update vanilla GP\n query_index, mean_vector, upper_confidence, lower_confidence = user_study_gp.update(data[\"x\"],\n settings[\"kernel\"],\n settings[\"kernel_args\"],\n data[\"x_data\"],\n data[\"y_data\"],\n settings[\"noise\"])\n\n # Update data\n data[\"new_point_index\"] = query_index\n data[\"new_point_x\"] = data[\"x\"][query_index]\n data[\"mean\"] = mean_vector\n data[\"std\"] = upper_confidence + lower_confidence\n data[\"iteration\"] += 1\n\n data_json = utils.remove_nan(json.dumps(data))\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n logging.debug(f'Study name: {settings[\"settings_name\"]}')\n io.save_data(data,\n study_name=settings[\"settings_name\"],\n user_id=settings['user_id'],\n session_id=settings['session_id'],\n incremental=settings['save_split'])\n return data_json\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(config[config_name])\n config[config_name].init_app(app)\n register_extensions(app)\n\n from main import main as main_blueprint\n\n app.register_blueprint(main_blueprint, url_prefix='/')\n\n from preview import preview as preview_blueprint\n\n app.register_blueprint(preview_blueprint, url_prefix='/preview')\n\n return app", "def create_app():\n\n app = FastAPI()\n add_root_route(app)\n\n return app", "def create_application(config=None):\n app = Flask(__name__) \n if config is not None:\n print('mewo')\n app.config.from_object(config)\n # else:\n # print(os.environ['APP_SETTINGS'])\n # app.config.from_object(os.environ['APP_SETTINGS'])\n\n @app.route('/')\n def example():\n \"\"\" a example funciton \"\"\"\n return 'hello world'\n\n return app", "def create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n\n if app.config[\"ENV\"] == \"production\":\n app.config.from_object(\"config.ProductionConfig\")\n else:\n app.config.from_object(\"config.DevelopmentConfig\")\n\n if app.config['SECRET_KEY'] is None:\n raise Exception(\n \"SECRET_KEY can't be None. 
Try to generate one by command: python -c 'import os; print(os.urandom(16))', and copy the result into configs.py.\")\n\n if app.config['OWNER_USER_ID'] is None:\n raise Exception(\n \"OWNER_USER_ID can't be None. It is an integer user_id of your Clubhouse account, you can get it from token json file generated by OpenClubhouse-worker\")\n\n @app.route(\"/alive\")\n def alive():\n return {\"alive\": True}\n\n db = MongoEngine(app)\n channelsCache.init_cache(app.logger)\n\n # apply the blueprints to the app\n from handlers import clubhouse\n\n app.register_blueprint(clubhouse.bp)\n # in another app, you might define a separate main index here with\n # app.route, while giving the blog blueprint a url_prefix, but for\n # the tutorial the blog will be the main index\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def create_app(conf: Type[Config]):\n app = Flask(__name__)\n app.config.from_object(conf)\n configure_logger(conf)\n register_pc_blueprints(app)\n register_extensions(app)\n return app", "def create_app(settings_override=None):\r\n app = Flask(__name__, instance_relative_config=True)\r\n\r\n app.config.from_object('config.settings')\r\n app.config.from_pyfile('settings.py', silent=True)\r\n\r\n if settings_override:\r\n app.config.update(settings_override)\r\n\r\n extensions(app)\r\n\r\n @app.before_first_request\r\n def init_db():\r\n session['email'] = None\r\n Database()\r\n\r\n @app.route('/')\r\n def home_page():\r\n form = LoginForm()\r\n return render_template('index.html', form=form)\r\n\r\n app.register_blueprint(users, url_prefix='/user')\r\n app.register_blueprint(admin, url_prefix='/admin')\r\n return app", "def create():\n app = Flask(__name__, instance_relative_config=False)\n\n app.config.update(\n FLASK_ENV=\"development\",\n DEBUG=True,\n SECRET_KEY=\"Segredo\",\n SQLALCHEMY_DATABASE_URI=f\"sqlite:////tmp/escola.db\",\n SQLALCHEMY_ECHO=False,\n SQLALCHEMY_TRACK_MODIFICATIONS=False\n )\n\n db.init_app(app)\n\n with app.app_context():\n from . 
import routes\n        db.create_all()\n\n    return app", "def create_app(test_config=None) -> Flask:\n    app = Flask(\"front-end-web-server\", instance_relative_config=True)\n\n    if test_config is None:\n        # Get path to configuration file and load that configuration into Flask's app.config object.\n        curr_dir = os.path.abspath(os.path.dirname(__file__))\n        config_dir = os.path.dirname(curr_dir)\n        config_filename = os.path.join(config_dir, \"config.py\")\n\n        # load the instance config, if it exists, when not testing\n        app.config.from_pyfile(config_filename)\n\n        print(app.config.get(\"SECRET_KEY\"))\n    else:\n        # load the test config if passed in\n        app.config.from_mapping(test_config)\n\n    # ensure the instance folder exists\n    try:\n        os.makedirs(app.instance_path)\n    except OSError:\n        pass\n\n    # a simple page that says hello\n    @app.route(\"/\")\n    def index():\n        return \"Index page!\"\n\n    from .views import hello\n\n    app.register_blueprint(hello)\n\n    return app", "def create_app(settings_override=None):\n    try:\n        # initialize app & define config scopes\n        app = Flask(__name__, instance_relative_config=True)\n        app.logger.info(\"Initializing app\")\n\n        # useful when hooking up the api to a front end or custom ajax later\n        app.logger.info(\"Adding CORS\")\n        CORS(app)\n\n        app.logger.info(\"Loading settings from config file\")\n        app.config.from_object('config.settings')\n        app.config.from_pyfile('settings.py', silent=True)\n\n        if settings_override:\n            app.config.update(settings_override)\n\n        # add extensions\n        app.logger.info(\"Loading extensions\")\n        extensions(app)\n\n        # make api\n        app.logger.info(\"Loading restful interface\")\n        api = Api(app)\n\n        # create tables\n        @app.before_first_request\n        def create_tables():\n            app.logger.info(\"Creating database tables\")\n            db.create_all()\n\n        # add routes, '/' first is best practice\n        app.logger.info(\"Loading restful routes\")\n        api.add_resource(Success, app.config['ROUTE_SUCCESS'])\n\n        # user routes\n        api.add_resource(UserRegister, app.config['ROUTE_USER_REGISTER'])\n        api.add_resource(User, app.config['ROUTE_USER'])\n\n        # menu routes\n        api.add_resource(MenuAdd, app.config['ROUTE_MENU'])\n        api.add_resource(MenuItem, app.config['ROUTE_MENU_ITEM'])\n        api.add_resource(MenuList, app.config['ROUTE_MENU_LIST'])\n\n        # order routes\n        api.add_resource(OrderAdd, app.config['ROUTE_ORDER'])\n        api.add_resource(OrderList, app.config['ROUTE_ORDER_LIST'])\n        api.add_resource(OrderItem, app.config['ROUTE_ORDER_ITEM'])\n\n        app.logger.info(\"API ready\")\n        return app\n\n    # base exception to catch everything\n    except BaseException:\n        app.logger.error(app_error(nondict=True))\n        return app_error()", "def create_app(config_name=\"development\"):\n    app = Flask(__name__, instance_relative_config=True)\n    cors = CORS(app, resources={r\"/*\": {\"origins\": \"*\", \"methods\": [\"GET\", \"HEAD\", \"POST\", \"OPTIONS\", \"PUT\", \"PATCH\", \"DELETE\"], \"expose_headers\": \"Authorization\"}})\n    app.config.from_mapping(\n        # a default secret that should be overridden by instance config\n        SECRET_KEY=\"dev\",\n        # store the database in the instance folder\n        DATABASE=os.path.join(app.instance_path, \"sqlite.db\"),\n    )\n\n    app.config.from_object(app_config[config_name])\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n\n    # Set logging\n    if app.config.get(\"LOG_LEVEL\") is not None and app.config.get(\"LOG_LOCATION\") is not None:\n        formatter = logging.Formatter(\n            \"[%(asctime)s] {%(pathname)s:%(lineno)d} %(levelname)s - %(message)s\")\n        handler = RotatingFileHandler(app.config['LOG_LOCATION'], 
maxBytes=10000, backupCount=1)\n handler.setLevel(app.config[\"LOG_LEVEL\"])\n handler.setFormatter(formatter)\n app.logger.addHandler(handler)\n\n # register the database commands with SQLAlchemy\n db.init_app(app)\n\n # set strict_slashes to False, endpoints like /sales_data/upload and /sales_data/upload/ will map to the same thing\n app.url_map.strict_slashes = False\n\n # apply the blueprints to the app\n from .api_views.auth import auth_bp\n from .api_views.sales_data import sales_data_bp\n\n app.register_blueprint(auth_bp)\n app.register_blueprint(sales_data_bp)\n\n return app", "def create_app(name, path, settings_override=None,\n register_security_blueprint=True):\n\n app = Flask(name, instance_relative_config=True)\n app.config.from_object(\"linkedlist.config\") # public config\n app.config.from_pyfile(\"config.py\", silent=True) # instance config\n app.config.from_object(settings_override) # argument override\n\n # patch in envvar config\n environ_config_override = find_environ_config_vars()\n for key, value in environ_config_override.iteritems():\n app.config[key] = value\n\n db.init_app(app)\n security.init_app(app, SQLAlchemyUserDatastore(db, User, Role),\n register_blueprint=register_security_blueprint)\n register_blueprints(app, name, path)\n\n # create database tables\n with app.app_context():\n db.create_all()\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n app.config.from_object(app_config[config_name])\n # versions of api\n from app.api.v2 import version2 as v2\n\n app.register_blueprint(v2)\n\n # registered JWT manager\n app.config['JWT_SECRET_KEY'] = 'owezzy'\n jwt = JWTManager(app)\n\n create_tables()\n\n return app", "def create():\n\n return App()", "def create_app(settings_override=None):\n app = factory.create_app(__name__, __path__, settings_override)\n\n Bootstrap(app)\n admin.init_app(app)\n filters.init_app(app)\n Sentry(app)\n\n if not app.debug:\n for e in (404, 500):\n app.errorhandler(e)(handle_error)\n\n return app", "def create_app(**config_overrides):\n # we want to modify the global app, not a local copy\n global app\n global eventum\n\n app = Flask(__name__)\n\n # Load config then apply overrides\n app.config.from_object('config.flask_config')\n app.config.update(config_overrides)\n\n # Initialize assets\n assets = Environment(app)\n register_scss(assets)\n\n # Eventum\n eventum = Eventum(app)\n\n # Blueprints\n register_blueprints()\n\n return app", "def init_app(db_name=None):\n\n # create a Flask app, force SSL when debug is False\n app = Flask(__name__, static_folder='./ui/static')\n app.config.from_pyfile('config.py')\n\n # load custom config file\n custom_config = app.config['CUSTOM_CONFIG_PATH']\n if os.environ.get(custom_config) is not None:\n app.config.from_envvar(custom_config)\n\n # setup\n app.db = db.connect(db_name)\n setup_auth(app)\n SSLify(app, subdomains=True)\n\n # register blueprints\n app.register_blueprint(api, url_prefix='/api')\n app.register_blueprint(auth)\n app.register_blueprint(ui)\n\n return app", "def create_app(config_class=flaskblog_cf.Config):\n app = flask.Flask(__name__)\n app.config.from_object(flaskblog_cf.Config)\n\n db.init_app(app)\n bcrypt.init_app(app)\n login_manager.init_app(app)\n mail.init_app(app)\n\n import flaskblog.controller.user_controller as flaskblog_user_ctrl\n import flaskblog.controller.general_controller as flaskblog_general_ctrl\n import flaskblog.controller.posts_controller as flaskblog_post_ctrl\n import flaskblog.controller.error_pages_controller as
flaskblog_error_ctrl\n\n app.register_blueprint(flaskblog_user_ctrl.users)\n app.register_blueprint(flaskblog_post_ctrl.posts)\n app.register_blueprint(flaskblog_general_ctrl.main)\n app.register_blueprint(flaskblog_error_ctrl.errors)\n\n return app", "def make_app(conf=None):\n if not conf:\n conf = 'development'\n app = create_app(cm.get(conf))\n return app", "def create_app(custom_exceptions=True):\n app = Flask(__name__)\n app.secret_key = os.getenv(\"RENKU_SVC_SERVICE_KEY\", uuid.uuid4().hex)\n app.json_encoder = SvcJSONEncoder\n app.config[\"UPLOAD_FOLDER\"] = CACHE_DIR\n\n app.config[\"MAX_CONTENT_LENGTH\"] = MAX_CONTENT_LENGTH\n\n app.config[\"cache\"] = cache\n\n build_routes(app)\n\n @app.route(SERVICE_PREFIX)\n def root():\n \"\"\"Root shows basic service information.\"\"\"\n import renku\n\n return jsonify({\"service_version\": renku.__version__, \"spec_url\": url_for(\"apispec.openapi\")})\n\n @app.route(\"/health\")\n def health():\n \"\"\"Service health check.\"\"\"\n import renku\n\n return f\"renku repository service version {renku.__version__}\\n\"\n\n if custom_exceptions:\n register_exceptions(app)\n\n return app" ]
[ "0.85773337", "0.84702384", "0.8426545", "0.84196174", "0.84109616", "0.8407618", "0.83920527", "0.8390233", "0.83386177", "0.8315099", "0.83083576", "0.8303583", "0.8263485", "0.8250034", "0.8220532", "0.82113147", "0.82048184", "0.81779504", "0.81760377", "0.8132556", "0.812531", "0.80895805", "0.8074234", "0.8059521", "0.8048551", "0.80408585", "0.802338", "0.80197775", "0.8016099", "0.80142564", "0.80024695", "0.79998684", "0.79959494", "0.79943365", "0.7984833", "0.79776686", "0.79756975", "0.7964481", "0.7951846", "0.79418164", "0.7929636", "0.7907212", "0.79001987", "0.78955096", "0.78833413", "0.7879261", "0.78743696", "0.78700674", "0.7866922", "0.7849222", "0.7823336", "0.7823154", "0.7820928", "0.7819017", "0.7812451", "0.77937734", "0.77937186", "0.77745277", "0.77704793", "0.77654606", "0.7751393", "0.7739008", "0.7737989", "0.7718349", "0.77163607", "0.77122164", "0.7698609", "0.7683759", "0.7682247", "0.7682209", "0.7677288", "0.766899", "0.7654603", "0.7649576", "0.7649098", "0.76431805", "0.76415175", "0.7633058", "0.76307774", "0.76301813", "0.76196384", "0.7611539", "0.7605598", "0.75996226", "0.7593391", "0.7570069", "0.7561003", "0.7551076", "0.7530264", "0.75284123", "0.75194055", "0.74971676", "0.74911577", "0.7485113", "0.74808764", "0.74777496", "0.74703133", "0.74428606", "0.7430315", "0.7428082", "0.7424788" ]
0.0
-1
Add assets for inclusion in the dashboard HTML.
def _create_assets(self):
    assets = Environment(self.app)
    # jQuery is served as a standalone file
    jquery = Bundle('js/jquery-*.min.js', output='gen/jquery.min.js')
    # JavaScript is combined into one file and minified
    js_all = Bundle('js/js_all/*.js', filters='jsmin', output='gen/app.min.js')
    # SCSS (Sassy CSS) is compiled to CSS
    scss_all = Bundle('scss/app.scss', filters='libsass', output='gen/app.css')
    assets.register('jquery', jquery)
    assets.register('js_all', js_all)
    assets.register('scss_all', scss_all)
    return assets
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assets():", "def assets():\n pass", "def compile_static_assets(assets):\n assets.auto_build = True\n assets.debug = False\n\n css = Bundle(\n \"css/*.css\",\n # filters=\"less,cssmin\",\n output=\"gen/avantui.css\",\n # extra={\"rel\": \"stylesheet/less\"},\n )\n\n js = Bundle(\n \"js/*.js\",\n output='gen/avantui.js'\n )\n\n assets.register(\"avantui_css\", css)\n assets.register(\"avantui_js\", js)\n if app.config[\"ENV\"] == \"development\":\n css.build()\n js.build()\n return assets", "def add_latesettings_assets(self):\n\n # setting up static file serving\n assetmanager = self.comp('assetmanager')\n\n # add external asset mount point where we can copy public static files so they can be served by a separate traditional web server\n # presumably this directory is being served by a more traditional webserver, at this url we specify below\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_ExternalServer('external_assets', filepath = '${mewlofilepath}/public_assets', urlabs = 'http://127.0.0.1/mewlo/mewlo/public_assets' )\n )\n\n # add internal asset mount point where we will serve files internally; a route will be automatically created for any asset source attached to this mount point; we can choose the path prefix for urls served by the route\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_InternalRoute('internal_assets', urlpath='assets')\n )\n\n\n # now that we have some mount points, we can specify some files to be hosted on them\n # note that the ids for all asset sources MUST be unique (ATTN:TODO elaborate on this please)\n # first we mount the files in the staticfilesource/ directory as internal assets that we will serve internally via mewlo; the id will be used for alias creation, and for the route\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteinternal', mountid = 'internal_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n # then as a test, lets mount same files on the external mount point -- this will cause mewlo to physically copy the files to the external filepath, where presumably another web server can serve them\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteexternal', mountid = 'external_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n\n # remember that one should never refer to the assets by a hardcoded url or file path; always use the aliases created by these functions, which will take the form (where ID is the id of the asset source):\n # 'asset_ID_urlrel' | 'asset_ID_urlabs' | 'asset_ID_filepath'\n # you can also use helper function to build these names, which would be better.", "def _add_static_files(self, req):\n add_script(req, self._get_jqplot('jquery.jqplot'))\n add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')\n # excanvas is needed for IE8 support\n add_script(req, self._get_jqplot('excanvas.min'))\n add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))", "def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = 
resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)", "def dashboard_workflow(self):\n # copy html/js/css templates to the workflow specific directory\n js_dir = Path(__file__).absolute().parent / \"dashboard_template\"\n for js_template in [\"dashboard.js\", \"index.html\", \"style.css\"]:\n shutil.copy2(js_dir / js_template, self.working_dir)", "def includeme(config):\n\n config.add_translation_dirs('kotti_dashboard:locale')\n config.add_static_view('static-kotti_dashboard', 'kotti_dashboard:static')\n\n config.scan(__name__)", "def script_info_assets(app, static_dir, testcss):\n InvenioAssets(app)\n\n blueprint = Blueprint(__name__, \"test_bp\", static_folder=static_dir)\n\n class Ext(object):\n def __init__(self, app):\n assets = app.extensions[\"invenio-assets\"]\n app.register_blueprint(blueprint)\n\n Ext(app)\n\n yield ScriptInfo(create_app=lambda: app)", "def build_assets(self):\n theme = self.theme\n \n # ~ self.assets_dir = cwd + \"/CenterSide_Themes/\" + theme + \"/\"\n \n \n \n \n \n \n # ~ self.blank_langmssg = QPixmap(\"blank_langmssg.svg\")\n # ~ self.blank_thememssg = QPixmap(\"blank_thememssg.svg\")\n \n \n \n \n \n # ~ self.icon_info = QIcon(\"Icons/info.svg\")\n # ~ self.icon_intructions = QIcon(\"Icons/instructions.svg\")\n # ~ self.icon_internet = QIcon(\"Icons/internet.svg\")\n # ~ self.icon_invite = QIcon(\"Icons/invite.svg\")\n # ~ self.icon_languages = QIcon(\"Icons/languages.svg\")\n # ~ self.icon_local = QIcon(\"Icons/local.svg\")\n # ~ self.icon_message = QIcon(\"Icons/message.svg\")\n # ~ self.icon_name = QIcon(\"Icons/name.svg\")\n # ~ self.icon_options = QIcon(\"Icons/options.svg\")\n # ~ self.icon_palettes = QIcon(\"Icons/palettes.svg\")\n \n # ~ self.icon_quit = QIcon(\"Icons/quit.svg\")\n # ~ self.icon_refresh = QIcon(\"Icons/refresh.svg\")\n # ~ self.icon_shop = QIcon(\"Icons/shop.svg\")\n # ~ self.icon_soundon = QIcon(\"Icons/soundon.svg\")\n # ~ self.icon_soundoff = QIcon(\"Icons/soundoff.svg\")\n # ~ self.icon_vsAI = QIcon(\"Icons/vsAI.svg\")", "def _assets_url(self):\r\n return \"/assets/\" + self._course_key + \"/\"", "def collect_links(self, env=None):\n for asset in self.assets.values():\n if asset.has_bundles():\n asset.collect_files()\n if env is None:\n env = self.config.env\n if env == static_bundle.ENV_PRODUCTION:\n self._minify(emulate=True)\n self._add_url_prefix()", "def create_assets():\n assets = {}\n\n # Load all static files\n for root, dirs, files in os.walk(STATIC_DIR):\n for fname in files:\n filename = os.path.join(root, fname)\n with open(filename, \"rb\") as f:\n assets[os.path.relpath(filename, STATIC_DIR)]
= f.read()\n\n # Collect pages\n pages = {}\n for fname in os.listdir(PAGES_DIR):\n if fname.lower().endswith(\".md\"):\n name = fname.split(\".\")[0].lower()\n with open(os.path.join(PAGES_DIR, fname), \"rb\") as f:\n md = f.read().decode()\n pages[name] = Page(name, md)\n\n # todo: Collect blog posts\n\n # Get template\n with open(os.path.join(THIS_DIR, \"template.html\"), \"rb\") as f:\n html_template = f.read().decode()\n\n with open(os.path.join(THIS_DIR, \"style.css\"), \"rb\") as f:\n css = f.read().decode()\n css += \"/* Pygments CSS */\\n\" + HtmlFormatter(style=\"vs\").get_style_defs(\n \".highlight\"\n )\n\n # Generate pages\n year = datetime.now().year\n for page in pages.values():\n page.prepare(pages.keys())\n title = TITLE if page.name == \"index\" else TITLE + \" - \" + page.name\n menu = create_menu(page)\n html = html_template.format(\n title=title, style=css, body=page.to_html(), menu=menu, year=year\n )\n print(\"generating\", page.name + \".html\")\n assets[page.name + \".html\"] = html.encode()\n\n # Fix backslashes on Windows\n for key in list(assets.keys()):\n if \"\\\\\" in key:\n assets[key.replace(\"\\\\\", \"/\")] = assets.pop(key)\n\n return assets", "def make_static_assets(opts):\n\n css_filename = do_css(opts['css_source_dir'], opts['out_dir'])\n js_filename = do_js(opts['js_source_dir'], opts['out_dir'])\n return {\n 'primary_css': css_filename,\n 'js': js_filename\n }", "def assets(self):\n static = self.static\n if static is None:\n return None\n\n assets = os.path.join(static, 'assets')\n if not os.path.isdir(assets):\n return None\n\n return assets", "def index(request):\n return render_to_response(\n # note: this is slightly different than the labs app with \"app/app.html\" rather than the labs/labs.html\n # and we don't pass submodule name.
fixme, by changing to new style with name = app_name\n settings.JS_HOME+'app.html',\n {'INDIVO_UI_APP_CSS': settings.INDIVO_UI_SERVER_BASE+'/jmvc/ui/resources/css/ui.css'}\n )", "def linkAssets(des, Xrc):\n with open(des, 'r') as f:\n body = f.read()\n f.close()\n with open(des, 'w') as f:\n body = body.replace(\"custom.css\", \"\\\\\" + Xrc[\"gh_repo_name\"] + \"/Assets\" + \"/css\" + \"/custom.css\")\n f.write(body)\n f.close()\n ccc.success(\"linking assets to \" + des)", "def collect_assets(systems, settings):\r\n for sys in systems:\r\n sh(django_cmd(sys, settings, \"collectstatic --noinput > /dev/null\"))", "def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))", "def build_assets():\n\n # templates\n template = open(os.path.join(BASE_PATH, 'AssetLibrary.as.template'), 'r').read()\n\n embed_templates = {\n 'image': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\",\n 'mp3': \"[Embed(source='%(asset_path)s')] private var %(asset_class_name)s:Class;\\n\", \n 'xml': \"[Embed(source='%(asset_path)s', mimeType=\\\"application/octet-stream\\\")] private var %(asset_class_name)s:Class;\\n\"\n }\n \n library_element_template = \"'%(asset_id)s': %(asset_class_name)s\"\n\n # load+parse asset xml\n complete_asset_embed_code = \"\"\n complete_asset_data_code = \"\"\n asset_dom = minidom.parse(ASSET_XML_FILE)\n \n asset_nodes = list(asset_dom.getElementsByTagName('asset'))\n \n for asset_node in asset_nodes:\n asset_attrs = dict(asset_node.attributes.items())\n asset_embed_code = embed_templates[asset_attrs['type']] % {\n 'asset_class_name': asset_attrs['name'],\n 'asset_path': ASSET_BASE + asset_attrs['file']\n }\n\n complete_asset_embed_code += asset_embed_code\n \n asset_data_code = library_element_template % {\n 'asset_id': asset_attrs['name'],\n 'asset_class_name': asset_attrs['name']\n }\n\n complete_asset_data_code += asset_data_code\n\n if asset_nodes.index(asset_node) == len(asset_nodes) - 1:\n complete_asset_data_code += \"\\n\"\n else:\n complete_asset_data_code += \",\\n\"\n \n output = template % {\n 'asset_embeds': complete_asset_embed_code,\n 'asset_data': complete_asset_data_code\n }\n \n # render\n output_f = open(os.path.join(BASE_PATH, 'AssetLibrary.as'), 'w')\n output_f.write(output)", "def process_xmodule_assets():\r\n sh('xmodule_assets common/static/xmodule')", "def configure_ext_assets(app, xstatic):\n assets = Environment(app)\n coffee_lib = Bundle(\n 'coffee/lib/*.coffee',\n filters='coffeescript',\n output='gen/lib.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee_pages = Bundle(\n 'coffee/pages/*.coffee',\n filters='coffeescript',\n output='gen/pages.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee = Bundle(\n coffee_lib,\n coffee_pages,\n output='gen/app.js'\n )\n assets.register('coffee_app', coffee)\n\n coffee_spec = Bundle(\n 'coffee/spec/*.coffee',\n filters='coffeescript',\n output='gen/coffee_spec.js'\n )\n assets.register('coffee_spec', coffee_spec)\n\n vendor_js = Bundle(\n os.path.join(xstatic.path_for('jquery'), 'jquery.min.js'),\n 'vendor/pdfjs-' + app.config['X_PDFJS_VERSION'] + '-dist/build/pdf.js',\n 'vendor/jquery.jeditable.mini.js',\n 'vendor/jquery-ui-1.11.2/jquery-ui.min.js',\n output='gen/vendor_js.js',\n )\n assets.register('vendor_js', vendor_js)\n\n scss_bundle = Bundle(\n 'scss/site.scss',\n depends='**/*.scss',\n filters='pyscss',\n output='gen/app.css'\n )\n assets.register('scss_all', scss_bundle)\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n
scss.config.LOAD_PATHS = [\n os.path.join(xstatic.path_for('bootstrap_scss'), 'scss'),\n os.path.join(this_dir, '../static/vendor/bootswatch-darkly'),\n ]", "def dashboard_render(self,servers):\n THIS_DIR = os.path.dirname(os.path.abspath(__file__))\n j2_env = Environment(loader=FileSystemLoader(THIS_DIR),\n trim_blocks=True)\n new_dashboard = (j2_env.get_template('templating_dashboard.json').render(\n list_templating=self.templating(servers)\n ))\n return (new_dashboard)", "def add_assets_mapping(config, mapping):\n assets = config.registry.queryUtility(IAssets) or Assets()\n assets.update(mapping)\n config.registry.registerUtility(assets, IAssets)", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n
row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def add_assets(self, assets=None):\n if assets is None:\n assets = PipelineHelper.getSelectedMayaAssets()\n\n assets = [\n asset\n for asset in assets\n if (\n asset.get_maya_commit().component.stage ==\n zefir.STAGES.FX_SIMULATION\n )\n ]\n\n for asset in assets:\n name = asset.name\n if name in self._model.assets:\n continue\n\n asset_data = AssetData()\n asset_data.asset = asset\n\n effects_node = str(asset.get_effects_node())\n if (\n mc.objExists(effects_node) and\n mc.listRelatives(effects_node) is not None\n ):\n asset_data.commit_to_fx_cache = True\n else:\n asset_data.can_commit_to_fx_cache = False\n asset_data.commit_to_fx_cache = False\n asset_data.generate_alembic_from_geos = False\n asset_data.use_local_space_for_alembic = False\n asset_data.commit_to_alembic_anim = True\n\n self._model.assets[name] = asset_data\n\n self._view.build_items(sorted(self._model.assets.keys()))", "def test_dashboard_static_files(self):\n url = reverse('shipping.views.dashboard')\n response = self.client.get(url)\n 
eq_(response.status_code, 200)\n self.assert_all_embeds(response.content)", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def global_admin_css():\n return format_html('<link rel=\"stylesheet\" href=\"{}\">', static('css/admin.css'))", "def on_main(self, request):\n return self.render_template('main.html', ads=self.get_adds())", "def set_assets(self, assets):\n self._assets = {}\n for asset in assets:\n self.add_asset(asset)", "def ext_assets(assets: t.List[str]) -> str:\n # XXX: External assets are deprecated, so this function serves them as internal\n # assets\n return gen_assets_url(assets)", "def _prepare_assets(self, page_instructions, assets=None):\n assert type(assets) == tuple or type(assets) == list\n\n for yaml in page_instructions.yaml:\n # yaml = app/page/page.yaml\n template, origin = loader.find_template(yaml)\n filepath = template.origin.name\n\n # /Users/me/Development/app/templates/app/page/page.yaml\n yaml_basedir = os.path.dirname(yaml)\n # app/page\n template_basedir = filepath[:filepath.find(yaml)]\n # /Users/me/Development/app/templates\n\n for asset in assets:\n # directory = /media/js/templates\n if not yaml_basedir in asset:\n # The user might be specifying the directory relative to\n # the yaml file itself, so we'll add it for them if they\n # gave us something like 'media/js/templates'\n directory = os.path.join(yaml_basedir, asset)\n else:\n directory = asset\n\n sourcedirectory = os.path.join(template_basedir, directory)\n\n if not os.path.isdir(sourcedirectory):\n # We're going to try and find it somewhere else, it may not\n # be relative to the YAML file\n #\n # This is quite possible if the yaml file is processing a\n # \"chirp:\" attribute.\n try:\n sourcedirectory = find_directory_from_loader(\n page_instructions, asset)\n # We need to reset this, it has the yaml_basedir on it\n # at this point\n directory = asset\n except TemplateDoesNotExist:\n continue\n\n if not os.path.isdir(sourcedirectory):\n continue\n\n cachedirectory = os.path.join(self.cache_root, directory)\n\n if os.path.isdir(cachedirectory):\n if self._assets_are_stale(sourcedirectory, cachedirectory):\n shutil.rmtree(cachedirectory)\n else:\n continue\n\n shutil.copytree(sourcedirectory, cachedirectory)\n\n if settings.FILE_UPLOAD_PERMISSIONS is not None:\n os.chmod(cachedirectory, 02750)\n\n for root, dirs, files in os.walk(cachedirectory):\n for momo in files:\n os.chmod(os.path.join(root, momo),\n settings.FILE_UPLOAD_PERMISSIONS)\n for momo in dirs:\n os.chmod(os.path.join(root, momo), 02750)", "def render_asset(self, name):\n result = \"\"\n if self.has_asset(name):\n asset = self.get_asset(name)\n if asset.files:\n for f in asset.files:\n result += f.render_include() + \"\\r\\n\"\n return result", "def get_extra_assets(self):\n asset_list = []\n if self.extra_assets is None:\n return []\n return [ self.complete_static_filename(asset) \\\n for asset in self.extra_assets ]", "def add_static(ext):\n ext = ext.lower()\n\n compiler = StaticCompiler(ext)\n file_list = compiler.get_staticfiles_list()\n\n return render_to_string(\n \"mub/context_%s.html\" % ext,\n {\n \"items\": file_list,\n \"STATIC_URL\": settings.STATIC_URL,\n \"IS_MINIFIED\": compiler.is_minified\n }\n )", "def dashboard():\n return render_template(\"admin/dashboard.html\", title=\"Dashboard\")", "def autoload():\r\n global _ASSETS_LOADED\r\n if _ASSETS_LOADED:\r\n return False\r\n\r\n # Import this locally, so that we don't have a global Django\r\n # dependency.\r\n from
django.conf import settings\r\n\r\n for app in settings.INSTALLED_APPS:\r\n # For each app, we need to look for an assets.py inside that\r\n # app's package. We can't use os.path here -- recall that\r\n # modules may be imported different ways (think zip files) --\r\n # so we need to get the app's __path__ and look for\r\n # admin.py on that path.\r\n #if options.get('verbosity') > 1:\r\n # print \"\\t%s...\" % app,\r\n\r\n # Step 1: find out the app's __path__ Import errors here will\r\n # (and should) bubble up, but a missing __path__ (which is\r\n # legal, but weird) fails silently -- apps that do weird things\r\n # with __path__ might need to roll their own registration.\r\n try:\r\n app_path = import_module(app).__path__\r\n except AttributeError:\r\n #if options.get('verbosity') > 1:\r\n # print \"cannot inspect app\"\r\n continue\r\n\r\n # Step 2: use imp.find_module to find the app's assets.py.\r\n # For some reason imp.find_module raises ImportError if the\r\n # app can't be found but doesn't actually try to import the\r\n # module. So skip this app if its assetse.py doesn't exist\r\n try:\r\n imp.find_module('assets', app_path)\r\n except ImportError:\r\n #if options.get('verbosity') > 1:\r\n # print \"no assets module\"\r\n continue\r\n\r\n # Step 3: import the app's assets file. If this has errors we\r\n # want them to bubble up.\r\n import_module(\"%s.assets\" % app)\r\n #if options.get('verbosity') > 1:\r\n # print \"assets module loaded\"\r\n\r\n # Load additional modules.\r\n for module in getattr(settings, 'ASSETS_MODULES', []):\r\n import_module(\"%s\" % module)\r\n\r\n _ASSETS_LOADED = True", "def process_js():\n source_paths = [\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/admin.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/app.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/footnotes.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/table_of_contents.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/text_resize.js'),\n os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/js/toastr.js'),\n ]\n dest_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.js')\n min_path = os.path.join(settings.BASE_DIR, 'static/CMESH/js/app.min.js')\n\n process_js_files(source_paths, dest_path, min_path)", "def js():\n with lcd(BASEDIR):\n js_ext = (\n 'submodules/jquery-cookie/src/jquery.cookie.js',\n 'submodules/jquery-treegrid/js/jquery.treegrid.js',\n 'submodules/bootstrap/dist/js/bootstrap.js',\n )\n js_own = (\n 'js/variables.js',\n 'js/bmf-autocomplete.js',\n 'js/bmf-calendar.js',\n 'js/bmf-editform.js',\n 'js/bmf-inlineform.js',\n 'js/bmf-buildform.js',\n 'js/menu.js',\n )\n\n local('cp submodules/bootstrap/dist/js/bootstrap.min.js djangobmf/static/djangobmf/js/')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.cookie.min.js submodules/jquery-cookie/src/jquery.cookie.js')\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/jquery.treegrid.min.js submodules/jquery-treegrid/js/jquery.treegrid.js')\n\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_ext + js_own))\n local('yui-compressor --type js -o djangobmf/static/djangobmf/js/djangobmf.min.js djangobmf/static/djangobmf/js/djangobmf.js')\n local('cat %s > djangobmf/static/djangobmf/js/djangobmf.js' % ' '.join(js_own))", "def include_static_files(app):\n file_path = sphinx_prolog.get_static_path(STATIC_FILE)\n if file_path not in app.config.html_static_path:\n
app.config.html_static_path.append(file_path)", "def dashboard():\n\n # get the directories in the data folder\n # (each directory represents another repo)\n repos = os.listdir(DATA)\n\n for repo in repos:\n # remove it if it's not a directory\n if not os.path.isdir(DATA + repo):\n repos.remove(repo)\n\n return render_template('home/dashboard.html', title=\"Dashboard\", repos=repos)", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def atlas_dashboard(request):\n if request.user.is_authenticated():\n try: \n views_helperobj = ViewsHelper()\n objhelper_obj = ObjectHelper()\n\n if request.is_ajax() or request.method == 'POST':\n return views_helperobj.handle_dashboard_post_requests(request)\n \n module_list = views_helperobj.create_module_list()\n dashboard_context = views_helperobj.generate_dashboard_data(request)\n return render_to_response('appv1/dashboard.html', \\\n dashboard_context, context_instance=RequestContext \\\n (request, processors = [custom_processor]))\n except PermissionDenied:\n return render_to_response('appv1/403.html')\n except TemplateDoesNotExist:\n return render_to_response('appv1/404.html')\n except Exception:\n return render_to_response('appv1/500.html')", "def dashboard():\n return render_template('home/dashboard.html')", "def index():\n\n badge_name = current_app.config.get(\"ACCREDITATION_BADGE\")\n if badge_name and not Path(public_bp.static_folder, badge_name).is_file():\n LOG.warning(f'No file with name \"{badge_name}\" in {public_bp.static_folder}')\n badge_name = None\n\n return render_template(\"public/index.html\", version=__version__, accred_badge=badge_name)", "def assets(self, pattern, handler):\n return self.route(GET, pattern, handler)", "def register_scss(assets):\n assets.append_path(app.static_folder, app.static_url_path)\n assets.config['SASS_PATH'] = 'app/scss'\n\n bundle = Bundle('scss/client.scss',\n output='css/gen/client.%(version)s.css',\n depends=('**/*.scss'),\n filters=('scss', 'cssmin'))\n assets.register('scss_client', bundle)", "def dashboard():\n return render_template(\"home/dashboard.html\", title=\"Dashboard\")", "def populateGallery():\n\n # Set the UI parent to be the scroll layout\n global objectScroll\n cmds.setParent(objectScroll)\n\n # List all assets in the direcoty\n assetList = [directory for directory in os.listdir(AC.ASSETS_PATH) if os.path.isdir(os.path.join(AC.ASSETS_PATH, directory))]\n\n # Create a ButtonIcon for each asset\n for asset in assetList:\n addButtonIcon(asset)", "def dashboard():\n return render_template('home/dashboard.html', title=\"Dashboard\")", "def bootstrap(path=None):\n LOGGER.info(path)\n if path and re.match('/assets/.*', path):\n raise ValueError()\n\n return render_template('bootstrap.html')", "def add_handlers(web_app, config):\n base_url = web_app.settings['base_url']\n url = ujoin(base_url, config.page_url)\n assets_dir = config.assets_dir\n\n package_file = os.path.join(assets_dir, 'package.json')\n with open(package_file) as fid:\n data = json.load(fid)\n\n config.version = (config.version or data['jupyterlab']['version'] or\n data['version'])\n config.name = config.name or data['jupyterlab']['name']\n\n handlers = [\n (url + r'/?', LabHandler, {\n 'lab_config': config\n }),\n (url + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n }),\n\n ]\n\n # Backward compatibility.\n if 'publicPath' in data['jupyterlab']:\n handlers.append(\n (data['jupyterlab']['publicPath'] + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n })\n )\n\n web_app.add_handlers(\".*$\",
handlers)", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def render_merged(self, context):\r\n\r\n output, files, filter = self.resolve(context)\r\n\r\n # make paths absolute\r\n output_path = _abspath(output)\r\n source_paths = [_abspath(s) for s in files]\r\n\r\n # check if the asset should be (re)created\r\n if not os.path.exists(output_path):\r\n if not settings.ASSETS_AUTO_CREATE:\r\n # render the sources after all\r\n return self.render_sources(context)\r\n else:\r\n update_needed = True\r\n else:\r\n update_needed = get_updater()(output_path, source_paths)\r\n\r\n if update_needed:\r\n create_merged(source_paths, output_path, filter)\r\n last_modified = os.stat(output_path).st_mtime\r\n # TODO: do asset tracking here\r\n #get_tracker()()\r\n\r\n # modify the output url for expire header handling\r\n if settings.ASSETS_EXPIRE == 'querystring':\r\n outputfile = \"%s?%d\" % (output, last_modified)\r\n elif settings.ASSETS_EXPIRE == 'filename':\r\n name = output.rsplit('.', 1)\r\n if len(name) > 1: return \"%s.%d.%s\" % (name[0], last_modified, name[1])\r\n else: outputfile = \"%s.%d\" % (name, last_modified)\r\n elif not settings.ASSETS_EXPIRE:\r\n outputfile = output\r\n else:\r\n raise ValueError('Unknown value for ASSETS_EXPIRE option: %s' %\r\n settings.ASSETS_EXPIRE)\r\n\r\n context.update({'ASSET_URL': _absurl(outputfile)})\r\n try:\r\n result = self.childnodes.render(context)\r\n finally:\r\n context.pop()\r\n return result", "def add_contents(html_file):\n\thtml = read_file(html_file)\n\tcontents_js = read_file(os.path.join(SCRIPT_DIR, 'themes', 'contents.js'))\n\thtml = html.replace('function loadFunc() {', contents_js)\n\n\tplace_holder = '<div class=\"header_banner\">'\n\ttags = '<div class=\"navbar_container\">\\n<div class=\"navbar\"/>\\n</div>'\n\ttags += '<div class=\"content_button\">\\n<div class=\"chevron\">></div>\\n</div>'\n\ttags += '\\n' + place_holder\n\n\thtml = html.replace(place_holder, tags)\n\thtml = html.replace('<div id=\"TOC\">', '<div class=\"canvas\">\\n<div id=\"TOC\">')\n\thtml = html.replace('</body>', '</div>\\n</body>')\n\twrite_file(html_file, html)", "def main():\n os.makedirs(\"./img/event_generated/\", exist_ok=True)\n for category in categories():\n render_icon(category + \".png\")", "def call(self, **kwargs):\n # Get additional resources links\n css = []\n for path in (\"creative/vendor/bootstrap/css/bootstrap.min.css\",\n \"creative/vendor/font-awesome/css/font-awesome.min.css\",\n \"creative/vendor/magnific-popup/magnific-popup.css\",\n \"creative/css/creative.css\"):\n css.append(self._cw.data_url(path))\n js = []\n for path in (\"creative/vendor/jquery/jquery.min.js\",\n \"creative/vendor/bootstrap/js/bootstrap.min.js\",\n \"creative/vendor/scrollreveal/scrollreveal.min.js\",\n \"creative/vendor/magnific-popup/jquery.magnific-popup.min.js\",\n \"creative/js/creative.js\"):\n js.append(self._cw.data_url(path))\n\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n login_url=self._cw.build_url(\n \"login\", __message=u\"Please login with your account.\"),\n contact_email=self._cw.vreg.config.get(\n \"administrator-emails\", \"noreply@cea.fr\"),\n css_url=css,\n js_url=js)\n self.w(html)", "def include_admin_script(script_path):\n if not
absolute_url_re.match(script_path):\n script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)\n return '<script type=\"text/javascript\" src=\"%s\"></script>' % script_path", "def loadjs(*args):\n return render(settings, 'JS_FILES', 'staticloader/load_js.html', *args)", "def add_navbar_js(self):\n this_dir, this_filename = os.path.split(__file__)\n file_path = os.path.join(this_dir, \"js\", \"navbar.js\")\n \n with open(file_path, \"r\") as fi:\n navbar = fi.read()\n \n new_script = html.Element(\"script\")\n new_script.text = navbar\n self.book.xpath(\"//head\")[0].insert(1, new_script)\n \n ## Add jquery library\n new_script = html.Element(\"script\")\n new_script.attrib[\"src\"] = \"https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js\"\n self.book.xpath(\"//head\")[0].insert(1, new_script)", "def loadcss(*args):\n return render(settings, 'CSS_FILES', 'staticloader/load_css.html', *args)", "def dashboard():", "def _asset_index(request, course_key):\r\n course_module = modulestore().get_course(course_key)\r\n\r\n return render_to_response('asset_index.html', {\r\n 'context_course': course_module,\r\n 'asset_callback_url': reverse_course_url('assets_handler', course_key)\r\n })", "def show_dashboard():\n script, div = plots.make_plot()\n script_tab, div_tab = plots.make_tabs()\n script_trend, div_trend = plots.make_trend()\n\n return render_template('layout.html',\n script=script,\n div=div,\n script_trend=script_trend,\n div_trend=div_trend,\n script_tab=script_tab,\n div_tab=div_tab)", "def main():\n # Create / clean output dir\n if os.path.isdir(OUT_DIR):\n shutil.rmtree(OUT_DIR)\n os.mkdir(OUT_DIR)\n\n # Write all assets to the directory\n for fname, bb in create_assets().items():\n filename = os.path.join(OUT_DIR, fname)\n dirname = os.path.dirname(filename)\n if not os.path.isdir(dirname):\n os.makedirs(dirname)\n with open(filename, \"wb\") as f:\n f.write(bb)", "def generate_assets(all_assets):\n\n assets = {}\n for lifecycle, products in all_assets.iteritems():\n assets[lifecycle] = []\n for product in products:\n asset_obj = OrderedDict()\n asset_obj[\"id\"] = product[0]\n asset_obj[\"division\"] = product[1]\n asset_obj[\"web_id\"] = product[4]\n asset_obj[\"brand\"] = product[3]\n asset_obj[\"category\"] = product[2]\n for frame_key, size in PRODUCT_FRAME_SIZES.iteritems():\n if size[1].lower() == '_logo':\n asset_obj[frame_key] = IMAGE_PATH + size[0] + \"_\" + product[0] + size[1] + \".png\"\n else:\n asset_obj[frame_key] = IMAGE_PATH + size[0] + \"_\" + product[0] + size[1] + \".jpg\"\n asset_obj[\"style\"] = None\n assets[lifecycle].append(asset_obj)\n import json\n with open('assets.json', 'w') as outfile:\n json.dump(assets, outfile)", "def list_assets(request):\n user_assets = Asset.objects.filter(user=request.user, deleted=False).all()\n\n json_assets = ASSET_LIST_RESOURCE.to_json(dict(\n user_id=request.user.id,\n next_page_token=uuid.uuid4(),\n assets=user_assets\n ))\n request_format = request.GET.get('format', '')\n if request_format.lower() == 'json':\n return partial_json_response(request, json_assets)\n else:\n render_data = {'resource': json.dumps(json_assets)}\n render_data.update(csrf(request))\n return render('index.html', render_data)", "def widgets(overwrite=True):\n install_nbextension(os.path.join(PKGPATH, 'static'),\n destination='molviz',\n overwrite=overwrite)", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n
<script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')", "def add_child(self, child):\n super(Img, self).add_child(child)\n\n # If this is a relative URL, it's relative to the statics directory\n # of the application\n src = self.get('src')\n if src is not None:\n self.set('src', absolute_url(src, self.renderer.head.static_url))", "def config():\n for key, value in JS_FILES.items():\n pn.config.js_files[key] = value\n pn.config.css_files.append(CSS_FILES[\"all\"])", "def create_js(self):\n for x in self.__js:\n self.__content.append(\"<script src=\\\"%s\\\"></script>\\n\"% (x))", "def asset_tag(request, key, **kwargs):\n theme = request.theme\n asset = theme.stacked_assets[key]\n settings = request.registry.settings\n should_compile = asbool(settings.get('pyramid_frontend.compile'))\n\n if should_compile:\n filename = theme.compiled_asset_path(key)\n url_path = '/compiled/' + theme.key + '/' + filename\n else:\n url_path = asset.url_path\n\n return literal(asset.tag(theme, url_path, production=should_compile,\n **kwargs))", "def __init__(self, assets):\n self._assets = assets", "def watch_assets(cli_config):\n commands = AssetsCommands(cli_config)\n commands.watch_assets()", "def extend():\n global EXTENDED # pylint: disable=global-statement\n if not EXTENDED:\n EXTENDED = True\n pn.config.raw_css.append(CODE_HILITE_PANEL_EXPRESS_CSS.read_text())", "def resources(self):", "def jquery_ui_css():\n return static_file(\"jquery-ui.css\", root=os.path.join(BASEDIR, \"css\"))", "def static_html(subpath):\n return render_template(f'static_html/{subpath}.html')", "def assets_library_url(request):\n return {\n \"PATTERN_LIBRARY_URL\": settings.PATTERN_LIBRARY_URL,\n }", "def setup_js(self):\n script = \"\"\"\n Salamat.contextData.redactorOptions = {imageGetJson: '%s'};\n \"\"\"\n script %= self.reverse('redactor_files', args=(self.namespace,\n self.prefix))\n return HttpResponse(script, content_type='text/javascript')", "def copy_assets(test_files):\n for path in test_files:\n shutil.copy(path, HOST_ASSETS_PATH)", "def render_app_html(webassets_env, service_url, api_url, sentry_public_dsn, ga_tracking_id=None, websocket_url=None,\n extra=None):\n if extra is None:\n extra = {}\n\n template = jinja_env.get_template('app.html.jinja2')\n assets_dict = app_html_context(api_url=api_url, service_url=service_url, ga_tracking_id=ga_tracking_id,\n sentry_public_dsn=sentry_public_dsn, webassets_env=webassets_env,\n websocket_url=websocket_url)\n return template.render(merge(assets_dict, extra))", "def render(self, template_name, **kwargs):\n currentUser = self.current_user\n from_workspace_str = self.get_argument(\"from_workspace\", default=\"0\", strip=False)\n from_workspace = from_workspace_str == \"1\"\n html = self.render_string(template_name, currentUser=currentUser, from_workspace = from_workspace, **kwargs)\n if from_workspace :\n scriptName = self.__class__.__name__\n\n if scriptName.endswith('Handler') :\n scriptName = scriptName[:-7] \n\n path = self.static_url('scripts/' + scriptName + '.js')\n\n js = '<script src=\"' + escape.xhtml_escape(path) + '\" type=\"text/javascript\"></script>'\n html = html + utf8(js)\n self.finish(html)\n return\n\n # Insert the additional JS and CSS added by the modules on the 
page\n js_embed = []\n js_files = []\n css_embed = []\n css_files = []\n html_heads = []\n html_bodies = []\n for module in getattr(self, \"_active_modules\", {}).values():\n embed_part = module.embedded_javascript()\n if embed_part:\n js_embed.append(utf8(embed_part))\n file_part = module.javascript_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n js_files.append(file_part)\n else:\n js_files.extend(file_part)\n embed_part = module.embedded_css()\n if embed_part:\n css_embed.append(utf8(embed_part))\n file_part = module.css_files()\n if file_part:\n if isinstance(file_part, (unicode_type, bytes_type)):\n css_files.append(file_part)\n else:\n css_files.extend(file_part)\n head_part = module.html_head()\n if head_part:\n html_heads.append(utf8(head_part))\n body_part = module.html_body()\n if body_part:\n html_bodies.append(utf8(body_part))\n\n def is_absolute(path):\n return any(path.startswith(x) for x in [\"/\", \"http:\", \"https:\"])\n if js_files:\n # Maintain order of JavaScript files given by modules\n paths = []\n unique_paths = set()\n for path in js_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n js = ''.join('<script src=\"' + escape.xhtml_escape(p) +\n '\" type=\"text/javascript\"></script>'\n for p in paths)\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + utf8(js) + b'\\n' + html[sloc:]\n if js_embed:\n js = b'<script type=\"text/javascript\">\\n//<![CDATA[\\n' + \\\n b'\\n'.join(js_embed) + b'\\n//]]>\\n</script>'\n sloc = html.rindex(b'</body>')\n html = html[:sloc] + js + b'\\n' + html[sloc:]\n if css_files:\n paths = []\n unique_paths = set()\n for path in css_files:\n if not is_absolute(path):\n path = self.static_url(path)\n if path not in unique_paths:\n paths.append(path)\n unique_paths.add(path)\n css = ''.join('<link href=\"' + escape.xhtml_escape(p) + '\" '\n 'type=\"text/css\" rel=\"stylesheet\"/>'\n for p in paths)\n hloc = html.index(b'</head>')\n html = html[:hloc] + utf8(css) + b'\\n' + html[hloc:]\n if css_embed:\n css = b'<style type=\"text/css\">\\n' + b'\\n'.join(css_embed) + \\\n b'\\n</style>'\n hloc = html.index(b'</head>')\n html = html[:hloc] + css + b'\\n' + html[hloc:]\n if html_heads:\n hloc = html.index(b'</head>')\n html = html[:hloc] + b''.join(html_heads) + b'\\n' + html[hloc:]\n if html_bodies:\n hloc = html.index(b'</body>')\n html = html[:hloc] + b''.join(html_bodies) + b'\\n' + html[hloc:]\n self.finish(html)", "def show_assets(self, update, context):\n\n # Send preliminary message\n msg = 'Some other message...'\n self.send_str(msg, update, context)\n\n # Send pic\n self.sendPic('assets.png', update, context)", "def compress():\n run_manage_cmd('compress_assets')", "def __init__(self):\n\t\tscript_src = []\n\t\tdefault_src = []\n\t\tconnect_src = []\n\t\tfile_cache = '/manifest/' if config.IS_UWSGI is False and config.CACHE_MANIFEST else config.CACHE_MANIFEST\n\n\t\tif config.CONTENT_SECURITY_POLICY_SCRIPT:\n\t\t\tscript_src.append(config.CONTENT_SECURITY_POLICY_SCRIPT)\n\t\tif config.CONTENT_SECURITY_POLICY_DEFAULT:\n\t\t\tdefault_src.append(config.CONTENT_SECURITY_POLICY_DEFAULT)\n\t\tif config.CONTENT_SECURITY_POLICY_CONNECT:\n\t\t\tconnect_src.append(config.CONTENT_SECURITY_POLICY_CONNECT)\n\n\t\tself.html += '<!DOCTYPE html>\\n'\n\t\tself.html += '<html xmlns=\"http://www.w3.org/1999/xhtml\" xml:lang=\"fr\" lang=\"fr\" manifest=\"%s\">\\n' % file_cache\n\n\t\tif
config.CACHE_MANIFEST:\n\t\t\tself.to_head('<meta http-equiv=\"Cache-Control\" content=\"no-store\" />')\n\n\t\tself.to_head(\"\"\"<meta http-equiv=\"Content-Security-Policy\" content=\"\n\t\t\t\t\t\tdefault-src 'self' %s;\n\t\t\t\t\t\tstyle-src 'self' 'unsafe-inline';\n\t\t\t\t\t\tscript-src 'self' 'unsafe-inline' 'unsafe-eval' %s;\n\t\t\t\t\t\tconnect-src 'self' %s;\n\t\t\t\t\t\timg-src 'self' data:;\n\t\t\t\t\t\"/>\\n\"\"\" % (\" \".join(default_src), \" \".join(script_src), \" \".join(connect_src))\n\t\t)", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def url_assets(self):\n return self.assets(asset_type='URL')", "def server_static(self, filepath):\n root = os.path.join(self.webbase, 'assets')\n return static_file(filepath, root=root)", "def add_javascripts_subscriber(event):\n c = event.request.tmpl_context\n c.javascripts = [\n ('spline', 'lib/jquery-1.7.1.min'),\n ('spline', 'lib/jquery.cookies-2.2.0.min'),\n ('spline', 'lib/jquery.ui-1.8.4.min'),\n ('spline', 'core'),\n ('pokedex', 'pokedex-suggestions'),\n ('pokedex', 'pokedex'), # XXX only on main pokedex pages\n ]", "def __init__(self, static_url):\n super(HeadRenderer, self).__init__()\n\n # Directory where are located the static contents of the application\n self.static_url = static_url\n\n self._named_css = {} # CSS code\n self._css_url = {} # CSS URLs\n self._named_javascript = {} # Javascript code\n self._javascript_url = {} # Javascript URLs\n\n self._order = 0 # Memorize the order of the javascript and css", "def extract_static_assets(html_data):\n static_assets = []\n\n if html_data is None:\n return static_assets\n\n soup = BeautifulSoup(html_data, 'html.parser')\n\n # Find all static images\n images = soup.find_all('img')\n for image in images:\n if image.get('src') is not None:\n static_assets.append(image.get('src'))\n\n # Fina all static scripts\n scripts = soup.find_all('script')\n for script in scripts:\n if script.get('src') is not None:\n static_assets.append(script.get('src'))\n\n # find all static links\n links = soup.find_all('link')\n for link in links:\n if 'stylesheet' in link.get('rel'):\n static_assets.append(link.get('href'))\n continue\n if 'icon' in link.get('rel'):\n static_assets.append(link.get('href'))\n continue\n if 'apple-touch-icon-precomposed' in link.get('rel'):\n static_assets.append(link.get('href'))\n\n return static_assets", "def update_config(self, config):\n toolkit.add_template_directory(config, 'templates')\n toolkit.add_public_directory(config, 'public')\n toolkit.add_resource('fanstatic', 'syngenta')", "def showAssetImage(*args):\n\n selTab = cmds.tabLayout(widgets[\"shotAssRigListTLO\"], q=True, st=True)\n\n fType = \"\"\n asset = \"\"\n assetPath = \"\"\n path = \"\"\n imagePath = \"\"\n\n if selTab == \"Chars\":\n asset = cmds.textScrollList(widgets[\"shotAssRigCharListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"characters\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an image for {0}\".format(asset[0]))\n\n if selTab == \"Props\":\n asset = cmds.textScrollList(widgets[\"shotAssRigPropListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"props\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an image for
{0}\".format(asset[0]))\n \n if selTab == \"Sets\":\n asset = cmds.textScrollList(widgets[\"shotAssRigSetListTSL\"], q=True, si=True)\n if asset:\n imagePath = cFuncs.fixPath(os.path.join(pi.assetFolder, \"sets\", asset[0], \"icon\",\"{0}Icon.png\".format(asset[0])))\n if os.path.isfile(imagePath):\n cFuncs.assetImageUI(imagePath)\n else:\n cmds.warning(\"Can't find an image for {0}\".format(asset[0]))\n \n # if selTab == \"Anm\":\n # #need to split this up\n # var_shot = cmds.textScrollList(widgets[\"shotAnmMstListTSL\"], q=True, si=True)\n # if var_shot:\n # var, buf, shot = var_shot[0].partition(\".\")\n # path = cFuncs.getVarMaster(cFuncs.fixPath(os.path.join(pi.shotsFolder, shot, \"anm\", var)))", "def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)", "def statics(file, type='img'):\n return bottle.static_file(file, root=HOME+STATIC_PATH+'/'+type)" ]
[ "0.7301845", "0.69285274", "0.6680427", "0.6377354", "0.60470253", "0.59089947", "0.5860708", "0.5840234", "0.5829316", "0.5822734", "0.5812532", "0.57396126", "0.57066774", "0.56420225", "0.556743", "0.5532212", "0.5530811", "0.5510526", "0.5499899", "0.548234", "0.54720134", "0.5444114", "0.5438915", "0.5437831", "0.5430791", "0.5418108", "0.5374133", "0.53738075", "0.5371027", "0.537017", "0.5366176", "0.5365656", "0.5342017", "0.53411627", "0.53282815", "0.53170365", "0.5315866", "0.5290852", "0.52790207", "0.5269074", "0.5267021", "0.52544487", "0.52470875", "0.52396303", "0.5234253", "0.5233208", "0.52326035", "0.52257967", "0.5217066", "0.52149314", "0.5210344", "0.52030617", "0.5200767", "0.5196917", "0.51875454", "0.5178892", "0.5175176", "0.51744056", "0.516289", "0.5160273", "0.5158351", "0.51491326", "0.514146", "0.51345587", "0.5103023", "0.510091", "0.50872874", "0.50819284", "0.50637776", "0.50518966", "0.5047976", "0.50469804", "0.50312597", "0.50292283", "0.5024557", "0.5021267", "0.50124073", "0.4988873", "0.4988339", "0.4982828", "0.49811834", "0.49786383", "0.4967277", "0.4961683", "0.49588576", "0.4948655", "0.49421895", "0.4932332", "0.4914376", "0.4905325", "0.48952627", "0.48809877", "0.48808518", "0.48798934", "0.4872814", "0.48670074", "0.48561907", "0.4853966", "0.4850434", "0.48494366" ]
0.6160059
4
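Several of the negative examples in the row above revolve around the Flask-Assets bundle pattern: an `Environment` wrapping the Flask app and `Bundle` objects registered under a name. A minimal, self-contained sketch of that pattern follows, assuming the standard Flask-Assets API; the file globs, bundle names, and output paths are illustrative assumptions, not values taken from the dataset.

```python
# Minimal sketch of the Flask-Assets bundle pattern seen in the negatives
# above. Assumes Flask and Flask-Assets are installed; file globs, bundle
# names, and output paths are illustrative assumptions.
from flask import Flask
from flask_assets import Bundle, Environment

app = Flask(__name__)
assets = Environment(app)

# Combine all stylesheets under static/css/ into one generated file
# (the "cssmin" filter additionally requires the cssmin package).
css = Bundle("css/*.css", filters="cssmin", output="gen/packed.css")
# Concatenate all scripts under static/js/ into one generated file.
js = Bundle("js/*.js", output="gen/packed.js")

assets.register("css_all", css)
assets.register("js_all", js)
# Bundles are built lazily the first time a template references them,
# e.g. {% assets "css_all" %} ... {% endassets %}.
```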
Register an asset required by a dashboard module. Some modules require special scripts or stylesheets, like the
def register_module_asset(self, asset): self._module_assets.append(asset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_xmodule_assets():\r\n sh('xmodule_assets common/static/xmodule')", "def assets():\n pass", "def assets():", "def script_info_assets(app, static_dir, testcss):\n InvenioAssets(app)\n\n blueprint = Blueprint(__name__, \"test_bp\", static_folder=static_dir)\n\n class Ext(object):\n def __init__(self, app):\n assets = app.extensions[\"invenio-assets\"]\n app.register_blueprint(blueprint)\n\n Ext(app)\n\n yield ScriptInfo(create_app=lambda: app)", "def add_assets_mapping(config, mapping):\n assets = config.registry.queryUtility(IAssets) or Assets()\n assets.update(mapping)\n config.registry.registerUtility(assets, IAssets)", "def add_asset(self, asset_name):\r\n self._assets.extend(asset_name)", "def register_scss(assets):\n assets.append_path(app.static_folder, app.static_url_path)\n assets.config['SASS_PATH'] = 'app/scss'\n\n bundle = Bundle('scss/client.scss',\n output='css/gen/client.%(version)s.css',\n depends=('**/*.scss'),\n filters=('scss', 'cssmin'))\n assets.register('scss_client', bundle)", "def test_import_system_asset(self):\n pass", "def register_module():\n\n # Course Dashboard\n tabs.Registry.register(\n base.OfflineAssignmentBase.DASHBOARD_NAV,\n base.OfflineAssignmentBase.DASHBOARD_TAB,\n base.OfflineAssignmentBase.DESCRIPTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler)\n\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.DASHBOARD_DEFAULT_ACTION, None)\n\n dashboard.DashboardHandler.add_nav_mapping(\n base.OfflineAssignmentBase.DASHBOARD_NAV,\n base.OfflineAssignmentBase.NAME,\n )\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.OFFLINE_ASSIGNMENT_DETAILS_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.get_assignment_scores\n )\n\n dashboard.DashboardHandler.add_custom_get_action(\n base.OfflineAssignmentBase.SCORE_OFFLINE_ASSIGNMENT_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.get_bulk_score\n )\n\n dashboard.DashboardHandler.add_custom_post_action(\n base.OfflineAssignmentBase.SCORE_OFFLINE_ASSIGNMENT_ACTION,\n off_ass_dashboard.OfflineAssignmentDashboardHandler.post_bulk_score\n )\n\n # Course Staff Custom Handlers\n evaluate.EvaluationHandler.add_custom_get_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.LIST_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.get_list_offline\n )\n\n evaluate.EvaluationHandler.add_custom_get_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.EVALUATE_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.get_evaluate_offline\n )\n\n evaluate.EvaluationHandler.add_custom_post_action(\n offline_course_staff.OfflineAssignmentsCourseStaffBase.POST_SCORE_ACTION,\n offline_course_staff.OfflineAssignmentsCourseStaffHandler.post_score_offline\n )\n\n associated_js_files_handlers = [\n ('/modules/offline_assignments/editor/(.*)', sites.make_zip_handler(\n os.path.join(\n appengine_config.BUNDLE_ROOT,\n 'modules/offline_assignments/lib/ckeditor.zip'))),\n (\n settings.OfflineAssignmentRESTHandler.URI,\n settings.OfflineAssignmentRESTHandler\n )\n ]\n\n\n question_handlers = [\n (base.OfflineAssignmentBase.UNIT_URL,\n assignment.OfflineAssignmentHandler),\n (question.OfflineAssignmentRESTHandler.URI,\n question.OfflineAssignmentRESTHandler)]\n\n global custom_module\n custom_module = custom_modules.Module(\n base.OfflineAssignmentBase.NAME,\n base.OfflineAssignmentBase.DESCRIPTION,\n associated_js_files_handlers, question_handlers)\n\n custom_unit = 
custom_units.CustomUnit(\n base.OfflineAssignmentBase.UNIT_TYPE_ID,\n base.OfflineAssignmentBase.NAME,\n question.OfflineAssignmentRESTHandler,\n visible_url,\n cleanup_helper=delete_assignement,\n import_helper=import_assignment,\n is_graded=True)\n\n # Add custom unit details to course staff module\n course_staff.CourseStaff.add_custom_unit(\n base.OfflineAssignmentBase.UNIT_TYPE_ID,\n offline_course_staff.OfflineAssignmentsCourseStaffBase.LIST_ACTION)\n\n return custom_module", "def asset(atype, aname):\n if atype not in ('css', 'js'):\n raise template.TemplateSyntaxError('Type can only be one of css or js.')\n\n if aname not in ASSETS[atype]:\n raise ValueError('Invalid asset: %r' % aname)\n\n meta = ASSETS[atype][aname]\n\n return {\n 'USE_MINIFIED': USE_MINIFIED,\n 'type': atype,\n 'asset': aname,\n 'meta': meta,\n }", "def addAsset(self, name, asset):\n self.__assets[name] = asset\n return True", "def add_asset(urn: str, asset: str, validate_assets: bool) -> None:\n\n if not urn.startswith(\"urn:li:dataProduct:\"):\n urn = f\"urn:li:dataProduct:{urn}\"\n dataproduct_patcher: DataProductPatchBuilder = DataProduct.get_patch_builder(urn)\n dataproduct_patcher.add_asset(asset)\n with get_default_graph() as graph:\n _abort_if_non_existent_urn(graph, urn, \"add assets\")\n if validate_assets:\n _abort_if_non_existent_urn(\n graph,\n asset,\n \"add assets. Use --no-validate-assets if you want to turn off validation\",\n )\n for mcp in dataproduct_patcher.build():\n graph.emit(mcp)", "def add_latesettings_assets(self):\n\n # setting up static file serving\n assetmanager = self.comp('assetmanager')\n\n # add external asset mount point where we can copy public static files so they can be served by a separate traditional web server\n # presumably this directory is being served by a more traditional webserver, at this url we specify below\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_ExternalServer('external_assets', filepath = '${mewlofilepath}/public_assets', urlabs = 'http://127.0.0.1/mewlo/mewlo/public_assets' )\n )\n\n # add internal asset mount point where we will serve files internally; a route will be automatically created for any asset source attached to this mount point; we can choose the path prefix for urls served by the route\n assetmanager.add_assetmount(\n massetmanager.MewloAssetMount_InternalRoute('internal_assets', urlpath='assets')\n )\n\n\n # now that we have some mount points, we can specify some files to be hosted on them\n # note that the ids for all asset sources MUST be unique (ATTN:TODO elaborate on this please)\n # first we mount the files in the staticfilesource/ directory as internal assets that we will serve internally via mewlo; the id will be used for alias creation, and for the route\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteinternal', mountid = 'internal_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n # then as a test, lets mount same files on the external mount point -- this will cause mewlo to physically copy the files to the external filepath, where presumably another web server can serve them\n assetmanager.add_assetsource(\n massetmanager.MewloAssetSource(id='siteexternal', mountid = 'external_assets', filepath = '${sitefilepath}/staticfilesource', mnamespace=None)\n )\n\n # remember that one should never refer to the assets by a hardcoded url or file path; always use the aliases created by these functions, which will take the form (where ID is the id of the asset source):\n # 
'asset_ID_urlrel' | 'asset_ID_urlabs' | 'asset_ID_filepath'\n # you can also use helper function to build these names, which would be better.", "def configure_ext_assets(app, xstatic):\n assets = Environment(app)\n coffee_lib = Bundle(\n 'coffee/lib/*.coffee',\n filters='coffeescript',\n output='gen/lib.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee_pages = Bundle(\n 'coffee/pages/*.coffee',\n filters='coffeescript',\n output='gen/pages.js'\n )\n assets.register('coffee_lib', coffee_lib)\n coffee = Bundle(\n coffee_lib,\n coffee_pages,\n output='gen/app.js'\n )\n assets.register('coffee_app', coffee)\n\n coffee_spec = Bundle(\n 'coffee/spec/*.coffee',\n filters='coffeescript',\n output='gen/coffee_spec.js'\n )\n assets.register('coffee_spec', coffee_spec)\n\n vendor_js = Bundle(\n os.path.join(xstatic.path_for('jquery'), 'jquery.min.js'),\n 'vendor/pdfjs-' + app.config['X_PDFJS_VERSION'] + '-dist/build/pdf.js',\n 'vendor/jquery.jeditable.mini.js',\n 'vendor/jquery-ui-1.11.2/jquery-ui.min.js',\n output='gen/vendor_js.js',\n )\n assets.register('vendor_js', vendor_js)\n\n scss_bundle = Bundle(\n 'scss/site.scss',\n depends='**/*.scss',\n filters='pyscss',\n output='gen/app.css'\n )\n assets.register('scss_all', scss_bundle)\n\n this_dir = os.path.dirname(os.path.abspath(__file__))\n scss.config.LOAD_PATHS = [\n os.path.join(xstatic.path_for('bootstrap_scss'), 'scss'),\n os.path.join(this_dir, '../static/vendor/bootswatch-darkly'),\n ]", "def createAsset(assFolder, *args):\n createAssetUI(assFolder)", "def autoload():\r\n global _ASSETS_LOADED\r\n if _ASSETS_LOADED:\r\n return False\r\n\r\n # Import this locally, so that we don't have a global Django\r\n # dependency.\r\n from django.conf import settings\r\n\r\n for app in settings.INSTALLED_APPS:\r\n # For each app, we need to look for an assets.py inside that\r\n # app's package. We can't use os.path here -- recall that\r\n # modules may be imported different ways (think zip files) --\r\n # so we need to get the app's __path__ and look for\r\n # admin.py on that path.\r\n #if options.get('verbosity') > 1:\r\n # print \"\\t%s...\" % app,\r\n\r\n # Step 1: find out the app's __path__ Import errors here will\r\n # (and should) bubble up, but a missing __path__ (which is\r\n # legal, but weird) fails silently -- apps that do weird things\r\n # with __path__ might need to roll their own registration.\r\n try:\r\n app_path = import_module(app).__path__\r\n except AttributeError:\r\n #if options.get('verbosity') > 1:\r\n # print \"cannot inspect app\"\r\n continue\r\n\r\n # Step 2: use imp.find_module to find the app's assets.py.\r\n # For some reason imp.find_module raises ImportError if the\r\n # app can't be found but doesn't actually try to import the\r\n # module. So skip this app if its assetse.py doesn't exist\r\n try:\r\n imp.find_module('assets', app_path)\r\n except ImportError:\r\n #if options.get('verbosity') > 1:\r\n # print \"no assets module\"\r\n continue\r\n\r\n # Step 3: import the app's assets file. 
If this has errors we\r\n # want them to bubble up.\r\n import_module(\"%s.assets\" % app)\r\n #if options.get('verbosity') > 1:\r\n # print \"assets module loaded\"\r\n\r\n # Load additional modules.\r\n for module in getattr(settings, 'ASSETS_MODULES', []):\r\n import_module(\"%s\" % module)\r\n\r\n _ASSETS_LOADED = True", "def compile_static_assets(assets):\n assets.auto_build = True\n assets.debug = False\n\n css = Bundle(\n \"css/*.css\",\n # filters=\"less,cssmin\",\n output=\"gen/avantui.css\",\n # extra={\"rel\": \"stylesheet/less\"},\n )\n\n js = Bundle(\n \"js/*.js\",\n output='gen/avantui.js'\n )\n\n assets.register(\"avantui_css\", css)\n assets.register(\"avantui_js\", js)\n if app.config[\"ENV\"] == \"development\":\n css.build()\n js.build()\n return assets", "def test_import_test_asset(self):\n pass", "def test_create_system_asset(self):\n pass", "def includeme(config):\n\n config.add_translation_dirs('kotti_dashboard:locale')\n config.add_static_view('static-kotti_dashboard', 'kotti_dashboard:static')\n\n config.scan(__name__)", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", 
icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", "def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...", "def asset_tag(request, key, **kwargs):\n theme = request.theme\n asset = theme.stacked_assets[key]\n settings = request.registry.settings\n should_compile = asbool(settings.get('pyramid_frontend.compile'))\n\n if should_compile:\n filename = theme.compiled_asset_path(key)\n url_path = '/compiled/' + theme.key + '/' + filename\n else:\n url_path = asset.url_path\n\n return literal(asset.tag(theme, url_path, production=should_compile,\n **kwargs))", "def test_import_software_asset(self):\n pass", "def assets_library_url(request):\n 
return {\n \"PATTERN_LIBRARY_URL\": settings.PATTERN_LIBRARY_URL,\n }", "def _add_static_files(self, req):\n add_script(req, self._get_jqplot('jquery.jqplot'))\n add_stylesheet(req, 'common/js/jqPlot/jquery.jqplot.css')\n # excanvas is needed for IE8 support\n add_script(req, self._get_jqplot('excanvas.min'))\n add_script(req, self._get_jqplot('plugins/jqplot.dateAxisRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.highlighter'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasTextRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisTickRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.canvasAxisLabelRenderer'))\n add_script(req, self._get_jqplot('plugins/jqplot.enhancedLegendRenderer'))", "def custom_asset(parser, token):\n \n library = global_asset_library()\n \n try:\n parts = token.split_contents()\n asset_type = None\n \n if len(parts) == 2:\n tag_name, asset_path = parts\n elif len(parts) == 3:\n tag_name, asset_path, asset_type = parts\n else:\n raise ValueError\n except ValueError:\n raise template.TemplateSyntaxError(\"%r tag invalid arguments\" % token.contents.split())\n \n for s in [ '\"', '\"' ]:\n if (asset_path.startswith(s) and asset_path.endswith(s)):\n asset_path = asset_path[len(s):-len(s)]\n break\n \n if asset_type is None:\n if asset_path in ASSET_TYPES:\n asset_type = None\n else:\n asset_type = asset_path[asset_path.rfind('.') + 1:]\n \n return CustomAssetNode(asset_path, asset_type, library)", "def register_dcc_resource_path(resources_path):\n\n pass", "def _create_assets(self):\n\n assets = Environment(self.app)\n # jQuery is served as a standalone file\n jquery = Bundle('js/jquery-*.min.js', output='gen/jquery.min.js')\n # JavaScript is combined into one file and minified\n js_all = Bundle('js/js_all/*.js',\n filters='jsmin',\n output='gen/app.min.js')\n # SCSS (Sassy CSS) is compiled to CSS\n scss_all = Bundle('scss/app.scss',\n filters='libsass',\n output='gen/app.css')\n assets.register('jquery', jquery)\n assets.register('js_all', js_all)\n assets.register('scss_all', scss_all)\n return assets", "def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)", "def register_module():\n\n namespaced_handlers = [(ForceResponseHandler.URL, ForceResponseHandler)]\n return custom_modules.Module(\n 'FakeVisualizations', 'Provide visualizations requiring simple, '\n 'paginated, and multiple data streams for testing.',\n [], namespaced_handlers, register_on_enable, None)", "def _add_req(request, group, reqtype, unique_id, filename=None, block=None):\n # prep the dicts for this request/asset_type/group combination\n _init_requested_assets(request)\n requirements = requested_assets[request][reqtype][group]\n\n # only include each requirement once\n if unique_id and unique_id in requested_assets_unique[request]:\n return \"\"\n requested_assets_unique[request][unique_id] = True\n\n # build the appropriate request object\n # TODO: only js/css allowed right now\n if reqtype == \"js\":\n if filename:\n req = JSFile(filename)\n else:\n req = JSBlock(block)\n\n elif reqtype == \"css\":\n if filename:\n req = CSSFile(filename)\n else:\n req = CSSBlock(block)\n\n if request.is_ajax() and req.type == \"block\":\n return req.render()\n else:\n # build the token that gets embedded in the raw html\n token = ASSET_DEFS[reqtype]['token']\n token = token.replace(\"<GROUP>\", group)\n 
token = token.replace(\"<INDEX>\", str(len(requirements)))\n\n # finally, add the asset\n requirements.append( req )\n\n return token", "def test_replace_software_asset_for_software_component(self):\n pass", "def test_create_software_asset_bundle_from_system_module(self):\n pass", "def add_asset(self, asset, replace=False):\n assert replace or asset.short_name() not in self._assets, (\n f'Attempting to add duplicate Asset: {asset.short_name()}')\n self._assets[asset.short_name()] = asset\n return self", "def _load_asset_definitions(self, asset_mod: types.ModuleType) -> None:\n self.assets = []\n for module_item_str in dir(asset_mod):\n module_item = getattr(asset_mod, module_item_str)\n if module_item.__class__ != type:\n continue\n if not issubclass(module_item, Asset):\n continue\n if not hasattr(module_item, \"name\"):\n continue\n logger.info(f\"loading asset config {module_item}\")\n self.assets.append(module_item)\n # TODO: only add assets that are leaves", "def includeme(config, get_raven=None, panel=None):\n \n # Compose.\n if get_raven is None: #pragma: no cover\n get_raven = get_raven_client\n if panel is None: #pragma: no cover\n panel = raven_js_panel\n \n # Unpack.\n settings = config.registry.settings\n \n # Provide the client as ``request.raven``.\n config.add_request_method(get_raven, 'raven', reify=True)\n \n # Configure the ``raven-js`` panel.\n if hasattr(config, \"add_panel\"):\n # Soft detect if we have pyramid_layout installed\n default_tmpl = 'pyramid_raven:templates/panel.mako'\n panel_tmpl = settings.get('pyramid_raven.panel_tmpl', default_tmpl)\n config.add_panel(panel, 'raven-js', renderer=panel_tmpl)", "def test_update_software_asset_install_script(self):\n pass", "def test_static_package_resource(self):\n resource = StaticResource('pyramid_webpack:jinja2ext.py')\n import pyramid_webpack.jinja2ext\n with resource.open() as i:\n self.assertEqual(i.read(),\n inspect.getsource(pyramid_webpack.jinja2ext))", "def _assets_url(self):\r\n return \"/assets/\" + self._course_key + \"/\"", "def test_enable_extension_registers_static_bundles(self):\n class TestExtension(Extension):\n css_bundles = {\n 'default': {\n 'source_filenames': ['test.css'],\n }\n }\n\n js_bundles = {\n 'default': {\n 'source_filenames': ['test.js'],\n }\n }\n\n pipeline_settings.STYLESHEETS = {}\n pipeline_settings.JAVASCRIPT = {}\n\n extension = self.setup_extension(TestExtension)\n\n self.assertEqual(len(pipeline_settings.STYLESHEETS), 1)\n self.assertEqual(len(pipeline_settings.JAVASCRIPT), 1)\n\n key = '%s-default' % extension.id\n self.assertIn(key, pipeline_settings.STYLESHEETS)\n self.assertIn(key, pipeline_settings.JAVASCRIPT)\n\n css_bundle = pipeline_settings.STYLESHEETS[key]\n js_bundle = pipeline_settings.JAVASCRIPT[key]\n\n self.assertIn('source_filenames', css_bundle)\n self.assertEqual(css_bundle['source_filenames'],\n ['ext/%s/test.css' % extension.id])\n\n self.assertIn('output_filename', css_bundle)\n self.assertEqual(css_bundle['output_filename'],\n 'ext/%s/css/default.min.css' % extension.id)\n\n self.assertIn('source_filenames', js_bundle)\n self.assertEqual(js_bundle['source_filenames'],\n ['ext/%s/test.js' % extension.id])\n\n self.assertIn('output_filename', js_bundle)\n self.assertEqual(js_bundle['output_filename'],\n 'ext/%s/js/default.min.js' % extension.id)", "def AddAssetResourceArg(parser, verb, positional=True):\n name = 'asset' if positional else '--asset'\n return concept_parsers.ConceptParser.ForResource(\n name,\n GetAssetResourceSpec(),\n 'The Asset 
{}'.format(verb),\n required=True).AddToParser(parser)", "def register_dashboard(self, function):\n\n plugin_file = None\n frame = inspect.stack()[1]\n\n if hasattr(frame, 'filename'):\n # Changed from Python 3.5\n plugin_file = os.path.basename(os.path.normpath(frame.filename))\n else:\n plugin_file = os.path.basename(os.path.normpath(frame[1]))\n\n plugin_name = plugin_file.split(\".\")[0]\n if plugin_name in self.dashboard_handlers:\n L.error(\"Error:\" + plugin_name + \" is already in dashboard\")\n return False\n self.dashboard_handlers[plugin_name] = function\n return True", "def loadAsset(self, *args):\n\n asset = OL.loadAssemblyReference(self.name)\n return asset", "def linkAssets(des, Xrc):\n with open(des, 'r') as f:\n body = f.read()\n f.close()\n with open(des, 'w') as f:\n body = body.replace(\"custom.css\", \"\\\\\" + Xrc[\"gh_repo_name\"] + \"/Assets\" + \"/css\" + \"/custom.css\")\n f.write(body)\n f.close()\n ccc.success(\"linking assets to \" + des)", "def register_loader(key, module):\n register(key, module, loader_dict)", "def include_admin_script(script_path):\n if not absolute_url_re.match(script_path):\n script_path = '%s%s' % (settings.ADMIN_MEDIA_PREFIX, script_path)\n return '<script type=\"text/javascript\" src=\"%s\"></script>' % script_path", "def register(self, path, type='js'):\n if not type in ('js', 'css'):\n raise ValueError('Only js or css types are supported.')\n if type == 'js':\n JS_EXTENSIONS.append(path)\n else:\n CSS_EXTENSIONS.append(path)", "def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content", "def test_itar_restrict_asset(self):\n pass", "def assets(self, pattern, handler):\n return self.route(GET, pattern, handler)", "def asset(self, asset):\n\n self._asset = asset", "def test_itar_restrict_test_asset(self):\n pass", "def assets_publish(ctx, metadata, brizo, price, service_endpoint, timeout):\n from .api.assets import create\n response = create(metadata,\n secret_store=not brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout,\n ocean=ctx.obj['ocean'])\n echo(response)", "def __init__(self, entity_name: str, entity_id: str) -> None:\n self._validate_entity_parameters(entity_name, entity_id)\n self._assets_path = '%s/%s/assets' % (entity_name, entity_id)", "def register_stage(key, module):\n register(key, module, stage_dict)", "def ext_assets(assets: t.List[str]) -> str:\n # XXX: External assets are deprecated, so this function serves them as internal\n # assets\n return gen_assets_url(assets)", "def setup(self, registry):\n logger.info(\"%s %s\", self.dist_name, self.version)\n\n from .frontend import AutoplayFrontend\n registry.add(\"frontend\", AutoplayFrontend)", "def __init__(self, assets):\n self._assets = assets", "def get_asset_path(name):\n return os.path.join(constants.ROOT_DIR, 'assets', name)", "def run(cmd):\n assets.main([cmd, ASSET_NAME] + sys.argv[1:])", "def test_update_software_asset_bundle(self):\n pass", "def assets_handler(request, course_key_string=None, asset_key_string=None):\r\n course_key = CourseKey.from_string(course_key_string)\r\n if not has_course_access(request.user, course_key):\r\n raise PermissionDenied()\r\n\r\n response_format = request.REQUEST.get('format', 'html')\r\n if 
response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):\r\n if request.method == 'GET':\r\n return _assets_json(request, course_key)\r\n else:\r\n asset_key = AssetKey.from_string(asset_key_string) if asset_key_string else None\r\n return _update_asset(request, course_key, asset_key)\r\n elif request.method == 'GET': # assume html\r\n return _asset_index(request, course_key)\r\n else:\r\n return HttpResponseNotFound()", "def third_party_scripts(request):\n return {\n 'ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE':\n settings.ORCHESTRA_THIRD_PARTY_SCRIPTS_TEMPLATE\n }", "def requireBlock(request, blocktype, content, name, group='default'):\n reqtype = {\n 'script': 'js',\n 'style': 'css',\n }[blocktype]\n\n tag = _add_req(request, group, reqtype, name, block=content )\n return tag", "def add_handlers(web_app, config):\n base_url = web_app.settings['base_url']\n url = ujoin(base_url, config.page_url)\n assets_dir = config.assets_dir\n\n package_file = os.path.join(assets_dir, 'package.json')\n with open(package_file) as fid:\n data = json.load(fid)\n\n config.version = (config.version or data['jupyterlab']['version'] or\n data['version'])\n config.name = config.name or data['jupyterlab']['name']\n\n handlers = [\n (url + r'/?', LabHandler, {\n 'lab_config': config\n }),\n (url + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n }),\n\n ]\n\n # Backward compatibility.\n if 'publicPath' in data['jupyterlab']:\n handlers.append(\n (data['jupyterlab']['publicPath'] + r\"/(.*)\", FileFindHandler, {\n 'path': assets_dir\n })\n )\n\n web_app.add_handlers(\".*$\", handlers)", "def _asset_index(request, course_key):\r\n course_module = modulestore().get_course(course_key)\r\n\r\n return render_to_response('asset_index.html', {\r\n 'context_course': course_module,\r\n 'asset_callback_url': reverse_course_url('assets_handler', course_key)\r\n })", "def test_update_system_asset(self):\n pass", "async def async_setup(hass, config):\n\n url = f'/api/panel_custom/{DOMAIN}/main.js'\n location = hass.config.path(f'custom_components/{DOMAIN}/main.js')\n hass.http.register_static_path(url, location)\n hass.components.frontend.async_register_built_in_panel(\n component_name=\"custom\",\n sidebar_title=\"Z2M\",\n sidebar_icon=\"mdi:zigbee\",\n frontend_url_path=\"z2m-la\",\n config={\n \"_panel_custom\": {\n \"name\": \"z2m-light-admin\",\n \"embed_iframe\": True,\n \"trust_external\": True,\n \"js_url\": url\n }\n },\n require_admin=True,\n )\n return True", "def __init__(self, static_url):\n super(AsyncHeadRenderer, self).__init__(static_url=static_url)\n\n self._anonymous_css = [] # CSS\n self._anonymous_javascript = [] # Javascript code", "def inject_script(widget_id, options):\n\n request = current.request\n s3 = current.response.s3\n\n # Static script\n if s3.debug:\n script = \"/%s/static/scripts/S3/s3.ui.anonymize.js\" % \\\n request.application\n else:\n script = \"/%s/static/scripts/S3/s3.ui.anonymize.min.js\" % \\\n request.application\n scripts = s3.scripts\n if script not in scripts:\n scripts.append(script)\n\n # Widget options\n opts = {}\n if options:\n opts.update(options)\n\n # Widget instantiation\n script = '''$('#%(widget_id)s').anonymize(%(options)s)''' % \\\n {\"widget_id\": widget_id,\n \"options\": json.dumps(opts),\n }\n jquery_ready = s3.jquery_ready\n if script not in jquery_ready:\n jquery_ready.append(script)", "def __init__(self, static_url):\n super(HeadRenderer, self).__init__()\n\n # Directory where are located the static contents of 
the application\n self.static_url = static_url\n\n self._named_css = {} # CSS code\n self._css_url = {} # CSS URLs\n self._named_javascript = {} # Javascript code\n self._javascript_url = {} # Javascript URLs\n\n self._order = 0 # Memorize the order of the javascript and css", "def add_static_paths(app):\n app.env.book_theme_resources_changed = False\n\n output_static_folder = Path(app.outdir) / \"_static\"\n theme_static_files = resources.contents(theme_static)\n\n if (\n app.config.html_theme_options.get(\"theme_dev_mode\", False)\n and output_static_folder.exists()\n ):\n # during development, the JS/CSS may change, if this is the case,\n # we want to remove the old files and ensure that the new files are loaded\n for path in output_static_folder.glob(\"sphinx-book-theme*\"):\n if path.name not in theme_static_files:\n app.env.book_theme_resources_changed = True\n path.unlink()\n # note sphinx treats theme css different to regular css\n # (it is specified in theme.conf), so we don't directly use app.add_css_file\n for fname in resources.contents(theme_static):\n if fname.endswith(\".css\"):\n if not (output_static_folder / fname).exists():\n (output_static_folder / fname).write_bytes(\n resources.read_binary(theme_static, fname)\n )\n app.env.book_theme_resources_changed = True\n\n # add javascript\n for fname in resources.contents(theme_static):\n if fname.endswith(\".js\"):\n app.add_js_file(fname)", "def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)", "def render_asset(self, name):\n result = \"\"\n if self.has_asset(name):\n asset = self.get_asset(name)\n if asset.files:\n for f in asset.files:\n result += f.render_include() + \"\\r\\n\"\n return result", "def test_get_test_asset(self):\n pass", "def register_path(config, spec, discovery=False, indexes=[], request_type=None):\n\n package_name, path = resolve_asset_spec(spec)\n if package_name is not None:\n path = pkg_resources.resource_filename(package_name, path)\n else:\n path = caller_path(path)\n\n if package_name is None: # absolute filename\n package = config.package\n else:\n __import__(package_name)\n package = sys.modules[package_name]\n context = ConfigurationMachine()\n context.registry = config.registry\n context.autocommit = False\n context.package = package\n context.route_prefix = getattr(config, 'route_prefix', None)\n\n directive = skins(context, path, discovery, request_type)\n for index in indexes:\n directive.view(config, index)\n\n for action in directive():\n config.action(*action)", "def add_url_asset(self, value):\n return self.add_asset('URL', value)", "def resourcePath(relative):\r\n try:\r\n # PyInstaller creates a temp folder and stores path in _MEIPASS\r\n base_path = sys._MEIPASS\r\n except Exception:\r\n base_path = os.path.abspath(os.path.join(os.path.dirname(sys.modules[__name__].__file__), 'assets'))\r\n\r\n return os.path.join(base_path, relative)", "def includeme(config):\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_subscriber(add_localizer, NewRequest)\n config.add_subscriber(add_csrf_validation, NewRequest)\n config.add_subscriber(add_resources, NewRequest)", "def post_asset_update(lock, 
course):\r\n upload_date = datetime(2013, 6, 1, 10, 30, tzinfo=UTC)\r\n asset_location = course.id.make_asset_key('asset', 'sample_static.txt')\r\n url = reverse_course_url('assets_handler', course.id, kwargs={'asset_key_string': unicode(asset_location)})\r\n\r\n resp = self.client.post(\r\n url,\r\n json.dumps(assets._get_asset_json(\"sample_static.txt\", upload_date, asset_location, None, lock)),\r\n \"application/json\"\r\n )\r\n self.assertEqual(resp.status_code, 201)\r\n return json.loads(resp.content)", "def builder_inited(app):\n if app.config.offline_skin_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_skin_js_path))\n else:\n app.add_javascript(ONLINE_SKIN_JS)\n if app.config.offline_wavedrom_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_wavedrom_js_path))\n else:\n app.add_javascript(ONLINE_WAVEDROM_JS)", "def add_renderer_globals(event):\n request = event.get('request')\n # add globals for i18n\n event['_'] = request.translate\n event['localizer'] = request.localizer\n # add application globals from the config file\n settings = request.registry.settings\n event['brand_name'] = settings['anuket.brand_name']", "def add_static(ext):\n ext = ext.lower()\n\n compiler = StaticCompiler(ext)\n file_list = compiler.get_staticfiles_list()\n\n return render_to_string(\n \"mub/context_%s.html\" % ext,\n {\n \"items\": file_list,\n \"STATIC_URL\": settings.STATIC_URL,\n \"IS_MINIFIED\": compiler.is_minified\n }\n )", "def register(self, module):\n tagvalues = \"\\n\".join([\"%s: %s\" % (attr, str(getattr(module, attr))) for attr in dir(module) if attr in ['create', 'menu', 'name', 'label'] ])\n # tagvalues = \"\\n\".join([\"%s\" % (attr) for attr in dir(module) if attr not in ['urls'] ])\n logger.debug(\"module {} registered.\\ndir : {}\".format(module.label, tagvalues ))\n self._registry[module.label] = module\n self._modules[module.name] = module\n pass", "def add_custom_asset(self, custom_asset: CustomAsset) -> str:\n self._raise_if_custom_asset_exists(custom_asset)\n with GlobalDBHandler().conn.write_ctx() as global_db_write_cursor:\n global_db_write_cursor.execute(\n 'INSERT INTO assets(identifier, name, type) VALUES (?, ?, ?)',\n (\n custom_asset.identifier,\n custom_asset.name,\n AssetType.CUSTOM_ASSET.serialize_for_db(),\n ),\n )\n global_db_write_cursor.execute(\n 'INSERT INTO custom_assets(identifier, type, notes) VALUES(?, ?, ?)',\n custom_asset.serialize_for_db(),\n )\n with self.db.user_write() as db_write_cursor:\n self.db.add_asset_identifiers(db_write_cursor, [custom_asset.identifier])\n return custom_asset.identifier", "def ext_css_bundle(context, extension, name):\n return _render_css_bundle(context, extension, name)", "def add_assets(char_code, name, capital, interest):\n try:\n capital = float(capital)\n interest = float(interest)\n except:\n redirect(url_for(\"page_not_found\"))\n if name in app.bank:\n abort(403)\n app.bank[name] = Asset(name, char_code, capital, interest)\n return f\"Asset '{name}' was successfully added\", 200", "def test_register_dynamic_plugin(self):\n pass", "def _register_routes(self):\n dashboard = self\n\n @dashboard.app.after_request\n def prevent_caching(response):\n if 'Cache-Control' not in response.headers:\n response.headers['Cache-Control'] = 'no-store'\n return response\n\n @dashboard.app.context_processor\n def injections():\n session.setdefault('enabled_modules',\n [i for i in range(len(self.modules))\n if self.modules[i].enabled])\n return {\n 'APP_NAME': 'signac-dashboard',\n 
'APP_VERSION': __version__,\n 'PROJECT_NAME': self.project.config['project'],\n 'PROJECT_DIR': self.project.config['project_dir'],\n 'modules': self.modules,\n 'enabled_modules': session['enabled_modules'],\n 'module_assets': self._module_assets\n }\n\n # Add pagination support from http://flask.pocoo.org/snippets/44/\n @dashboard.app.template_global()\n def url_for_other_page(page):\n args = request.args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\n\n @dashboard.app.template_global()\n def modify_query(**new_values):\n args = request.args.copy()\n for key, value in new_values.items():\n args[key] = value\n return '{}?{}'.format(request.path, url_encode(args))\n\n @dashboard.app.errorhandler(404)\n def page_not_found(error):\n return self._render_error(str(error))\n\n self.add_url('views.home', ['/'])\n self.add_url('views.settings', ['/settings'])\n self.add_url('views.search', ['/search'])\n self.add_url('views.jobs_list', ['/jobs/'])\n self.add_url('views.show_job', ['/jobs/<jobid>'])\n self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>'])\n self.add_url('views.change_modules', ['/modules'], methods=['POST'])", "def test_register_dynamic_plugin1(self):\n pass", "def test_existing_content_asset(self):\n with self.app.test_client() as client:\n response = client.get('/contentassets/logo.png')\n\n self.assert200(response)", "def get_default_javascript():\n return [\"_static/require.js\"]", "def ext_static(context, extension, path):\n return static('ext/%s/%s' % (extension.id, path))", "def get_assets_dir(cls) -> Path:\n return cls._module_base_dir / \"assets\"", "def ext_js_bundle(context, extension, name):\n return _render_js_bundle(context, extension, name)", "def setAsmLocator(self, assetName) : \n abcAssetHero = sd_utils.getAsmLocator(assetName, mode='asset')\n abcShotHero = sd_utils.getAsmLocator(assetName, mode='shot')\n\n if os.path.exists(abcAssetHero) : \n assetText = str(abcAssetHero)\n \n else : \n assetText = '[File not found] %s' % str(abcAssetHero)\n\n if os.path.exists(abcShotHero) : \n shotText = str(abcShotHero)\n\n else : \n shotText = '[File not found] %s' % str(abcShotHero)\n \n self.ui.asset_lineEdit.setText(assetText)\n self.ui.shot_lineEdit.setText(shotText)", "def init_extra_module(self, component_instance, function, mw_data):\n\n component_name = component_instance.blender_obj.name\n parent_name = component_instance.robot_parent.blender_obj.name\n # Check if the name of the poster has been given in mw_data\n try:\n # It should be the 4th parameter\n poster_name = mw_data[3]\n except IndexError as detail:\n # Compose the name of the poster, based on the parent and module names\n poster_name = 'viman_{0}_{1}'.format(parent_name, component_name)\n\n poster_id = init_viman_poster(self, component_instance, poster_name)\n if poster_id != None:\n logger.info(\"Pocolibs created poster of type viman\")\n component_instance.output_functions.append(function)\n # Store the name of the port\n self._poster_dict[component_name] = poster_id", "def test_add_category_to_asset(self):\n pass", "def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))" ]
[ "0.6323401", "0.6234134", "0.5986547", "0.57688546", "0.57355887", "0.57000977", "0.5596467", "0.5571452", "0.55544966", "0.55511576", "0.5550094", "0.54957074", "0.548881", "0.54596376", "0.5429053", "0.537195", "0.5304316", "0.52905613", "0.52882594", "0.5239327", "0.5237541", "0.52305305", "0.5193852", "0.5167999", "0.5150966", "0.5135034", "0.5114002", "0.5109932", "0.5101455", "0.5091949", "0.5059941", "0.50462294", "0.5026475", "0.50046545", "0.5000261", "0.4974199", "0.49625438", "0.49458936", "0.49430773", "0.4930109", "0.49166137", "0.48858058", "0.48673597", "0.4839493", "0.48322377", "0.48220688", "0.48194554", "0.48075587", "0.48015222", "0.4798843", "0.47903097", "0.47864115", "0.47470292", "0.47253907", "0.47036675", "0.46998948", "0.46986997", "0.4686945", "0.46763352", "0.4673215", "0.4671748", "0.46648857", "0.4664506", "0.46464956", "0.4644446", "0.46403304", "0.46392718", "0.46244273", "0.46136436", "0.46112412", "0.46078953", "0.46077755", "0.45985916", "0.4596867", "0.45909613", "0.4587392", "0.45851284", "0.45823154", "0.45818406", "0.45789894", "0.45742118", "0.45599222", "0.45515615", "0.45456174", "0.45436233", "0.45384946", "0.4527642", "0.45273197", "0.45216238", "0.45181635", "0.4513822", "0.45067808", "0.45054078", "0.44983855", "0.4493657", "0.44836006", "0.44805855", "0.44742975", "0.44659352", "0.44561535" ]
0.70824754
0
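The `register_module_asset` document above and the `_prepare` document in the next row describe two halves of the same dashboard API: `_prepare` resets `self._module_assets` and invokes each module's `register(dashboard)` hook, and that hook is where a module would call `register_module_asset`. A minimal sketch of the interplay, assuming a signac-dashboard-style module protocol; the `NotesLikeModule` class and its asset path are hypothetical, not part of the dataset.

```python
# Illustrative sketch only: how a dashboard module might use
# register_module_asset() from its register() hook. NotesLikeModule and
# the asset path are assumptions, not values from the dataset.
class NotesLikeModule:
    name = "notes_like"
    enabled = True

    def register(self, dashboard):
        # Called from the dashboard's prepare loop via module.register(self);
        # the dashboard appends the path to its _module_assets list.
        dashboard.register_module_asset("js/notes_like.js")


class MiniDashboard:
    def __init__(self, modules):
        self.modules = modules
        self._module_assets = []

    def register_module_asset(self, asset):
        self._module_assets.append(asset)

    def prepare(self):
        # Mirrors the loop in the _prepare() document in the next row:
        # each module registers itself and may contribute assets; modules
        # that fail to register are dropped.
        for module in list(self.modules):
            try:
                module.register(self)
            except Exception:
                self.modules.remove(module)


dash = MiniDashboard([NotesLikeModule()])
dash.prepare()
assert dash._module_assets == ["js/notes_like.js"]
```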
Prepare this dashboard instance to run.
def _prepare(self): # Set configuration defaults and save to the project document self.config.setdefault('PAGINATION', True) self.config.setdefault('PER_PAGE', 25) # Create and configure the Flask application self.app = self._create_app(self.config) # Add assets and routes self.assets = self._create_assets() self._register_routes() # Add module assets and routes self._module_assets = [] for module in self.modules: try: module.register(self) except Exception as e: logger.error('Error while registering {} module: {}'.format( module.name, e)) logger.error('Removing module {} from dashboard.'.format( module.name)) self.modules.remove(module) # Clear dashboard and project caches. self.update_cache()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n # Wipe the db\n self.wipe_db()\n\n # Set some global things\n try:\n dashboard_configuration = DashboardConfiguration(type=\"default\")\n dashboard_configuration.save()\n except IntegrityError:\n dashboard_configuration = DashboardConfiguration.objects.filter(type=\"default\").first()\n\n # Add all players from dataset\n group = self.add_players(dashboard_configuration)\n\n # Add all games from the dataset\n self.add_games()\n\n # Create the games played for this group\n self.add_game_played(group)", "def _initialise_run(self) -> None:", "def prepare(self):\n pass", "def prepare(self):\n pass", "def prepare(self):\n pass", "def prepare(self):\n self.__plot_data = [[], []]\n self.final_residual = None\n self.time_value = None\n self.clear_folder_content(self.run_path())\n self.copy_folder_content(self.config_path('system'), self.run_path('system'), overwrite=True)\n self.copy_folder_content(self.config_path('constant'), self.run_path('constant'), overwrite=True)\n self.copy_folder_content(self.config_path('0'), self.run_path('0'), overwrite=True)\n return True", "def prepare(self, config, **kwargs):\n pass", "def initialise(self):\n self.set_up()", "async def prepare(self):\n pass", "def setUp(self):\n\n self.logger_stats = DataScreen()", "def __init__(self, *args, **kwargs):\n super(TurntableCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('assetName', parts[1], True)\n self.setVar('step', parts[2], True)\n self.setVar('variant', parts[3], True)\n self.setVar('pass', parts[4], True)\n self.setVar('renderName', '{}-{}-{}'.format(\n self.var('assetName'),\n self.var('variant'),\n self.var('pass')\n ),\n True\n )", "def _preparation_workflow(self):\n self._validate_environment()\n self._validate_parameters()\n self._update_verbosity()", "def prepare(self):\n if self.opts['verbose']:\n print(\"Preparing dataset (one-time operation)...\")\n # Create paths files and load them back in\n self._build_ID_sets()\n self._create_ID_files()\n self._load_ID_files()\n if self.opts['verbose']:\n print(\"... 
done with preparing the dataset.\")", "def prepare(self):\n self.parse_template()\n self.build_argparser()\n self.parse_arguments()\n self.render_template()\n self.update_relation()", "def prepare(self):", "def bootstrap(self):\n None", "def __init__(self, *args, **kwargs):\n super(ShotRenderCrawler, self).__init__(*args, **kwargs)\n\n parts = self.var(\"name\").split(\"_\")\n locationParts = parts[0].split(\"-\")\n\n # Add the job var once job names on disk match job code names in shotgun\n self.setVar('seq', locationParts[1], True)\n self.setVar('shot', parts[0], True)\n self.setVar('step', parts[1], True)\n self.setVar('pass', parts[2], True)\n self.setVar('renderName', '{}-{}'.format(\n self.var('step'),\n self.var('pass')\n ),\n True\n )", "def __init__(self):\n self.args = self._prepare_args(locals())\n self.requires_full_dataset_in_memory = False", "def prepare(cls):", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.start_agents_once = False\n self.start_servers_once = False\n self.setup_start_agents = False\n self.setup_start_servers = False", "async def init(self):\n self.base_tamplates = {}\n self.preparing_task = None\n self.app = aioweb.Application()\n self.runner = aioweb.AppRunner(self.app)", "def prepare(self):\n return self", "def __init__(self, config):\n super().__init__(config)\n\n # Prepare the timer.\n self.timer = 0\n\n # Set the current player index.\n self.current_player_index = 0\n # If we are the client, the server goes first.\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--client\":\n self.current_player_index = 1\n\n # Prepare the phase counter.\n self.__current_phase = Game.PHASE_PREPARE\n # Prepare the shot location store.\n self.__current_fire_location = None\n self.__current_fire_effect = None", "def setup(self):\n self.log.debug('upm - in upm setup()')\n # Add resource setup code here", "def __setup(self):\n\n backupFolder = self.config['destination']\n self.__createBackupFolder(backupFolder)\n\n # create the project based backup folder\n today = date.today()\n\n if 'projects' in self.config:\n for project in self.config['projects'].iterkeys():\n timestamp = datetime.now().strftime('%d-%H-%M-%S')\n backupDestination = os.path.join(backupFolder, project, str(today.year), today.strftime('%m'), timestamp)\n self.__createBackupFolder(backupDestination)\n self.config['projects'][project]['destination'] = backupDestination", "def _prepare(self):\n logging.warning('-> preparing EMPTY experiments...')", "def setup_class(cls):\n self = cls()\n self.remove_files_created_during_previous_runs()\n if not os.path.exists(self.plaintext_directory):\n os.makedirs(self.plaintext_directory)\n\n if not os.path.exists(self.training_path):\n os.makedirs(self.training_path)\n\n if not os.path.exists(self.heldout_path):\n os.makedirs(self.heldout_path)\n\n prepare_data(self.paths)", "def startUp(self):\n pass", "def pre_start(self):\n self.make_runpath_dirs()", "def setup_class(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"agent_1\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n os.chdir(cls.t)", "def __init__(self):\n self.version = '1.0'\n self.link = 'dashboard'\n self.database_name = 'sqlite:///flask_monitoringdashboard.db'\n self.test_dir = None\n self.username = 'admin'\n self.password = 'admin'\n self.guest_username = 'guest'\n self.guest_password = ['guest_password']\n self.outlier_detection_constant = 2.5\n self.colors = {}\n self.security_token = 'cc83733cb0af8b884ff6577086b87909'\n self.outliers_enabled = 
True\n\n # define a custom function to retrieve the session_id or username\n self.get_group_by = None", "def initialize(self):\n \n casalog.origin(\"ParallelDataHelper\")\n\n # self._arg is populated inside ParallelTaskHelper._init_()\n self._arg['vis'] = os.path.abspath(self._arg['vis'])\n # MPI setting\n if self._mpi_cluster:\n self._cluster.start_services()\n \n if (self._arg['outputvis'] != \"\"):\n self._arg['outputvis'] = os.path.abspath(self._arg['outputvis']) \n\n outputPath, self.outputBase = os.path.split(self._arg['outputvis'])\n try:\n if self.outputBase[-1] == '.':\n self.outputBase = self.outputBase[:self.outputBase.rindex('.')]\n except ValueError:\n # outputBase must not have a trailing .\n pass\n\n if self.outputBase == '.' or self.outputBase == './':\n raise ValueError, 'Error dealing with outputvis'\n \n # The subMS are first saved inside a temporary directory\n self.dataDir = outputPath + '/' + self.outputBase+'.data'\n if os.path.exists(self.dataDir): \n shutil.rmtree(self.dataDir)\n\n os.mkdir(self.dataDir)", "def setup(self, args={}):\n\n return Status.RUN", "def setup(self) -> None:\n self.setup_logging()\n self.setup_plugins()\n self.post_setup()", "def _pre_setup(self):\n apps.clear_cache()\n call_command('migrate', interactive=False, verbosity=0)\n call_command('loaddata', 'initial_data', verbosity=0)\n super(DatatableViewTestCase, self)._pre_setup()", "def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.setup_start_agents = False", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def prepare_process(self):\n max_wallclock_seconds = self.ctx.inputs.metadata.options.get('max_wallclock_seconds', None)\n\n if max_wallclock_seconds is not None and 'time_limit' not in self.ctx.inputs.parameters['INPUT_XSPECTRA']:\n self.set_max_seconds(max_wallclock_seconds)\n\n if self.ctx.restart_calc:\n self.ctx.inputs.parameters['INPUT_XSPECTRA']['restart_mode'] = 'restart'\n self.ctx.inputs.parent_folder = self.ctx.restart_calc.outputs.remote_folder", "def prepare_acquisition(self):\n self.lib.PrepareAcquisition()", "def setup(cls):\n cls.runner = CliRunner()\n cls.agent_name = \"myagent\"\n cls.cwd = os.getcwd()\n cls.t = tempfile.mkdtemp()\n # copy the 'packages' directory in the parent of the agent folder.\n shutil.copytree(Path(CUR_PATH, \"..\", \"packages\"), Path(cls.t, \"packages\"))\n\n os.chdir(cls.t)\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"init\", \"--author\", AUTHOR],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n result = cls.runner.invoke(\n cli,\n [*CLI_LOG_OPTION, \"create\", \"--local\", cls.agent_name],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n os.chdir(cls.agent_name)\n # add connection first time", "def initialise(self):\n self.sc.init.exec_action(self.variables)", "def __init__(self):\n\n super().__init__()\n self._run_flag = True", "def begin(self):\n\n env = self.context.lookup(\"/environment\")\n\n self._test_results_dir = env[\"output_directory\"]\n self._starttime = env[\"starttime\"]\n self._runid = env[\"runid\"]\n\n self._result_filename = os.path.join(self._test_results_dir, \"testrun_results.jsos\")\n self._summary_filename = os.path.join(self._test_results_dir, \"testrun_summary.json\")\n self._import_errors_filename = os.path.join(self._test_results_dir, \"import_errors.jsos\")\n\n return", "def __init__(self):\n self.collectorName = \"zenprocess\"\n self.defaultRRDCreateCommand = None\n 
self.configCycleInterval = 20 # minutes\n\n #will be updated based on Performance Config property of same name\n self.processCycleInterval = 3 * 60\n\n #will be filled in based on buildOptions\n self.options = None\n\n # the configurationService attribute is the fully qualified class-name\n # of our configuration service that runs within ZenHub\n self.configurationService = 'Products.ZenHub.services.ProcessConfig'", "def __init__(self):\n super(PreProcess, self).__init__()", "def begin(self):\n os.mkdir(self.meta)\n\n self.logname = os.path.join(self.rundir, self.meta, 'log')\n self.logfile = open(self.logname, 'a')\n if settings.verbosity >= 3:\n self.logfile = Tee(self.logfile)\n\n if self.test.setup:\n self.setup_script = self._make_setup_script()\n self.steps_script = self._make_steps_script()\n if self.test.teardown:\n self.teardown_script = self._make_teardown_script()", "def setup_script(self, *args, **kwargs):\n pass", "def __init__(self, *args):\n super().__init__(*args)\n\n self.output_dir = os.path.join(self.config.results_dir, \"cowinner\")\n self.merged_dir = os.path.join(self.output_dir, \"merged\")", "def __post_init__(self) -> None:\n self.language = self.pipeline.language or self.hass.config.language\n\n # stt -> intent -> tts\n if PIPELINE_STAGE_ORDER.index(self.end_stage) < PIPELINE_STAGE_ORDER.index(\n self.start_stage\n ):\n raise InvalidPipelineStagesError(self.start_stage, self.end_stage)\n\n pipeline_data: PipelineData = self.hass.data[DOMAIN]\n if self.pipeline.id not in pipeline_data.pipeline_runs:\n pipeline_data.pipeline_runs[self.pipeline.id] = LimitedSizeDict(\n size_limit=STORED_PIPELINE_RUNS\n )\n pipeline_data.pipeline_runs[self.pipeline.id][self.id] = PipelineRunDebug()", "def setup_method(self):\n MANAGER._tables = {}\n MANAGER._views = {}", "def init(self):\n\n\t\tstatus, param = self.execute(self.mission, 'on_init', self.kingdom)\n\n\t\treturn status", "def __init__(self):\n rospy.logdebug(\"Start ParrotDroneEnv INIT...\")\n\n #Spawn Parrot AR Drone through launch file\n self.ros_pkg_name=\"drone_construct\"\n self.launch_file_name=\"put_drone_in_world.launch\"\n \n super(ParrotDroneEnv, self).__init__(\n ros_pkg_name=self.ros_pkg_name,\n launch_file=self.launch_file_name,\n start_init_physics_parameters=True,\n reset_world_or_sim='WORLD')\n\n rospy.logdebug(\"Finished ParrotDroneEnv INIT...\")", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( '',)\n out_pData[0].plugin_data_setup( 'PROJECTION','multiple')", "def prepare(self):\n # The default implementation takes care of local compilation of the client, if needed. Be sure to call it.\n client.prepare(self)\n # TODO: Add any additional preparations if needed. 
You're not likely to need those, though.", "def init_run(self):\n raise NotImplementedError", "def prepare_UI(self):", "def setup(self):\n in_dataset, out_dataset = self.get_datasets()\n \n out_dataset[0].create_dataset(in_dataset[0])\n\n in_pData, out_pData = self.get_plugin_datasets()\n\n in_pData[0].plugin_data_setup( 'PROJECTION',multiple)\n out_pData[0].plugin_data_setup( 'PROJECTION',multiple)", "def setup( self ):", "def prepareController(self):\n pass", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\")\n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)\n #Creates Sample Template if not exists\n self.createSampleTemplate(templateName=self.templateName)", "def _initialize_runners_startup(self):\n if self.command_group.is_cmd0_runner():\n self._initialize_runner(self.command_group.cmd0)\n if self.command_group.is_cmd1_runner():\n self._initialize_runner(self.command_group.cmd1)\n if self.command_group.is_cmd2_runner():\n self._initialize_runner(self.command_group.cmd2)", "def setUp(self):\n\n # Create the data pipe.\n self.interpreter.pipe.create('dasha', 'mf')\n\n # Create a temporary directory for Dasha outputs.\n ds.tmpdir = mkdtemp()", "def setup(self):\n print(\"Inside setup\")\n self.fetch_from_db()\n # Don't assign to the function output. Assing it to the function object so that whenever we\n # do some changes to layout, it will reflect without server restarting\n app.layout = self.get_root_layout\n return app.server", "def __init__(self, *args, **kwargs):\n super(NetworkStat, self).__init__(*args, **kwargs)\n Clock.schedule_interval(self.init_ui, 1)", "def main(self):\n\n def _run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', '--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. 
Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def _prepare(self):", "def _prepare(self):", "def _setup(self) -> None:\n\t\treturn", "def trial_prep(self):\n pass", "def _startup():\n from octoprint_dashboard.model import User, Config\n if Config.query.scalar() is None:\n print(\"No config, add config via command 'python -m flask config'\")\n shutdown_server()\n if User.query.filter_by(superadmin=True).count() == 0:\n print(\"No superadmin, add superadmin via command 'python -m flask add_superadmin <username>'\")\n shutdown_server()\n\n scheduler.start() # starts background task scheduler\n zeroconf_browser.start() # starts MDNS service discovery", "def init_with_context(self, context):\n output = cache.get('dashboard.sysinfo', None)\n if not output:\n cpu_usage = psutil.cpu_percent(interval=0, percpu=True)\n memuse = psutil.virtual_memory()._asdict()\n memory_usage_mb = {'total': memuse['total'] / 1024.0 ** 3, 'used': memuse['used'] / 1024.0 ** 3, 'free': memuse['free'] / 1024.0 ** 3,\n 'percent': memuse['percent'],\n 'programs': (memuse['total'] - memuse['available']) / 1024.0 ** 3}\n disk_stats = {'free': get_free_disk_space() / 1024.0 ** 3, 'percent': get_free_disk_space(percent=True)}\n if disk_stats['percent'] <= 100:\n disk_stats['status'] = 'success'\n if disk_stats['percent'] <= 50:\n disk_stats['status'] = 'warning'\n if disk_stats['percent'] <= 20:\n disk_stats['status'] = 'danger'\n for process in processes:\n process.cpu_percent = process.get_cpu_percent(interval=0)\n output = render_to_string(\"core/dashboard/system.html\",\n {'cpu_usage': cpu_usage, 'memory': memory_usage_mb, 'disk': disk_stats, 'processes': processes})\n cache.set('dashboard.sysinfo', output, 5)\n self.pre_content = output", "def setup(self):\n header_print(self.data['intro'])\n header_print(self.data['help'])\n random.shuffle(self.data['draw'])\n random.shuffle(self.data['locations'])\n random.shuffle(self.data['events'])\n random.shuffle(self.data['aces'])\n random.shuffle(self.data['personalities'])\n self.stats = {\n 'round': 0,\n 'powers': {\n 'MOONS': 6,\n 'SUNS': 6,\n 'WAVES': 6,\n 'LEAVES': 6,\n 'WYRMS': 6,\n 'KNOTS': 6,\n },\n 'hand': self.data['draw'][:],\n 'discard': [],\n 'active': [],\n 'opponent': {},\n }", "def __init__(self):\n\n self.config = load_config()\n self.set_env_var()", "def setUp(self):\n setup_testenv()\n self.args = DEFAULT_CLI_ARGS\n self.args['--output'] = ['png']", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def __init__(self):\n self.params = {}\n self.counts = 0\n self.failed = 0\n self.path = os.path.join(os.getcwd(), OUTPUT_DIR)\n \n if not os.path.exists(self.path):\n os.makedirs(self.path)", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n 
self.camera = Camera.instance()\n\n # define directories and file paths\n date_str = datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.log_dir = f\"{const.Storage.DATA}/{date_str}\"\n self.img_dir = f\"{self.log_dir}/img/\"\n self.log_path = f\"{self.log_dir}/log.csv\"\n self.img_extension = \"npy\"\n\n # ensure that the necessary directories exist\n os.mkdir(self.log_dir)\n os.mkdir(self.img_dir)\n assert os.path.isdir(self.log_dir), \"data directory could not be created\"\n assert os.path.isdir(self.img_dir), \"image directory could not be created\"", "def setUp(self):\n percentiles_cube = set_up_percentiles_cube()\n self.plugin_instance = ProbabilitiesFromPercentiles2D(\n percentiles_cube, 'new_name')\n self.reference_cube = percentiles_cube[0]\n self.orography_cube = set_up_threshold_cube()", "def init(self):\n\n self.checkDirectory(self.output_dir,\"output\")\n self.checkDirectory(self.working_dir,\"working\")", "def preRunSetup(self):\n self.logDesc(\"Pre Run Setup\") \n #Check for current logged in user\n self.verifyCurrentUser(userRole='Administrator', loginAsUser=True)", "def stage(self):\n\n # prepare projected land allocation data\n self.prep_projected()\n\n # prepare base land use data\n self.prep_base()\n\n # harmonize grid area between projected and base layer land allocation\n self.harmony()\n\n # apply constraints\n self.set_constraints()\n\n # create kernel density filter if not running multiple jobs\n self.kernel_filter()\n\n # set data for step zero\n self.set_step_zero()", "def start(self):\n self.__init__()\n self.set_n_players()\n self.init_players()\n self.init_territory_selection_phase()\n self.init_troop_deployment_phase()\n # self.game_phase()", "def __init__(self, config):\n self.config = config\n self.status = {\n 'serial':'None',\n 'timestamp':'None',\n 'uptime':'None',\n 'free_disk_space_sdcard':'None',\n 'free_disk_space_stick':'None',\n 'wwan_reception':'None',\n 'log':'None',\n }\n self.collect()", "def prepare(self):\n # This is a no-op; the native subprocess environment is ready-to-use.\n pass", "def prepare(self, context):\n raise NotImplementedError", "def _initialize_metadata(self) -> None:\n\n # Write the pidfile. 
The SchedulerService will monitor it after a grace period.\n self.write_pid()\n self.write_process_name()\n self.write_fingerprint(ensure_text(self.options_fingerprint))\n self._logger.info(f\"pantsd {VERSION} running with PID: {self.pid}\")\n self.write_socket(self._server.port())", "def __init__(self):\n\t\tappionScript.AppionScript.__init__(self)\n\t\tself.rundata = {}\n\t\t### extra appionLoop functions:\n\t\tself._addDefaultParams()\n\t\tself.setFunctionResultKeys()\n\t\tself._setRunAndParameters()\n\t\t#self.specialCreateOutputDirs()\n\t\tself._initializeDoneDict()\n\t\tself.result_dirs={}", "def __init__(self):\n self._create_options()\n self._create_sections()", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def _setup(self):\n pass", "def init(self):\n tools.drop_view_if_exists(self._cr, \"runbot_build_stat_sql\")\n self._cr.execute(\n \"\"\" CREATE OR REPLACE VIEW runbot_build_stat_sql AS (\n SELECT\n (stat.id::bigint*(2^32)+bun.id::bigint) AS id,\n stat.id AS stat_id,\n stat.key AS key,\n stat.value AS value,\n step.id AS config_step_id,\n step.name AS config_step_name,\n bu.id AS build_id,\n bp.config_id AS build_config_id,\n bu.parent_path AS build_parent_path,\n bu.host AS build_host,\n bun.id AS bundle_id,\n bun.name AS bundle_name,\n bun.sticky AS bundle_sticky,\n ba.id AS batch_id,\n tr.id AS trigger_id,\n tr.name AS trigger_name\n FROM\n runbot_build_stat AS stat\n JOIN\n runbot_build_config_step step ON stat.config_step_id = step.id\n JOIN\n runbot_build bu ON bu.id = stat.build_id\n JOIN\n runbot_build_params bp ON bp.id =bu.params_id\n JOIN\n runbot_batch_slot bas ON bas.build_id = stat.build_id\n JOIN\n runbot_trigger tr ON tr.id = bas.trigger_id\n JOIN\n runbot_batch ba ON ba.id = bas.batch_id\n JOIN\n runbot_bundle bun ON bun.id = ba.bundle_id\n )\"\"\"\n )", "def _initialize(self):\n self.send_init_command()", "def preparation(self):\n # [1] Makes a dir for saving results.\n # if 'Result' dir already exists,\n # a 'temporary' dir will be made.\n\n try:\n os.mkdir(self.dir_for_saving_result)\n except FileExistsError:\n self.viewer.display_message(\"Made a temporary directory.\")\n self.dir_for_saving_result = 'temporary'\n os.mkdir('temporary')\n\n # [2] Copies config file into the same dir as the one where results will be stored\n shutil.copy2(self.config_file_name, self.dir_for_saving_result)", "def _setUp(self):\n self.numOfMeasurements = round(self.config.period / self.config.pollInterval, 0)\n # list of pairs (componentPID, componentName)\n componentsInfo = self._getComponentsInfo()\n for compName, compPID in componentsInfo.items():\n self._setUpProcessDetailAndMeasurements(compPID, compName)", "def _InitializeVizier(self):\n p = self.params\n self._should_report_metrics = False\n\n reporting_job = self._task_params.cluster.reporting_job\n job_split = self._task_params.cluster.reporting_job.split('/')\n\n if len(job_split) != 2:\n # The default setting for reporting job is 'evaler'. This is not valid\n # for use with program. We only warn only since we may not be in a vizier\n # setting.\n tf.logging.info('reporting_job should be of the form '\n 'program_name/dataset_name with exactly one / '\n f'instead got {reporting_job}')\n return\n\n vizier_program_name, vizier_dataset_name = job_split\n if p.name == vizier_program_name and p.dataset_name == vizier_dataset_name:\n tf.logging.info(f'Adding reporting for {reporting_job}')\n self._should_report_metrics = True" ]
[ "0.62227356", "0.61323386", "0.6124156", "0.6124156", "0.6124156", "0.603257", "0.6021469", "0.6008526", "0.59598756", "0.5919939", "0.5891619", "0.58891463", "0.5887354", "0.5885531", "0.58851296", "0.58174354", "0.57714", "0.57629365", "0.57341665", "0.57147014", "0.57033503", "0.56929004", "0.5688639", "0.56826", "0.56601846", "0.56572604", "0.56497276", "0.5635938", "0.5629412", "0.5626734", "0.5620366", "0.56175137", "0.56123877", "0.5600874", "0.55964166", "0.558569", "0.5582999", "0.5578447", "0.5563387", "0.5556074", "0.55549383", "0.5548806", "0.55486846", "0.5518073", "0.5513394", "0.5498674", "0.5494231", "0.54904956", "0.5476248", "0.54724795", "0.5469745", "0.54677767", "0.5467067", "0.54622734", "0.5456571", "0.5452278", "0.5446124", "0.54449713", "0.5444648", "0.54379874", "0.5429001", "0.54168653", "0.54158926", "0.5408586", "0.5407737", "0.54074836", "0.54074836", "0.54008985", "0.54001695", "0.53995866", "0.5397348", "0.53904665", "0.5387223", "0.5382867", "0.53774935", "0.53774935", "0.53774935", "0.5374068", "0.53727096", "0.53710866", "0.53641546", "0.5363783", "0.5363026", "0.5358908", "0.5356773", "0.53566957", "0.53509", "0.5347976", "0.53411466", "0.533917", "0.53372407", "0.53372407", "0.53372407", "0.53372407", "0.53372407", "0.5336743", "0.53345937", "0.53326964", "0.533173", "0.53314805" ]
0.6481295
0
Runs the dashboard webserver.
def run(self, *args, **kwargs): host = self.config.get('HOST', 'localhost') port = self.config.get('PORT', 8888) max_retries = 5 for _ in range(max_retries): try: self.app.run(host, port, *args, **kwargs) break except OSError as e: logger.warning(e) if port: port += 1 pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def webserver_start():\n run(_webserver_command())", "def run_dashboard(\n database_path,\n no_browser,\n port,\n updating_options,\n):\n port = _find_free_port() if port is None else port\n port = int(port)\n\n if not isinstance(database_path, (str, pathlib.Path)):\n raise TypeError(\n \"database_path must be string or pathlib.Path. \",\n f\"You supplied {type(database_path)}.\",\n )\n else:\n database_path = pathlib.Path(database_path)\n if not database_path.exists():\n raise ValueError(\n f\"The database path {database_path} you supplied does not exist.\"\n )\n\n session_data = {\n \"last_retrieved\": 0,\n \"database_path\": database_path,\n \"callbacks\": {},\n }\n\n app_func = partial(\n dashboard_app,\n session_data=session_data,\n updating_options=updating_options,\n )\n apps = {\"/\": Application(FunctionHandler(app_func))}\n\n _start_server(apps=apps, port=port, no_browser=no_browser)", "async def serve_web(self):\n interface = \"0.0.0.0\" if settings.PUBLIC_ACCESS else \"127.0.0.1\"\n port = settings.WEB_PORT\n self.logger.info(f\"web: starting the server on {interface}:{port}...\")\n await self.runner.setup()\n site = aioweb.TCPSite(self.runner, interface, port)\n await site.start()\n self.preparing_task = None", "def run_webserver():\n\tglobal hostname, portnum\n\t#bottle.debug(True)\t# While in development, we want the data\n\tbottle.run(host=hostname, port=portnum) \n\tlogging.info(\"Exiting server.\")", "def dashboard():", "def admin_server(request):\n return run_server(interval='10000')", "def run_server(self, _):\n if not ENABLE_SERVER:\n logger.info('server not enabled, exit')\n return\n app.run(host=API_HOST, port=API_PORT, threaded=API_THREADED)", "def web():\n from mephisto.client.server import app\n\n app.run(debug=False)", "def runserver():\n app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, threaded=config.THREADED)", "def run(self):\n self.__server.serve_forever()", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def console_server(request):\n return run_server(interval='1')", "def run(self):\n server = CherryPyWSGIServer(\n (self.options['host'], int(self.options['port'])),\n WSGIPathInfoDispatcher({\n '/': WSGIHandler(),\n settings.ADMIN_MEDIA_PREFIX: MediaHandler(\n os.path.join(admin.__path__[0], 'media'))\n }),\n int(self.options['threads']), self.options['host'],\n request_queue_size=int(self.options['request_queue_size']))\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()", "def runserver(args):\n elmrapp.run()\n return \"\"", "def index() -> str:\n return render_template('index.html', username=getpass.getuser(), hostname=socket.gethostname(),\n manager_host=DASHBOARD_MANAGER_HOST.value.decode() or 'localhost',\n manager_port_nr=DASHBOARD_MANAGER_PORT.value)", "def runserver():\n\tapp.run(host = '0.0.0.0', port = 5000)", "def run():\n return render_template('index.html')", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def main(self):\n\n def _run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = 
argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', '--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def main():\n from clize import run\n\n run(\n cases, webserver,\n description=DESCRIPTION, footnotes=FOOTNOTES)", "def run(self):\n self._server = self._get_server()\n self._server.serve_forever()", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def web():\n import web\n web.app.run(host='0.0.0.0', port=5000, debug=True)", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def Run(self):\n self.BuildWebAppSite()\n\n self.BuildRPCSite(self.env.umpire_cli_port, self.methods_for_cli, '0.0.0.0')\n self.BuildRPCSite(self.env.umpire_rpc_port, self.methods_for_dut)\n\n # Start services.\n reactor.callWhenRunning(self.OnStart)\n # And start reactor loop.\n reactor.run()", "def main():\n return run_server(**parse_server_args())", "def runserver():\n local_addr = \"0.0.0.0:8000\"\n local(\"{} exec web python3 manage.py runserver {} {}\".format(\n dc, local_addr, settings))", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def index():\n return \"Attendance Flask server\"", "def main():\n args = utils.parse_arguments()\n logging.basicConfig(level=logging.INFO)\n coloredlogs.install(level=0,\n fmt=\"[%(asctime)s][%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n isatty=True)\n if args.debug:\n l_level = logging.DEBUG\n else:\n l_level = logging.INFO\n\n logging.getLogger(__package__).setLevel(l_level)\n\n LOG.info('RUNNING TAMAGO WEB')\n serve(app, 
port=8080, host='0.0.0.0')", "def run_html():\n if __name__ != \"__main__\":\n app.run(debug=True)", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def run_server():\n app = init_app()\n app.run(host=app.config['HOST'], port=app.config['PORT'])", "def run():\r\n log.debug('Starter::run()')\r\n try:\r\n # check specified port\r\n if not conf.port:\r\n raise Exception(\"Please specify port number! (use --port)\")\r\n Server(conf.port).run()\r\n except Exception as E:\r\n log.critical(E)", "def run():\n app.run()", "def _start_server(apps, port, no_browser):\n # necessary for the dashboard to work when called from a notebook\n asyncio.set_event_loop(asyncio.new_event_loop())\n\n # this is adapted from bokeh.subcommands.serve\n with report_server_init_errors(port=port):\n server = Server(apps, port=port)\n\n # On a remote server, we do not want to start the dashboard here.\n if not no_browser:\n\n def show_callback():\n server.show(\"/\")\n\n server.io_loop.add_callback(show_callback)\n\n address_string = server.address if server.address else \"localhost\"\n\n print( # noqa: T201\n \"Bokeh app running at:\",\n f\"http://{address_string}:{server.port}{server.prefix}/\",\n )\n server._loop.start()\n server.start()", "def run(self, host: str = '0.0.0.0', port: int = 8080):\n self._loop.run_until_complete(self._configure_plugins())\n web.run_app(self._app, host=host, port=port) # pragma: no cover", "def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')", "def start(self):\n self.serve_forever()", "def start(self):\n self.serve_forever()", "def index(request):\r\n return requests.get(DaemonServer._mock_url + '/')", "def run(self):\r\n self.rpc_server.serve_forever(0.5)", "def serve() -> None: # pragma: no cover-behave\n logging.getLogger().setLevel(logging.INFO)\n database = init_database()\n init_bottle(database)\n server_port = os.environ.get(\"SERVER_PORT\", \"5001\")\n bottle.run(server=\"gevent\", host=\"0.0.0.0\", port=server_port, reloader=True, log=logging.getLogger()) # nosec", "def run(selector):\n\n print selector\n exec \"from \" + selector + \" import urls, port\"\n\n if os.environ.get(\"REQUEST_METHOD\", \"\"):\n from wsgiref.handlers import BaseCGIHandler\n BaseCGIHandler(sys.stdin, sys.stdout, sys.stderr, os.environ) \\\n .run(urls)\n else:\n from wsgiref.simple_server import WSGIServer, WSGIRequestHandler\n# with the code like this we are binding to no particular interface, matter?\n httpd = WSGIServer(('', port), WSGIRequestHandler)\n httpd.set_app(urls)\n print \"Serving HTTP on %s port %s ...\" % httpd.socket.getsockname()\n httpd.serve_forever()", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def local_webserver_start():\n if not _is_webserver_running():\n local(_webserver_command())", "def _run_web_server(q, log_filepath, dist_dir, custom_headers):\n\n os.chdir(dist_dir)\n\n log_fh = log_filepath.open(\"w\", buffering=1)\n sys.stdout = log_fh\n sys.stderr = log_fh\n\n class Handler(http.server.SimpleHTTPRequestHandler):\n def log_message(self, format_, *args):\n print(\n \"[%s] source: %s:%s - %s\"\n % (self.log_date_time_string(), *self.client_address, format_ % args)\n )\n\n def end_headers(self):\n # Enable Cross-Origin Resource Sharing (CORS)\n for k, v in custom_headers.items():\n self.send_header(k, v)\n 
self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n super().end_headers()\n\n with socketserver.TCPServer((\"\", 0), Handler) as httpd:\n host, port = httpd.server_address\n print(f\"Starting webserver at http://{host}:{port}\")\n httpd.server_name = \"test-server\" # type: ignore[attr-defined]\n httpd.server_port = port # type: ignore[attr-defined]\n q.put(port)\n\n def service_actions():\n try:\n if q.get(False) == \"TERMINATE\":\n print(\"Stopping server...\")\n sys.exit(0)\n except queue.Empty:\n pass\n\n httpd.service_actions = service_actions # type: ignore[assignment]\n httpd.serve_forever()", "def start_server(self):\n app.run(host=str(self.__constants.host),\n port=int(self.__constants.port),\n debug=bool(self.__constants.runindebug))", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def web():\n env['remote_port'] = env['port_map']['8000']\n\n sys.stdout.write('Launching browser on remote port %(remote_port)s\\n' % env)\n\n run('open http://%(relay_server)s:%(remote_port)s' % env)", "def run(self):\n self.app.run(host=\"0.0.0.0\")", "def serve(self):\n\t\timport thread\n\t\tthread.start_new_thread(self._server_thread, tuple())", "def main(methods=[\"GET\"]):\n validate_auth()\n ## issue with path resolution after build\n return send_from_directory(\n #todo: remove templates directory reference; index.html isn't a jinja template\n safe_join(current_app.static_folder, 'templates'),\n 'index.html',\n cache_timeout=-1\n )", "def main():\n cwd = os.getcwd() # static files\n port = int(os.environ.get(\"PORT\", 8000))\n path = os.path.join(cwd, \"paper\") # Path to dl file\n\n app = Application([\n (r'/', GraphView),\n # Static files, repeat for other file names\n (r'/(.*\\.js)', StaticFileHandler, {\"path\": cwd} ),\n (r'/download/(barojas_v193\\.pdf)', StaticFileHandler, {'path': path} ), # Static serving file\n ])\n http_server = HTTPServer(app)\n http_server.listen(port)\n print('RI5C is listening on port:%i' % port)\n IOLoop.current().start()", "def main():\n\n app_config = load_config()\n database_connection = mysql.connector.connect(**app_config[\"database\"])\n panelists = retrieve_all_panelist_appearance_counts(database_connection)\n show_years = retrieve_all_years(database_connection)\n\n rendered_report = render_report(show_years=show_years,\n panelists=panelists,\n report_settings=app_config[\"report\"])\n\n generate_output_files(rendered_report=rendered_report,\n report_settings=app_config[\"report\"])", "def runserver():\n\tlocal(\"revel run github.com/FreeFlightSim/fg-navdb\")", "def main():\n # Verify the database exists and has the correct layout\n db_seeder()\n app = DiminuendoApp()\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(options.port)\n tornado.ioloop.IOLoop.instance().start()", "def run(self):\n log.debug(\"start web server running\")\n webDir = self.config.webDir\n self.root.putChild(\"images\", static.File(webDir+\"/images\"))\n self.root.putChild(\"css\", static.File(webDir+\"/css\")) \n self.root.putChild(\"scripts\", static.File(webDir+\"/scripts\"))\n self.root.putChild(\"style\", static.File(webDir+\"/style\"))\n self.root.putChild(\"docs\", static.File(webDir+\"/docs\"))\n xulDir = self.config.xulDir\n self.root.putChild(\"xulscripts\", static.File(xulDir+\"/scripts\"))\n self.root.putChild(\"xultemplates\", 
static.File(xulDir+\"/templates\"))\n self.root.putChild(\"templates\", static.File(webDir+\"/templates\"))\n self.root.putChild(\"editor\", self.editor)\n self.root.putChild(\"preferences\", self.preferences)\n self.root.putChild(\"about\", self.about)\n verbose_port_search = 0\n port_test_done = 0\n found_port = 0\n test_port_num = self.config.port\n test_port_count = 0\n max_port_tests = 5000\n while not port_test_done:\n test_port_num = self.config.port + test_port_count\n try:\n if verbose_port_search:\n print \"trying to listenTCP on port# \", test_port_num\n reactor.listenTCP(test_port_num, appserver.NevowSite(self.root),\n interface=\"127.0.0.1\")\n if verbose_port_search:\n print \"still here after listenTCP on port# \", test_port_num\n found_port = 1\n port_test_done = 1\n except CannotListenError, exc:\n if verbose_port_search:\n print \"caught exception after listenTCP on port# \", test_port_num\n last_exception = exc\n test_port_count += 1\n if test_port_count >= max_port_tests:\n port_test_done = 1\n if found_port:\n self.config.port = test_port_num\n if verbose_port_search:\n print \"found available eXe port# \", self.config.port\n reactor.run()\n else:\n print \"Sorry, unable to find an available port in the range of: \", self.config.port, \" - \", test_port_num\n print \"last exception: \", unicode(last_exception)\n log.error(\"Can't listen on interface 127.0.0.1, ports %s-%s, last exception: %s\" % \n (self.config.port, test_port_num, unicode(last_exception)))", "def do_status(self, args):\n webbrowser.open(f\"{args.host}:{args.port}\")", "def cli(self, cmd_args=None):\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n\n parser = argparse.ArgumentParser(description=\"PyGreen, micro web\"\n \"framework/static web site generator\")\n subparsers = parser.add_subparsers(dest=\"action\")\n\n parser_serve = subparsers.add_parser(\"serve\",\n help=\"serve the web site\")\n parser_serve.add_argument(\"-p\", \"--port\", type=int, default=8080)\n parser_serve.add_argument(\"-f\", \"--folder\", default=\".\",\n help=\"folder containing files to serve\")\n parser_serve.add_argument(\"-d\", \"--disable-templates\",\n action=\"store_true\", default=False,\n help=\"just serve static files, do not use \"\n \"invoke Mako\")\n\n def serve():\n if args.disable_templates:\n self.template_exts = set([])\n self.run(port=args.port)\n\n parser_serve.set_defaults(func=serve)\n\n parser_gen = subparsers.add_parser(\"gen\", help=\"generate a static \"\n \"version of the site\")\n parser_gen.add_argument(\"output\", help=\"folder to store the files\")\n parser_gen.add_argument(\"-f\", \"--folder\", default=\".\",\n help=\"folder containing files to serve\")\n\n def gen():\n self.gen_static(args.output)\n\n parser_gen.set_defaults(func=gen)\n\n args = parser.parse_args(cmd_args)\n self.set_folder(args.folder)\n print(parser.description)\n print(\"\")\n args.func()", "def main():\n host = ''\n port = 8088\n HTTPServer((host, port), HandleRequests).serve_forever()", "def run(Concordancer, port=1420, url=None, open_browser=True):\n # Allow access from frontend\n cors = CORS(allow_all_origins=True)\n\n # Falcon server\n app = falcon.API(middleware=[cors.middleware])\n serv = ConcordancerBackend(Concordancer)\n app.add_route('/query', serv)\n app.add_route('/export', serv, suffix='export')\n\n print(f\"Initializing server...\")\n httpd = simple_server.make_server('localhost', port, app)\n print(f\"Start serving at http://localhost:{port}\")\n if url is None:\n url = 
query_interface_path()\n if open_browser:\n webbrowser.open(url)\n httpd.serve_forever()", "def Main():\n wsgiref.handlers.CGIHandler().run(application)", "def run():\n app.run(debug=True, port=5001)", "def run():\n REDIRECT = False\n LOG_FILE = \"truss.log\"\n app = App(REDIRECT)\n app.MainLoop()", "def run(self):\n # Needed in order to start the file server in detached mode.\n self._execute_command('yum install -y screen', sudo=True, retries=5)\n\n self.logger.info('Starting up SimpleHTTPServer on {0}'\n .format(self.port))\n self._execute_command('cd {0} && screen -dm {1}'\n .format(self.fileserver_path, self.server_cmd),\n pty=False)", "def start():\n # Import any local level utilities that may be used\n # before the web-server is initialized.\n from django.core.management import call_command\n from db.models import ApplicationState\n from db.utilities import generate_models\n\n # Run the migrate command within django.\n # Making sure our models are upto date.\n call_command(command_name=\"migrate\", app=\"titandash\")\n\n # Server is being started, it is safe for us\n # to update our active flag.\n ApplicationState.objects.set(state=True)\n\n # Generate any initial models that we expect\n # to be available by default.\n generate_models()\n\n _url = EEL_DASHBOARD if User.objects.valid() else EEL_LOGIN\n\n logger.info(\"starting titandash application with options: '{options}'\".format(options={\"path\": _url, **EEL_START_OPTIONS}))\n # Start eel, providing our start url defined above, the close callback\n # to deal with cleanup functionality, and default options.\n eel.start(_url, close_callback=close_callback, **EEL_START_OPTIONS)", "def init_dashboard(server):\n dash_app = dash.Dash(\n __name__,\n server=server,\n routes_pathname_prefix='/dashapp/',\n external_stylesheets=[\n '/static/css/data.css',\n dbc.themes.BOOTSTRAP\n ]\n )\n\n # Columns: Age, Height, Weight, BMI, Gender, Systolic BP, Diastolic BP,\n # Cholesterol, Glucose, Smoking, Alcohol Intake, Physical Activity\n df = pd.read_csv(\"flask_app/static/data/readable_cvd_data.csv\")\n cholesterol = df['Cholesterol']\n height = df['Height (cm)']\n weight = df['Weight (kg)']\n bmi = df['BMI']\n cvd_status = df['CVD Status']\n smoker_status = df['Smoker']\n\n importance_df = pd.read_csv('flask_app/static/data/importance_dataset.csv')\n\n # Retrieves smoker data from the imported dataframe and correlates it to whether the patient/user is a smoker\n smoker_pos_cvd = len(df[(smoker_status == 'yes') & cvd_status.isin(['positive'])])\n smoker_neg_cvd = len(df[(smoker_status == 'yes') & cvd_status.isin(['negative'])])\n non_smoker_pos_cvd = len(df[(smoker_status == 'no') & cvd_status.isin(['positive'])])\n non_smoker_neg_cvd = len(df[(smoker_status == 'no') & cvd_status.isin(['negative'])])\n\n cholesterol_well_above_pos_cvd = len(df[(cholesterol == 'well above normal') & cvd_status.isin(['positive'])])\n cholesterol_well_above_neg_cvd = len(df[(cholesterol == 'well above normal') & cvd_status.isin(['negative'])])\n cholesterol_above_pos_cvd = len(df[(cholesterol == 'above normal') & cvd_status.isin(['positive'])])\n cholesterol_above_neg_cvd = len(df[(cholesterol == 'above normal') & cvd_status.isin(['negative'])])\n cholesterol_normal_pos_cvd = len(df[(cholesterol == 'normal') & cvd_status.isin(['positive'])])\n cholesterol_normal_neg_cvd = len(df[(cholesterol == 'normal') & cvd_status.isin(['negative'])])\n\n importance_bar_graph_figure = px.bar(importance_df.sort_values(by=['Importance']), x='Importance',\n y='Variable',\n 
title='Health variables importance relative to determining the likelihood of '\n 'testing positive for CVD')\n\n # Scatter plot showing relation between height and weight levels\n height_and_weight_scatter_plot = px.scatter(df, x=height, y=weight, size=bmi, color=cvd_status,\n title='Height as it relates to weight and BMI')\n\n subplots_for_cholesterol_pie_charts = make_subplots(rows=1, cols=3,\n specs=[[{\"type\": \"pie\"}, {\"type\": \"pie\"}, {\"type\": \"pie\"}]])\n\n cholesterol_well_above_pie_chart = go.Pie(title=\"Cholesterol WELL Above Normal\",\n labels=['CVD Positive', 'CVD Negative'],\n values=[cholesterol_well_above_pos_cvd, cholesterol_well_above_neg_cvd]\n )\n\n cholesterol_above_pie_chart = go.Pie(title=\"Cholesterol Above Normal\",\n labels=['CVD Positive', 'CVD Negative'],\n values=[cholesterol_above_pos_cvd, cholesterol_above_neg_cvd]\n )\n\n cholesterol_normal_pie_chart = go.Pie(title=\"Cholesterol Normal\",\n labels=['CVD Positive', 'CVD Negative'],\n values=[cholesterol_normal_pos_cvd, cholesterol_normal_neg_cvd]\n )\n\n subplots_for_cholesterol_pie_charts.append_trace(cholesterol_well_above_pie_chart, row=1, col=1)\n subplots_for_cholesterol_pie_charts.append_trace(cholesterol_above_pie_chart, row=1, col=2)\n subplots_for_cholesterol_pie_charts.append_trace(cholesterol_normal_pie_chart, row=1, col=3)\n subplots_for_cholesterol_pie_charts.update_layout(title_text=\"Cholesterol level correlation to CVD status\")\n\n subplots_for_smoking_status = make_subplots(rows=1, cols=2,\n specs=[[{\"type\": \"pie\"}, {\"type\": \"pie\"}]])\n\n smoker_pie_chart = go.Pie(title=\"Smoker\",\n labels=['CVD Positive', 'CVD Negative'],\n values=[smoker_pos_cvd, smoker_neg_cvd]\n )\n\n non_smoker_pie_chart = go.Pie(title=\"Noon Smoker\",\n labels=['CVD Positive', 'CVD Negative'],\n values=[non_smoker_pos_cvd, non_smoker_neg_cvd]\n )\n\n subplots_for_smoking_status.append_trace(smoker_pie_chart, row=1, col=1)\n subplots_for_smoking_status.append_trace(non_smoker_pie_chart, row=1, col=2)\n subplots_for_smoking_status.update_layout(title_text=\"Smoking status correlation to CVD status\")\n\n # Dash dashboard layout\n dash_app.title = \"Dataset\"\n dash_app.layout = html.Div(id='dash-container', children=[\n\n # Bootstrap navbar\n html.Div(children=[\n html.Nav(className=\"navbar navbar-expand-lg navbar-light bg-light\", children=[\n html.Div(className=\"collapse navbar-collapse\", id=\"navbarSupportedContent\", children=[\n html.Ul(className=\"navbar-nav mr-auto\", children=[\n html.Li(className=\"nav-item active\", children=[\n html.A(\"Enter Vitals\", className=\"nav-link\", href=\"/determine_risk\")\n ]),\n html.Li(className=\"nav-item\", children=[\n html.A(\"Dataset\", className=\"nav-link\", href=\"#\")\n ])\n ]),\n html.A(\"Logout\", className=\"nav-link\", href=\"/logout\")\n ])\n ]),\n ]),\n\n html.Br(),\n html.H1('List of analysed data elements:'),\n # Horizontal bar graph for feature importance\n dcc.Graph(\n id='bar_graph',\n figure=importance_bar_graph_figure,\n className='graphBox'\n ),\n\n html.Br(),\n # Scatter plot for height and weight correlation\n dcc.Graph(\n id='scatter_plot',\n figure=height_and_weight_scatter_plot,\n className='graphBox'\n ),\n\n html.Br(),\n # Pie charts for cholesterol correlation\n dcc.Graph(\n id='pie_graph_set_1',\n figure=subplots_for_cholesterol_pie_charts,\n className='graphBox'\n ),\n # Pie charts for smoking correlation\n dcc.Graph(\n id='pie_graph_set_2',\n figure=subplots_for_smoking_status,\n className='graphBox'\n ),\n\n html.Br(),\n 
html.H1(\"Example list of records\"),\n # Displays head of dataset in readable format\n dash_table.DataTable(\n id='table',\n columns=[\n {\"name\": i, \"id\": i} for i in df.columns\n ],\n # title='Records',\n data=df[:20].to_dict('records'),\n style_cell_conditional=[\n {\n 'if': {'row_index': 'odd'},\n 'backgroundColor': 'rgb(248, 248, 248)'\n }\n ],\n style_header={\n 'backgroundColor': 'rgb(230, 230, 230)',\n 'fontWeight': 'bold',\n 'text-align': 'center'\n },\n\n css=[{\n 'selector': '.dash-spreadsheet-container',\n 'rule': 'border: 1px solid black; border-radius: 15px; overflow: hidden;'\n }]\n )\n ])\n\n return dash_app.server", "def main():\n app.run(host='127.0.0.1', port=443, debug=True)\n CORS(app)", "def main():\n\n cassandra_session = ConnectionPool(\n KEY_SPACE,\n [CASSANDRA_POOL],\n pool_size=1,\n )\n\n context = {\"cassandra_session\": cassandra_session,}\n\n application_tornado = tornado.web.Application([\n (\n r\"/api/1/simple-handler/(.*)\", \n SimpleHandler, \n context\n ),\n ])\n application_tornado.listen(8080)\n tornado.ioloop.IOLoop.current().start()", "def startserver(path):\n global urlpath\n urlpath = path\n app.run(debug=True, host='0.0.0.0', port=4444)", "def main():\n\n # TODO: more advanced argument processing\n\n # Handle port\n port = None\n if len(sys.argv) > 1:\n port_arg = sys.argv[1]\n try:\n port = int(port_arg[1:] if port_arg.startswith(':') else port_arg)\n except:\n pass\n\n try:\n serve(port=port)\n except ValueError, ex:\n # Show input error\n print 'Error:', ex", "def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))", "def webserver(\n *,\n host=None, port=None, nodebug=False,\n reload=False, load_dotenv=True\n):\n app = web.create_app()\n app.run(\n host=host, port=port,\n use_reloader=True,\n debug=(not nodebug),\n load_dotenv=load_dotenv)", "def run_simple_server(tb_app):\n # Mute the werkzeug logging.\n base_logging.getLogger('werkzeug').setLevel(base_logging.WARNING)\n\n try:\n server = serving.make_server(FLAGS.host, FLAGS.port, tb_app, threaded=True)\n server.daemon_threads = True\n except socket.error:\n if FLAGS.port == 0:\n msg = 'TensorBoard unable to find any open port'\n else:\n msg = (\n 'TensorBoard attempted to bind to port %d, but it was already in use'\n % FLAGS.port)\n logging.error(msg)\n print(msg)\n exit(-1)\n\n port = server.socket.getsockname()[1]\n msg = 'Starting TensorBoard %s at http://%s:%d' % (tb_app.tag, FLAGS.host,\n port)\n print(msg)\n logging.info(msg)\n print('(Press CTRL+C to quit)')\n sys.stdout.flush()\n\n server.serve_forever()", "def run_cache_dir(args):\n # Launch the Dash server.\n logger.info(\"Starting Dash web server on %s:%d\", args.host, args.port)\n if settings.CACHE_TYPE == \"filesystem\" and not settings.CACHE_DIR:\n with tempfile.TemporaryDirectory(prefix=\"scelvis.cache.\") as tmpdir:\n logger.info(\"Using cache directory %s\", tmpdir)\n settings.CACHE_DIR = tmpdir\n run_upload_dir(args)\n else:\n run_upload_dir(args)\n logger.info(\"Web server stopped. 
Have a nice day!\")", "def dashboard():\n return render_template('home/dashboard.html')", "def run(self):\n options = CPSERVER_OPTIONS\n server = CherryPyWSGIServer(\n (options['host'], int(options['port'])),\n WSGIPathInfoDispatcher({\n '/': WSGIHandler(),\n urlparse(settings.MEDIA_URL).path: MediaHandler(\n settings.MEDIA_ROOT),\n settings.ADMIN_MEDIA_PREFIX: MediaHandler(\n os.path.join(admin.__path__[0], 'media'))\n }),\n int(options['threads']), options['host'],\n request_queue_size=int(options['request_queue_size']))\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()", "def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)", "def start_server(self, app, **kwargs):\n\n # start server with app and pass Dash arguments\n self.server(app, **kwargs)\n\n # set the default server_url, it implicitly call wait_for_page\n self.server_url = self.server.url", "def dashboard():\n return render_template(\"home/dashboard.html\", title=\"Dashboard\")", "def main(config=None, wiki=None):\n\n config = config or read_config()\n wiki = wiki or Wiki(config)\n app = wiki.application\n\n host, port = (config.get('interface', '0.0.0.0'),\n int(config.get('port', 8080)))\n try:\n from cherrypy import wsgiserver\n except ImportError:\n try:\n from cherrypy import _cpwsgiserver as wsgiserver\n except ImportError:\n import wsgiref.simple_server\n server = wsgiref.simple_server.make_server(host, port, app)\n try:\n server.serve_forever()\n except KeyboardInterrupt:\n pass\n return\n name = wiki.site_name\n server = wsgiserver.CherryPyWSGIServer((host, port), app,\n server_name=name)\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()", "def server_it():\n\n app = flask.Flask(__name__, static_url_path='/static')\n\n app.route('/')(serve_index)\n\n\n @app.route('/brain/data/<path:path>')\n def serve_brain_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/brain/<path:path>')\n def serve_roygbiv_html(path):\n try:\n return flask.send_from_directory('brain', path)\n except Exception as e:\n import roygbiv\n viz_dir = os.path.join(os.path.dirname(roygbiv.__file__), 'web')\n return flask.send_from_directory(viz_dir, path)\n\n # GWAS app\n @app.route('/gwas/data/<path:path>')\n def serve_gwas_data(path):\n data_dir = 'generated/data'\n return flask.send_from_directory(data_dir, path)\n\n @app.route('/gwas/')\n @app.route('/gwas/index.html')\n def serve_default():\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, 'manhattan.html')\n\n @app.route('/gwas/<path:path>')\n def serve_gwas_html(path):\n import ping.viz\n viz_dir = os.path.dirname(ping.viz.__file__)\n man_dir = os.path.join(viz_dir, 'manhattan')\n return flask.send_from_directory(man_dir, path)\n\n # Scatter app\n @app.route('/plots/<path:path>')\n def serve_plot(path):\n return flask.send_from_directory('generated/plots', path)\n @app.route('/2015/<path:path>')\n def serve_old(path):\n return flask.send_from_directory('2015', path)\n app.debug = True\n app.run()", "def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except 
Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass", "def dashboard():\n # Get current user\n user = current_user\n # Get tip of the day\n tip = gdb.gettipofday()\n # Get current user Leaderboard Status\n leaderboard, current_user_info = gdb.getleaderboard(current_user.userID)\n weektopgainers, monthtopgainers = gdb.gettopgainers()\n # Render template\n return render_template('dashboard.html', user=user,\n leaderboard=leaderboard,\n userbalance=current_user.balance, tip=tip,\n current_user_info=current_user_info)", "def main():\n server = ThreadedServer(MasterControllerService, port=5000)\n server.start()", "def run(self):\n self.__rpc_server.run()", "def main():\n conn = pymongo.MongoClient(settings.DB_URI)\n database = conn[settings.DB_NAME]\n\n application = tornado.web.Application(\n [\n (r\"/\", BaseHandler),\n (r\"/upload\", UploadHandler),\n (r\"/web/([^/]+)\", WebHandler),\n ],\n database=database, secret=settings.SECRET, debug=settings.DEBUG, gzip=True,\n template_path=settings.TEMPLATE_PATH,\n static_path=settings.STATIC_PATH\n )\n application.cache = {}\n\n logging.info(\"starting bbgps...\")\n application.listen(settings.PORT)\n tornado.ioloop.IOLoop.instance().start()", "def run(self):\n run_simple(self.hostname, self.port, self.dispatch,\n use_reloader=self.debug)", "def server():", "def server():", "def create_dashboard(server):\n dash_app = Dash(server=server,\n routes_pathname_prefix='/explodash/',\n external_stylesheets=['/static/css/main.css']\n )\n\n\n # Create Dash Layout\n header = create_header_layout()\n body = create_body_layout(dash_app)\n footer = create_footer_layout()\n\n dash_app.layout = html.Div(id='dash-container', children=[header, body, footer])\n\n return dash_app.server", "def run(self):\n\n for url in self.urls:\n try:\n # Use requests to retrieve web page data\n print(url)\n response = session.get(url, ) # allow_redirects=True)\n\n if response.status_code != 200:\n print('Failed to retrieve page, URL: {0}, error: {1}\\n'.format(url, response.status_code))\n return\n\n # Get web page data from HTML response\n content = get_json_data(response.text)\n\n # Compile data into dictionary to be used for reporting\n summary_data = generate_report(content)\n\n # Generate/print report\n print_report(summary_data)\n\n except Exception as error:\n print('Scraper failed to run for URL {0}, error: {1}, {2}\\n'.format(\n url, type(error).__name__, error\n ))\n\n # time.sleep(1) # for load concerns", "def dashboard():\n return render_template('home/dashboard.html', title=\"Dashboard\")", "def start(self):\n isDefault = SettingsBase.get_setting(self, 'use_default_httpserver')\n if not globals().has_key('Callback'):\n isDefault = False\n\n if isDefault:\n self._cb_handle = Callback(self.cb)\n print (\"Web(%s): using web page %s and\"\n \" using digiweb\") % (self.__name, self.get_page())\n else:\n self._cb_handle = self.get_channels\n try:\n port = SettingsBase.get_setting(self, 'port')\n print (\"Web Presentation (%s): using port %d \"\n \"and BaseHTTPServer\") % (self.__name, port)\n HTTPServer.__init__(self, ('', port), WebRequestHandler)\n except Exception:\n traceback.print_exc()\n self.socket.close()\n # Only start a thread if the Python web-server is\n # used:\n threading.Thread.start(self)", "def server():\n package('apache2')\n require_started('apache2')", "def run(port: int = 8080):\n current_ioloop = ioloop.IOLoop.current()\n\n # Start the web server.\n app = create_app()\n 
logging.info('starting server: localhost:{}'.format(port))\n server = httpserver.HTTPServer(app)\n\n try:\n server.listen(port)\n current_ioloop.start()\n except KeyboardInterrupt:\n pass\n finally:\n current_ioloop.stop()", "def index():\n global _is_healthy\n return render_template('index.html',\n hostname=gethostname(),\n zone=_get_zone(),\n template=_get_template(),\n healthy=_is_healthy,\n working=_is_working())", "def run(port):\n print \"========= SAND conformance server =============\"\n print \"-----------------------------------------------\"\n import os\n if os.environ.get('PORT') is not None:\n port = int(os.environ['PORT'])\n APP.run(port=port)" ]
[ "0.7217987", "0.7090126", "0.6873404", "0.6859352", "0.6825107", "0.6763319", "0.67429936", "0.6691408", "0.6641341", "0.655142", "0.6534171", "0.6508218", "0.6484628", "0.64228106", "0.6421459", "0.64109594", "0.6343112", "0.63255435", "0.63101995", "0.63063806", "0.6303844", "0.62811935", "0.62811935", "0.62780017", "0.62584925", "0.62482303", "0.6246182", "0.6237841", "0.622257", "0.6209287", "0.62088084", "0.62078774", "0.62032604", "0.62032604", "0.61653227", "0.6144756", "0.61437595", "0.6143405", "0.61424565", "0.6134621", "0.6133734", "0.6133734", "0.6122257", "0.61172634", "0.61138976", "0.6108833", "0.60984945", "0.6090712", "0.60870844", "0.60865027", "0.60743886", "0.60617423", "0.6042022", "0.6038807", "0.60363716", "0.6024075", "0.6022818", "0.60163873", "0.60067874", "0.5995424", "0.59905785", "0.59849507", "0.5979902", "0.5971651", "0.5957591", "0.5957144", "0.5940849", "0.5940613", "0.5934706", "0.59231335", "0.59155345", "0.5912134", "0.5902479", "0.58962387", "0.5893595", "0.5889808", "0.5884724", "0.5872784", "0.5871711", "0.58661807", "0.58647424", "0.5863195", "0.5854243", "0.5837728", "0.58236545", "0.58212817", "0.58193415", "0.5818051", "0.5814641", "0.58111316", "0.5810968", "0.5807489", "0.5807489", "0.5806325", "0.57964027", "0.5796311", "0.5792726", "0.5788283", "0.5781801", "0.57771736", "0.57755166" ]
0.0
-1
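A minimal sketch of the port-retry pattern shown in the positive document of the record above, assuming a Flask-style app object; the name `run_with_retry` and its defaults are illustrative stand-ins, not fields of the dataset:

import logging

logger = logging.getLogger(__name__)

def run_with_retry(app, host='localhost', port=8888, max_retries=5):
    # Mirrors the record's positive document: on a bind failure
    # (OSError), log the error and retry on the next port number.
    for _ in range(max_retries):
        try:
            app.run(host, port)  # blocks while the server is up
            break
        except OSError as error:
            logger.warning(error)
            port += 1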
Override this method for custom job titles. This method generates job titles. By default, the title is a pretty (but verbose) form of the job state point, based on the project schema.
def job_title(self, job): def _format_num(num): if isinstance(num, bool): return str(num) elif isinstance(num, Real): return str(round(num, 2)) return str(num) try: s = [] for keys in sorted(self._schema_variables()): v = job.statepoint()[keys[0]] try: for key in keys[1:]: v = v[key] except KeyError: # Particular key is present in overall continue # schema, but not this state point. else: s.append('{}={}'.format('.'.join(keys), _format_num(v))) return ' '.join(s) except Exception as error: logger.debug( "Error while generating job title: '{}'. " "Returning job-id as fallback.".format(error)) return str(job)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_job_title(self, job_name):\n return ''", "def _make_title(self):\n ret = self.properties['reason'].capitalize()\n ret += ' has been reported near ' + self.properties['address'].split(',')[0]\n time = datetime.strptime(self.properties['when'], '%Y-%m-%dT%H:%M:%S')\n times = [time.strftime(i).lstrip('0') for i in ('%m', '%d', '%I:%M%p')]\n ret += ' on {}/{} at {}'.format(times[0], times[1], times[2])\n return ret", "def title(self) -> str:\n raise NotImplementedError", "def title(self) -> str:\n pass", "def job_title(self):\n if \"jobTitle\" in self._prop_dict:\n return self._prop_dict[\"jobTitle\"]\n else:\n return None", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))", "def get_title(self) -> str:\n pass", "def html_title(self, title=None):\r\n if title is None:\r\n return \"<title>PyBossa</title>\"\r\n else:\r\n return \"<title>PyBossa &middot; %s</title>\" % title", "def get_title():", "def getTaskTitle(self) -> unicode:\n ...", "def getTaskTitle(self) -> unicode:\n ...", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def __str__(self):\n return \"{title}\".format(title=self.title)", "def getTitle(self): #$NON-NLS-1$\r", "def getTitle(self): #$NON-NLS-1$\r", "def job_subtitle(self, job):\n return str(job)[:max(8, self._project_min_len_unique_id())]", "def name_with_title(self):\n return \"%s %s\" % (self.title, self.name)", "def _prettyfilename(self):\n return self.title", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def _get_full_title(self):\n return \"%s - %s %d\" % (self.title, _('Season'), self.season)", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def _defaultSyncTitle(self):\n return f'{self.grandparentTitle} - {self.parentTitle} - ({self.seasonEpisode}) {self.title}'", "def title(self):\n return self.definition.title", "def _defaultSyncTitle(self):\n return f'{self.parentTitle} - {self.title}'", "def Title(self, **kwargs):\n full_name = ''\n if self.getFirstname() == '' or self.getLastname() == '':\n if not self.getOrganization():\n return '...'\n else:\n return self.getOrganization()\n format = kwargs.get('format', None)\n if format == 'natural':\n full_name = '%s %s' % (self.getFirstname(), self.getLastname())\n else:\n full_name = '%s %s' % (self.getLastname(), self.getFirstname())\n return '%s' % full_name", "def short_title(self):\n if hasattr(self, \"title\"):\n return self.title\n else:\n return \"\"", "def title(self):\n return self.__title", "def title(self):\n return self.__title", "def title(self):\n return self.__title", "def create_title(title, year=None, time_step=None, base=0, interval=None,\n gage=None, m=None, h=None):\n if type(gage) is list or type(gage) is tuple:\n title = title + ' at listed gages'\n elif gage is not None:\n title = title + ' at 
'+ gage\n \n if m is not None:\n title = title + ' for Month {mo} of'.format(mo=m)\n elif h is not None:\n title = title + ' for Hour {ho} of'.format(ho=h) \n elif interval is 'seasonal':\n title = title + ' for Months of'\n elif interval is 'diurnal':\n title = title + ' for Hours of'\n if time_step is not None:\n ts = time_step.replace('min', ' minute').replace('T', ' minute').replace('H', ' hour').replace('D', ' day')\n title = title.format(ts=ts)\n if year is not None:\n title = title +' '+ year\n return title", "def longTitle(self, newLongTitle=None):\n pass", "def title(self):\n return ' '.join(self._title)", "def style_title(self) -> str:\n style_title = \"\"\".title\n {margin-bottom: 10px}\\n\"\"\"\n self.html_doc = self.html_doc + style_title\n return self.html_doc", "def Show_Titles( self ):\r\n self.system.Change_Seq( \"Title\" )", "def Title(self, default={}):\n return HEP.TitleObject(self.data.get('title', default))", "def __draw_title(self):\n if self.title is not None:\n self.fig.suptitle(\n self.title, y=self.settings.otherParams[\"figure.title.yposition\"])", "def title(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"title\")", "def get_title(self):\n return \"{id}@{hn}\".format(id=self.model.identity, hn=self.model.hostname)", "def title(self) -> str:\r\n return self._title", "def shortTitle(self, newShortTitle=None):\n pass", "def configured_title(self):\n return self.get('title', self.DEFAULT_SPACE_TITLE)", "def set_title(self, title):\n\t\tpass", "def _build_title(db, place):\n descr = place.get_title()\n location = get_main_location(db, place)\n parish = location.get(PlaceType.PARISH)\n city = location.get(PlaceType.CITY)\n state = location.get(PlaceType.STATE)\n title_descr = \"\"\n if descr:\n title_descr += descr.strip()\n if parish:\n title_descr += ', ' + parish.strip() + _(\" parish\")\n if city:\n title_descr += ', ' + city.strip()\n if state:\n title_descr += ', ' + state.strip() + _(\" state\")\n return _strip_leading_comma(title_descr)", "def _update_title(self, title, tag, lid):\n return title", "def title(self, title):\n\t\tself.head += '<title>' + title + '</title>\\n'", "def Title(self):\n return self.title", "def title(self) -> String:\n pass", "def _get_title_tag(self, item):\n tag = '<{heading}><a href=\"{href}\">{title}</a></{heading}>'\n if self._field_is_visible(\"title\"):\n tile_conf = self.get_tile_configuration()\n title_conf = tile_conf.get(\"title\", None)\n if title_conf:\n heading = title_conf.get(\"htmltag\", \"h2\")\n href = item.absolute_url()\n title = item.Title()\n return tag.format(heading=heading, href=href, title=title)", "def inclusive_title(self):\n return self.title + (\" %s\" % (self.episode_to_string(self.latest_season, self.latest_episode),) if self.is_series() else \"\")", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def title(self) -> str:\n\t\t# pylint: disable=unsubscriptable-object\n\t\treturn self.value[1]", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def set_title(self):\n if self.currentconfig is None:\n self.setWindowTitle(\"(No config)\")\n elif self.cfname is None:\n self.setWindowTitle(\"Working config\")\n else:\n filename = miscutils.removesuffix(os.path.basename(self.cfname)).upper()\n self.setWindowTitle(\"Processing - \" + filename)", "def get_title(self):\n return self.title", "def get_title(self):\n return self.title", "def 
get_title(self):\n return self.title", "def title_p(self):\n self.run_command('title_p')", "def getTitle(self):\n return self.__title__", "def numbered_title(self):\n return f\"Chapter {self.title}\"", "def title(self) -> str:\n return pulumi.get(self, \"title\")", "def title(self) -> str:\n return pulumi.get(self, \"title\")", "def title(self) -> str:\n return pulumi.get(self, \"title\")", "def title(self, title):\n\n self.container['title'] = title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self) -> str:\n return self._title", "def title(self):\n\n return self._title", "def header(self):\n return \"Step {}: {}\".format(\".\".join(str(e) for e in self._id), self.title)", "def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'", "def get_title(self):\n\n return self.title", "def title(self, value):\n self.definition.title = value", "def printTitle(self, data):\r\n\t#try:\r\n #\twx.CallLater(1800, lambda x: x.SetTitle(self.title), self)\r\n\t#except:\r\n\t#\treturn\r\n #self.SetTitle(data)\r\n pass", "def get_context_data(self, **kwargs):\n context = super(BaseJobMixin, self).get_context_data(**kwargs)\n context['form_title'] = self.form_title\n return context", "def prep_titles(self, cost_title: str=\"\") -> (str, str):\n img_title = self.function_name + \\\n '_batch' + str(self.batch_size)\n\n if cost_title == \"\":\n img_title = str(self.experiment_count) + '_accuracy_plot_' + img_title\n title = self.title + \\\n '\\n' + self.function_name + \", \" + \\\n 'mini-batch size: ' + str(self.batch_size) + \\\n '\\nAvg Last 10 Epochs: Training ' + self.tr_mean_str + '%, Testing ' + self.test_mean_str + '%'\n else:\n img_title = str(self.experiment_count) + '_cost_plot_' + img_title\n title = cost_title\n\n print(f'\\nexperiment: {img_title}')\n return title, img_title", "def Title(self, separator=u' / ', first_index=0):\n if self.position is None: # when created by transmogrifier by example\n return self.getId()\n position = self.position.to_object\n if position is None: # the reference was removed\n return self.getId()\n\n position = self.get_position()\n organization = self.get_organization()\n label = self.get_label()\n if position is None and not label:\n return \"(%s)\" % organization.get_full_title(separator=separator, first_index=first_index).encode('utf8')\n # we display the position title or the label\n position_title = label or position.title\n return \"%s (%s)\" % (position_title.encode('utf8'),\n organization.get_full_title(separator=separator, first_index=first_index).encode('utf8'))", "def title(self):\n return self['title']", "def set_title(self):\n plt.title(label=self.title, fontsize=self.titlesize)", "def setTitle(self, meta):\n\n title = ''\n try:\n title += meta['date'] + ' '\n except KeyError:\n pass\n try:\n title += meta['time'] + ' '\n except KeyError:\n pass\n try:\n title += meta['trial']\n except KeyError:\n pass\n\n meta['title'] = title.strip()", "def TitlePrint(title):\n titleLength = len(title)\n barLength = titleLength + 12\n fmtdTitle = '----- {0} -----'.format(title)\n bar = '-' * barLength\n print(bar, fmtdTitle, bar,\n sep='\\n', end='\\n\\n')", "def get_title(self):\n title = self.title\n if not title and self.parent_id:\n title = self.parent.title\n return 
title", "def __report_title_style(self):\n font = FontStyle()\n font.set(face=FONT_SANS_SERIF, size=16, bold=1)\n para = ParagraphStyle()\n para.set_font(font)\n para.set_header_level(1)\n para.set_top_margin(0.25)\n para.set_bottom_margin(0.25)\n para.set_alignment(PARA_ALIGN_CENTER) \n para.set_description(_('The style used for the title of the report.'))\n self.default_style.add_paragraph_style(\"PLC-ReportTitle\", para)", "def get_title(self):\n if not hasattr(self, '_title'):\n self._title = 'NO TITLE'\n if self._title:\n title = _(self._title)\n title = title.replace('&', '&amp;') \n title = title.replace('\"', '&quot;')\n return title\n else:\n return u''", "def title(self, val):\n self.set_property(\"Title\", val)", "def title(self):\n return self.run_command('title')[0]", "def format_title(self, data):\n return data", "def __str__(self) -> str:\n return f\"{self.analysis.title} v{self.title}\"", "def get_title(self):\n return self._get_title_()", "def title(self):\n if self._title is None:\n if Path(self.rst_path).exists():\n self._title = self.get_title_from_rst()\n elif Path(self.ipynb_path).exists():\n self._title = self.get_title_from_ipynb()\n else:\n pass\n return self._title", "def cli_set_process_title():\n raise NotImplementedError()", "def numbered_title(self):\n return f\"Appendix {self.chapter}. {self.title}\"", "def pretty_title(title):\n output = '-' * 5 + ' ' + title + ' ' + '-' * 5\n return output", "def test_job_title(self):\n inv_search = 'title:engineer not title:programmer'\n spi_search = 'find job engineer not position programmer'\n self._compare_searches(inv_search, spi_search)", "def title(self):\n return self._title", "def title(self):\n return self._title" ]
[ "0.6774112", "0.6684482", "0.6545176", "0.65105885", "0.64988434", "0.6343814", "0.6343415", "0.6338762", "0.62752765", "0.6266623", "0.6256214", "0.6256214", "0.6229947", "0.6229947", "0.6226944", "0.61802864", "0.61802864", "0.6152143", "0.6079925", "0.60740346", "0.6057321", "0.6056076", "0.6050757", "0.60467494", "0.6034354", "0.60245293", "0.60205936", "0.60204035", "0.6003444", "0.6003444", "0.6003444", "0.59908956", "0.59769815", "0.59472424", "0.5941167", "0.59205216", "0.59100044", "0.5906888", "0.59003454", "0.58988863", "0.5888942", "0.58843297", "0.58838445", "0.5867494", "0.5864448", "0.585998", "0.58551615", "0.5853948", "0.5849389", "0.58442885", "0.58425015", "0.5839908", "0.5838514", "0.5837746", "0.58375174", "0.58354264", "0.58354264", "0.58354264", "0.58347577", "0.5834557", "0.58313024", "0.58198893", "0.58198893", "0.58198893", "0.58114284", "0.5809451", "0.5809451", "0.5809451", "0.5809451", "0.5809451", "0.5809451", "0.5809451", "0.58006537", "0.57896954", "0.57891047", "0.5787343", "0.57865816", "0.5772216", "0.5769964", "0.5762592", "0.5748732", "0.57454264", "0.57434374", "0.57391834", "0.57387435", "0.57376283", "0.5724189", "0.571456", "0.5708411", "0.5706279", "0.57031935", "0.5700579", "0.56944", "0.5688736", "0.5681867", "0.5681048", "0.56789833", "0.5670112", "0.5667611", "0.5667611" ]
0.756392
0
Override this method for custom job subtitles. This method generates job subtitles. By default, the subtitle is a minimal unique substring of the job id.
def job_subtitle(self, job): return str(job)[:max(8, self._project_min_len_unique_id())]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subtitle(self, txt):\n num = len(txt)\n ticks = \"-\" * num\n print(txt)\n print(ticks)", "def getSubtitleURL(self):\n\n # If it is a movie, we use this methodology -\n try:\n IndexingParameters = [\"subtitleUrls\", 0, \"url\"]\n TitleParamters = [\n \"catalogMetadata\", \"catalog\", \"title\", \"episodeNumber\"]\n subRequestObject = requests.get(self.subtitleURLContainer)\n\n parsedJsonObject = json.loads(str(subRequestObject.text))\n SubsURL = parsedJsonObject[IndexingParameters[0]][\n IndexingParameters[1]][IndexingParameters[2]]\n if self.title == \"Amazonsubtitles\":\n try:\n self.title = parsedJsonObject[TitleParamters[0]][TitleParamters[1]][TitleParamters[2]] + \"_\" + str(\n parsedJsonObject[TitleParamters[0]][TitleParamters[1]][TitleParamters[3]])\n except:\n pass\n\n return SubsURL\n\n except:\n pass\n pass", "def get_subtitle_print(subs: List[Track]) -> List[str]:\n data = []\n if not subs:\n data.append(\"--\")\n for sub in subs:\n line_items = []\n\n # following sub.title tree checks and supports three different language and title scenarios\n # The second scenario is the recommended option to choose if you are open to choosing any\n # The third scenario should be used if you have nothing unique to state about the track\n # | Language | Track Title | Output |\n # | ------------ | ----------------------------- | --------------------------------------------- |\n # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) |\n # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) |\n # | es / Spanish | None | - Spanish, SubRip (SRT) |\n language = pycountry.languages.get(alpha_2=sub.language).name\n if sub.title:\n if language.lower() in sub.title.lower():\n line_items.append(sub.title)\n else:\n line_items.append(f\"{language}, {sub.title}\")\n else:\n line_items.append(language)\n\n line_items.append(sub.format.replace(\"UTF-8\", \"SubRip (SRT)\"))\n\n line = \"- \" + \", \".join(line_items)\n data += [\n (\" \" + x if i > 0 else x)\n for i, x in enumerate(textwrap.wrap(line, 64))\n ]\n return data", "def create_subtitle(self):\n label_subtitle = Label(self.frame, text=\"Projet Python 2020\", font=(\"Arial\", 25), bg='light blue',\n fg='white')\n label_subtitle.pack()", "def get_subtitle(annotation, sub_duration, video_clip, seen_annotations):\n if len(annotation[\"text\"]) == 0:\n return None\n\n annotation_txt = calculate_needed_subtitle_height(annotation, seen_annotations, video_clip)\n\n txt_clip = TextClip(annotation_txt, color=\"white\", fontsize=70, font='Sans Serif')\n txt_clip = txt_clip.set_position((\"center\", get_subtitle_offset(annotation, seen_annotations, video_clip)))\n txt_clip = txt_clip.set_start(float(annotation[\"time\"]) / 1000.0)\n txt_clip = txt_clip.set_duration(sub_duration)\n\n return txt_clip", "def get_job_title(self, job_name):\n return ''", "def job_title(self, job):\n def _format_num(num):\n if isinstance(num, bool):\n return str(num)\n elif isinstance(num, Real):\n return str(round(num, 2))\n return str(num)\n\n try:\n s = []\n for keys in sorted(self._schema_variables()):\n v = job.statepoint()[keys[0]]\n try:\n for key in keys[1:]:\n v = v[key]\n except KeyError: # Particular key is present in overall\n continue # schema, but not this state point.\n else:\n s.append('{}={}'.format('.'.join(keys), _format_num(v)))\n return ' '.join(s)\n except Exception as error:\n logger.debug(\n \"Error while generating job title: '{}'. 
\"\n \"Returning job-id as fallback.\".format(error))\n return str(job)", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('source_path', help=\"Path to the video or audio file to subtitle\",\n nargs='?')\n parser.add_argument('-C', '--concurrency', help=\"Number of concurrent API requests to make\",\n type=int, default=DEFAULT_CONCURRENCY)\n parser.add_argument('-o', '--output',\n help=\"Output path for subtitles (by default, subtitles are saved in \\\n the same directory and name as the source path)\")\n parser.add_argument('-F', '--format', help=\"Destination subtitle format\",\n default=DEFAULT_SUBTITLE_FORMAT)\n parser.add_argument('-S', '--src-language', help=\"Language spoken in source file\",\n default=DEFAULT_SRC_LANGUAGE)\n parser.add_argument('-D', '--dst-language', help=\"Desired language for the subtitles\",\n default=DEFAULT_DST_LANGUAGE)\n parser.add_argument('-K', '--api-key',\n help=\"The Google Translate API key to be used. \\\n (Required for subtitle translation)\")\n parser.add_argument('--list-formats', help=\"List all available subtitle formats\",\n action='store_true')\n parser.add_argument('--list-languages', help=\"List all available source/destination languages\",\n action='store_true')\n\n parser.add_argument('--min_height', help=\"minimum height from 0 - 100%\", type=float, default=93)\n\n parser.add_argument('--max_height', help=\"maximum height from 0 - 100%\", type=float, default=99)\n\n parser.add_argument('--l_v', help=\"Light sensitive\", type=float, default=210)\n\n parser.add_argument('--debug', help=\"Allows to show cropped image on the desktop\", action='store_true', default=True)\n\n parser.add_argument('--cloud', help=\"Use google cloud compute to extract text\", action='store_true', default=False)\n\n parser.add_argument('--disable_time', help=\"Parse time function\", action='store_true')\n\n parser.add_argument('--all', help=\"Render all files\", action='store_true')\n\n args = parser.parse_args()\n\n if args.list_formats:\n print(\"List of formats:\")\n for subtitle_format in FORMATTERS:\n print(\"{format}\".format(format=subtitle_format))\n return 0\n\n if args.list_languages:\n print(\"List of all languages:\")\n for code, language in sorted(LANGUAGE_CODES.items()):\n print(\"{code}\\t{language}\".format(code=code, language=language))\n return 0\n\n if not validate(args):\n return 1\n\n try:\n if args.all:\n for file in os.listdir():\n # *.avi *.flv *.mkv *.mpg *.mp4 *.webm\n if file.endswith('.avi') or file.endswith('.flv') or file.endswith('.mkv') or file.endswith('.mpg') or file.endswith('.mp4') or file.endswith(\".webm\"):\n st = time.time()\n subtitle_file_path = generate_subtitles(\n source_path=file,\n dst_language=args.dst_language,\n output=args.output,\n debug=args.debug,\n cloud=args.cloud,\n disable_time=args.disable_time,\n min_height=args.min_height,\n max_height=args.max_height,\n l_v=args.l_v,\n )\n print(\"Subtitles file created at {} time consumer: {}\".format(subtitle_file_path, time.time() - st))\n else:\n st = time.time()\n subtitle_file_path = generate_subtitles(\n source_path=args.source_path,\n dst_language=args.dst_language,\n output=args.output,\n debug=args.debug,\n cloud=args.cloud,\n disable_time=args.disable_time,\n min_height=args.min_height,\n max_height=args.max_height,\n l_v=args.l_v,\n )\n print(\"Subtitles file created at {} time consumer: {}\".format(subtitle_file_path, time.time() - st))\n except KeyboardInterrupt:\n return 1\n\n return 0", "def _get_job_id(self) -> str:\n return 
self.split_name[2][3:]", "def write_subtitle(self, subtitle: str, break_page: bool, class_txt: str) -> str:\n if break_page:\n str_title = \"\"\"<h2 class=\"break-before\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n else:\n str_title = \"\"\"<h2 class=\\\"\"\"\" + class_txt + \"\"\"\\\">\"\"\" + subtitle + \"\"\"</h2>\\n\"\"\"\n self.html_doc = self.html_doc + str_title\n return self.html_doc", "def describe_text_translation_job(JobId=None):\n pass", "def subtitle(self):\n worksheet_type = self.options[\"worksheet_type\"].value\n return \"{} - {}\".format(WORKSHEET_OPTIONS[worksheet_type], super().subtitle)", "def friendly_id(self):\n id = f\"{self.annotator_id}_{self.document_title.split('_')[0]}\"\n\n try: # try making an sentence identifier if there is an in_sentence attrib\n sen_id = \",\".join(str(se.element_id + 1) for se in self.in_sentence)\n id += f\"_s{sen_id}\"\n except Exception as e:\n print(e)\n pass\n\n if isinstance(self, Event):\n id += f\"_{self.event_fulltype}\"\n elif isinstance(self, Participant) or isinstance(self, Filler):\n id += f\"_{self.role}\"\n\n text_ellips = (\n (self.text[:15] + \"..\" + self.text[-15:])\n if len(self.text) > 32\n else self.text\n )\n id += f\"-{text_ellips}\"\n return id", "def create_job_id() -> str:\n return str(uuid.uuid1())", "def _get_job_id(self):\n return uuid.uuid4().hex", "def longTitle(self, newLongTitle=None):\n pass", "def _job_id(resource_uuid: str) -> str:\n return resource_uuid if \".\" in resource_uuid else f\"{resource_uuid}.0\"", "def _generate_job_id():\n # CAIP job id can contains only numbers, letters and underscores.\n unique_tag = str(uuid.uuid4()).replace(\"-\", \"_\")\n return \"tf_cloud_train_{}\".format(unique_tag)", "def disable_subtitle(self):\n (\n _,\n __,\n part,\n ) = self._get_current_media()\n part.resetDefaultSubtitleStream()\n self._reset_playback()", "def getSubtitlesContainer(self):\n self.subtitleURLContainer = \"\"\n\n self.subtitleURLContainer += self.parametersDict['PreURL']\n\n for parameters in self.parametersDict:\n if parameters != \"PreURL\":\n self.subtitleURLContainer += \"&\"\n self.subtitleURLContainer += parameters\n self.subtitleURLContainer += \"=\"\n self.subtitleURLContainer += self.parametersDict[parameters]\n pass", "def get_subtitles(self, index: int):\n\n match = self.re_subs[index - 1]\n start = convert_subs_time(match[1])\n end = convert_subs_time(match[2])\n subtitles = match[3]\n subtitles = clean_text(subtitles)\n\n return (subtitles, start, end)", "def create_subtitles(self):\n\n result, selected_observations = self.selectObservations(MULTIPLE)\n if not selected_observations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selected_observations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"The observations with UNPAIRED state events will be removed from the plot<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n selected_observations = [x for x in selected_observations if x not in 
not_paired_obs_list]\n if not selected_observations:\n return\n\n parameters = self.choose_obs_subj_behav_category(selected_observations, 0)\n if not parameters[\"selected subjects\"] or not parameters[\"selected behaviors\"]:\n return\n export_dir = QFileDialog(self).getExistingDirectory(self, \"Choose a directory to save subtitles\",\n os.path.expanduser(\"~\"),\n options=QFileDialog(self).ShowDirsOnly)\n if not export_dir:\n return\n ok, msg = project_functions.create_subtitles(self.pj, selected_observations, parameters, export_dir)\n if not ok:\n logging.critical(msg)\n QMessageBox.critical(None, programName, msg, QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)", "def to_srt(self, subtitles):\n \n srt_data = ''\n subtitle_num = self.start_index\n for subtitle in subtitles:\n subtitle_num += 1\n \n offset = self.start_time\n \n start_time = self._ms_to_time(subtitle['start_time'] + offset)\n end_time = self._ms_to_time(subtitle['end_time'] + offset)\n \n content = subtitle['content'].replace('<br>', ' ')\n \n srt_data += str(subtitle_num) + '\\r\\n'\n srt_data += '%s --> %s' % (start_time, end_time) + '\\r\\n'\n srt_data += content + '\\r\\n'\n srt_data += '\\r\\n'\n \n self.end_index = subtitle_num\n \n return srt_data", "def replace_job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"replace_job_id\")", "def get_subtitles(self, title):\n return library.subtitles.get_subtitle_url(title)", "def schedule_text():", "def build_transcript(speaker_label_transcript):\n with open('main_transcript.txt', 'a') as the_file:\n for t in speaker_label_transcript:\n the_file.write(f\"{t['speaker']}:\\n\")\n the_file.write(f\"{t['content']}\\n\\n\")", "def __init__(self, subtitle=None):\n self.subtitle = subtitle\n if not self.subtitle:\n self.parse_subtitles()", "def convert_to_tvr_subtitle(df: pd.DataFrame) -> pd.DataFrame:\n\tpass", "def manage_video_subtitles_save(item, user, old_metadata=None, generate_translation=False):\r\n\r\n _ = item.runtime.service(item, \"i18n\").ugettext\r\n\r\n # 1.\r\n html5_ids = get_html5_ids(item.html5_sources)\r\n possible_video_id_list = [item.youtube_id_1_0] + html5_ids\r\n sub_name = item.sub\r\n for video_id in possible_video_id_list:\r\n if not video_id:\r\n continue\r\n if not sub_name:\r\n remove_subs_from_store(video_id, item)\r\n continue\r\n # copy_or_rename_transcript changes item.sub of module\r\n try:\r\n # updates item.sub with `video_id`, if it is successful.\r\n copy_or_rename_transcript(video_id, sub_name, item, user=user)\r\n except NotFoundError:\r\n # subtitles file `sub_name` is not presented in the system. 
Nothing to copy or rename.\r\n log.debug(\r\n \"Copying %s file content to %s name is failed, \"\r\n \"original file does not exist.\",\r\n sub_name, video_id\r\n )\r\n\r\n # 2.\r\n if generate_translation:\r\n for lang, filename in item.transcripts.items():\r\n item.transcripts[lang] = os.path.split(filename)[-1]\r\n\r\n # 3.\r\n if generate_translation:\r\n old_langs = set(old_metadata.get('transcripts', {})) if old_metadata else set()\r\n new_langs = set(item.transcripts)\r\n\r\n for lang in old_langs.difference(new_langs): # 3a\r\n for video_id in possible_video_id_list:\r\n if video_id:\r\n remove_subs_from_store(video_id, item, lang)\r\n\r\n reraised_message = ''\r\n for lang in new_langs: # 3b\r\n try:\r\n generate_sjson_for_all_speeds(\r\n item,\r\n item.transcripts[lang],\r\n {speed: subs_id for subs_id, speed in youtube_speed_dict(item).iteritems()},\r\n lang,\r\n )\r\n except TranscriptException as ex:\r\n item.transcripts.pop(lang) # remove key from transcripts because proper srt file does not exist in assets.\r\n reraised_message += ' ' + ex.message\r\n if reraised_message:\r\n item.save_with_metadata(user)\r\n raise TranscriptException(reraised_message)", "def get_job_name(self) -> Text:\n return self._job_name", "def enable_subtitle(self, subtitle):\n self._change_track(subtitle)", "def getTaskTitle(self) -> unicode:\n ...", "def getTaskTitle(self) -> unicode:\n ...", "def subtitle(string):\n print(\"{}\\n{}\\n\".format(bold(string), underline(string, \"-\")))", "def _var_id_sub(self, sprintf):\n id_list = map(lambda x: self.cdict[x][1], sprintf[\"vars\"] )\n return sprintf[\"text\"] % tuple(id_list)", "def submit_job(jobscript, qsub_settings):\n flatten = lambda l: [item for sublist in l for item in sublist]\n batch_options = flatten([sge_option_string(k,v).split() for k, v in qsub_settings[\"options\"].items()])\n batch_resources = flatten([sge_resource_string(k, v).split() for k, v in qsub_settings[\"resources\"].items()])\n try:\n # -terse means only the jobid is returned rather than the normal 'Your job...' 
string\n jobid = subprocess.check_output([\"qsub\", \"-terse\"] + batch_options + batch_resources + [jobscript]).decode().rstrip()\n except subprocess.CalledProcessError as e:\n raise e\n except Exception as e:\n raise e\n return jobid", "def add_time_to_title( self, title ):\n begin = self.begin; end = self.end\n if 'span' in self.metadata:\n interval = self.metadata['span']\n elif 'given_kw' in self.metadata and 'span' in self.metadata['given_kw']:\n interval = self.metadata['given_kw']['span']\n else:\n interval = self.time_interval( )\n formatting_interval = self.time_interval()\n if formatting_interval == 600:\n format_str = '%H:%M:%S'\n elif formatting_interval == 3600:\n format_str = '%Y-%m-%d %H:%M'\n elif formatting_interval == 86400:\n format_str = '%Y-%m-%d'\n elif formatting_interval == 86400*7:\n format_str = 'Week %U of %Y'\n\n if interval < 600:\n format_name = 'Seconds'\n time_slice = 1\n elif interval < 3600 and interval >= 600:\n format_name = 'Minutes'\n time_slice = 60\n elif interval >= 3600 and interval < 86400:\n format_name = 'Hours'\n time_slice = 3600\n elif interval >= 86400 and interval < 86400*7:\n format_name = 'Days'\n time_slice = 86400\n elif interval >= 86400*7:\n format_name = 'Weeks'\n time_slice = 86400*7\n else:\n format_str = '%x %X'\n format_name = 'Seconds'\n time_slice = 1\n\n begin_tuple = time.gmtime(begin); end_tuple = time.gmtime(end)\n added_title = '\\n%i %s from ' % (int((end-begin)/time_slice), format_name)\n added_title += time.strftime('%s to' % format_str, begin_tuple)\n if time_slice < 86400:\n add_utc = ' UTC'\n else:\n add_utc = ''\n added_title += time.strftime(' %s%s' % (format_str, add_utc), end_tuple)\n return title + added_title", "def jobName(self):\n return f\"{self.config.pipeline.name}-pypette\"[:12]", "def generateUniqueId(context):\n\n fn_normalize = getUtility(IFileNameNormalizer).normalize\n id_normalize = getUtility(IIDNormalizer).normalize\n prefixes = context.bika_setup.getPrefixes()\n\n year = context.bika_setup.getYearInPrefix() and \\\n DateTime().strftime(\"%Y\")[2:] or ''\n separator = '-'\n for e in prefixes:\n if 'separator' not in e:\n e['separator'] = ''\n if e['portal_type'] == context.portal_type:\n separator = e['separator']\n # Analysis Request IDs\n if context.portal_type == \"AnalysisRequest\":\n sample = context.getSample()\n s_prefix = fn_normalize(sample.getSampleType().getPrefix())\n sample_padding = context.bika_setup.getSampleIDPadding()\n ar_padding = context.bika_setup.getARIDPadding()\n sample_id = sample.getId()\n sample_number = sample_id.split(s_prefix)[1]\n ar_number = sample.getLastARNumber()\n ar_number = ar_number and ar_number + 1 or 1\n\n return fn_normalize(\n (\"%s%s\" + separator + \"R%s\") % (s_prefix,\n str(sample_number).zfill(sample_padding),\n str(ar_number).zfill(ar_padding))\n )\n\n # Sample Partition IDs\n if context.portal_type == \"SamplePartition\":\n # We do not use prefixes. 
There are actually codes that require the 'P'.\n # matches = [p for p in prefixes if p['portal_type'] == 'SamplePartition']\n # prefix = matches and matches[0]['prefix'] or 'samplepartition'\n # padding = int(matches and matches[0]['padding'] or '0')\n\n # at this time the part exists, so +1 would be 1 too many\n partnr = str(len(context.aq_parent.objectValues('SamplePartition')))\n # parent id is normalized already\n return (\"%s\" + separator + \"P%s\") % (context.aq_parent.id, partnr)\n\n if context.bika_setup.getExternalIDServer():\n\n # if using external server\n\n for d in prefixes:\n # Sample ID comes from SampleType\n if context.portal_type == \"Sample\":\n prefix = context.getSampleType().getPrefix()\n padding = context.bika_setup.getSampleIDPadding()\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n new_id = str(idserver_generate_id(context, \"%s%s-\" % (prefix, year)))\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n # no prefix; use portal_type\n # year is not inserted here\n # portal_type is be normalized to lowercase\n npt = id_normalize(context.portal_type)\n new_id = str(idserver_generate_id(context, npt + \"-\"))\n return ('%s' + separator + '%s') % (npt, new_id)\n\n else:\n\n # No external id-server.\n\n def next_id(prefix):\n # normalize before anything\n prefix = fn_normalize(prefix)\n plone = context.portal_url.getPortalObject()\n # grab the first catalog we are indexed in.\n at = getToolByName(plone, 'archetype_tool')\n if context.portal_type in at.catalog_map:\n catalog_name = at.catalog_map[context.portal_type][0]\n else:\n catalog_name = 'portal_catalog'\n catalog = getToolByName(plone, catalog_name)\n\n # get all IDS that start with prefix\n # this must specifically exclude AR IDs (two -'s)\n rr = re.compile(\"^\"+prefix+separator+\"[\\d+]+$\")\n ids = [int(i.split(prefix+separator)[1]) \\\n for i in catalog.Indexes['id'].uniqueValues() \\\n if rr.match(i)]\n\n #plone_tool = getToolByName(context, 'plone_utils')\n #if not plone_tool.isIDAutoGenerated(l.id):\n ids.sort()\n _id = ids and ids[-1] or 0\n new_id = _id + 1\n\n return str(new_id)\n\n for d in prefixes:\n if context.portal_type == \"Sample\":\n # Special case for Sample IDs\n prefix = fn_normalize(context.getSampleType().getPrefix())\n padding = context.bika_setup.getSampleIDPadding()\n sequence_start = context.bika_setup.getSampleIDSequenceStart()\n new_id = next_id(prefix+year)\n # If sequence_start is greater than new_id. Set\n # sequence_start as new_id. 
(Jira LIMS-280)\n if sequence_start > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n elif d['portal_type'] == context.portal_type:\n prefix = d['prefix']\n padding = d['padding']\n sequence_start = d.get(\"sequence_start\", None)\n new_id = next_id(prefix+year)\n # Jira-tracker LIMS-280\n if sequence_start and int(sequence_start) > int(new_id):\n new_id = str(sequence_start)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + separator + '%s') % (prefix, year, new_id)\n\n if context.portal_type == \"StorageUnit\":\n if context.getStorageUnitID():\n return context.getStorageUnitID()\n\n if context.portal_type == \"StorageManagement\":\n prefix = ''\n if context.getType() == \"Freeze\":\n prefix = \"FZ\"\n elif context.getType() == \"Tank\":\n prefix = \"LN\"\n\n if context.aq_parent.portal_type == \"StorageUnit\":\n padding = 3\n year = DateTime().strftime(\"%Y\")[2:]\n new_id = next_id(prefix + year)\n if padding:\n new_id = new_id.zfill(int(padding))\n return ('%s%s' + '-' + '%s') % (prefix, year, new_id)\n else:\n l = context.Title().split(' ')\n if len(l) == 2:\n return l[1]\n elif len(l) == 1:\n return l[0]\n \n return context.Title().replace(' ', '')\n\n if context.portal_type == \"StorageInventory\":\n prefix = 'INV'\n parent = context.aq_parent\n new_id = next_id(prefix)\n\n if parent.portal_type == \"StorageUnit\":\n new_id = new_id.zfill(int(3))\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n elif parent.portal_type == \"StorageInventory\":\n room = context.aq_parent.aq_parent\n return room.id + '.' + parent.id + '.' + context.Title()\n\n else:\n raise AssertionError(\"Unknown Portal type\")\n\n if context.portal_type == \"Kit\":\n prefix = context.getPrefix() and context.getPrefix() or \"KIT\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"StorageLocation\":\n return context.Title()\n\n if context.portal_type == \"Aliquot\":\n # subject = context.getSubjectID()\n # prefix = subject + '-SP' if subject else 'SP'\n prefix = 'AL'\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n if context.portal_type == \"Biospecimen\":\n prefix = \"BS\"\n padding = 3\n new_id = next_id(prefix)\n if padding:\n new_id = new_id.zfill(int(padding))\n\n return ('%s' + '-' + '%s') % (prefix, new_id)\n\n # no prefix; use portal_type\n # no year inserted here\n # use \"IID\" normalizer, because we want portal_type to be lowercased.\n prefix = id_normalize(context.portal_type)\n new_id = next_id(prefix)\n return ('%s' + separator + '%s') % (prefix, new_id)", "def getSubtitles(self):\n\n self.createSoupObject()\n self.getcustomerID()\n self.getToken()\n self.getTitle()\n\n if self.debug:\n print(self.title)\n\n self.getVideoType()\n if self.debug:\n print(self.videoType)\n\n if self.videoType == \"movie\":\n\n self.getAsinID1() # Method-1\n if self.debug:\n print(self.parametersDict['asin'])\n\n returnValue = self.standardFunctionCalls()\n if returnValue != 1:\n self.videoType = \"tv\"\n\n if self.videoType != \"movie\":\n\n self.getAsinID2()\n if self.debug:\n print(self.asinList)\n\n self.parametersDict['asin'] = self.asinList\n currentTitle = self.title\n\n try:\n returnValue = self.standardFunctionCalls()\n except:\n pass\n self.title = 
currentTitle\n\n return returnValue", "def get_subtitle(self):\n return 'Remove items'", "def play_movie_with_subs(self, filmid):\n self.logger.debug('play_movie_with_subs')\n start = time.time()\n #\n film = self.database.retrieve_film_info(filmid)\n if film is None:\n self.logger.error(\"no film for download \" + self.plugin.language(30991))\n self.notifier.show_error(30990, self.plugin.language(30991))\n return\n ttmname = os.path.join(self.settings.getDatapath(), 'subtitle.ttml')\n srtname = os.path.join(self.settings.getDatapath(), 'subtitle.srt')\n subs = []\n if self.download_subtitle(film, ttmname, srtname, 'subtitle'):\n subs.append(srtname)\n # (_, listitem) = FilmUI(self.plugin).get_list_item(None, film)\n (_, listitem) = FilmlistUi(self.plugin)._generateListItem(film)\n self.logger.debug('SUBTITLE FOUND {} from url {}' , subs, film.url_sub)\n if listitem:\n if subs:\n listitem.setSubtitles(subs)\n self.plugin.set_resolved_url(True, listitem)\n self.logger.debug('play_movie_with_subs processed: {} sec', time.time() - start)", "def make_movie(processed_files_directory='files/', WITH_SUBTITLES=False, WITH_AUDIO=False):\r\n # Declare the text for sub-titles\r\n\r\n if WITH_SUBTITLES: # if the user is willing to have subtitles in the movie\r\n with open(processed_files_directory+'subtitles.txt', 'r', encoding='utf8') as f:\r\n txt = f.read() # read the subtitles file\r\n # Split text to lines.\r\n subtitles = txt.split('\\n')\r\n # Declare VideoFileClip from the movie that I already have.\r\n clip = VideoFileClip(processed_files_directory + \"initial.avi\")\r\n # Declare duration of one sub-title as total duration of the video divided by number of lines.\r\n duration = clip.duration/len(subtitles)\r\n # Set start to zero.\r\n start=0\r\n # Set container for the clips.\r\n videos=[]\r\n # Loop all sub-titles\r\n for line in subtitles:\r\n # Make text clip from the reversed Hebrew text\r\n txt_clip = TextClip(line[::-1], fontsize=30, color='yellow', font='Calibri')\r\n # Set position to the bottom of screen.\r\n txt_clip = txt_clip.set_position('bottom').set_duration(duration)\r\n # Make sub clip of the movie with same duration as text clip.\r\n sub_clip = clip.subclip(start,start+duration)\r\n # Set CompositeVideoClip from the text clip and sub clip.\r\n video = CompositeVideoClip([sub_clip, txt_clip])\r\n # Insert the video to the clips container\r\n videos.append(video)\r\n # Set start time for next sub-title.\r\n start+=duration\r\n # Concatenate all clips of the container.\r\n res = concatenate_videoclips(videos)\r\n clip = res # now the clip is res\r\n else:\r\n clip = VideoFileClip(processed_files_directory+ \"initial.avi\") # the clip won't have subtitles\r\n\r\n\r\n # Set audio clip from mp3 file.\r\n if WITH_AUDIO: # if the user has chosen to include soundtrack in the movie\r\n f = 'audio.mp3' # change to mp3 soundtrack file of the movie\r\n # set the duration of the audioclip to max(duration of clip), even if the audioclip is longer\r\n audioclip = AudioFileClip(processed_files_directory+f)\r\n\r\n # check if the clip length is bigger than the\r\n if clip.duration > audioclip.duration:\r\n number_of_duplicated = int(np.ceil(clip.duration/audioclip.duration))\r\n # duplicate the audioclip in order to later fit the movie's duration\r\n audioclip = concatenate_audioclips([AudioFileClip(processed_files_directory+f) for i in range(number_of_duplicated)])\r\n\r\n # Now fit the audioclip duration to the movie's\r\n audioclip = audioclip.set_duration(clip.duration)\r\n\r\n # Set 
audio for the container.\r\n if not WITH_SUBTITLES: # if the user wanted to have audio included without subtitles\r\n videoclip = clip.set_audio(audioclip)\r\n else: # if the user wanted to have both audio and subtitles\r\n videoclip = res.set_audio(audioclip)\r\n else:\r\n videoclip = clip # if the user didn't want audio in the movie\r\n\r\n # Write the video file.\r\n f = 'final_movie.mp4' # change to the desired movie filename\r\n videoclip.write_videofile(processed_files_directory+f)", "def job_title(self):\n if \"jobTitle\" in self._prop_dict:\n return self._prop_dict[\"jobTitle\"]\n else:\n return None", "def replaced_by_job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"replaced_by_job_id\")", "def __str__(self):\n iso_time = str(datetime.datetime.fromtimestamp(self.next_time))\n return \"<Job(%s, %ss, %s)>\" % (iso_time, self.interval, self.func)", "def job_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"job_id\")", "def subject(self):\n subject = loader.render_to_string(self.subject_template_name,\n self.get_context())\n return ''.join(subject.splitlines())", "def format_title(self, ticket_id, subject):\n # TODO: strip block tags?\n title = \"#%i %s\" % (ticket_id, subject)\n return title.strip()", "def generate_subsegment_id():\n return uuid.uuid4().hex[:16]", "def generate_text_chunks(subtitle_file, chunk_size, min_chunk_size):\n\n text_chunks = list()\n chunk_start_times = list()\n chunk_end_times = list()\n\n if subtitle_file[-4:] == \".vtt\":\n # Subtitle file is in vtt format.\n\n words, word_end_times = get_words_with_end_times(subtitle_file)\n\n if words is None:\n print(\"Could not generate text chunks for file: \" + subtitle_file)\n return None, None, None\n\n # Generate text chunks of desired size\n text_chunks, chunk_start_times, chunk_end_times = generate_text_chunks_from_word_list(words, word_end_times,\n chunk_size)\n elif subtitle_file[-4:] == \".txt\":\n # Subtitle file is a plain text.\n # Possibly approximate timestamps?\n pass\n\n # Discard last chunk if too small\n if len(text_chunks[-1]) < min_chunk_size:\n text_chunks.pop()\n chunk_start_times.pop()\n chunk_end_times.pop()\n\n return text_chunks, chunk_start_times, chunk_end_times", "def uploadSubtitles(self, filepath):\n url = f'{self.key}/subtitles'\n filename = os.path.basename(filepath)\n subFormat = os.path.splitext(filepath)[1][1:]\n with open(filepath, 'rb') as subfile:\n params = {'title': filename,\n 'format': subFormat\n }\n headers = {'Accept': 'text/plain, */*'}\n self._server.query(url, self._server._session.post, data=subfile, params=params, headers=headers)\n return self", "def getSubtitleTable(date) -> str:\n return \"\"\"| Start of the day | Weeks until NIMCET |\n| ---------------- | -----------------: |\n| {time} | {weeks} weeks |\"\"\".format(time=formattedTimeNow(), weeks=round((datetime(2021, 5, 21) - date).days/7, 1))", "def _get_random_job_prefix(self,\r\n fixed_prefix='',\r\n max_job_prefix_len=10,\r\n leading_trailing_underscores=True):\r\n\r\n length = max_job_prefix_len - len(fixed_prefix)\r\n if leading_trailing_underscores:\r\n length -= 2\r\n\r\n result = [choice(RANDOM_JOB_PREFIX_CHARS) for i in range(length)]\r\n if leading_trailing_underscores:\r\n return fixed_prefix + '_' + ''.join(result) + '_'\r\n else:\r\n return fixed_prefix + ''.join(result)", "def _make_title(self, ind):\n start = self.df_event_time.loc[ind, 'time']\n date = np.datetime_as_string(start.astype('<M8[ns]'), unit='s')\n start_ns = start - (start // 10**9) * 10**9\n end = 
self.df_event_time.loc[ind, 'endtime']\n end_ns = end - start + start_ns\n return ''.join((f'##Event {ind} from run {self.run_id}\\n',\n f'##Recorded at ({date[:10]} {date[10:]}) UTC ',\n f'{start_ns} ns - {end_ns} ns'))", "def get_sub_name(self):\n return self.sub_name", "def job_name(parameter):\n return (\n \"job_encut_\"\n + str(parameter[0]).replace(\".\", \"_\")\n + \"_kpoints_\"\n + str(parameter[1][0])\n + \"_\"\n + str(parameter[1][1])\n + \"_\"\n + str(parameter[1][2])\n )", "def get_sub_title(self, article: BeautifulSoup):\n return self.get_text(article, self.parsing_template.sub_title)", "def __str__(self):\n print('=' * 20, \"Subject Information\", '=' * 20)\n print(\"Subject Name: {}\".format(self.name))\n print(\"Pulse Data Length for general questions\")\n print(self.pulse_length[0:20])\n print(\"Number of general Questions: {}\".format(\n len(self.pulse_data[0])))\n print(\"Pulse Data Length for video 1\")\n print(\"Number of questions for video 1: {}\".format(\n len(self.pulse_data[1])))\n print(self.pulse_length[20:40])\n print(\"Pulse Data Length for video 2\")\n print(\"Number of questions for video 2: {}\".format(\n len(self.pulse_data[0])))\n print(self.pulse_length[40:60])\n print('Label Data')\n print(self.label_data)\n print('Label Data shape: {}'.format(self.label_data.shape))\n\n return ''", "def __str__(self):\n start = f\"{self.start:%y/%m/%d %H:%M} - \" if self.start else \"\"\n return f\"Course run {self.id!s} starting {start:s}\"", "def extract_subtitle_track(path_to_mkv):\n handler = SubtitleHandler()\n with open(path_to_mkv, \"rb\") as fp:\n mkvparse.mkvparse(fp, handler)\n\n return handler.subs", "def subtitle(\n pdf, text, indent=10, border=BORDER, font_size=12, font_style=\"B\"\n): # pylint: disable = too-many-arguments\n pdf.cell(indent, border=border)\n pdf.set_font(\"arial\", font_style, font_size)\n pdf.cell(75, 10, text, border, 1)", "def subtitle_enabled(self):\n # type: () -> bool\n return self._subtitle_enabled", "def dc_title(self):\n return u\"{0} ({1}): {2} {3}\".format(\n self.label, self.in_assessment[0].timepoint,\n self.subjects[0].code_in_study,\n \"...\" if len(self.subjects) > 1 else \"\")", "def add_subtitles(media_id, req):\n\n media_hndl = media_map.get(media_id)\n if not media_hndl:\n raise FileNotFoundError('{} is not a registered media_id'.format(media_id))\n\n subfile = None\n if req.data:\n subfile = json.loads(req.data.decode('utf-8')).get('filename')\n if not subfile:\n raise AttributeError('No filename specified in the request')\n\n if not subfile:\n if not media_hndl.path:\n raise NotImplementedError(\n 'Subtitles are currently only supported for local media files')\n\n req = {\n 'type': 'request',\n 'action': 'media.subtitles.get_subtitles',\n 'args': {\n 'resource': media_hndl.path,\n }\n }\n\n try:\n subtitles = send_message(req).output or []\n except Exception as e:\n raise RuntimeError('Could not get subtitles: {}'.format(str(e)))\n\n if not subtitles:\n raise FileNotFoundError('No subtitles found for resource {}'.\n format(media_hndl.path))\n\n req = {\n 'type': 'request',\n 'action': 'media.subtitles.download',\n 'args': {\n 'link': subtitles[0].get('SubDownloadLink'),\n 'media_resource': media_hndl.path,\n 'convert_to_vtt': True,\n }\n }\n\n subfile = (send_message(req).output or {}).get('filename')\n\n media_hndl.set_subtitles(subfile)\n return {\n 'filename': subfile,\n 'url': get_remote_base_url() + '/media/subtitles/' + media_id + '.vtt',\n }", "def test_job_title(self):\n inv_search = 'title:engineer not 
title:programmer'\n spi_search = 'find job engineer not position programmer'\n self._compare_searches(inv_search, spi_search)", "def download_subtitle(search, results, filename, min_match=0,\n force=0, debug=0):\n s = Subseek()\n subtitleurl = False\n for result in s.order_match(search, results):\n match_text = s.clean_text(s.clean_html(result['text']),False,\n s.detect_encoding(s.clean_html(result['text']))\n ) + \" \" + s.clean_text(s.clean_html(result['description']),False,\n s.detect_encoding(s.clean_html(result['description'])))\n subtitleurl = result['link']\n rating_weight = s.text_weight(search, match_text)\n max_weight = s.text_weight(search)\n rating_match = rating_weight/max_weight*100\n if debug == 1:\n print \"Subtitle File URL: \" + subtitleurl\n print \" Text To Search: \" + search\n print \" Text To Match: \" + match_text\n print \" Text Match (%): \" + str(round(rating_match,2)) + \"%\"\n # check minimal match to use\n if rating_match >= min_match:\n downloaded = s.download(subtitleurl, filename + '.tmp')\n if downloaded == False:\n if debug == 1:\n print \"Error: connection or file creation failed\"\n subtitleurl = False\n else:\n if debug == 1:\n print \"File downloaded\"\n # unrar, unzip or get srt file\n typefile = s.get_sub_file_from_file(filename + '.tmp',\n search, force)\n if typefile == False:\n if debug == 1:\n print \"Subtitle file not found\"\n subtitleurl = False\n else:\n if debug == 1:\n print \"Subtitle file found in \" + typefile\n subtitleurl = True\n break\n else:\n if debug == 1:\n print \"Error: match less than \" + str(round(min_match,2))+ \"%\"\n subtitleurl = False\n break\n\n return subtitleurl", "def format_job_id(\n service: str,\n instance: str,\n git_hash: Optional[str] = None,\n config_hash: Optional[str] = None,\n) -> str:\n service = str(service).replace(\"_\", \"--\")\n instance = str(instance).replace(\"_\", \"--\")\n if git_hash:\n git_hash = str(git_hash).replace(\"_\", \"--\")\n if config_hash:\n config_hash = str(config_hash).replace(\"_\", \"--\")\n formatted = compose_job_id(service, instance, git_hash, config_hash)\n return formatted", "def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text", "def stop_text_translation_job(JobId=None):\n pass", "def test_get_subtitles(self):\n\n (subtitles, start, end) = self.sp.get_subtitles(INDEX)\n self.assertEqual(subtitles, INDEX_SUBTITLES, 'Check get_subtitles.')", "def get_transcript(self, transcript_format='srt'):\r\n lang = self.transcript_language\r\n\r\n if lang == 'en':\r\n if self.sub: # HTML5 case and (Youtube case for new style videos)\r\n transcript_name = self.sub\r\n elif self.youtube_id_1_0: # old courses\r\n transcript_name = self.youtube_id_1_0\r\n else:\r\n log.debug(\"No subtitles for 'en' language\")\r\n raise ValueError\r\n\r\n data = Transcript.asset(self.location, transcript_name, lang).data\r\n filename = u'{}.{}'.format(transcript_name, transcript_format)\r\n content = Transcript.convert(data, 'sjson', 
transcript_format)\r\n else:\r\n data = Transcript.asset(self.location, None, None, self.transcripts[lang]).data\r\n filename = u'{}.{}'.format(os.path.splitext(self.transcripts[lang])[0], transcript_format)\r\n content = Transcript.convert(data, 'srt', transcript_format)\r\n\r\n if not content:\r\n log.debug('no subtitles produced in get_transcript')\r\n raise ValueError\r\n\r\n return content, filename, Transcript.mime_types[transcript_format]", "def Subtlety(self):\n s = self.subtlety\n assert s in range(1,6), \"Subtlety score out of bounds.\"\n return _char_to_word_[::-1][ s-1 ] + ' Subtlety'", "def generate_event_subject(instance, single_event=False):\n if single_event:\n cnt = len(instance[\"branches\"])\n duration = instance[\"end\"] - instance[\"start\"]\n # Single event subject should just use a simple\n # summary with information about what was worked on.\n return (\n f\"{cnt} Issues Worked On ({strfdelta(duration)})\"\n )\n else:\n branch = instance[\"branch\"]\n issue = instance[\"issue\"]\n duration = instance[\"end\"] - instance[\"start\"]\n # Otherwise, we're generating the subject for a single issue.\n # Using Jira to find the subject.\n if config.jira_enabled:\n if jira_manager.issue_exists(issue=issue):\n summary = jira_manager.issue_summary(issue=issue)\n return (\n f\"{branch} - {summary} - {strfdelta(duration)}\"\n )\n # The issue does not exist OR the Jira configurations\n # are invalid OR Jira is disabled, we'll just default\n # to a basic summary.\n return (\n f\"{branch}\"\n )", "def transcribe(self):\n self.sequence = self.sequence.replace(\"T\",\"U\")\n return", "def generate_srt_from_sjson(sjson_subs, speed):\r\n\r\n output = ''\r\n\r\n equal_len = len(sjson_subs['start']) == len(sjson_subs['end']) == len(sjson_subs['text'])\r\n if not equal_len:\r\n return output\r\n\r\n sjson_speed_1 = generate_subs(speed, 1, sjson_subs)\r\n\r\n for i in range(len(sjson_speed_1['start'])):\r\n item = SubRipItem(\r\n index=i,\r\n start=SubRipTime(milliseconds=sjson_speed_1['start'][i]),\r\n end=SubRipTime(milliseconds=sjson_speed_1['end'][i]),\r\n text=sjson_speed_1['text'][i]\r\n )\r\n output += (unicode(item))\r\n output += '\\n'\r\n return output", "def get_alternative_id(self):\n raise NotImplementedError()", "def _generate_pipeline_labels(self, job):\n jobname = self._get_jobname(job)\n labels = {\"name\": jobname, \"app\": \"snakemake\"}\n return labels", "def _build_transcript(self):\n\n # Create the transcript dictionary\n transcript_dict = dict()\n for splitted in self._get_transcript_entries(self._data_directory):\n transcript_dict[splitted[0]] = vocabulary.sentence_to_ids(splitted[1])\n\n return transcript_dict", "def _generate_title(cls, ca_type):\n special_chars = string_utils.SPECIAL\n return append_random_string(\n \"{}_{}_\".format(ca_type, random_string(\n size=len(special_chars), chars=special_chars)))", "def _job_id(files: list, extra: str):\n files_str = \"\"\n for file in files:\n files_str += file\n job_id = hashlib.sha1(files_str.encode() + extra.encode()).hexdigest()\n return job_id", "def job_id_ext(self, job_id_ext):\n\n self._job_id_ext = job_id_ext", "def _suffix(self) -> str:\n return \"\"", "def test_make_jobs(self):\r\n # no commands should make no jobs files\r\n self.assertEqual(make_jobs([], \"test\", self.queue), [])\r\n\r\n # one job file should be created\r\n filenames = make_jobs([self.command], \"test_qsub\", self.queue)\r\n self.assertTrue(len(filenames) == 1)\r\n observed_text = list(open(filenames[0]))\r\n\r\n 
self.assertEqual(\"\".join(observed_text),\r\n QSUB_TEXT % (\"72:00:00\", 1, 1, self.queue,\r\n \"test_qsub\", \"oe\",\r\n self.command))", "def _build_sub(self) -> str:\n return dedent(\n \"\"\"\n @SP\n M=M-1\n A=M\n D=M\n @SP\n M=M-1\n A=M\n M=M-D\n @SP\n M=M+1\n \"\"\"\n )", "def link_subtitles_to_files(self):\n target_filenames = self.get_target_filenames()\n subtitle_filenames = []\n\n zip_filenames = self.subtitle_zip_files_dir.glob('*.zip')\n for zip_fn in zip_filenames:\n subtitle_filenames.extend(self.unzip_subtitles(zip_fn))\n\n subtitle_metadata = self.cache_file_metadata(subtitle_filenames)\n target_metadata = self.cache_file_metadata(target_filenames)\n\n for target_file_metadata_summary in target_metadata:\n try:\n source_filename = f'{subtitle_metadata[target_file_metadata_summary][\"fn\"]}.srt'\n target_filename = f'{target_metadata[target_file_metadata_summary][\"fn\"]}.srt'\n source_filepath = pathlib.PurePath(self.temp_storage_dir, source_filename)\n target_filepath = pathlib.PurePath(self.target_dir, target_filename)\n shutil.move(source_filepath, target_filepath)\n print(f'Subtitle for \\'{target_file_metadata_summary}\\' successfully linked.')\n except KeyError:\n logging.warning(f'Subtitle for \\'{target_file_metadata_summary}\\' not found!')\n\n self.clean_temp_storage_dir(subtitle_filenames)", "def tv_tropes_id(title):\n pass", "def __init__(self, subcase_id: int, header: str, title: str, location: str,\n ids: Any, scalar: Any,\n mask_value: Optional[int]=None, nlabels: Optional[int]=None,\n labelsize: Optional[int]=None, ncolors: Optional[int]=None,\n colormap: str='jet', data_map: Any=None,\n data_format: Optional[str]=None, uname: str='GuiResult'):\n self.ids = ids\n super().__init__(\n subcase_id, header, title, location, scalar,\n mask_value, nlabels, labelsize, ncolors, colormap, data_map,\n data_format, uname)", "def schedule_paragraph():", "def get_subject(text_file):\n path_name, sf = os.path.splitext(text_file)\n fname = os.path.basename(path_name)\n fname = fname.replace(\"-Left_Handed\", \"\")\n all_hyphens = [m.start() for m in re.finditer('-', fname)]\n if len(all_hyphens) == 1:\n beg = fname[:len(fname)-2].rindex('_')\n else:\n beg = all_hyphens[-2]\n\n end = all_hyphens[-1]\n subj = fname[beg+1:end]\n subj = subj.lower()\n\n return subj", "def download(self):\n if not os.path.isdir(self.save_path):\n try:\n os.mkdir(self.save_path)\n except IOError:\n print(\"Can't create subfolder. 
\"\n \"Check that you have write access for \"\n \"{}\".format(self.save_path))\n\n sub_zip_file = request.urlopen(self.download_link)\n sub_gzip = gzip.GzipFile(fileobj=StringIO(sub_zip_file.read()))\n subtitle_content = sub_gzip.read()\n try:\n with open(self.full_path, 'wb') as subtitle_output:\n subtitle_output.write(subtitle_content)\n print(\"Downloaded subtitle...\")\n except IOError:\n print(\"Couldn't save subtitle, permissions issue?\")", "def get_job_id(self):\n return {'job_id': self._job_id}", "def download_subtitle(self, film, ttmname, srtname, filename):\n self.logger.debug('download_subtitle')\n ret = False\n if film.url_sub:\n progress = KodiProgressDialog()\n progress.create(30978, filename + u'.ttml')\n # pylint: disable=broad-except\n try:\n progress.update(0)\n mvutils.url_retrieve_vfs(\n film.url_sub, ttmname, progress.url_retrieve_hook)\n try:\n ttml2srtConverter = ttml2srt()\n ttml2srtConverter.do(xbmcvfs.File(ttmname, 'r'),\n xbmcvfs.File(srtname, 'w'))\n ret = True\n except Exception as err:\n self.logger.error('Failed to convert to srt: {}', err)\n progress.close()\n except Exception as err:\n progress.close()\n self.logger.error(\n 'Failure downloading {}: {}', film.url_sub, err)\n return ret", "def title(self):\n if self.file_name is None:\n return None\n else:\n fname = os.path.split(self.file_name)[-1]\n fname, *ext = fname.rsplit('.', 1)\n procgen = ext and ext[0] in ('json', 'yaml')\n if procgen and self._seed and self._seed.spawn_key:\n # Append the spawn key as the episode number\n fname += '-e' + str(self._seed.spawn_key[-1])\n return fname", "def makeFootnoteId(self, id):\n if self.getConfig(\"UNIQUE_IDS\"):\n return 'fn%s%d-%s' % (self.sep, self.unique_prefix, id)\n else:\n return 'fn%s%s' % (self.sep, id)", "def _format_job_number(job):\n year = str(job[0])\n number = job[1]\n if number < 10:\n string_number = '00' + str(number)\n elif number < 100:\n string_number = '0' + str(number)\n else:\n string_number = str(number)\n\n string_job_number = year + '-' + string_number\n\n return string_job_number", "def numbered_title(self):\n return f\"{self.title}\"", "def numbered_title(self):\n return f\"{self.title}\"", "def gen_qsub_script(\n crop, batch_ids=None, *, scheduler=\"sge\", **kwargs\n): # pragma: no cover\n warnings.warn(\n \"'gen_qsub_script' is deprecated in favour of \"\n \"`gen_cluster_script` and will be removed in the future\",\n FutureWarning,\n )\n return gen_cluster_script(crop, scheduler, batch_ids=batch_ids, **kwargs)" ]
[ "0.6298904", "0.5891134", "0.57354397", "0.57253325", "0.57177824", "0.5635711", "0.5478623", "0.5465625", "0.5414711", "0.54018885", "0.53634965", "0.53630906", "0.535014", "0.5312166", "0.5256253", "0.5206248", "0.5183688", "0.5179891", "0.517678", "0.516628", "0.51632917", "0.5151589", "0.51470906", "0.51376855", "0.5127866", "0.5125466", "0.5065127", "0.50525135", "0.50362927", "0.50329655", "0.5017389", "0.50119025", "0.49950367", "0.49950367", "0.4974771", "0.49604082", "0.49561357", "0.49547046", "0.4954599", "0.49504703", "0.49442914", "0.4937758", "0.4936423", "0.49289498", "0.49235043", "0.49033117", "0.488685", "0.48863554", "0.4883736", "0.48794293", "0.48716748", "0.48440927", "0.48412082", "0.48380283", "0.4831924", "0.48224285", "0.48213565", "0.48118743", "0.4796919", "0.47966623", "0.4794798", "0.47843206", "0.47703353", "0.47348288", "0.4734676", "0.4731295", "0.472658", "0.47253326", "0.472207", "0.47091168", "0.4703664", "0.46986845", "0.4691522", "0.46704668", "0.46608776", "0.46588823", "0.4653969", "0.4650913", "0.4650167", "0.46458206", "0.46450084", "0.46397483", "0.46312538", "0.4623563", "0.46234614", "0.46209756", "0.46194395", "0.4619337", "0.46166426", "0.46164712", "0.4613972", "0.4613218", "0.46123523", "0.46121237", "0.46080354", "0.46047282", "0.46046776", "0.46036568", "0.46036568", "0.4602956" ]
0.74129283
0
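A quick, hedged illustration of the `job_subtitle` record above (query on top, document beneath): the method trims a job id to at least eight characters, or to however many are needed to keep every id in the project unique. The `DashboardSketch` class and its `_project_min_len_unique_id` helper below are stand-ins invented for this sketch — the real helper lives in signac-dashboard and is not part of the record.

```python
# Sketch of the job_subtitle logic from the record above; the class and
# helper are illustrative stand-ins, not the real signac-dashboard code.

class DashboardSketch:
    def __init__(self, job_ids):
        self._job_ids = job_ids

    def _project_min_len_unique_id(self):
        # Shortest prefix length at which all job ids stay distinct.
        n = 1
        while len({jid[:n] for jid in self._job_ids}) < len(self._job_ids):
            n += 1
        return n

    def job_subtitle(self, job):
        # Same expression as the record's document: at least 8 characters,
        # more only if required for uniqueness across the project.
        return str(job)[:max(8, self._project_min_len_unique_id())]


ids = ["7f9fb6b6a9eb", "7f9fb6c1d2aa"]
dash = DashboardSketch(ids)
print([dash.job_subtitle(j) for j in ids])  # ['7f9fb6b6', '7f9fb6c1']
```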
Override this method for custom job sorting. This method returns a key that can be compared to sort jobs. By default, the sorting is based on the natural sort order of the job title.
def job_sorter(self, job): key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL) return key(job)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def job_priority_key(self, job):\n raise NotImplemented", "def sort_key(self):\n ...", "def get_sort_key(self) -> str:\n return self.name", "def job_priority_key(self, job):\n camp, user = job.camp, job.user\n end = camp.time_left / user.shares # lower value -> higher priority\n # The `end` should be further multiplied by\n # `_stats.active_shares` / `_stats.cpu_used`.\n # However, that gives the same value for all the jobs\n # and we only need the ordering, not the absolute value.\n return (end, camp.created, user.ID, camp.ID,\n job.submit, job.ID)", "def job_priority_key(self, job):\n if not self._stats.total_usage:\n fairshare = 1\n else:\n user = job.user\n effective = user.cpu_clock_used / self._stats.total_usage\n #shares_norm = user.shares # already normalized\n fairshare = 2.0 ** -(effective / user.shares)\n prio = int(fairshare * 100000) # higher value -> higher priority\n # TODO if needed change the constant to a configuration setting\n # TODO and add more components to the priority value\n return (-prio, job.submit, job.ID)", "def __hash__(self):\r\n return hash(f'{self.job_id},{self.job_size},{self.priority}')", "def sort(self, key_func):\n pass", "def cmp_to_key(mycmp): # Taken from Python 2.7's functools\n class K(object):\n __slots__ = ['obj']\n\n def __init__(self, obj, *args):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n def __hash__(self):\n raise TypeError('hash not implemented')\n return K", "def benchmark_sort_key(benchmark):\n if not \"label\" in benchmark:\n return \"\"\n return benchmark[\"label\"]", "def sort_key(self, order=None):\n\n # XXX: remove this when issue 5169 is fixed\n def inner_key(arg):\n if isinstance(arg, Basic):\n return arg.sort_key(order)\n else:\n return arg\n\n args = self._sorted_args\n args = len(args), tuple([inner_key(arg) for arg in args])\n return self.class_key(), args, S.One.sort_key(), S.One", "def cmp_to_key(mycmp):\n class K(object):\n __slots__ = ['obj']\n\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n __hash__ = None\n\n return K", "def sortKey(self, p_str): # real signature unknown; restored from __doc__\n return QCollatorSortKey", "def key(self):\n return key_for_name(self.name)", "def _grokker_sort_key(args):\n grokker, name, obj = args\n return priority.bind().get(grokker)", "def __cmp__(self,other):\n try:\n other_key = other._make_key()\n except AttributeError:\n return -1\n return cmp(self._make_key(), other_key)", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_by_key(request):\n return request.param", "def sort_by_key(request):\n return request.param", "def cmp_to_key(mycmp):\n\n class K:\n def __init__(self, obj):\n self.obj = obj\n\n def __lt__(self, other):\n return mycmp(self.obj, other.obj) < 0\n\n def __gt__(self, other):\n return mycmp(self.obj, other.obj) > 
0\n\n def __eq__(self, other):\n return mycmp(self.obj, other.obj) == 0\n\n def __le__(self, other):\n return mycmp(self.obj, other.obj) <= 0\n\n def __ge__(self, other):\n return mycmp(self.obj, other.obj) >= 0\n\n def __ne__(self, other):\n return mycmp(self.obj, other.obj) != 0\n\n return K", "def _get_field_sort_key(self, field):\n if not field.is_relation:\n return -1\n return 0 if field.many_to_many else 1", "def sortby(self):\n ...", "def _custom_sorter(self, key1, key2):\n\n col = self._col\n ascending = self._colSortFlag[col]\n real = self.get_real_col(col)\n item1 = self.itemDataMap[key1][real]\n item2 = self.itemDataMap[key2][real]\n\n # Internationalization of string sorting with locale module\n if isinstance(item1, str) and isinstance(item2, str):\n cmpVal = locale.strcoll(item1, item2)\n elif isinstance(item1, bytes) or isinstance(item2, bytes):\n cmpVal = locale.strcoll(str(item1), str(item2))\n else:\n cmpVal = cmp(item1, item2)\n\n # If the items are equal, then pick something else to make the sort value unique\n if cmpVal == 0:\n cmpVal = cmp(*self.GetSecondarySortValues(col, key1, key2))\n\n if ascending:\n return cmpVal\n else:\n return -cmpVal", "def key(self, sorting):\n if(sorting & Sorting.NoSorting):\n return (lambda x: 1) # All elements get the same key\n\n if(sorting & Sorting.Date):\n return (lambda x: x.date)\n\n if(sorting & Sorting.Code):\n return (lambda x: x.code)\n\n if(sorting & Sorting.User):\n return (lambda x: x.name)\n\n if(sorting & Sorting.Priviledges):\n # Not having priviledges grants \"points\": the more points the higher in the sort\n return (lambda x: (x.filters & Filters.NonSubs) + (x.filters & Filters.NonMods))\n\n if(sorting & Sorting.TimesRequested):\n return (lambda x: x.times_requested)", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if utils.is_int(end):\n return (start, int(end))\n return name", "def sortby(self):\n return self._sortby", "def sortKey(self):\n return 'filestore:{0}'.format(id(self.stage))", "def _make_key(self):\n all_position_values = (chromosome_sort_key(self.chromosome), self.min_position, self.max_position, \n self.strand, self.position_before, self.position_after)\n return all_position_values", "def get_key(self):\n return self._determine_key()", "def key(self):\n return self.key_for(self.id)", "def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)", "def get_row_list_sorting_key(x):\n name, count = x\n if '_' not in name:\n return name\n s = name.split('_')\n end = s[-1]\n start = '_'.join(s[:-1])\n if is_int(end):\n return (start, int(end))\n return name", "def index(self, key):\r\n return self.keyOrder.index(key)", "def hash_key(self):", "def get_key(self) -> int:\n return self.__key", "def get_key(self) -> int:\n return self.key", "def sortKey( self, mode, matrix ):\n current = self.currentImplementation()\n if current:\n return current.sortKey( mode, matrix )\n else:\n return (False,[],None)", "def _get_key(self, val: V) -> Hashable:\n raise NotImplementedError", "def get_sort_by(self):\n\n\t\treturn self.__sort_by", "def key(self):\n if self._key is None:\n # Use _END_OF_TIME so that it is ordered after all real dates\n return _END_OF_TIME, _END_OF_TIME\n return self._key", "def get_key(self, item):\r\n return item[0]", "def functools_cmp_to_key():\n # make a key function using cmp_to_keY(). 
\n # It means convert cmp function to key literally\n get_key = functools.cmp_to_key(compare_obj)\n def get_key_wrapper(o):\n \"\"\"wrapper function for get_key to allow for print statements\n \"\"\"\n new_key = get_key(o)\n print('key_wrapper({} -> {!r})'.format(o, new_key))\n return new_key\n objs = [CompObj(n) for n in range(5, 0, -1)]\n for o in sorted(objs, key=get_key_wrapper):\n print(o)", "def order_key(self, field='word', order=None):\n try:\n return order(self[field])\n except TypeError:\n try:\n return sort_key(order)(self[field])\n except AttributeError:\n return self[field]", "def sortKey( self, mode, matrix ):\n # distance calculation...\n distance = polygonsort.distances(\n LOCAL_ORIGIN,\n modelView = matrix,\n projection = mode.getProjection(),\n viewport = mode.getViewport(),\n )[0]\n if self.appearance:\n key = self.appearance.sortKey( mode, matrix )\n else:\n key = (False,[],None)\n if key[0]:\n distance = -distance\n return key[0:2]+ (distance,) + key[1:]", "def compare_versions_key(x):\n return cmp_to_key(compare_versions)(x)", "def key(self):\n return self.name", "def get_key(self):\n return self.key", "def get_key(self):\n return self.key", "def _key(self):\n key_args = [self.__class__.__name__] + [str(a) for a in self.args]\n return (\":\".join(key_args))", "def __lt__(self, other):\n return other > self._cmpkey()", "def get_job_id(self):\n return {'job_id': self._job_id}", "def key(self):\n return self.__key", "def key(self) -> str:\n return self.__key", "def sortKey( self, mode, matrix ):\n # TODO: figure out how to handle \n return False,[],None", "def cmp_to_key(cmp_fun, model):\n class K:\n def __init__(self, obj, *args):\n self.obj = obj\n def __lt__(self, other):\n return cmp_fun(self.obj, other.obj, model) < 0\n def __gt__(self, other):\n return cmp_fun(self.obj, other.obj, model) > 0\n def __eq__(self, other):\n return cmp_fun(self.obj, other.obj, model) == 0\n def __le__(self, other):\n return cmp_fun(self.obj, other.obj, model) <= 0\n def __ge__(self, other):\n return cmp_fun(self.obj, other.obj, model) >= 0\n def __ne__(self, other):\n return cmp_fun(self.obj, other.obj, model) != 0\n return K", "def _event_sort_key(cls, event):\n if \"test_name\" in event:\n return event[\"test_name\"]\n else:\n return event.get(\"test_filename\", None)", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def sort(self, key: Callable):\n self.data.sort(key=key)", "def min_key(self):\n return self.__keys[self.__pq[1]]", "def key(self):\n if self._key is None:\n fields = []\n for attr in self.__fields__:\n val = getattr(self, attr)\n if isinstance(val, list):\n val = tuple(val)\n fields.append(val)\n self._key = hash(tuple(fields))\n return self._key", "def job_name(parameter):\n return (\n \"job_encut_\"\n + str(parameter[0]).replace(\".\", \"_\")\n + \"_kpoints_\"\n + str(parameter[1][0])\n + \"_\"\n + str(parameter[1][1])\n + \"_\"\n + str(parameter[1][2])\n )", "def compare_for_key(self, key_1: keyType, key_2: keyType) -> int:\n if hash(key_1) < hash(key_2):\n return -1\n return 1", "def key(self):\n return self.value()._key", "def report_sort_key(self):\n return (self._start_time, self._end_time)", "def _get_sort_key(self) -> np.array:\n data = self.reader.GetOutput()\n raw_cell_coords = np.empty((data.GetNumberOfCells(), 3))\n for i in range(data.GetNumberOfCells()):\n cell_corners = vtk_to_numpy(data.GetCell(i).GetPoints().GetData())\n raw_cell_coords[i] = np.array(\n [cell_corners[:, n].mean() for n in range(cell_corners.shape[1])]\n )\n\n cell_coords = 
np.array(\n [tuple(line) for line in raw_cell_coords],\n dtype=[(\"r\", \"f4\"), (\"phi\", \"f4\"), (\"z\", \"f4\")],\n )\n return cell_coords.argsort(order=[\"r\", \"phi\"])", "def sortkey(style, reference, context='bibliography'):\n return(reference['title'], reference['date'])", "def item_comparer(self):\n return self.item_comparer_value", "def _column_sorting_key(self, c):\n first_index = 0\n if c.startswith('hybrid'):\n first_index = 1\n elif c.startswith('solar'):\n first_index = 2\n elif c.startswith('wind'):\n first_index = 3\n elif c == MERGE_COLUMN:\n first_index = -1\n return first_index, self._hybrid_meta.columns.get_loc(c)", "def _key(self):\n return None", "def hash_function(self, key):\n index = key % len(self.objects_list)\n return index", "def key(self):\n return None", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def getMinKey(self):\n if self.head is None:\n return \"\"\n return self.head.first.key", "def __cmp__(self, that):\n property_compare = self.CmpProperties(that)\n if property_compare:\n return property_compare\n else:\n return cmp(self.__entity.key(), that.__entity.key())", "def get_key(self, state: Dict) -> str:\n\n return \"_\".join(sorted(state))", "def get_cache_key(self):\n return get_cache_key(\n self.__class__.__name__, settings=(self.pk, ))", "def key(self):\n raise NotImplementedError(\"'key' not implemented for Element subclass\")", "def _cache_key(self, pk=\"all\", **kwargs):\n q_filter = \"\".join(\"%s=%s\" % (k, v) for k, v in kwargs.items()) or self.pk\n return \"%s.%s[%s]\" % (self.model.__tablename__, q_filter, pk)", "def __hash__(self) -> int:\n if self.predicate:\n return hash((self.id, self.vprev, self.vnext, self.name))\n return hash(self.name)", "def test_order_by(self):\n manifestb = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='2.0.0')\n job_type1b = job_test_utils.create_seed_job_type(manifest=manifestb)\n job_test_utils.create_job(job_type=job_type1b, status='RUNNING')\n\n manifestc = job_test_utils.create_seed_manifest(name='scale-batch-creator', jobVersion='3.0.0')\n job_type1c = job_test_utils.create_seed_job_type(manifest=manifestc)\n job_test_utils.create_job(job_type=job_type1c, status='RUNNING')\n\n url = '/%s/jobs/?is_superseded=false&order=job_type__name&order=-job_type__version' % self.api\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)\n\n self.assertEqual(result['results'][0]['job_type']['id'], job_type1c.id)\n self.assertEqual(result['results'][1]['job_type']['id'], job_type1b.id)\n self.assertEqual(result['results'][2]['job_type']['id'], self.job_type1.id)\n self.assertEqual(result['results'][3]['job_type']['id'], self.job_type2.id)", "def extract_key(cls, *args, **kwargs) -> Hashable:\n return (args[1], args[2])", "def compare_strings_key(x):\n return cmp_to_key(compare_strings)(x)", "def key(self) -> str:\n return self._key", "def key(self) -> Key:\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key(self):\n return self._key", "def key_by(self, field: str) -> B[B, E]:\n pass", "def _key(self):\n return (self.name, self.type_.upper(), self.value)", "def get_key(self):\n\n return self._key", "def channel_move_sort_key(channel_key):\n return channel_key[0]", "def cmp(self):\r\n 
warnings.warn('`cmp` is deprecated, use `key` instead.',\r\n DeprecationWarning)\r\n\r\n # pylint: disable=C0103\r\n def _cmp(a, b):\r\n for accessor, reverse in instructions:\r\n x = accessor.resolve(a)\r\n y = accessor.resolve(b)\r\n try:\r\n res = cmp(x, y)\r\n except TypeError:\r\n res = cmp((repr(type(x)), id(type(x)), x),\r\n (repr(type(y)), id(type(y)), y))\r\n if res != 0:\r\n return -res if reverse else res\r\n return 0\r\n instructions = []\r\n for order_by in self:\r\n if order_by.startswith('-'):\r\n instructions.append((Accessor(order_by[1:]), True))\r\n else:\r\n instructions.append((Accessor(order_by), False))\r\n return _cmp", "def get_job_list():\n\tdirlist = os.listdir(\".\")\n\tjoblist = [x for x in dirlist if \"job.sh\" in x and x in job_dict]\n\ttmplist = [x for x in dirlist if \"job.sh\" in x and x not in job_dict]\n\tdef compare_function(s: str):\n\t\treturn job_dict[s].order\n\tjoblist.sort(key=compare_function)\n\tjoblist.extend(tmplist)\n\treturn joblist", "def _keys_in_sorted(move):\n return (move.picking_id.id, move.product_id.responsible_id.id)", "def cache_key(self):\r\n return self._cache_key(self.pk, self._state.db)", "def getKey(self):\n return self.__key", "def sort_key(self):\n\t\treturn 'login'", "def job_id(self) -> JobId:\r\n return self._job_id", "def __hash__(self):\n\n return hash(self._key)" ]
[ "0.75236887", "0.74009395", "0.71635604", "0.7031687", "0.64361453", "0.6359381", "0.62978643", "0.6156635", "0.61132455", "0.6011168", "0.59698707", "0.59516037", "0.5946783", "0.592785", "0.5922575", "0.59023565", "0.58991647", "0.58991647", "0.5893522", "0.5864389", "0.5845774", "0.58386725", "0.58182067", "0.58018416", "0.5798572", "0.5794754", "0.57921124", "0.57608426", "0.57436067", "0.57374907", "0.5733892", "0.5709083", "0.5699924", "0.56947625", "0.5687371", "0.56733096", "0.5670982", "0.5647263", "0.5626617", "0.55774474", "0.5575902", "0.55339587", "0.5529912", "0.5527874", "0.55244845", "0.5503903", "0.5503903", "0.5495236", "0.54892987", "0.5468938", "0.54678065", "0.5453603", "0.5432013", "0.54299164", "0.5410073", "0.540905", "0.540905", "0.53884983", "0.5384189", "0.5381668", "0.5380478", "0.5379334", "0.5372767", "0.53697526", "0.53686816", "0.5368586", "0.5365143", "0.5364799", "0.5363754", "0.5362256", "0.53584665", "0.53584665", "0.5341844", "0.5331915", "0.5330576", "0.531722", "0.52999014", "0.5296805", "0.52913076", "0.5279595", "0.527871", "0.52739114", "0.5271997", "0.5268549", "0.52627546", "0.52627546", "0.52627546", "0.52627546", "0.526172", "0.5258279", "0.52537906", "0.52490973", "0.52352947", "0.52284414", "0.52252895", "0.5224686", "0.5221767", "0.522094", "0.52162486", "0.5205492" ]
0.78755414
0
Add a route to the dashboard. This method allows custom view functions to be triggered for specified routes. These view functions are imported lazily, when their route is accessed.
def add_url(self, import_name, url_rules=[], import_file='signac_dashboard', **options): if import_file is not None: import_name = import_file + '.' + import_name for url_rule in url_rules: self.app.add_url_rule( rule=url_rule, view_func=LazyView(dashboard=self, import_name=import_name), **options)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_routes(self):\n pass", "def add_route(config, route, view, route_name=None, renderer='json'):\n route_name = route_name or view.__name__\n config.add_route(route_name, route)\n config.add_view(view, route_name=route_name, renderer=renderer)", "def add_route(self, pattern: str, view: Callable) -> None:\n route = Route(pattern)\n self._routes[route] = view", "def add_view( *args, **kwargs ):", "def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# view_method = getattr(locals()[\"flask_views\"], custom_method)\n# docstring = getattr(view_method, \"__doc__\")\n# index_start = docstring.index(\"@app.route\")\n# index_end = index_start + len(\"@app.route\") + 1\n# custom_method_url = docstring[index_end:].replace(\" \",\"\").replace(\"\\n\",\"\")\n# # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke\n# self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._app.mongo))\n self._app.register_blueprint(ro_flask_views)", "def add_view(self, *args, **kwargs):\n return self._resources_manager.add_view(*args, **kwargs)", "def add_route(app, *args):\n for route in args:\n app.router.add_route(route[0], route[1], route[2])", "def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)", "def add_route(self, item):\n self._routes[item.route] = item\n self.httpd.route(item.route, method=\"GET\", callback=item.get)\n self.httpd.route(item.route, method=\"POST\", callback=item.post)\n self.httpd.route(item.route, method=\"PUT\", callback=item.put)\n self.httpd.route(item.route, method=\"DELETE\", callback=item.delete)", "def add_route(self, route, resource):\n\n if is_function(resource):\n _resource = RouteResource(resource)\n self._api_manager.add_route(route, _resource)\n else:\n self._api_manager.add_route(route, resource)", "def add_route(app, url):\n app.server.add_url_rule(url, endpoint=url, view_func=app.index)\n app.routes.append(url)", "def route(self, route: str) -> Callable:\n\n def decorator(f: Callable) -> Callable:\n \"\"\"Decorates the function.\"\"\"\n self.routes[route] = f\n\n return f\n\n return decorator", "def add_route(self, route_name, suffix, **kwargs):\n kwargs['pregenerator'] = self.pregenerator\n kwargs['factory'] = self.factory\n self.config.add_route(route_name, self.full_url_prefix + suffix, **kwargs)", "def add(self, route, callable=None, methods='GET', prefix='', filters=[], formats=[]):\n\n item = Route(route, callable, methods, filters, formats, prefix)\n #log.debug(\"Adding route: %s\" % item)\n for method in item.methods:\n self[method][item.regex] = item", "def route(self, rule, **options):\n def decorator(f):\n self.add_url_rule(rule, f.__name__, **options) # 添加路由规则\n self.view_functions[f.__name__] = f # 更新 视图函数集合, 前面定义,{}\n return f\n return decorator", "def register_route(self, route, app):\n assert route not in self.routes\n self.routes[route] = app", "def add_route(self, 
route: Route, routing_url: str, methods: typing.Iterable[str] = (\"GET\",)):\n # Create an endpoint name for the route.\n route.routing_url = routing_url\n route.methods = methods\n # Add it to the list of routes to add later.\n self.routes.append(route)\n # Add the self to the route.\n route.bp = self\n\n return route", "def add_routes(self, mapper):\n pass", "def add_route(\n app: bottle.Bottle, path: str, method: str, handler: Callable, apply: list = None\n):\n\n if apply is None:\n apply = []\n if hasattr(handler, \"args\"):\n apply.append(use_args(handler.args))\n app.route(path, method, handler, apply=apply)", "def _register_routes(self):\n dashboard = self\n\n @dashboard.app.after_request\n def prevent_caching(response):\n if 'Cache-Control' not in response.headers:\n response.headers['Cache-Control'] = 'no-store'\n return response\n\n @dashboard.app.context_processor\n def injections():\n session.setdefault('enabled_modules',\n [i for i in range(len(self.modules))\n if self.modules[i].enabled])\n return {\n 'APP_NAME': 'signac-dashboard',\n 'APP_VERSION': __version__,\n 'PROJECT_NAME': self.project.config['project'],\n 'PROJECT_DIR': self.project.config['project_dir'],\n 'modules': self.modules,\n 'enabled_modules': session['enabled_modules'],\n 'module_assets': self._module_assets\n }\n\n # Add pagination support from http://flask.pocoo.org/snippets/44/\n @dashboard.app.template_global()\n def url_for_other_page(page):\n args = request.args.copy()\n args['page'] = page\n return url_for(request.endpoint, **args)\n\n @dashboard.app.template_global()\n def modify_query(**new_values):\n args = request.args.copy()\n for key, value in new_values.items():\n args[key] = value\n return '{}?{}'.format(request.path, url_encode(args))\n\n @dashboard.app.errorhandler(404)\n def page_not_found(error):\n return self._render_error(str(error))\n\n self.add_url('views.home', ['/'])\n self.add_url('views.settings', ['/settings'])\n self.add_url('views.search', ['/search'])\n self.add_url('views.jobs_list', ['/jobs/'])\n self.add_url('views.show_job', ['/jobs/<jobid>'])\n self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>'])\n self.add_url('views.change_modules', ['/modules'], methods=['POST'])", "def add_route(\n self,\n key,\n route=None,\n class_name=None,\n handler=None,\n function=None,\n **class_args,\n ):\n\n if not route and not class_name:\n raise MLRunInvalidArgumentError(\"route or class_name must be specified\")\n if not route:\n route = TaskStep(class_name, class_args, handler=handler)\n route.function = function or route.function\n route = self._routes.update(key, route)\n route.set_parent(self)\n return route", "def add_route(self, view, path, exact=True):\n if path[0] != '/':\n path = '/' + path\n for route in self._routes:\n assert path != route.path, 'Cannot use the same path twice'\n self._routes.append(Route(view=view, path=path, exact=exact))", "def add_route(route, endpoint=None, **kw):\n\n # ensure correct amout of slashes\n def apiurl(route):\n return '/'.join(s.strip('/') for s in [\"\", BASE_URL, route])\n\n # import pdb\n # pdb.set_trace()\n return add_bika_route(apiurl(route), endpoint, **kw)", "def add_view(self, view):\n # Add to views\n self._views.append(view)\n\n # If app was provided in constructor, register view with Flask app\n if self.app is not None:\n self.app.register_blueprint(view.create_blueprint(self))\n if view.is_menu:\n self._add_view_to_menu(view)", "def route(self, path, **params):\n\n def decorate(func):\n \"\"\"\n A function returned 
as a object in load time,\n which set route to given url along with decorated function.\n \"\"\"\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func\n \n return decorate", "def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)", "def route(self, path: str, **args: t.Any) -> t.Callable:\n def decorator(f: t.Callable) -> None:\n 
RouteMap.add_route(Route(path, f, args.get('methods', ['GET'])))\n return decorator", "def add_page(self, page, route=None, header=None, **kwargs):\n route = route or page.routes()\n\n if not isinstance(route, list):\n route = [route]\n\n for r in route:\n self.routes.append((r, type(page).__name__))\n self.app.add_url_rule(r, type(page).__name__, page, **kwargs)\n\n if header is not None:\n page.header = header", "def simple_route(config, name, url, fn):\n config.add_route(name, url)\n config.add_view(fn, route_name=name,\n renderer=\"testapp:templates/%s.mako\" % name)", "def __init__(self):\n super(RouteLayer, self).__init__()\n\n routes = [(\"^/ping\", views.ping),\n (\"^/e(co)?(?P<eco_message>[^$]+)$\", views.echo),\n (\"^/p(iada)?\\s*$\", views.get_piada)]\n\n routes.extend(MediaViews(self).routes)\n routes.extend(StaticViews(self).routes)\n # routes.extend(GroupAdminViews(self).routes)\n\n self.views = [(re.compile(pattern), callback) for pattern, callback in routes]", "def route(self, routing_url: str, methods: typing.Iterable[str] = (\"GET\",)):\n\n def _inner(func: callable):\n route = self.wrap_route(func)\n self.add_route(route, routing_url, methods)\n return route\n\n return _inner", "def add_views(self, *args):\n for view in args:\n self.add_view(view)", "async def register_route(\n self, callback: Callable[[Any, dict], Any], route: str = None, **kwargs: Optional[Any]\n ) -> str:\n if route is None:\n route = self.name\n\n if self.AD.http is not None:\n return await self.AD.http.register_route(callback, route, self.name, **kwargs)\n\n else:\n self.logger.warning(\"register_route for %s filed - HTTP component is not configured\", route)", "def route(self, pattern: str) -> Callable:\n\n def warpper(view: Callable) -> Callable:\n self.add_route(pattern, view)\n return view\n\n return warpper", "def wrapped(func):\n self.routes.append((path, {\n 'regex': re.compile('^' + re.sub(self._part_matcher,'(.*?)',path) + '$'),\n 'function':func,\n 'reqs':req,\n 'kwargs':kwargs,\n 'parts':parts_info,\n 'generate':generate\n }))\n\n return func", "def add_url_rule(self, rule, view_func, endpoint=None, **options):\n if endpoint is None:\n endpoint = view_func.__name__\n\n options['endpoint'] = endpoint\n methods = options.pop('methods', None)\n\n # TODO: allow one django urlpattern to route to different rules\n\n rule = self.url_rule_class(rule, **options)\n\n self.url_map.add(rule)\n self.view_functions[endpoint] = self.build_view_wrapper(view_func, rule)", "def route(app, requires_login):\n routes = {\n '/kontoplan/<accounting>': kontoplan,\n '/huvudbok/<accounting>': huvudbok,\n '/balansrakning/<accounting>': balance_report,\n '/resultatrakning/<accounting>': income_statement_report,\n '/verifikationslista/<accounting>': verifications,\n '/arsrapport/<accounting>': year_report,\n '/verifikat/<objectid:verification>': print_verification,\n '/vatreport/<objectid:accounting>': vat_report,\n '/periodrapport/<accounting>': period_report,\n '/salesreport/<objectid:toid>': sales_report,\n '/verifikationslista_andrade/<accounting>': verifications_modified,\n '/accountspayable_report/<accounting>': accountspayable_report,\n '/accountspayable_paymentjournal/<accounting>': accountspayable_paymentjournal\n }\n for route, func in routes.items():\n name = func.__name__\n func = requires_login()(func)\n app.add_url_rule(route, name, func, methods=['GET', 'POST'])", "def add_routes():\n\n # The Home page is accessible to anyone\n @app.route('/admin')\n @roles_required(['Admin', 'Agent'])\n def 
home_page():\n return render_template('./admin/home.html')\n\n # The Members page is only accessible to authenticated users\n @app.route('/admin/members')\n @roles_required(['Admin', 'Agent']) # Use of @login_required decorator\n def member_page():\n return render_template('./admin/members.html')\n\n # The Admin page requires an 'Admin' role.\n @app.route('/admin/dashboard')\n @roles_required('Admin') # Use of @roles_required decorator\n def admin_page():\n return render_template_string(\"\"\"\n {% extends \"admin_layout.html\" %}\n {% block content %}\n <h2>{%trans%}Admin Page{%endtrans%}</h2>\n <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>\n <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>\n <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>\n <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: member@example.com / Password1)</p>\n <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: admin@example.com / Password1')</p>\n <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>\n {% endblock %}\n \"\"\")\n\n @app.route('/register')\n def register_page():\n return render_template('./register.html')\n \n @app.route('/payment')\n @login_required\n def payment_page():\n PP_CLIENT_ID = \"AQnE3_uZrT1Vf56AluXZIR1ir4gUYWAMmxquNRnRzGSVukHeGPzUvu5WsW4FtdYhqrHO06IQkKTr8zOh\"\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n plan, last_payment_at = detail.plan, detail.last_payment_at\n plan_id = ''\n if plan == 'free':\n return redirect('/enter-exposure')\n if plan == 'premium':\n plan_id = 'P-6WL802942Y8719627L4PXXFY'\n elif plan == 'business':\n plan_id = 'P-306056489A234290WL4PXXLI'\n return render_template('./payment.html', plan_id=plan_id, PP_CLIENT_ID=PP_CLIENT_ID)\n \n @app.route('/payment-complete')\n def payment_complete_page():\n add_payment(current_user.email)\n return redirect('/enter-exposure')\n\n @app.route('/enter-exposure')\n @login_required\n def enter_exposure_page():\n return render_template('./exposures.html', currencies=CURRENCIES)\n\n @app.route('/report', methods=['GET', 'POST'])\n @login_required\n def report_page():\n user = User.query.filter_by(email=current_user.email).first()\n if request.method == 'POST':\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, None, user.id),\n )\n report = Report.query.get_or_404(request.args.get('id'))\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, report.id, user.id),\n )\n\n @app.route('/suggestion-tool')\n @login_required\n def suggestion_tool_page():\n user = User.query.filter_by(email=current_user.email).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n report_id = request.args.get('report_id') or reports.pop().id\n scenario = request.args.get('scenario') or 1\n\n return render_template(\n './suggestion_tool.html', \n **handle_suggestions(report_id, scenario),\n )\n\n @app.route('/account')\n @login_required\n def account_page():\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n for i in range(len(reports)):\n reports[i].created = reports[i].created.strftime(\"%A, %d-%b-%Y %H:%M:%S GMT%z\")\n return 
render_template(\n './account.html',\n email=current_user.email,\n first_name=detail.first_name,\n last_name=detail.last_name,\n company_name=detail.company_name,\n plan=detail.plan,\n reports=reports,\n )\n\n @app.route('/contact')\n @login_required\n def contact_page(): \n return render_template(\n './contact.html' \n )\n\n @app.route('/')\n def index():\n return render_template('./splash.html')", "def route(self, path, req = None, generate = False, **kwargs):\n req = req or []\n \n parts = re.findall(self._part_matcher, path)\n parts_info = []\n for part in xrange(len(parts)):\n part_pair = parts[part][1:-1].split('=')\n parts_info.append(part_pair)\n \n \n def wrapped(func):\n \"\"\"decorate the function and bind the route to it\"\"\"\n self.routes.append((path, {\n 'regex': re.compile('^' + re.sub(self._part_matcher,'(.*?)',path) + '$'),\n 'function':func,\n 'reqs':req,\n 'kwargs':kwargs,\n 'parts':parts_info,\n 'generate':generate\n }))\n\n return func\n return wrapped", "def add_route(self, route, method=None, verbs=[\"get\"], pos=0):\n def decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n route_tupel= (route,cls, {\"method\":method, \"verbs\" : verbs})\n handlers_tmp.append((route_tupel,pos))\n #print(\"added the following routes: \" + route_tupel)\n if pos < 0:\n if pos == -1:\n handlers.append(route_tupel)\n else:\n handlers.insert(len(handlers)-(pos+1),route_tupel)\n elif pos >= 0:\n handlers.insert(pos,route_tupel)\n \n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added route for: \" + cls.__name__ + \": \" + route)\n return cls\n return decorator", "def route(self):\n pass", "def decorator(self, decorator: Route.Decorator):\n pass", "def register_view(cls, app):\n view = cls.as_view(cls.endpoint)\n\n all_methods = set(cls.methods)\n if cls.rules is None:\n raise ValueError('No rules found for %r' % (cls, ))\n for rule, methods in cls.rules.items():\n rule_methods = set(methods) & all_methods\n if rule_methods:\n app.add_url_rule(rule=rule, view_func=view, methods=rule_methods)", "def register(self, wsgi_app):\n wsgi_app.add_url_rule(\n rule=self.path,\n view_func=self.controller,\n methods=self.methods)", "def routes(self, *routes):\n self.package.add_routes(*routes)\n for route_group in self.package.routes:\n self.application.make(\"router\").add(\n Route.group(load(route_group, \"ROUTES\", []), middleware=[\"web\"])\n )\n return self", "def add_routes_hook(map, *args, **kwargs):\n map.connect('/dex/media/*path', controller='dex', action='media')\n map.connect('/dex/lookup', controller='dex', action='lookup')\n map.connect('/dex/suggest', controller='dex', action='suggest')\n map.connect('/dex/parse_size', controller='dex', action='parse_size')\n\n # These are more specific than the general pages below, so must be first\n map.connect('/dex/moves/search', controller='dex_search', action='move_search')\n map.connect('/dex/pokemon/search', controller='dex_search', action='pokemon_search')\n\n map.connect('/dex/abilities/{name}', controller='dex', action='abilities')\n map.connect('/dex/items/{pocket}', controller='dex', action='item_pockets')\n map.connect('/dex/items/{pocket}/{name}', controller='dex', action='items')\n map.connect('/dex/locations/{name}', controller='dex', action='locations')\n map.connect('/dex/moves/{name}', controller='dex', action='moves')\n 
map.connect('/dex/natures/{name}', controller='dex', action='natures')\n map.connect('/dex/pokemon/{name}', controller='dex', action='pokemon')\n map.connect('/dex/pokemon/{name}/flavor', controller='dex', action='pokemon_flavor')\n map.connect('/dex/pokemon/{name}/locations', controller='dex', action='pokemon_locations')\n map.connect('/dex/types/{name}', controller='dex', action='types')\n\n map.connect('/dex/abilities', controller='dex', action='abilities_list')\n map.connect('/dex/items', controller='dex', action='items_list')\n map.connect('/dex/natures', controller='dex', action='natures_list')\n map.connect('/dex/moves', controller='dex', action='moves_list')\n map.connect('/dex/pokemon', controller='dex', action='pokemon_list')\n map.connect('/dex/types', controller='dex', action='types_list')\n\n map.connect('/dex/gadgets/compare_pokemon', controller='dex_gadgets', action='compare_pokemon')\n map.connect('/dex/gadgets/pokeballs', controller='dex_gadgets', action='capture_rate')\n map.connect('/dex/gadgets/stat_calculator', controller='dex_gadgets', action='stat_calculator')\n map.connect('/dex/gadgets/whos_that_pokemon', controller='dex_gadgets', action='whos_that_pokemon')\n\n # JSON API\n map.connect('/dex/api/pokemon', controller='dex_api', action='pokemon')", "def register_command_route(self, route: CommandRoute) -> None:\n self.__command_routes.append(route)\n self.__tg.add_handler(tg_ext.CommandHandler(command=route.command,\n callback=self.__serve_command_route))", "def register_to_blueprint(blueprint, route, methods_to_apifunc):\n methods_to_viewfunc = {}\n for method in methods_to_apifunc:\n methods_to_viewfunc[method] = methods_to_apifunc[method].get_viewfunc()\n\n if 'HEAD' not in methods_to_viewfunc and 'GET' in methods_to_viewfunc:\n methods_to_viewfunc['HEAD'] = methods_to_viewfunc['GET']\n\n blueprint.add_url_rule(\n \"/%s\" % route,\n endpoint=route,\n view_func=error_handler(route_multiplexer(methods_to_viewfunc)),\n methods=list(methods_to_viewfunc.keys()))", "def add_route(self, url, f, **kwargs):\n if url == '' or '?' 
in url:\n raise ValueError('Invalid URL')\n # Initial params for route\n params = {'methods': ['GET'],\n 'save_headers': [],\n 'max_body_size': 1024,\n 'allowed_access_control_headers': '*',\n 'allowed_access_control_origins': '*',\n }\n params.update(kwargs)\n params['allowed_access_control_methods'] = ', '.join(params['methods'])\n # Convert methods/headers to bytestring\n params['methods'] = [x.encode() for x in params['methods']]\n params['save_headers'] = [x.encode() for x in params['save_headers']]\n # If URL has a parameter\n if url.endswith('>'):\n idx = url.rfind('<')\n path = url[:idx]\n idx += 1\n param = url[idx:-1]\n if path.encode() in self.parameterized_url_map:\n raise ValueError('URL exists')\n params['_param_name'] = param\n self.parameterized_url_map[path.encode()] = (f, params)\n\n if url.encode() in self.explicit_url_map:\n raise ValueError('URL exists')\n self.explicit_url_map[url.encode()] = (f, params)", "def add_url_rule(self, rule, view_func, **options):\n methods = options.pop('methods', None)\n\n if methods is None:\n methods = ('GET',) # add default method\n\n methods = set(methods)\n\n# if not 'POST' or not 'GET' in methods:\n# sys.stderr.write('Method <%s> not implemented for function: <%s>.\\n' % (methods, view_func.__name__))\n# sys.exit(1)\n\n map_url = {'rule': rule,\n 'endpoint': view_func.__name__,\n 'view_func': view_func,\n 'methods': methods,\n '_regex': compile(rule)}\n\n if 'POST' in methods:\n self.post_map_url.append(map_url)\n elif 'GET' in methods:\n self.get_map_url.append(map_url)", "def pre_runroute_callable(self, route, request):\n return None", "def register_message_route(self, route: MessageRoute) -> None:\n self.__message_routes.append(route)\n self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.regex(route.message),\n callback=self.__serve_message_route))", "def route(self, url_pattern, methods=None):\n def decorated(f):\n self.url_map.append(\n ([m.upper() for m in (methods or ['GET'])],\n URLPattern(url_pattern), f))\n return f\n return decorated", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def add_route(enode, route, via, shell=None):\n via = ip_address(via)\n\n version = '-4'\n if (via.version == 6) or \\\n (route != 'default' and ip_network(route).version == 6):\n version = '-6'\n\n cmd = 'ip {version} route add {route} via {via}'.format(\n version=version, route=route, via=via\n )\n\n response = enode(cmd, shell=shell)\n assert not response", "def add_route(self, uri_template, resource):\n if not resource:\n raise Exception('Not a valid resource')\n\n path_maps = {}\n try:\n if uri_template:\n super(ResourceAPI, self).add_route(uri_template, resource)\n else:\n for attr in 
dir(resource):\n method = getattr(resource, attr)\n if callable(method) and hasattr(method,\n RESOURCE_METHOD_FLAG):\n flag = getattr(method, RESOURCE_METHOD_FLAG)\n map = path_maps.get(flag.path)\n if not map:\n uri_fields, template = (\n api_helpers.compile_uri_template(flag.path))\n map = (template, {})\n path_maps[flag.path] = map\n\n new_method = api_helpers._wrap_with_hooks(\n self._before, self._after, method)\n map[1][flag.method] = new_method\n\n for item in path_maps:\n self._routes.insert(0, (path_maps[item][0],\n path_maps[item][1]))\n except Exception:\n LOG.exception('Error occurred while adding the resource')\n LOG.debug(self._routes)", "def register_detail_view(self, blueprint):\n view = apply_decorators(self.detail_view, self.detail_decorators)\n blueprint.add_url_rule(self.detail_rule, self.detail_endpoint, view)", "def add_view(config):\n config.add_route('ogcproxy', '/ogcproxy')\n config.add_view('papyrus_ogcproxy.views:ogcproxy', route_name='ogcproxy')", "def add_rest_routes(self, route, api=None, pos=0):\n def decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n #print(cls_name)\n # default REST is the following pattern:\n # (r\"/post/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\", PostHandler),\n action=\"\"\n # if cls_name.endswith(\"handler\"):\n # action=action[:-7]\n # else:\n # action = cls_name\n # if route:\n action=route\n\n r=r\"/\"+action+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n if api:\n # render the given api in the route URL\n r=r\"/\"+action+r\"/\"+str(api)+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n \n #print(\"added the following routes: \" + r)\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers.append((r,cls))\n \n # use the positioned handlers\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n handlers_tmp.append(((r,cls),pos))\n\n r=r\"/\"+action+r\"/*\"\n #print(\"added the following routes: \" + r)\n handlers.append((r,cls))\n handlers_tmp.append(((r,cls),pos))\n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added RESTful routes for: \" + cls.__name__ + \" as /\" + action)\n #print(dir())\n return cls\n return decorator", "def includeme(config):\n config.add_route('home', '/')\n config.add_route('detail', '/detail/{id:\\d+}')\n config.add_route('update', '/edit/{id:\\d+}')\n config.add_route('create', '/create')", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def route(self):\n # TODO: wenn keine url, herausfinden, welche ????\n # TODO: wenn url = hostname (fqdn), dann -> google.ch\n if not (self.META.has_key('REMOTE_ADDR') and \n self.GET.has_key('provider')):\n #self.GET.has_key('url')):\n #return HttpResponseRedirect('/index.php')\n # TODO: Auf die Fehlerseite Link zu back.php\n return render_to_response('error.htm', {\n 'error': \"Falsche Parameter auf route.php\",\n })\n src_ip = self.META['REMOTE_ADDR']\n prov = self.GET['provider']\n url = \"http://www.google.ch\"\n if self.GET.has_key('url'):\n url = self.GET['url']\n # Add and save new route\n add_active_route(src_ip = src_ip, prov = prov)\n return HttpResponseRedirect(url)", "def decorate(func):\n from aha.dispatch.router import get_router\n r = get_router()\n r.connect(None, path, controller = func, **params)\n return func", "def route(cls, url, method='GET'):\n def route_decorator(func):\n 
item = (url, method, func)\n cls._docoratedRouteHandlers.append(item)\n return func\n return route_decorator", "def register_dashboard(self, function):\n\n plugin_file = None\n frame = inspect.stack()[1]\n\n if hasattr(frame, 'filename'):\n # Changed from Python 3.5\n plugin_file = os.path.basename(os.path.normpath(frame.filename))\n else:\n plugin_file = os.path.basename(os.path.normpath(frame[1]))\n\n plugin_name = plugin_file.split(\".\")[0]\n if plugin_name in self.dashboard_handlers:\n L.error(\"Error:\" + plugin_name + \" is already in dashboard\")\n return False\n self.dashboard_handlers[plugin_name] = function\n return True", "def route(self):\n\n mode = self.addon_args.get(\"mode\", [\"main_page\"])[0]\n\n if not mode.startswith(\"_\"):\n getattr(self, mode)()", "def route(self, rule, **options):\n def decorator(f):\n self.add_url_rule(rule, f, **options)\n return f\n return decorator", "def view(self, **options: Any) -> Callable:\n\n def decorator(f):\n rule = \"/\"\n endpoint = options.pop(\"endpoint\", f.__name__)\n self.add_url_rule(rule, endpoint, f, **options)\n return f\n\n return decorator", "def add(app, url = None, path = None, endpoint=None, index='index.html'):\n url = url or app.static_url_path or ''\n path = os.path.abspath(path or app.static_folder or '.')\n endpoint = endpoint or 'static_' + os.path.basename(path)\n\n if path == app.static_folder:\n if url != app.static_url_path:\n raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'\n ' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))\n else:\n @app.route(url + '/<path:filename>', endpoint = endpoint)\n def static_files(filename):\n return send_from_directory(path, filename)\n\n if index:\n @app.route(url + '/', endpoint = endpoint + '_index')\n def static_index():\n return send_from_directory(path, index)\n\n if url:\n @app.route(url, endpoint = endpoint + '_index_bare')\n def static_index_bare():\n return send_from_directory(path, index)", "def create_views(self):\n # Extract view objects\n customer_views = CustomerViews().views\n admin_views = AdminViews().views\n\n # Add customer views/routes\n for view in customer_views:\n view_obj = customer_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))\n \n # Add admin views/routes\n for view in admin_views:\n view_obj = admin_views.get(view)\n endpoint = view_obj.endpoint\n view_name = view_obj.name\n self.add_url_rule(endpoint, view_func=view_obj.as_view(view_name))", "def _add_route(self, connections):\n route = ArduinoSwitchControlRoute(connections)\n if route.input.label not in self.routes:\n self.routes[route.input.label] = {route.output.label: [route]}\n elif route.output.label not in self.routes[route.input.label]:\n self.routes[route.input.label][route.output.label] = [route]\n else:\n self.routes[route.input.label][route.output.label].append(route)", "def do_add_route(self, line):\n items = line.split(' ')\n if len(items) < 3:\n log.error('route only takes at least 3 arguments: '\n 'network via_address metric')\n else:\n points = []\n i = 2\n while i < len(items):\n points.append((items[i-1], items[i]))\n i += 2\n log.critical('Add route request at %s',\n datetime.datetime.now().strftime('%H.%M.%S.%f'))\n self.fibbing.install_route(items[0], points, True)", "def trigger(self, _result, pod, routes, *_args, **_kwargs):\n routes.add('/_grow/routes', router.RouteInfo('console', {\n 'handler': 
RoutesDevHandlerHook.serve_routes,\n }))", "def apply(self, callback, route):", "def register_image_route(self, route: ImageRoute) -> None:\n self.__image_routes.append(route)\n self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.photo,\n callback=self.__serve_image_route))", "def add_explorer_view(\n config: Configurator,\n route: str = \"/docs/\",\n route_name: str = \"pyramid_openapi3.explorer\",\n template: str = \"static/index.html\",\n ui_version: str = \"4.18.3\",\n permission: str = NO_PERMISSION_REQUIRED,\n apiname: str = \"pyramid_openapi3\",\n proto_port: t.Optional[t.Tuple[str, int]] = None,\n) -> None:\n\n def register() -> None:\n resolved_template = AssetResolver().resolve(template)\n\n def explorer_view(request: Request) -> Response:\n settings = config.registry.settings\n if settings.get(apiname) is None:\n raise ConfigurationError(\n \"You need to call config.pyramid_openapi3_spec for the explorer \"\n \"to work.\"\n )\n with open(resolved_template.abspath()) as f:\n if proto_port:\n spec_url = request.route_url(\n settings[apiname][\"spec_route_name\"],\n _scheme=proto_port[0],\n _port=proto_port[1],\n )\n else:\n spec_url = request.route_url(settings[apiname][\"spec_route_name\"])\n\n template = Template(f.read())\n html = template.safe_substitute(\n ui_version=ui_version,\n spec_url=spec_url,\n )\n return Response(html)\n\n config.add_route(route_name, route)\n config.add_view(\n route_name=route_name, permission=permission, view=explorer_view\n )\n\n config.action((f\"{apiname}_add_explorer\",), register, order=PHASE0_CONFIG)", "def register_view(self, viewfunc, url_rule=None) :\n\n\t\tviewid = View.parse_id(viewfunc, self.settings.VIEW_ROOT)\n\t\t\n\t\tif viewid not in self.views :\n\t\t\t# Add view if not exists\n\t\t\tv = View(\n\t\t\t\tid = viewid,\n\t\t\t\tviewfunc = viewfunc,\n\t\t\t\turl_rule = url_rule,\n\t\t\t)\n\t\t\tself.views[viewid] = v\n\n\t\telse :\n\t\t\t# Update view if exists\n\t\t\tv = self.views[viewid]\n\t\t\tv.viewfunc = viewfunc\n\n\t\t\tif url_rule is not None :\n\t\t\t\tv.url_rule = url_rule\n\n\t\treturn v", "def define_route(self, route, **kwargs):\n\n def decorator(cls):\n if is_class(cls):\n resource = cls(**kwargs)\n else:\n resource = cls\n\n self.add_route(route, resource)\n\n return cls\n\n return decorator", "def add_route_to_map(gdf_best_route: gpd.GeoDataFrame, basemap):\n #create a list of colors\n colors = ['orange', 'darkred', 'darkblue', 'purple', 'darkgreen', '#364e4a', 'cadetblues']\n \n # make a feature group for every route\n # merge them to a feature group\n for i, row in gdf_best_route.iterrows():\n fg = folium.FeatureGroup(f\"Route {row['order']} from {row['start_city']} to {row['end_city']}\")\n # add the simple route\n fg.add_child(folium.PolyLine(\n locations=row[\"folium_geom\"], \n popup=f\"From {row['start_city']} to {row['end_city']}\",\n tooltip=f\"Route {row['order']}\",\n color=colors[i], \n dash_array='10',\n weight=4))\n basemap.add_child(fg)\n \n return None", "def add_static_route(appliances=[],\n credentials=[],\n timeout=120,\n no_check_hostname=False,\n save_config=False,\n EthernetInterface=\"\",\n destination=None,\n gateway=None,\n metric=None,\n web=False):\n check_hostname = not no_check_hostname\n if not isinstance(metric, basestring) and metric.isdigit():\n print \"metric must be provided and must be a number >= 0\"\n import sys\n sys.exit(-1)\n env = datapower.Environment(\n appliances,\n credentials,\n timeout,\n check_hostname=check_hostname)\n kwargs = {\n 'ethernet_interface': 
EthernetInterface,\n 'destination': destination,\n 'gateway': gateway,\n 'metric': metric}\n resp = env.perform_async_action('add_static_route', **kwargs)\n\n if web:\n output = util.render_boolean_results_table(\n resp, suffix=\"add_static_route\")\n\n if save_config:\n env.perform_async_action('SaveConfig', **{'domain': 'default'})\n if web:\n output += util.render_boolean_results_table(\n resp, suffix=\"save config\")\n if web:\n return output, util.render_history(env)", "def register_views(app: Application, base: str):\n cors = aiohttp_cors.setup(app)\n\n for view in views:\n logger.info(\"Registered %s at %s\", view.__name__, base + view.url)\n view.register_route(app, base)\n view.enable_cors(cors)", "def includeme(config): # pragma: no cover\n config.add_route('home', '/')\n config.add_view('kotti.views.edit.actions.contents',\n route_name=u'home',\n permission=u'view',\n renderer='kotti:templates/edit/contents.pt',\n )", "def register_document_route(self, route: DocumentRoute) -> None:\n self.__doсument_routes.append(route)\n self.__tg.add_handler(tg_ext.MessageHandler(filters=tg_ext.Filters.document,\n callback=self.__serve_document_route))", "def add_quest(self, method: str, route: str, handler):\n\n self.aiohttp.router.add_route(method, route, handler)", "def post_runroute_callable(self, request):\n return None", "def _register_view(self, app, resource, *urls, **kwargs):\n endpoint = kwargs.pop('endpoint', None) or resource.__name__.lower()\n self.endpoints.add(endpoint)\n\n if endpoint in getattr(app, 'view_class', {}):\n existing_view_class = app.view_functions[endpoint].__dict__['view_class']\n\n # if you override the endpoint with a different class, avoid the collision by raising an exception\n if existing_view_class != resource:\n raise ValueError('Endpoint {!r} is already set to {!r}.'\n .format(endpoint, existing_view_class.__name__))\n\n if not hasattr(resource, 'endpoint'): # Don't replace existing endpoint\n resource.endpoint = endpoint\n resource_func = self.output(resource.as_view(endpoint))\n\n for decorator in chain(kwargs.pop('decorators', ()), self.decorators):\n resource_func = decorator(resource_func)\n\n for url in urls:\n rule = self._make_url(url, self.blueprint.url_prefix if self.blueprint else None)\n\n # If this Api has a blueprint\n if self.blueprint:\n # And this Api has been setup\n if self.blueprint_setup:\n # Set the rule to a string directly, as the blueprint\n # is already set up.\n self.blueprint_setup.add_url_rule(self._make_url(url, None), view_func=resource_func, **kwargs)\n continue\n else:\n # Set the rule to a function that expects the blueprint\n # prefix to construct the final url. 
Allows deferment\n # of url finalization in the case that the Blueprint\n # has not yet been registered to an application, so we\n # can wait for the registration prefix\n rule = partial(self._make_url, url)\n else:\n # If we've got no Blueprint, just build a url with no prefix\n rule = self._make_url(url, None)\n # Add the url to the application or blueprint\n app.add_url_rule(rule, view_func=resource_func, **kwargs)", "def add_views(admin, db):\n admin.add_view(PartAdmin(Part, db.session, endpoint='admin_parts', url='parts'))\n admin.add_view(PartComponentAdmin(PartComponent, db.session, endpoint='admin_part_components', url='part_components'))\n pass", "def magic_route(self, rule, **options):\n\n def _decorator(f):\n endpoint = options.pop(\"endpoint\", f.__name__)\n if f not in self._injection_map:\n self._injection_map[f] = self._container.magic_partial(\n f, shared=self._request_singletons\n )\n self.blueprint.add_url_rule(\n rule, endpoint, self._injection_map[f], **options\n )\n return f\n\n return _decorator", "def required(self, f):\n self.storage.protected_routes.add(f)\n return f", "def register(app, fn):\n\n @functools.wraps(fn)\n def config_route(**kwargs):\n \"\"\"\n :param kwargs: str, id of existing entry\n :return: dict or exception\n \"\"\"\n\n return fn(app.config, **kwargs)\n\n app.route(*fn.route_args, **fn.route_kwargs)(config_route)", "def route(self, command):\n\n def _route(func):\n self._command_hash_views[command] = func\n\n def __route(*args, **kwargs):\n return func(*args, **kwargs)\n\n return __route\n\n return _route", "def add_returned_route_on_gw(self, context, router_id, port):\n LOG.debug('OVNL3RouterPlugin::')\n ovn_router_name = utils.ovn_gateway_name(router_id)\n for fixed_ip in port['fixed_ips']:\n subnet_id = fixed_ip['subnet_id']\n subnet = self._plugin.get_subnet(context, subnet_id)\n route = {'destination': subnet['cidr'], 'nexthop': ovn_const.OVN_LROUTER_TRANSIT_PORT_IP}\n with self._ovn.transaction(check_error=True) as txn:\n txn.add(self._ovn.add_static_route(ovn_router_name,\n ip_prefix=route['destination'],\n nexthop=route['nexthop']))", "def routes(self, body):\n pass", "def wrap_route(self, cbl: typing.Callable, *args, **kwargs) -> Route:\n rtt = Route(cbl, *args, **kwargs)\n return rtt", "def includeme(config):\n add_view(config)", "def add_view(self, *args, **kwargs):\r\n if not kwargs.get(\"extra_context\"):\r\n kwargs[\"extra_context\"] = {}\r\n kwargs[\"extra_context\"].update({\r\n \"insert_classes\": self.admin_site.insert_classes,\r\n \"form_url\": \"herp\"\r\n })\r\n return super(ServeeModelAdmin, self).add_view(*args, **kwargs)", "def route(self, rule, **options):\n def decorator(f):\n endpoint = options.pop('endpoint', None)\n self.add_url_rule(rule, f, endpoint, **options)\n return f\n return decorator", "def serve_routes(pod, _request, _matched):\n env = ui.create_jinja_env()\n template = env.get_template('views/base-reroute.html')\n kwargs = {\n 'pod': pod,\n 'partials': [{\n 'partial': 'routes',\n 'routes': pod.router.routes,\n }],\n 'title': 'Pod Routes',\n }\n content = template.render(kwargs)\n response = wrappers.Response(content)\n response.headers['Content-Type'] = 'text/html'\n return response", "def insert(self, route, handler):\n curr = self.root\n for part in route:\n curr = curr.insert(part)\n curr.handler = handler", "def get_route(function):\n return '/%s%s' % (app.config['PUBLIC_API_PREFIX'], app.config['PUBLIC_API_ROUTES'][function])", "def static_routes(self, static_routes):\n \n self._static_routes = 
static_routes" ]
[ "0.70428646", "0.69580984", "0.6819496", "0.6740185", "0.65938735", "0.6558933", "0.6456988", "0.643927", "0.64224243", "0.6417979", "0.6417649", "0.6408866", "0.6398267", "0.63959163", "0.638843", "0.6382129", "0.63491535", "0.63472974", "0.63417435", "0.6259017", "0.6238091", "0.62042457", "0.6204213", "0.619048", "0.6163764", "0.61403966", "0.61153746", "0.6112323", "0.6067735", "0.60122836", "0.5998037", "0.5873989", "0.5828024", "0.5824536", "0.5817501", "0.581669", "0.57793", "0.57502884", "0.57489777", "0.5728716", "0.57281923", "0.57205105", "0.5698328", "0.5675676", "0.56648755", "0.5659959", "0.56445247", "0.56110686", "0.561047", "0.56098735", "0.56070143", "0.559972", "0.5599486", "0.5589583", "0.5576523", "0.5571028", "0.55543745", "0.5546432", "0.5519806", "0.5518621", "0.5505253", "0.54718137", "0.5467756", "0.54625595", "0.54481995", "0.5447967", "0.54458094", "0.5427575", "0.54268277", "0.5422934", "0.5412128", "0.5406841", "0.54051584", "0.5402099", "0.5382705", "0.53707886", "0.5366838", "0.53625864", "0.53624165", "0.53615415", "0.53515434", "0.53513116", "0.53467363", "0.53090346", "0.5301301", "0.5300422", "0.52951527", "0.527696", "0.52756965", "0.5275185", "0.52534384", "0.52373517", "0.5222118", "0.52201617", "0.52117884", "0.5208477", "0.5203383", "0.517596", "0.5173234", "0.51705027", "0.5161837" ]
0.0
-1
Registers routes with the Flask application. This method configures context processors, templates, and sets up routes for a basic Dashboard instance. Additionally, routes declared by modules are registered by this method.
def _register_routes(self): dashboard = self @dashboard.app.after_request def prevent_caching(response): if 'Cache-Control' not in response.headers: response.headers['Cache-Control'] = 'no-store' return response @dashboard.app.context_processor def injections(): session.setdefault('enabled_modules', [i for i in range(len(self.modules)) if self.modules[i].enabled]) return { 'APP_NAME': 'signac-dashboard', 'APP_VERSION': __version__, 'PROJECT_NAME': self.project.config['project'], 'PROJECT_DIR': self.project.config['project_dir'], 'modules': self.modules, 'enabled_modules': session['enabled_modules'], 'module_assets': self._module_assets } # Add pagination support from http://flask.pocoo.org/snippets/44/ @dashboard.app.template_global() def url_for_other_page(page): args = request.args.copy() args['page'] = page return url_for(request.endpoint, **args) @dashboard.app.template_global() def modify_query(**new_values): args = request.args.copy() for key, value in new_values.items(): args[key] = value return '{}?{}'.format(request.path, url_encode(args)) @dashboard.app.errorhandler(404) def page_not_found(error): return self._render_error(str(error)) self.add_url('views.home', ['/']) self.add_url('views.settings', ['/settings']) self.add_url('views.search', ['/search']) self.add_url('views.jobs_list', ['/jobs/']) self.add_url('views.show_job', ['/jobs/<jobid>']) self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>']) self.add_url('views.change_modules', ['/modules'], methods=['POST'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_routes(self):\n# from server.flask import views as flask_views\n# flask_views_custom_methods = filter(lambda x: x.startswith(\"view_\"), dir(flask_views))\n# for custom_method in flask_views_custom_methods:\n# # Retrieve data needed to add the URL rule to the Flask app\n# view_method = getattr(locals()[\"flask_views\"], custom_method)\n# docstring = getattr(view_method, \"__doc__\")\n# index_start = docstring.index(\"@app.route\")\n# index_end = index_start + len(\"@app.route\") + 1\n# custom_method_url = docstring[index_end:].replace(\" \",\"\").replace(\"\\n\",\"\")\n# # Get: (a) method URL to bind flask app, (b), method name, (c) method object to invoke\n# self._app.add_url_rule(custom_method_url, custom_method, view_func=view_method(self._app.mongo))\n self._app.register_blueprint(ro_flask_views)", "def build_routes(app):\n app.register_blueprint(workflow_plans_blueprint)\n app.register_blueprint(cache_blueprint)\n app.register_blueprint(config_blueprint)\n app.register_blueprint(dataset_blueprint)\n app.register_blueprint(graph_blueprint)\n app.register_blueprint(jobs_blueprint)\n app.register_blueprint(project_blueprint)\n app.register_blueprint(templates_blueprint)\n app.register_blueprint(version_blueprint)\n app.register_blueprint(apispec_blueprint)\n app.register_blueprint(versions_list_blueprint)", "def create_routes(self):\r\n self._app.route('/api/autoconf',\r\n methods=['GET'],\r\n endpoint='api_autoconf')(self.entrypoint)\r\n self._app.route('/api/autoconf/<string:session_id>',\r\n methods=['GET', 'POST', 'DELETE'],\r\n endpoint='api_autoconf_status')(self.entrypoint)\r\n self._app.route('/api/autoconf/rgc',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_rgc')(self.entrypoint)\r\n self._app.route('/api/autoconf/pd',\r\n methods=['POST', 'DELETE', 'GET', 'PATCH'],\r\n endpoint='api_autoconf_pd')(self.entrypoint)", "def register_blueprints(self):\n # Local import due to flask/blueprint circular imports.\n from mmapi.views import api_bp\n self.app.register_blueprint(api_bp, url_prefix='/api')", "def _init_routes(self):\n before_hooks = [\n helpers.require_accepts_json,\n helpers.extract_project_id,\n\n # NOTE(kgriffs): Depends on project_id being extracted, above\n functools.partial(helpers.validate_queue_name,\n self._validate.queue_name)\n ]\n\n self.app = falcon.API(before=before_hooks)\n\n queue_controller = self._storage.queue_controller\n message_controller = self._storage.message_controller\n claim_controller = self._storage.claim_controller\n\n # Home\n self.app.add_route('/v1', v1.V1Resource())\n\n # Queues Endpoints\n queue_collection = queues.CollectionResource(self._validate,\n queue_controller)\n self.app.add_route('/v1/queues', queue_collection)\n\n queue_item = queues.ItemResource(queue_controller, message_controller)\n self.app.add_route('/v1/queues/{queue_name}', queue_item)\n\n stats_endpoint = stats.Resource(queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/stats', stats_endpoint)\n\n # Metadata Endpoints\n metadata_endpoint = metadata.Resource(self._wsgi_conf, self._validate,\n queue_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/metadata', metadata_endpoint)\n\n # Messages Endpoints\n msg_collection = messages.CollectionResource(self._wsgi_conf,\n self._validate,\n message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/messages', msg_collection)\n\n msg_item = messages.ItemResource(message_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n 
'/messages/{message_id}', msg_item)\n\n # Claims Endpoints\n claim_collection = claims.CollectionResource(self._wsgi_conf,\n self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims', claim_collection)\n\n claim_item = claims.ItemResource(self._wsgi_conf, self._validate,\n claim_controller)\n self.app.add_route('/v1/queues/{queue_name}'\n '/claims/{claim_id}', claim_item)\n\n # Health\n self.app.add_route('/v1/health', health.HealthResource())", "def register_blueprints_on_app(app):\n app.register_blueprint(views.main_pages)\n app.register_blueprint(views.main_api, url_prefix='/api')", "def _configure_api_routes(self, app: FastAPI):\n authenticator = JWTAuthenticator(self.signer)\n\n data_update_publisher: Optional[DataUpdatePublisher] = None\n if self.publisher is not None:\n data_update_publisher = DataUpdatePublisher(self.publisher)\n\n # Init api routers with required dependencies\n data_updates_router = init_data_updates_router(\n data_update_publisher,\n self.data_sources_config,\n authenticator\n )\n webhook_router = init_git_webhook_router(self.pubsub.endpoint, authenticator)\n security_router = init_security_router(self.signer, StaticBearerAuthenticator(self.master_token))\n\n # mount the api routes on the app object\n app.include_router(bundles_router, tags=[\"Bundle Server\"], dependencies=[Depends(authenticator)])\n app.include_router(data_updates_router, tags=[\"Data Updates\"], dependencies=[Depends(authenticator)])\n app.include_router(webhook_router, tags=[\"Github Webhook\"])\n app.include_router(security_router, tags=[\"Security\"])\n app.include_router(self.pubsub.router, tags=[\"Pub/Sub\"])\n\n if self.jwks_endpoint is not None:\n # mount jwts (static) route\n self.jwks_endpoint.configure_app(app)\n\n # top level routes (i.e: healthchecks)\n @app.get(\"/healthcheck\", include_in_schema=False)\n @app.get(\"/\", include_in_schema=False)\n def healthcheck():\n return {\"status\": \"ok\"}\n\n return app", "def add_routes(app: web.Application):\n ActionsView.register_view(app)\n PingView.register_view(app)\n CoreShutdownView.register_view(app)\n CoreRestartView.register_view(app)\n ReloadConfigView.register_view(app)\n ListItemsView.register_view(app)\n GetItemView.register_view(app)\n ItemStatesView.register_view(app)\n ItemStateView.register_view(app)\n ActionsView.register_view(app)\n ExecuteActionView.register_view(app)\n ListModulesView.register_view(app)", "def initialize_routes(api):\n api.add_resource(WatchlistsApi, '/api/watchlists')\n api.add_resource(WatchlistApi, '/api/watchlist/<id>')\n api.add_resource(RegisterUserApi, '/api/auth/register')\n api.add_resource(LoginUserApi, '/api/auth/login')\n api.add_resource(ResetPassword, '/api/auth/reset')\n api.add_resource(ResetFogottenPassword, '/api/auth/reset/password')\n api.add_resource(ForgotPassword, '/api/auth/forgot')\n api.add_resource(ForgotPasswordReset, '/reset/password/<token>')\n api.add_resource(Home, '/')\n api.add_resource(Logout, '/logout')\n api.add_resource(Dashboard, '/dashboard')\n api.add_resource(DashboardSearch, '/dashboard/search')\n api.add_resource(SearchMovies, '/search/movies/<title>')\n api.add_resource(SearchMovieDetails, '/search/movie/details/<id>')\n api.add_resource(SearchTvShows, '/search/shows/<title>')\n api.add_resource(SearchShowDetails, '/search/show/details/<id>')\n api.add_resource(SearchTrendingMovies, '/search/trending/movies')\n api.add_resource(Recommend, '/recommend')", "def register_routes(self, api):\n # Device Registration\n 
api.add_resource(controllers.UserDeviceRegistration, '/device-registration')", "def initialize_routes(app):\n # Authentication \n app.add_resource(auth.LoginApi, '/auth/login')\n app.add_resource(auth.SignupApi, '/auth/SignupApi')\n # Initialization and activation of a parking lot\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/add')\n app.add_resource(parkingInit.ActivateParking, '/administrate/activate')\n app.add_resource(parkingInit.InitilizeAParking, '/administrate/getall', endpoint='getall')\n # Client management\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/get')\n app.add_resource(useresResources.GestionUstilisateurs, '/administrate/usesrs/getById/<int:idUser>', endpoint='get_by_id')\n # Financial statistics\n app.add_resource(stats.Money, '/administrate/finance/monthly', endpoint='monthly')\n app.add_resource(stats.Money, '/administrate/finance/yearly', endpoint='yearly')", "def init_app(app):\n app.register_blueprint(index_bl)\n app.register_blueprint(main_bl, url_prefix=\"/main\")\n app.register_blueprint(map_bl, url_prefix=\"/map\")\n app.register_blueprint(login_bl, url_prefix=\"/login\")\n app.register_blueprint(prof_bl, url_prefix=\"/profile\")\n app.register_blueprint(average_bl, url_prefix=\"/average\")", "def add_routes():\n\n # The Home page is accessible to anyone\n @app.route('/admin')\n @roles_required(['Admin', 'Agent'])\n def home_page():\n return render_template('./admin/home.html')\n\n # The Members page is only accessible to authenticated users\n @app.route('/admin/members')\n @roles_required(['Admin', 'Agent']) # Use of @login_required decorator\n def member_page():\n return render_template('./admin/members.html')\n\n # The Admin page requires an 'Admin' role.\n @app.route('/admin/dashboard')\n @roles_required('Admin') # Use of @roles_required decorator\n def admin_page():\n return render_template_string(\"\"\"\n {% extends \"admin_layout.html\" %}\n {% block content %}\n <h2>{%trans%}Admin Page{%endtrans%}</h2>\n <p><a href={{ url_for('user.register') }}>{%trans%}Register{%endtrans%}</a></p>\n <p><a href={{ url_for('user.login') }}>{%trans%}Sign in{%endtrans%}</a></p>\n <p><a href={{ url_for('home_page') }}>{%trans%}Home Page{%endtrans%}</a> (accessible to anyone)</p>\n <p><a href={{ url_for('member_page') }}>{%trans%}Member Page{%endtrans%}</a> (login_required: member@example.com / Password1)</p>\n <p><a href={{ url_for('admin_page') }}>{%trans%}Admin Page{%endtrans%}</a> (role_required: admin@example.com / Password1')</p>\n <p><a href={{ url_for('user.logout') }}>{%trans%}Sign out{%endtrans%}</a></p>\n {% endblock %}\n \"\"\")\n\n @app.route('/register')\n def register_page():\n return render_template('./register.html')\n \n @app.route('/payment')\n @login_required\n def payment_page():\n PP_CLIENT_ID = \"AQnE3_uZrT1Vf56AluXZIR1ir4gUYWAMmxquNRnRzGSVukHeGPzUvu5WsW4FtdYhqrHO06IQkKTr8zOh\"\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n plan, last_payment_at = detail.plan, detail.last_payment_at\n plan_id = ''\n if plan == 'free':\n return redirect('/enter-exposure')\n if plan == 'premium':\n plan_id = 'P-6WL802942Y8719627L4PXXFY'\n elif plan == 'business':\n plan_id = 'P-306056489A234290WL4PXXLI'\n return render_template('./payment.html', plan_id=plan_id, PP_CLIENT_ID=PP_CLIENT_ID)\n \n @app.route('/payment-complete')\n def payment_complete_page():\n add_payment(current_user.email)\n return redirect('/enter-exposure')\n\n 
@app.route('/enter-exposure')\n @login_required\n def enter_exposure_page():\n return render_template('./exposures.html', currencies=CURRENCIES)\n\n @app.route('/report', methods=['GET', 'POST'])\n @login_required\n def report_page():\n user = User.query.filter_by(email=current_user.email).first()\n if request.method == 'POST':\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, None, user.id),\n )\n report = Report.query.get_or_404(request.args.get('id'))\n return render_template(\n './report.html',\n **handle_report(request.form, request.files, report.id, user.id),\n )\n\n @app.route('/suggestion-tool')\n @login_required\n def suggestion_tool_page():\n user = User.query.filter_by(email=current_user.email).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n report_id = request.args.get('report_id') or reports.pop().id\n scenario = request.args.get('scenario') or 1\n\n return render_template(\n './suggestion_tool.html', \n **handle_suggestions(report_id, scenario),\n )\n\n @app.route('/account')\n @login_required\n def account_page():\n user = User.query.filter_by(email=current_user.email).first()\n detail = UserDetail.query.filter_by(user_id=user.id).first()\n reports = Report.query.filter_by(user_id=user.id).all()\n for i in range(len(reports)):\n reports[i].created = reports[i].created.strftime(\"%A, %d-%b-%Y %H:%M:%S GMT%z\")\n return render_template(\n './account.html',\n email=current_user.email,\n first_name=detail.first_name,\n last_name=detail.last_name,\n company_name=detail.company_name,\n plan=detail.plan,\n reports=reports,\n )\n\n @app.route('/contact')\n @login_required\n def contact_page(): \n return render_template(\n './contact.html' \n )\n\n @app.route('/')\n def index():\n return render_template('./splash.html')", "def register_blueprints(app):\n blueprints = {INDEX, DASHBOARD, COMMENT_SECTION}\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def __create_routes__(self):\n self.app.add_url_rule('/', 'main_page', self.main_page)\n self.app.add_url_rule('/day', 'day', self.get_current_iteration, methods=['GET'])\n self.app.add_url_rule('/traders', 'traders', self.register_trader, methods=['POST'])\n self.app.add_url_rule('/traders/<id>', 'trader', self.get_trader_state, methods=['GET'])\n self.app.add_url_rule('/stock/price', 'price', self.get_stock_price, methods=['GET'])\n self.app.add_url_rule('/stock/history', 'history', self.get_stock_price_history, methods=['GET'])\n self.app.add_url_rule('/stock/buy', 'buy', self.place_buy_order, methods=['POST'])\n self.app.add_url_rule('/stock/sell', 'sell', self.place_sell_order, methods=['POST'])\n self.app.add_url_rule('/simulation/step', 'step', self.market_session_step, methods=['POST'])\n self.app.add_url_rule('/simulation/run', 'run', self.run_simulation, methods=['POST'])", "def routes(self, *routes):\n self.package.add_routes(*routes)\n for route_group in self.package.routes:\n self.application.make(\"router\").add(\n Route.group(load(route_group, \"ROUTES\", []), middleware=[\"web\"])\n )\n return self", "def register_blueprints():\n from app.routes import blog, client\n blueprints = [blog, client]\n\n for bp in blueprints:\n app.register_blueprint(bp)", "def add_routes(self):\n pass", "def register_routes(self):\n @inlineCallbacks\n def registered(response):\n if response.code != 200:\n text = yield response.text()\n self._env.logger.error('{} {}'.format(response.code, text))\n\n try:\n api_register = 
'{}://{}:{}/api/1.0.0/register'.format(\n self._env.api_protocol,\n self._env.api_host,\n self._env.api_port\n )\n remote_ms = self._env.get('remote_ms', None)\n\n for path in self._env.swagger.paths:\n uri = self._env.swagger.base + path.split('{')[0].rstrip('/')\n if remote_ms:\n route = {\n 'protocol': 'https',\n 'host': remote_ms,\n 'port': 443,\n }\n else:\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n }\n route = dict(route, **{'uri': uri, 'key': self._key})\n #self._env.logger.info('Route> {}'.format(str(route)))\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n swagger_paths = ['/ui/css', '/ui/lib', '/ui/images', '/swagger.json']\n ui = '/' + self._env.get('swagger_ui', 'ui')+'/'\n swagger_paths.append(ui)\n\n for path in swagger_paths:\n uri = self._env.swagger.base\n if len(uri):\n if uri[-1] == '/':\n uri = uri[:-1]\n uri += path\n if self._env.get('flask_private'):\n route = {\n 'protocol': self._env.get('flask_protocol'),\n 'host': self._env.get('flask_host'),\n 'port': self._env.get('flask_port'),\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n else:\n route = {\n 'protocol': self._env.flask_protocol,\n 'host': self._env.flask_host,\n 'port': self._env.flask_port,\n 'uri': uri,\n 'key': self._key,\n 'ui': path == ui,\n 'name': self._env.get('my_name', 'no local name', 'microservice')\n }\n treq.post(api_register, data={'details': dumps(route)}).addCallback(registered)\n\n return True\n except Exception as e:\n self._env.logger.error('error registering routes \"{}\"'.format(str(e)))", "def create_routes():\n app_dir = os.path.dirname(os.path.abspath(__file__))\n controller_dir = os.path.join(app_dir, \"controllers\")\n routes = Mapper(directory=controller_dir)\n routes.connect(\"/\", controller=\"root\", action=\"index\")\n routes.connect(\"/body\", controller=\"root\", action=\"body\")\n routes.connect(\"/raise_exception\", controller=\"root\", action=\"raise_exception\")\n routes.connect(\"/raise_wrong_code\", controller=\"root\", action=\"raise_wrong_code\")\n routes.connect(\"/raise_custom_code\", controller=\"root\", action=\"raise_custom_code\")\n routes.connect(\"/raise_code_method\", controller=\"root\", action=\"raise_code_method\")\n routes.connect(\"/render\", controller=\"root\", action=\"render\")\n routes.connect(\"/path-params/{year:\\d+}/{month}/\", controller=\"root\", action=\"path_params\") # noqa: W605\n routes.connect(\"/render_exception\", controller=\"root\", action=\"render_exception\")\n routes.connect(\"/response_headers\", controller=\"root\", action=\"response_headers\")\n routes.connect(\"/identify\", controller=\"root\", action=\"identify\")\n return routes", "def register_blueprints(app):\n from .main import main_blueprint\n app.register_blueprint(main_blueprint)\n\n from .submissions import submissions_blueprint\n app.register_blueprint(submissions_blueprint, url_prefix='/submissions')\n from .revisions import revisions_blueprint\n app.register_blueprint(revisions_blueprint, url_prefix='/revisions')", "def register_blueprints(app):\n app.register_blueprint(user)\n app.register_blueprint(messages)\n app.register_blueprint(auth, url_prefix='/auth')\n app.register_blueprint(tasks)\n 
app.register_blueprint(core)\n app.register_blueprint(errors)", "def register_blueprints(app):\n app.register_blueprint(general.general)\n app.register_blueprint(validate.validate, url_prefix='')\n\n # All done!\n app.logger.info(\"Blueprints registered\")", "def setup_routes(application):\n\n # Do the controller import here after the models have been loaded\n from app.controllers import view_controller, api_controller\n\n # Some helpers to make defining the routes a bit cleaner\n def get(path, rule, func, *args, **kwargs):\n kwargs['methods'] = ['GET']\n application.add_url_rule(path, rule, func, *args, **kwargs)\n\n def post(path, rule, func, *args, **kwargs):\n kwargs['methods'] = ['POST']\n application.add_url_rule(path, rule, func, *args, **kwargs)\n\n get('/', 'index', view_controller.index)\n get('/upload', 'upload', view_controller.upload)\n get('/search', 'search', view_controller.search)\n\n post('/api/publish', 'api_publish', api_controller.publish)\n get('/api/search/<text>', 'api_search', api_controller.search)\n get('/api/search/', 'api_search_empty', api_controller.search)\n get('/api/get/<int:id>', 'api_get_file', api_controller.get_file)", "def build_routes(config):\r\n\r\n config.add_route(\"home\", \"/\")\r\n config.add_route(\"dashboard\", \"/dashboard\")\r\n\r\n # Add routes for the combo loader to match up to static file requests.\r\n config.add_route('convoy', '/combo')\r\n\r\n JS_FILES = config.get_settings()['app_root'] + '/bookie/static/js/build'\r\n application = combo_app(JS_FILES)\r\n config.add_view(\r\n wsgiapp2(application),\r\n route_name='convoy')\r\n\r\n # auth routes\r\n config.add_route(\"login\", \"login\")\r\n config.add_route(\"logout\", \"logout\")\r\n config.add_route(\"reset\", \"{username}/reset/{reset_key}\")\r\n config.add_route(\"signup\", \"signup\")\r\n config.add_route(\"signup_process\", \"signup_process\")\r\n\r\n # celery routes\r\n config.add_route(\"celery_hourly_stats\", \"jobhourly\")\r\n\r\n # bmark routes\r\n config.add_route(\"bmark_recent\", \"recent\")\r\n config.add_route(\"bmark_recent_tags\", \"recent/*tags\")\r\n\r\n config.add_route(\"bmark_recent_rss\", \"rss\")\r\n config.add_route(\"bmark_recent_rss_tags\", \"rss/*tags\")\r\n\r\n config.add_route(\"bmark_readable\", \"bmark/readable/{hash_id}\")\r\n\r\n # user based bmark routes\r\n config.add_route(\"user_bmark_recent\", \"{username}/recent\")\r\n config.add_route(\"user_bmark_recent_tags\", \"{username}/recent/*tags\")\r\n\r\n config.add_route(\"user_bmark_rss\", \"{username}/rss\")\r\n config.add_route(\"user_bmark_rss_tags\", \"{username}/rss/*tags\")\r\n\r\n config.add_route(\"user_bmark_edit\", \"{username}/edit/{hash_id}\")\r\n config.add_route(\"user_bmark_edit_error\",\r\n \"{username}/edit_error/{hash_id}\")\r\n config.add_route(\"user_bmark_new\", \"{username}/new\")\r\n config.add_route(\"user_bmark_new_error\", \"{username}/new_error\")\r\n config.add_route(\r\n \"user_delete_all_bookmarks\",\r\n \"{username}/account/delete_all_bookmarks\")\r\n\r\n # config.add_route(\"bmark_delete\", \"/bmark/delete\")\r\n # config.add_route(\"bmark_confirm_delete\", \"/bmark/confirm/delete/{bid}\")\r\n\r\n # tag related routes\r\n config.add_route(\"tag_list\", \"tags\")\r\n config.add_route(\"tag_bmarks\", \"tags/*tags\")\r\n\r\n # user tag related\r\n config.add_route(\"user_tag_list\", \"{username}/tags\")\r\n config.add_route(\"user_tag_bmarks\", \"{username}/tags/*tags\")\r\n\r\n config.add_route(\"user_import\", \"{username}/import\")\r\n config.add_route(\"search\", 
\"search\")\r\n config.add_route(\"user_search\", \"{username}/search\")\r\n\r\n config.add_route(\"search_results\", \"results\")\r\n config.add_route(\"user_search_results\", \"{username}/results\")\r\n\r\n # matches based on the header\r\n # HTTP_X_REQUESTED_WITH\r\n # ajax versions are used in the mobile search interface\r\n config.add_route(\"search_results_ajax\", \"results/*terms\", xhr=True)\r\n config.add_route(\"search_results_rest\", \"results/*terms\")\r\n config.add_route(\"user_search_results_ajax\",\r\n \"{username}/results*terms\",\r\n xhr=True)\r\n config.add_route(\"user_search_results_rest\", \"{username}/results*terms\")\r\n\r\n config.add_route(\"redirect\", \"redirect/{hash_id}\")\r\n config.add_route(\"user_redirect\", \"{username}/redirect/{hash_id}\")\r\n\r\n config.add_route(\"user_account\", \"{username}/account\")\r\n config.add_route(\"user_export\", \"{username}/export\")\r\n config.add_route(\"user_stats\", \"{username}/stats\")\r\n\r\n #\r\n # NEW API\r\n #\r\n\r\n # stats\r\n config.add_route('api_bookmark_stats',\r\n '/api/v1/stats/bookmarks',\r\n request_method='GET')\r\n config.add_route('api_user_stats',\r\n '/api/v1/stats/users',\r\n request_method='GET')\r\n\r\n # ping checks\r\n config.add_route('api_ping',\r\n '/api/v1/{username}/ping',\r\n request_method='GET')\r\n config.add_route('api_ping_missing_user',\r\n '/api/v1/ping',\r\n request_method='GET')\r\n config.add_route('api_ping_missing_api',\r\n '/ping',\r\n request_method='GET')\r\n\r\n # auth related\r\n config.add_route(\"api_user_account\",\r\n \"/api/v1/{username}/account\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_user_account_update\",\r\n \"/api/v1/{username}/account\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_api_key\",\r\n \"/api/v1/{username}/api_key\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_reset_api_key\",\r\n \"/api/v1/{username}/api_key\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_reset_password\",\r\n \"/api/v1/{username}/password\",\r\n request_method=\"POST\")\r\n\r\n config.add_route(\"api_user_suspend_remove\",\r\n \"api/v1/suspend\",\r\n request_method=\"DELETE\")\r\n config.add_route(\"api_user_suspend\",\r\n \"api/v1/suspend\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_user_invite\",\r\n \"api/v1/{username}/invite\",\r\n request_method=\"POST\")\r\n\r\n # many bookmark api calls\r\n config.add_route(\"api_bmarks_export\", \"api/v1/{username}/bmarks/export\")\r\n\r\n # we have to search before we hit the bmarks keys so that it doesn't think\r\n # the tag is \"search\"\r\n config.add_route(\"api_bmark_search\", \"api/v1/bmarks/search/*terms\")\r\n config.add_route(\"api_bmark_search_user\",\r\n \"/api/v1/{username}/bmarks/search/*terms\")\r\n\r\n config.add_route('api_bmarks', 'api/v1/bmarks')\r\n config.add_route('api_bmarks_tags', 'api/v1/bmarks/*tags')\r\n config.add_route('api_bmarks_user', 'api/v1/{username}/bmarks')\r\n config.add_route('api_bmarks_user_tags', 'api/v1/{username}/bmarks/*tags')\r\n config.add_route('api_count_bmarks_user',\r\n 'api/v1/{username}/stats/bmarkcount')\r\n\r\n # user bookmark api calls\r\n config.add_route(\"api_bmark_add\",\r\n \"/api/v1/{username}/bmark\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_bmark_update\",\r\n \"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"POST\")\r\n config.add_route(\"api_extension_sync\", \"/api/v1/{username}/extension/sync\")\r\n\r\n config.add_route(\"api_bmark_hash\",\r\n 
\"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"GET\")\r\n config.add_route(\"api_bmark_remove\",\r\n \"/api/v1/{username}/bmark/{hash_id}\",\r\n request_method=\"DELETE\")\r\n\r\n config.add_route(\"api_tag_complete_user\",\r\n \"/api/v1/{username}/tags/complete\")\r\n config.add_route(\"api_tag_complete\",\r\n \"/api/v1/tags/complete\")\r\n\r\n # admin api calls\r\n config.add_route(\"api_admin_readable_todo\", \"/api/v1/a/readable/todo\")\r\n config.add_route(\r\n \"api_admin_readable_reindex\",\r\n \"/api/v1/a/readable/reindex\")\r\n config.add_route(\r\n \"api_admin_accounts_inactive\",\r\n \"/api/v1/a/accounts/inactive\")\r\n config.add_route(\r\n \"api_admin_accounts_invites_add\",\r\n \"/api/v1/a/accounts/invites/{username}/{count}\",\r\n request_method=\"POST\")\r\n config.add_route(\r\n \"api_admin_accounts_invites\",\r\n \"/api/v1/a/accounts/invites\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_imports_list\",\r\n \"/api/v1/a/imports/list\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_imports_reset\",\r\n \"/api/v1/a/imports/reset/{id}\",\r\n request_method=\"POST\")\r\n\r\n config.add_route(\r\n \"api_admin_users_list\",\r\n \"/api/v1/a/users/list\",\r\n request_method=\"GET\")\r\n config.add_route(\r\n \"api_admin_new_user\",\r\n \"/api/v1/a/users/add\",\r\n request_method=\"POST\")\r\n config.add_route(\r\n \"api_admin_del_user\",\r\n \"/api/v1/a/users/delete/{username}\",\r\n request_method=\"DELETE\")\r\n config.add_route(\r\n \"api_admin_bmark_remove\",\r\n \"/api/v1/a/bmark/{username}/{hash_id}\",\r\n request_method=\"DELETE\")\r\n\r\n config.add_route(\r\n \"api_admin_applog\",\r\n \"/api/v1/a/applog/list\",\r\n request_method=\"GET\")\r\n\r\n config.add_route(\r\n \"api_admin_non_activated\",\r\n \"/api/v1/a/nonactivated\",\r\n request_method=\"GET\")\r\n\r\n config.add_route(\r\n \"api_admin_delete_non_activated\",\r\n \"/api/v1/a/nonactivated\",\r\n request_method=\"DELETE\")\r\n\r\n # these are single word matching, they must be after /recent /popular etc\r\n config.add_route(\"user_home\", \"{username}\")\r\n\r\n return config", "def init_app():\n app = Flask(__name__)\n\n with app.app_context():\n # Import parts of our core Flask app\n from . 
import routes\n\n from .plotlydash.index import init_dashboard\n app = init_dashboard(app)\n\n return app", "def register_routes(\n config: Configurator,\n route_name_ext: str = \"x-pyramid-route-name\",\n root_factory_ext: str = \"x-pyramid-root-factory\",\n apiname: str = \"pyramid_openapi3\",\n route_prefix: t.Optional[str] = None,\n) -> None:\n\n def action() -> None:\n spec = config.registry.settings[apiname][\"spec\"]\n for pattern, path_item in spec[\"paths\"].items():\n route_name = path_item.get(route_name_ext)\n if route_name:\n root_factory = path_item.get(root_factory_ext)\n config.add_route(\n route_name,\n pattern=route_prefix + pattern\n if route_prefix is not None\n else pattern,\n factory=root_factory or None,\n )\n\n config.action((\"pyramid_openapi3_register_routes\",), action, order=PHASE1_CONFIG)", "def configure_blueprints(app):\n\n for blueprint in _blueprints:\n app.register_blueprint(blueprint)", "def configure_app(self):\n self.app.route('/', callback=self.get_api)", "def add_app_routes(app):\n\n # Routes for demo pages to visit with a web browser\n @app.route('/')\n def index():\n return render_template('index.html')\n\n @app.route('/video_stream_demo')\n def video_stream_demo():\n \"\"\"Video streaming demo page.\"\"\"\n return render_template('video_stream_demo.html')\n\n @app.route('/image_capture_demo')\n def image_capture_demo():\n \"\"\"Image capture demo page.\"\"\"\n return render_template('image_capture_demo.html')\n\n\n\n # Routes to use to use for programmatic connectivity\n @app.route('/video_feed')\n def video_feed():\n \"\"\"Video streaming route.\"\"\"\n return Response(gen(Camera()),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n @app.route('/image')\n def image():\n \"\"\"Image capture route.\"\"\"\n return Response(gen_image(Camera()),\n mimetype='image/jpeg')\n\n # TODO: Probably makes more sense to have a POST url \n # so it'll be easier to set multiple settings\n @app.route('/settings')\n def settings():\n \"\"\"Settings route\"\"\"\n stop_req = request.args.get('stop')\n frame_sleep_req = request.args.get('frame_sleep')\n\n global stop\n if stop_req == '1':\n stop = True\n elif stop_req == '0':\n stop = False\n\n global frame_sleep\n if frame_sleep_req:\n frame_sleep = int(frame_sleep_req)\n\n return jsonify({'message': 'Set settings: {}'.format(request.args)})\n\n\n return app", "def create_routes(api: Api):\n api.add_resource(SignUpApi, '/user/signup/')\n api.add_resource(LoginApi, '/user/login/')\n\n api.add_resource(UsersApi, '/users/')\n\n api.add_resource(CafeteriasCreationAPI, '/createcafeteria/')\n api.add_resource(CreateItemsAPI, '/createcafeteriaitems/')", "def create_app():\r\n app = Flask(__name__, instance_relative_config=False)\r\n app.config.from_object('config.Config') \r\n \r\n api = Api(app) \r\n \r\n with app.app_context():\r\n from .flights import TicketRoute, FlightRoute\r\n api.add_resource(TicketRoute,\"/api/tickets\")\r\n api.add_resource(FlightRoute,\"/api/flights\")\r\n \r\n \r\n return app", "def register_blueprints(app):\n app.register_blueprint(hello_world.bp_config.bp)", "def registerBlueprints(module_name):\n module = importlib.import_module(\n \"app.modules.\" + module_name, package=None)\n bp = getattr(module, module_name)\n server_logger.info(\"Registering module: \" + module_name)\n if bp.name == \"index\":\n server.register_blueprint(bp)\n else:\n server.register_blueprint(bp, url_prefix='/' + bp.name)", "def configure_routing(config):\n # Static file access. 
Separate root for each subdirectory, because Pyramid\n # treats these as first-class routables rather than a last-ditch fallback\n config.add_static_view('/css', 'floof:assets/css')\n config.add_static_view('/files', 'floof:assets/files') # dummy file store\n config.add_static_view('/icons', 'floof:assets/icons')\n config.add_static_view('/images', 'floof:assets/images')\n config.add_static_view('/js', 'floof:assets/js')\n # TODO this doesn't actually work\n config.add_static_view('/favicon.ico', 'floof:assets/favicon.ico')\n\n\n r = config.add_route\n\n # Miscellaneous root stuff\n r('root', '/')\n r('filestore', '/filestore/{class_}/{key}', pregenerator=filestore_pregenerator)\n r('reproxy', '/reproxy')\n r('log', '/log')\n\n # Registration and auth\n r('account.login', '/account/login')\n r('account.login_begin', '/account/login_begin')\n r('account.login_finish', '/account/login_finish')\n r('account.register', '/account/register')\n r('account.add_identity', '/account/add_identity')\n r('account.persona.login', '/account/persona/login')\n r('account.logout', '/account/logout')\n\n r('account.profile', '/account/profile')\n\n # Regular user control panel\n r('controls.index', '/account/controls')\n r('controls.auth', '/account/controls/authentication')\n r('controls.persona', '/account/controls/persona')\n r('controls.persona.add', '/account/controls/persona/add')\n r('controls.persona.remove', '/account/controls/persona/remove')\n r('controls.openid', '/account/controls/openid')\n r('controls.openid.add', '/account/controls/openid/add')\n r('controls.openid.add_finish', '/account/controls/openid/add_finish')\n r('controls.openid.remove', '/account/controls/openid/remove')\n r('controls.rels', '/account/controls/relationships')\n r('controls.rels.watch', '/account/controls/relationships/watch')\n r('controls.rels.unwatch', '/account/controls/relationships/unwatch')\n r('controls.info', '/account/controls/user_info')\n\n r('controls.certs', '/account/controls/certificates')\n r('controls.certs.add', '/account/controls/certificates/add')\n r('controls.certs.generate_server',\n '/account/controls/certificates/gen/cert-{name}.p12')\n r('controls.certs.details',\n '/account/controls/certificates/details/{serial:[0-9a-f]+}')\n r('controls.certs.download',\n '/account/controls/certificates/download/cert-{name}-{serial:[0-9a-f]+}.pem')\n r('controls.certs.revoke',\n '/account/controls/certificates/revoke/{serial:[0-9a-f]+}')\n\n # User pages\n kw = sqla_route_options('user', 'name', model.User.name)\n r('users.view', '/users/{name}', **kw)\n r('users.art', '/users/{name}/art', **kw)\n r('users.art_by_album', '/users/{name}/art/{album}', **kw)\n r('users.profile', '/users/{name}/profile', **kw)\n r('users.watchstream', '/users/{name}/watchstream', **kw)\n r('albums.user_index', '/users/{name}/albums', **kw)\n\n r('api:users.list', '/users.json')\n\n # Artwork\n kw = sqla_route_options('artwork', 'id', model.Artwork.id)\n kw['pregenerator'] = artwork_pregenerator\n r('art.browse', '/art')\n r('art.upload', '/art/upload')\n r('art.view', r'/art/{id:\\d+}{title:(-.+)?}', **kw)\n r('art.add_tags', r'/art/{id:\\d+}/add_tags', **kw)\n r('art.remove_tags', r'/art/{id:\\d+}/remove_tags', **kw)\n r('art.rate', r'/art/{id:\\d+}/rate', **kw)\n\n # Tags\n # XXX what should the tag name regex be, if anything?\n # XXX should the regex be checked in the 'factory' instead? 
way easier that way...\n kw = sqla_route_options('tag', 'name', model.Tag.name)\n r('tags.list', '/tags')\n r('tags.view', '/tags/{name}', **kw)\n r('tags.artwork', '/tags/{name}/artwork', **kw)\n\n # Albums\n # XXX well this is getting complicated! needs to check user, needs to check id, needs to generate correctly, needs a title like art has\n user_router = SugarRouter(config, '/users/{user}', model.User.name)\n album_router = user_router.chain('/albums/{album}', model.Album.id, rel=model.Album.user)\n album_router.add_route('albums.artwork', '')\n\n # Administration\n r('admin.dashboard', '/admin')\n r('admin.log', '/admin/log')\n\n # Debugging\n r('debug.blank', '/debug/blank')\n r('debug.crash', '/debug/crash')\n r('debug.mako-crash', '/debug/mako-crash')\n r('debug.status.303', '/debug/303')\n r('debug.status.400', '/debug/400')\n r('debug.status.403', '/debug/403')\n r('debug.status.404', '/debug/404')\n\n # Comments; made complex because they can attach to different parent URLs.\n # Rather than hack around how Pyramid's routes works, we can just use our\n # own class that does what we want!\n\n # XXX 1: make this work for users as well\n # XXX 2: make the other routes work\n # XXX 3: possibly find a way to verify that the same logic is used here and for the main routes\n parent_route_names = ('art.view', 'user.view')\n mapper = config.get_routes_mapper()\n parent_routes = [mapper.get_route(name) for name in parent_route_names]\n commentables = dict(\n users=model.User.name,\n art=model.Artwork.id,\n )\n\n def comments_factory(request):\n # XXX prefetching on these?\n type = request.matchdict['type']\n identifier = request.matchdict['identifier']\n\n try:\n sqla_column = commentables[type]\n entity = model.session.query(sqla_column.parententity).filter(sqla_column == identifier).one()\n except (NoResultFound, KeyError):\n # 404!\n raise NotFound()\n\n if 'comment_id' not in request.matchdict:\n return contextualize(entity.discussion)\n\n # URLs to specific comments should have those comments as the context\n try:\n return contextualize(\n model.session .query(model.Comment)\n .with_parent(entity.discussion)\n .filter(model.Comment.id == request.matchdict['comment_id'])\n .one())\n except NoResultFound:\n raise NotFound()\n\n\n def comments_pregenerator(request, elements, kw):\n resource = None\n comment = kw.get('comment', None)\n\n if comment:\n kw['comment_id'] = comment.id\n\n if 'resource' not in kw:\n resource = comment.discussion.resource\n\n if not resource:\n resource = kw['resource']\n\n # XXX users...\n entity = resource.member\n kw['type'] = 'art'\n kw['identifier'] = entity.id\n return elements, kw\n\n r('comments.list', '/{type}/{identifier}/comments', factory=comments_factory)\n r('comments.write', '/{type}/{identifier}/comments/write', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.view', '/{type}/{identifier}/comments/{comment_id}', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.edit', '/{type}/{identifier}/comments/{comment_id}/edit', factory=comments_factory, pregenerator=comments_pregenerator)\n r('comments.reply', '/{type}/{identifier}/comments/{comment_id}/write', factory=comments_factory, pregenerator=comments_pregenerator)", "def register_controllers(app: FastAPI) -> None:\n app.include_router(base.router)\n app.include_router(checks.router, prefix=\"/checks\", tags=[\"checks\"])", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n\n # 
Initialize Plugins\n bootstrap = Bootstrap(app) # noqa: F841\n\n with app.app_context():\n # Include our Routes\n from . import routes # noqa: F401\n\n # # Register Blueprints\n # app.register_blueprint(auth.auth_bp)\n # app.register_blueprint(admin.admin_bp)\n\n return app", "def initialize_blueprints(app, *blueprints):\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n app.config['TEMPLATES_AUTO_RELOAD'] = True\n\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.login_view = 'auth_bp.login'\n login_manager.init_app(app)\n\n cache.init_app(app)\n sess.init_app(app)\n\n @app.context_processor\n def inject_session():\n return dict(session=sess)\n\n @app.context_processor\n def inject_datetime():\n return dict(dnow=datetime.now())\n\n @app.context_processor\n def check_permissions():\n def check_perms(perm, permset):\n return Permissions.check(perm, permset)\n return dict(check_perms=check_perms)\n\n @app.context_processor\n def lookup_permissions():\n def lookup_perm(perm):\n return Permissions.lookup(perm)\n return dict(lookup_perm=lookup_perm)\n\n app.add_template_global(Permissions.lookups(), 'permissions')\n\n with app.app_context():\n # Include our Routes\n from .main import main_bp\n from .auth import auth_bp\n from .admin import admin_bp\n from .snapshots import snap_bp\n from .geo import geo_bp\n from .ppe import ppe_bp\n from .space import space_bp\n from .staff import staff_bp\n from .trans import trans_bp\n\n # Register Blueprints\n app.register_blueprint(main_bp)\n app.register_blueprint(auth_bp)\n app.register_blueprint(admin_bp)\n app.register_blueprint(snap_bp)\n app.register_blueprint(geo_bp)\n app.register_blueprint(ppe_bp)\n app.register_blueprint(space_bp)\n app.register_blueprint(staff_bp)\n app.register_blueprint(trans_bp)\n\n\n return app", "def register_foaas_routes(foaas_app):\n for route_path, route_text in fix_routes(foaas_routes):\n register_route(foaas_app, route_path, route_text)", "def add_routes(self):\n\n # create a routegroup\n routegroup = MewloRouteGroup('testsite_routegroup')\n # overide the parent import-pack-directory for the urls in this group? 
if we don't it will use the controller root set in SITE config\n # routegroup.set_controllerroot(pkgdirimp_controllers)\n\n routegroup.append(\n MewloRoute(\n id = 'home',\n path = \"/\",\n controller = MewloController(function='requests.request_home')\n ))\n\n\n routegroup.append(\n MewloRoute(\n id = 'hello',\n path = '/test/hello',\n args = [\n MewloRouteArgString(\n id = 'name',\n required = True,\n help = \"name of person to say hello to\",\n ),\n MewloRouteArgInteger(\n id = 'age',\n required = False,\n help = \"age of person (optional)\",\n defaultval = 44,\n )\n ],\n controller = MewloController(function=\"requests.request_sayhello\"),\n # we can pass in any extra data which will just be part of the route that can be examined post-matching\n extras = { 'stuff': \"whatever we want\" },\n # we can force the route to simulate as if certain url call args were assigned (this works whether there are RouteArgs for these or not; no type checking is performed on them)\n # this could be useful in two scenarios: first, if we initially wrote code to handle an arg and then changed our mind and want to not let user set that arg; second, if we reuse a controller function in different places and simulate dif arg values for each\n forcedargs = { 'sign': u\"aries\" },\n ))\n\n\n\n from controllers import requests\n routegroup.append(\n MewloRoute(\n id = 'article',\n path = '/article',\n args = [\n MewloRouteArgString(\n id = 'title',\n required = False,\n positional = True,\n help = \"title of article to display\",\n )\n ],\n # another way to specify the controller is to pass in the actual function reference (rather than as a string)\n controller = MewloController(function=requests.request_article),\n ))\n\n routegroup.append(\n MewloRoute(\n id = 'help',\n path = '/user/help',\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_help'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'contact',\n path = '/help/contact',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_contact'),\n ))\n routegroup.append(\n MewloRoute(\n id = 'about',\n path = '/help/about',\n # we can pass the root pack to the MewloController constructor, which has the benefit of doing the import immediately and raising exception if not found; otherwise the error will come up during preparation\n controller = MewloController(root=pkgdirimp_controllers, function='requests.request_about'),\n ))\n\n\n #static file server\n if (False):\n routegroup.append(\n MewloRoute_StaticFiles(\n id = 'static_files',\n path = '/static',\n controller = MewloController_StaticFiles(\n sourcepath = '${sitefilepath}/staticfilesource'\n ),\n ))\n\n\n # add routegroup we just created to the site\n self.comp('routemanager').append(routegroup)", "def register_pc_blueprints(app):\n blueprints = [\n registration_page,\n spectrum_inquiry_page,\n grant_page,\n heartbeat_page,\n relinquishment_page,\n deregistration_page,\n ]\n register_blueprints(app, blueprints, app.config['API_PREFIX'])", "def init_app(self, app, path='templates.yaml'):\n if self._route is None:\n raise TypeError(\"route is a required argument when app is not None\")\n\n app.clova = self\n\n app.add_url_rule(self._route, view_func=self._flask_view_func, methods=['POST'])\n app.jinja_loader = 
ChoiceLoader([app.jinja_loader, YamlLoader(app, path)])", "def register_blueprints(self):\n for module in copy.copy(sys.modules).values():\n for blueprint in module_functionalities(module, 'MARA_FLASK_BLUEPRINTS', flask.Blueprint):\n self.register_blueprint(blueprint)", "def setup_routes():\n root = CherryTarball()\n d = cherrypy.dispatch.RoutesDispatcher()\n d.connect('main', '/', controller=root)\n # This enumerates the tarball and connects each file within to a URL in the dispatcher\n tar = tarfile.open(tarball_path)\n for tarobj in tar.getmembers():\n if tarobj.isdir():\n pass # Skip directories\n else:\n d.connect(tarobj.name, tarobj.name, controller=root, action='static', filepath=tarobj.name)\n dispatcher = d\n return dispatcher", "def blueprints_fabrics(app, blueprints):\n\n for blueprint in blueprints:\n app.register_blueprint(blueprint)", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def register(self):\n # self.register_route(\"GET\", self.__route, lambda req, res: self.status(req, res))\n self.register_route(\"GET\", self.__route, None, self.status)", "def reg_bps(app):\n from . import categories_bp, items_bp, users_bp\n\n app.register_blueprint(categories_bp, url_prefix='/categories')\n app.register_blueprint(items_bp, url_prefix='/items')\n app.register_blueprint(users_bp)", "def register_route(app):\n\n @app.teardown_appcontext\n def teardown_session(e):\n \"\"\"\n Exit the context of my_db and OT_spider when app's context is teared down.\n :param e: event.\n :return: None.\n \"\"\"\n my_db.close()\n OT_spider.close()\n\n @app.errorhandler(404)\n def page_not_found(e):\n \"\"\"\n Render assigned template when error code 404 occurs.\n :param e: error event.\n :return: error/404.html.\n \"\"\"\n return render_template(\"error/404.html\"), 404\n\n @app.errorhandler(403)\n def access_forbidden(e):\n \"\"\"\n Render assigned template when error code 403 occurs.\n :param e: error event.\n :return: error/403.html.\n \"\"\"\n return render_template(\"error/403.html\"), 403\n\n @app.errorhandler(500)\n def internal_server_error(e):\n \"\"\"\n Render assigned template when error code 500 occurs.\n :param e: error event.\n :return: error/500.html.\n \"\"\"\n return render_template(\"error/500.html\"), 500\n\n @app.before_request\n def filter_request():\n \"\"\"\n Intercept requests with disallowed methods and/or fake user agent.\n :return: None.\n \"\"\"\n if request.method not in ALLOWED_METHODS:\n return \"Method Not Allowed\", 405\n ua = str(request.user_agent)\n if \"Mozilla\" not in ua or \"Gecko\" not in ua:\n return \"No Scrappers!\", 403\n\n @app.after_request\n def set_res_headers(response):\n \"\"\"\n Set headers to all responses.\n :param response: flask.wrappers.Response object.\n :return: response to send back to client.\n \"\"\"\n response.headers[\"Server\"] = \"OurTieba\"\n response.headers[\"X-Content-Type-Options\"] = \"nosniff\"\n response.headers[\"X-Frame-Options\"] = \"sameorigin\"\n if app.config.get(\"ENABLE_CSP\"):\n response.headers[\"Content-Security-Policy\"] = \"script-src \" + \" \".join(WHITELIST) + \"; object-src 
'self'\"\n return response\n\n @app.template_filter(\"index_format\")\n def add_zeros(i, length): # format index in photos.html\n \"\"\"\n Pad zeros to i, and turn it into a string. The length is at least 2. Used in photos.html.\n :param i: int. Integer to pad.\n :param length: int. Base integer.\n :return: A padded string.\n\n For example,\n add_zeros(1, 2) -> \"01\";\n add_zeros(1, 12) -> \"01\";\n add_zeros(13, 101) -> \"013\".\n \"\"\"\n return (\"{:0>\" + str(max(len(str(length)), 2)) + \"d}\").format(i)", "def create_app():\n app = Flask(__name__)\n app.config.from_pyfile('config.py')\n\n login_manager.init_app(app) # initialize flask_login with our app\n # redirect route when @login_required fails\n login_manager.login_view = 'routes.signin'\n db.init_app(app)\n\n from .routes import routes\n app.register_blueprint(routes)\n\n return app", "def register_route(self, route, app):\n assert route not in self.routes\n self.routes[route] = app", "def initialize_routes(app):\n\n\t@app.route('/api/analyse', methods= [ 'GET' ])\n\tdef api_route():\n\n\t\tinputStr = request.args.get('text')\n\n\t\tlabels = senti.test_probability([ inputStr ])\n\t\tlabel = senti.test([ inputStr ], False)\n\n\t\tjson_response = json.dumps({\n\t\t\t'input': inputStr,\n\t\t\t'label': label[0],# 'neg' if labels[0] < .5 else 'pos',\n\t\t\t'probabilities': {\n\t\t\t\t'pos': labels[0],\n\t\t\t\t'neg': 1 - labels[0],\n\t\t\t}\n\t\t})\n\n\t\tresponse= make_response(json_response, 200)\n\t\tresponse.headers['Content-Type'] = 'application/json'\n\n\t\treturn response\n\n\n\t@app.route('/')\n\tdef hello():\n\t\treturn render_template('index.html')", "def register_blueprints(app):\n @app.route('/')\n def hello():\n return '<html><body>{{ cookiecutter.project_name }} - Hello World</body></html>'\n\n @app.route('/healthz')\n def healthz():\n {% if cookiecutter.use_sqlalchemy == 'True' %}\n \"\"\"\n Verify the DB is there.\n\n :return: 200 if all good, otherwise it raises an exception which returns 500\n \"\"\"\n {{cookiecutter.app_name}}.extensions.db.session.query('1').from_statement('SELECT 1').all()\n {% endif %}\n return '', httplib.OK\n\n app.register_blueprint({{ cookiecutter.app_name }}.api.v0_1.get_blueprint())", "def create_app():\n app = Flask(__name__)\n app.register_blueprint(playlists)\n app.register_blueprint(comments)\n return app", "def register_blueprints(app):\n app.register_blueprint(vs_association, url_prefix='/vs_association')", "def add_routes(self, mapper):\n pass", "def create_app():\n\n # --------------------- #\n # Initial configuration #\n # --------------------- #\n\n instance_path = config.INSTANCE_PATH\n\n # Creates the instance path if it doesn't exist\n if not os.path.exists(instance_path):\n os.makedirs(instance_path)\n\n app = Flask(__name__, instance_path=instance_path)\n\n # Logging utility setup\n if app.config['ENV'] == 'development' or app.config['DEBUG'] is True:\n log_level = logging.DEBUG\n else:\n if hasattr(logging, config.DEBUG_LOG_LEVEL):\n log_level = getattr(logging, config.DEBUG_LOG_LEVEL)\n else:\n print(\n \"WARNING: log level value from config file is not a valid attribute: {}\".format(config.DEBUG_LOG_LEVEL))\n print(f\"Defaulting to '{logging.WARNING}\")\n log_level = logging.WARNING\n\n logging.basicConfig(\n format='[%(asctime)s] %(levelname)s in %(module)s: %(message)s',\n level=log_level,\n filename=config.LOG_PATH\n )\n\n # Intercepts generic server errors and logs them\n @app.errorhandler(werkzeug.exceptions.HTTPException)\n def handle_errors(e):\n 
logging.error(str(e))\n return str(e), 500\n\n # Handles correct favicon\n @app.route('/favicon.ico')\n def favicon():\n return send_from_directory(os.path.join(app.root_path, 'static'),\n 'favicon.ico', mimetype='image/vnd.microsoft.icon')\n\n # --------- #\n # Web pages #\n # --------- #\n\n # Root page\n @app.route('/')\n def index():\n \"\"\"Simple root page.\n\n The \"@app.route('/')\" decorator assigns this function\n to the '/' address, so that when you visit '/', a\n request is sent to the server, which will call this function.\n\n Once this function is called it returns an html page\n produced from the 'index.html' file.\n\n Returns\n -------\n html page\n \"\"\"\n return render_template('index.html')\n\n @app.route('/test_drawing')\n def test_drawing_page():\n return render_template('test_drawing.html')\n\n @app.route('/log', methods=['GET'])\n def view_log():\n \"\"\"Display the log\"\"\"\n if request.values.get(\"clear\") == \"True\":\n with open(config.LOG_PATH, \"w\") as log_file:\n log_file.write(\"\")\n logging.info(\"Log file cleared from browser.\")\n print(request.values.get(\"clear\"))\n with open(config.LOG_PATH) as log_file:\n log_text = log_file.read()\n return render_template('log.html', log_text=log_text)\n\n @app.route('/study_legacy')\n def study_legacy():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study-legacy.html')\n\n @app.route('/study')\n def study():\n \"\"\"Renders the study page.\n\n Returns\n -------\n html page\n\n \"\"\"\n return render_template('study.html')\n\n # --------------------- #\n # API-related functions #\n # --------------------- #\n\n @app.route('/api_initialise_gp_and_sample', methods=['GET', 'POST'])\n def api_initialise_gp_and_sample():\n \"\"\"Initialises a GP based on the given parameters.\n\n The parameters are retrieved from the settings file. After initialising\n the GP it samples a function from it to be the true function. 
Finally\n it chooses a query point uniformly at random.\n\n All the data is sent to the frontend as a JSON object to be used by the frontend.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n\n # Retrieves the data from the request object\n interface_settings = utils.get_response_and_log(request)\n logging.debug(\"Interface settings: {}\".format(str(interface_settings)))\n\n # Loads the settings file\n settings_file_name = interface_settings[\n 'settings_name'] # if 'settings_name' in interface_settings else 'default'\n settings = io.load_settings(settings_file_name)\n logging.debug(\"File settings: {}\".format(str(settings)))\n\n # Integrate the settings with those provided by the interface, if any\n for key in interface_settings.keys():\n if key not in settings:\n settings[key] = interface_settings[key]\n\n # Fail early and provide some error message when crucial data is missing.\n try:\n utils.assert_required_data(settings, ['x_limits', 'n_points', 'noise'])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(settings.keys()))\n return str(e), 400 # BAD_REQUEST\n\n # Generate user and session IDs if not provided\n user_id: int = settings['user_id'] if 'user_id' in settings else io.get_new_user_id(\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n\n # Ensure save dir exists\n if not (\"save\" in settings and settings[\"save\"] == False):\n io.ensure_savedir_exists(study_name=settings_file_name, sub_path=str(user_id))\n\n session_id: int = settings['session_id'] if 'session_id' in settings else io.get_new_session_id(user_id,\n study_name=settings_file_name)\n settings['user_id'] = str(user_id)\n settings['session_id'] = str(session_id)\n\n # Call GP data_gp_initialisation function\n x, y_true, query_index, mean_vector, confidence_up, confidence_down = user_study_gp.data_gp_initialisation(\n settings['x_limits'][0],\n settings['x_limits'][1],\n settings['n_points'],\n settings['kernel'],\n settings['kernel_args'],\n settings['noise']\n )\n\n # Convert the data to JSON\n data = {\n \"settings\": settings,\n \"iteration\": 0,\n \"new_point_index\": query_index, # index of new point to be queried\n \"new_point_x\": x[query_index], # new point to be queried\n 'x_data': [], # queried data points (initially empty)\n 'y_data': [], # values given by the user for the queried points (initially empty)\n 'y_data_actual': [], # actual value of f(queried point)\n 'x_limits': settings['x_limits'],\n 'n_points': settings['n_points'],\n \"x\": x, # x points in the interval (linspace)\n \"y\": y_true, # f(x) true values in the x points\n \"mean\": mean_vector,\n \"std\": confidence_up + confidence_down, # list concatenation\n }\n\n # Update session_id to match session, when running a full user study\n if \"max_sessions\" in settings:\n if \"update_session\" in interface_settings and interface_settings[\"update_session\"] == True:\n data[\"session\"] = interface_settings[\"session\"] + 1\n else:\n data[\"session\"] = 0\n session_id = data[\"session\"]\n\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n io.save_data(data,\n study_name=settings_file_name,\n user_id=user_id,\n session_id=session_id,\n incremental=settings['save_split'])\n\n return utils.remove_nan(json.dumps(data))\n\n @app.route('/api_update_gp', methods=['GET', 'POST'])\n def api_update_gp():\n \"\"\"Updates a GP based on the given parameters.\n\n The parameters are retrieved from 
the request object. It updates the GP with the new points. Finally it chooses\n a new query point.\n\n All the data is sent to the frontend as a JSON object.\n\n Returns\n -------\n JSON data\n\n \"\"\"\n logging.info(\"Called: api_update_gp\")\n data = utils.get_response_and_log(request)\n try:\n utils.assert_required_data(data,\n [\n 'settings', # settings of the user study\n 'x_data', # queried data points\n 'y_data', # values by the user for the queried points\n \"x_limits\", # beginning and end of the interval\n \"x\", # x points\n \"iteration\" # current iteration\n ])\n except AssertionError as e:\n logging.error(str(e))\n logging.error(\"Provided keys: {}\".format(data.keys()))\n return str(e), 400 # BAD_REQUEST\n\n if (\"x_data\" in data and \"y_data\" in data) and (len(data[\"x_data\"]) >= 1 and len(data[\"y_data\"]) >= 1):\n logging.info(\"Received new data point: ({}, {}), updating..\".format(\n data[\"x_data\"][-1],\n data[\"y_data\"][-1])\n )\n\n settings = data['settings']\n\n # Update vanilla GP\n query_index, mean_vector, upper_confidence, lower_confidence = user_study_gp.update(data[\"x\"],\n settings[\"kernel\"],\n settings[\"kernel_args\"],\n data[\"x_data\"],\n data[\"y_data\"],\n settings[\"noise\"])\n\n # Update data\n data[\"new_point_index\"] = query_index\n data[\"new_point_x\"] = data[\"x\"][query_index]\n data[\"mean\"] = mean_vector\n data[\"std\"] = upper_confidence + lower_confidence\n data[\"iteration\"] += 1\n\n data_json = utils.remove_nan(json.dumps(data))\n if \"save\" in settings and settings[\"save\"] == False:\n logging.debug(\"Not saving data because of settings[\\\"save\\\"] = False\")\n else:\n logging.debug(f'Study name: {settings[\"settings_name\"]}')\n io.save_data(data,\n study_name=settings[\"settings_name\"],\n user_id=settings['user_id'],\n session_id=settings['session_id'],\n incremental=settings['save_split'])\n return data_json\n\n return app", "def init_app(app):\n api.add_namespace(ns)\n app.register_blueprint(bp, url_prefix='/api/v1')", "def includeme(config):\n config.add_route('home', '/')\n config.add_route('detail', '/detail/{id:\\d+}')\n config.add_route('update', '/edit/{id:\\d+}')\n config.add_route('create', '/create')", "def add_routes_hook(map, *args, **kwargs):\n map.connect('/dex/media/*path', controller='dex', action='media')\n map.connect('/dex/lookup', controller='dex', action='lookup')\n map.connect('/dex/suggest', controller='dex', action='suggest')\n map.connect('/dex/parse_size', controller='dex', action='parse_size')\n\n # These are more specific than the general pages below, so must be first\n map.connect('/dex/moves/search', controller='dex_search', action='move_search')\n map.connect('/dex/pokemon/search', controller='dex_search', action='pokemon_search')\n\n map.connect('/dex/abilities/{name}', controller='dex', action='abilities')\n map.connect('/dex/items/{pocket}', controller='dex', action='item_pockets')\n map.connect('/dex/items/{pocket}/{name}', controller='dex', action='items')\n map.connect('/dex/locations/{name}', controller='dex', action='locations')\n map.connect('/dex/moves/{name}', controller='dex', action='moves')\n map.connect('/dex/natures/{name}', controller='dex', action='natures')\n map.connect('/dex/pokemon/{name}', controller='dex', action='pokemon')\n map.connect('/dex/pokemon/{name}/flavor', controller='dex', action='pokemon_flavor')\n map.connect('/dex/pokemon/{name}/locations', controller='dex', action='pokemon_locations')\n map.connect('/dex/types/{name}', controller='dex', 
action='types')\n\n map.connect('/dex/abilities', controller='dex', action='abilities_list')\n map.connect('/dex/items', controller='dex', action='items_list')\n map.connect('/dex/natures', controller='dex', action='natures_list')\n map.connect('/dex/moves', controller='dex', action='moves_list')\n map.connect('/dex/pokemon', controller='dex', action='pokemon_list')\n map.connect('/dex/types', controller='dex', action='types_list')\n\n map.connect('/dex/gadgets/compare_pokemon', controller='dex_gadgets', action='compare_pokemon')\n map.connect('/dex/gadgets/pokeballs', controller='dex_gadgets', action='capture_rate')\n map.connect('/dex/gadgets/stat_calculator', controller='dex_gadgets', action='stat_calculator')\n map.connect('/dex/gadgets/whos_that_pokemon', controller='dex_gadgets', action='whos_that_pokemon')\n\n # JSON API\n map.connect('/dex/api/pokemon', controller='dex_api', action='pokemon')", "def create_app():\n app = Flask(__name__)\n\n # app.secret_key = os.urandom(12)\n # jwt_manager = JWTManager()\n # jwt_manager.init_app(app)\n\n CORS(app)\n\n app.register_blueprint(redflag_blueprint, url_prefix=\"/api/v1/red-flags\")\n app.register_blueprint(user_blueprint, url_prefix=\"/api/v1/users\")\n app.register_blueprint(intervention_blueprint, url_prefix=\"/api/v1/interventions\")\n app.register_blueprint(auth_blueprint, url_prefix=\"/api/v1/auth\")\n app.register_blueprint(index_blueprint, url_prefix=\"/api/v1\")\n app.register_blueprint(base_url_blueprint, url_prefix=\"/\")\n app.register_blueprint(media_blueprint, url_prefix=\"/api/v1/files/uploads\")\n # app.register_blueprint(media_edit_blueprint, url_prefix=\"/api/v1/\")\n\n app.register_error_handler(400, bad_request_error)\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(405, method_not_allowed)\n app.register_error_handler(500, internal_server_error)\n\n swagger_ui_blueprint = get_swaggerui_blueprint(SWAGGER_UI_URL, API_URL)\n app.register_blueprint(swagger_ui_blueprint, url_prefix=SWAGGER_UI_URL)\n\n return app", "def init_controllers(app):\n for controller in os.listdir(os.getcwd() + \"/controllers\"):\n module_name, ext = os.path.splitext(controller)\n if module_name.endswith('_controller') and ext == '.py':\n module = __import__(\"controllers.%s\" % module_name)\n PYSTHClient.controllers.append(\n module.__getattribute__(module_name))\n for controller in PYSTHClient.controllers:\n app.register_blueprint(controller.PAGE)", "def create_app():\n # pylint: disable=C0415\n # note: Ignoring 'Import outside toplevel' to avoid import while init\n\n from todo_app.config.swagger import SwaggerConfig\n from todo_app.routes.users import user_management_process\n from todo_app.routes.todo_item import todo_item_management_process\n from todo_app.routes.user_todo_list import todo_list_management_process\n\n # Define the WSGI application object\n app = Flask(__name__)\n\n # DEBUG ONLY!\n app.config[\"WTF_CSRF_ENABLED\"] = CommonConfig.wtf_csrf\n app.config[\"SECRET_KEY\"] = CommonConfig.app_secret_key\n\n @app.route(\"/\")\n def home():\n return \"We are working for new feature development! 
Please come back later!\"\n\n @app.route('/favicon.ico')\n def favicon():\n # To avoid 404 error\n return {}, 200\n\n\n\n # pylint: disable=W0613\n # note: Ignoring Unused argument 'resp_or_exc' as it's related to app\n # @app.teardown_appcontext\n # def cleanup(resp_or_exc):\n # handler.db_session.remove()\n\n # Register the blueprint here\n \n app.register_blueprint(\n SwaggerConfig.SWAGGERUI_BLUEPRINT, url_prefix=SwaggerConfig.SWAGGER_URL\n )\n app.register_blueprint(user_management_process, url_prefix=\"/api/v1/users\")\n app.register_blueprint(todo_item_management_process, url_prefix=\"/api/v1/todo/item\")\n app.register_blueprint(todo_list_management_process, url_prefix=\"/api/v1/todo/list\")\n \n\n return app", "def register_to_blueprint(blueprint, route, methods_to_apifunc):\n methods_to_viewfunc = {}\n for method in methods_to_apifunc:\n methods_to_viewfunc[method] = methods_to_apifunc[method].get_viewfunc()\n\n if 'HEAD' not in methods_to_viewfunc and 'GET' in methods_to_viewfunc:\n methods_to_viewfunc['HEAD'] = methods_to_viewfunc['GET']\n\n blueprint.add_url_rule(\n \"/%s\" % route,\n endpoint=route,\n view_func=error_handler(route_multiplexer(methods_to_viewfunc)),\n methods=list(methods_to_viewfunc.keys()))", "def get_routes():\n return sum([load_module(m).ROUTES for m in settings.INSTALLED_HANDLERS], []) + ROUTES", "def import_views():\n from .views.views import views\n from .views.users import users\n from .views.purchases import purchases\n app.register_blueprint(views)\n app.register_blueprint(users)\n app.register_blueprint(purchases)", "def init_app(self, app):\n self.app = app\n\n self._init_extension()\n\n # Register views\n for view in self._views:\n app.register_blueprint(view.create_blueprint(self))", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n\n # Adding a renderer for custom model objects\n custom_json = JSON()\n model.register_custom_json(custom_json)\n config.add_renderer('json', custom_json)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('index', '/')\n config.add_route('api_board', '/api/{board}/', request_method='GET')\n config.add_route('api_thread', '/api/{board}/{thread}/', request_method='GET')\n config.add_route('board', '/{board}/', request_method='GET')\n config.add_route('new_thread', '/{board}/', request_method='POST')\n config.add_route('thread', '/{board}/{thread}/', request_method='GET')\n config.add_route('reply', '/{board}/{thread}/', request_method='POST')\n config.scan()\n return config.make_wsgi_app()", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. 
Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def create_app(config=None):\n app = Flask(__name__)\n app.register_blueprint(teacher_api)\n app.run()", "def configure(app):\n api.add_resource(Event, '/event/')\n api.add_resource(EventItem, '/event/<event_id>')\n app.register_blueprint(bp_restapi)", "def create_app():\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_object('config.Config')\n\n # Initialize Plugins\n db.init_app(app)\n login_manager.init_app(app)\n \"\"\" Initialize plugins \"\"\"\n\n login_manager.login_message = 'You must be logged in to access this page'\n login_manager.login_message_category = 'info'\n login_manager.session_protection = 'strong'\n login_manager.login_view = 'auth_bp.login'\n\n # from .modules.user.models import User\n from .modules.user.methods import UserMethod\n @login_manager.user_loader\n def load_user(session_token):\n # def load_user(user_id):\n print('load_user - user_id - session_token: ', session_token)\n print('loading auth...')\n # since the user_id is just the primary key of our auth table, auth it in the query for the auth\n return UserMethod.get_user_session_token(session_token)\n\n with app.app_context():\n \"\"\" Blueprints \"\"\"\n from .modules.auth.views import auth_bp\n \"\"\" Blueprint for Auth routes in App \"\"\"\n from .modules.catalog.views import catalog_bp\n \"\"\" Blueprint for Catalog routes in App \"\"\"\n from .modules.category.views import category_bp\n \"\"\" Blueprint for Category routes in App \"\"\"\n from .modules.item.views import item_bp\n \"\"\" Blueprint for Item routes in App \"\"\"\n from .modules.user.views import user_bp\n \"\"\" Blueprint for User routes in App \"\"\"\n\n \"\"\"\" Register Blueprints \"\"\"\n app.register_blueprint(auth_bp)\n app.register_blueprint(catalog_bp)\n app.register_blueprint(category_bp)\n app.register_blueprint(item_bp)\n app.register_blueprint(user_bp)\n\n from .modules.catalog.models import Catalog\n from .modules.category.models import Category\n from .modules.item.models import Item\n \"\"\"Import the models so that sqlalchemy can detect them and create the DB \"\"\"\n\n db.create_all()\n \"\"\" Create the DB \"\"\"\n return app", "def register_blueprints(app) -> None:\n app.register_blueprint(core_app)\n\n for ext in get_valid_extensions():\n try:\n ext_module = importlib.import_module(f\"lnbits.extensions.{ext.code}\")\n app.register_blueprint(getattr(ext_module, f\"{ext.code}_ext\"), url_prefix=f\"/{ext.code}\")\n except Exception:\n raise ImportError(f\"Please make sure that the extension `{ext.code}` follows conventions.\")", "def create_app(test_config=None):\n app = Flask(__name__)\n\n # apply the blueprints to the app\n from app import common\n\n app.register_blueprint(common.bp)\n\n # default url for site\n app.add_url_rule(\"/\", endpoint=\"index\")\n\n return app", "def init_app(app):\n\n def register(path, resource):\n app.add_url_rule(path, view_func=resource.as_view(resource.__name__))\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n docs.register(resource, endpoint=resource.__name__)\n\n docs = FlaskApiSpec(app)\n register(\"/organisms\", Organisms)\n register(\"/organisms/<int:id>\", Organism)\n register(\"/strains\", Strains)\n register(\"/strains/<int:id>\", Strain)\n register(\"/experiments\", Experiments)\n register(\"/experiments/<int:id>\", Experiment)\n register(\"/experiments/<int:id>/data\", 
ExperimentData)\n register(\"/media\", Media)\n register(\"/media/<int:id>\", Medium)\n register(\"/media/compounds\", MediumCompounds)\n register(\"/media/compounds/<int:id>\", MediumCompound)\n register(\"/conditions\", Conditions)\n register(\"/conditions/<int:id>\", Condition)\n register(\"/conditions/<int:id>/data\", ConditionData)\n register(\"/samples\", Samples)\n register(\"/samples/<int:id>\", Sample)\n register(\"/fluxomics\", Fluxomics)\n register(\"/fluxomics/batch\", FluxomicsBatch)\n register(\"/fluxomics/<int:id>\", Fluxomic)\n register(\"/metabolomics\", Metabolomics)\n register(\"/metabolomics/batch\", MetabolomicsBatch)\n register(\"/metabolomics/<int:id>\", Metabolomic)\n register(\"/proteomics\", Proteomics)\n register(\"/proteomics/batch\", ProteomicsBatch)\n register(\"/proteomics/<int:id>\", Proteomic)\n register(\"/uptake-secretion-rates\", UptakeSecretionRates)\n register(\"/uptake-secretion-rates/<int:id>\", UptakeSecretionRate)\n register(\"/molar-yields\", MolarYields)\n register(\"/molar-yields/<int:id>\", MolarYield)\n register(\"/growth-rates\", GrowthRates)\n register(\"/growth-rates/<int:id>\", GrowthRate)", "def create_app(dictionary_with_strategies):\n\n app = Flask(__name__, static_url_path='',\n static_folder='../dist',\n template_folder='../dist')\n\n @app.route('/')\n def home():\n return redirect(url_for('static', filename='index.html'))\n\n app.url_map.strict_slashes = False\n app.config['Strategies'] = dictionary_with_strategies\n register_blueprints(app, \"/api\")\n\n return app", "def setup():\n LOG.info(\"Creating API.\")\n api = Flask(__name__)\n LOG.info(\"Registering blueprints.\")\n api.register_blueprint(health_check_blueprint.setup())\n LOG.info(\"Registering error handlers.\")\n api.register_error_handler(Exception, default_error_handler)\n LOG.info(\"Setting up config variables.\")\n api.config['PROPAGATE_EXCEPTIONS'] = True\n return api", "def main(global_config, **settings):\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n config = Configurator(settings=settings)\n config.add_static_view('static', 'static', cache_max_age=0)\n\n config.add_route('view', '/view')\n config.add_route('view_robot', '/view/r{robot_number:\\d+}')\n\n config.add_route('scout', '/')\n config.add_route('scout_robot', '/r{robot_number:\\d+}')\n# config.add_route('scout_match', '/m{match_number:\\d+}')\n config.add_route('scout_robot_match',\n '/r{robot_number:\\d+}m{match_number:\\d+}')\n config.scan()\n return config.make_wsgi_app()", "def register_blueprints(app, blueprints, url_prefix):\n for blueprint in blueprints:\n app.register_blueprint(blueprint, url_prefix=url_prefix)", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('total_users', '/totalusers/')\n config.add_route('pageviews_weekly', '/pageviews/weekly/')\n config.add_route('pageviews_monthly', '/pageviews/monthly/')\n config.add_route('pageviews', '/pageviews/')\n config.add_route('devices', '/devices/')\n config.add_route('moreinfo', '/moreinfo/{profile_id}')\n config.scan()\n return config.make_wsgi_app()", "def generate_app_routes(conf: T.Dict[T.Text, T.Any]) -> T.List[web.RouteDef]:\n app_routes = [\n web.get(\"/api/verify\", rest.verify),\n web.get(\"/api/interpolate\", rest.interpolate),\n ]\n if conf.get(\"graphiql\"):\n 
app_routes.append(graphql.get_view(graphiql=True))\n else:\n app_routes.append(graphql.get_view(graphiql=False))\n return app_routes", "def route(app, requires_login):\n routes = {\n '/kontoplan/<accounting>': kontoplan,\n '/huvudbok/<accounting>': huvudbok,\n '/balansrakning/<accounting>': balance_report,\n '/resultatrakning/<accounting>': income_statement_report,\n '/verifikationslista/<accounting>': verifications,\n '/arsrapport/<accounting>': year_report,\n '/verifikat/<objectid:verification>': print_verification,\n '/vatreport/<objectid:accounting>': vat_report,\n '/periodrapport/<accounting>': period_report,\n '/salesreport/<objectid:toid>': sales_report,\n '/verifikationslista_andrade/<accounting>': verifications_modified,\n '/accountspayable_report/<accounting>': accountspayable_report,\n '/accountspayable_paymentjournal/<accounting>': accountspayable_paymentjournal\n }\n for route, func in routes.items():\n name = func.__name__\n func = requires_login()(func)\n app.add_url_rule(route, name, func, methods=['GET', 'POST'])", "def main(global_config, **settings):\n LOGGER.info('= main :: settings = %s', settings)\n\n config = Configurator(settings=settings)\n\n # Home\n config.add_route('home', '/')\n\n # Lastly, we scan the config and make the app\n # config.scan()\n\n return config.make_wsgi_app()", "def blueprints(app):\n for blueprint in FLASK_BLUEPRINTS:\n app.register_blueprint(blueprint)\n\n return None", "def _prepare(self):\n\n # Set configuration defaults and save to the project document\n self.config.setdefault('PAGINATION', True)\n self.config.setdefault('PER_PAGE', 25)\n\n # Create and configure the Flask application\n self.app = self._create_app(self.config)\n\n # Add assets and routes\n self.assets = self._create_assets()\n self._register_routes()\n\n # Add module assets and routes\n self._module_assets = []\n for module in self.modules:\n try:\n module.register(self)\n except Exception as e:\n logger.error('Error while registering {} module: {}'.format(\n module.name, e))\n logger.error('Removing module {} from dashboard.'.format(\n module.name))\n self.modules.remove(module)\n\n # Clear dashboard and project caches.\n self.update_cache()", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('register_view', '/')\n config.add_route('confirm', '/confirm')\n config.add_static_view('deform_static', 'deform:static/')\n config.scan()\n return config.make_wsgi_app()", "def add_rest_routes(self, route, api=None, pos=0):\n def decorator(cls):\n # parent is the parent class of the relation\n cls_name = cls.__name__.lower()\n #print(cls_name)\n # default REST is the following pattern:\n # (r\"/post/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\", PostHandler),\n action=\"\"\n # if cls_name.endswith(\"handler\"):\n # action=action[:-7]\n # else:\n # action = cls_name\n # if route:\n action=route\n\n r=r\"/\"+action+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n if api:\n # render the given api in the route URL\n r=r\"/\"+action+r\"/\"+str(api)+r\"/(?P<param1>[^\\/]+)/?(?P<param2>[^\\/]+)?/?(?P<param3>[^\\/]+)?\"\n \n #print(\"added the following routes: \" + r)\n handlers=getattr(self.__class__, \"handlers\", None)\n handlers.append((r,cls))\n \n # use the positioned handlers\n handlers_tmp=getattr(self.__class__, \"handlers_tmp\", None)\n handlers_tmp.append(((r,cls),pos))\n\n 
r=r\"/\"+action+r\"/*\"\n #print(\"added the following routes: \" + r)\n handlers.append((r,cls))\n handlers_tmp.append(((r,cls),pos))\n #print(\"handlers: \" + str(self.handlers))\n print(\"ROUTING: added RESTful routes for: \" + cls.__name__ + \" as /\" + action)\n #print(dir())\n return cls\n return decorator", "def make_routes(routelist):\n return webapp2.WSGIApplication(routelist, debug=True)", "def create_app():\n # Creates flask object with directory for to serve static files\n app = Flask(__name__, static_url_path=C.STATIC_FILE_PATH)\n\n # Enabling CORS for the application\n CORS(app)\n\n app.debug = True\n # Registering books controller\n from app.controllers.books import mod\n app.register_blueprint(mod)\n\n # Test Route\n @app.route('/hello')\n def hello_world():\n return 'Hello World!'\n\n # Index route - serves index.html\n @mod.route('/')\n def main():\n return mod.send_static_file(\"index.html\")\n\n # serve routes from index by default\n app.add_url_rule('/', endpoint='index')\n\n return app", "def mount_routes(self, class_instance):\r\n for callback_name in dir(class_instance):\r\n callback = getattr(class_instance, callback_name)\r\n if hasattr(callback, self.ROUTES_ATTRIBUTE) or hasattr(callback, self.ERROR_ATTRIBUTE):\r\n # Bind the un-annotated callback to this class\r\n self._bind_method(class_instance, callback_name)\r\n # Apply view annotations\r\n if hasattr(callback, self.VIEW_ATTRIBUTE):\r\n args, kw = getattr(callback, self.VIEW_ATTRIBUTE)\r\n callback = bottle.view(*args, **kw)(callback)\r\n setattr(self, callback_name, callback)\r\n # Apply route annotations\r\n for args, kw in getattr(callback, self.ROUTES_ATTRIBUTE, ()):\r\n kw = self._apply_plugins(class_instance, copy.deepcopy(kw))\r\n kw.update(callback=callback)\r\n self._app.route(*args, **kw)\r\n for error_code in getattr(callback, self.ERROR_ATTRIBUTE, ()):\r\n self._app.error(error_code)(callback)", "def register_blueprints(*blueprints):\n\t\tblueprints = list(blueprints) + reload_blueprints()\n\t\tfor blueprint in blueprints:\n\t\t\tapp.register_blueprint(blueprint)", "def setup_rest(app: web.Application):\n _logger.debug(\"Setting up %s ...\", __name__)\n\n spec_path: Path = storage_resources.get_path(\"api/v0/openapi.yaml\")\n\n # Connects handlers\n for routes in [\n handlers_health.routes,\n handlers_locations.routes,\n handlers_datasets.routes,\n handlers_files.routes,\n handlers_simcore_s3.routes,\n ]:\n set_default_route_names(routes)\n app.router.add_routes(routes)\n\n _logger.debug(\"routes: %s\", get_named_routes_as_message(app))\n\n # prepare container for upload tasks\n app[UPLOAD_TASKS_KEY] = {}\n\n # Enable error, validation and envelop middleware on API routes\n append_rest_middlewares(app, api_version=f\"/{api_vtag}\")\n\n # Adds swagger doc UI\n setup_swagger(\n app,\n swagger_url=\"/dev/doc\",\n swagger_from_file=f\"{spec_path}\",\n ui_version=3,\n )", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n config.add_directive('add_restful_routes', routing.add_restful_routes)\n set_globals(**settings)\n\n from . 
import config as global_config\n\n secret = str(uuid.uuid4())\n\n # Beaker include\n config.include('pyramid_beaker')\n\n if global_config.AUTH_ENABLED is True:\n\n authn_policy = AuthTktAuthenticationPolicy(secret,\n callback=model.user_callback, hashalg='sha512', include_ip=global_config.AUTH_INCLUDE_IP)\n authz_policy = ACLAuthorizationPolicy()\n\n config.set_authentication_policy(authn_policy)\n config.set_authorization_policy(authz_policy)\n\n model.make_restful_app()\n routing.make_routes(config)\n config.scan()\n\n return config.make_wsgi_app()", "def register_blueprints(app, blueprints):\n for blueprint in blueprints:\n app.register_blueprint(blueprint)\n return None", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n \n config.add_route('decorated_without_definitions', '/decorated-without-definitions')\n\n config.add_route('simple_docrequest', '/simple-docrequest')\n\n config.add_route('choices_docrequest', '/choices-docrequest')\n\n config.add_route('list_docrequest', '/list-docrequest')\n\n config.add_route('with_url_param', '/with-url-param/{url_param}')\n\n config.scan()\n return config.make_wsgi_app()", "def create_app():\n\n ###############################\n # Create a FLASK application\n app = Flask(__name__)\n # Note: since the app is defined inside this file,\n # the static dir will be searched inside this subdirectory\n\n ###############################\n # Apply configuration\n app.config.from_object(CONFIG_MODULE + '.MyConfig')\n logger = get_logger(__name__, False) # app.config['DEBUG'])\n\n ###############################\n # # Cache\n # # http://flask.pocoo.org/docs/0.10/patterns/caching/#setting-up-a-cache\n # from werkzeug.contrib.cache import SimpleCache\n # cache = SimpleCache()\n\n # ###############################\n # # Database\n # db.init_app(app)\n\n # ###############################\n # # Application context\n # with app.app_context():\n # db.create_all()\n # logger.info(\"Initialized Database\")\n\n # ###############################\n # Add basic things to this app\n app.register_blueprint(cms)\n\n ###############################\n # Flask LOGIN\n lm.init_app(app)\n lm.login_view = '.login'\n\n # Logging\n @app.after_request\n def log_response(resp):\n\n log = logger.debug\n if resp.status_code == hcodes.HTTP_NOT_MODIFIED:\n log = logger.debug\n\n if 'static/' not in req.url and '/js/' not in req.url:\n log = logger.info\n\n from commons.logs import obscure_passwords\n log(\"{} {} {} {}\".format(\n req.method, req.url,\n obscure_passwords(req.data), resp))\n return resp\n\n return app", "def add_domain_routes(app):\n\n @app.route(\"/v1/list_agencies/\", methods=[\"GET\"])\n @get_dabs_sub_tier_agencies\n def list_agencies(cgac_sub_tiers, frec_sub_tiers):\n \"\"\" Get all agencies the current user has DABS access to.\n Args:\n cgac_sub_tiers - List of all CGAC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n frec_sub_tiers - List of all FREC SubTierAgencies generated by the get_dabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has DABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, get_accessible_agencies(cgac_sub_tiers, frec_sub_tiers))\n\n @app.route(\"/v1/list_all_agencies/\", methods=[\"GET\"])\n def list_all_agencies():\n \"\"\" List all CGAC and FREC Agencies \"\"\"\n return JsonResponse.create(StatusCode.OK, get_all_agencies())\n\n 
@app.route(\"/v1/list_sub_tier_agencies/\", methods=[\"GET\"])\n @get_fabs_sub_tier_agencies\n def list_sub_tier_agencies(sub_tier_agencies):\n \"\"\" List all Sub-Tier Agencies user has FABS permissions for\n Args:\n sub_tier_agencies - List of all SubTierAgencies generated by the get_fabs_sub_tier_agencies decorator,\n required to list only sub_tier_agencies that user has FABS permissions for\n \"\"\"\n return JsonResponse.create(StatusCode.OK, organize_sub_tier_agencies(sub_tier_agencies))", "def create_app():\n app = Flask(__name__)\n\n @app.route('/')\n def root():\n \"\"\"Base view.\"\"\"\n return 'TODO - part 2 and beyond!'\n\n return app", "def create_app(config_name):\n\n app = Flask(__name__)\n api = Api(app)\n CORS(app)\n\n app.config.from_object(config.configurations[config_name])\n \"\"\"This ensures that the urls /login and /login/ are recognized as same\n without considering the trailing slash \"\"\"\n app.url_map.strict_slashes = False\n\n with app.app_context():\n from app.resources.products import MenuResource\n from app.resources.orders import OrderResource\n from app.resources.addresses import AddressResource\n from app.resources.users import LoginResource, SignUpResource\n api.add_resource(MenuResource, \"/api/v1/menu\", \"/api/v1/menu/<int:product_id>\")\n api.add_resource(OrderResource, \"/api/v1/orders\",\n \"/api/v1/orders/<int:order_id>\")\n api.add_resource(AddressResource, \"/api/v1/addresses\",\n \"/api/v1/addresses/<int:address_id>\")\n api.add_resource(LoginResource, \"/api/v1/auth/login\")\n api.add_resource(SignUpResource, \"/api/v1/auth/signup\")\n\n @app.errorhandler(404)\n def error_404(e):\n return jsonify({\"code\": \"404\", \"message\": \"Not found\"}), 200\n\n @app.errorhandler(500)\n def error_500(e):\n return jsonify(\n {\"code\": \"503\", \"message\": \"We have some trouble\"\n \"processing your request\"\n \" please try again later\"}), 500\n\n @app.errorhandler(405)\n def error_405(e):\n return jsonify({\"code\": \"405\", \"message\": \"We dont allow\"\n \" the request method\",\n \"ok\": False}), 200\n\n @app.route(\"/\")\n def home():\n return render_template(\"index.html\")\n\n return app" ]
[ "0.72350115", "0.6712779", "0.67111504", "0.66817683", "0.6653695", "0.6587917", "0.6505443", "0.64713275", "0.6450273", "0.63990045", "0.63630635", "0.63256997", "0.63007975", "0.6283785", "0.6241679", "0.6237398", "0.62150884", "0.61836624", "0.6177383", "0.6159926", "0.6128027", "0.61154926", "0.611158", "0.6100133", "0.6093866", "0.6023158", "0.6012855", "0.5919565", "0.5889659", "0.58879066", "0.58800614", "0.58469945", "0.5846732", "0.5828492", "0.5798997", "0.57908136", "0.57716274", "0.57520396", "0.5737807", "0.5725588", "0.57218933", "0.5701975", "0.5690605", "0.56774867", "0.56329006", "0.5631989", "0.56304234", "0.56149536", "0.5581733", "0.55643755", "0.5558449", "0.5555056", "0.5546805", "0.55314326", "0.55210745", "0.54949045", "0.54886055", "0.5476527", "0.54649264", "0.5464806", "0.54616934", "0.54411906", "0.54381996", "0.54330283", "0.5426775", "0.5424993", "0.5414458", "0.54102767", "0.53832495", "0.5377517", "0.5377173", "0.53766364", "0.53749394", "0.537474", "0.5373921", "0.53700924", "0.5365815", "0.5365311", "0.5332477", "0.53299016", "0.5318964", "0.5317735", "0.5314801", "0.53141284", "0.53051347", "0.5295814", "0.5293797", "0.52930933", "0.52904576", "0.5284178", "0.5265627", "0.5259263", "0.5252381", "0.5249989", "0.5246907", "0.52353543", "0.522733", "0.5222669", "0.5218895", "0.52160317" ]
0.7511353
0
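Many of the negative snippets above share one Flask pattern: an application factory that builds the app and mounts blueprints under URL prefixes (e.g. app.register_blueprint(bp, url_prefix="/api/v1")). Below is a minimal runnable sketch of that pattern; the names (health_bp, create_app, the /api/v1 prefix) are illustrative and not taken from any snippet above.

from flask import Blueprint, Flask, jsonify

# A blueprint groups related routes so the factory can mount them
# under a common URL prefix.
health_bp = Blueprint("health", __name__)


@health_bp.route("/ping")
def ping():
    return jsonify(status="ok")


def create_app() -> Flask:
    # Application factory: build, configure, and return the WSGI app.
    app = Flask(__name__)
    app.register_blueprint(health_bp, url_prefix="/api/v1")
    return app


if __name__ == "__main__":
    # GET http://127.0.0.1:5000/api/v1/ping  ->  {"status": "ok"}
    create_app().run(debug=True)

The factory style matters for testing and deployment: each call returns a fresh app, so tests can build isolated instances and a WSGI server can import the factory instead of a module-level global.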
Clear project and dashboard server caches. The dashboard relies on caching for performance. If the data space is altered, this method may need to be called before the dashboard reflects those changes.
def update_cache(self): # Try to update signac project cache. Requires signac 0.9.2 or later. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) try: self.project.update_cache() except Exception: pass # Clear caches of all dashboard methods members = inspect.getmembers(self, predicate=inspect.ismethod) for func in filter(lambda f: hasattr(f, 'cache_clear'), map(lambda x: x[1], members)): func.cache_clear()
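The second half of update_cache relies on a convention rather than an API: any bound method that exposes a cache_clear attribute is treated as memoized and cleared. A minimal self-contained sketch of that convention, assuming the cached methods are ordinary functools.lru_cache wrappers (which is what the hasattr check implies; the Dashboard and job_title names here are illustrative):

import functools
import inspect


class Dashboard:
    @functools.lru_cache(maxsize=None)
    def job_title(self, job_id):
        print(f"computing title for job {job_id}")  # runs only on a cache miss
        return f"Job {job_id}"

    def update_cache(self):
        # Walk all bound methods and clear any that carry an lru_cache
        # wrapper -- the same hasattr(f, 'cache_clear') check as above.
        for _, func in inspect.getmembers(self, predicate=inspect.ismethod):
            if hasattr(func, "cache_clear"):
                func.cache_clear()


d = Dashboard()
d.job_title(1)      # miss: prints
d.job_title(1)      # hit: silent
d.update_cache()    # clears every memoized method
d.job_title(1)      # miss again

Note that lru_cache on instance methods keys on self, so the cache also keeps instances alive; that is acceptable for a single long-lived dashboard object.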
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear_cache():\n # TODO\n pass", "def clear_cache(self):\n\n for dataset in self._datasets:\n dataset.clear_cache()", "def clear_cache(self):\n pass", "def _clear_cache(self):\n keys = [\"nodes\", \"availability\", \"capacity\", \"cost\"]\n for key in keys:\n if key in self.__dict__:\n del self.__dict__[key]", "def clear(self, cacheDir):", "def clear_data_cache():\n load_glove.cache_clear()", "def clear_required_caches():\n\n return get_component(CachingPackage.COMPONENT_NAME).clear_required_caches()", "def clear_cache(self):\n requests.get(url=self.proxy_url+'/clear_cache')", "def clear_cache(self):\n self.part_cache.clear()", "def _clear_cache(self):\n\n self._cache = dict()", "def _clear_cache(self):\n self.cache = {}", "def clear_cache(self):\n local_app_data = os.getenv('LOCALAPPDATA')\n edge_root = os.path.join(local_app_data, 'Packages',\n 'Microsoft.MicrosoftEdge_8wekyb3d8bbwe')\n directories = ['AC', 'AppData']\n for directory in directories:\n path = os.path.join(edge_root, directory)\n try:\n shutil.rmtree(path)\n except Exception:\n pass", "def cache_clear(self):\n\t\tself.__cache = {}", "def clear_cache():\n cache = Cache()\n cache.reset()", "def clear_all(self):\n self.clear_redis()\n self.clear_cache()", "def clear(self):\n if self.__log:\n self.__logger.info(\"Cleared cache\")\n shutil.rmtree(self.cacheDir) # Remoeve the cache directory\n os.mkdir(self.cacheDir) # Create cache dir again\n self.__recentAccessed = [] # Reset recent accessed nodes", "def test_clear_cache(self):\n api_helpers.clear_cache()", "def clear_scache(cls) -> None:\n cls.scache = {}", "def clear_cache(self):\n self._cache = dict()", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def clear_dashboard(dashId):\n default_tables = {\n \"Counts\": {\n \"sizex\": 10,\n \"sizey\": 13,\n \"row\": 1,\n \"col\": 1\n },\n \"Top Campaigns\": {\n \"sizex\": 25,\n \"sizey\": 8,\n \"row\": 1,\n \"col\": 20\n },\n \"Recent Indicators\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 15,\n \"col\": 1\n },\n \"Recent Emails\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 23,\n \"col\": 1\n },\n \"Recent Samples\": {\n \"sizex\": 50,\n \"sizey\": 8,\n \"row\": 31,\n \"col\": 1\n },\n }\n try:\n for search in SavedSearch.objects(dashboard=dashId):\n if search.isDefaultOnDashboard:\n tempDict = default_tables[search.name]\n search.sizex = tempDict[\"sizex\"]\n search.sizey = tempDict[\"sizey\"]\n search.row = tempDict[\"row\"]\n search.col = tempDict[\"col\"]\n search.save()\n else:\n search.update(unset__col=1,unset__row=1,unset__sizex=1)\n except Exception as e:\n print e\n return {'success': False, \n 'message': \"An unexpected error occurred while resetting dash. 
Please refresh and try again\"}\n return {'success': True, \n 'message': \"Dashboard Reset\"}", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def clear(self):\n try:\n shutil.rmtree(self._cache_path)\n self._init_cache_path()\n except Exception:\n return", "def reset_cache():\n global _CACHE\n _CACHE.clear()", "def clear_cache(self):\n self.mongo_database.cache.delete_many({})", "def clear_cache(sender, **kwargs):\n# print \"Post save() -> clear cache\"\n cache.clear() # FIXME: This cleaned the complete cache for every site!", "def _clean_cache(self):\n del self._cache\n self._cache = {}", "def invalidateCaches(self):\n\n self._vertexCacheValid = False\n self._genusCacheValid = False\n self._vertexCharacteristicCacheValid = False\n self._coreCacheValid = False", "def clear_cache():\n sudo('service varnish restart')", "def _clear_caches(self):\n self._brushes = {}\n self._formats = {}", "def clear_cache():\n run(\"/etc/init.d/memcached restart\")", "def clear(self) -> None:\n self._maybe_show_deprecation_warning()\n _resource_caches.clear_all()", "def reset_cache(self):\n self.izx.reset_cache()\n self.ezx.reset_cache()", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def clearcache():\n g.pafs = {}\n g.streams = {}\n g.url_memo = collections.OrderedDict()\n dbg(\"%scache cleared%s\", c.p, c.w)\n g.message = \"cache cleared\"", "def clear_all(self) -> None:\n with self._caches_lock:\n self._function_caches = {}", "def clear_cache():\n os.remove(CACHE_FILE)", "def cache_clear():\n # type: () -> None\n with Cache() as c:\n c.clear()", "def reset_cache(self):\n self._cache_complete = False\n self._cache = {}\n self._catcache = {}", "def cache_clear():\n # type: () -> None\n with Cache(CACHE_URI) as c:\n c.clear()", "def clearCache(ham: Dict[str, Any]) -> None:\n\n ham[\"cache\"] = {\n \"matrix_of_drift\": [],\n \"operator\": {\n \"drift\": {},\n \"control\": {}\n },\n \"sequence\": {}\n }", "def clean_cache(self):\n return", "def clear_cache():\n run(\"rm -rf ~/public_html/var/cache/mage*\")\n run(\"redis-cli FLUSHALL\")", "def remove_cache(self) -> None:\n self.indexes = None", "def clear_cache():\n path = join(\"data\", \"cache\")\n file_list = os.listdir(path)\n file_list.remove(\".gitkeep\") # Exclude .gitkeep\n for filename in file_list:\n os.remove(join(path, filename))", "def destroy_cache():\n # TODO\n pass", "def _purge():\r\n _cache.clear()", "def clear_cache(self):\n self._cache = {}\n DrugBank._cache_record = {}\n DrugBank._top_root = None", "def set_emptying_cache():\r\n from pylons import g\r\n from r2.lib.cache import SelfEmptyingCache\r\n g.cache.caches = [SelfEmptyingCache(),] + list(g.cache.caches[1:])", "def clear(self):\n self._grasp_data = None\n self._status = None\n self._cache = dict()\n self._trajectory_result = None", "def invalidate_cache(self):\n #self.objects.objects = []\n return True", "def clear(self):\n self._cache = dict()", "def decache(self):", "def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}", "def reset(self):\n\n self.simple_cache = {}\n self.complex_cache = {}\n self.target_cache = {}", "def clear_cache(self):\n cache.delete(\"site-%s-consumer-count\" % self.site.id)", "def flush_caches(self):\n spotify.Error.maybe_raise(\n lib.sp_session_flush_caches(self._sp_session))", "def clear_cache():\n global custom_memory, custom_hit, custom_miss\n custom_memory = {}\n custom_hit = 0\n 
custom_miss = 0\n return", "def clear_datastore():\n local('lib/remote_api_shell.py tweetlocker -p /_/shell -c '\n '\"from lib.utils import clear_datastore; clear_datastore()\"',\n capture=False)", "def invalidate_cache(self):\n self._invalidate_http_cache()", "def clear_cache(self):\n for fld in self.fields:\n fld.clear_cache()", "def clear(self, warn=True):\r\n if warn:\r\n self.warn('Flushing completely the cache')\r\n rm_subdirs(self.cachedir)", "def clear_cache(self):\n return self.fetcher.clear_cache()", "def invalidate_caches(self) -> None:\n for seg in self.segments:\n seg.invalidate_caches()\n\n self._recalculate_caches()", "def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]", "def tearDown(self):\n api.clear_cache()", "def tearDown(self):\n api.clear_cache()", "def clearCache(*args, allNodes: bool=True, computed: bool=True, dirty: bool=True,\n **kwargs)->int:\n pass", "def _clean_cache(self):\n\n torch = import_optional_dependency(\"torch\")\n if self.device == torch.device('cuda'):\n with torch.cuda.device(self.device):\n torch.cuda.empty_cache()", "def clear(self):\n for project in Project.objects:\n project.delete()", "def clearCache(cls):\n cls._cameraCache = None", "def _clear_model_caches(self):\n for comp in getattr(self.model, u'component', []):\n for math in getattr(comp, u'math', []):\n math._unset_cached_links()\n for var in self.model.get_all_variables():\n var.clear_dependency_info()\n assignment_exprs = self.model.search_for_assignments()\n for expr in assignment_exprs:\n expr.clear_dependency_info()", "def clear(self) -> None:\n self._REGISTERED_ENVS.clear()\n self._manifests = []\n self._sync = True", "def clear_existing_cache():\n [\n f.unlink()\n for f in Path(DATA_DIR / \"cache\").glob(\"*\")\n if f.is_file() and \"\".join(tuple(map(str, __version__))) not in f.name\n ]", "def _clear_cache(self, course_version_guid=None):\n if self.request_cache is None:\n return\n\n if course_version_guid:\n try:\n del self.request_cache.data.setdefault('course_cache', {})[course_version_guid]\n except KeyError:\n pass\n else:\n self.request_cache.data['course_cache'] = {}", "def forget(self) -> None:\n\n cherrypy.engine.publish(\n \"cache:clear\",\n self.cache_key\n )", "def clear_cache():\n if os.path.exists(get_cachedir()):\n for filename in os.listdir(get_cachedir()):\n if not filename.endswith('.cache'):\n continue\n\n path = os.path.join(get_cachedir(), filename)\n os.unlink(path)", "def empty_cache():\n\n if os.path.exists(cache_dir):\n shutil.rmtree(cache_dir)\n os.makedirs(cache_dir)", "def clear_cache(cls):\n monitor_running = False\n if cls._monitor_thread:\n monitor_running = cls._monitor_thread.is_alive()\n cls.stop_cache_monitor()\n with cls._monitor_lock, cls._lock:\n while cls._cache:\n cls._cache.pop().delete()\n if monitor_running:\n cls.start_cache_monitor()", "def clean(self):\n super(NoneCache, self).clean()", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def _invalidate_http_cache(self):\n self._requests_cache = {}", "def cache_clear():\r\n with lock:\r\n for value in cache.values():\r\n on_eviction(value[RESULT])\r\n cache.clear()\r\n root = nonlocal_root[0]\r\n root[:] = [root, root, None, None]\r\n stats[:] = [0, 0]", "def clearImageCache(self):\n if os.path.exists(\"./cache/\"):\n shutil.rmtree(\"./cache/\")", "def clear(self):\n try:\n self._cache.flushdb()\n except Exception as err:\n return self.warn_or_error(err)", "def clear_cache(self, timeout=60):\r\n # navigate to the settings page\r\n 
self.driver.get('chrome://settings/clearBrowserData')\r\n # wait for the button to appear\r\n wait = WebDriverWait(self.driver, timeout)\r\n time.sleep(2)\r\n\r\n if not self.click_clear_browsing_button():\r\n self.click_cache_checkbox()\r\n time.sleep(2)\r\n self.click_clear_browsing_button()", "def reset_cache(self):\n self.cache = [None] * self.n_layers\n self.offset = 0\n logger.debug('Reset cache.')", "def _invalidate_local_get_event_cache_all(self) -> None:\n self._get_event_cache.clear()\n self._event_ref.clear()\n self._current_event_fetches.clear()", "def clear_cache(self):\n ida_strlist.clear_strlist()", "def reset_cache(self, force_reset=False):\n if force_reset:\n self.write_data_cache(self._empty_data())\n else:\n msg = 'All information about stored datasets will be lost if you proceed! ' + \\\n 'Set \\'force_reset=True\\' to proceed with the reset of dbcollection.json.'\n warnings.warn(msg, UserWarning, stacklevel=2)", "def clear_cache(self): # pragma: no cover\n # Overwrite with an empty dictionary\n with open(self.cacheFile, \"wb\") as f:\n pkl.dump({}, f)\n return", "def cache_clear(self):\n self.fold_term.cache_clear()", "def clear_all() -> None:\n datastore.db.client.drop_database(DATABASE_NAME)\n ClassifierCache.clear_all()", "def clearAllSettings(self) -> None:\n ...", "def clean_cache_step(self):\n logger.info('Step {}, cleaning cache'.format(self.name))\n self.output = None\n return self", "def cache_clear():\r\n nonlocal hits, misses, full\r\n\r\n # no await from here ...\r\n cache.clear()\r\n root[:] = [root, root, None, None]\r\n hits = misses = 0\r\n full = False\r\n # to there\r", "def clear(self, cacheDir):\n\n for clearer in self._clearers :\n clearer.clear(cacheDir);", "def flush_cache(cls, ):\n cls.Lock.acquire()\n cls.UsbDevices.clear()\n cls.Lock.release()", "def flush_local_cache(self):\n self._local_cache = {}", "def clearobscaches(repo):\n # only clear cache if there is obsstore data in this repo\n if 'obsstore' in repo._filecache:\n repo.obsstore.caches.clear()" ]
[ "0.67237765", "0.67170703", "0.6651066", "0.65608287", "0.6541997", "0.64387864", "0.642453", "0.63758636", "0.6363097", "0.6251716", "0.62509453", "0.62315863", "0.6219807", "0.6218337", "0.6209813", "0.62015516", "0.61923134", "0.61808413", "0.6152699", "0.61314887", "0.6119369", "0.61038476", "0.6102192", "0.60880035", "0.6086108", "0.6078355", "0.6063535", "0.60626245", "0.60619646", "0.60574824", "0.6027536", "0.60217506", "0.5973107", "0.5945084", "0.5944512", "0.5939769", "0.5936778", "0.5925635", "0.591678", "0.59134924", "0.59105325", "0.59060484", "0.5904688", "0.5902708", "0.58875793", "0.58751065", "0.5871328", "0.5864608", "0.585888", "0.58473414", "0.5839572", "0.58250606", "0.58177894", "0.5800081", "0.57956535", "0.57861054", "0.57854277", "0.57659835", "0.57517904", "0.5740511", "0.5738829", "0.5733829", "0.5726398", "0.5720559", "0.57069397", "0.56939536", "0.56939536", "0.56881374", "0.5685649", "0.56832266", "0.5682265", "0.5663135", "0.56563324", "0.5656007", "0.56370956", "0.56331676", "0.56318164", "0.562733", "0.5621226", "0.56200486", "0.5619043", "0.5615401", "0.5607631", "0.5606186", "0.5595956", "0.55910367", "0.5586949", "0.5581241", "0.55724597", "0.5563188", "0.5560576", "0.55571455", "0.5556318", "0.55423075", "0.55417866", "0.55388415", "0.5534005", "0.55272293", "0.5514333", "0.55118054" ]
0.72654325
0
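Several negatives in this record clear per-instance caches by deleting keys from self.__dict__ (for example the _clear_cache snippet with the "nodes", "availability", "capacity", "cost" keys). That is exactly how functools.cached_property invalidation works, since it stores the computed value in the instance dict. A short sketch of the idiom, with illustrative names:

from functools import cached_property


class Network:
    def __init__(self, edges):
        self.edges = edges

    @cached_property
    def capacity(self):
        print("recomputing capacity")  # runs once per cache fill
        return sum(w for _, _, w in self.edges)

    def _clear_cache(self):
        # cached_property stores its result in the instance __dict__,
        # so invalidation is just deleting the attribute.
        self.__dict__.pop("capacity", None)


net = Network([("a", "b", 3), ("b", "c", 4)])
net.capacity        # prints, then returns 7
net.capacity        # cached: returns 7 silently
net._clear_cache()
net.capacity        # recomputes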
Call the dashboard as a WSGI application.
def __call__(self, environ, start_response): return self.app(environ, start_response)
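That one-line delegation is the entire WSGI contract: anything callable as callable(environ, start_response) can be served by any WSGI server, so a wrapper that forwards both arguments is itself a complete application. A runnable sketch using only the standard library (inner_app and the port are illustrative, not from the document above):

from wsgiref.simple_server import make_server


def inner_app(environ, start_response):
    # Any WSGI callable works here; a Flask or Pyramid app would too.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello from the wrapped app\n"]


class Dashboard:
    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        # Forwarding both arguments makes the wrapper a valid WSGI app,
        # so a Dashboard instance can be handed to any WSGI server.
        return self.app(environ, start_response)


if __name__ == "__main__":
    with make_server("127.0.0.1", 8050, Dashboard(inner_app)) as server:
        server.serve_forever()

Middleware such as the ProfilerMiddleware wrapping that appears in one of the negatives below works the same way: each layer stores the next app and forwards the two arguments, optionally intercepting them.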
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\r\n run_wsgi_app(app)", "def main():\n run_wsgi_app(APP)", "def Main():\n wsgiref.handlers.CGIHandler().run(application)", "def main(_, **settings):\n config = Configurator(settings=settings)\n register_includes(config)\n register_json_renderer(config)\n register_routes(config)\n\n config.scan()\n return config.make_wsgi_app()", "def serve() -> None:\n uvicorn.run(\n \"bartender.web.application:get_app\",\n workers=settings.workers_count,\n host=settings.host,\n port=settings.port,\n reload=settings.reload,\n log_level=settings.log_level,\n factory=True,\n )", "def wsgi_app():\n return bottle.default_app()", "def wsgi_app():\n return bottle.default_app()", "def init_app():\n app = Flask(__name__)\n\n with app.app_context():\n # Import parts of our core Flask app\n from . import routes\n\n from .plotlydash.index import init_dashboard\n app = init_dashboard(app)\n\n return app", "def app():\n return aplicattion", "def main(global_config, **settings):\n config = Configurator(settings=settings, root_factory=root_factory)\n config.include('substanced')\n config.include('.resources')\n config.scan()\n return config.make_wsgi_app()", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include(includeme)\n return config.make_wsgi_app()", "def main(global_config, **settings):\n LOGGER.info('= main :: settings = %s', settings)\n\n config = Configurator(settings=settings)\n\n # Home\n config.add_route('home', '/')\n\n # Lastly, we scan the config and make the app\n # config.scan()\n\n return config.make_wsgi_app()", "def main() -> None:\n config = get_config()\n app = Application()\n web_config = config[\"web\"]\n webapp = WebApp(config)\n webapp.attach_to(app)\n\n run_config = keep(web_config, {\"host\", \"port\"})\n run_app(app, **run_config)", "def app():\n return create_app()", "def web():\n from mephisto.client.server import app\n\n app.run(debug=False)", "def dashboard():", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('total_users', '/totalusers/')\n config.add_route('pageviews_weekly', '/pageviews/weekly/')\n config.add_route('pageviews_monthly', '/pageviews/monthly/')\n config.add_route('pageviews', '/pageviews/')\n config.add_route('devices', '/devices/')\n config.add_route('moreinfo', '/moreinfo/{profile_id}')\n config.scan()\n return config.make_wsgi_app()", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def main():\r\n LOG.info('Starting server build.')\r\n web.run_app(init_app(),\r\n host=os.environ.get('APP_HOST', CONFIG.registry.get('app_host', '0.0.0.0')),\r\n port=int(os.environ.get('APP_PORT', CONFIG.registry.get('app_port', 8080))),\r\n shutdown_timeout=0,\r\n ssl_context=application_security())", "def main(global_config, **settings):\n #import pdb; pdb.set_trace()\n config = Configurator(settings=settings)\n\n # logging config for pserve / wsgi\n if settings and 'logging_config_file' in settings:\n from pyramid.paster import setup_logging\n setup_logging(settings['logging_config_file'])\n\n from . 
import views\n config.include(views.do_view_config)\n config.scan('pelias.adapter.pyramid')\n\n # CORS -- might not make this call in production (eliminate a bit of overheads, as CORS is handled by Apache)\n if settings and settings.get('enable_cors_headers') == 'true':\n config.add_subscriber(app_utils.add_cors_headers_response_callback, NewRequest)\n\n return config.make_wsgi_app()", "def app(environ, start_response):\n status = '200 OK'\n response_headers = [('Content-Type', 'text/plain')]\n start_response(status, response_headers)\n return ['Hello world from a simple WSGI application!\\n']", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('register_view', '/')\n config.add_route('confirm', '/confirm')\n config.add_static_view('deform_static', 'deform:static/')\n config.scan()\n return config.make_wsgi_app()", "def entry_point():\n return render_template(\"index.html\")", "def main(global_config, **settings):\n authn_policy = AuthTktAuthenticationPolicy(secret='miloSecretMessageForAuthToken', \n\t\t\t\t\t\t\t\t\t\t\t callback=adminfinder)\n authz_policy = ACLAuthorizationPolicy()\n my_session_factory = UnencryptedCookieSessionFactoryConfig('miloSecretMessageToSignTheCookie')\n config = Configurator(root_factory=Root, settings=settings,\n \t\t\t\t\t\t\t\tauthentication_policy=authn_policy,\n \t\t\t\t\t\t\t\tauthorization_policy=authz_policy,\n \t\t\t\t\t\t\t\tsession_factory = my_session_factory)\n config.add_subscriber(add_renderer_globals, BeforeRender)\n config.add_static_view('static', 'milo_app:static')\n config.add_static_view('css', 'milo_app:static/css')\n config.add_static_view('js', 'milo_app:static/js')\n config.add_static_view('images', 'milo_app:static/images')\n config.add_static_view('icons', 'milo_app:static/images/icons')\n config.scan()\n connect(settings['db_name'])\n return config.make_wsgi_app()", "def main():\n print(\"def main\")\n return APP.run()", "def startapp():", "def __call__(self, environ, start_response):\n # TODO: Consider supporting multiple applications mounted at root URL.\n # Then, consider providing priority of mounted applications.\n # One application could explicitly override some routes of other.\n script = environ.get('PATH_INFO', '')\n path_info = ''\n while '/' in script:\n if script in self.mounts:\n app = self.mounts[script]\n break\n items = script.split('/')\n script = '/'.join(items[:-1])\n path_info = '/%s%s' % (items[-1], path_info)\n else:\n app = self.mounts.get(script, self.app)\n original_script_name = environ.get('SCRIPT_NAME', '')\n environ['SCRIPT_NAME'] = original_script_name + script\n environ['PATH_INFO'] = path_info\n return app(environ, start_response)", "def main(global_config, **settings):\n with open('config.yaml', 'r') as fp:\n local_config = yaml.safe_load(fp)['config']\n Calendar.initialize(local_config['calendar'])\n ICloud.initialize(local_config['icloud'])\n Weather.initialize(local_config['weather'])\n\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('oauthCallback', '/oauthCallback')\n config.scan()\n return config.make_wsgi_app()", "def create_app():\n from server.web import create_app\n # If we do a static javascript app via flask, add it here\n # from server.web import create_app as create_web_app\n return create_app()", 
"def __call__(self, environ, start_response):\n\t\tsegments = get_path_info(environ).strip('/').split('/', 2)\n\t\tif len(segments) < 2:\n\t\t\tapp = hateoas_app\n\t\telse:\n\t\t\tpfx = segments[0] + '/' + segments[1]\n\t\t\tapp = self.instances.get(pfx, NotFound())\n\t\treturn app(environ, start_response)", "def run(self):\n self.app.run()", "def run(self):\n self.app.run()", "def main():\n cfg.CONF(sys.argv[1:], project='blazar', prog='blazar-api')\n notifier.init()\n service_utils.prepare_service(sys.argv)\n if not CONF.enable_v1_api:\n app = v2_app.make_app()\n else:\n app = wsgi_app.VersionSelectorApplication()\n\n wsgi.server(eventlet.listen((CONF.host, CONF.port), backlog=500), app)", "def make_app():\n return tornado.web.Application([\n tornado.web.URLSpec(r\"/ws/\", WebSocket, name=\"websocket\"),\n tornado.web.URLSpec(r\"/\", StartPage, name='index'),\n (r\"/static/\", tornado.web.StaticFileHandler,\n dict(path=SETTINGS['static_path'])),\n ], **SETTINGS)", "def application(environ, start_response, app=[]):\n if not app:\n app.append(make_application())\n return app[0](environ, start_response)", "def app(request):\n app = flask.Flask(__name__)\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.add_static_view('static', 'static', cache_max_age=3600)\n\n db_url = urlparse(settings['mongo_uri'])\n conn = pymongo.Connection(host=db_url.hostname,\n port=db_url.port)\n config.registry.settings['db_conn'] = conn\n\n def add_mongo_db(event):\n settings = event.request.registry.settings\n db = settings['db_conn'][db_url.path[1:]]\n if db_url.username and db_url.password:\n db.authenticate(db_url.username, db_url.password)\n event.request.db = db\n # event.request.fs = GridFS(db)\n\n config.add_subscriber(add_mongo_db, NewRequest)\n\n config.add_route('home', '/')\n config.add_route('get_trip_data', '/get_trip_data')\n config.add_route('get_stage_data', '/get_stage_data')\n config.add_route('save', '/save')\n\n config.scan()\n return config.make_wsgi_app()", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n\n init_includes(config)\n init_routing(config)\n init_db(config)\n return config.make_wsgi_app()", "def dashboard():\n return render_template('home/dashboard.html',title='SycliQ Dashboard')", "def index():\n return \"Attendance Flask server\"", "def main():\n app = App()\n app.run()", "def run():\n app.run()", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('clldmpg')\n config.include('clld_glottologfamily_plugin')\n config.registry.registerUtility(MyMapMarker(), IMapMarker)\n config.registry.registerUtility(LexibankCtxFactoryQuery(), ICtxFactoryQuery)\n return config.make_wsgi_app()", "def app():\n app = create_app()\n return app", "def app_factory():\n app = web.Application()\n app.add_routes([\n web.get('/ping', handle_ping),\n ])\n return app", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n # adding a Translation Directory\n config.add_translation_dirs('vpt.transformer:locale/')\n config.add_static_view('static', 'static', cache_max_age=3600)\n # publish transforms directory to download transformed files\n config.add_static_view('transforms', 'transforms', cache_max_age=3600)\n # home view\n config.add_route('home', '/')\n config.scan()\n return config.make_wsgi_app()", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n\n # Adding a renderer for 
custom model objects\n custom_json = JSON()\n model.register_custom_json(custom_json)\n config.add_renderer('json', custom_json)\n\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('index', '/')\n config.add_route('api_board', '/api/{board}/', request_method='GET')\n config.add_route('api_thread', '/api/{board}/{thread}/', request_method='GET')\n config.add_route('board', '/{board}/', request_method='GET')\n config.add_route('new_thread', '/{board}/', request_method='POST')\n config.add_route('thread', '/{board}/{thread}/', request_method='GET')\n config.add_route('reply', '/{board}/{thread}/', request_method='POST')\n config.scan()\n return config.make_wsgi_app()", "def main(global_config, **settings):\n\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.scan()\n return config.make_wsgi_app()", "def bootstrap_wsgi():\n return get_wsgi_application()", "def app(environ: t.Dict, start_response):\n # Print the request object details in environ.items()\n for k, v in environ.items():\n print(k, v)\n\n # Let's capture the request path\n path = environ.get(\"PATH_INFO\")\n\n # Handle our different routes. Render different templates.\n # Allow user to add \"/\" or not to URL string\n # NOTE: Don't use elif statement! It skips 'data' assignment!\n if path.endswith(\"/\"):\n path = path[:-1] # remove the trailing \"/\"\n if path == \"\": # the root / index\n data = home(environ)\n elif path == \"/contact\":\n data = contact_us(environ)\n elif path == \"/box-office\":\n data = read_box_office_data(environ)\n else:\n data = render_template(template_name=\"404.html\", context={\"path\": path})\n\n # Encode data to BYTE string\n data = data.encode(\"utf-8\")\n\n # Gunicorn's start_response to get a response going\n start_response(\n f\"200 OK\",\n [(\"Content-Type\", \"text/html\"), (\"Content-Length\", str(len(data)))],\n # You can remove these headers and the browser will still parse it.\n # Modern browsers are smart enough to infer how to parse the request\n )\n # Where does this print to? Server logs I bet... 
YES!\n # print(f\"{data=}\\n{iter([data])}\")\n return iter([data]) # <list_iterator object at 0x10f9f1340>", "def main(methods=[\"GET\"]):\n validate_auth()\n ## issue with path resolution after build\n return send_from_directory(\n #todo: remove templates directory reference; index.html isn't a jinja template\n safe_join(current_app.static_folder, 'templates'),\n 'index.html',\n cache_timeout=-1\n )", "def main(global_config, **settings):\n # add settings in here?\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()", "def dashboard():\r\n return render_template('{}/dashboard.html'.format(MODULE_DIR))", "def dashboard():\n return render_template('home/dashboard.html')", "def run():\n register_component(\"press\")\n run_app(host=\"0.0.0.0\", port=8080, debug=True, workers=os.cpu_count())", "def main():\n app.run(debug=True)", "def webserver_start():\n run(_webserver_command())", "def get_apps(config: Config) -> Tuple[Flask, Dash]:\n\n flask_app = Flask('shyft', template_folder=os.path.join(CONTENT_DIR, 'templates'),\n static_folder=os.path.join(CONTENT_DIR, 'static'))\n logger.debug(f'static: {flask_app.static_folder}')\n\n # Prevent caching of files (such as thumbnails)\n # NOTE: We may actually want to cache when not debugging, as there shouldn't be different activities loaded with the\n # same ID in normal usage.\n flask_app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0\n\n dash_app = Dash(__name__, server=flask_app, external_stylesheets=STYLESHEETS, title='Shyft')\n\n controller = MainController(dash_app, config)\n\n @flask_app.route('/thumbnails/<id>.png')\n def get_thumbnail(id: str):\n try:\n activity_id = id_str_to_ints(id)[0]\n except ValueError:\n return abort(404, description=f'Invalid activity ID specified: \"{id}\".')\n # print(f'Activity with ID {activity_id}: {am.get_metadata_by_id(activity_id)}')\n metadata = controller.activity_manager.get_metadata_by_id(activity_id)\n return send_file(metadata.thumbnail_file, mimetype='image/png')\n\n @flask_app.route('/gpx_files')\n def get_gpx_file():\n logger.debug(f'gpx_files endpoint reached with GET params: \"{request.args}\".')\n return controller.serve_files_from_get_params(request.args, lambda md: md.gpx_file,\n f'{APP_NAME}_gpx_files.zip',\n 'No GPX files found for selected activities.')\n\n @flask_app.route('/tcx_files')\n def get_tcx_file():\n logger.debug(f'tcx_files endpoint reached with GET params: \"{request.args}\".')\n return controller.serve_files_from_get_params(request.args, lambda md: md.tcx_file,\n f'{APP_NAME}_tcx_files.zip',\n 'No TCX files found for selected activities.')\n\n @flask_app.route('/source_files')\n def get_source_file():\n logger.debug(f'source_files endpoint reached with GET params: \"{request.args}\".')\n return controller.serve_files_from_get_params(request.args, lambda md: md.source_file,\n f'{APP_NAME}_source_files.zip',\n 'No source files found for selected activities.')\n\n @flask_app.route('/delete', methods=['POST', 'GET'])\n def delete():\n logger.debug(f'/delete endpoint reached with args: {request.form}')\n if not request.form:\n logger.warning('delete function received empty request.form. Not deleting anything.')\n else:\n try:\n activity_ids = [md.activity_id for md in\n controller.url_params_to_metadata(request.form)]\n except ValueError:\n return abort(404, f'Bad query. 
Check logs for details.')\n for i in activity_ids:\n try:\n controller.activity_manager.delete_activity(i)\n except ValueError:\n controller.msg_bus.add_message(\n f'Could not delete activity with ID {i}. It may not exist.',\n logging.ERROR\n )\n if len(activity_ids) == 1:\n controller.msg_bus.add_message(f'Deleted activity with ID {activity_ids[0]}.')\n else:\n controller.msg_bus.add_message(f'Deleted {len(activity_ids)} activities.')\n if send_to := request.args.get('redirect'):\n return redirect(send_to)\n else:\n return redirect('/')\n\n @flask_app.route('/_calendar')\n def calendar():\n return render_template('calendar.html', query=urlparse(request.url).query, stylesheets=STYLESHEETS)\n\n @flask_app.route('/json/calendar_data')\n def metadata_json():\n return Response(controller.url_params_to_calendar_data(request.args),\n mimetype='application/vnd.api+json')\n\n\n return flask_app, dash_app", "def _create_app(self, config={}):\n app = Flask('signac-dashboard')\n app.config.update({\n 'SECRET_KEY': os.urandom(24),\n 'SEND_FILE_MAX_AGE_DEFAULT': 300, # Cache control for static files\n })\n\n # Load the provided config\n app.config.update(config)\n\n # Enable profiling\n if app.config.get('PROFILE'):\n logger.warning(\"Application profiling is enabled.\")\n from werkzeug.contrib.profiler import ProfilerMiddleware\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10])\n\n # Set up default signac-dashboard static and template paths\n signac_dashboard_path = os.path.dirname(__file__)\n app.static_folder = signac_dashboard_path + '/static'\n app.template_folder = signac_dashboard_path + '/templates'\n\n # Set up custom template paths\n # The paths in DASHBOARD_PATHS give the preferred order of template\n # loading\n loader_list = []\n for dashpath in list(app.config.get('DASHBOARD_PATHS', [])):\n logger.warning(\"Adding '{}' to dashboard paths.\".format(dashpath))\n loader_list.append(\n jinja2.FileSystemLoader(dashpath + '/templates'))\n\n # The default loader goes last and is overridden by any custom paths\n loader_list.append(app.jinja_loader)\n\n app.jinja_loader = jinja2.ChoiceLoader(loader_list)\n\n turbolinks(app)\n\n return app", "def dashboard():\n return render_template(\"home/dashboard.html\", title=\"Dashboard\")", "def app_factory(global_conf, load_app_kwds={}, **kwargs):\n # Create the Galaxy application unless passed in\n kwargs = load_app_properties(\n kwds=kwargs,\n **load_app_kwds\n )\n if 'app' in kwargs:\n app = kwargs.pop('app')\n else:\n from galaxy.webapps.coralsnp_reports.app import UniverseApplication\n app = UniverseApplication(global_conf=global_conf, **kwargs)\n atexit.register(app.shutdown)\n # Create the universe WSGI application\n webapp = CoralSNPReportsWebApplication(app, session_cookie='galaxycoralsnpreportssession', name=\"coralsnp_reports\")\n add_ui_controllers(webapp, app)\n # These two routes handle our simple needs at the moment\n webapp.add_route('/{controller}/{action}', controller=\"root\", action='index')\n webapp.add_route('/{action}', controller='root', action='index')\n webapp.finalize_config()\n # Wrap the webapp in some useful middleware\n if kwargs.get('middleware', True):\n webapp = wrap_in_middleware(webapp, global_conf, app.application_stack, **kwargs)\n if asbool(kwargs.get('static_enabled', True)):\n webapp = wrap_if_allowed(webapp, app.application_stack, wrap_in_static,\n args=(global_conf,),\n kwargs=kwargs)\n # Close any pooled database connections before forking\n try:\n 
galaxy.model.corals.mapping.metadata.bind.dispose()\n except Exception:\n log.exception(\"Unable to dispose of pooled coralsnp_reports model database connections.\")\n # Return\n return webapp", "def run_dashboard(\n database_path,\n no_browser,\n port,\n updating_options,\n):\n port = _find_free_port() if port is None else port\n port = int(port)\n\n if not isinstance(database_path, (str, pathlib.Path)):\n raise TypeError(\n \"database_path must be string or pathlib.Path. \",\n f\"You supplied {type(database_path)}.\",\n )\n else:\n database_path = pathlib.Path(database_path)\n if not database_path.exists():\n raise ValueError(\n f\"The database path {database_path} you supplied does not exist.\"\n )\n\n session_data = {\n \"last_retrieved\": 0,\n \"database_path\": database_path,\n \"callbacks\": {},\n }\n\n app_func = partial(\n dashboard_app,\n session_data=session_data,\n updating_options=updating_options,\n )\n apps = {\"/\": Application(FunctionHandler(app_func))}\n\n _start_server(apps=apps, port=port, no_browser=no_browser)", "def main():\n app.run(host='127.0.0.1', port=443, debug=True)\n CORS(app)", "def dashboard():\n return render_template('home/dashboard.html', title=\"Dashboard\")", "def create_app():\n app = Flask(__name__)\n\n\n @app.route('/')\n def barebones():\n return 'the barebones'\n\n return app", "def run():\n app = Application()\n #app.sentry_client = AsyncSentryClient(app.settings['sentry_url'])\n http_server = HTTPServer(app, xheaders=True)\n http_server.listen(options.port)\n print('Running on port %d' % options.port)", "def main(args=None):\n app()\n return 0", "def main(global_config, **settings):\n config = Configurator(settings=settings)\n config.include('pyramid_chameleon')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n \n config.registry.registerUtility(Basic(settings), INotification, 'basic')\n config.registry.registerUtility(Twitter(settings), INotification, 'twitter')\n\n #config.registry.registerUtility(MailService(settings), INotification, 'mail')\n\n #config.registry.registerUtility(Facebook(settings), INotification, 'facebook')\n\n #config.registry.registerUtility(Twitter(settings), INotification, 'twitter')\n\n\n config.scan()\n return config.make_wsgi_app()", "def application(env, start):\n\n global application\n config = read_config()\n script_dir = os.path.dirname(os.path.abspath(__file__))\n if config.get('pages_path') is None:\n config.set('pages_path', os.path.join(script_dir, 'docs'))\n wiki = Wiki(config)\n application = wiki.application\n return application(env, start)", "def main(global_config, **settings):\n SETTINGS = settings\n config = Configurator(settings=settings,)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.cors')\n config.add_cors_preflight_handler()\n config.include('.routes')\n config.include('.security')\n config.include('..greggo')\n config.add_static_view('static', path='repoll:static')\n config.scan()\n return config.make_wsgi_app()", "def main(global_config, **settings):\n with Configurator(settings=settings) as config:\n config.include('twitcher.models')\n config.include('twitcher.frontpage')\n config.include('twitcher.oauth2')\n config.include('twitcher.api')\n config.include('twitcher.owsproxy')\n config.scan()\n return config.make_wsgi_app()", "def start():\n from paste.deploy import loadapp, loadserver\n from moksha.config.environment import load_environment\n from moksha.config.middleware import make_app\n ini = 'config:' + 
path('development.ini').abspath()\n wsgi_app = loadapp(ini)\n serve = loadserver(ini)\n serve(wsgi_app)", "def main():\n conn = pymongo.MongoClient(settings.DB_URI)\n database = conn[settings.DB_NAME]\n\n application = tornado.web.Application(\n [\n (r\"/\", BaseHandler),\n (r\"/upload\", UploadHandler),\n (r\"/web/([^/]+)\", WebHandler),\n ],\n database=database, secret=settings.SECRET, debug=settings.DEBUG, gzip=True,\n template_path=settings.TEMPLATE_PATH,\n static_path=settings.STATIC_PATH\n )\n application.cache = {}\n\n logging.info(\"starting bbgps...\")\n application.listen(settings.PORT)\n tornado.ioloop.IOLoop.instance().start()", "def index():\n return app.send_static_file(\"index.html\")", "def __call__(self, environ, start_response):\n self.preprocess(environ)\n return self.app(environ, start_response)", "def index():\n return app.send_static_file('index.html')", "def main(global_config, **settings):\n journal_file = settings['ledger_file']\n journal = ledger.read_journal(journal_file)\n\n config = Configurator(settings=settings)\n\n config.add_request_method(lambda _: journal,\n 'journal',\n reify=True)\n\n config.include('pyramid_chameleon')\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.add_route('transactions', '/api/transactions')\n config.add_route('transaction', '/api/transactions/:id')\n config.scan()\n return config.make_wsgi_app()", "def main(self):\n\n def _run(args):\n kwargs = vars(args)\n if kwargs.get('host', None) is not None:\n self.config['HOST'] = kwargs.pop('host')\n if kwargs.get('port', None) is not None:\n self.config['PORT'] = kwargs.pop('port')\n self.config['PROFILE'] = kwargs.pop('profile')\n self.config['DEBUG'] = kwargs.pop('debug')\n self.run()\n\n parser = argparse.ArgumentParser(\n description=\"signac-dashboard is a web-based data visualization \"\n \"and analysis tool, part of the signac framework.\")\n parser.add_argument(\n '--debug',\n action='store_true',\n help=\"Show traceback on error for debugging.\")\n parser.add_argument(\n '--version',\n action='store_true',\n help=\"Display the version number and exit.\")\n subparsers = parser.add_subparsers()\n\n parser_run = subparsers.add_parser('run')\n parser_run.add_argument(\n '-p', '--profile',\n action='store_true',\n help='Enable flask performance profiling.')\n parser_run.add_argument(\n '-d', '--debug',\n action='store_true',\n help='Enable flask debug mode.')\n parser_run.add_argument(\n '--host', type=str,\n help='Host (binding address). Default: localhost')\n parser_run.add_argument(\n '--port', type=int,\n help='Port to listen on. 
Default: 8888')\n parser_run.set_defaults(func=_run)\n\n # This is a hack, as argparse itself does not\n # allow to parse only --version without any\n # of the other required arguments.\n if '--version' in sys.argv:\n print('signac-dashboard', __version__)\n sys.exit(0)\n\n args = parser.parse_args()\n\n if args.debug:\n logger.setLevel(logging.DEBUG)\n\n if not hasattr(args, 'func'):\n parser.print_usage()\n sys.exit(2)\n try:\n self.observer.start()\n args.func(args)\n except RuntimeWarning as warning:\n logger.warning(\"Warning: {}\".format(warning))\n if args.debug:\n raise\n sys.exit(1)\n except Exception as error:\n logger.error('Error: {}'.format(error))\n if args.debug:\n raise\n sys.exit(1)\n finally:\n self.observer.stop()\n self.observer.join()", "def main():\n args = utils.parse_arguments()\n logging.basicConfig(level=logging.INFO)\n coloredlogs.install(level=0,\n fmt=\"[%(asctime)s][%(levelname)s] [%(name)s.%(funcName)s:%(lineno)d] %(message)s\",\n isatty=True)\n if args.debug:\n l_level = logging.DEBUG\n else:\n l_level = logging.INFO\n\n logging.getLogger(__package__).setLevel(l_level)\n\n LOG.info('RUNNING TAMAGO WEB')\n serve(app, port=8080, host='0.0.0.0')", "def application(self, environ, start_response):\n uri = environ['PATH_INFO'].encode('latin-1').decode()\n is_test = request_uri(environ) == BASE_NAME + uri\n\n # Guess the file type required\n if re.match(\".*\\.html\", uri):\n mime = \"html\"\n elif re.match(\".*\\.rdf\", uri):\n mime = \"pretty-xml\"\n elif re.match(\".*\\.ttl\", uri):\n mime = \"turtle\"\n elif re.match(\".*\\.nt\", uri):\n mime = \"nt\"\n elif re.match(\".*\\.json\", uri):\n mime = \"json-ld\"\n elif 'HTTP_ACCEPT' in environ:\n if (SPARQL_PATH and\n (uri == SPARQL_PATH or uri == (SPARQL_PATH+\"/\"))):\n mime = self.best_mime_type(environ['HTTP_ACCEPT'],\n \"sparql-json\")\n else:\n mime = self.best_mime_type(environ['HTTP_ACCEPT'], \"html\")\n else:\n mime = \"html\"\n\n # The welcome page\n if uri == \"/\" or uri == \"/index.html\":\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n if not exists(DB_FILE):\n return [self.render_html(DISPLAY_NAME, pystache.render(\n open(resolve(\"html/onboarding.mustache\")).read(),\n {'context': CONTEXT}), is_test)]\n else:\n return [self.render_html(\n DISPLAY_NAME,\n pystache.render(open(resolve(\"html/index.html\")).read(),\n {'property_facets': FACETS, 'context': CONTEXT}),\n is_test).encode('utf-8')]\n # The search page\n elif (SEARCH_PATH and\n (uri == SEARCH_PATH or uri == (SEARCH_PATH + \"/\"))):\n if 'QUERY_STRING' in environ:\n qs_parsed = parse_qs(environ['QUERY_STRING'])\n if 'query' in qs_parsed:\n query = qs_parsed['query'][0]\n if 'property' in qs_parsed:\n prop = qs_parsed['property'][0]\n else:\n prop = None\n if 'offset' in qs_parsed:\n offset = int(qs_parsed['offset'][0])\n else:\n offset = 0\n return self.search(start_response, query, prop, offset)\n else:\n return self.send400(start_response, YZ_NO_RESULTS)\n else:\n return self.send400(start_response, YZ_NO_QUERY)\n # The dump file\n elif uri == DUMP_URI:\n start_response('200 OK', [('Content-type', 'appliction/x-gzip'),\n ('Content-length',\n str(os.stat(DUMP_FILE).st_size))])\n return [open(resolve(DUMP_FILE), \"rb\").read()]\n # The favicon (i.e., the logo users see in the\n # browser next to the title)\n elif (uri.startswith(\"/favicon.ico\") and\n exists(resolve(\"assets/favicon.ico\"))):\n start_response(\n '200 OK', [('Content-type', 'image/png'),\n ('Content-length',\n str(os.stat(\n 
resolve(\"assets/favicon.ico\")).st_size))])\n return [open(resolve(\"assets/favicon.ico\"), \"rb\").read()]\n # Any assets requests\n elif uri.startswith(ASSETS_PATH) and exists(resolve(uri[1:])):\n start_response(\n '200 OK', [('Content-type', mimetypes.guess_type(uri)[0]),\n ('Content-length',\n str(os.stat(resolve(uri[1:])).st_size))])\n x = open(resolve(uri[1:]), \"rb\").read()\n return [x]\n # SPARQL requests\n elif SPARQL_PATH and (uri == SPARQL_PATH or uri == (SPARQL_PATH+\"/\")):\n if 'QUERY_STRING' in environ:\n qs = parse_qs(environ['QUERY_STRING'])\n if 'query' in qs:\n return self.sparql_query(\n qs['query'][0], mime,\n qs.get('default-graph-uri', [None])[0],\n start_response)\n else:\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = open(resolve(\"html/sparql.html\")).read()\n return [self.render_html(\n DISPLAY_NAME,\n s, is_test).encode('utf-8')]\n else:\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = open(resolve(\"html/sparql.html\")).read()\n return [self.render_html(DISPLAY_NAME, s,\n is_test).encode('utf-8')]\n elif LIST_PATH and (uri == LIST_PATH or uri == (LIST_PATH + \"/\")):\n offset = 0\n prop = None\n obj = None\n obj_offset = 0\n if 'QUERY_STRING' in environ:\n qs = parse_qs(environ['QUERY_STRING'])\n if 'offset' in qs:\n try:\n offset = int(qs['offset'][0])\n except ValueError:\n return self.send400(start_response)\n if 'prop' in qs:\n prop = \"<%s>\" % qs['prop'][0]\n if 'obj' in qs:\n obj = qs['obj'][0]\n if 'obj_offset' in qs and re.match(\"\\d+\", qs['obj_offset'][0]):\n obj_offset = int(qs['obj_offset'][0])\n\n return self.list_resources(start_response, offset,\n prop, obj, obj_offset)\n elif METADATA_PATH and (uri == METADATA_PATH or\n uri == (\"/\" + METADATA_PATH) or\n uri == (\"/\" + METADATA_PATH + \".rdf\") or\n uri == (METADATA_PATH + \".rdf\") or\n uri == (\"/\" + METADATA_PATH + \".ttl\") or\n uri == (METADATA_PATH + \".ttl\") or\n uri == (\"/\" + METADATA_PATH + \".nt\") or\n uri == (METADATA_PATH + \".nt\") or\n uri == (\"/\" + METADATA_PATH + \".json\") or\n uri == (METADATA_PATH + \".json\")):\n graph = dataid()\n if mime == \"html\":\n content = self.rdfxml_to_html(graph, BASE_NAME + METADATA_PATH,\n YZ_METADATA, is_test)\n else:\n try:\n self.add_namespaces(graph)\n if mime == \"json-ld\":\n content = yuzu.jsonld.write(\n graph, BASE_NAME + id)\n else:\n content = graph.serialize(format=mime).decode('utf-8')\n except Exception as e:\n print (e)\n return self.send501(start_response)\n start_response(\n '200 OK',\n [('Content-type', self.mime_types[mime] + \"; charset=utf-8\"),\n ('Vary', 'Accept'), ('Content-length', str(len(content)))])\n return [content.encode('utf-8')]\n elif exists(resolve(\"html/%s.html\" % re.sub(\"/$\", \"\", uri))):\n start_response('200 OK', [('Content-type',\n 'text/html; charset=utf-8')])\n s = pystache.render(open(resolve(\n \"html/%s.html\" % re.sub(\"/$\", \"\", uri))).read(),\n {'context': CONTEXT,\n 'dump_uri': DUMP_URI})\n return [self.render_html(DISPLAY_NAME, s,\n is_test).encode('utf-8')]\n # Anything else is sent to the backend\n elif re.match(\"^/(.*?)(|\\.nt|\\.html|\\.rdf|\\.ttl|\\.json)$\", uri):\n id, _ = re.findall(\n \"^/(.*?)(|\\.nt|\\.html|\\.rdf|\\.ttl|\\.json)$\", uri)[0]\n graph = self.backend.lookup(id)\n if graph is None:\n return self.send404(start_response)\n labels = sorted([str(o) for s, p, o in\n graph.triples(\n (URIRef(BASE_NAME + id), RDFS.label, None))])\n if labels:\n title = ', '.join(labels)\n else:\n title = 
DISPLAYER.uri_to_str(BASE_NAME + id)\n if mime == \"html\":\n content = self.rdfxml_to_html(graph, BASE_NAME + id, title,\n is_test)\n else:\n try:\n self.add_namespaces(graph)\n if mime == \"json-ld\":\n content = yuzu.jsonld.write(\n graph, BASE_NAME + id)\n else:\n content = graph.serialize(format=mime).decode('utf-8')\n except Exception as e:\n print (e)\n return self.send501(start_response)\n start_response(\n '200 OK',\n [('Content-type', self.mime_types[mime] + \"; charset=utf-8\"),\n ('Vary', 'Accept'), ('Content-length', str(len(content)))])\n return [content.encode('utf-8')]\n else:\n return self.send404(start_response)", "def main(**settings):\n # Pyramid requires an authorization policy to be active.\n # Enable JWT authentication.\n all_routes = []\n for route in routes:\n if route not in all_routes:\n all_routes.append(route)\n config.add_route(*route)\n print route\n else:\n print \"Found conflicting routes, ignoring \"\n print route\n config.scan('app.base.api.main')\n return CORS(config.make_wsgi_app(), headers=\"*\", methods=\"*\", origin=\"*\")", "def main(global_config, **settings):\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n config = Configurator(settings=settings)\n config.add_static_view('static', 'static', cache_max_age=0)\n\n config.add_route('view', '/view')\n config.add_route('view_robot', '/view/r{robot_number:\\d+}')\n\n config.add_route('scout', '/')\n config.add_route('scout_robot', '/r{robot_number:\\d+}')\n# config.add_route('scout_match', '/m{match_number:\\d+}')\n config.add_route('scout_robot_match',\n '/r{robot_number:\\d+}m{match_number:\\d+}')\n config.scan()\n return config.make_wsgi_app()", "def analysis_dash(flask_app):\n dash_app = dash.Dash(server=flask_app, title='Analysis', assets_folder='../dash_app/assets',\n routes_pathname_prefix=\"/analysis_dash/\",\n external_stylesheets=[dbc.themes.BOOTSTRAP, FA])\n\n ## Creating the app layout\n dash_app.layout = html.Div(id='page_content', children=[\n html.Div(id='analysis_page'),\n html.Header(nav_buttons()),\n html.Main(analysis_layout()),\n ])\n dash_callback(dash_app)\n return dash_app.server", "def run(self):\n server = CherryPyWSGIServer(\n (self.options['host'], int(self.options['port'])),\n WSGIPathInfoDispatcher({\n '/': WSGIHandler(),\n settings.ADMIN_MEDIA_PREFIX: MediaHandler(\n os.path.join(admin.__path__[0], 'media'))\n }),\n int(self.options['threads']), self.options['host'],\n request_queue_size=int(self.options['request_queue_size']))\n try:\n server.start()\n except KeyboardInterrupt:\n server.stop()", "def app(self):\n return self.__app", "def run():\n return render_template('index.html')", "def main():\n CLI_APP.run()", "def run():\n\n # Construct a server.\n server = wsgiref.simple_server.make_server(\n _config[ 'address' ],\n _config[ 'port' ],\n application\n )\n\n # Run the server.\n server.serve_forever()\n\n # Return result.\n return 0", "def main(global_config, **settings):\n engine = engine_from_config(settings, 'sqlalchemy.')\n DBSession.configure(bind=engine)\n Base.metadata.bind = engine\n config = Configurator(settings=settings)\n config.add_static_view('static', 'static', cache_max_age=3600)\n config.add_route('home', '/')\n config.include(pyramid_beaker)\n config.scan()\n config['safe'] = loadSafe(config['safe_path'])\n return config.make_wsgi_app()", "def main():\n try:\n port = 8080\n ip = '0.0.0.0'\n http_server = WSGIServer((ip, port),\n app,\n log=logging,\n error_log=logging,\n )\n 
print(\"Server started at: {0}:{1}\".format(ip, port))\n http_server.serve_forever()\n except Exception as exc:\n logger.error(exc.message)\n logger.exception(traceback.format_exc())\n finally:\n # Do something here\n pass", "def __call__(self, *args, **kwargs):\n with app.app_context(): # pragma: no cover\n return self.run(*args, **kwargs)", "def index():\n print('This is the root of the app, should have something better')\n return 'Root, this is where some front end would go on a server'", "def start():\n app.run()", "def index() -> str:\n return render_template('index.html', username=getpass.getuser(), hostname=socket.gethostname(),\n manager_host=DASHBOARD_MANAGER_HOST.value.decode() or 'localhost',\n manager_port_nr=DASHBOARD_MANAGER_PORT.value)", "def main():\n return render_template('index.html')", "def main():\n return render_template('index.html')", "def main():\n\n addon_url = sys.argv[0]\n addon_handle = int(sys.argv[1])\n addon_args = urlparse.parse_qs(sys.argv[2][1:])\n\n # Route request to action.\n Plugin(addon_url, addon_handle, addon_args).route()", "def dashboard():\n return render_template(\"admin/dashboard.html\", title=\"Dashboard\")", "def main(gloabl_config, **settings):\n if os.environ.get('DATABASE_URL', ''):\n settings['sqlalchemy.url'] = os.environ['DATABASE_URL']\n config = Configurator(settings=settings)\n config.include('pyramid_jinja2')\n config.include('.models')\n config.include('.routes')\n config.include('.security')\n config.scan()\n return config.make_wsgi_app()", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app" ]
[ "0.74744", "0.72382814", "0.69897854", "0.6828364", "0.66787845", "0.6616081", "0.6616081", "0.658992", "0.6578323", "0.65606695", "0.65123874", "0.6492135", "0.64652956", "0.64537275", "0.6407495", "0.6406011", "0.640311", "0.63525355", "0.635047", "0.6333705", "0.6266929", "0.6257466", "0.62568545", "0.6250467", "0.6238659", "0.6233285", "0.62304145", "0.62298185", "0.62247986", "0.62148994", "0.62131226", "0.62131226", "0.6194815", "0.6182473", "0.61813515", "0.61806226", "0.6176558", "0.6174827", "0.61733013", "0.6170572", "0.61645365", "0.6160346", "0.61523604", "0.6150583", "0.61501116", "0.6146311", "0.61349094", "0.61345106", "0.6116884", "0.6104909", "0.60963976", "0.60753787", "0.6074368", "0.60725313", "0.6067168", "0.6061196", "0.605286", "0.604642", "0.60454166", "0.60350585", "0.60212207", "0.60147125", "0.60111594", "0.6008738", "0.6006874", "0.6006849", "0.6002362", "0.59972304", "0.59900004", "0.5986247", "0.59861493", "0.5982529", "0.59764487", "0.5975505", "0.5967883", "0.5960054", "0.5950862", "0.59472185", "0.593986", "0.5936563", "0.59282815", "0.5918578", "0.5917667", "0.5916914", "0.5916759", "0.5913646", "0.59121156", "0.59104204", "0.59102595", "0.5909825", "0.5904404", "0.5894026", "0.5891789", "0.58902", "0.5883613", "0.5883613", "0.588252", "0.5869657", "0.5868132", "0.5862256" ]
0.6479684
12
Runs the command line interface. Call this function to use signac-dashboard from its command line
def main(self): def _run(args): kwargs = vars(args) if kwargs.get('host', None) is not None: self.config['HOST'] = kwargs.pop('host') if kwargs.get('port', None) is not None: self.config['PORT'] = kwargs.pop('port') self.config['PROFILE'] = kwargs.pop('profile') self.config['DEBUG'] = kwargs.pop('debug') self.run() parser = argparse.ArgumentParser( description="signac-dashboard is a web-based data visualization " "and analysis tool, part of the signac framework.") parser.add_argument( '--debug', action='store_true', help="Show traceback on error for debugging.") parser.add_argument( '--version', action='store_true', help="Display the version number and exit.") subparsers = parser.add_subparsers() parser_run = subparsers.add_parser('run') parser_run.add_argument( '-p', '--profile', action='store_true', help='Enable flask performance profiling.') parser_run.add_argument( '-d', '--debug', action='store_true', help='Enable flask debug mode.') parser_run.add_argument( '--host', type=str, help='Host (binding address). Default: localhost') parser_run.add_argument( '--port', type=int, help='Port to listen on. Default: 8888') parser_run.set_defaults(func=_run) # This is a hack, as argparse itself does not # allow to parse only --version without any # of the other required arguments. if '--version' in sys.argv: print('signac-dashboard', __version__) sys.exit(0) args = parser.parse_args() if args.debug: logger.setLevel(logging.DEBUG) if not hasattr(args, 'func'): parser.print_usage() sys.exit(2) try: self.observer.start() args.func(args) except RuntimeWarning as warning: logger.warning("Warning: {}".format(warning)) if args.debug: raise sys.exit(1) except Exception as error: logger.error('Error: {}'.format(error)) if args.debug: raise sys.exit(1) finally: self.observer.stop() self.observer.join()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cli():\n config, auth, execute_now = read_command_line_arguments()\n main(config, auth, execute_now)", "def cli():\n pass", "def main(args):\n cli = CLI()\n # Check arguments\n cli.parse_arguments(args)", "def main_cli():\n pass", "def main():\n CLI_APP.run()", "def cli():\r\n pass", "def cli():\n parser=argparse.ArgumentParser(\n description = 'Rotate through a given AWS account for per application keys. Keys are temporarily loaded into environment variables. Asks for a SSO cookie value.')\n parser.add_argument('role', help = 'Role to harvest session keys as')\n parser.add_argument(\n '-c', '--command', help = 'Custom command to run.', default = None)\n parser.add_argument('-a', '--application',\n help = 'Provide a specific application', default = None)\n parser.add_argument(\n '-l', '--list', help = 'Provide a list of applications. Lists should be one Application#,Application Name per line', default = None)\n parser.add_argument(\n '-p', '--awspx', help = 'Run awspx across all applications. Install from https://github.com/FSecureLABS/awspx', action=argparse.BooleanOptionalAction, default = False)\n parser.add_argument(\n '-s', '--scoutsuite', help = 'Run ScoutSuite across all applications. Install from https://github.com/nccgroup/ScoutSuite', action=argparse.BooleanOptionalAction, default = False)\n args=parser.parse_args()\n\n print(\"Please provide an SSO cookie value. Obtain from the dev console on a web browser, probably named something like x-amz-sso_authn\")\n token=input()\n\n return args.role, args.list, args.application, args.command, token, args.awspx, args.scoutsuite", "def cli():\n logger.debug('cli() called')", "def cli():\n\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n pass", "def cli():\n return", "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-a\")\n args = parser.parse_args()\n\n try:\n account_id = args.a\n if(not account_id):\n print('Please specify an account id with -a')\n return\n if not Helpers().isActiveAccount(account_id):\n Log(\"info\", \"this account isn't active. 
Exiting\", '', account_id)\n return\n NotifyViaEmail(account_id).main()\n MonthlyStop(account_id)\n \n ControlSpend(account_id).main()\n \n # TODO: catch proper exception\n except:\n Log(\"error\", \"error starting run_budget_commander.py from command line\", traceback.format_exc())\n # TODO: return proper exception\n raise", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def cli():", "def main():\n arguments = docopt(__doc__, version=VERSION)\n\n # Handle the configure as a special case -- this way we won't get invalid\n # API credential messages when we're trying to configure stormpath-export.\n if arguments['configure']:\n configure()\n return\n\n exporter = StormpathExport(arguments['<base_url>'])\n exporter.export(arguments['<location>'])", "def main(*args):\n try:\n cli()\n except HassReleaseError as err:\n click.secho(\"An error occurred: {}\".format(err), fg=\"red\")", "def cli(args): # noqa; pylint: disable=unused-argument", "def cli() -> None:", "def cli() -> None:", "def main():\n\tcli = Cli()\n\tcli.run()", "def launch_cli() -> None:\n app.run(main, flags_parser=_parse_flags)", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli() -> None:\n pass", "def cli():\n ...", "def main():\n parser = argparse.ArgumentParser(description='Creates a Mist site within your organization')\n parser.add_argument('config', metavar='config_file', type=argparse.FileType(\n 'r'), help='file containing all the configuration information')\n args = parser.parse_args()\n configs = json.load(args.config)\n\n claim_ap(configs)", "def _cli():\n pass", "def cmd_entry():\n import argparse\n\n parser = argparse.ArgumentParser(\n description=\"Web based frontend to the health record system databaser\"\n )\n parser.add_argument('-c', '--config', required=True, help=\"Config file to load\")\n args = parser.parse_args()\n\n main(args.config)", "def main():\n # Set up the command line options\n creds = Credentials(['apic', 'nosnapshotfiles'],\n description=(\"This application replicates the switch \"\n \"CLI command 'show interface fex'\"))\n creds.add_argument('-s', '--switch',\n type=str,\n default=None,\n help='Specify a particular switch id, e.g. \"101\"')\n creds.add_argument('-i', '--interface',\n type=str,\n default=None,\n help='Specify a particular interface id, e.g. \"eth1/10\"')\n creds.add_argument('-b', '--brief',\n action='store_true',\n help='Display a brief summary')\n args = creds.get()\n\n interface_collector = InterfaceCollector(args.url, args.login, args.password)\n\n if args.brief:\n interface_collector.show_brief(node=args.switch, intf_id=args.interface)\n else:\n print 'detailed view is still under development...try brief view instead'", "def main():\n log(\"NGG CLI\", color=\"green\", figlet=True)\n log(\"Welcome to NGG CLI!\", \"yellow\")", "def cli(log_level):\n util.setup_logger(log_level)\n # if not util.check_in_path('aria2c'):\n # raise logger.ERROR(\"Aria2 is not in path. 
Please follow installation instructions: https://github.com/vn-ki/anime-downloader/wiki/Installation\")", "def main(self):\n cmd, path, args = self._parse_args()\n if cmd == \"shell\":\n print \"You are now in ubs shell.\"\n print \"Use \\\"python %s help\\\" to see other choice.\" % sys.argv[0]\n self.shell()\n elif cmd == \"help\":\n self.print_path_help(path)\n sys.exit(0) \n elif cmd == \"run\":\n self.route(path, args)\n else:\n raise Exception(\"unknown CMD %s\" % cmd)", "def cli_main(*cli_args):\n return runner.invoke(main, cli_args)", "def main(args):\n options = parse_cmd_parameters_(args)\n execute_(options)", "def main():\n parser = argparse.ArgumentParser()\n # Set the default entrypoint for nothing.\n parser.set_defaults(func=lambda x: None)\n # Configure the CLI for this script.\n appsec_wtf.cli.exec_poc.set_cli_opts(parser)\n\n # Parse the CLI arguments.\n args = parser.parse_args()\n # Execute the entry point of the command being executed.\n args.func(args)", "def cli(ctx):\n pass" ]
[ "0.73368174", "0.6704575", "0.66263586", "0.6620892", "0.65879875", "0.65856606", "0.6576689", "0.6504645", "0.64861155", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6461405", "0.6443124", "0.6436074", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.6425422", "0.641188", "0.6410708", "0.63924044", "0.63189113", "0.63189113", "0.62600976", "0.61968356", "0.6194015", "0.6194015", "0.6194015", "0.6194015", "0.6194015", "0.61928475", "0.61583567", "0.61117357", "0.61029184", "0.6068511", "0.60519254", "0.6007188", "0.5996664", "0.5978517", "0.59644467", "0.59633434", "0.5929731" ]
0.70329654
1
Compresses a tensor with the given compression context, and then returns it with the context needed to decompress it.
def compress(self, tensor):
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def decompress(self, tensor, ctx, *args, **kwargs):\n return tensor", "def compress(self, tensor, *args, **kwargs):\n return tensor, None", "def compress(self, tensor, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor_decompressed = tensor\n dtype = ctx\n if 'float' in str(dtype):\n tensor_decompressed = tensor.astype(dtype, copy=False)\n return tensor_decompressed", "def decompress(self, tensor, ctx, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor = self.compressor.decompress(tensor, ctx, *args, **kwargs)\n \n # uncompressed gradients need to do nag explicitly\n if not self.inited:\n if size(tensor.shape) < self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.nag = True\n self.inited = True\n\n if self.nag:\n self.mom += tensor\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n return tensor", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed = tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def decompress(self, tensor, ctx, *args, **kwargs):\n if \"x\" not in kwargs:\n raise ValueError(\"x is missing\")\n\n x = kwargs[\"x\"].astype(tensor.dtype, copy=False) \n \n if not self.inited:\n self.cache = nd.zeros_like(tensor)\n if size(tensor.shape) >= self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.wdmom = True\n self.inited = True\n \n # weight decay\n nd._internal._mul_scalar(x, self.wd, out=self.cache)\n\n # weight decay momentum\n if self.wdmom:\n self.mom += self.cache\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n tensor += self.cache\n return self.compressor.decompress(tensor, ctx, *args, **kwargs)", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def decompress(self, tensors):", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def decompress(args):\n # Three integers for tensor shapes + nine encoded strings.\n np_dtypes = [np.integer] * 3 + [np.bytes_] * 9\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n arrays = packed.unpack_from_np_dtypes(np_dtypes)\n\n # Build model and restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n curr_decoded = model.decompress(arrays)\n row=int(args.input_file.split('/')[-1].split('.')[0])\n\n # Write reconstructed images out as PNG files.\n for col in range(np.shape(curr_decoded)[1]):\n img = curr_decoded[0,col,:,:,:]/255\n save_img(args.output_file,0,img,row,col+1)", "def create_compressed_model(\n model: Module,\n config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]] = None,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n dump_graphs=True,\n) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"The model object has already been 
compressed.\\n\"\n \"NNCF for PyTorch modifies the model object in-place, and repeat calls to \"\n \"`nncf.torch.create_compressed_model` with the same model object passed as argument \"\n \"will lead to an incorrect attempt to compress the model twice.\\n\"\n \"Make sure that the model object you are passing has not already been compressed (for \"\n \"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\\n\"\n \"If you are encountering this in a Jupyter notebook context - make sure that when \"\n \"re-running cells involving `nncf.torch.create_compressed_model` the original model object \"\n \"is also re-created (via constructor call).\"\n )\n\n if config.get(\"target_device\") == \"VPU\":\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n set_debug_log_dir(config.get(\"log_dir\", \".\"))\n\n is_legacy_model_state_dict = (\n compression_state is not None\n and BaseController.BUILDER_STATE not in compression_state\n and BaseController.CONTROLLER_STATE not in compression_state\n )\n maybe_convert_legacy_names_in_compress_state(compression_state)\n\n should_init = compression_state is None\n\n nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)\n\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"original_graph.dot\"))\n builder = create_compression_algorithm_builder(config, should_init)\n\n is_state_loadable = not is_legacy_model_state_dict and compression_state is not None\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])\n\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n\n # Required to ensure that the model leaving create_compressed_model has correct compressed graph.\n # In particular, this is currently required for correct functioning of RNNs.\n compressed_model.nncf.rebuild_graph()\n\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state # pylint: disable=cyclic-import\n\n state_dict_to_load = compression_state.get(\"state_dict\", compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"compressed_graph.dot\"))\n\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model", "def gzdeflate():\n return zlib.compress(val)", "def compress(self, *args):\n return _osgAnimation.Vec3Packed_compress(self, *args)", "def compress(condition, a, axis=None, out=None):\n return a.compress(condition, axis, out)", "def Compress(indata, algo, with_header=True):\n if algo == 'none':\n return indata\n fname = GetOutputFilename('%s.comp.tmp' % algo)\n WriteFile(fname, indata)\n if algo == 'lz4':\n data = Run('lz4', '--no-frame-crc', '-c', fname, binary=True)\n # cbfstool uses a very old version of lzma\n elif algo == 'lzma':\n outfname = GetOutputFilename('%s.comp.otmp' % algo)\n Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')\n data = ReadFile(outfname)\n elif algo == 'gzip':\n data = Run('gzip', '-c', fname, 
binary=True)\n else:\n raise ValueError(\"Unknown algorithm '%s'\" % algo)\n if with_header:\n hdr = struct.pack('<I', len(data))\n data = hdr + data\n return data", "def compress(emb):\n if params.sum_word_vecs:\n return np.sum(emb, axis=0)\n if params.max_pool_word_vecs:\n return np.amax(emb, axis=0)\n if params.concat_word_vecs:\n return concat_word_vecs(emb, params.max_transcript_len)\n if params.avg_word_vecs:\n return np.mean(emb, axis=0)", "def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def apply_compressed_sensing(self, inputs, rng):\n print('using compressed sensing!')\n train_path = os.path.join(\n self.data_dir, 'assist{0}-{1}'.format(self.which_year, 'train'))\n\n if self.which_set == 'test':\n loaded = np.load(train_path + '-compression-matrix.npz')\n self.compress_matrix = loaded['compress_matrix']\n self.compress_dim = self.compress_matrix.shape[1]\n elif self.which_set == 'train':\n self.compress_matrix = self.make_compression_matrix(train_path, rng)\n\n inputs = self.compress_inputs(inputs)\n return inputs", "def _get_compressed(self):\n assert self.compression_type != CompressionType.NONE\n tmp_mset = MessageSet(messages=self._messages)\n uncompressed = bytearray(len(tmp_mset))\n tmp_mset.pack_into(uncompressed, 0)\n if self.compression_type == CompressionType.GZIP:\n compressed = compression.encode_gzip(buffer(uncompressed))\n elif self.compression_type == CompressionType.SNAPPY:\n compressed = compression.encode_snappy(buffer(uncompressed))\n else:\n raise TypeError(\"Unknown compression: %s\" % self.compression_type)\n return Message(compressed, compression_type=self.compression_type)", "def compress_weights(model: torch.nn.Module, use_fake_quantize: bool = False) -> torch.nn.Module:\n compressed_model, _ = replace_modules_by_nncf_modules(model)\n insert_pre_compression_operations(model, use_fake_quantize)\n\n return compressed_model", "def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content", "def get_composite_image(\n self,\n labels=None,\n compress_dim=300,\n num_channels=3,\n num_of_images=\"all\",\n sample=False,\n reverse=False,\n ):\n compressed_img_dict = {}\n img_data = self.image_data.rgb_dict\n if not labels:\n labels = img_data.keys()\n for label in labels:\n self.log.info(label + \" is being compressed.\")\n total_images = len(img_data[label])\n if num_of_images == \"all\":\n vectors = img_data[label]\n elif type(num_of_images) == int:\n vectors = img_data[label]\n if sample:\n vectors = random.sample(vectors, num_of_images)\n if reverse:\n vectors.reverse()\n vectors = vectors[0:num_of_images]\n\n compressed_img_dict[label] = np.zeros(\n (compress_dim, compress_dim, num_channels)\n )\n compressed_img_dict[label] = np.sum(vectors, axis=0) / (1.0 * len(vectors))\n\n self.compressed_img_dict = compressed_img_dict\n return compressed_img_dict", "def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = cStringIO.StringIO()\r\n zfile = 
gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()", "def compressed_image(self):\n return self._compressed_image", "def make_compression_matrix(self, train_path, rng):\n self.compress_dim = 100 # value used in original DKT paper\n if rng:\n compress_matrix = rng.randn(self.encoding_dim, self.compress_dim)\n else:\n compress_matrix = np.random.randn(self.encoding_dim, self.compress_dim)\n\n np.savez(train_path + '-compression-matrix', compress_matrix=compress_matrix)\n return compress_matrix", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def compress(self, src, dst):\n info = readelf_get_info(src)\n starting_size = os.path.getsize(src)\n if starting_size != info[\"size\"]:\n raise RuntimeError(\"size of file '%s' differs from header claim: %i != %i\" %\n (src, starting_size, info[\"size\"]))\n rfd = open(src, \"rb\")\n wfd = open(dst, \"wb\")\n data = rfd.read(starting_size)\n wfd.write(data[info[\"entry\"]:])\n rfd.close()\n wfd.close()\n self.__uncompressed_size = len(data) - info[\"entry\"]\n if is_verbose():\n print(\"Wrote compressable program block '%s': %i bytes\" % (dst, self.__uncompressed_size))\n self.__contexts = []\n self.__weights = []\n (so, se) = run_command([self.__command, dst])\n lines = so.split(\"\\n\")\n for ii in lines:\n terms = ii.split()\n if terms and terms[0].startswith(\"Final\"):\n compressed_size = int(terms[1])\n for jj in terms[2:]:\n individual_term = jj.split(\"*\")\n self.__weights += [int(individual_term[0], 10)]\n self.__contexts += [int(individual_term[1], 16)]\n if is_verbose():\n print(\"Program block compressed into '%s': %i bytes\" % (dst + \".pack\", compressed_size))\n print(\"Compression weights: %s\" % (str(self.__weights)))\n print(\"Compression contexts: %s\" % (str(self.__contexts)))\n rfd = open(dst + \".pack\", \"rb\")\n compressed_contexts = []\n compressed_weights = []\n uncompressed_size = rfd.read(4)\n uncompressed_size = (struct.unpack(\"I\", uncompressed_size))[0]\n if uncompressed_size != self.__uncompressed_size:\n raise RuntimeError(\"size given to packer does not match size information in file: %i != %i\" %\n (self.__uncompressed_size, uncompressed_size))\n context_count = rfd.read(1)\n context_count = (struct.unpack(\"B\", context_count))[0]\n for ii in range(context_count):\n compressed_weights += struct.unpack(\"B\", rfd.read(1))\n for ii in range(context_count):\n compressed_contexts += struct.unpack(\"B\", rfd.read(1))\n if compressed_contexts != self.__contexts:\n raise RuntimeError(\"contexts reported by packer do not match context information in file: %s != %s\" %\n (str(self.__contexts), str(compressed_contexts)))\n if compressed_weights != self.__weights:\n raise RuntimeError(\"weights reported by packer do not match weight information in file: %s != %s\" %\n (str(self.__weights), str(compressed_weights)))\n read_data = rfd.read()\n rfd.close()\n if len(read_data) != compressed_size:\n raise RuntimeError(\"size reported by packer does not match length of file: %i != %i\" %\n (compressed_size, len(read_data)))\n self.__data = []\n for ii in read_data:\n self.__data += struct.unpack(\"B\", ii)", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", \"--format=lzma\", 
\"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def compress_to_tgz(in_path, tgz_fp):\n t = tarfile.open(name = tgz_fp, mode = 'w:gz')\n t.add(in_path, path.basename(in_path))\n t.close()", "def compressible(f):\n @wraps(f)\n def compressor(*args, **kwargs):\n @flask.after_this_request\n def compress(response):\n if (response.status_code < 200 or\n response.status_code >= 300 or\n 'Content-Encoding' in response.headers):\n # Don't encode anything other than a 2xx response\n # code. Don't encode a response that's\n # already been encoded.\n return response\n\n accept_encoding = flask.request.headers.get('Accept-Encoding', '')\n if not 'gzip' in accept_encoding.lower():\n return response\n\n # At this point we know we're going to be changing the\n # outgoing response.\n\n # TODO: I understand what direct_passthrough does, but am\n # not sure what it has to do with this, and commenting it\n # out doesn't change the results or cause tests to\n # fail. This is pure copy-and-paste magic.\n response.direct_passthrough = False\n\n buffer = BytesIO()\n gzipped = gzip.GzipFile(mode='wb', fileobj=buffer)\n gzipped.write(response.data)\n gzipped.close()\n response.data = buffer.getvalue()\n\n response.headers['Content-Encoding'] = 'gzip'\n response.vary.add('Accept-Encoding')\n response.headers['Content-Length'] = len(response.data)\n\n return response\n\n return f(*args, **kwargs)\n return compressor", "def compress(path, path_out, terms, iterations, annotate, silent):\n if terms is None:\n terms = DEFAULT_TERMS\n\n if not silent:\n print(f\"Compressing image...\")\n\n result = compress_image_to_file(path=path, terms=terms,\n iterations=iterations,\n path_out=path_out,\n annotate=annotate)\n\n output_path = result['output_path']\n\n if not silent:\n print(f\"Compressed to:\\n{output_path}\")\n print(f\"Terms in singular value expansion: {terms}\")\n print(f\"Power method iterations: {result['iterations']}\")\n print(f\"Compression ratio: {result['compression_ratio']}\")\n\n return result", "def non_local_block(tensor, intermediate_dim=None, compression=2,\r\n mode='embedded', add_residual=True):\r\n ip_shape = tensor.shape\r\n\r\n if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:\r\n raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')\r\n\r\n if compression is None:\r\n compression = 1\r\n\r\n dim1, dim2, dim3 = None, None, None\r\n\r\n if len(ip_shape) == 4: # spatial / image data\r\n batchsize, dim1, dim2, channels = ip_shape\r\n else:\r\n raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')\r\n\r\n # verify correct intermediate dimension specified\r\n if intermediate_dim is None:\r\n intermediate_dim = 
channels // 2\r\n\r\n if intermediate_dim < 1:\r\n intermediate_dim = 1\r\n\r\n else:\r\n intermediate_dim = int(intermediate_dim)\r\n\r\n if intermediate_dim < 1:\r\n raise ValueError('`intermediate_dim` must be either `None` or positive integer greater than 1.')\r\n theta = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(tensor)\r\n theta = K.reshape(theta,[-1, intermediate_dim])\r\n\r\n # phi path\r\n phi = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(tensor)\r\n phi = K.reshape(phi,[-1, intermediate_dim])\r\n\r\n if compression > 1:\r\n # shielded computation\r\n phi = MaxPooling1D(compression)(phi)\r\n\r\n f = K.dot(theta, phi)\r\n f = Activation('softmax')(f)\r\n\r\n # g path\r\n g = Conv2D(filters=intermediate_dim, kernel_size=(1, 1), strides=(\r\n 1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(tensor)\r\n g = K.reshape(g, [-1, intermediate_dim])\r\n\r\n if compression > 1 and mode == 'embedded':\r\n # shielded computation\r\n g = MaxPooling1D(compression)(g)\r\n\r\n # compute output path\r\n y = K.dot(f, g)\r\n\r\n # reshape to input tensor format\r\n y = K.reshape(y,[intermediate_dim, dim1, dim2])\r\n\r\n # project filters\r\n y = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(y)\r\n\r\n # residual connection\r\n if add_residual:\r\n y = add([tensor, y])\r\n\r\n return y", "def compress(self, *args):\n return _osgAnimation.Vec3ArrayPacked_compress(self, *args)", "def __handle_decompression(self, x):\n if self.__compress:\n return zlib.decompress(x)\n return x", "def compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def image_compress(img):\n img_mode = img.mode\n img_size = img.size\n img = img.tobytes()\n zlib.compress(img)\n\n return img_mode, img_size, img", "def get_compress_and_decompress_func(compression_algorithm, compression_level=9):\n # type: (str, int) -> Tuple[Callable, Callable]\n if compression_algorithm in [\"deflate\", \"zlib\"]:\n import zlib\n\n if sys.version_info < (3, 6, 0):\n # Work around for Python <= 3.6 where compress is not a keyword argument, but a regular\n # argument\n @functools.wraps(zlib.compress)\n def compress_func(data):\n return zlib.compress(data, compression_level)\n\n else:\n compress_func = functools.partial(zlib.compress, level=compression_level) # type: ignore\n decompress_func = zlib.decompress # type: ignore\n elif compression_algorithm == \"bz2\":\n import bz2\n\n @functools.wraps(bz2.compress)\n def compress_func(data):\n return bz2.compress(data, compression_level)\n\n decompress_func = bz2.decompress # type: ignore\n elif compression_algorithm == \"zstandard\":\n import zstandard\n\n compressor = zstandard.ZstdCompressor(level=compression_level)\n decompressor = zstandard.ZstdDecompressor()\n compress_func = compressor.compress # type: ignore\n decompress_func = decompressor.decompress # type: ignore\n elif compression_algorithm == \"lz4\":\n import lz4.frame as lz4 # pylint: disable=no-name-in-module\n\n # NOTE: Java implementation which we currently use on the server side doesn't support\n # dependent block stream.\n # See https://github.com/Parsely/pykafka/issues/914 for 
details\n def compress_func(data):\n try:\n # For lz4 >= 0.12.0\n return lz4.compress(data, compression_level, block_linked=False)\n except TypeError:\n # For older versions\n # For earlier versions of lz4\n return lz4.compress(data, compression_level, block_mode=1)\n\n decompress_func = lz4.decompress # type: ignore\n elif compression_algorithm == \"snappy\":\n import snappy # pylint: disable=import-error\n\n compress_func = snappy.compress # type: ignore\n decompress_func = snappy.decompress # type: ignore\n elif compression_algorithm == \"brotli\":\n import brotli # pylint: disable=import-error\n\n compress_func = functools.partial(brotli.compress, quality=compression_level) # type: ignore\n decompress_func = brotli.decompress # type: ignore\n elif compression_algorithm == \"none\":\n compress_func = noop_compress\n decompress_func = noop_decompress # type: ignore\n else:\n raise ValueError(\"Unsupported algorithm: %s\" % (compression_algorithm))\n\n return compress_func, decompress_func", "def compress():\n local('python manage.py compress \\\n --settings={{ project_name }}.settings.production')", "def include_compression(nets, compression='none', linear_max=796.87416837456942, input_node_name='cochleagram_no_compression', output_node_name='cochleagram', linear_params=None, rate_level_kwargs={}, custom_compression_op=None):\n # compression of the cochleagram\n if compression=='quarter':\n nets[output_node_name] = tf.sqrt(tf.sqrt(nets[input_node_name], name=output_node_name))\n elif compression=='quarter_plus':\n nets[output_node_name] = tf.sqrt(tf.sqrt(nets[input_node_name]+1e-01, name=output_node_name))\n elif compression=='point3':\n nets[output_node_name] = tf.pow(nets[input_node_name],0.3, name=output_node_name)\n elif compression=='stable_point3':\n nets[output_node_name] = tf.identity(stable_power_compression(nets[input_node_name]*linear_max),name=output_node_name) \n elif compression=='stable_point3_norm_grads':\n nets[output_node_name] = tf.identity(stable_power_compression_norm_grad(nets[input_node_name]*linear_max),name=output_node_name) \n elif compression=='linearbelow1':\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, tf.pow(nets[input_node_name]*linear_max,0.3), name=output_node_name)\n elif compression=='stable_linearbelow1':\n nets['stable_power_compressed_%s'%output_node_name] = tf.identity(stable_power_compression(nets[input_node_name]*linear_max),name='stable_power_compressed_%s'%output_node_name)\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, nets['stable_power_compressed_%s'%output_node_name], name=output_node_name)\n elif compression=='linearbelow1sqrt':\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, tf.sqrt(nets[input_node_name]*linear_max), name=output_node_name)\n elif compression=='quarter_clipped':\n nets[output_node_name] = tf.sqrt(tf.sqrt(tf.maximum(nets[input_node_name],1e-01), name=output_node_name))\n elif compression=='none':\n nets[output_node_name] = nets[input_node_name]\n elif compression=='sqrt':\n nets[output_node_name] = tf.sqrt(nets[input_node_name], name=output_node_name)\n elif compression=='dB': # NOTE: this compression does not work well for the backwards pass, results in nans\n nets[output_node_name + '_noclipped'] = 20 * tflog10(nets[input_node_name])/tf.reduce_max(nets[input_node_name])\n nets[output_node_name] = tf.maximum(nets[output_node_name + 
'_noclipped'], -60)\n elif compression=='dB_plus': # NOTE: this compression does not work well for the backwards pass, results in nans\n nets[output_node_name + '_noclipped'] = 20 * tflog10(nets[input_node_name]+1)/tf.reduce_max(nets[input_node_name]+1)\n nets[output_node_name] = tf.maximum(nets[output_node_name + '_noclipped'], -60, name=output_node_name)\n elif compression=='linear':\n assert (type(linear_params)==list) and len(linear_params)==2, \"Specifying linear compression but not specifying the compression parameters in linear_params=[m, b]\"\n nets[output_node_name] = linear_params[0]*nets[input_node_name] + linear_params[1]\n elif compression=='rate_level':\n nets[output_node_name] = AN_rate_level_function(nets[input_node_name], name=output_node_name, **rate_level_kwargs)\n elif compression=='custom':\n nets[output_node_name] = custom_compression_op(nets[input_node_name], name=output_node_name)\n\n return nets", "def compress(block):\n\n # Transform RGB to YCbCr\n yc_bl = np.zeros((8, 8, 3), dtype=np.int8)\n \n for i in range(8):\n for j in range(8):\n rgb_cmp = np.asmatrix(block[i][j])\n y,cb,cr = (np.array((rgb_cmp*yc_mat+yc_pad).astype(np.uint8))[0]-128).astype(np.int8)\n yc_bl[i][j] = np.array([y, cb, cr])\n \n # Switch YCbCr block to 3 block for each Y, Cb, Cr component and calculate DCT for them\n y_dct = sf.dct(yc_bl[:,:,0], norm='ortho')\n cb_dct = sf.dct(yc_bl[:,:,1], norm='ortho')\n cr_dct = sf.dct(yc_bl[:,:,2], norm='ortho')\n \n # From DCT data to quantization data\n y_quant = np.round(y_dct / quant_tbl).astype(np.int8)\n cb_quant = np.round(cb_dct / quant_tbl).astype(np.int8)\n cr_quant = np.round(cr_dct / quant_tbl).astype(np.int8)\n \n # Convert 8x8 block to zigzag 1x64 block\n y_zz = zig_zag(y_quant)\n cb_zz = zig_zag(cb_quant)\n cr_zz = zig_zag(cr_quant)\n \n # Calc DC and AC, put together to list\n y_cmp, cb_cmp, cr_cmp = dc_and_ac_calc(y_zz, cb_zz, cr_zz)\n \n # Encode using entropy coding\n y_encode = encode(y_cmp)\n cb_encode = encode(cb_cmp)\n cr_encode = encode(cr_cmp)\n \n return [y_encode, cb_encode, cr_encode]", "def optimize(data):\n try:\n optimized_data = tinify.from_buffer(data).to_buffer()\n return optimized_data\n except tinify.AccountError as e:\n # This exception may rise, since a Free account is being used (only 500 requests/month)\n logger.error(\"There is a problem with the TinyPNG Account: {0}\".format(e))\n except tinify.ServerError as e:\n logger.error(\"There seem to be problems in the compression server: {0}\".format(e))\n except Exception as e:\n logger.error(\"The image could not be compressed: {0}\".format(e))\n finally:\n return data", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return 
process.communicate()[0]", "def _compress_snapshots(self):\n\n\t\tC_shape = (self._snapshots.shape[1], self._snapshots.shape[0])\n\n\t\tif self.compression_matrix is 'uniform':\n\t\t\tC = np.random.uniform(0, 1, size=(C_shape))\n\t\telif self.compression_matrix is 'sparse':\n\t\t\tC = scipy.sparse.random(*C_shape, density=1.)\n\t\telif self.compression_matrix is 'normal':\n\t\t\tC = np.random.normal(0, 1, size=(C_shape))\n\t\telif self.compression_matrix is 'sample':\n\t\t\tC = np.zeros(C_shape)\n\t\t\tC[np.arange(self._snapshots.shape[1]),\n\t\t\t np.random.choice(*self._snapshots.shape, replace=False)] = 1.\n\t\telse:\n\t\t\tC = self.compression_matrix\n\n\t\t# compress the matrix\n\t\tY = C.dot(self._snapshots)\n\n\t\treturn Y", "def oh_compress(key, seed, blocks, secondary):\n for block, block_size in blocks:\n size_tag = block_size % (CHUNK_SIZE * BLOCK_SIZE)\n tag = (seed ^ size_tag) * W\n yield oh_compress_one_block(key, block, tag, secondary)", "def compress(value):\n\t# type: (Any, ) -> Any\n\n\t# sets are not processed because they cannot contain lists or bytearrays anyway.\n\n\tif isinstance(value, (tuple, list)): # tuple *can* contain mutables\n\t\treturn tuple(compress(x) for x in value)\n\telif isinstance(value, bytearray):\n\t\treturn bytes(value) # bytearray can only be bytes or List[int] right?\n\telif isinstance(value, dict):\n\t\treturn {k: compress(v) for k, v in value.items()}\n\telse:\n\t\treturn value", "def convt_block(layer, concat, fsize, name):\n with tf.variable_scope(name):\n\n layer = tf.layers.conv2d_transpose(layer, filters=fsize, kernel_size=2, strides=2, \n kernel_regularizer=l2_reg(1e-1), name='convt')\n layer = tf.concat([layer, concat], axis=-1, name='concat')\n\n return layer", "def compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content", "def compress(self, x,f1,f2,f3,f4,f5,f6,f7,f8,outputfile,path,row):\n mse, bpp, x_hat, pack = self._run(\"compress\", x=x,feature1=f1,feature2=f2,feature3=f3,feature4=f4,\n feature5=f5,feature6=f6,feature7=f7,feature8=f8)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n tensors, arrays = zip(*pack)\n packed.pack(tensors, arrays)\n with open(outputfile, \"wb\") as f:\n f.write(packed.string)\n\n x *= 255 # x_hat is already in the [0..255] range\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n # The actual bits per pixel including overhead.\n x_shape = tf.shape(x)\n num_pixels = tf.cast(tf.reduce_prod(x_shape[:-1]), dtype=tf.float32)\n packed_bpp = len(packed.string) * 8 / num_pixels\n \n for col in range(np.shape(x_hat)[1]):\n img = x_hat[0,col,:,:,:]/255 \n save_img(path,0,img,row,col+1)\n return x_hat, psnr, msssim, packed_bpp", "def compression_source(self) -> 
CompressionSource:\n return self._compression_source", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()", "def pack_tensor(tensor, bit):\r\n tmp_tensor_shape = list(tensor.shape)\r\n tmp_tensor_shape.append(1)\r\n tmp_tensor = np.zeros(tuple(tmp_tensor_shape), dtype=np.uint8)\r\n tmp_tensor[..., 0] = tensor\r\n\r\n binary_tensor = np.unpackbits(tmp_tensor, axis=-1)\r\n binary_tensor_shape = tmp_tensor_shape\r\n binary_tensor_shape[-1] = 8\r\n\r\n i = find_optimal_compress_dim(tensor.shape, bit)\r\n packed_dim = int(tensor.shape[i] * bit / 8) + (tensor.shape[i] * bit % 8 > 0)\r\n binary_packed_tensor_shape = binary_tensor_shape\r\n binary_packed_tensor_shape[i] = packed_dim\r\n\r\n packed_tensor = np.zeros(tuple(binary_packed_tensor_shape), dtype=np.uint8)\r\n padded_binary_tensor = binary_tensor[..., 8-bit: 8].reshape((-1))\r\n padding_width = packed_tensor.size - padded_binary_tensor.size\r\n padded_binary_tensor = np.pad(padded_binary_tensor, (0, padding_width), 'constant', constant_values=(0, 0))\r\n binary_packed_tensor = padded_binary_tensor.reshape(tuple(binary_packed_tensor_shape))\r\n\r\n packed_tensor = np.packbits(binary_packed_tensor, axis=-1)\r\n packed_tensor = packed_tensor[..., 0]\r\n\r\n return packed_tensor", "def swish_(t: Tensor) -> Tensor:\n ctx = get_current_context()\n g = ctx.graph\n pb_g = g._pb_graph\n\n check_in_graph(g, t=t)\n\n settings = ctx._get_op_settings(\"swish_inplace\")\n op = pb_g.createConnectedOp_SwishInplaceOp(\n {0: t.id}, \n {\n 0: g._create_tensor_id(\"swish_inplace_out\")\n }, \n settings\n )\n\n return Tensor._from_pb_tensor(op.outTensor(0))", "def gzinflate(val):\n return zlib.decompress(val)", "def compress(dbconfig, target_name):\n fmt = dbconfig.get(\"format\", None)\n if fmt in [\"tarball\", \".tar.gz\", \"tar.gz\"]:\n info(\"zipping and compressing \" + target_name)\n output_name = target_name + \".tar.gz\"\n cmd = [\"tar\", \"zcvf\", output_name, target_name]\n subprocess.call(cmd)\n info(\"removing \" + target_name)\n cmd = [\"rm\", \"-r\", target_name]\n subprocess.call(cmd)\n elif fmt in [\".gz\", \"gz\", \"compress\", \"compressed\", \"gzip\", \"gzipped\"]:\n info(\"compressing \" + target_name)\n cmd = [\"gzip\", \"-r\", \"-q\", target_name]\n output_name = target_name + \".gz\"\n 
subprocess.call(cmd)\n else:\n error(\"invalid \\\"compress\\\" setting, should be tarball or compress, \" + target_name)\n output_name = \"\"\n return output_name", "def compress(self, diameter=None, length=None, area=None, x=None, y=None, z=None, origin=0):\r\n self._origin = origin\r\n n = len(self.x)\r\n # Update values of vectors\r\n diameter[:n] = self.diameter\r\n length[:n] = self.length\r\n area[:n] = self.area\r\n x[:n] = self.x\r\n y[:n] = self.y\r\n z[:n] = self.z\r\n # Attributes are now views on these vectors\r\n self.diameter = diameter[:n]\r\n self.length = length[:n]\r\n self.area = area[:n]\r\n self.x = x[:n]\r\n self.y = y[:n]\r\n self.z = z[:n]\r\n for kid in self.children:\r\n kid.compress(diameter=diameter[n:], length=length[n:], area=area[n:], x=x[n:], y=y[n:], z=z[n:], origin=n)\r\n n += len(kid)\r\n self.iscompressed = True", "def compression(s):", "def compression_origin(self) -> CompressionOrigin:\n return self._compression_origin", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def compress(self, P):\n\t\traise Exception(NotImplemented)", "def compression(self) -> str:\n ...", "def compress_and_encrypt(files, password=None, pgp_key=''):\n if pgp_key:\n zipfile = _get_compressed_file(files)\n return _get_encrypted_file(zipfile, pgp_key)\n else:\n return _get_compressed_file(files, password)", "def vcf_compress(fn):\n ret = cmd_exe(f\"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz\")", "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def context(tensor):\n raise NotImplementedError", "def compress_inputs(self, inputs):\n num_students = inputs.shape[0]\n inputs = inputs.toarray()\n inputs = np.dot(inputs.reshape(-1, self.encoding_dim), self.compress_matrix)\n self.encoding_dim = self.compress_dim\n\n return sp.csr_matrix(inputs.reshape(num_students, -1))", "def compress(tlv):\n if not type(tlv) == CCNxTlv:\n raise TypeError(\"tlv must be CCNxTlv\")\n\n encoded = None\n if tlv.length < _length_3_4:\n vle = CCNxCompressorVariableLength.__find_pattern(tlv, _pattern_3_4)\n if vle is not None:\n encoded = CCNxCompressorVariableLength.__compress_pattern_3_4(tlv, vle)\n\n if encoded is None and tlv.length < _length_4_9:\n vle = CCNxCompressorVariableLength.__find_pattern(tlv, _pattern_4_9)\n if vle is not None:\n encoded = CCNxCompressorVariableLength.__compress_pattern_4_9(tlv, vle)\n\n return encoded", "def _compress_file(filename: str, basename: str):\n write_mode = _get_write_mode(filename)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n shutil.move(filename, os.path.join(tmpdir, basename))\n with tarfile.open(filename, write_mode) as tarball:\n tarball.add(tmpdir, arcname='')", "def convert_to_jpg_then_compress(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name).replace('.png', '.jpg')\n\n\t\timage = Image.open(self.full_path)\n\t\timage.save(self._compressed_save_path)\n\n\t\timage = 
Image.open(self._compressed_save_path)\n\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)", "def encode(self, compressed, hash160=False):\n # calculate the bytes\n if compressed:\n prefix = b'\\x02' if self.y % 2 == 0 else b'\\x03'\n pkb = prefix + self.x.to_bytes(32, 'big')\n else:\n pkb = b'\\x04' + self.x.to_bytes(32, 'big') + self.y.to_bytes(32, 'big')\n # hash if desired\n return ripemd160(sha256(pkb)) if hash160 else pkb", "def _compress_mask(self, mask: Tensor) -> Tensor:\n if self.dim is None or len(mask.size()) == 1:\n mask = mask.clone()\n else:\n mask_dim = list(range(len(mask.size())))\n for dim in self.dim:\n mask_dim.remove(dim)\n mask = torch.sum(mask, dim=mask_dim)\n\n if self.block_sparse_size is not None:\n # operation like pooling\n lower_case_letters = 'abcdefghijklmnopqrstuvwxyz'\n ein_expression = ''\n for i, step in enumerate(self.block_sparse_size):\n mask = mask.unfold(i, step, step)\n ein_expression += lower_case_letters[i]\n ein_expression = '...{},{}'.format(ein_expression, ein_expression)\n mask = torch.einsum(ein_expression, mask, torch.ones(self.block_sparse_size).to(mask.device))\n\n return (mask != 0).type_as(mask)", "def compress_image(image_path, out_path, key):\n tinify.key = key\n try:\n source = tinify.from_file(image_path)\n source.to_file(out_path)\n return out_path\n except:\n traceback.print_exc()\n return False", "def get_datapoints_compressed(self, rid, t0, t1, nmax = 300):\n dp = self.get_datapoints(rid, t0, t1, nmax)\n return zlib.compress(pickle.dumps(dp))", "def save_compressed(data, filename, compression_type='bz2', create_link=False):\n # write to compressed HDF5 file\n hdf5 = open_compressed(filename, 'w')\n save(data, hdf5)\n close_compressed(filename, hdf5, compression_type, create_link)", "def compress_G1(pt: G1Uncompressed) -> G1Compressed:\n if is_inf(pt):\n # Set c_flag = 1 and b_flag = 1. 
leave a_flag = x = 0\n return G1Compressed(POW_2_383 + POW_2_382)\n else:\n x, y = normalize(pt)\n # Record y's leftmost bit to the a_flag\n a_flag = (y.n * 2) // q\n # Set c_flag = 1 and b_flag = 0\n return G1Compressed(x.n + a_flag * POW_2_381 + POW_2_383)", "def _gzip_reader_fn():\n return tf.TFRecordReader(\n options=tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.GZIP))", "def encode(self, x: Tensor) ->Tensor:\n return self.encoder(x)[0]", "def two_tier_embedding_compression(embeddings, bits, quantizer=None):\n assert bits <= 8\n n = 2**bits\n quantized_embeddings = embeddings.copy()\n index_table = np.zeros(embeddings.shape, dtype=np.uint8)\n cluster_index_table = np.zeros(index_table.shape[0], dtype=np.uint8)\n codebook_table = np.zeros((n, n))\n\n km1 = KMeans(n)\n km1.fit(embeddings)\n tier1 = km1.predict(embeddings)\n\n km_models = [0] * n\n block_sizes = [0] * n\n for block_label in tqdm(range(n)):\n mask = block_label == tier1\n indices = np.arange(embeddings.shape[0])[mask]\n block = embeddings[mask]\n km2 = KMeans(n)\n km2.fit(block.flatten().reshape(-1, 1))\n if quantizer:\n km2.cluster_centers_ = quantizer(km2.cluster_centers_).numpy()\n km2.cluster_centers_.sort(axis=0)\n\n km_models[block_label] = km2\n codebook_table[block_label, :] = km2.cluster_centers_.flatten()\n cluster_index_table[indices] = block_label\n block_sizes[block_label] = block.shape[0]\n for i in indices:\n preds = km2.predict(embeddings[i, :].reshape(-1, 1))\n index_table[indices, :] = preds\n quantized_embeddings[i, :] = km2.cluster_centers_[preds].flatten()\n print('block_sizes:', block_sizes)\n return index_table, cluster_index_table, codebook_table, quantized_embeddings", "def gz_tar(full_prefix):\n tarfile = os.path.join(outputdir, full_prefix + '.tar')\n try:\n with open(tarfile, 'rb') as f_in, gzip.open(tarfile + '.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(tarfile)\n except Exception as e:\n log.error(\"Tarfile {0} was not generated. Module(s) run collected no info?\".format(tarfile))\n log.error(e)\n\n return tarfile + '.gz'", "def _compress(protected, unprotected, ciphertext):\n\n if protected:\n raise RuntimeError(\"Protection produced a message that has uncompressable fields.\")\n\n piv = unprotected.pop(COSE_PIV, b\"\")\n if len(piv) > COMPRESSION_BITS_N:\n raise ValueError(\"Can't encode overly long partial IV\")\n\n firstbyte = len(piv)\n if COSE_KID in unprotected:\n firstbyte |= COMPRESSION_BIT_K\n kid_data = unprotected.pop(COSE_KID)\n else:\n kid_data = b\"\"\n\n if COSE_KID_CONTEXT in unprotected:\n firstbyte |= COMPRESSION_BIT_H\n kid_context = unprotected.pop(COSE_KID_CONTEXT)\n s = len(kid_context)\n if s > 255:\n raise ValueError(\"KID Context too long\")\n s_kid_context = bytes((s,)) + kid_context\n else:\n s_kid_context = b\"\"\n\n if COSE_COUNTERSIGNATURE0 in unprotected:\n firstbyte |= COMPRESSION_BIT_G\n\n # In theory at least. 
In practice, that's an empty value to later\n # be squished in when the compressed option value is available for\n # signing.\n ciphertext += unprotected.pop(COSE_COUNTERSIGNATURE0)\n\n if unprotected:\n raise RuntimeError(\"Protection produced a message that has uncompressable fields.\")\n\n if firstbyte:\n option = bytes([firstbyte]) + piv + s_kid_context + kid_data\n else:\n option = b\"\"\n\n return (option, ciphertext)", "def compress(self, data):\r\n return self.add_chunk(data)", "def uncompress(self, *args):\n return _osgAnimation.Vec3Packed_uncompress(self, *args)", "def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT = (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16", "def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]", "def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()", "def compress(args):\n # This is the sequential version, processing 8 rows one after another.\n x_folders_path = args.input_file\n x_test_images_path = np.asarray(make_mat(x_folders_path))\n # Build model, restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n # Read LF image rows and create feature tensors.\n for row in range(8): \n x_val_images_path_curr = select_views(x_test_images_path, row+1) \n for img in range(8):\n if img == 0:\n images = np.expand_dims(read_image_test(x_val_images_path_curr[img]),axis = 0)\n else:\n temp = np.expand_dims(read_image_test(x_val_images_path_curr[img]),axis = 0)\n images = np.concatenate((images, temp),axis = 0)\n pos_x = ((row+1)/5.0) * np.ones(np.shape(images[0,:,:,:]))\n images = np.expand_dims(images, axis = 0)\n pos_x = np.expand_dims(pos_x, axis = 0) \n pos_y = np.ones(np.shape(images[:,0,:,:,:]))\n batch_feature1 = np.stack((images[:,0,:,:,:],images[:,4,:,:,:],pos_x,(1.0/5.0)*pos_y), axis=1)\n batch_feature2 = np.stack((images[:,1,:,:,:],images[:,4,:,:,:],pos_x,(2.0/5.0)*pos_y), axis=1)\n batch_feature3 = np.stack((images[:,2,:,:,:],images[:,4,:,:,:],pos_x,(3.0/5.0)*pos_y), axis=1)\n batch_feature4 = np.stack((images[:,3,:,:,:],images[:,4,:,:,:],pos_x,(4.0/5.0)*pos_y), axis=1)\n batch_feature5 = np.stack((images[:,4,:,:,:],images[:,4,:,:,:],pos_x,(5.0/5.0)*pos_y), axis=1)\n batch_feature6 = np.stack((images[:,5,:,:,:],images[:,4,:,:,:],pos_x,(6.0/5.0)*pos_y), axis=1)\n batch_feature7 = np.stack((images[:,6,:,:,:],images[:,4,:,:,:],pos_x,(7.0/5.0)*pos_y), axis=1)\n batch_feature8 = 
np.stack((images[:,7,:,:,:],images[:,4,:,:,:],pos_x,(8.0/5.0)*pos_y), axis=1)\n\n if not os.path.exists(args.output_file):\n os.mkdir(args.output_file)\n outputfile=args.output_file+'/'+str(row+1)+'.tfci'\n \n # Write the input images as png files.\n for col in range(np.shape(images)[1]):\n inpimg = images[:,col,:,:,:]\n save_img(args.output_file,1,inpimg,row+1,col+1)\n\n # Compress the LF image rows.\n curr_decoded, psnr, msssim, bpp = model.compress(images,batch_feature1,batch_feature2,\n batch_feature3,batch_feature4,batch_feature5,\n batch_feature6,batch_feature7,batch_feature8,\n outputfile,args.output_file,row+1)\n print(\"PSNR:%.2f, MS-SSIM:%.2f, BPP:%.2f\"%(psnr,msssim,bpp))", "def get_compressed(self, value):\r\n output = []\r\n lz_data = (value >> 8) & 0xFF\r\n lz_counter = value & 0xFF\r\n # Define the relative offset on LZ Window\r\n lz_offset = ((lz_counter & 0xF0) << 4) | lz_data\r\n # Define the LZ Counter for repeat data N times\r\n lz_counter = (lz_counter & 0xF) + 0x2\r\n # Start Repeat Loop\r\n while (lz_counter >= 0):\r\n # Seek the window on LZ Offset and get the LZ Data\r\n self.__lzwindow__.seek(lz_offset, FROM_START)\r\n lz_data = (lz_data & 0xFF00) + \\\r\n int.from_bytes(self.__lzwindow__.read(1), byteorder='big')\r\n # Write the LZ data to the output\r\n output.append((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Seek the LZ Window on current LZ Window Counter value and write the current LZ Data (LZBuffer)\r\n self.__lzwindow__.seek(self.__lzwindowcounter__, FROM_START)\r\n self.__lzwindow__.write((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Increment LZ Window Counter\r\n self.__lzwindowcounter__ = (\r\n self.__lzwindowcounter__ + 0x1) & self.__lzwindowmax__\r\n # Increment LZ Offset\r\n lz_offset = (lz_offset + 0x1) & self.__lzwindowmax__\r\n # Decrement number of data to decompress\r\n self.__maxlen__ -= 0x1\r\n # Decrement LZ Loop counter\r\n lz_counter -= 0x1\r\n return output", "def compress(cls, img, as_string=False):\n h0, w0 = img.shape\n w = binary_cast([w0], 'H', 'BB')\n h = binary_cast([h0], 'H', 'BB')\n cp = np.concatenate((w, h, img.astype('uint8').flatten()))\n # VLR.cmp: more 2x compression\n scp = VariableLength.compress(cp)\n if as_string:\n return scp\n # translate string into unit8 for storage\n vcp = np.array([ord(d) for d in scp]).astype('uint8')\n return vcp" ]
[ "0.7178514", "0.7178514", "0.6956097", "0.68778074", "0.6652845", "0.64798415", "0.6468104", "0.61270404", "0.60731876", "0.5983608", "0.56424665", "0.5547921", "0.5408176", "0.5383199", "0.53278595", "0.5266407", "0.5179403", "0.5145993", "0.5139783", "0.51359445", "0.51349926", "0.510412", "0.510412", "0.5073774", "0.49986", "0.4980507", "0.49748343", "0.49552888", "0.49256283", "0.4917228", "0.48944536", "0.48634207", "0.48469475", "0.47720605", "0.4761397", "0.47397208", "0.47338143", "0.47299954", "0.47190925", "0.47134265", "0.4697946", "0.46895674", "0.46877858", "0.46215332", "0.46198332", "0.4619558", "0.46006903", "0.4595375", "0.45876873", "0.45812076", "0.45708042", "0.45695207", "0.4563512", "0.4563512", "0.45433185", "0.4541583", "0.45401162", "0.4539745", "0.4533474", "0.453331", "0.4532222", "0.4529758", "0.45242923", "0.45230198", "0.45187637", "0.45158494", "0.45045668", "0.44963363", "0.44897547", "0.44768596", "0.44747958", "0.446968", "0.44334593", "0.44241622", "0.44196403", "0.44155622", "0.44138718", "0.4412577", "0.44104287", "0.44063586", "0.43950406", "0.4377009", "0.43731415", "0.43638608", "0.43626347", "0.43602404", "0.43586805", "0.4321505", "0.43161303", "0.4305788", "0.4303118", "0.42987886", "0.4292662", "0.42844144", "0.4275241", "0.42694685", "0.42658502", "0.42359114", "0.42311907", "0.4229589" ]
0.67097557
4
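The lines above close one record of this dump: a list of mined code snippets, a parallel list of score strings, and the record's own score and rank. A hypothetical loader sketch for such records, assuming the dump comes from a JSON-lines export and guessing the key names from the layout above (the file path and both keys are assumptions, not confirmed by this dump):

import json

def hardest_negatives(path):
    # Yield, per record, the negative snippet with the highest score;
    # snippets and score strings are paired positionally.
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            record = json.loads(line)
            pairs = zip(record["negatives"], map(float, record["negative_scores"]))
            yield max(pairs, key=lambda pair: pair[1])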
Decompress the tensor with the given decompression context.
def decompress(self, tensors):
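Several negatives below implement the pattern this query/document pair names: compress() returns the shrunken tensor together with a context object, and decompress() uses that context to restore it. A minimal runnable sketch of the pattern, with NumPy standing in for the tensor library and an FP16 down-cast as the compression step (both are illustrative stand-ins, not this record's actual implementation):

import numpy as np

class FP16Compressor:
    def compress(self, tensor):
        # Keep the original dtype as the decompression context.
        ctx = tensor.dtype
        if np.issubdtype(tensor.dtype, np.floating):
            tensor = tensor.astype(np.float16)
        return tensor, ctx

    def decompress(self, tensor, ctx):
        # Cast back to the dtype recorded at compression time.
        return tensor.astype(ctx)

compressor = FP16Compressor()
small, ctx = compressor.compress(np.random.rand(8).astype(np.float32))
restored = compressor.decompress(small, ctx)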
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decompress(self, tensor, ctx, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n return tensor", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor = self.compressor.decompress(tensor, ctx, *args, **kwargs)\n \n # uncompressed gradients need to do nag explicitly\n if not self.inited:\n if size(tensor.shape) < self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.nag = True\n self.inited = True\n\n if self.nag:\n self.mom += tensor\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n return tensor", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor_decompressed = tensor\n dtype = ctx\n if 'float' in str(dtype):\n tensor_decompressed = tensor.astype(dtype, copy=False)\n return tensor_decompressed", "def decompress(self, tensor, ctx, *args, **kwargs):\n if \"x\" not in kwargs:\n raise ValueError(\"x is missing\")\n\n x = kwargs[\"x\"].astype(tensor.dtype, copy=False) \n \n if not self.inited:\n self.cache = nd.zeros_like(tensor)\n if size(tensor.shape) >= self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.wdmom = True\n self.inited = True\n \n # weight decay\n nd._internal._mul_scalar(x, self.wd, out=self.cache)\n\n # weight decay momentum\n if self.wdmom:\n self.mom += self.cache\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n tensor += self.cache\n return self.compressor.decompress(tensor, ctx, *args, **kwargs)", "def __handle_decompression(self, x):\n if self.__compress:\n return zlib.decompress(x)\n return x", "def decompress(args):\n # Three integers for tensor shapes + nine encoded strings.\n np_dtypes = [np.integer] * 3 + [np.bytes_] * 9\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n arrays = packed.unpack_from_np_dtypes(np_dtypes)\n\n # Build model and restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n curr_decoded = model.decompress(arrays)\n row=int(args.input_file.split('/')[-1].split('.')[0])\n\n # Write reconstructed images out as PNG files.\n for col in range(np.shape(curr_decoded)[1]):\n img = curr_decoded[0,col,:,:,:]/255\n save_img(args.output_file,0,img,row,col+1)", "def Decompress(indata, algo, with_header=True):\n if algo == 'none':\n return indata\n if with_header:\n data_len = struct.unpack('<I', indata[:4])[0]\n indata = indata[4:4 + data_len]\n fname = GetOutputFilename('%s.decomp.tmp' % algo)\n with open(fname, 'wb') as fd:\n fd.write(indata)\n if algo == 'lz4':\n data = Run('lz4', '-dc', fname, binary=True)\n elif algo == 'lzma':\n outfname = GetOutputFilename('%s.decomp.otmp' % algo)\n Run('lzma_alone', 'd', fname, outfname)\n data = ReadFile(outfname, binary=True)\n elif algo == 'gzip':\n data = Run('gzip', '-cd', fname, binary=True)\n else:\n raise ValueError(\"Unknown algorithm '%s'\" % algo)\n return data", "def Decompress(input_filename, output_filename):\n _Write(zlib.decompress(_Read(input_filename)), output_filename)", "def decompress(data):\n compression_type = ord(data[0:1])\n if compression_type == 0:\n return data\n elif compression_type == 2:\n return zlib.decompress(data[1:], 15)\n elif compression_type == 16:\n return bz2.decompress(data[1:])\n else:\n msg = \"Unsupported compression type: {}\".format(compression_type)\n raise RuntimeError(msg)", "def Decompress(self, var_name):\n self.Write('%s_uncompressed = new 
uint8_t[%s_uncompressed_size];', var_name,\n var_name)\n self.Write('uLongf %s_temp_size = %s_uncompressed_size;', var_name,\n var_name)\n self.Write('CHECK_EQ(Z_OK, uncompress(%s_uncompressed, &%s_temp_size,',\n var_name, var_name)\n self.Write(' %s_data, %s_size));', var_name,\n var_name)", "def decompress(data):\n pickled = zlib.decompress(data)\n return pickle_util.load(pickled)", "def download_uncompress(url, path=\".\", compression=None, context=None):\n\n # infer compression from url\n if compression is None:\n compression = os.path.splitext(url)[1][1:]\n\n # check compression format and set mode\n if compression in [\"gz\", \"bz2\"]:\n mode = \"r|\" + compression\n elif compression == \"tar\":\n mode = \"r:\"\n else:\n raise ValueError(\"The file must be of type tar/gz/bz2.\")\n\n # download and untar/uncompress at the same time\n if context is not None:\n stream = urlopen(url, context=context)\n else:\n stream = urlopen(url)\n tf = tarfile.open(fileobj=stream, mode=mode)\n tf.extractall(path)", "def decompress(fileobj, dir=None):\n tf = tempfile.NamedTemporaryFile(\n 'wb', prefix='vulnix.nvd.', suffix='.xml', delete=False, dir=dir)\n logger.debug(\"Uncompressing {}\".format(tf.name))\n with gzip.open(fileobj, 'rb') as f_in:\n shutil.copyfileobj(f_in, tf)\n tf.close()\n return tf.name", "def compress(self, tensor):", "def decompress_zlib(in_str):\n import zlib\n s = zlib.decompress(in_str)\n return s", "def decompress_dump(func, input_bytes):\n o = func(input_bytes)\n if o:\n return o\n for cmd, search_bytes in COMPRESSION_ALGO:\n for decompressed in try_decompress(cmd, search_bytes, input_bytes):\n if decompressed:\n o = decompress_dump(func, decompressed)\n if o:\n return o\n # Force decompress the whole file even if header doesn't match\n decompressed = try_decompress_bytes(cmd, input_bytes)\n if decompressed:\n o = decompress_dump(func, decompressed)\n if o:\n return o", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def decompress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_postorder(node_lst, num_nodes - 1)\n size = bytes_to_int(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(decompress_bytes(tree, text, size))", "def decompress_zlib(self, string):\n #encode the input string\n self.string = string\n return zlib.decompress(self.string).decode()", "def uncompress(self, compressed):\n\t\traise Exception(NotImplemented)", "def gzinflate(val):\n return zlib.decompress(val)", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def compress(self, tensor, *args, **kwargs):\n return tensor, None", "def decompress(value):\n\n process = Popen([\"xz\", \"--decompress\", \"--stdout\", \"--force\"],\n stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def decompress(value):\n\n process = Popen([\"xz\", \"--decompress\", \"--stdout\", \"--force\"],\n stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def deconv_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)\n d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)\n d2_reshaped = tf.reshape(d2, shape=[-1, 4, 4, 
64])\n d3 = tf.layers.conv2d_transpose(\n inputs=d2_reshaped,\n filters=64,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d4 = tf.layers.conv2d_transpose(\n inputs=d3,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d5 = tf.layers.conv2d_transpose(\n inputs=d4,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n d6 = tf.layers.conv2d_transpose(\n inputs=d5,\n filters=output_shape[2],\n kernel_size=4,\n strides=2,\n padding=\"same\",\n )\n return tf.reshape(d6, [-1] + output_shape)", "def decompress(byte_array):\n byte0 = byte_array[0]\n decoded = None\n if (byte0 & _mask_3_4) == _pattern_3_4:\n decoded = CCNxCompressorVariableLength.__decompress_3_4(byte_array)\n elif (byte0 & _mask_4_9) == _pattern_4_9:\n decoded = CCNxCompressorVariableLength.__decompress_4_9(byte_array)\n elif (byte0 & _mask_15_5) == _pattern_15_5:\n decoded = CCNxCompressorVariableLength.__decompress_15_5(byte_array)\n elif (byte0 & _mask_16_10) == _pattern_16_10:\n decoded = CCNxCompressorVariableLength.__decompress_16_10(byte_array)\n elif (byte0 & _mask_16_16) == _pattern_16_16:\n decoded = CCNxCompressorVariableLength.__decompress_16_16(byte_array)\n\n return decoded", "def decompose(self, *args, **kwargs):\n return _image.image_decompose(self, *args, **kwargs)", "def compress(self, tensor, *args, **kwargs):\n pass", "def uncompress(in_file, out_file):\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_general(node_lst, num_nodes - 1)\n size = bytes_to_size(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(generate_uncompressed(tree, text, size))", "def lz4_uncompress(input_data, expected_decompressed_size):\n assert isinstance(input_data,bytes), \"input_data must be of type bytes\"\n assert isinstance(expected_decompressed_size,int), \"expected_decompressed_size must be of type int\"\n\n dst_buf = create_string_buffer(expected_decompressed_size)\n status = liblz4.LZ4_decompress_safe(input_data,dst_buf,len(input_data),expected_decompressed_size)\n if status != expected_decompressed_size:\n return None\n else:\n return dst_buf.raw", "def _decompress(self, tile: bytes) -> np.ndarray:\n try:\n return getattr(self, f\"_{self.compression}\")(tile)\n except AttributeError as e:\n raise NotImplementedError(\n f\"{self.compression} is not currently supported\"\n ) from e", "def decode(self, z: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError", "def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)", "def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result", "def decompress(filename):\n print(\"\\n# start decompression of file: %s \\n#############################################\" % filename)\n\n # check if handed filename has the extension .gz\n if \".gz\" in filename: # if file has the extension gz\n\n if os.path.exists(filename): # check if the file exists\n\n print(\"# filename enthält die endung gz : %s\" % filename)\n\n # if the file was found on the system split its 
string at the '.gz' position and use evereything before\n filename_txt = filename.split(\".gz\")[0]\n\n print(\"# txt filename: \", filename_txt)\n\n # create an txt file with string before the '.gz' extension and decompress the content of the .gz file\n with gzip.open(filename, 'rb') as decompressFile:\n with open(filename_txt, 'wb') as receivingFile:\n shutil.copyfileobj(decompressFile, receivingFile)\n\n # if the file was not found download it and decompress the content\n else:\n print(\"# ERROR - %s konnte nicht gefunden werden!!\" % filename)\n\n download(HOST, DIRECTORY, filename)\n filename_txt = decompress(filename)\n print(\"# return \", filename_txt)\n return filename_txt\n # if the handed filename has no '.gz' extension throw an Exception\n else:\n\n raise Exception(\"# not a gzip file\")", "def decompress_gzip(in_str):\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def decompress_gzip(in_str):\n import gzip\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def decompress(self, s):\n return zlib.decompress(s, -zlib.MAX_WBITS)", "def gzdeflate():\n return zlib.compress(val)", "def uncompress(self, *args):\n return _osgAnimation.Vec3Packed_uncompress(self, *args)", "def decompress_stream(src, dst):\n with gzip.GzipFile(fileobj=src, mode='rb') as gz:\n for block in iterfile(gz):\n dst.write(block)", "def NTFSDecompressUnit(Buffer):\n\n from io import BytesIO\n from struct import unpack\n\n\n NTFS_CLUSTER_SIZE = 4096\n NTFS_COMPRESSION_UNIT_SIZE = 16 * NTFS_CLUSTER_SIZE\n\n def is_valid_write_request(offset, length):\n return offset + length <= 2 * 1024 * 1024 * 1024 # Reject obviously invalid write requests.\n\n if len(Buffer) > NTFS_COMPRESSION_UNIT_SIZE or len(Buffer) < NTFS_CLUSTER_SIZE:\n return b'' # Invalid length of input data.\n\n LZNT1_COMPRESSION_BITS = []\n\n offset_bits = 0\n y = 16\n\n # Taken from: CyXpress.pyx\n if len(LZNT1_COMPRESSION_BITS) == 0:\n LZNT1_COMPRESSION_BITS = [0] * 4096\n\n for x in range(0, 4096):\n LZNT1_COMPRESSION_BITS[x] = 4 + offset_bits\n if x == y:\n y = y * 2\n offset_bits += 1\n # End\n\n\n src_index = 0\n dst_index = 0\n dbuf_obj = BytesIO()\n\n while src_index < len(Buffer):\n header_bytes = Buffer[src_index: src_index + 2]\n src_index += 2\n\n if len(header_bytes) < 2:\n break # Truncated header.\n\n header, = unpack('<H', header_bytes)\n\n if header == 0:\n break # End of the buffer.\n\n if header & 0x7000 != 0x3000:\n break # Invalid signature.\n\n if header & 0x8000 == 0:\n # Not a compressed block, copy literal data.\n block_size = (header & 0x0FFF) + 1\n\n if not is_valid_write_request(dst_index, block_size):\n break # Bogus data.\n\n dbuf_obj.seek(dst_index)\n bytes_ = Buffer[src_index: src_index + block_size]\n dbuf_obj.write(bytes_)\n\n if len(bytes_) == block_size:\n src_index += block_size\n dst_index += block_size\n continue\n else:\n break # Truncated literal data.\n\n # A compressed block.\n dst_chunk_start = dst_index\n src_chunk_end = src_index + (header & 0x0FFF) + 1\n\n bogus_data = False\n while src_index < src_chunk_end and src_index < len(Buffer) and not bogus_data:\n flags = Buffer[src_index]\n if type(flags) is not int:\n flags = ord(flags)\n\n src_index += 1\n\n for token in range(0, 
8):\n if src_index >= src_chunk_end:\n break\n\n if src_index >= len(Buffer):\n # Truncated chunk.\n break\n\n flag = flags & 1\n flags = flags >> 1\n\n if flag == 0:\n # A literal byte, copy it.\n if not is_valid_write_request(dst_index, 1):\n # Bogus data.\n bogus_data = True\n break\n\n dbuf_obj.seek(dst_index)\n bytes_ = Buffer[src_index: src_index + 1]\n dbuf_obj.write(bytes_)\n\n if len(bytes_) == 1:\n dst_index += 1\n src_index += 1\n continue\n else:\n # Truncated chunk.\n bogus_data = True\n break\n\n # A compression tuple.\n table_idx = dst_index - dst_chunk_start\n try:\n length_bits = 16 - LZNT1_COMPRESSION_BITS[table_idx]\n except IndexError:\n # Bogus data.\n bogus_data = True\n break\n\n length_mask = (1 << length_bits) - 1\n\n ctuple_bytes = Buffer[src_index: src_index + 2]\n src_index += 2\n\n if len(ctuple_bytes) < 2:\n # Truncated chunk.\n bogus_data = True\n break\n\n ctuple, = unpack('<H', ctuple_bytes)\n back_off_rel = (ctuple >> length_bits) + 1\n back_off = dst_index - back_off_rel\n back_len = (ctuple & length_mask) + 3\n\n if back_off < dst_chunk_start:\n # Bogus compression tuple.\n bogus_data = True\n break\n\n for i in range(0, back_len):\n # Decompress data.\n dbuf_obj.seek(back_off)\n bytes_ = dbuf_obj.read(1)\n if len(bytes_) != 1:\n # Invalid offset.\n bogus_data = True\n break\n\n if not is_valid_write_request(dst_index, 1):\n # Bogus data.\n bogus_data = True\n break\n\n dbuf_obj.seek(dst_index)\n dbuf_obj.write(bytes_)\n\n dst_index += 1\n back_off += 1\n\n if bogus_data:\n break\n\n if bogus_data:\n break\n\n dbuf = dbuf_obj.getvalue()\n dbuf_obj.close()\n\n return dbuf", "def testDecompress(self):\n decompressor = xz_decompressor.XZDecompressor()\n\n compressed_data = (\n b'\\xfd7zXZ\\x00\\x00\\x01i\"\\xde6\\x02\\xc0\\x13\\x0f!\\x01\\x16\\x00\\xc0\\xb7\\xdc'\n b'\\xe9\\x01\\x00\\x0eThis is a test.\\x00\\x00]\\xc9\\xc3\\xc6\\x00\\x01#\\x0f\\xdb'\n b'\\xdf\\x90\\x0e\\x90B\\x99\\r\\x01\\x00\\x00\\x00\\x00\\x01YZ')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger xz raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger xz raising IOError.\n decompressor = xz_decompressor.XZDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def image_decompress(img_mode, img_size, img):\n img = Image.frombytes(img_mode, img_size, img)\n\n return img", "def decode(self, z):\n l1 = self.fc3(z)\n l1 = l1.unsqueeze(0).unsqueeze(0).unsqueeze(0).permute(0,3,1,2)\n h1 = F.relu(self.deconv1(l1))\n h2 = F.relu(self.deconv2(h1))\n h3 = F.relu(self.deconv3(h2))\n return torch.sigmoid(self.deconv4(h3))", "def decompress_file(path, temp_dir='tmp'):\n if path.endswith('.gz'):\n logger.info('Decompressing {} to {}'.format(path, temp_dir))\n return decompress_gzip(\n path,\n os.path.join(temp_dir,\n os.path.splitext(os.path.basename(path))[0])\n )\n else:\n return path", "def deconv_block(input_tensor: tf.Tensor, features: int, name: str) -> tf.Tensor:\n out = input_tensor\n\n out = KL.Conv2D(\n int(features // 2),\n 1,\n strides=(1, 1),\n name=name + f\"_c{1}\",\n )(input_tensor)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2DTranspose(\n int(features // 2),\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n name=name + f\"_d\",\n )(out)\n out = 
KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2D(\n features,\n 1,\n strides=(1, 1),\n name=name + f\"_c{2}\",\n )(out)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n return out", "def decompress(self, bit_strings):\n return self._run(\"decompress\", bit_strings=bit_strings)", "def testDecompress(self):\n decompressor = xz_decompressor.LZMADecompressor()\n\n compressed_data = (\n b']\\x00\\x00\\x80\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00*\\x1a\\t\\'d\\x1c'\n b'\\x87\\x8aO\\xcaL\\xf4\\xf8!\\xda\\x88\\xd8\\xff\\xff\\xeb\\xcc\\x00')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger lzma raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger lzma raising IOError.\n decompressor = xz_decompressor.LZMADecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def _uncompress(fname, outdir, msg=msg):\n import os\n assert os.access(fname, os.R_OK), \"could not access [%s]\" % fname\n fname = os.path.abspath(os.path.realpath(fname))\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n orig_dir = os.getcwd()\n try:\n os.chdir(outdir)\n ext = os.path.splitext(fname)[1][1:] # drop the dot\n if ext in ('gz', 'bz2'):\n import tarfile\n f = tarfile.open(fname, 'r:%s'%ext)\n f.extractall()\n else:\n err = 'extension [%s] not handled (yet?)' % ext\n msg.error(err)\n raise ValueError(err)\n finally:\n os.chdir(orig_dir)", "def test_lz4_decompression_avoids_deep_copy():\n pytest.importorskip(\"lz4\")\n a = bytearray(1_000_000)\n b = compressions[\"lz4\"].compress(a)\n c = compressions[\"lz4\"].decompress(b)\n assert isinstance(c, bytearray)", "def decompressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.decompress(data)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def decompressFile(infile, outfile):\n decoder = Decoder(infile)\n for data in decoder.bytes():\n outfile.write(data)", "def _deflate(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(\n np.frombuffer(imagecodecs.zlib_decode(tile), self.dtype)\n )\n self._unpredict(decoded)\n return np.rollaxis(decoded, 2, 0)", "def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 4, 4)\n result = self.decoder(result)\n return result", "def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()", "def lz4_decompress(src, dlen, dst=None):\n if dst is None:\n dst = bytearray()\n print(str(src))\n b = bytes(src)\n d=lz4zfs.decompress(b,dlen)\n l=len(d)\n if (dlen != l):\n print(\"[-] decompress size differ from %d, got %d\" 
%(dlen,l))\n raise RuntimeError(\"decompress size differ from %d, got %d\" %(dlen,l))\n else:\n if (dlen < l):\n dst[0:dlen] = d;\n else:\n dst[0:l] = d;\n print(str(dst))\n return dst", "def decompress(cls, imgz):\n # translate back uint8 into string\n if not isinstance(imgz, str):\n imgz = ''.join([chr(d) for d in imgz])\n # zlib decompression\n imgz = VariableLength.decompress(imgz)\n ####\n w = binary_cast(imgz[:2], 'BB', 'H')[0]\n h = binary_cast(imgz[2:4], 'BB', 'H')[0]\n img = imgz[4:]\n img = np.reshape(img, (h, w))\n return img", "def decode(self, z):\n out = self.fc_decoder(z)\n out = out.view(-1, 512, 2, 2)\n out = self.decoder(out)\n return out", "def decompress_data(src, dst):\n assert os.path.exists(src), \"{} does not exist. Please download the \\\n entire repository and keep it as it originally is\".format(src)\n\n # create folder layout at the destination folder\n subset_list = [\"train\", \"val\"]\n _create_layout(dst, subset_list)\n\n # extract data\n for subset in subset_list:\n subset_img_src = os.path.join(src, \"images\", subset + \".zip\")\n subset_img_dst = os.path.join(dst, \"images\", subset)\n _extract_multi_vol_zip(subset_img_src, subset_img_dst)\n _extract_all_gz_in_dir(subset_img_dst)\n\n subset_lbl_src = os.path.join(src, \"labels\", subset + \".zip\")\n subset_lbl_dst = os.path.join(dst, \"labels\", subset)\n _extract_zip(subset_lbl_src, subset_lbl_dst)\n _extract_all_gz_in_dir(subset_lbl_dst)\n\n print(\"Finished decompressing {}.\".format(subset))", "def test_decompress_file():\n gz_file = os.path.join(\n tempfile.gettempdir(),\n \"jade-unit-test-file.gz\",\n )\n with gzip.open(gz_file, \"wb\") as f:\n f.write(b\"Hello World\")\n assert os.path.exists(gz_file)\n\n new_file = decompress_file(gz_file)\n assert os.path.exists(new_file)\n with open(new_file, \"r\") as f:\n data = f.read()\n assert data == \"Hello World\"\n\n if os.path.exists(gz_file):\n os.remove(gz_file)\n\n if os.path.exists(new_file):\n os.remove(new_file)", "def DecompressionFile(src_fp, algorithm):\n if algorithm == \"lzma\":\n return lzma.open(src_fp, \"r\")\n\n if algorithm == \"snappy\":\n return SnappyFile(src_fp, \"rb\")\n\n if algorithm:\n raise InvalidConfigurationError(\"invalid compression algorithm: {!r}\".format(algorithm))\n\n return src_fp", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def _DecompressMessageList(\n packed_message_list: rdf_flows.PackedMessageList,\n) -> rdf_flows.MessageList:\n compression = packed_message_list.compression\n if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:\n data = packed_message_list.message_list\n\n elif compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION:\n try:\n data = zlib.decompress(packed_message_list.message_list)\n except zlib.error as e:\n raise RuntimeError(\"Failed to decompress: %s\" % e) from e\n else:\n raise RuntimeError(\"Compression scheme not supported\")\n\n try:\n result = rdf_flows.MessageList.FromSerializedBytes(data)\n except rdfvalue.DecodeError as e:\n raise RuntimeError(\"RDFValue parsing failed.\") from e\n\n return result", "def Decompress(inputFilePath, outputFilePath):\n # TODO: Add tests for this function\n compressedString = FilePathIntoString(inputFilePath)\n dictionary = ExtractTextDictionaryFromString(compressedString)\n\n compressedWords = SplitIntoWords(compressedString)\n\n # Remove the dictionary from the first word\n firstWord = compressedWords[0]\n firstWordArray = 
firstWord.split(\"\\n\")\n compressedWords[0] = firstWordArray[len(firstWordArray) - 1]\n\n uncompressedWords = UncompressWordArray(compressedWords, dictionary)\n outputString = WordArrayToString(uncompressedWords)\n WriteToFile(outputFilePath, outputString)", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed = tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def stream_decompress(src, dst, blocksize=_STREAM_TO_STREAM_BLOCK_SIZE):\r\n decompressor = StreamDecompressor()\r\n while True:\r\n buf = src.read(blocksize)\r\n if not buf: break\r\n buf = decompressor.decompress(buf)\r\n if buf: dst.write(buf)\r\n decompressor.flush() # makes sure the stream ended well\r", "def debianize( strFilename ):\n \n #~ data = gzip.GzipFile( strFilename ).read();\n #~ print data;\n #~ return;\n \n #~ data = gzip.open( strFilename ).read();\n #~ print data;\n #~ return; \n \n #~ uncompressedData = bz2.BZ2File(strFilename).read()\n #~ print str(uncompressedData)\n #~ return;\n \n #~ file = open( strFilename, 'rb' );\n #~ data = file.read();\n #~ file.close();\n #~ print debug.dumpHexa( data );\n \n #~ ar = tarfile.open(strFilename, 'r:*')\n #~ for item in ar:\n #~ print( str(item) );\n #~ print( \"%s:\" % item.name );\n #~ #print debug.dumpHexa(item.buf);\n #~ #print zlib.decompress(item.buf)\n #~ #print zlib.decompress(ar.extractfile(item).read())\n #~ data = ar.extractfile(item.name).read()\n #~ print data # works !\n #~ ar.close() \n #~ return;\n \n fileLists = [];\n file = open( strFilename );\n data = file.read();\n file.close();\n \n print( \"data len: %d\" % len( data ) );\n\n nDataCompressedOffset = 0; # 132\n\n # works fine on toto.gz\n #~ f = gzip.open(strFilename, 'rb')\n #~ file_content = f.read()\n #~ print file_content\n #~ f.close() \n \n #~ decompressor = bz2.BZ2Decompressor();\n #~ uncompressed = decompressor.decompress(data[nDataCompressedOffset:]);\n \n #~ uncompressed = zlib.decompress(data[nDataCompressedOffset:]);\n \n uncompressed = decompress( data );\n print( \"uncompressed: %s\" % str( uncompressed ) );", "def _decompress(self, jpgData):\n f = StringIO(jpgData)\n img = Image.open(f)\n\n # thumbs are corrupting in opengl, not sure why\n if img.size == (75, 56):\n img = img.resize((128, 128))\n\n # if the image has an EXIF rotation, the thumb will be\n # straightened but the image won't be. 
That should be fixed here.\n \n return img.size, img.tostring()", "def complex_decoder(self, z, reuse=True):\n\t\tz = tf.convert_to_tensor(z)\n\t\treuse=tf.AUTO_REUSE\n\n\t\tif self.vimco_samples > 1:\n\t\t\tsamples = []\n\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('decoder', reuse=reuse):\n\t\t\t\tif len(z.get_shape().as_list()) == 2:\n\t\t\t\t\t# test\n\t\t\t\t\td = tf.layers.dense(z, 256, activation=tf.nn.elu, use_bias=False, reuse=reuse, name='fc1')\t\t\n\t\t\t\t\td = tf.reshape(d, (-1, 1, 1, 256))\n\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 256, 4, padding=\"VALID\", activation=tf.nn.elu, reuse=reuse, name='deconv1')\n\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv2')\n\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv3')\n\t\t\t\t\tdeconv4 = tf.layers.conv2d_transpose(deconv3, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv4')\n\t\t\t\t\t# output channel = 3\n\t\t\t\t\tdeconv5 = tf.layers.conv2d_transpose(deconv4, 3, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv5')\n\t\t\t\t\treturn deconv5\n\t\t\t\telse:\n\t\t\t\t\t# train; iterate through one vimco sample at a time\n\t\t\t\t\tfor i in range(self.vimco_samples):\n\t\t\t\t\t\tz_sample = z[i]\n\t\t\t\t\t\td = tf.layers.dense(z_sample, 256, activation=tf.nn.elu, use_bias=False, reuse=reuse, name='fc1')\t\t\n\t\t\t\t\t\td = tf.reshape(d, (-1, 1, 1, 256))\n\t\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 256, 4, padding=\"VALID\", activation=tf.nn.elu, reuse=reuse, name='deconv1')\n\t\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv2')\n\t\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv3')\n\t\t\t\t\t\tdeconv4 = tf.layers.conv2d_transpose(deconv3, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv4')\n\t\t\t\t\t\t# output channel = 3\n\t\t\t\t\t\tdeconv5 = tf.layers.conv2d_transpose(deconv4, 3, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv5')\n\t\t\t\t\t\tsamples.append(deconv5)\n\t\tx_reconstr_logits = tf.stack(samples, axis=0)\n\t\tprint(x_reconstr_logits.get_shape())\n\t\treturn x_reconstr_logits", "def uncompress(location, target_dir, decompressor, suffix=EXTRACT_SUFFIX):\n # FIXME: do not create a sub-directory and instead strip the \"compression\"\n # extension such gz, etc. 
or introspect the archive header to get the file\n # name when present.\n if DEBUG:\n logger.debug('uncompress: ' + location)\n\n tmp_loc, warnings = uncompress_file(location, decompressor)\n\n target_location = os.path.join(target_dir, os.path.basename(location) + suffix)\n if os.path.exists(target_location):\n fileutils.delete(target_location)\n shutil.move(tmp_loc, target_location)\n return warnings", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def _decompress_data():\n\n dest_dir = get_cachedir()\n if dest_dir is None:\n print('No cache dir found, not decompressing anything.')\n return\n\n filename = _data_url.split('/')[-1]\n tarball = dest_dir / filename\n\n print(\"Trying to decompress file {}\".format(tarball))\n with tarfile.open(str(tarball), \"r:bz2\") as tar:\n tar.extractall(str(dest_dir))\n\n data_dir = dest_dir / 'data'\n pickle_files = data_dir.glob('*.pickle')\n print(\"Data directory {} contains {} pickle files\"\n .format(data_dir, len(list(pickle_files))))", "def decompress(self, file):\n\t\t\n\t\tbit_string = \"\"\n\n\t\tbyte = file.read(1)\n\t\twhile(len(byte) > 0):\n\t\t\tbyte = ord(byte)\n\t\t\tbits = bin(byte)[2:].rjust(8, '0')\n\t\t\tbit_string += bits\n\t\t\tbyte = file.read(1)\n\n\t\tencoded_text = self.remove_padding(bit_string)\n\n\t\tdecompressed_text = self.decode_text(encoded_text)\n\t\t\n\t\tprint(\"Decompressed\")\n\t\treturn decompressed_text", "def do_LZW_DeCompression(dict_of_abc, list_of_data):\n \n #https://www.youtube.com/watch?v=MQM_DsX-LBI\n \n out = []\n predchozi_out = []\n for i in range(len(list_of_data)):\n new = []\n new.extend(predchozi_out)\n if list_of_data[i] in dict_of_abc:\n o = dict_of_abc[list_of_data[i]]\n out.extend(o)\n predchozi_out = o\n \n #pokud je o list, beru z nej pouze prvni prvek\n if len(o) > 1:\n new.append(o[0])\n else:\n new.extend(o)\n\n index_founded = dict_cointains_list(dict_of_abc, new)\n if index_founded == -1:\n #pokud new neni ve slovniku, pridam ho tam\n dict_of_abc[len(dict_of_abc) +1] = new\n\n return dict_of_abc, out", "def uncompress(compressed_file, dest_dir = None):\n\n\trouting_pairs = (\n\t\t(\".tar.gz\", _uncompress_targz),\n\t\t(\".tgz\", _uncompress_targz),\n\t\t(\".tar\", _uncompress_tar),\n\t\t(\".zip\", _uncompress_zip)\n\t)\n\n\tfound_handler = None\n\tfor suffix, handler in routing_pairs:\n\t\tif compressed_file.filename.endswith(suffix):\n\t\t\tfound_handler = handler\n\t\t\tbreak\n\telse:\n\t\traise ValueError(\"Compressed file does not have known format.\")\n\n\t# If we didn't get a directory to place the uncompressed files into, create\n\t# a temporary one.\n\tif dest_dir is None:\n\t\tdest_dir = tempfile.mkdtemp()\n\n\ttempfile_handle, tempfile_path = tempfile.mkstemp()\n\tos.close(tempfile_handle)\n\n\ttry:\n\t\tcompressed_file.save(tempfile_path)\n\n\t\tfound_handler(tempfile_path, dest_dir)\n\tfinally:\n\t\tos.remove(tempfile_path)\n\n\treturn dest_dir", "def decompress(file, output, pw):\n try:\n bsc.decompress_file(file, output, pw)\n print(Fore.GREEN + \"Decompressed!\")\n except 
bsc.InvalidPasswordException:\n print(Fore.RED + \"Password is invalid!\")\n except bsc.InvalidFileFormatException:\n print(Fore.RED + \"File not compressed with BSC!\")\n except FileNotFoundError:\n print(Fore.RED + \"File not found!\")", "def decoder(self, z):\n x1 = self.dec_conv(z)\n return x1", "def decompression_huffman(compress_seq:str, add_binary:int, dict_seq_binary:dict):\r\n decompressed_seq = decompression(compress_seq)\r\n dict_bin_change = dict_change(dict_seq_binary)\r\n seq_decomp, sequence_restored = retransformation(decompressed_seq, dict_bin_change, add_binary)\r\n return seq_decomp, sequence_restored", "def fc_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 1200, activation=tf.nn.tanh)\n d2 = tf.layers.dense(d1, 1200, activation=tf.nn.tanh)\n d3 = tf.layers.dense(d2, 1200, activation=tf.nn.tanh)\n d4 = tf.layers.dense(d3, np.prod(output_shape))\n return tf.reshape(d4, shape=[-1] + output_shape)", "def decompress_pickle(file):\n data = bz2.BZ2File(file, 'rb')\n data = cPickle.load(data)\n return data", "def maybe_lzma_decompress(path) -> str:\n decompressed_path, ext = os.path.splitext(path)\n if ext != '.lzma':\n raise ValueError(\n 'Only decompressing LZMA files is supported. If the file '\n 'is LZMA compressed, rename the url to have a .lzma suffix.')\n if os.path.exists(decompressed_path):\n log(f'Reusing cached file {decompressed_path!r}')\n else:\n log(f'Decompressing {path!r} to {decompressed_path!r}')\n with lzma.open(path, 'rb') as fi:\n with open(decompressed_path, 'wb') as fo:\n shutil.copyfileobj(fi, fo)\n return decompressed_path", "def untgz(tgz_filename, out_dir):\r\n logging.info(\"Source: %s\" % tgz_filename)\r\n tgz = TgzHelper(tgz_filename, out_dir)\r\n tgz.extract()", "def decoder(self, features=[8], name=\"decoder\") -> KM.Model:\n input_tensor = KL.Input(shape=(2, 2, features[0]))\n\n decoded = input_tensor\n\n for i, feature_num in enumerate(features[1:], start=1):\n decoded = deconv_block(\n decoded, feature_num, name + f\"_deconv_{len(features)-i}\"\n )\n\n # Final reconstruction back to the original image size\n decoded = KL.Conv2DTranspose(\n 3,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=True,\n activation=\"tanh\",\n name=name + f\"_out\",\n )(decoded)\n decoded = DropBlock2D(block_size=5, keep_prob=0.8)(decoded)\n return KM.Model(inputs=input_tensor, outputs=decoded, name=name)", "def uncompress_file(location, decompressor):\n # FIXME: do not create a sub-directory and instead strip the \"compression\"\n # extension such gz, etc. 
or introspect the archive header to get the file\n # name when present.\n assert location\n assert decompressor\n\n warnings = []\n base_name = fileutils.file_base_name(location)\n target_location = os.path.join(fileutils.get_temp_dir(\n prefix='extractcode-extract-'), base_name)\n\n with decompressor(location, 'rb') as compressed:\n with open(target_location, 'wb') as uncompressed:\n buffer_size = 32 * 1024 * 1024\n while True:\n chunk = compressed.read(buffer_size)\n if not chunk:\n break\n uncompressed.write(chunk)\n\n if getattr(decompressor, 'has_trailing_garbage', False):\n warnings.append(location + ': Trailing garbage found and ignored.')\n\n return target_location, warnings", "def get_compress_and_decompress_func(compression_algorithm, compression_level=9):\n # type: (str, int) -> Tuple[Callable, Callable]\n if compression_algorithm in [\"deflate\", \"zlib\"]:\n import zlib\n\n if sys.version_info < (3, 6, 0):\n # Work around for Python <= 3.6 where compress is not a keyword argument, but a regular\n # argument\n @functools.wraps(zlib.compress)\n def compress_func(data):\n return zlib.compress(data, compression_level)\n\n else:\n compress_func = functools.partial(zlib.compress, level=compression_level) # type: ignore\n decompress_func = zlib.decompress # type: ignore\n elif compression_algorithm == \"bz2\":\n import bz2\n\n @functools.wraps(bz2.compress)\n def compress_func(data):\n return bz2.compress(data, compression_level)\n\n decompress_func = bz2.decompress # type: ignore\n elif compression_algorithm == \"zstandard\":\n import zstandard\n\n compressor = zstandard.ZstdCompressor(level=compression_level)\n decompressor = zstandard.ZstdDecompressor()\n compress_func = compressor.compress # type: ignore\n decompress_func = decompressor.decompress # type: ignore\n elif compression_algorithm == \"lz4\":\n import lz4.frame as lz4 # pylint: disable=no-name-in-module\n\n # NOTE: Java implementation which we currently use on the server side doesn't support\n # dependent block stream.\n # See https://github.com/Parsely/pykafka/issues/914 for details\n def compress_func(data):\n try:\n # For lz4 >= 0.12.0\n return lz4.compress(data, compression_level, block_linked=False)\n except TypeError:\n # For older versions\n # For earlier versions of lz4\n return lz4.compress(data, compression_level, block_mode=1)\n\n decompress_func = lz4.decompress # type: ignore\n elif compression_algorithm == \"snappy\":\n import snappy # pylint: disable=import-error\n\n compress_func = snappy.compress # type: ignore\n decompress_func = snappy.decompress # type: ignore\n elif compression_algorithm == \"brotli\":\n import brotli # pylint: disable=import-error\n\n compress_func = functools.partial(brotli.compress, quality=compression_level) # type: ignore\n decompress_func = brotli.decompress # type: ignore\n elif compression_algorithm == \"none\":\n compress_func = noop_compress\n decompress_func = noop_decompress # type: ignore\n else:\n raise ValueError(\"Unsupported algorithm: %s\" % (compression_algorithm))\n\n return compress_func, decompress_func", "def decompress(infile, path, members=None):\n with open(infile, 'rb') as inf, open(path, 'w', encoding='utf8') as tof:\n decom_str = gzip.decompress(inf.read()).decode('utf-8')\n tof.write(decom_str)", "def decimate(self, *args, **kwargs):\n return _image.image_decimate(self, *args, **kwargs)", "def _unzip(self, data):\r\n with io.BytesIO(data) as buf:\r\n with gzip.GzipFile(fileobj=buf) as unzipped:\r\n return unzipped.read()", "def 
convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]", "def DecodeFunc(self, inp_instance):\n\n def _DecodeFn():\n \"\"\"Decode call to be compiled for TPU.\"\"\"\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()\n\n self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(\n _DecodeFn,\n num_shards=self.data_parallelism,\n device_assignment=py_utils.GetTpuDeviceAssignment())\n\n if self.decode_nm:\n decode_tensors = self.decode_nm.Pack(batch_parallel_res)\n else:\n decode_tensors = py_utils.NestedMap()\n if py_utils.IsEagerMode():\n # The CPU pass through data will be from the infeed function.\n cpu_pt = {}\n else:\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return decode_tensors, cpu_pt", "def uncompress_gzip(location, target_dir):\n\n return uncompress(location, target_dir, decompressor=gzip.GzipFile)", "def _decompress_blkx(self, blkx: BLKXTable, write_path: str):\n file_name = f\"{blkx.ID}: {_slugify(blkx.Name)}\"\n file_path = os.path.join(write_path, file_name)\n\n with open(file_path, \"wb\") as target_fp:\n for blkx_chunk in blkx.BLKXChunkEntry:\n if blkx_chunk.EntryType == \"0x80000005\": # zlib\n self.dmg_fp.seek(blkx_chunk.CompressedOffset)\n target_fp.write(self.dmg_fp.read(blkx_chunk.CompressedLength))\n # TODO: support more methods", "def decompression_inversion():\n dna_seq, bin_seq, comp_seq, file_comp = binary_to_seq()\n \n #bwt reconstruction\n table = [\"\"] * len(dna_seq)\n\n for i in range(0,len(dna_seq),1):\n table = [dna_seq[i] + table[i] for i in range(0,len(dna_seq))]\n table = sorted(table)\n \n original_seq = None \n for row in table : \n if row.endswith(\"$\"):\n original_seq = row\n\n inverse_bwt = original_seq.rstrip(\"$\") \n \n \n #write the original sequence in a new created file \n file_path = os.path.splitext(file_comp)[0]\n file_inv = open(file_path + \"_decompressed_original.txt\", \"w\") \n file_inv.write(inverse_bwt) \n file_inv.close()\n \n messagebox.showinfo(\"Information\", \"Your decompressed and bwt reconstruction has been saved in \" \\\n +file_path +\"_decompressed_original.txt file.\")\n \n return dna_seq, comp_seq, inverse_bwt" ]
[ "0.8031564", "0.7931041", "0.75796515", "0.7512713", "0.71366394", "0.68911856", "0.66436976", "0.6177685", "0.60084254", "0.5998883", "0.5772824", "0.5761908", "0.5596383", "0.55957705", "0.5553985", "0.54953575", "0.5484418", "0.54572964", "0.54572964", "0.5454716", "0.5433453", "0.5411343", "0.5391271", "0.5338415", "0.52969426", "0.5290805", "0.5290805", "0.5260717", "0.5238364", "0.5206854", "0.52055424", "0.5152955", "0.51524246", "0.5152348", "0.51338094", "0.5119792", "0.507447", "0.5073052", "0.50624794", "0.5049761", "0.5048622", "0.5041795", "0.5040703", "0.50144804", "0.4994402", "0.49906734", "0.49865335", "0.49840555", "0.49794394", "0.49772668", "0.49579677", "0.49563178", "0.4936525", "0.48989695", "0.48866335", "0.48812464", "0.48794818", "0.4871146", "0.4869671", "0.48661798", "0.48527154", "0.48429215", "0.48420793", "0.4811409", "0.48034576", "0.48007363", "0.4754709", "0.4753654", "0.4744342", "0.4734678", "0.47295833", "0.47141415", "0.47103375", "0.47006404", "0.47005227", "0.47005227", "0.4695596", "0.46930984", "0.46869516", "0.46656466", "0.4649477", "0.46390828", "0.46366167", "0.46147126", "0.4612166", "0.4611239", "0.46094608", "0.45827815", "0.4577734", "0.45770547", "0.45737222", "0.45600677", "0.45596206", "0.4538918", "0.4534075", "0.4532304", "0.45120922", "0.4501068", "0.44942826", "0.446942" ]
0.71033555
5
Compresses a tensor and returns it with the context needed to decompress it.
def encode(self, seq):
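As a quick illustration of the compress/decompress-with-context pattern this query describes, here is a minimal, self-contained Python sketch. It assumes a float16 down-cast as the compression step; the class name FP16Compressor, the NumPy backend, and the method signatures are illustrative assumptions echoing the float16 compressor that appears among the negatives below, not this record's actual document.

import numpy as np

class FP16Compressor:
    """Illustrative sketch: compress() returns the (possibly down-cast)
    tensor together with the context -- here, the original dtype --
    that decompress() later needs to restore it."""

    def compress(self, tensor):
        tensor_compressed = tensor
        if np.issubdtype(tensor.dtype, np.floating):
            # Only down-cast floating-point tensors; others pass through.
            tensor_compressed = tensor.astype(np.float16)
        # The original dtype is the context needed for decompression.
        return tensor_compressed, tensor.dtype

    def decompress(self, tensor, ctx):
        # Restore the dtype recorded at compression time.
        return tensor.astype(ctx)

# Round-trip usage: the context carries the original dtype back.
t = np.random.rand(4, 4).astype(np.float32)
compressor = FP16Compressor()
compressed, ctx = compressor.compress(t)
assert compressed.dtype == np.float16
restored = compressor.decompress(compressed, ctx)
assert restored.dtype == np.float32

Returning the original dtype as the context keeps compress and decompress decoupled: a caller can ship the compressed tensor and the context separately and still restore the exact input type.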
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def decompress(self, tensor, ctx, *args, **kwargs):\n return tensor", "def compress(self, tensor, *args, **kwargs):\n return tensor, None", "def compress(self, tensor):", "def compress(self, tensor, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor_decompressed = tensor\n dtype = ctx\n if 'float' in str(dtype):\n tensor_decompressed = tensor.astype(dtype, copy=False)\n return tensor_decompressed", "def decompress(self, tensor, ctx, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor = self.compressor.decompress(tensor, ctx, *args, **kwargs)\n \n # uncompressed gradients need to do nag explicitly\n if not self.inited:\n if size(tensor.shape) < self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.nag = True\n self.inited = True\n\n if self.nag:\n self.mom += tensor\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n return tensor", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed = tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def decompress(self, tensor, ctx, *args, **kwargs):\n if \"x\" not in kwargs:\n raise ValueError(\"x is missing\")\n\n x = kwargs[\"x\"].astype(tensor.dtype, copy=False) \n \n if not self.inited:\n self.cache = nd.zeros_like(tensor)\n if size(tensor.shape) >= self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.wdmom = True\n self.inited = True\n \n # weight decay\n nd._internal._mul_scalar(x, self.wd, out=self.cache)\n\n # weight decay momentum\n if self.wdmom:\n self.mom += self.cache\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n tensor += self.cache\n return self.compressor.decompress(tensor, ctx, *args, **kwargs)", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def decompress(self, tensors):", "def compress(value):\n pickled = pickle_util.dump(value)\n return zlib.compress(pickled)", "def decompress(args):\n # Three integers for tensor shapes + nine encoded strings.\n np_dtypes = [np.integer] * 3 + [np.bytes_] * 9\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n arrays = packed.unpack_from_np_dtypes(np_dtypes)\n\n # Build model and restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n curr_decoded = model.decompress(arrays)\n row=int(args.input_file.split('/')[-1].split('.')[0])\n\n # Write reconstructed images out as PNG files.\n for col in range(np.shape(curr_decoded)[1]):\n img = curr_decoded[0,col,:,:,:]/255\n save_img(args.output_file,0,img,row,col+1)", "def create_compressed_model(\n model: Module,\n config: NNCFConfig,\n compression_state: Optional[Dict[str, Any]] = None,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,\n dump_graphs=True,\n) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:\n if isinstance(model, NNCFNetwork):\n raise RuntimeError(\n \"The model 
object has already been compressed.\\n\"\n \"NNCF for PyTorch modifies the model object in-place, and repeat calls to \"\n \"`nncf.torch.create_compressed_model` with the same model object passed as argument \"\n \"will lead to an incorrect attempt to compress the model twice.\\n\"\n \"Make sure that the model object you are passing has not already been compressed (for \"\n \"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\\n\"\n \"If you are encountering this in a Jupyter notebook context - make sure that when \"\n \"re-running cells involving `nncf.torch.create_compressed_model` the original model object \"\n \"is also re-created (via constructor call).\"\n )\n\n if config.get(\"target_device\") == \"VPU\":\n warning_deprecated(\"VPU device is deprecated and will no longer be supported in the future.\")\n\n set_debug_log_dir(config.get(\"log_dir\", \".\"))\n\n is_legacy_model_state_dict = (\n compression_state is not None\n and BaseController.BUILDER_STATE not in compression_state\n and BaseController.CONTROLLER_STATE not in compression_state\n )\n maybe_convert_legacy_names_in_compress_state(compression_state)\n\n should_init = compression_state is None\n\n nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)\n\n if dump_graphs and is_main_process():\n nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"original_graph.dot\"))\n builder = create_compression_algorithm_builder(config, should_init)\n\n is_state_loadable = not is_legacy_model_state_dict and compression_state is not None\n if is_state_loadable:\n builder.load_state(compression_state[BaseController.BUILDER_STATE])\n compressed_model = builder.apply_to(nncf_network)\n compression_ctrl = builder.build_controller(compressed_model)\n\n if is_state_loadable:\n compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])\n\n compressed_model.nncf.set_compression_controller(compression_ctrl)\n\n # Required to ensure that the model leaving create_compressed_model has correct compressed graph.\n # In particular, this is currently required for correct functioning of RNNs.\n compressed_model.nncf.rebuild_graph()\n\n try:\n if is_legacy_model_state_dict:\n from nncf.torch import load_state # pylint: disable=cyclic-import\n\n state_dict_to_load = compression_state.get(\"state_dict\", compression_state)\n load_state(compressed_model, state_dict_to_load, is_resume=True)\n finally:\n if dump_graphs and is_main_process():\n compressed_model_graph = compressed_model.nncf.get_graph()\n compressed_model_graph.visualize_graph(osp.join(config.get(\"log_dir\", \".\"), \"compressed_graph.dot\"))\n\n synchronize_all_processes_in_distributed_mode()\n return compression_ctrl, compressed_model", "def gzdeflate():\n return zlib.compress(val)", "def compress(self, *args):\n return _osgAnimation.Vec3Packed_compress(self, *args)", "def compress(condition, a, axis=None, out=None):\n return a.compress(condition, axis, out)", "def Compress(indata, algo, with_header=True):\n if algo == 'none':\n return indata\n fname = GetOutputFilename('%s.comp.tmp' % algo)\n WriteFile(fname, indata)\n if algo == 'lz4':\n data = Run('lz4', '--no-frame-crc', '-c', fname, binary=True)\n # cbfstool uses a very old version of lzma\n elif algo == 'lzma':\n outfname = GetOutputFilename('%s.comp.otmp' % algo)\n Run('lzma_alone', 'e', fname, outfname, '-lc1', '-lp0', '-pb0', '-d8')\n data = ReadFile(outfname)\n elif algo == 'gzip':\n data = 
Run('gzip', '-c', fname, binary=True)\n else:\n raise ValueError(\"Unknown algorithm '%s'\" % algo)\n if with_header:\n hdr = struct.pack('<I', len(data))\n data = hdr + data\n return data", "def compress(emb):\n if params.sum_word_vecs:\n return np.sum(emb, axis=0)\n if params.max_pool_word_vecs:\n return np.amax(emb, axis=0)\n if params.concat_word_vecs:\n return concat_word_vecs(emb, params.max_transcript_len)\n if params.avg_word_vecs:\n return np.mean(emb, axis=0)", "def encode(self, compress=0):\n raw = bytes(self._encode())\n return gzip.compress(raw, compress) if compress else raw", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def compress(value):\n\n process = Popen([\"xz\", \"--compress\", \"--force\"], stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def apply_compressed_sensing(self, inputs, rng):\n print('using compressed sensing!')\n train_path = os.path.join(\n self.data_dir, 'assist{0}-{1}'.format(self.which_year, 'train'))\n\n if self.which_set == 'test':\n loaded = np.load(train_path + '-compression-matrix.npz')\n self.compress_matrix = loaded['compress_matrix']\n self.compress_dim = self.compress_matrix.shape[1]\n elif self.which_set == 'train':\n self.compress_matrix = self.make_compression_matrix(train_path, rng)\n\n inputs = self.compress_inputs(inputs)\n return inputs", "def _get_compressed(self):\n assert self.compression_type != CompressionType.NONE\n tmp_mset = MessageSet(messages=self._messages)\n uncompressed = bytearray(len(tmp_mset))\n tmp_mset.pack_into(uncompressed, 0)\n if self.compression_type == CompressionType.GZIP:\n compressed = compression.encode_gzip(buffer(uncompressed))\n elif self.compression_type == CompressionType.SNAPPY:\n compressed = compression.encode_snappy(buffer(uncompressed))\n else:\n raise TypeError(\"Unknown compression: %s\" % self.compression_type)\n return Message(compressed, compression_type=self.compression_type)", "def compress_weights(model: torch.nn.Module, use_fake_quantize: bool = False) -> torch.nn.Module:\n compressed_model, _ = replace_modules_by_nncf_modules(model)\n insert_pre_compression_operations(model, use_fake_quantize)\n\n return compressed_model", "def _compress_content(self, content):\n zbuf = io.BytesIO()\n zfile = gzip.GzipFile(mode=\"wb\", compresslevel=9, fileobj=zbuf)\n\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n\n content.file = zbuf\n content.seek(0)\n\n return content", "def get_composite_image(\n self,\n labels=None,\n compress_dim=300,\n num_channels=3,\n num_of_images=\"all\",\n sample=False,\n reverse=False,\n ):\n compressed_img_dict = {}\n img_data = self.image_data.rgb_dict\n if not labels:\n labels = img_data.keys()\n for label in labels:\n self.log.info(label + \" is being compressed.\")\n total_images = len(img_data[label])\n if num_of_images == \"all\":\n vectors = img_data[label]\n elif type(num_of_images) == int:\n vectors = img_data[label]\n if sample:\n vectors = random.sample(vectors, num_of_images)\n if reverse:\n vectors.reverse()\n vectors = vectors[0:num_of_images]\n\n compressed_img_dict[label] = np.zeros(\n (compress_dim, compress_dim, num_channels)\n )\n compressed_img_dict[label] = np.sum(vectors, axis=0) / (1.0 * len(vectors))\n\n self.compressed_img_dict = compressed_img_dict\n return compressed_img_dict", "def compressBuffer(self, buffer):\r\n # http://jython.xhaus.com/http-compression-in-python-and-jython/\r\n zbuf = 
cStringIO.StringIO()\r\n zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)\r\n zfile.write(buffer)\r\n zfile.close()\r\n return zbuf.getvalue()", "def compressed_image(self):\n return self._compressed_image", "def make_compression_matrix(self, train_path, rng):\n self.compress_dim = 100 # value used in original DKT paper\n if rng:\n compress_matrix = rng.randn(self.encoding_dim, self.compress_dim)\n else:\n compress_matrix = np.random.randn(self.encoding_dim, self.compress_dim)\n\n np.savez(train_path + '-compression-matrix', compress_matrix=compress_matrix)\n return compress_matrix", "def compress_zlib(self, string):\n #encode the input sting\n self.string = string.encode()\n return zlib.compress(self.string)", "def compress(self, src, dst):\n info = readelf_get_info(src)\n starting_size = os.path.getsize(src)\n if starting_size != info[\"size\"]:\n raise RuntimeError(\"size of file '%s' differs from header claim: %i != %i\" %\n (src, starting_size, info[\"size\"]))\n rfd = open(src, \"rb\")\n wfd = open(dst, \"wb\")\n data = rfd.read(starting_size)\n wfd.write(data[info[\"entry\"]:])\n rfd.close()\n wfd.close()\n self.__uncompressed_size = len(data) - info[\"entry\"]\n if is_verbose():\n print(\"Wrote compressable program block '%s': %i bytes\" % (dst, self.__uncompressed_size))\n self.__contexts = []\n self.__weights = []\n (so, se) = run_command([self.__command, dst])\n lines = so.split(\"\\n\")\n for ii in lines:\n terms = ii.split()\n if terms and terms[0].startswith(\"Final\"):\n compressed_size = int(terms[1])\n for jj in terms[2:]:\n individual_term = jj.split(\"*\")\n self.__weights += [int(individual_term[0], 10)]\n self.__contexts += [int(individual_term[1], 16)]\n if is_verbose():\n print(\"Program block compressed into '%s': %i bytes\" % (dst + \".pack\", compressed_size))\n print(\"Compression weights: %s\" % (str(self.__weights)))\n print(\"Compression contexts: %s\" % (str(self.__contexts)))\n rfd = open(dst + \".pack\", \"rb\")\n compressed_contexts = []\n compressed_weights = []\n uncompressed_size = rfd.read(4)\n uncompressed_size = (struct.unpack(\"I\", uncompressed_size))[0]\n if uncompressed_size != self.__uncompressed_size:\n raise RuntimeError(\"size given to packer does not match size information in file: %i != %i\" %\n (self.__uncompressed_size, uncompressed_size))\n context_count = rfd.read(1)\n context_count = (struct.unpack(\"B\", context_count))[0]\n for ii in range(context_count):\n compressed_weights += struct.unpack(\"B\", rfd.read(1))\n for ii in range(context_count):\n compressed_contexts += struct.unpack(\"B\", rfd.read(1))\n if compressed_contexts != self.__contexts:\n raise RuntimeError(\"contexts reported by packer do not match context information in file: %s != %s\" %\n (str(self.__contexts), str(compressed_contexts)))\n if compressed_weights != self.__weights:\n raise RuntimeError(\"weights reported by packer do not match weight information in file: %s != %s\" %\n (str(self.__weights), str(compressed_weights)))\n read_data = rfd.read()\n rfd.close()\n if len(read_data) != compressed_size:\n raise RuntimeError(\"size reported by packer does not match length of file: %i != %i\" %\n (compressed_size, len(read_data)))\n self.__data = []\n for ii in read_data:\n self.__data += struct.unpack(\"B\", ii)", "def compress_file(compression, pretty, src, dst):\n str_tail = \"sed 1d\"\n str_cleanup = \";exit\"\n if pretty:\n str_tail = \"tail -n+2\"\n str_cleanup = \";rm ~;exit\"\n if \"lzma\" == compression:\n command = [\"xz\", 
\"--format=lzma\", \"--lzma1=preset=9e,lc=1,lp=0,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|lzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"raw\" == compression:\n command = [\"xz\", \"-9\", \"--extreme\", \"--format=raw\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat -F raw>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n elif \"xz\" == compression:\n command = [\"xz\", \"--format=xz\", \"--lzma2=preset=9e,lc=1,pb=0\", \"--stdout\"]\n header = \"HOME=/tmp/i;%s $0|xzcat>~;chmod +x ~;~%s\" % (str_tail, str_cleanup)\n else:\n raise RuntimeError(\"unknown compression format '%s'\" % compression)\n (compressed, se) = run_command(command + [src], False)\n wfd = open(dst, \"wb\")\n wfd.write((header + \"\\n\").encode())\n wfd.write(compressed)\n wfd.close()\n make_executable(dst)\n print(\"Wrote '%s': %i bytes\" % (dst, os.path.getsize(dst)))", "def compress_to_tgz(in_path, tgz_fp):\n t = tarfile.open(name = tgz_fp, mode = 'w:gz')\n t.add(in_path, path.basename(in_path))\n t.close()", "def compressible(f):\n @wraps(f)\n def compressor(*args, **kwargs):\n @flask.after_this_request\n def compress(response):\n if (response.status_code < 200 or\n response.status_code >= 300 or\n 'Content-Encoding' in response.headers):\n # Don't encode anything other than a 2xx response\n # code. Don't encode a response that's\n # already been encoded.\n return response\n\n accept_encoding = flask.request.headers.get('Accept-Encoding', '')\n if not 'gzip' in accept_encoding.lower():\n return response\n\n # At this point we know we're going to be changing the\n # outgoing response.\n\n # TODO: I understand what direct_passthrough does, but am\n # not sure what it has to do with this, and commenting it\n # out doesn't change the results or cause tests to\n # fail. 
This is pure copy-and-paste magic.\n response.direct_passthrough = False\n\n buffer = BytesIO()\n gzipped = gzip.GzipFile(mode='wb', fileobj=buffer)\n gzipped.write(response.data)\n gzipped.close()\n response.data = buffer.getvalue()\n\n response.headers['Content-Encoding'] = 'gzip'\n response.vary.add('Accept-Encoding')\n response.headers['Content-Length'] = len(response.data)\n\n return response\n\n return f(*args, **kwargs)\n return compressor", "def compress(path, path_out, terms, iterations, annotate, silent):\n if terms is None:\n terms = DEFAULT_TERMS\n\n if not silent:\n print(f\"Compressing image...\")\n\n result = compress_image_to_file(path=path, terms=terms,\n iterations=iterations,\n path_out=path_out,\n annotate=annotate)\n\n output_path = result['output_path']\n\n if not silent:\n print(f\"Compressed to:\\n{output_path}\")\n print(f\"Terms in singular value expansion: {terms}\")\n print(f\"Power method iterations: {result['iterations']}\")\n print(f\"Compression ratio: {result['compression_ratio']}\")\n\n return result", "def non_local_block(tensor, intermediate_dim=None, compression=2,\r\n mode='embedded', add_residual=True):\r\n ip_shape = tensor.shape\r\n\r\n if mode not in ['gaussian', 'embedded', 'dot', 'concatenate']:\r\n raise ValueError('`mode` must be one of `gaussian`, `embedded`, `dot` or `concatenate`')\r\n\r\n if compression is None:\r\n compression = 1\r\n\r\n dim1, dim2, dim3 = None, None, None\r\n\r\n if len(ip_shape) == 4: # spatial / image data\r\n batchsize, dim1, dim2, channels = ip_shape\r\n else:\r\n raise ValueError('Input dimension has to be either 3 (temporal), 4 (spatial) or 5 (spatio-temporal)')\r\n\r\n # verify correct intermediate dimension specified\r\n if intermediate_dim is None:\r\n intermediate_dim = channels // 2\r\n\r\n if intermediate_dim < 1:\r\n intermediate_dim = 1\r\n\r\n else:\r\n intermediate_dim = int(intermediate_dim)\r\n\r\n if intermediate_dim < 1:\r\n raise ValueError('`intermediate_dim` must be either `None` or positive integer greater than 1.')\r\n theta = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(tensor)\r\n theta = K.reshape(theta,[-1, intermediate_dim])\r\n\r\n # phi path\r\n phi = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(tensor)\r\n phi = K.reshape(phi,[-1, intermediate_dim])\r\n\r\n if compression > 1:\r\n # shielded computation\r\n phi = MaxPooling1D(compression)(phi)\r\n\r\n f = K.dot(theta, phi)\r\n f = Activation('softmax')(f)\r\n\r\n # g path\r\n g = Conv2D(filters=intermediate_dim, kernel_size=(1, 1), strides=(\r\n 1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(tensor)\r\n g = K.reshape(g, [-1, intermediate_dim])\r\n\r\n if compression > 1 and mode == 'embedded':\r\n # shielded computation\r\n g = MaxPooling1D(compression)(g)\r\n\r\n # compute output path\r\n y = K.dot(f, g)\r\n\r\n # reshape to input tensor format\r\n y = K.reshape(y,[intermediate_dim, dim1, dim2])\r\n\r\n # project filters\r\n y = Conv2D(filters=intermediate_dim,kernel_size=(1,1),strides=(1,1),padding='same',use_bias=False,kernel_initializer='he_normal')(y)\r\n\r\n # residual connection\r\n if add_residual:\r\n y = add([tensor, y])\r\n\r\n return y", "def compress(self, *args):\n return _osgAnimation.Vec3ArrayPacked_compress(self, *args)", "def __handle_decompression(self, x):\n if self.__compress:\n return zlib.decompress(x)\n return x", "def 
compress_encode(value):\n return base64.b64encode(zlib.compress(value.encode(\"ascii\"))).decode(\"ascii\")", "def gzip_compress(data):\n s = BytesIO()\n g = gzip.GzipFile(fileobj=s, mode='wb')\n g.write(data)\n g.close()\n return s.getvalue()", "def image_compress(img):\n img_mode = img.mode\n img_size = img.size\n img = img.tobytes()\n zlib.compress(img)\n\n return img_mode, img_size, img", "def get_compress_and_decompress_func(compression_algorithm, compression_level=9):\n # type: (str, int) -> Tuple[Callable, Callable]\n if compression_algorithm in [\"deflate\", \"zlib\"]:\n import zlib\n\n if sys.version_info < (3, 6, 0):\n # Work around for Python <= 3.6 where compress is not a keyword argument, but a regular\n # argument\n @functools.wraps(zlib.compress)\n def compress_func(data):\n return zlib.compress(data, compression_level)\n\n else:\n compress_func = functools.partial(zlib.compress, level=compression_level) # type: ignore\n decompress_func = zlib.decompress # type: ignore\n elif compression_algorithm == \"bz2\":\n import bz2\n\n @functools.wraps(bz2.compress)\n def compress_func(data):\n return bz2.compress(data, compression_level)\n\n decompress_func = bz2.decompress # type: ignore\n elif compression_algorithm == \"zstandard\":\n import zstandard\n\n compressor = zstandard.ZstdCompressor(level=compression_level)\n decompressor = zstandard.ZstdDecompressor()\n compress_func = compressor.compress # type: ignore\n decompress_func = decompressor.decompress # type: ignore\n elif compression_algorithm == \"lz4\":\n import lz4.frame as lz4 # pylint: disable=no-name-in-module\n\n # NOTE: Java implementation which we currently use on the server side doesn't support\n # dependent block stream.\n # See https://github.com/Parsely/pykafka/issues/914 for details\n def compress_func(data):\n try:\n # For lz4 >= 0.12.0\n return lz4.compress(data, compression_level, block_linked=False)\n except TypeError:\n # For older versions\n # For earlier versions of lz4\n return lz4.compress(data, compression_level, block_mode=1)\n\n decompress_func = lz4.decompress # type: ignore\n elif compression_algorithm == \"snappy\":\n import snappy # pylint: disable=import-error\n\n compress_func = snappy.compress # type: ignore\n decompress_func = snappy.decompress # type: ignore\n elif compression_algorithm == \"brotli\":\n import brotli # pylint: disable=import-error\n\n compress_func = functools.partial(brotli.compress, quality=compression_level) # type: ignore\n decompress_func = brotli.decompress # type: ignore\n elif compression_algorithm == \"none\":\n compress_func = noop_compress\n decompress_func = noop_decompress # type: ignore\n else:\n raise ValueError(\"Unsupported algorithm: %s\" % (compression_algorithm))\n\n return compress_func, decompress_func", "def compress():\n local('python manage.py compress \\\n --settings={{ project_name }}.settings.production')", "def include_compression(nets, compression='none', linear_max=796.87416837456942, input_node_name='cochleagram_no_compression', output_node_name='cochleagram', linear_params=None, rate_level_kwargs={}, custom_compression_op=None):\n # compression of the cochleagram\n if compression=='quarter':\n nets[output_node_name] = tf.sqrt(tf.sqrt(nets[input_node_name], name=output_node_name))\n elif compression=='quarter_plus':\n nets[output_node_name] = tf.sqrt(tf.sqrt(nets[input_node_name]+1e-01, name=output_node_name))\n elif compression=='point3':\n nets[output_node_name] = tf.pow(nets[input_node_name],0.3, name=output_node_name)\n elif 
compression=='stable_point3':\n nets[output_node_name] = tf.identity(stable_power_compression(nets[input_node_name]*linear_max),name=output_node_name) \n elif compression=='stable_point3_norm_grads':\n nets[output_node_name] = tf.identity(stable_power_compression_norm_grad(nets[input_node_name]*linear_max),name=output_node_name) \n elif compression=='linearbelow1':\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, tf.pow(nets[input_node_name]*linear_max,0.3), name=output_node_name)\n elif compression=='stable_linearbelow1':\n nets['stable_power_compressed_%s'%output_node_name] = tf.identity(stable_power_compression(nets[input_node_name]*linear_max),name='stable_power_compressed_%s'%output_node_name)\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, nets['stable_power_compressed_%s'%output_node_name], name=output_node_name)\n elif compression=='linearbelow1sqrt':\n nets[output_node_name] = tf.where((nets[input_node_name]*linear_max)<1, nets[input_node_name]*linear_max, tf.sqrt(nets[input_node_name]*linear_max), name=output_node_name)\n elif compression=='quarter_clipped':\n nets[output_node_name] = tf.sqrt(tf.sqrt(tf.maximum(nets[input_node_name],1e-01), name=output_node_name))\n elif compression=='none':\n nets[output_node_name] = nets[input_node_name]\n elif compression=='sqrt':\n nets[output_node_name] = tf.sqrt(nets[input_node_name], name=output_node_name)\n elif compression=='dB': # NOTE: this compression does not work well for the backwards pass, results in nans\n nets[output_node_name + '_noclipped'] = 20 * tflog10(nets[input_node_name])/tf.reduce_max(nets[input_node_name])\n nets[output_node_name] = tf.maximum(nets[output_node_name + '_noclipped'], -60)\n elif compression=='dB_plus': # NOTE: this compression does not work well for the backwards pass, results in nans\n nets[output_node_name + '_noclipped'] = 20 * tflog10(nets[input_node_name]+1)/tf.reduce_max(nets[input_node_name]+1)\n nets[output_node_name] = tf.maximum(nets[output_node_name + '_noclipped'], -60, name=output_node_name)\n elif compression=='linear':\n assert (type(linear_params)==list) and len(linear_params)==2, \"Specifying linear compression but not specifying the compression parameters in linear_params=[m, b]\"\n nets[output_node_name] = linear_params[0]*nets[input_node_name] + linear_params[1]\n elif compression=='rate_level':\n nets[output_node_name] = AN_rate_level_function(nets[input_node_name], name=output_node_name, **rate_level_kwargs)\n elif compression=='custom':\n nets[output_node_name] = custom_compression_op(nets[input_node_name], name=output_node_name)\n\n return nets", "def compress(block):\n\n # Transform RGB to YCbCr\n yc_bl = np.zeros((8, 8, 3), dtype=np.int8)\n \n for i in range(8):\n for j in range(8):\n rgb_cmp = np.asmatrix(block[i][j])\n y,cb,cr = (np.array((rgb_cmp*yc_mat+yc_pad).astype(np.uint8))[0]-128).astype(np.int8)\n yc_bl[i][j] = np.array([y, cb, cr])\n \n # Switch YCbCr block to 3 block for each Y, Cb, Cr component and calculate DCT for them\n y_dct = sf.dct(yc_bl[:,:,0], norm='ortho')\n cb_dct = sf.dct(yc_bl[:,:,1], norm='ortho')\n cr_dct = sf.dct(yc_bl[:,:,2], norm='ortho')\n \n # From DCT data to quantization data\n y_quant = np.round(y_dct / quant_tbl).astype(np.int8)\n cb_quant = np.round(cb_dct / quant_tbl).astype(np.int8)\n cr_quant = np.round(cr_dct / quant_tbl).astype(np.int8)\n \n # Convert 8x8 block to zigzag 1x64 block\n y_zz = zig_zag(y_quant)\n cb_zz = 
zig_zag(cb_quant)\n cr_zz = zig_zag(cr_quant)\n \n # Calc DC and AC, put together to list\n y_cmp, cb_cmp, cr_cmp = dc_and_ac_calc(y_zz, cb_zz, cr_zz)\n \n # Encode using entropy coding\n y_encode = encode(y_cmp)\n cb_encode = encode(cb_cmp)\n cr_encode = encode(cr_cmp)\n \n return [y_encode, cb_encode, cr_encode]", "def optimize(data):\n try:\n optimized_data = tinify.from_buffer(data).to_buffer()\n return optimized_data\n except tinify.AccountError as e:\n # This exception may rise, since a Free account is being used (only 500 requests/month)\n logger.error(\"There is a problem with the TinyPNG Account: {0}\".format(e))\n except tinify.ServerError as e:\n logger.error(\"There seem to be problems in the compression server: {0}\".format(e))\n except Exception as e:\n logger.error(\"The image could not be compressed: {0}\".format(e))\n finally:\n return data", "def Compress(input_filename, output_filename):\n _Write(zlib.compress(_Read(input_filename)), output_filename)", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def tar_gz_compress(self, destination):\n\n if destination is not None and isinstance(destination, str):\n with tarfile_open(destination, \"w:gz\") as tar:\n tar.add(self.file)", "def _compress_string(content):\n zbuf = StringIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n zfile.write(content)\n zfile.close()\n return zbuf.getvalue()", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def compress_file(path):\n\n process = Popen([\"xz\", \"--compress\", \"--force\", \"--stdout\", path], stdout=PIPE)\n return process.communicate()[0]", "def _compress_snapshots(self):\n\n\t\tC_shape = (self._snapshots.shape[1], self._snapshots.shape[0])\n\n\t\tif self.compression_matrix is 'uniform':\n\t\t\tC = np.random.uniform(0, 1, size=(C_shape))\n\t\telif self.compression_matrix is 'sparse':\n\t\t\tC = scipy.sparse.random(*C_shape, density=1.)\n\t\telif self.compression_matrix is 'normal':\n\t\t\tC = np.random.normal(0, 1, size=(C_shape))\n\t\telif self.compression_matrix is 'sample':\n\t\t\tC = np.zeros(C_shape)\n\t\t\tC[np.arange(self._snapshots.shape[1]),\n\t\t\t np.random.choice(*self._snapshots.shape, replace=False)] = 1.\n\t\telse:\n\t\t\tC = self.compression_matrix\n\n\t\t# compress the matrix\n\t\tY = C.dot(self._snapshots)\n\n\t\treturn Y", "def oh_compress(key, seed, blocks, secondary):\n for block, block_size in blocks:\n size_tag = block_size % (CHUNK_SIZE * BLOCK_SIZE)\n tag = (seed ^ size_tag) * W\n yield oh_compress_one_block(key, block, tag, secondary)", "def compress(value):\n\t# type: (Any, ) -> Any\n\n\t# sets are not processed because they cannot contain lists or bytearrays anyway.\n\n\tif isinstance(value, (tuple, list)): # tuple *can* contain mutables\n\t\treturn tuple(compress(x) for x in value)\n\telif isinstance(value, bytearray):\n\t\treturn bytes(value) # bytearray can only be bytes or List[int] right?\n\telif isinstance(value, dict):\n\t\treturn {k: compress(v) for k, v in value.items()}\n\telse:\n\t\treturn value", "def convt_block(layer, concat, fsize, name):\n with tf.variable_scope(name):\n\n layer = tf.layers.conv2d_transpose(layer, filters=fsize, kernel_size=2, strides=2, \n kernel_regularizer=l2_reg(1e-1), name='convt')\n layer = tf.concat([layer, concat], axis=-1, name='concat')\n\n return layer", "def 
compress(content, threshold=512):\n compression_enabled = CONF.logging.http_request_compression\n\n if is_dict(content):\n for key in content:\n content[key] = compress(content[key])\n if is_string(content) and compression_enabled:\n if len(content) > threshold:\n less_data = content[:50]\n compressed_data = base64.b64encode(\n zlib.compress(bytes(content.encode(\"utf-8\"))))\n if not six.PY2:\n compressed_data = str(compressed_data.decode(\"utf-8\"))\n return pprint.pformat(\n \"\\n***Content compressed by Syntribos.***\"\n \"\\nFirst fifty characters of content:\\n\"\n \"***{data}***\"\n \"\\nBase64 encoded compressed content:\\n\"\n \"{compressed}\"\n \"\\n***End of compressed content.***\\n\".format(\n data=less_data, compressed=compressed_data))\n return content", "def compress(self, x,f1,f2,f3,f4,f5,f6,f7,f8,outputfile,path,row):\n mse, bpp, x_hat, pack = self._run(\"compress\", x=x,feature1=f1,feature2=f2,feature3=f3,feature4=f4,\n feature5=f5,feature6=f6,feature7=f7,feature8=f8)\n\n # Write a binary file with the shape information and the compressed string.\n packed = tfc.PackedTensors()\n tensors, arrays = zip(*pack)\n packed.pack(tensors, arrays)\n with open(outputfile, \"wb\") as f:\n f.write(packed.string)\n\n x *= 255 # x_hat is already in the [0..255] range\n psnr = tf.squeeze(tf.image.psnr(x_hat, x, 255))\n msssim = tf.squeeze(tf.image.ssim_multiscale(x_hat, x, 255))\n\n # The actual bits per pixel including overhead.\n x_shape = tf.shape(x)\n num_pixels = tf.cast(tf.reduce_prod(x_shape[:-1]), dtype=tf.float32)\n packed_bpp = len(packed.string) * 8 / num_pixels\n \n for col in range(np.shape(x_hat)[1]):\n img = x_hat[0,col,:,:,:]/255 \n save_img(path,0,img,row,col+1)\n return x_hat, psnr, msssim, packed_bpp", "def compression_source(self) -> CompressionSource:\n return self._compression_source", "def compress(uncompressed):\r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((chr(i), i) for i in range(dict_size))\r\n # in Python 3: dictionary = {chr(i): i for i in range(dict_size)}\r\n \r\n w = \"\"\r\n result = []\r\n for c in uncompressed:\r\n wc = w + c\r\n if wc in dictionary:\r\n w = wc\r\n else:\r\n result.append(dictionary[w])\r\n # Add wc to the dictionary.\r\n dictionary[wc] = dict_size\r\n dict_size += 1\r\n w = c\r\n \r\n # Output the code for w.\r\n if w:\r\n result.append(dictionary[w])\r\n return result", "def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()", "def pack_tensor(tensor, bit):\r\n tmp_tensor_shape = list(tensor.shape)\r\n tmp_tensor_shape.append(1)\r\n tmp_tensor = np.zeros(tuple(tmp_tensor_shape), dtype=np.uint8)\r\n tmp_tensor[..., 0] = tensor\r\n\r\n binary_tensor = np.unpackbits(tmp_tensor, axis=-1)\r\n binary_tensor_shape = tmp_tensor_shape\r\n binary_tensor_shape[-1] = 8\r\n\r\n i = find_optimal_compress_dim(tensor.shape, bit)\r\n 
packed_dim = int(tensor.shape[i] * bit / 8) + (tensor.shape[i] * bit % 8 > 0)\r\n binary_packed_tensor_shape = binary_tensor_shape\r\n binary_packed_tensor_shape[i] = packed_dim\r\n\r\n packed_tensor = np.zeros(tuple(binary_packed_tensor_shape), dtype=np.uint8)\r\n padded_binary_tensor = binary_tensor[..., 8-bit: 8].reshape((-1))\r\n padding_width = packed_tensor.size - padded_binary_tensor.size\r\n padded_binary_tensor = np.pad(padded_binary_tensor, (0, padding_width), 'constant', constant_values=(0, 0))\r\n binary_packed_tensor = padded_binary_tensor.reshape(tuple(binary_packed_tensor_shape))\r\n\r\n packed_tensor = np.packbits(binary_packed_tensor, axis=-1)\r\n packed_tensor = packed_tensor[..., 0]\r\n\r\n return packed_tensor", "def swish_(t: Tensor) -> Tensor:\n ctx = get_current_context()\n g = ctx.graph\n pb_g = g._pb_graph\n\n check_in_graph(g, t=t)\n\n settings = ctx._get_op_settings(\"swish_inplace\")\n op = pb_g.createConnectedOp_SwishInplaceOp(\n {0: t.id}, \n {\n 0: g._create_tensor_id(\"swish_inplace_out\")\n }, \n settings\n )\n\n return Tensor._from_pb_tensor(op.outTensor(0))", "def gzinflate(val):\n return zlib.decompress(val)", "def compress(dbconfig, target_name):\n fmt = dbconfig.get(\"format\", None)\n if fmt in [\"tarball\", \".tar.gz\", \"tar.gz\"]:\n info(\"zipping and compressing \" + target_name)\n output_name = target_name + \".tar.gz\"\n cmd = [\"tar\", \"zcvf\", output_name, target_name]\n subprocess.call(cmd)\n info(\"removing \" + target_name)\n cmd = [\"rm\", \"-r\", target_name]\n subprocess.call(cmd)\n elif fmt in [\".gz\", \"gz\", \"compress\", \"compressed\", \"gzip\", \"gzipped\"]:\n info(\"compressing \" + target_name)\n cmd = [\"gzip\", \"-r\", \"-q\", target_name]\n output_name = target_name + \".gz\"\n subprocess.call(cmd)\n else:\n error(\"invalid \\\"compress\\\" setting, should be tarball or compress, \" + target_name)\n output_name = \"\"\n return output_name", "def compress(self, diameter=None, length=None, area=None, x=None, y=None, z=None, origin=0):\r\n self._origin = origin\r\n n = len(self.x)\r\n # Update values of vectors\r\n diameter[:n] = self.diameter\r\n length[:n] = self.length\r\n area[:n] = self.area\r\n x[:n] = self.x\r\n y[:n] = self.y\r\n z[:n] = self.z\r\n # Attributes are now views on these vectors\r\n self.diameter = diameter[:n]\r\n self.length = length[:n]\r\n self.area = area[:n]\r\n self.x = x[:n]\r\n self.y = y[:n]\r\n self.z = z[:n]\r\n for kid in self.children:\r\n kid.compress(diameter=diameter[n:], length=length[n:], area=area[n:], x=x[n:], y=y[n:], z=z[n:], origin=n)\r\n n += len(kid)\r\n self.iscompressed = True", "def compression(s):", "def compression_origin(self) -> CompressionOrigin:\n return self._compression_origin", "def _gzipencode(content):\n import gzip\n out = BytesIO()\n f = gzip.GzipFile(fileobj=out, mode='w', compresslevel=5)\n f.write(content)\n f.close()\n return out.getvalue()", "def compress(self, P):\n\t\traise Exception(NotImplemented)", "def compression(self) -> str:\n ...", "def compress_and_encrypt(files, password=None, pgp_key=''):\n if pgp_key:\n zipfile = _get_compressed_file(files)\n return _get_encrypted_file(zipfile, pgp_key)\n else:\n return _get_compressed_file(files, password)", "def vcf_compress(fn):\n ret = cmd_exe(f\"vcf-sort {fn} | bgzip > {fn}.gz && tabix {fn}.gz\")", "def compress(in_file, out_file):\n with open(in_file, \"rb\") as f1:\n text = f1.read()\n freq = make_freq_dict(text)\n tree = huffman_tree(freq)\n codes = get_codes(tree)\n number_nodes(tree)\n print(\"Bits per 
symbol:\", avg_length(tree, freq))\n result = (num_nodes_to_bytes(tree) + tree_to_bytes(tree) +\n size_to_bytes(len(text)))\n result += generate_compressed(text, codes)\n with open(out_file, \"wb\") as f2:\n f2.write(result)", "def context(tensor):\n raise NotImplementedError", "def compress_inputs(self, inputs):\n num_students = inputs.shape[0]\n inputs = inputs.toarray()\n inputs = np.dot(inputs.reshape(-1, self.encoding_dim), self.compress_matrix)\n self.encoding_dim = self.compress_dim\n\n return sp.csr_matrix(inputs.reshape(num_students, -1))", "def compress(tlv):\n if not type(tlv) == CCNxTlv:\n raise TypeError(\"tlv must be CCNxTlv\")\n\n encoded = None\n if tlv.length < _length_3_4:\n vle = CCNxCompressorVariableLength.__find_pattern(tlv, _pattern_3_4)\n if vle is not None:\n encoded = CCNxCompressorVariableLength.__compress_pattern_3_4(tlv, vle)\n\n if encoded is None and tlv.length < _length_4_9:\n vle = CCNxCompressorVariableLength.__find_pattern(tlv, _pattern_4_9)\n if vle is not None:\n encoded = CCNxCompressorVariableLength.__compress_pattern_4_9(tlv, vle)\n\n return encoded", "def _compress_file(filename: str, basename: str):\n write_mode = _get_write_mode(filename)\n\n with tempfile.TemporaryDirectory() as tmpdir:\n shutil.move(filename, os.path.join(tmpdir, basename))\n with tarfile.open(filename, write_mode) as tarball:\n tarball.add(tmpdir, arcname='')", "def convert_to_jpg_then_compress(self):\n\t\tself._compressed_file_name = 'c_' + self.file_name\n\t\tself._compressed_save_path = self.full_path.replace(self.file_name, self._compressed_file_name).replace('.png', '.jpg')\n\n\t\timage = Image.open(self.full_path)\n\t\timage.save(self._compressed_save_path)\n\n\t\timage = Image.open(self._compressed_save_path)\n\t\timage.save(self._compressed_save_path, quality=85, progressive=False)\n\n\t\tself._compressed_file_size = ufo.get_file_size_in_bytes(self._compressed_save_path)\n\n\t\ttransfer_path = self._compressed_save_path.replace('c_' + self.file_name, self.file_name).replace('/configuration_files/', '/quasar_site_django/')\n\t\tufo.copy_file_to_path(self._compressed_save_path, transfer_path)", "def encode(self, compressed, hash160=False):\n # calculate the bytes\n if compressed:\n prefix = b'\\x02' if self.y % 2 == 0 else b'\\x03'\n pkb = prefix + self.x.to_bytes(32, 'big')\n else:\n pkb = b'\\x04' + self.x.to_bytes(32, 'big') + self.y.to_bytes(32, 'big')\n # hash if desired\n return ripemd160(sha256(pkb)) if hash160 else pkb", "def _compress_mask(self, mask: Tensor) -> Tensor:\n if self.dim is None or len(mask.size()) == 1:\n mask = mask.clone()\n else:\n mask_dim = list(range(len(mask.size())))\n for dim in self.dim:\n mask_dim.remove(dim)\n mask = torch.sum(mask, dim=mask_dim)\n\n if self.block_sparse_size is not None:\n # operation like pooling\n lower_case_letters = 'abcdefghijklmnopqrstuvwxyz'\n ein_expression = ''\n for i, step in enumerate(self.block_sparse_size):\n mask = mask.unfold(i, step, step)\n ein_expression += lower_case_letters[i]\n ein_expression = '...{},{}'.format(ein_expression, ein_expression)\n mask = torch.einsum(ein_expression, mask, torch.ones(self.block_sparse_size).to(mask.device))\n\n return (mask != 0).type_as(mask)", "def compress_image(image_path, out_path, key):\n tinify.key = key\n try:\n source = tinify.from_file(image_path)\n source.to_file(out_path)\n return out_path\n except:\n traceback.print_exc()\n return False", "def get_datapoints_compressed(self, rid, t0, t1, nmax = 300):\n dp = self.get_datapoints(rid, t0, t1, nmax)\n return 
zlib.compress(pickle.dumps(dp))", "def save_compressed(data, filename, compression_type='bz2', create_link=False):\n # write to compressed HDF5 file\n hdf5 = open_compressed(filename, 'w')\n save(data, hdf5)\n close_compressed(filename, hdf5, compression_type, create_link)", "def compress_G1(pt: G1Uncompressed) -> G1Compressed:\n if is_inf(pt):\n # Set c_flag = 1 and b_flag = 1. leave a_flag = x = 0\n return G1Compressed(POW_2_383 + POW_2_382)\n else:\n x, y = normalize(pt)\n # Record y's leftmost bit to the a_flag\n a_flag = (y.n * 2) // q\n # Set c_flag = 1 and b_flag = 0\n return G1Compressed(x.n + a_flag * POW_2_381 + POW_2_383)", "def _gzip_reader_fn():\n return tf.TFRecordReader(\n options=tf.python_io.TFRecordOptions(\n compression_type=tf.python_io.TFRecordCompressionType.GZIP))", "def encode(self, x: Tensor) ->Tensor:\n return self.encoder(x)[0]", "def two_tier_embedding_compression(embeddings, bits, quantizer=None):\n assert bits <= 8\n n = 2**bits\n quantized_embeddings = embeddings.copy()\n index_table = np.zeros(embeddings.shape, dtype=np.uint8)\n cluster_index_table = np.zeros(index_table.shape[0], dtype=np.uint8)\n codebook_table = np.zeros((n, n))\n\n km1 = KMeans(n)\n km1.fit(embeddings)\n tier1 = km1.predict(embeddings)\n\n km_models = [0] * n\n block_sizes = [0] * n\n for block_label in tqdm(range(n)):\n mask = block_label == tier1\n indices = np.arange(embeddings.shape[0])[mask]\n block = embeddings[mask]\n km2 = KMeans(n)\n km2.fit(block.flatten().reshape(-1, 1))\n if quantizer:\n km2.cluster_centers_ = quantizer(km2.cluster_centers_).numpy()\n km2.cluster_centers_.sort(axis=0)\n\n km_models[block_label] = km2\n codebook_table[block_label, :] = km2.cluster_centers_.flatten()\n cluster_index_table[indices] = block_label\n block_sizes[block_label] = block.shape[0]\n for i in indices:\n preds = km2.predict(embeddings[i, :].reshape(-1, 1))\n index_table[indices, :] = preds\n quantized_embeddings[i, :] = km2.cluster_centers_[preds].flatten()\n print('block_sizes:', block_sizes)\n return index_table, cluster_index_table, codebook_table, quantized_embeddings", "def gz_tar(full_prefix):\n tarfile = os.path.join(outputdir, full_prefix + '.tar')\n try:\n with open(tarfile, 'rb') as f_in, gzip.open(tarfile + '.gz', 'wb') as f_out:\n shutil.copyfileobj(f_in, f_out)\n os.remove(tarfile)\n except Exception as e:\n log.error(\"Tarfile {0} was not generated. Module(s) run collected no info?\".format(tarfile))\n log.error(e)\n\n return tarfile + '.gz'", "def _compress(protected, unprotected, ciphertext):\n\n if protected:\n raise RuntimeError(\"Protection produced a message that has uncompressable fields.\")\n\n piv = unprotected.pop(COSE_PIV, b\"\")\n if len(piv) > COMPRESSION_BITS_N:\n raise ValueError(\"Can't encode overly long partial IV\")\n\n firstbyte = len(piv)\n if COSE_KID in unprotected:\n firstbyte |= COMPRESSION_BIT_K\n kid_data = unprotected.pop(COSE_KID)\n else:\n kid_data = b\"\"\n\n if COSE_KID_CONTEXT in unprotected:\n firstbyte |= COMPRESSION_BIT_H\n kid_context = unprotected.pop(COSE_KID_CONTEXT)\n s = len(kid_context)\n if s > 255:\n raise ValueError(\"KID Context too long\")\n s_kid_context = bytes((s,)) + kid_context\n else:\n s_kid_context = b\"\"\n\n if COSE_COUNTERSIGNATURE0 in unprotected:\n firstbyte |= COMPRESSION_BIT_G\n\n # In theory at least. 
In practice, that's an empty value to later\n # be squished in when the compressed option value is available for\n # signing.\n ciphertext += unprotected.pop(COSE_COUNTERSIGNATURE0)\n\n if unprotected:\n raise RuntimeError(\"Protection produced a message that has uncompressable fields.\")\n\n if firstbyte:\n option = bytes([firstbyte]) + piv + s_kid_context + kid_data\n else:\n option = b\"\"\n\n return (option, ciphertext)", "def compress(self, data):\r\n return self.add_chunk(data)", "def uncompress(self, *args):\n return _osgAnimation.Vec3Packed_uncompress(self, *args)", "def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT = (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16", "def compress(self, s):\n data = zlib.compress(s)\n # drop gzip headers and tail\n return data[2:-4]", "def compressString(s):\n import cStringIO, gzip\n\n # Nasty monkeypatch to avoid gzip changing every time\n class FakeTime:\n def time(self):\n return 1111111111.111\n\n gzip.time = FakeTime()\n\n zbuf = cStringIO.StringIO()\n zfile = gzip.GzipFile(mode='wb', compresslevel=9, fileobj=zbuf)\n zfile.write(s)\n zfile.close()\n return zbuf.getvalue()", "def compress(args):\n # This is the sequential version, processing 8 rows one after another.\n x_folders_path = args.input_file\n x_test_images_path = np.asarray(make_mat(x_folders_path))\n # Build model, restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n # Read LF image rows and create feature tensors.\n for row in range(8): \n x_val_images_path_curr = select_views(x_test_images_path, row+1) \n for img in range(8):\n if img == 0:\n images = np.expand_dims(read_image_test(x_val_images_path_curr[img]),axis = 0)\n else:\n temp = np.expand_dims(read_image_test(x_val_images_path_curr[img]),axis = 0)\n images = np.concatenate((images, temp),axis = 0)\n pos_x = ((row+1)/5.0) * np.ones(np.shape(images[0,:,:,:]))\n images = np.expand_dims(images, axis = 0)\n pos_x = np.expand_dims(pos_x, axis = 0) \n pos_y = np.ones(np.shape(images[:,0,:,:,:]))\n batch_feature1 = np.stack((images[:,0,:,:,:],images[:,4,:,:,:],pos_x,(1.0/5.0)*pos_y), axis=1)\n batch_feature2 = np.stack((images[:,1,:,:,:],images[:,4,:,:,:],pos_x,(2.0/5.0)*pos_y), axis=1)\n batch_feature3 = np.stack((images[:,2,:,:,:],images[:,4,:,:,:],pos_x,(3.0/5.0)*pos_y), axis=1)\n batch_feature4 = np.stack((images[:,3,:,:,:],images[:,4,:,:,:],pos_x,(4.0/5.0)*pos_y), axis=1)\n batch_feature5 = np.stack((images[:,4,:,:,:],images[:,4,:,:,:],pos_x,(5.0/5.0)*pos_y), axis=1)\n batch_feature6 = np.stack((images[:,5,:,:,:],images[:,4,:,:,:],pos_x,(6.0/5.0)*pos_y), axis=1)\n batch_feature7 = np.stack((images[:,6,:,:,:],images[:,4,:,:,:],pos_x,(7.0/5.0)*pos_y), axis=1)\n batch_feature8 = 
np.stack((images[:,7,:,:,:],images[:,4,:,:,:],pos_x,(8.0/5.0)*pos_y), axis=1)\n\n if not os.path.exists(args.output_file):\n os.mkdir(args.output_file)\n outputfile=args.output_file+'/'+str(row+1)+'.tfci'\n \n # Write the input images as png files.\n for col in range(np.shape(images)[1]):\n inpimg = images[:,col,:,:,:]\n save_img(args.output_file,1,inpimg,row+1,col+1)\n\n # Compress the LF image rows.\n curr_decoded, psnr, msssim, bpp = model.compress(images,batch_feature1,batch_feature2,\n batch_feature3,batch_feature4,batch_feature5,\n batch_feature6,batch_feature7,batch_feature8,\n outputfile,args.output_file,row+1)\n print(\"PSNR:%.2f, MS-SSIM:%.2f, BPP:%.2f\"%(psnr,msssim,bpp))", "def get_compressed(self, value):\r\n output = []\r\n lz_data = (value >> 8) & 0xFF\r\n lz_counter = value & 0xFF\r\n # Define the relative offset on LZ Window\r\n lz_offset = ((lz_counter & 0xF0) << 4) | lz_data\r\n # Define the LZ Counter for repeat data N times\r\n lz_counter = (lz_counter & 0xF) + 0x2\r\n # Start Repeat Loop\r\n while (lz_counter >= 0):\r\n # Seek the window on LZ Offset and get the LZ Data\r\n self.__lzwindow__.seek(lz_offset, FROM_START)\r\n lz_data = (lz_data & 0xFF00) + \\\r\n int.from_bytes(self.__lzwindow__.read(1), byteorder='big')\r\n # Write the LZ data to the output\r\n output.append((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Seek the LZ Window on current LZ Window Counter value and write the current LZ Data (LZBuffer)\r\n self.__lzwindow__.seek(self.__lzwindowcounter__, FROM_START)\r\n self.__lzwindow__.write((lz_data & 0xFF).to_bytes(1, byteorder='big'))\r\n # Increment LZ Window Counter\r\n self.__lzwindowcounter__ = (\r\n self.__lzwindowcounter__ + 0x1) & self.__lzwindowmax__\r\n # Increment LZ Offset\r\n lz_offset = (lz_offset + 0x1) & self.__lzwindowmax__\r\n # Decrement number of data to decompress\r\n self.__maxlen__ -= 0x1\r\n # Decrement LZ Loop counter\r\n lz_counter -= 0x1\r\n return output", "def compress(cls, img, as_string=False):\n h0, w0 = img.shape\n w = binary_cast([w0], 'H', 'BB')\n h = binary_cast([h0], 'H', 'BB')\n cp = np.concatenate((w, h, img.astype('uint8').flatten()))\n # VLR.cmp: more 2x compression\n scp = VariableLength.compress(cp)\n if as_string:\n return scp\n # translate string into unit8 for storage\n vcp = np.array([ord(d) for d in scp]).astype('uint8')\n return vcp" ]
[ "0.7178514", "0.7178514", "0.6956097", "0.68778074", "0.67097557", "0.6652845", "0.64798415", "0.6468104", "0.61270404", "0.60731876", "0.5983608", "0.56424665", "0.5547921", "0.5408176", "0.5383199", "0.53278595", "0.5266407", "0.5179403", "0.5145993", "0.5139783", "0.51359445", "0.51349926", "0.510412", "0.510412", "0.5073774", "0.49986", "0.4980507", "0.49748343", "0.49552888", "0.49256283", "0.4917228", "0.48944536", "0.48634207", "0.48469475", "0.47720605", "0.4761397", "0.47397208", "0.47338143", "0.47299954", "0.47190925", "0.47134265", "0.4697946", "0.46895674", "0.46877858", "0.46215332", "0.46198332", "0.4619558", "0.46006903", "0.4595375", "0.45876873", "0.45812076", "0.45708042", "0.45695207", "0.4563512", "0.4563512", "0.45433185", "0.4541583", "0.45401162", "0.4539745", "0.4533474", "0.453331", "0.4532222", "0.4529758", "0.45242923", "0.45230198", "0.45187637", "0.45158494", "0.45045668", "0.44963363", "0.44897547", "0.44768596", "0.44747958", "0.446968", "0.44334593", "0.44241622", "0.44196403", "0.44155622", "0.44138718", "0.4412577", "0.44104287", "0.44063586", "0.43950406", "0.4377009", "0.43731415", "0.43638608", "0.43626347", "0.43602404", "0.43586805", "0.4321505", "0.43161303", "0.4305788", "0.4303118", "0.42987886", "0.4292662", "0.42844144", "0.4275241", "0.42694685", "0.42658502", "0.42359114", "0.42311907", "0.4229589" ]
0.0
-1
Decompress the tensor with the given decompression context.
def decode(self, coded_set):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decompress(self, tensor, ctx, *args, **kwargs):\n pass", "def decompress(self, tensor, ctx, *args, **kwargs):\n return tensor", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor = self.compressor.decompress(tensor, ctx, *args, **kwargs)\n \n # uncompressed gradients need to do nag explicitly\n if not self.inited:\n if size(tensor.shape) < self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.nag = True\n self.inited = True\n\n if self.nag:\n self.mom += tensor\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n return tensor", "def decompress(self, tensor, ctx, *args, **kwargs):\n tensor_decompressed = tensor\n dtype = ctx\n if 'float' in str(dtype):\n tensor_decompressed = tensor.astype(dtype, copy=False)\n return tensor_decompressed", "def decompress(self, tensor, ctx, *args, **kwargs):\n if \"x\" not in kwargs:\n raise ValueError(\"x is missing\")\n\n x = kwargs[\"x\"].astype(tensor.dtype, copy=False) \n \n if not self.inited:\n self.cache = nd.zeros_like(tensor)\n if size(tensor.shape) >= self.threshold:\n self.mom = nd.zeros_like(tensor)\n self.wdmom = True\n self.inited = True\n \n # weight decay\n nd._internal._mul_scalar(x, self.wd, out=self.cache)\n\n # weight decay momentum\n if self.wdmom:\n self.mom += self.cache\n nd._internal._mul_scalar(self.mom, self.mu, out=self.mom)\n tensor += self.mom\n\n tensor += self.cache\n return self.compressor.decompress(tensor, ctx, *args, **kwargs)", "def decompress(self, tensors):", "def __handle_decompression(self, x):\n if self.__compress:\n return zlib.decompress(x)\n return x", "def decompress(args):\n # Three integers for tensor shapes + nine encoded strings.\n np_dtypes = [np.integer] * 3 + [np.bytes_] * 9\n with open(args.input_file, \"rb\") as f:\n packed = tfc.PackedTensors(f.read())\n arrays = packed.unpack_from_np_dtypes(np_dtypes)\n\n # Build model and restore optimized parameters.\n model = CompressionModel(args)\n checkpoint = tf.train.Checkpoint(model=model)\n restore_path = tf.train.latest_checkpoint(args.checkpoint_dir)\n checkpoint.restore(restore_path)\n curr_decoded = model.decompress(arrays)\n row=int(args.input_file.split('/')[-1].split('.')[0])\n\n # Write reconstructed images out as PNG files.\n for col in range(np.shape(curr_decoded)[1]):\n img = curr_decoded[0,col,:,:,:]/255\n save_img(args.output_file,0,img,row,col+1)", "def Decompress(indata, algo, with_header=True):\n if algo == 'none':\n return indata\n if with_header:\n data_len = struct.unpack('<I', indata[:4])[0]\n indata = indata[4:4 + data_len]\n fname = GetOutputFilename('%s.decomp.tmp' % algo)\n with open(fname, 'wb') as fd:\n fd.write(indata)\n if algo == 'lz4':\n data = Run('lz4', '-dc', fname, binary=True)\n elif algo == 'lzma':\n outfname = GetOutputFilename('%s.decomp.otmp' % algo)\n Run('lzma_alone', 'd', fname, outfname)\n data = ReadFile(outfname, binary=True)\n elif algo == 'gzip':\n data = Run('gzip', '-cd', fname, binary=True)\n else:\n raise ValueError(\"Unknown algorithm '%s'\" % algo)\n return data", "def Decompress(input_filename, output_filename):\n _Write(zlib.decompress(_Read(input_filename)), output_filename)", "def decompress(data):\n compression_type = ord(data[0:1])\n if compression_type == 0:\n return data\n elif compression_type == 2:\n return zlib.decompress(data[1:], 15)\n elif compression_type == 16:\n return bz2.decompress(data[1:])\n else:\n msg = \"Unsupported compression type: {}\".format(compression_type)\n raise RuntimeError(msg)", "def Decompress(self, var_name):\n 
self.Write('%s_uncompressed = new uint8_t[%s_uncompressed_size];', var_name,\n var_name)\n self.Write('uLongf %s_temp_size = %s_uncompressed_size;', var_name,\n var_name)\n self.Write('CHECK_EQ(Z_OK, uncompress(%s_uncompressed, &%s_temp_size,',\n var_name, var_name)\n self.Write(' %s_data, %s_size));', var_name,\n var_name)", "def decompress(data):\n pickled = zlib.decompress(data)\n return pickle_util.load(pickled)", "def download_uncompress(url, path=\".\", compression=None, context=None):\n\n # infer compression from url\n if compression is None:\n compression = os.path.splitext(url)[1][1:]\n\n # check compression format and set mode\n if compression in [\"gz\", \"bz2\"]:\n mode = \"r|\" + compression\n elif compression == \"tar\":\n mode = \"r:\"\n else:\n raise ValueError(\"The file must be of type tar/gz/bz2.\")\n\n # download and untar/uncompress at the same time\n if context is not None:\n stream = urlopen(url, context=context)\n else:\n stream = urlopen(url)\n tf = tarfile.open(fileobj=stream, mode=mode)\n tf.extractall(path)", "def decompress(fileobj, dir=None):\n tf = tempfile.NamedTemporaryFile(\n 'wb', prefix='vulnix.nvd.', suffix='.xml', delete=False, dir=dir)\n logger.debug(\"Uncompressing {}\".format(tf.name))\n with gzip.open(fileobj, 'rb') as f_in:\n shutil.copyfileobj(f_in, tf)\n tf.close()\n return tf.name", "def compress(self, tensor):", "def decompress_zlib(in_str):\n import zlib\n s = zlib.decompress(in_str)\n return s", "def decompress_dump(func, input_bytes):\n o = func(input_bytes)\n if o:\n return o\n for cmd, search_bytes in COMPRESSION_ALGO:\n for decompressed in try_decompress(cmd, search_bytes, input_bytes):\n if decompressed:\n o = decompress_dump(func, decompressed)\n if o:\n return o\n # Force decompress the whole file even if header doesn't match\n decompressed = try_decompress_bytes(cmd, input_bytes)\n if decompressed:\n o = decompress_dump(func, decompressed)\n if o:\n return o", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def compress(self, tensor, *args, **kwargs):\n return self.compressor.compress(tensor)", "def decompress_file(in_file: str, out_file: str) -> None:\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_postorder(node_lst, num_nodes - 1)\n size = bytes_to_int(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(decompress_bytes(tree, text, size))", "def decompress_zlib(self, string):\n #encode the input string\n self.string = string\n return zlib.decompress(self.string).decode()", "def uncompress(self, compressed):\n\t\traise Exception(NotImplemented)", "def gzinflate(val):\n return zlib.decompress(val)", "def __handle_compression(self, x):\n if self.__compress:\n return zlib.compress(x)\n return x", "def compress(self, tensor, *args, **kwargs):\n return tensor, None", "def decompress(value):\n\n process = Popen([\"xz\", \"--decompress\", \"--stdout\", \"--force\"],\n stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def decompress(value):\n\n process = Popen([\"xz\", \"--decompress\", \"--stdout\", \"--force\"],\n stdin=PIPE, stdout=PIPE)\n return process.communicate(value)[0]", "def deconv_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 256, activation=tf.nn.relu)\n d2 = tf.layers.dense(d1, 1024, activation=tf.nn.relu)\n d2_reshaped 
= tf.reshape(d2, shape=[-1, 4, 4, 64])\n d3 = tf.layers.conv2d_transpose(\n inputs=d2_reshaped,\n filters=64,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d4 = tf.layers.conv2d_transpose(\n inputs=d3,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n\n d5 = tf.layers.conv2d_transpose(\n inputs=d4,\n filters=32,\n kernel_size=4,\n strides=2,\n activation=tf.nn.relu,\n padding=\"same\",\n )\n d6 = tf.layers.conv2d_transpose(\n inputs=d5,\n filters=output_shape[2],\n kernel_size=4,\n strides=2,\n padding=\"same\",\n )\n return tf.reshape(d6, [-1] + output_shape)", "def decompress(byte_array):\n byte0 = byte_array[0]\n decoded = None\n if (byte0 & _mask_3_4) == _pattern_3_4:\n decoded = CCNxCompressorVariableLength.__decompress_3_4(byte_array)\n elif (byte0 & _mask_4_9) == _pattern_4_9:\n decoded = CCNxCompressorVariableLength.__decompress_4_9(byte_array)\n elif (byte0 & _mask_15_5) == _pattern_15_5:\n decoded = CCNxCompressorVariableLength.__decompress_15_5(byte_array)\n elif (byte0 & _mask_16_10) == _pattern_16_10:\n decoded = CCNxCompressorVariableLength.__decompress_16_10(byte_array)\n elif (byte0 & _mask_16_16) == _pattern_16_16:\n decoded = CCNxCompressorVariableLength.__decompress_16_16(byte_array)\n\n return decoded", "def decompose(self, *args, **kwargs):\n return _image.image_decompose(self, *args, **kwargs)", "def compress(self, tensor, *args, **kwargs):\n pass", "def uncompress(in_file, out_file):\n with open(in_file, \"rb\") as f:\n num_nodes = f.read(1)[0]\n buf = f.read(num_nodes * 4)\n node_lst = bytes_to_nodes(buf)\n # use generate_tree_general or generate_tree_postorder here\n tree = generate_tree_general(node_lst, num_nodes - 1)\n size = bytes_to_size(f.read(4))\n with open(out_file, \"wb\") as g:\n text = f.read()\n g.write(generate_uncompressed(tree, text, size))", "def lz4_uncompress(input_data, expected_decompressed_size):\n assert isinstance(input_data,bytes), \"input_data must be of type bytes\"\n assert isinstance(expected_decompressed_size,int), \"expected_decompressed_size must be of type int\"\n\n dst_buf = create_string_buffer(expected_decompressed_size)\n status = liblz4.LZ4_decompress_safe(input_data,dst_buf,len(input_data),expected_decompressed_size)\n if status != expected_decompressed_size:\n return None\n else:\n return dst_buf.raw", "def _decompress(self, tile: bytes) -> np.ndarray:\n try:\n return getattr(self, f\"_{self.compression}\")(tile)\n except AttributeError as e:\n raise NotImplementedError(\n f\"{self.compression} is not currently supported\"\n ) from e", "def decode(self, z: torch.Tensor) -> torch.Tensor:\n raise NotImplementedError", "def _decompress_tarball(*, in_fileobj, out_fileobj):\n with tarfile.open(fileobj=in_fileobj, mode=\"r\") as it, tarfile.open(\n fileobj=out_fileobj, mode=\"w|\"\n ) as ot:\n for member in it.getmembers():\n extracted = it.extractfile(member)\n ot.addfile(member, extracted)", "def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 2, 2)\n result = self.decoder(result)\n result = self.final_layer(result)\n return result", "def decompress(filename):\n print(\"\\n# start decompression of file: %s \\n#############################################\" % filename)\n\n # check if handed filename has the extension .gz\n if \".gz\" in filename: # if file has the extension gz\n\n if os.path.exists(filename): # check if the file exists\n\n print(\"# filename enthält die endung gz : %s\" % filename)\n\n # if the file 
was found on the system split its string at the '.gz' position and use evereything before\n filename_txt = filename.split(\".gz\")[0]\n\n print(\"# txt filename: \", filename_txt)\n\n # create an txt file with string before the '.gz' extension and decompress the content of the .gz file\n with gzip.open(filename, 'rb') as decompressFile:\n with open(filename_txt, 'wb') as receivingFile:\n shutil.copyfileobj(decompressFile, receivingFile)\n\n # if the file was not found download it and decompress the content\n else:\n print(\"# ERROR - %s konnte nicht gefunden werden!!\" % filename)\n\n download(HOST, DIRECTORY, filename)\n filename_txt = decompress(filename)\n print(\"# return \", filename_txt)\n return filename_txt\n # if the handed filename has no '.gz' extension throw an Exception\n else:\n\n raise Exception(\"# not a gzip file\")", "def decompress_gzip(in_str):\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def decompress_gzip(in_str):\n import gzip\n # gzip can only handle file object therefore using StringIO\n copmressed_stream = StringIO.StringIO(in_str)\n gzipper = gzip.GzipFile(fileobj=copmressed_stream)\n s = gzipper.read()\n gzipper.close()\n return s", "def decompress(self, s):\n return zlib.decompress(s, -zlib.MAX_WBITS)", "def gzdeflate():\n return zlib.compress(val)", "def uncompress(self, *args):\n return _osgAnimation.Vec3Packed_uncompress(self, *args)", "def decompress_stream(src, dst):\n with gzip.GzipFile(fileobj=src, mode='rb') as gz:\n for block in iterfile(gz):\n dst.write(block)", "def NTFSDecompressUnit(Buffer):\n\n from io import BytesIO\n from struct import unpack\n\n\n NTFS_CLUSTER_SIZE = 4096\n NTFS_COMPRESSION_UNIT_SIZE = 16 * NTFS_CLUSTER_SIZE\n\n def is_valid_write_request(offset, length):\n return offset + length <= 2 * 1024 * 1024 * 1024 # Reject obviously invalid write requests.\n\n if len(Buffer) > NTFS_COMPRESSION_UNIT_SIZE or len(Buffer) < NTFS_CLUSTER_SIZE:\n return b'' # Invalid length of input data.\n\n LZNT1_COMPRESSION_BITS = []\n\n offset_bits = 0\n y = 16\n\n # Taken from: CyXpress.pyx\n if len(LZNT1_COMPRESSION_BITS) == 0:\n LZNT1_COMPRESSION_BITS = [0] * 4096\n\n for x in range(0, 4096):\n LZNT1_COMPRESSION_BITS[x] = 4 + offset_bits\n if x == y:\n y = y * 2\n offset_bits += 1\n # End\n\n\n src_index = 0\n dst_index = 0\n dbuf_obj = BytesIO()\n\n while src_index < len(Buffer):\n header_bytes = Buffer[src_index: src_index + 2]\n src_index += 2\n\n if len(header_bytes) < 2:\n break # Truncated header.\n\n header, = unpack('<H', header_bytes)\n\n if header == 0:\n break # End of the buffer.\n\n if header & 0x7000 != 0x3000:\n break # Invalid signature.\n\n if header & 0x8000 == 0:\n # Not a compressed block, copy literal data.\n block_size = (header & 0x0FFF) + 1\n\n if not is_valid_write_request(dst_index, block_size):\n break # Bogus data.\n\n dbuf_obj.seek(dst_index)\n bytes_ = Buffer[src_index: src_index + block_size]\n dbuf_obj.write(bytes_)\n\n if len(bytes_) == block_size:\n src_index += block_size\n dst_index += block_size\n continue\n else:\n break # Truncated literal data.\n\n # A compressed block.\n dst_chunk_start = dst_index\n src_chunk_end = src_index + (header & 0x0FFF) + 1\n\n bogus_data = False\n while src_index < src_chunk_end and src_index < len(Buffer) and not bogus_data:\n flags = Buffer[src_index]\n if type(flags) is not int:\n flags = ord(flags)\n\n src_index 
+= 1\n\n for token in range(0, 8):\n if src_index >= src_chunk_end:\n break\n\n if src_index >= len(Buffer):\n # Truncated chunk.\n break\n\n flag = flags & 1\n flags = flags >> 1\n\n if flag == 0:\n # A literal byte, copy it.\n if not is_valid_write_request(dst_index, 1):\n # Bogus data.\n bogus_data = True\n break\n\n dbuf_obj.seek(dst_index)\n bytes_ = Buffer[src_index: src_index + 1]\n dbuf_obj.write(bytes_)\n\n if len(bytes_) == 1:\n dst_index += 1\n src_index += 1\n continue\n else:\n # Truncated chunk.\n bogus_data = True\n break\n\n # A compression tuple.\n table_idx = dst_index - dst_chunk_start\n try:\n length_bits = 16 - LZNT1_COMPRESSION_BITS[table_idx]\n except IndexError:\n # Bogus data.\n bogus_data = True\n break\n\n length_mask = (1 << length_bits) - 1\n\n ctuple_bytes = Buffer[src_index: src_index + 2]\n src_index += 2\n\n if len(ctuple_bytes) < 2:\n # Truncated chunk.\n bogus_data = True\n break\n\n ctuple, = unpack('<H', ctuple_bytes)\n back_off_rel = (ctuple >> length_bits) + 1\n back_off = dst_index - back_off_rel\n back_len = (ctuple & length_mask) + 3\n\n if back_off < dst_chunk_start:\n # Bogus compression tuple.\n bogus_data = True\n break\n\n for i in range(0, back_len):\n # Decompress data.\n dbuf_obj.seek(back_off)\n bytes_ = dbuf_obj.read(1)\n if len(bytes_) != 1:\n # Invalid offset.\n bogus_data = True\n break\n\n if not is_valid_write_request(dst_index, 1):\n # Bogus data.\n bogus_data = True\n break\n\n dbuf_obj.seek(dst_index)\n dbuf_obj.write(bytes_)\n\n dst_index += 1\n back_off += 1\n\n if bogus_data:\n break\n\n if bogus_data:\n break\n\n dbuf = dbuf_obj.getvalue()\n dbuf_obj.close()\n\n return dbuf", "def testDecompress(self):\n decompressor = xz_decompressor.XZDecompressor()\n\n compressed_data = (\n b'\\xfd7zXZ\\x00\\x00\\x01i\"\\xde6\\x02\\xc0\\x13\\x0f!\\x01\\x16\\x00\\xc0\\xb7\\xdc'\n b'\\xe9\\x01\\x00\\x0eThis is a test.\\x00\\x00]\\xc9\\xc3\\xc6\\x00\\x01#\\x0f\\xdb'\n b'\\xdf\\x90\\x0e\\x90B\\x99\\r\\x01\\x00\\x00\\x00\\x00\\x01YZ')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger xz raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger xz raising IOError.\n decompressor = xz_decompressor.XZDecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def image_decompress(img_mode, img_size, img):\n img = Image.frombytes(img_mode, img_size, img)\n\n return img", "def decode(self, z):\n l1 = self.fc3(z)\n l1 = l1.unsqueeze(0).unsqueeze(0).unsqueeze(0).permute(0,3,1,2)\n h1 = F.relu(self.deconv1(l1))\n h2 = F.relu(self.deconv2(h1))\n h3 = F.relu(self.deconv3(h2))\n return torch.sigmoid(self.deconv4(h3))", "def decompress_file(path, temp_dir='tmp'):\n if path.endswith('.gz'):\n logger.info('Decompressing {} to {}'.format(path, temp_dir))\n return decompress_gzip(\n path,\n os.path.join(temp_dir,\n os.path.splitext(os.path.basename(path))[0])\n )\n else:\n return path", "def deconv_block(input_tensor: tf.Tensor, features: int, name: str) -> tf.Tensor:\n out = input_tensor\n\n out = KL.Conv2D(\n int(features // 2),\n 1,\n strides=(1, 1),\n name=name + f\"_c{1}\",\n )(input_tensor)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2DTranspose(\n int(features // 2),\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n name=name + f\"_d\",\n 
)(out)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n out = KL.Conv2D(\n features,\n 1,\n strides=(1, 1),\n name=name + f\"_c{2}\",\n )(out)\n out = KL.Activation(\"relu\")(KL.BatchNormalization()(out))\n\n return out", "def decompress(self, bit_strings):\n return self._run(\"decompress\", bit_strings=bit_strings)", "def testDecompress(self):\n decompressor = xz_decompressor.LZMADecompressor()\n\n compressed_data = (\n b']\\x00\\x00\\x80\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00*\\x1a\\t\\'d\\x1c'\n b'\\x87\\x8aO\\xcaL\\xf4\\xf8!\\xda\\x88\\xd8\\xff\\xff\\xeb\\xcc\\x00')\n\n uncompressed_data, _ = decompressor.Decompress(compressed_data)\n expected_uncompressed_data = b'This is a test.'\n self.assertEqual(uncompressed_data, expected_uncompressed_data)\n\n # Test to trigger lzma raising EOFError.\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')\n\n # Test to trigger lzma raising IOError.\n decompressor = xz_decompressor.LZMADecompressor()\n\n with self.assertRaises(errors.BackEndError):\n decompressor.Decompress(b'This is a test.')", "def _uncompress(fname, outdir, msg=msg):\n import os\n assert os.access(fname, os.R_OK), \"could not access [%s]\" % fname\n fname = os.path.abspath(os.path.realpath(fname))\n if not os.path.exists(outdir):\n os.makedirs(outdir)\n orig_dir = os.getcwd()\n try:\n os.chdir(outdir)\n ext = os.path.splitext(fname)[1][1:] # drop the dot\n if ext in ('gz', 'bz2'):\n import tarfile\n f = tarfile.open(fname, 'r:%s'%ext)\n f.extractall()\n else:\n err = 'extension [%s] not handled (yet?)' % ext\n msg.error(err)\n raise ValueError(err)\n finally:\n os.chdir(orig_dir)", "def test_lz4_decompression_avoids_deep_copy():\n pytest.importorskip(\"lz4\")\n a = bytearray(1_000_000)\n b = compressions[\"lz4\"].compress(a)\n c = compressions[\"lz4\"].decompress(b)\n assert isinstance(c, bytearray)", "def decompressFile(source, target):\n data = cake.filesys.readFile(source)\n try:\n data = zlib.decompress(data)\n except zlib.error, e:\n raise EnvironmentError(str(e))\n cake.filesys.writeFile(target, data)", "def decompressFile(infile, outfile):\n decoder = Decoder(infile)\n for data in decoder.bytes():\n outfile.write(data)", "def _deflate(self, tile: bytes) -> np.ndarray:\n decoded = self._reshape(\n np.frombuffer(imagecodecs.zlib_decode(tile), self.dtype)\n )\n self._unpredict(decoded)\n return np.rollaxis(decoded, 2, 0)", "def decode(self, z):\n result = self.decoder_input(z)\n result = result.view(-1, 512, 4, 4)\n result = self.decoder(result)\n return result", "def decompress(compressed):\r\n \r\n \r\n # Build the dictionary.\r\n dict_size = 256\r\n dictionary = dict((i, chr(i)) for i in range(dict_size))\r\n # in Python 3: dictionary = {i: chr(i) for i in range(dict_size)}\r\n \r\n # use StringIO, otherwise this becomes O(N^2)\r\n # due to string concatenation in a loop\r\n result = StringIO()\r\n w = chr(compressed.pop(0))\r\n result.write(w)\r\n for k in compressed:\r\n if k in dictionary:\r\n entry = dictionary[k]\r\n elif k == dict_size:\r\n entry = w + w[0]\r\n else:\r\n raise ValueError('Bad compressed k: %s' % k)\r\n result.write(entry)\r\n \r\n # Add w+entry[0] to the dictionary.\r\n dictionary[dict_size] = w + entry[0]\r\n dict_size += 1\r\n \r\n w = entry\r\n return result.getvalue()", "def lz4_decompress(src, dlen, dst=None):\n if dst is None:\n dst = bytearray()\n print(str(src))\n b = bytes(src)\n d=lz4zfs.decompress(b,dlen)\n l=len(d)\n if (dlen != l):\n print(\"[-] decompress size differ 
from %d, got %d\" %(dlen,l))\n raise RuntimeError(\"decompress size differ from %d, got %d\" %(dlen,l))\n else:\n if (dlen < l):\n dst[0:dlen] = d;\n else:\n dst[0:l] = d;\n print(str(dst))\n return dst", "def decompress(cls, imgz):\n # translate back uint8 into string\n if not isinstance(imgz, str):\n imgz = ''.join([chr(d) for d in imgz])\n # zlib decompression\n imgz = VariableLength.decompress(imgz)\n ####\n w = binary_cast(imgz[:2], 'BB', 'H')[0]\n h = binary_cast(imgz[2:4], 'BB', 'H')[0]\n img = imgz[4:]\n img = np.reshape(img, (h, w))\n return img", "def decode(self, z):\n out = self.fc_decoder(z)\n out = out.view(-1, 512, 2, 2)\n out = self.decoder(out)\n return out", "def decompress_data(src, dst):\n assert os.path.exists(src), \"{} does not exist. Please download the \\\n entire repository and keep it as it originally is\".format(src)\n\n # create folder layout at the destination folder\n subset_list = [\"train\", \"val\"]\n _create_layout(dst, subset_list)\n\n # extract data\n for subset in subset_list:\n subset_img_src = os.path.join(src, \"images\", subset + \".zip\")\n subset_img_dst = os.path.join(dst, \"images\", subset)\n _extract_multi_vol_zip(subset_img_src, subset_img_dst)\n _extract_all_gz_in_dir(subset_img_dst)\n\n subset_lbl_src = os.path.join(src, \"labels\", subset + \".zip\")\n subset_lbl_dst = os.path.join(dst, \"labels\", subset)\n _extract_zip(subset_lbl_src, subset_lbl_dst)\n _extract_all_gz_in_dir(subset_lbl_dst)\n\n print(\"Finished decompressing {}.\".format(subset))", "def test_decompress_file():\n gz_file = os.path.join(\n tempfile.gettempdir(),\n \"jade-unit-test-file.gz\",\n )\n with gzip.open(gz_file, \"wb\") as f:\n f.write(b\"Hello World\")\n assert os.path.exists(gz_file)\n\n new_file = decompress_file(gz_file)\n assert os.path.exists(new_file)\n with open(new_file, \"r\") as f:\n data = f.read()\n assert data == \"Hello World\"\n\n if os.path.exists(gz_file):\n os.remove(gz_file)\n\n if os.path.exists(new_file):\n os.remove(new_file)", "def DecompressionFile(src_fp, algorithm):\n if algorithm == \"lzma\":\n return lzma.open(src_fp, \"r\")\n\n if algorithm == \"snappy\":\n return SnappyFile(src_fp, \"rb\")\n\n if algorithm:\n raise InvalidConfigurationError(\"invalid compression algorithm: {!r}\".format(algorithm))\n\n return src_fp", "def _decode_infer(self, decoder, _encoder_output, features, labels):\r\n\r\n return decoder(_encoder_output, labels)", "def _DecompressMessageList(\n packed_message_list: rdf_flows.PackedMessageList,\n) -> rdf_flows.MessageList:\n compression = packed_message_list.compression\n if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:\n data = packed_message_list.message_list\n\n elif compression == rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION:\n try:\n data = zlib.decompress(packed_message_list.message_list)\n except zlib.error as e:\n raise RuntimeError(\"Failed to decompress: %s\" % e) from e\n else:\n raise RuntimeError(\"Compression scheme not supported\")\n\n try:\n result = rdf_flows.MessageList.FromSerializedBytes(data)\n except rdfvalue.DecodeError as e:\n raise RuntimeError(\"RDFValue parsing failed.\") from e\n\n return result", "def Decompress(inputFilePath, outputFilePath):\n # TODO: Add tests for this function\n compressedString = FilePathIntoString(inputFilePath)\n dictionary = ExtractTextDictionaryFromString(compressedString)\n\n compressedWords = SplitIntoWords(compressedString)\n\n # Remove the dictionary from the first word\n firstWord = compressedWords[0]\n 
firstWordArray = firstWord.split(\"\\n\")\n compressedWords[0] = firstWordArray[len(firstWordArray) - 1]\n\n uncompressedWords = UncompressWordArray(compressedWords, dictionary)\n outputString = WordArrayToString(uncompressedWords)\n WriteToFile(outputFilePath, outputString)", "def compress(self, tensor, *args, **kwargs):\n tensor_compressed = tensor\n if 'float' in str(tensor.dtype):\n # Only allow compression from other floating point types\n tensor_compressed = tensor.astype('float16', copy=False)\n return tensor_compressed, tensor.dtype", "def stream_decompress(src, dst, blocksize=_STREAM_TO_STREAM_BLOCK_SIZE):\r\n decompressor = StreamDecompressor()\r\n while True:\r\n buf = src.read(blocksize)\r\n if not buf: break\r\n buf = decompressor.decompress(buf)\r\n if buf: dst.write(buf)\r\n decompressor.flush() # makes sure the stream ended well\r", "def debianize( strFilename ):\n \n #~ data = gzip.GzipFile( strFilename ).read();\n #~ print data;\n #~ return;\n \n #~ data = gzip.open( strFilename ).read();\n #~ print data;\n #~ return; \n \n #~ uncompressedData = bz2.BZ2File(strFilename).read()\n #~ print str(uncompressedData)\n #~ return;\n \n #~ file = open( strFilename, 'rb' );\n #~ data = file.read();\n #~ file.close();\n #~ print debug.dumpHexa( data );\n \n #~ ar = tarfile.open(strFilename, 'r:*')\n #~ for item in ar:\n #~ print( str(item) );\n #~ print( \"%s:\" % item.name );\n #~ #print debug.dumpHexa(item.buf);\n #~ #print zlib.decompress(item.buf)\n #~ #print zlib.decompress(ar.extractfile(item).read())\n #~ data = ar.extractfile(item.name).read()\n #~ print data # works !\n #~ ar.close() \n #~ return;\n \n fileLists = [];\n file = open( strFilename );\n data = file.read();\n file.close();\n \n print( \"data len: %d\" % len( data ) );\n\n nDataCompressedOffset = 0; # 132\n\n # works fine on toto.gz\n #~ f = gzip.open(strFilename, 'rb')\n #~ file_content = f.read()\n #~ print file_content\n #~ f.close() \n \n #~ decompressor = bz2.BZ2Decompressor();\n #~ uncompressed = decompressor.decompress(data[nDataCompressedOffset:]);\n \n #~ uncompressed = zlib.decompress(data[nDataCompressedOffset:]);\n \n uncompressed = decompress( data );\n print( \"uncompressed: %s\" % str( uncompressed ) );", "def _decompress(self, jpgData):\n f = StringIO(jpgData)\n img = Image.open(f)\n\n # thumbs are corrupting in opengl, not sure why\n if img.size == (75, 56):\n img = img.resize((128, 128))\n\n # if the image has an EXIF rotation, the thumb will be\n # straightened but the image won't be. 
That should be fixed here.\n \n return img.size, img.tostring()", "def complex_decoder(self, z, reuse=True):\n\t\tz = tf.convert_to_tensor(z)\n\t\treuse=tf.AUTO_REUSE\n\n\t\tif self.vimco_samples > 1:\n\t\t\tsamples = []\n\n\t\twith tf.variable_scope('model', reuse=reuse):\n\t\t\twith tf.variable_scope('decoder', reuse=reuse):\n\t\t\t\tif len(z.get_shape().as_list()) == 2:\n\t\t\t\t\t# test\n\t\t\t\t\td = tf.layers.dense(z, 256, activation=tf.nn.elu, use_bias=False, reuse=reuse, name='fc1')\t\t\n\t\t\t\t\td = tf.reshape(d, (-1, 1, 1, 256))\n\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 256, 4, padding=\"VALID\", activation=tf.nn.elu, reuse=reuse, name='deconv1')\n\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv2')\n\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv3')\n\t\t\t\t\tdeconv4 = tf.layers.conv2d_transpose(deconv3, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv4')\n\t\t\t\t\t# output channel = 3\n\t\t\t\t\tdeconv5 = tf.layers.conv2d_transpose(deconv4, 3, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv5')\n\t\t\t\t\treturn deconv5\n\t\t\t\telse:\n\t\t\t\t\t# train; iterate through one vimco sample at a time\n\t\t\t\t\tfor i in range(self.vimco_samples):\n\t\t\t\t\t\tz_sample = z[i]\n\t\t\t\t\t\td = tf.layers.dense(z_sample, 256, activation=tf.nn.elu, use_bias=False, reuse=reuse, name='fc1')\t\t\n\t\t\t\t\t\td = tf.reshape(d, (-1, 1, 1, 256))\n\t\t\t\t\t\tdeconv1 = tf.layers.conv2d_transpose(d, 256, 4, padding=\"VALID\", activation=tf.nn.elu, reuse=reuse, name='deconv1')\n\t\t\t\t\t\tdeconv2 = tf.layers.conv2d_transpose(deconv1, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv2')\n\t\t\t\t\t\tdeconv3 = tf.layers.conv2d_transpose(deconv2, 64, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv3')\n\t\t\t\t\t\tdeconv4 = tf.layers.conv2d_transpose(deconv3, 32, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.elu, reuse=reuse, name='deconv4')\n\t\t\t\t\t\t# output channel = 3\n\t\t\t\t\t\tdeconv5 = tf.layers.conv2d_transpose(deconv4, 3, 4, strides=(2,2), padding=\"SAME\", activation=tf.nn.sigmoid, reuse=reuse, name='deconv5')\n\t\t\t\t\t\tsamples.append(deconv5)\n\t\tx_reconstr_logits = tf.stack(samples, axis=0)\n\t\tprint(x_reconstr_logits.get_shape())\n\t\treturn x_reconstr_logits", "def uncompress(location, target_dir, decompressor, suffix=EXTRACT_SUFFIX):\n # FIXME: do not create a sub-directory and instead strip the \"compression\"\n # extension such gz, etc. 
or introspect the archive header to get the file\n # name when present.\n if DEBUG:\n logger.debug('uncompress: ' + location)\n\n tmp_loc, warnings = uncompress_file(location, decompressor)\n\n target_location = os.path.join(target_dir, os.path.basename(location) + suffix)\n if os.path.exists(target_location):\n fileutils.delete(target_location)\n shutil.move(tmp_loc, target_location)\n return warnings", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def de_gzip(data):\n cmps = StringIO.StringIO(data)\n gzipper = gzip.GzipFile(fileobj=cmps)\n return gzipper.read()", "def test_decompress_2(self):\n b_array = bytearray([3]) + bytearray(b'abcdef')\\\n + bytearray([0, 32]) + bytearray([0, 113])\n actual = LZ77.decompress(b_array)\n expected = 'abcdefdeabc'\n self.assertEqual(actual, expected)", "def _untransform(self, X: Tensor) -> Tensor:\n pass # pragma: no cover", "def _decompress_data():\n\n dest_dir = get_cachedir()\n if dest_dir is None:\n print('No cache dir found, not decompressing anything.')\n return\n\n filename = _data_url.split('/')[-1]\n tarball = dest_dir / filename\n\n print(\"Trying to decompress file {}\".format(tarball))\n with tarfile.open(str(tarball), \"r:bz2\") as tar:\n tar.extractall(str(dest_dir))\n\n data_dir = dest_dir / 'data'\n pickle_files = data_dir.glob('*.pickle')\n print(\"Data directory {} contains {} pickle files\"\n .format(data_dir, len(list(pickle_files))))", "def decompress(self, file):\n\t\t\n\t\tbit_string = \"\"\n\n\t\tbyte = file.read(1)\n\t\twhile(len(byte) > 0):\n\t\t\tbyte = ord(byte)\n\t\t\tbits = bin(byte)[2:].rjust(8, '0')\n\t\t\tbit_string += bits\n\t\t\tbyte = file.read(1)\n\n\t\tencoded_text = self.remove_padding(bit_string)\n\n\t\tdecompressed_text = self.decode_text(encoded_text)\n\t\t\n\t\tprint(\"Decompressed\")\n\t\treturn decompressed_text", "def do_LZW_DeCompression(dict_of_abc, list_of_data):\n \n #https://www.youtube.com/watch?v=MQM_DsX-LBI\n \n out = []\n predchozi_out = []\n for i in range(len(list_of_data)):\n new = []\n new.extend(predchozi_out)\n if list_of_data[i] in dict_of_abc:\n o = dict_of_abc[list_of_data[i]]\n out.extend(o)\n predchozi_out = o\n \n #pokud je o list, beru z nej pouze prvni prvek\n if len(o) > 1:\n new.append(o[0])\n else:\n new.extend(o)\n\n index_founded = dict_cointains_list(dict_of_abc, new)\n if index_founded == -1:\n #pokud new neni ve slovniku, pridam ho tam\n dict_of_abc[len(dict_of_abc) +1] = new\n\n return dict_of_abc, out", "def uncompress(compressed_file, dest_dir = None):\n\n\trouting_pairs = (\n\t\t(\".tar.gz\", _uncompress_targz),\n\t\t(\".tgz\", _uncompress_targz),\n\t\t(\".tar\", _uncompress_tar),\n\t\t(\".zip\", _uncompress_zip)\n\t)\n\n\tfound_handler = None\n\tfor suffix, handler in routing_pairs:\n\t\tif compressed_file.filename.endswith(suffix):\n\t\t\tfound_handler = handler\n\t\t\tbreak\n\telse:\n\t\traise ValueError(\"Compressed file does not have known format.\")\n\n\t# If we didn't get a directory to place the uncompressed files into, create\n\t# a temporary one.\n\tif dest_dir is None:\n\t\tdest_dir = tempfile.mkdtemp()\n\n\ttempfile_handle, tempfile_path = tempfile.mkstemp()\n\tos.close(tempfile_handle)\n\n\ttry:\n\t\tcompressed_file.save(tempfile_path)\n\n\t\tfound_handler(tempfile_path, dest_dir)\n\tfinally:\n\t\tos.remove(tempfile_path)\n\n\treturn dest_dir", "def decompress(file, output, pw):\n try:\n bsc.decompress_file(file, output, pw)\n print(Fore.GREEN + \"Decompressed!\")\n except 
bsc.InvalidPasswordException:\n print(Fore.RED + \"Password is invalid!\")\n except bsc.InvalidFileFormatException:\n print(Fore.RED + \"File not compressed with BSC!\")\n except FileNotFoundError:\n print(Fore.RED + \"File not found!\")", "def decoder(self, z):\n x1 = self.dec_conv(z)\n return x1", "def decompression_huffman(compress_seq:str, add_binary:int, dict_seq_binary:dict):\r\n decompressed_seq = decompression(compress_seq)\r\n dict_bin_change = dict_change(dict_seq_binary)\r\n seq_decomp, sequence_restored = retransformation(decompressed_seq, dict_bin_change, add_binary)\r\n return seq_decomp, sequence_restored", "def fc_decoder(latent_tensor, output_shape, is_training=True):\n del is_training\n d1 = tf.layers.dense(latent_tensor, 1200, activation=tf.nn.tanh)\n d2 = tf.layers.dense(d1, 1200, activation=tf.nn.tanh)\n d3 = tf.layers.dense(d2, 1200, activation=tf.nn.tanh)\n d4 = tf.layers.dense(d3, np.prod(output_shape))\n return tf.reshape(d4, shape=[-1] + output_shape)", "def decompress_pickle(file):\n data = bz2.BZ2File(file, 'rb')\n data = cPickle.load(data)\n return data", "def maybe_lzma_decompress(path) -> str:\n decompressed_path, ext = os.path.splitext(path)\n if ext != '.lzma':\n raise ValueError(\n 'Only decompressing LZMA files is supported. If the file '\n 'is LZMA compressed, rename the url to have a .lzma suffix.')\n if os.path.exists(decompressed_path):\n log(f'Reusing cached file {decompressed_path!r}')\n else:\n log(f'Decompressing {path!r} to {decompressed_path!r}')\n with lzma.open(path, 'rb') as fi:\n with open(decompressed_path, 'wb') as fo:\n shutil.copyfileobj(fi, fo)\n return decompressed_path", "def untgz(tgz_filename, out_dir):\r\n logging.info(\"Source: %s\" % tgz_filename)\r\n tgz = TgzHelper(tgz_filename, out_dir)\r\n tgz.extract()", "def decoder(self, features=[8], name=\"decoder\") -> KM.Model:\n input_tensor = KL.Input(shape=(2, 2, features[0]))\n\n decoded = input_tensor\n\n for i, feature_num in enumerate(features[1:], start=1):\n decoded = deconv_block(\n decoded, feature_num, name + f\"_deconv_{len(features)-i}\"\n )\n\n # Final reconstruction back to the original image size\n decoded = KL.Conv2DTranspose(\n 3,\n (4, 4),\n strides=(2, 2),\n padding=\"same\",\n kernel_initializer=\"he_normal\",\n use_bias=True,\n activation=\"tanh\",\n name=name + f\"_out\",\n )(decoded)\n decoded = DropBlock2D(block_size=5, keep_prob=0.8)(decoded)\n return KM.Model(inputs=input_tensor, outputs=decoded, name=name)", "def uncompress_file(location, decompressor):\n # FIXME: do not create a sub-directory and instead strip the \"compression\"\n # extension such gz, etc. 
or introspect the archive header to get the file\n # name when present.\n assert location\n assert decompressor\n\n warnings = []\n base_name = fileutils.file_base_name(location)\n target_location = os.path.join(fileutils.get_temp_dir(\n prefix='extractcode-extract-'), base_name)\n\n with decompressor(location, 'rb') as compressed:\n with open(target_location, 'wb') as uncompressed:\n buffer_size = 32 * 1024 * 1024\n while True:\n chunk = compressed.read(buffer_size)\n if not chunk:\n break\n uncompressed.write(chunk)\n\n if getattr(decompressor, 'has_trailing_garbage', False):\n warnings.append(location + ': Trailing garbage found and ignored.')\n\n return target_location, warnings", "def get_compress_and_decompress_func(compression_algorithm, compression_level=9):\n # type: (str, int) -> Tuple[Callable, Callable]\n if compression_algorithm in [\"deflate\", \"zlib\"]:\n import zlib\n\n if sys.version_info < (3, 6, 0):\n # Work around for Python <= 3.6 where compress is not a keyword argument, but a regular\n # argument\n @functools.wraps(zlib.compress)\n def compress_func(data):\n return zlib.compress(data, compression_level)\n\n else:\n compress_func = functools.partial(zlib.compress, level=compression_level) # type: ignore\n decompress_func = zlib.decompress # type: ignore\n elif compression_algorithm == \"bz2\":\n import bz2\n\n @functools.wraps(bz2.compress)\n def compress_func(data):\n return bz2.compress(data, compression_level)\n\n decompress_func = bz2.decompress # type: ignore\n elif compression_algorithm == \"zstandard\":\n import zstandard\n\n compressor = zstandard.ZstdCompressor(level=compression_level)\n decompressor = zstandard.ZstdDecompressor()\n compress_func = compressor.compress # type: ignore\n decompress_func = decompressor.decompress # type: ignore\n elif compression_algorithm == \"lz4\":\n import lz4.frame as lz4 # pylint: disable=no-name-in-module\n\n # NOTE: Java implementation which we currently use on the server side doesn't support\n # dependent block stream.\n # See https://github.com/Parsely/pykafka/issues/914 for details\n def compress_func(data):\n try:\n # For lz4 >= 0.12.0\n return lz4.compress(data, compression_level, block_linked=False)\n except TypeError:\n # For older versions\n # For earlier versions of lz4\n return lz4.compress(data, compression_level, block_mode=1)\n\n decompress_func = lz4.decompress # type: ignore\n elif compression_algorithm == \"snappy\":\n import snappy # pylint: disable=import-error\n\n compress_func = snappy.compress # type: ignore\n decompress_func = snappy.decompress # type: ignore\n elif compression_algorithm == \"brotli\":\n import brotli # pylint: disable=import-error\n\n compress_func = functools.partial(brotli.compress, quality=compression_level) # type: ignore\n decompress_func = brotli.decompress # type: ignore\n elif compression_algorithm == \"none\":\n compress_func = noop_compress\n decompress_func = noop_decompress # type: ignore\n else:\n raise ValueError(\"Unsupported algorithm: %s\" % (compression_algorithm))\n\n return compress_func, decompress_func", "def decompress(infile, path, members=None):\n with open(infile, 'rb') as inf, open(path, 'w', encoding='utf8') as tof:\n decom_str = gzip.decompress(inf.read()).decode('utf-8')\n tof.write(decom_str)", "def decimate(self, *args, **kwargs):\n return _image.image_decimate(self, *args, **kwargs)", "def _unzip(self, data):\r\n with io.BytesIO(data) as buf:\r\n with gzip.GzipFile(fileobj=buf) as unzipped:\r\n return unzipped.read()", "def 
convert_deconvolution(node, **kwargs):\n name, inputs, attrs = get_inputs(node, kwargs)\n\n kernel_dims = list(parse_helper(attrs, \"kernel\"))\n stride_dims = list(parse_helper(attrs, \"stride\", [1, 1]))\n pad_dims = list(parse_helper(attrs, \"pad\", [0, 0]))\n num_group = int(attrs.get(\"num_group\", 1))\n dilations = list(parse_helper(attrs, \"dilate\", [1, 1]))\n adj_dims = list(parse_helper(attrs, \"adj\", [0, 0]))\n\n pad_dims = pad_dims + pad_dims\n\n deconv_node = onnx.helper.make_node(\n \"ConvTranspose\",\n inputs=inputs,\n outputs=[name],\n kernel_shape=kernel_dims,\n strides=stride_dims,\n dilations=dilations,\n output_padding=adj_dims,\n pads=pad_dims,\n group=num_group,\n name=name\n )\n\n return [deconv_node]", "def DecodeFunc(self, inp_instance):\n\n def _DecodeFn():\n \"\"\"Decode call to be compiled for TPU.\"\"\"\n _, decode_dict = self._model.ConstructDecodeGraph(\n input_batch=inp_instance.TpuDequeueBatch())\n self.decode_nm = py_utils.NestedMap(decode_dict)\n return self.decode_nm.Flatten()\n\n self._compile_op, batch_parallel_res = tpu.split_compile_and_shard(\n _DecodeFn,\n num_shards=self.data_parallelism,\n device_assignment=py_utils.GetTpuDeviceAssignment())\n\n if self.decode_nm:\n decode_tensors = self.decode_nm.Pack(batch_parallel_res)\n else:\n decode_tensors = py_utils.NestedMap()\n if py_utils.IsEagerMode():\n # The CPU pass through data will be from the infeed function.\n cpu_pt = {}\n else:\n cpu_pt = inp_instance.DequeueCpuPassthrough()\n return decode_tensors, cpu_pt", "def uncompress_gzip(location, target_dir):\n\n return uncompress(location, target_dir, decompressor=gzip.GzipFile)", "def _decompress_blkx(self, blkx: BLKXTable, write_path: str):\n file_name = f\"{blkx.ID}: {_slugify(blkx.Name)}\"\n file_path = os.path.join(write_path, file_name)\n\n with open(file_path, \"wb\") as target_fp:\n for blkx_chunk in blkx.BLKXChunkEntry:\n if blkx_chunk.EntryType == \"0x80000005\": # zlib\n self.dmg_fp.seek(blkx_chunk.CompressedOffset)\n target_fp.write(self.dmg_fp.read(blkx_chunk.CompressedLength))\n # TODO: support more methods", "def decompression_inversion():\n dna_seq, bin_seq, comp_seq, file_comp = binary_to_seq()\n \n #bwt reconstruction\n table = [\"\"] * len(dna_seq)\n\n for i in range(0,len(dna_seq),1):\n table = [dna_seq[i] + table[i] for i in range(0,len(dna_seq))]\n table = sorted(table)\n \n original_seq = None \n for row in table : \n if row.endswith(\"$\"):\n original_seq = row\n\n inverse_bwt = original_seq.rstrip(\"$\") \n \n \n #write the original sequence in a new created file \n file_path = os.path.splitext(file_comp)[0]\n file_inv = open(file_path + \"_decompressed_original.txt\", \"w\") \n file_inv.write(inverse_bwt) \n file_inv.close()\n \n messagebox.showinfo(\"Information\", \"Your decompressed and bwt reconstruction has been saved in \" \\\n +file_path +\"_decompressed_original.txt file.\")\n \n return dna_seq, comp_seq, inverse_bwt" ]
[ "0.8031564", "0.7931041", "0.75796515", "0.7512713", "0.71366394", "0.71033555", "0.68911856", "0.66436976", "0.6177685", "0.60084254", "0.5998883", "0.5772824", "0.5761908", "0.5596383", "0.55957705", "0.5553985", "0.54953575", "0.5484418", "0.54572964", "0.54572964", "0.5454716", "0.5433453", "0.5411343", "0.5391271", "0.5338415", "0.52969426", "0.5290805", "0.5290805", "0.5260717", "0.5238364", "0.5206854", "0.52055424", "0.5152955", "0.51524246", "0.5152348", "0.51338094", "0.5119792", "0.507447", "0.5073052", "0.50624794", "0.5049761", "0.5048622", "0.5041795", "0.5040703", "0.50144804", "0.4994402", "0.49906734", "0.49865335", "0.49840555", "0.49794394", "0.49772668", "0.49579677", "0.49563178", "0.4936525", "0.48989695", "0.48866335", "0.48812464", "0.48794818", "0.4871146", "0.4869671", "0.48661798", "0.48527154", "0.48429215", "0.48420793", "0.4811409", "0.48034576", "0.48007363", "0.4754709", "0.4753654", "0.4744342", "0.4734678", "0.47295833", "0.47141415", "0.47103375", "0.47006404", "0.47005227", "0.47005227", "0.4695596", "0.46930984", "0.46869516", "0.46656466", "0.4649477", "0.46390828", "0.46366167", "0.46147126", "0.4612166", "0.4611239", "0.46094608", "0.45827815", "0.4577734", "0.45770547", "0.45737222", "0.45600677", "0.45596206", "0.4538918", "0.4534075", "0.4532304", "0.45120922", "0.4501068", "0.44942826", "0.446942" ]
0.0
-1
Runs optimization over n rounds of k sequential trials.
def test_run_experiment_locally(self) -> None:
    experiment = Experiment(
        name="torchx_booth_sequential_demo",
        search_space=SearchSpace(parameters=self._parameters),
        optimization_config=OptimizationConfig(objective=self._objective),
        runner=self._runner,
        is_test=True,
        properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},
    )
    scheduler = Scheduler(
        experiment=experiment,
        generation_strategy=(
            choose_generation_strategy(
                search_space=experiment.search_space,
            )
        ),
        options=SchedulerOptions(),
    )
    try:
        for _ in range(3):
            scheduler.run_n_trials(max_trials=2)
        # TorchXMetric always returns trial index; hence the best experiment
        # for min objective will be the params for trial 0.
        scheduler.report_results()
    except FailureRateExceededError:
        pass  # TODO(ehotaj): Figure out why this test fails in OSS.
    # Nothing to assert, just make sure experiment runs.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self, n_trials=10):\n # Create study object\n if self.study is None:\n self.study = optuna.create_study(\n direction=\"minimize\",\n sampler=optuna.samplers.RandomSampler(seed=123)\n )\n # Run trials\n self.study.optimize(\n lambda x: self.objective(x),\n n_trials=n_trials,\n n_jobs=-1\n )", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def dynamic_iteration(k, n):\n # If only one egg remains, n attempts must be made to find the correct floor.\n if k == 1:\n return n\n # Lookup table for previous solutions.\n W = [[0 for y in range(n + 1)] for x in range(k)]\n # Initialize the first row.\n for i in range(n + 1):\n W[0][i] = i\n # Start on second row, working downward.\n for i in range(1, k):\n # Calculate values for each cell.\n for j in range(1, n + 1):\n W[i][j] = min((max(W[i][j - x], W[i - 1][x - 1]) for x in range(1, j + 1))) + 1\n # Return the result.\n return W[k - 1][n]", "def num_step_reach(rr, a, b, k, num_steps = 5):\n\n # Time this function\n start_time = time.time()\n\n # Help us divide and conquer with multiple threads\n pool = ThreadPool(processes = 2)\n\n # Create our BDD variables, matching those exactly in rr (except zz's)\n xx_list = [bddvar(\"xx{}\".format(i)) for i in range(k)]\n yy_list = [bddvar(\"yy{}\".format(i)) for i in range(k)]\n zz_list = [bddvar(\"zz{}\".format(i)) for i in range(k)]\n\n # Compose for each step\n # NOTE: Very slow for many nodes and edges; may want divide and conquer here\n hh = rr\n yyzz_compose_dict = {a:b for a, b in zip(yy_list, zz_list)}\n xxzz_compose_dict = {a:b for a, b in zip(xx_list, zz_list)}\n for i in range(0, num_steps - 1):\n\n # Kickoff threads on async composition\n yyzz_async_result = pool.apply_async(hh.compose, [yyzz_compose_dict])\n 
xxzz_async_result = pool.apply_async(rr.compose, [xxzz_compose_dict])\n\n # Block: get results from threads\n yyzz_compose = yyzz_async_result.get()\n xxzz_compose = xxzz_async_result.get()\n\n # Conjunct them and do smoothing\n hh = (yyzz_compose & xxzz_compose).smoothing(set(zz_list))\n\n print(\"\\tComposed for step\", i)\n\n print(\"Completed in {} seconds\".format(round(time.time() - start_time)))\n print(\"Number of satisfiable variables\", len(list(hh.satisfy_all())))\n\n # See if a and b can reach eachother in the given number of steps\n restrict_dict = {c:d for c, d in zip(set(xx_list), to_bin(a, k))}\n restrict_dict.update({c:d for c, d in zip(set(yy_list), to_bin(b, k))})\n return hh.restrict(restrict_dict)", "def monte_carlo_trials(nb_trials, nb_ok, lock):\n\n # First perform the trials\n # Do not use shared resource because other processes doesn't need to know\n # about computation step\n nb_in_quarter_results = 0\n for i in range(nb_trials):\n x = random.uniform(0, 1)\n y = random.uniform(0, 1)\n if x * x + y * y <= 1.0:\n nb_in_quarter_results += 1\n\n # Finally update shared resource\n # Do it only once, then processes doesn't struggle with each other to\n # update it\n with lock:\n nb_ok.value += nb_in_quarter_results", "def train_q(n=1000):\n for i in range(50):\n p1_strategy = strategies.QStrategy('X')\n p2_strategy = strategies.QStrategy('O')\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_many(n)\n p1.strategy.save_q()\n p2.strategy.save_q()", "def run_simulation(n, experiments, iterations, budget, recovery_count, performance_factor, current_top,\r\n precomputed=True, dataset=None):\r\n T = 0\r\n k = 5\r\n delta = 0.1\r\n range_m = 1*np.arange(1, budget+1)\r\n scores, true_top = init(n, precomputed=precomputed, dataset=dataset)\r\n true_ranks = get_ranks(scores)\r\n\r\n for exp in tqdm.tqdm(range(experiments), desc=\"experiments\"):\r\n for itr in tqdm.tqdm(range(iterations), desc=\"iterations\"):\r\n t = 1\r\n count = 0\r\n A = np.arange(n)\r\n P = np.zeros((n, n))\r\n S = np.zeros(n)\r\n r_count = 0\r\n data = []\r\n for b in range(budget):\r\n m = range_m[b]\r\n\r\n est, t, A, P, S, count, r_count, data = sparse_borda(n, t, T, k, A, P, S, delta, scores, m*n, count, r_count, data)\r\n\r\n ranking, ranks, top = get_ranking(n, est)\r\n if(top == true_top):\r\n recovery_count[b][exp] += 1\r\n performance_factor[b][exp] += ranks[true_top]\r\n current_top[b][exp] += true_ranks[top]\r\n\r\n current_top /= iterations\r\n performance_factor /= iterations\r\n\r\n return ranking, ranks, data, scores, true_top, est, recovery_count, performance_factor, current_top", "def rkStep(ebitParams, mySpecies, species, tstep, populationAtT0, populationAtTtstep):\r\n # longer function param calls yes but it speeds it up calculateK by 10%...\r\n # print(\"\\nRunning an RK step... 
\")\r\n\r\n # mySpecies.k1, mySpecies.r1 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, mySpecies.r1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n # mySpecies.k2, mySpecies.r2 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, mySpecies.r2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n # mySpecies.k3, mySpecies.r3 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, mySpecies.r3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n # mySpecies.k4, mySpecies.r4 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, mySpecies.r4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n mySpecies.k1 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n mySpecies.k2 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n mySpecies.k3 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n mySpecies.k4 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n\r\n # print(\"k values for q=1:\")\r\n # print(\"k1 %s\"%mySpecies.k1[1])\r\n # print(\"k2 %s\"%mySpecies.k2[1])\r\n # print(\"k3 %s\"%mySpecies.k3[1])\r\n # print(\"k4 %s\"%mySpecies.k4[1])\r\n\r\n # Updates the population of each charge state in the species.\r\n for qindex in range(0, mySpecies.Z + 1):\r\n # new energy value = ( kT(q-1)(pop gained by q-1) - kT(q)(lost by q) + kT(q+1)(gained by q+1) ) / total change in population \r\n # populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (sum(mySpecies.r1[qindex]) + (2 * sum(mySpecies.r2[qindex]) + sum(mySpecies.r3[qindex]) ) + sum(mySpecies.r4[qindex]) ))\r\n populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (mySpecies.k1[qindex] + (2 * (mySpecies.k2[qindex] + mySpecies.k3[qindex])) + mySpecies.k4[qindex]) )\r\n\r\n # New calculation of time stepped energy\r\n # deltaPop = [loss by q(i) from EI, gain by q(i) from CX or RR]\r\n # for q in range(0, mySpecies.Z+1):\r\n # deltaPop = [(mySpecies.r1[q][i] + (2 * (mySpecies.r2[q][i] + mySpecies.r3[q][i])) + mySpecies.r4[q][i])/6 for i in range(0,2)]\r\n # # print(\"DeltaPop for q=%s\"%q+\": %s\"%deltaPop)\r\n # if q==0:\r\n # try:\r\n # #this one is with gain only...\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1])\r\n # #this one is with gain and loss... 
caused problems\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # elif q==mySpecies.Z:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # try:\r\n # #gain only\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # # gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # else:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # # print(\"lowerQ: %s\"%lowerQ)\r\n # try:\r\n # #gain\r\n # # print(\"energyAtT0[q-1] = %s\"%energyAtT0[q-1] + \", lowerQ[0] = %s\"%lowerQ[0]+\", populationAtT0[q]=%s\"%populationAtT0[q]+\", deltaPop[1]=%s\"%deltaPop[1])\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # #gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n\r\n \r\n # print(\"Initial pop: %s\"%populationAtT0 + \",\\nfinal pop: %s\"%populationAtTtstep)\r\n # print(\"Initial temp: %s\"%energyAtT0 + \",\\nfinal temp: %s\"%energyAtTtstep)\r\n return", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def SkoptPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90)]\n\n # Use the same seed list as previously.\n seedList = [572505, 357073, 584216, 604873, 854690, 573165, 298975, 650770, 243921, 191168]\n\n # The target for each algorithm. 
This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n    globalFoM = 1.033\n\n    if rank == 0:\n        timeList = []\n        iterationList = []\n\n    # Define which solver will be used.\n    optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"GP\", n_initial_points = int(np.ceil(maxIters/10)), random_state = seedList[rank])\n\n    # Start timing.\n    startTime = time.time()\n    timeElapsed = None\n    iterationSuccess = None\n\n    # Start optimisation.\n    for iteration in range(maxIters):\n\n        # Make one suggestion.\n        nextParams = optimiser.ask()\n\n        # Check what FoM this gives. Go negative as this is a minimisation routine.\n        fEval = FitnessSkopt(nextParams)\n\n        # Update best FoM.\n        if abs(fEval) >= globalFoM:\n            # The algorithm has managed to surpass or equal the paper value.\n            iterationSuccess = iteration\n            timeElapsed = time.time() - startTime\n            \n            if rank == 0:\n                iterationList.append(iterationSuccess)\n                timeList.append(timeElapsed)\n\n            break\n        \n        # Tell the optimiser about the result.\n        optimiser.tell(nextParams, fEval)\n\n    # Run complete. Send results to main process. Tags are unique identifiers.\n    if rank != 0:\n        comm.send(timeElapsed, dest = 0, tag = 1)\n        comm.send(iterationSuccess, dest = 0, tag = 2)\n\n    # Wait for all the processes to end.\n    comm.Barrier()\n\n    if rank == 0:\n        # Aggregate the data.\n        for process in range(comm.Get_size() - 1):\n            # Get the data.\n            individualTime = None\n            individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n            individualIter = None\n            individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n            if individualIter is not None:\n                # Both values must therefore be non-null.\n                iterationList.append(individualIter)\n                timeList.append(individualTime)\n\n        avgRuntime = np.average(timeList)\n        avgIters = np.average(iterationList)\n        try:\n\n            fastestTime = np.min(timeList)\n\n        except ValueError:\n            \n            # List is empty.\n            fastestTime = float('NaN')\n\n        numSuccess = len(iterationList)\n        successRate = numSuccess/numRuns\n\n        print(\"Bayesian optimisation paper testing complete! 
Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def run(self, n_steps: int, n_parallel: int = 1, **kwargs):\n batch_size = kwargs.get(\"batch_size\", 1)\n n_parallel = min(n_parallel, batch_size)\n with self.scheduler(n_parallel=n_parallel) as scheduler:\n for i in range(n_steps):\n samples = self.optimiser.run_step(\n batch_size=batch_size,\n minimise=kwargs.get(\"minimise\", False)\n )\n jobs = [\n self._job(task=self.objective, args=s.as_dict())\n for s in samples\n ]\n scheduler.dispatch(jobs)\n evaluations = [\n r.data for r in scheduler.collect(\n n_results=batch_size, timeout=self._timeout\n )\n ]\n self.optimiser.update(samples, evaluations)\n for s, e, j in zip(samples, evaluations, jobs):\n self.reporter.log((s, e), meta={\"job_id\": j.id})", "def run(data, params):\n start_time = time.process_time()\n\n # 'n' is the number of candidates, also the number of ranks\n n = params['n']\n # 'N' is the total number of voters\n N = params['N']\n # 's0' is the optional ground truth full ranking of the candidates\n # (distribution is drawn off this full ranking)\n s0 = params['s0']\n\n # Order candidates by non-decreasing pair-wise contest wins \n # (ascending order with lexicographic tie-breaking)\n precedenceMatrix = utils.precedenceMatrix(data, n)\n\n # Credits to Sayan-Paul for starter code for merge sort\n # See: https://github.com/Sayan-Paul/Sort-Library-in-Python/blob/master/sortlib.py\n def mergesort(ar):\n if len(ar)<=1:\n return ar\n middle=len(ar)/2\n left =ar[:middle]\n right=ar[middle:]\n left=mergesort(left)\n right=mergesort(right)\n res=merge(left,right)\n return res\n\n def merge(left,right):\n res=[]\n while len(left)+len(right):\n if len(left)*len(right):\n if precedenceMatrix[left[0],right[0]]<=precedenceMatrix[right[0],left[0]]:\n res.append(left[0])\n left=left[1:]\n else:\n res.append(right[0])\n right=right[1:]\n elif len(left):\n res.append(left[0])\n left=left[1:]\n elif len(right):\n res.append(right[0])\n right=right[1:]\n return res\n\n candidates = [i for i in range(n)]\n sortedCandidates = mergesort(candidates)\n\n sigma = tuple(sortedCandidates)\n\n time_elapsed = (time.process_time() - start_time) * 1000\n\n return ALGORITHM_NAME, utils.generalizedKendallTauDistance(data, sigma, n, N, s0), time_elapsed, sigma", "def solution(N, K):\n\n # not really sure what to return if N=1, it should be invalid?\n rounds = N - 1\n next_num = N\n while K > 0 and next_num >= 4:\n next_num = next_num - next_num % 2\n next_num = next_num // 2\n rounds -= (next_num - 1)\n K -= 1\n return rounds", "def Skopt5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90), (0, 90)]\n\n # Use the seedlist from the other runs.\n seedList = [843484, 61806, 570442, 867402, 192390, 60563, 899483, 732848, 243267, 439621] \n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n optimiser = 
skopt.Optimizer(skoptBounds, base_estimator = \"RF\", n_initial_points = int(np.ceil(numIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Find out which point to sample next.\n nextParams = optimiser.ask()\n\n # Evaluate the objective function.\n nextFoM = FitnessSkopt5D(nextParams)\n\n if abs(nextFoM) > bestFoM:\n bestFoM = abs(nextFoM)\n \n # Update the model.\n optimiser.tell(nextParams, nextFoM)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def realTryHard(g, n, verbose=False, graphname=\"\"):\r\n tot = None\r\n for i in range(n):\r\n # print(\"Starting batch \", i)\r\n res = tryEverything(g, verbose, graphname)\r\n if tot is None:\r\n tot = res\r\n else:\r\n for j in res:\r\n tot[j][1] += res[j][1]\r\n for j in tot:\r\n tot[j][1] /= n\r\n tot[j][1] = round(tot[j][1], 3)\r\n return tot", "def define_pseudo_potts_helper_functions(n, k):\n\n assert n>1\n assert k>1\n\n @njit\n def get_multipliers_r(r, multipliers, n=n, k=k):\n \"\"\"Return r's field and all couplings to spin r.\n\n Parameters\n ----------\n r : int\n multipliers : ndarray\n All fields and couplings concatenated together.\n\n Returns\n -------\n ndarray\n Relevant multipliers.\n list\n Index of where multipliers appear in full multipliers array.\n \"\"\"\n \n ix = [r+n*i for i in range(k)]\n multipliersr = np.zeros(k-1+n)\n for i in range(k):\n multipliersr[i] = multipliers[r+n*i]\n\n # fill in the couplings\n ixcounter = k\n for i in range(n):\n if i!=r:\n if i<r:\n ix.append( sub_to_ind(n, i, r) + k*n )\n multipliersr[ixcounter] = multipliers[ix[ixcounter]]\n else:\n ix.append( sub_to_ind(n, r, i) + k*n )\n multipliersr[ixcounter] = multipliers[ix[ixcounter]]\n ixcounter += 1\n return multipliersr, ix\n\n @njit\n def calc_observables_r(r, X, n=n, k=k):\n \"\"\"Return the observables relevant for calculating the conditional probability of\n spin r.\n\n Parameters\n ----------\n r : int\n Spin index.\n X : ndarray\n Data samples of dimensions (n_samples, n_dim).\n\n Returns\n -------\n 
ndarray\n observables\n list of ndarray\n observables if spin r were to occupy all other possible states\n ndarray\n Each col details the occupied by spin r in each array of the previous return\n value, i.e., the first col of this array tells me what r has been changed to\n in the first array in the above list.\n \"\"\"\n\n obs = np.zeros((X.shape[0],k-1+n), dtype=np.int8)\n # keep another copy of observables where the spin iterates thru all other possible states\n otherobs = [np.zeros((X.shape[0],k-1+n), dtype=np.int8)\n for i in range(k-1)]\n # note the hypothetical states occupied by spin r. this makes it easier to keep track of things later\n otherstates = np.zeros((X.shape[0],k-1), dtype=np.int8)\n \n # for each data sample in X\n for rowix in range(X.shape[0]):\n counter = 0\n # record state of spin r in obs and hypothetical scenarios when it is another state in otherobs\n for i in range(k):\n if X[rowix,r]==i:\n obs[rowix,i] = 1\n else:\n otherobs[counter][rowix,i] = 1\n otherstates[rowix,counter] = i\n counter += 1\n ixcounter = k\n \n for i in range(n-1):\n for j in range(i+1,n):\n if i==r:\n obs[rowix,ixcounter] = X[rowix,i]==X[rowix,j]\n kcounter = 0\n for state in range(k):\n if state!=X[rowix,r]:\n otherobs[kcounter][rowix,ixcounter] = X[rowix,j]==state\n kcounter += 1\n ixcounter += 1\n elif j==r:\n obs[rowix,ixcounter] = X[rowix,i]==X[rowix,j]\n kcounter = 0\n for state in range(k):\n if state!=X[rowix,r]:\n otherobs[kcounter][rowix,ixcounter] = X[rowix,i]==state\n kcounter += 1\n ixcounter += 1\n return obs, otherobs, otherstates\n\n return get_multipliers_r, calc_observables_r", "def combinations(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))", "def snowflake2(t, n):\n for i in range(3):\n koch(t, n)\n t.rt(120)", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def optimize(self, goal_length=100, optimization_steps=15):\n self.update()\n # start set of control points\n cpoints = {}\n j = 0\n for i in map(lambda x: np.array([x[0], x[1]]), self.controlpoints):\n cpoints[j] = i\n j += 1\n \n # optimization parameters\n self.goal_length = goal_length\n iterations = optimization_steps\n self.iterations = (iterations, 0)\n \n # the return set of snakes\n snakes = []\n \n # do the actual iterations over the whole snake\n print '\\nOptimizing\\nstarting at 
%s' % self.energy\n # set up time measurement\n starttime = time()\n runtimes = []\n for i in range(iterations):\n # take time at the beginning of the iteration\n iterstarttime = time()\n # set iteration if necessary\n if not self.optimized:\n self.iteration = (iterations, i+1)\n # update the step size if not overridden by fixed step size\n if not self.step_size_fixed:\n self.setStepSize(self.ExternalEnergy.getStepSize(iteration=self.iteration))\n # refine all control points with a greedy optimization\n cpoints = self.greedyOptimize(cpoints)\n energy_before = self.energy\n # partially reset the snake\n self.reset(fullreset = False)\n # and add the optimized set of control points\n self.addControlPoints(*cpoints.values())\n # update the snake so the energies get recalculated\n self.update()\n energy_after = self.energy\n \n assert cpoints.values() == self.controlpoints, 'ungleich: %s, %s' % (cpoints.values(), self.controlpoints)\n assert self.totalEnergy(cpoints.values()) == self.bestenergy_debug, '%s != %s' % (self.totalEnergy(cpoints.values()), self.bestenergy_debug)\n assert self.totalEnergy(self.controlpoints) == self.bestenergy_debug, '%s != %s' % (self.totalEnergy(self.controlpoints), self.bestenergy_debug)\n \n # if this iteration is the first on a new scale\n # allow the new value to be greater than the one before\n if not self.ExternalEnergy.scalestep(self.iteration):\n assert energy_before >= energy_after, '%s is not smaller(or equal) than %s' % (energy_after, energy_before)\n \n print '%s. optimized to %s (iteration: (%s, %s), scale_index: %s, scale_step: %s, step_size: %s)' % (i, self.energy, self.iteration[0], self.iteration[1], self.ExternalEnergy.scaleIndex(self.iteration), self.ExternalEnergy.scalestep(self.iteration), self.step_size)\n # append the current state to the return list of snakes\n snakes.append({'controlpoints': self.controlpoints, 'contour': self.contour, 'flip': self.flip, 'externalenergies': self.ext_energies})\n # repaint the snake\n self.qimageviewer.repaint()\n # track every iteration run time\n runtimes.append(time()-iterstarttime)\n # calculate the average iteration runtime\n avgruntime = reduce(lambda x, y: x+y, runtimes)/float(len(runtimes))\n print 'optimized after %s secs w/ iteration average of %s secs' % (time()-starttime, avgruntime)\n \n print 'ext', self.ext_energies\n print 'spc', self.spc_energies\n print 'crv', self.crv_energies\n \n return snakes", "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= 
self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def simulate(n: int, iterations: int, strategy: Callable[[List[bool]], int], case: Case) -> float:\n\n if case is Case.random:\n steps = []\n for _ in range(iterations):\n train = generate_train(n, case)\n steps.append(strategy(train))\n\n return sum(steps) / iterations\n else: # No need to run iterations for identical trains\n train = generate_train(n, case)\n return strategy(train)", "def run_several_iterations(iterations, means, horizon):\n\n # Initializing the results vector.\n results = [0]*horizon\n\n for iteration in range(iterations):\n\n # The current cumulative regret.\n results = np.add(results, run_sparring_algorithm(means[:, iteration], horizon))\n\n # Returning the average cumulative regret.\n return results/(iterations +.0)", "def reduce_n_times(tree, n, weights, last_round=False):\n k = []\n if n==1:\n aggs_ = list(np.unique(np.array(aggs(tree, weights))))\n agg_weights = [apply_aggregation(t, weights) for t in aggs_]\n H = [calculate_H(x) for x in agg_weights]\n sorted_H = [i[0] for i in sorted(enumerate(H), key=lambda x:x[1], reverse=True)][0:10]\n for i in sorted_H:\n k.append(aggs_[i])\n return k\n if n>1:\n trees = reduce_n_times(tree, n-1, weights)\n results = []\n for tree in trees:\n results = results + aggs(tree, weights)\n l = min([len(x) for x in results])\n results = filter(lambda x: len(x) == l, results)\n results = list(np.unique(np.array(results)))\n agg_weights = [apply_aggregation(t, weights) for t in results]\n S = [calculate_S(x) for x in agg_weights]\n if last_round == True:\n sorted_S = [i[0] for i in sorted(enumerate(S), key=lambda x:x[1], reverse=True)]\n for i in sorted_S:\n k.append(results[i])\n else:\n sorted_S = [i[0] for i in sorted(enumerate(S), key=lambda x:x[1], reverse=True)][0:10]\n for i in sorted_S:\n k.append(results[i])\n return k", "def _kshape(x, k, n_init=1, max_iter=100, n_jobs = 1, random_state=None,normalize=True ):\r\n #print \"n jobs run in parallel: \" + str(cpu_count() ) \r\n random_state = check_random_state(random_state)\r\n best_tot_dist,best_centroids,best_idx = None,None,None\r\n \r\n if n_jobs ==1:\r\n\r\n for i_init in range(n_init): \r\n # n_init is the number of random starting points\r\n # pdb.set_trace()\r\n \r\n idx, centroids,tot_dist = _kshape_single(x, k, 
max_iter=max_iter, random_state= random_state,normalize=normalize) \r\n if best_tot_dist is None or tot_dist < best_tot_dist:\r\n best_idx = idx.copy()\r\n best_centroids = centroids.copy()\r\n best_tot_dist = tot_dist\r\n else: # n_jobs not =1 # if -1, all CPUs are used\r\n # parallelisation of kshape runs\r\n seeds = random_state.randint(np.iinfo(np.int32).max,size=n_init)\r\n results = Parallel(n_jobs=n_jobs, verbose=0)(\r\n delayed(_kshape_single)(x,k,max_iter=max_iter, random_state=seed, normalize=normalize)\r\n for seed in seeds )\r\n # Get results with the lowest distances\r\n idx, centroids,tot_dist, iterations = zip(*results)\r\n best = np.argmin(tot_dist) \r\n best_idx = idx[best]\r\n best_centroids = centroids[best]\r\n best_tot_dist = tot_dist[best]\r\n sys.stdout.write(\"Done: k=\"+str(k)+\"\\n\")\r\n return {'centroids':best_centroids, 'labels':best_idx, 'distance':best_tot_dist,'centroids_all':centroids,'labels_all':idx,'distance_all':tot_dist,'iterations':iterations}", "def define_potts_helper_functions(k):\n\n @njit\n def calc_observables(X, k=k):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n\n Returns\n -------\n ndarray\n Dimensions (n_samples, n_observables).\n \"\"\"\n\n n = X.shape[1]\n Y = np.zeros((len(X), n*k+n*(n-1)//2), dtype=np.int8)\n \n # average orientation (magnetization)\n # note that fields for the third state are often set to 0\n counter = 0\n for i in range(k):\n for j in range(n):\n Y[:,counter] = X[:,j]==i\n counter += 1\n \n # pairwise correlations\n for i in range(n-1):\n for j in range(i+1, n):\n Y[:,counter] = X[:,i]==X[:,j]\n counter += 1\n \n return Y\n\n def calc_e(X, multipliers, k=k, calc_observables=calc_observables):\n \"\"\"\n Parameters\n ----------\n X : ndarray of dtype np.int64\n Dimensions (n_samples, n_spins).\n multipliers : ndarray of dtype np.float64\n\n Returns\n -------\n ndarray\n Energies of each observable.\n \"\"\"\n\n return -calc_observables(X, k).dot(multipliers)\n\n def mch_approximation(sample, dlamda, calc_e=calc_e):\n \"\"\"Function for making MCH approximation step for Potts model.\n \n Parameters\n ----------\n sample : ndarray\n Of dimensions (n_sample, n_spins).\n dlamda : ndarray\n Change in parameters.\n \n Returns\n -------\n ndarray\n Predicted correlations.\n \"\"\"\n\n dE = calc_e(sample, dlamda)\n ZFraction = len(dE) / np.exp(logsumexp(-dE))\n predsisj = (np.exp(-dE[:,None]) / len(dE) * calc_observables(sample)).sum(0) * ZFraction \n assert not ((predsisj<0).any() or\n (predsisj>(1+1e-10)).any()),\"Predicted values are beyond limits, (%E,%E)\"%(predsisj.min(),\n predsisj.max())\n return predsisj\n\n return calc_e, calc_observables, mch_approximation", "def compute(self, X, Y, n):\n inner_cv = KFold(5, shuffle=True, random_state=1673)\n\n print('-> grid searching and cross validation ...')\n for training, validation, j in self._k_fold_cross_validation(X, 5, n):\n\n x, y, valid_x, valid_y = X.loc[training, :], Y[training], X.loc[validation, :], Y[validation]\n x_features, valid_features = self.sat_features.loc[training, :], self.sat_features.loc[validation, :]\n\n if 'kNN' in self.model_list:\n parameters = {'n_neighbors': range(1, 18, 2)}\n model = KNeighborsRegressor(weights='distance')\n self.kNN = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.kNN.fit(x, y).predict(valid_x)\n self.results['kNN'].append(list(res))\n self.scores['kNN'].append(R2(valid_y, res))\n\n if 'Kriging' in self.model_list:\n 
parameters = {\"kernel\": [RBF(l) for l in [[1, 1]]]}\n model = GaussianProcessRegressor(alpha=0.1, n_restarts_optimizer=0)\n self.Kriging = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n\n res = self.Kriging.fit(x, y).predict(valid_x)\n self.results['Kriging'].append(list(res))\n self.scores['Kriging'].append(R2(valid_y, res))\n\n if 'RmSense' in self.model_list:\n parameters = {\"alpha\": [0.001, 0.01, 0.1, 1, 10, 100, 1000]}\n model = Ridge()\n self.RmSense = GridSearchCV(estimator=model, param_grid=parameters, cv=inner_cv, scoring=r2)\n #print('INFO: best alpha - ', self.RmSense.fit(x_features, y).best_params_)\n\n res = self.RmSense.fit(x_features, y).predict(valid_features)\n self.results['RmSense'].append(list(res))\n self.scores['RmSense'].append(R2(valid_y, res))\n\n if 'Ensamble' in self.model_list:\n res = (self.RmSense.predict(valid_features) + self.kNN.predict(valid_x)) / 2.\n self.results['Ensamble'].append(list(res))\n self.scores['Ensamble'].append(R2(valid_y, res))\n\n for m in self.model_list:\n print('score {}: {}'.format(m, np.mean(self.scores[m])))", "def basic_experiments(k, p, num_iter, transforms, train_path, test_path):\n start_time = time.time()\n print(\"Generating advice\")\n with open(train_path, \"r\") as f:\n train_data = f.readlines()\n advice = generate_advice(train_data, transforms)\n print(\"Done: \" + str(time.time() - start_time) + \"\\n\")\n\n print(\"Generating real count for test data\")\n with open(test_path, \"r\") as f:\n test_data = f.readlines()\n real_count = generate_advice(test_data, transforms)\n print(\"Done: \" + str(time.time() - start_time))\n actual = [x.moment(p) for x in real_count]\n print(\"Actual moments:\")\n print(actual)\n\n print(\"Estimating using noisy advice:\")\n est_with_advice = []\n for i in range(num_iter):\n seed = str(random.randint(1, 2**64 + 1)) + str(random.randint(1, 2**64 + 1))\n this_hash_exp = lambda x: hash_exp(x, seed)\n est_with_advice.append(\n estimate_using_advice(test_data, transforms, k, p, this_hash_exp,\n advice))\n print(\"Done: \" + str(time.time() - start_time))\n print(est_with_advice)\n\n print(\"Estimating using perfect advice:\")\n est_perfect_advice = []\n for i in range(num_iter):\n seed = str(random.randint(1, 2**64 + 1)) + str(random.randint(1, 2**64 + 1))\n this_hash_exp = lambda x: hash_exp(x, seed)\n est_perfect_advice.append(\n estimate_using_advice(test_data, transforms, k, p, this_hash_exp,\n real_count))\n print(\"Done: \" + str(time.time() - start_time))\n print(est_perfect_advice)\n\n print(\"Estimating using PPSWOR:\")\n est_ppswor = []\n for i in range(num_iter):\n cur = []\n for x in real_count:\n cur.append(ppswor_estimate_moment(x.counts.items(), k, p))\n est_ppswor.append(cur)\n print(\"Done: \" + str(time.time() - start_time))\n print(est_ppswor)\n\n print(\"Estimating using ell_2 PPSWOR:\")\n est_ppswor_l2 = []\n for i in range(num_iter):\n cur = []\n for x in real_count:\n cur.append(ppswor_estimate_moment(x.counts.items(), k, p, 2))\n est_ppswor_l2.append(cur)\n print(\"Done: \" + str(time.time() - start_time))\n print(est_ppswor_l2)", "def get_result_dataset(att_trees, data, k=DEFAULT_K, n=10):\n data_back = copy.deepcopy(data)\n length = len(data_back)\n print \"K=%d\" % k\n joint = 5000\n datasets = []\n check_time = length / joint\n if length % joint == 0:\n check_time -= 1\n for i in range(check_time):\n datasets.append(joint * (i + 1))\n datasets.append(length)\n all_ncp = []\n all_rtime = []\n all_pollution = []\n deletion_all_ncp = 
[]\n deletion_all_rtime = []\n for pos in datasets:\n ncp = rtime = pollution = 0\n if __DEBUG:\n print '#' * 30\n print \"size of dataset %d\" % pos\n print \"Enhanced Mondrian\"\n for j in range(n):\n temp = random.sample(data, pos)\n result, eval_result = mondrian(att_trees, temp, k)\n ncp += eval_result[0]\n rtime += eval_result[1]\n pollution += eval_result[2]\n data = copy.deepcopy(data_back)\n # save_to_file((att_trees, temp, result, k, L))\n ncp /= n\n rtime /= n\n pollution /= n\n if __DEBUG:\n print \"Average NCP %0.2f\" % ncp + \"%\"\n print \"Running time %0.2f\" % rtime + \"seconds\"\n print \"Missing Pollution = %.2f %%\" % pollution + \"%\"\n print \"Mondrian\"\n all_ncp.append(round(ncp, 2))\n all_rtime.append(round(rtime, 2))\n all_pollution.append(round(pollution, 2))\n ncp = rtime = 0\n for j in range(n):\n temp = random.sample(data, pos)\n result, eval_result = mondrian_delete_missing(att_trees, temp, k)\n ncp += eval_result[0]\n rtime += eval_result[1]\n data = copy.deepcopy(data_back)\n ncp /= n\n rtime /= n\n if __DEBUG:\n print \"Average NCP %0.2f\" % ncp + \"%\"\n print \"Running time %0.2f\" % rtime + \"seconds\"\n deletion_all_ncp.append(round(ncp, 2))\n deletion_all_rtime.append(round(rtime, 2))\n print \"Mondrian\"\n print \"All NCP\", deletion_all_ncp\n print \"All Running time\", deletion_all_rtime\n print \"Enhanced Mondrian\"\n print \"All NCP\", all_ncp\n print \"All Running time\", all_rtime\n print \"Missing Pollution\", all_pollution", "def split_simsplit_3epochs_iter5(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def k_fold_cross_validation_populationwise(self, K: int=5, rem_day4:bool=True, smote:bool=True, shuffle: bool=False) -> dict:\n counter = 0\n results = {}\n for df in self.dataframes:\n k_folds = {}\n\n header = set(df['label'].tolist())\n # Removing Day 4\n trails = set()\n for i in header:\n trail = eval(i)\n if trail[0] != 4 and trail[0] != 3:\n trails.add(i)\n else:\n if not(rem_day4):\n trails.add(i)\n\n header = trails\n\n # Getting all the matrices from the trials\n for k in range(K):\n k_folds[k] = {\"X_train\": [], \"X_test\": [], \"y_test\": [], \"y_train\": []}\n for trial in header:\n # geting rows with (day, Trail)-label\n rows = df.loc[df['label'] == trial].to_numpy()\n # getting response label\n response = rows[0][-1]\n # getting the actual data from the matrix\n rows = np.delete(rows, np.s_[0,1,-1], axis=1)\n\n chunks = np.array_split(rows, K)\n for chunk in chunks[k]:\n k_folds[k][\"X_test\"].append(chunk.astype(np.float))\n k_folds[k][\"y_test\"].append(response)\n\n train_chunks = 
np.delete(chunks, k, axis=0)\n for chunk in train_chunks:\n for ch in chunk:\n k_folds[k][\"X_train\"].append(ch.astype(np.float))\n k_folds[k][\"y_train\"].append(response)\n\n for k in range(K):\n\n self.X_test = np.asarray(k_folds[k][\"X_test\"])\n self.y_test = np.asarray(k_folds[k][\"y_test\"])\n self.X_train = np.asarray(k_folds[k][\"X_train\"])\n self.y_train = np.asarray(k_folds[k][\"y_train\"])\n\n if smote:\n self.use_SMOTE()\n\n if shuffle:\n self.shuffle_labels()\n\n k_folds[k][\"X_test\"] = self.X_test\n k_folds[k][\"X_train\"] = self.X_train\n k_folds[k][\"y_test\"] = self.y_test\n k_folds[k][\"y_train\"] = self.y_train\n \n results[self.populations[counter]] = k_folds\n counter +=1\n\n return results", "def fn(n, k):\n if n == 1: return k # base case \n return sum(fn(n-1, kk) for kk in range(1, k+1))", "def time_run(fnk):\n xval = []\n yval = []\n for n in range(10, 1000, 10):\n xval.append(n)\n graph = gdc.make_upa_graph(n, 5)\n c_time = time.time()\n fnk(graph)\n time_passed = time.time() - c_time\n yval.append(time_passed)\n return xval, yval", "def run_trials_exact(n, d, eta, num_trials, data_, log_file=None, data_file=None):\n # store variables by references in params dictionary\n params = {\"n\": n, \"d\": d, \"eta\": eta}\n \n trial_param, trial_param_vals = utils.find_trial_param\n\n utils.check_log(log_file)\n\n data_shape = data_.shape\n num_rows = data_shape[0]\n num_columns = data_shape[1]\n\n times = []\n\n for x in trial_param_vals:\n # update trial parameter in vars dictionary\n params[trial_param] = x\n\n # update all parameters (will be unchanged except for the trial parameter)\n n_ = params[\"n\"]\n d_ = params[\"d\"]\n eta_ = params[\"eta\"]\n\n # elapsed time for each trial\n trials = [0]*num_trials\n\n for i in range(num_trials):\n utils.progress_report(x, trial_param, i)\n\n # get combination of rows/columns\n rows = np.random.choice(num_rows, size=n_, replace=False)\n columns = np.random.choice(num_columns, size=d_, replace=False)\n\n # load subset of data and calculate M\n trial_data = data.loading.subset_data(data_, rows, columns)\n M = meb.geometry.M_estimate(trial_data)\n\n # solve model and store variables\n c, r, _, trials[i] = meb.gurobi_solvers.mebwo(trial_data, eta_, M, log_file=log_file)\n \n if log_file is not None:\n utils.benchmark_logger(filepath=log_file, elapsed=trials[i], n=n_, d=d_, eta=eta_, M=M, r=r, c=c, trial_number=i, num_trials=num_trials, data_filepath=data_file, rows=rows, columns=columns)\n \n times.append(trials)\n \n avg_times = utils.calc_avg_times(times)\n\n return avg_times", "def n_choose_k(N,K):\n return factorial(N) // (factorial(N - K) * factorial(K))", "def get_result_k(att_trees, data):\n data_back = copy.deepcopy(data)\n all_ncp = []\n all_rtime = []\n all_pollution = []\n deletion_all_ncp = []\n deletion_all_rtime = []\n # for k in range(5, 105, 5):\n for k in [2, 5, 10, 25, 50, 100]:\n if __DEBUG:\n print '#' * 30\n print \"K=%d\" % k\n print \"Enhanced Mondrian\"\n _, eval_result = mondrian(att_trees, data, k)\n data = copy.deepcopy(data_back)\n all_ncp.append(round(eval_result[0], 2))\n all_rtime.append(round(eval_result[1], 2))\n all_pollution.append(round(eval_result[2], 2))\n if __DEBUG:\n print \"NCP %0.2f\" % eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n print \"Missing Pollution = %.2f %%\" % eval_result[2]\n print \"Mondrian\"\n _, eval_result = mondrian_delete_missing(att_trees, data, k)\n data = copy.deepcopy(data_back)\n if __DEBUG:\n print \"NCP %0.2f\" % 
eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n deletion_all_ncp.append(round(eval_result[0], 2))\n deletion_all_rtime.append(round(eval_result[1], 2))\n print \"Mondrian\"\n print \"All NCP\", deletion_all_ncp\n print \"All Running time\", deletion_all_rtime\n print \"Enhanced Mondrian\"\n print \"All NCP\", all_ncp\n print \"All Running time\", all_rtime\n print \"Missing Pollution\", all_pollution", "def train(self):\n\n # Step 1 - Obtain optimized weights for final model ------------------------------------------------------------\n\n t0 = time()\n\n # Check the training data for potential hazardous problems\n self.check_training_samples()\n\n opt_results = pd.DataFrame()\n kf_opt = StratifiedKFold(n_splits=self.kfold_cv, shuffle=True)\n rep_str, opt_str = '', ''\n\n if self.verbose:\n print('\\n\\n__ TRAINING STEP 1/2 \\_______________________________')\n print(' \\ Train with reverse %d-fold CV - %d time(s) /\\n' % (self.kfold_cv, self.n_repeat))\n\n for i_rep in range(self.n_repeat):\n\n if self.verbose:\n rep_str = '\\n_/--- Rep %d/%d' % (i_rep + 1, self.n_repeat)\n\n # Sample clf-net parameters to test\n param = [\n np.random.normal(loc=self.n_estimators,\n scale=self.n_estimators*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_impurity_decrease,\n scale=self.min_impurity_decrease*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_sample_leaf,\n scale=np.ceil(self.min_sample_leaf*self.param_tune_scale),\n size=self.kfold_cv),\n ]\n scores = list()\n\n for j_fold, (opt_idxs, cv_train_idxs) in enumerate(kf_opt.split(\n X=self.datas[self.train_idx].nidx_train,\n y=self.datas[self.train_idx].gen_labels(condense_labels=True))):\n\n if self.verbose:\n print(rep_str + ' - CV %d/%d ---\\_____\\n' % (j_fold + 1, self.kfold_cv))\n\n # set clf-net parameters\n self.n_estimators = param[0][j_fold]\n self.min_impurity_decrease = param[1][j_fold]\n self.min_sample_leaf = param[2][j_fold]\n self.clf_net = self.gen_rfc()\n\n # Split data\n opt_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in opt_idxs])\n cv_train_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in cv_train_idxs])\n\n # Partition train/eval nidx for reverse k-fold CV training\n _, _, opt_eval_nidxs, opt_train_nidxs = train_test_split(\n np.zeros(len(opt_nidxs)),\n opt_nidxs,\n test_size=1/(self.kfold_cv - 1),\n shuffle=True,\n stratify=self.datas[self.train_idx].gen_labels(nidxs=opt_nidxs, condense_labels=True))\n\n # Train clfs\n if self.verbose:\n print('\\n> Training base classifiers ...')\n self._train_clfs(train_nidxs=cv_train_nidxs)\n\n # Evaluate train with cv_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_train partition ...')\n self.clfs_predict(nidxs_target=cv_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n eval_idx=self.train_idx)\n\n # Evaluate pre-optimization with opt_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_eval partition ...')\n cv_res = self.clfs_predict(nidxs_target=opt_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n\n # Train clf-opt with opt_train partition results\n if self.verbose:\n print('\\n> Training clf-opt ...')\n self._train_clf_opt(predictions=cv_res)\n\n # Evaluate clf-opt with opt_eval partition\n if self.verbose:\n print('\\n> Evaluating optimized classifier with opt_test partition ...')\n opt_res = 
self.clfs_predict(nidxs_target=opt_eval_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n opt_results = opt_results.append(opt_res, ignore_index=True)\n\n # Append score to optimize clf-net parameter\n r = self.scores(opt_res['ytruth'], opt_res['ynet'])\n if not self.aim:\n scores.append(r['aucroc'])\n else:\n aim = self.aim.replace('hard', '')\n scores.append(r[aim])\n\n # reset link2featidx\n self.datas[self.train_idx].link2featidx = {}\n\n # Aggregate results from clf-net parameter search\n self._set_clf_net_param(param, scores)\n\n # STEP 2 - Train final model -----------------------------------------------------------------------------------\n # .clf_opt is already trained through previous iterations by using warm_start\n\n if self.verbose:\n print('\\n__ TRAINING STEP 2/2 \\_______________________________')\n print(' \\ Train final model with all train data /\\n')\n\n # Train clfs with all the data\n self._train_clfs()\n\n # Evaluate final clf-opt with all data\n print('\\n> Evaluating final classifier ...')\n self.clfs_predict(nidxs_target=self.datas[self.train_idx].nidx_train, to_eval=True, eval_idx=self.train_idx)\n print('** Because this is evaluating with the training data, classifier performances should be very high.')\n\n # Assign model ID - this is here so that if retrained, it would be known that it is not the same model anymore\n self.id = 'm_%s' % gen_id()\n\n if self.verbose:\n te = (time() - t0) / 60\n print('\\n Training took %.1f minutes on %d processors' % (te, os.cpu_count()))\n print('\\n__ __________')\n print(' \\ Training complete! /\\n')\n\n return opt_results", "def run_iterations(self, n, verbose = False):\n for i in range(n):\n # Calculate total number of neighbors for each cell\n all_neighbors = self.get_all_neighbors()\n all_num_neighbors = np.sum(all_neighbors, axis = (-2,-1)) - self.board\n # Determine new state for each cell using lookup table and number of neighbors\n self.board[:] = np.where(self.board, \n self.lookup[1][all_num_neighbors], \n self.lookup[0][all_num_neighbors])\n # Verbosity check\n if verbose:\n print(self.board)", "def combinations(n, k):\r\n return exp(gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1))", "def run_kohonen_dynamicLearningRate(data,fun,size_k: int=6, eta: float=0.1, tmax: int=5000, convergence=0):\n dim = 28*28\n data_range = 255.0\n dy, dx = data.shape\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n #error for convergence criterion\n error = [np.inf]\n\n for t, i in enumerate(i_random):\n old_centers = copy(centers)\n sigma = fun(t)\n som_step(centers, data[i,:],neighbor,eta,sigma)\n \n if t % 1E4 == 0:\n print('iteration {}'.format(t))\n \n if convergence == 1:\n #convergence: distance between samples and best matching prototypes \n error.append(calculate_error(centers,data))\n# if np.abs((error[-2]-error[-1])/error[1]) < eps :\n# break\n \n elif convergence == 2:\n #convergence: non significant weight update\n err = np.linalg.norm(centers-old_centers)\n error.append(err)\n# if err < eps_2:\n# break\n\n \"\"\" # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i)\n \n plb.imshow(np.reshape(centers[i,:], [28, 
28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw() \"\"\"\n return centers, error[1:]", "def MC3_estimate(self, n_iterations, thinning, wsize):\n # type: (int, int, int) -> None\n # set counter\n counter = 0\n\n # create log files\n if self.temperature == 1:\n self.writer.create_params()\n\n # calculate initial model parameters\n self.log_likelihood = self.evo_model.likelihood_language_sensitive(tc=False)\n\n # create local variables\n next_move = self.evo_model.next_step_RWM\n ll_computation = self.evo_model.likelihood_language_sensitive\n\n # store initial parameters\n if self.temperature == 1:\n self.writer.save_state(counter, self.log_likelihood, True)\n\n counter += 1\n\n rho_acc = 0.0\n n_accept = 0.0\n step = 1\n\n # calculate window size\n\n don, rec, swap_iteration = self.get_next_swap(step)\n\n while step < n_iterations:\n if self.temperature == 1:\n\n if step % 10 == 0:\n print(str(step) + \" ----- \" + str(rho_acc) + \" ----- \" + str(self.log_likelihood))\n\n # the proposal move is performed inside the evolutionary model\n if random.choice([True, False, False]):\n self.log_likelihood, nacc = self.evo_model.next_slice(self.log_likelihood)\n n_accept += nacc\n else:\n m_h_ratio, prior_prob, candidate, tc = next_move(wsize)\n\n # calculate new likelihood\n new_likelihood = -np.inf if np.isinf(m_h_ratio) else ll_computation(tc=tc)\n\n # sanity check\n if math.isnan(new_likelihood):\n self.evo_model.revert_RWM()\n\n else:\n # calculate acceptance ratio\n ll_ratio = (new_likelihood - self.log_likelihood) + prior_prob\n ll_ratio *= self.temperature\n n_score = (ll_ratio + m_h_ratio)\n\n rho = min(0.0, n_score)\n\n u = np.log(random.uniform(0, 1))\n\n if u < rho:\n # accept move\n n_accept += 1.0\n self.log_likelihood = new_likelihood\n\n else:\n # reject move\n self.evo_model.revert_RWM()\n\n rho_acc = n_accept / step\n\n # store parameters if desired for current iteration\n if self.temperature == 1:\n if step % thinning == 0:\n self.writer.save_state(step // thinning, self.log_likelihood, False)\n counter += 1\n\n if swap_iteration == step:\n # attempt swap\n self.try_swap(don, rec, swap_iteration)\n\n # get next swap iteration\n don, rec, swap_iteration = self.get_next_swap(step)\n\n step += 1", "def setNumIterations(*argv):", "def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res", "def refugia_adj_5_simsplit_4epochs_iter3 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n 
fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def test(numTrials):\n # Your Code Here\n hits = 0.0\n for i in range(numTrials):\n result = trial()\n #print result\n hits += result\n return hits / numTrials", "def multiple_runs(self, n: int, **kwargs: tp.Any) -> tp.Dict[str, tp.List[tp.Tuple[ARR, ARR]]]:\n predictions = [self.single_run(**kwargs) for i in range(n)]\n return {\n 'train': [pred['train'] for pred in predictions],\n 'test': [pred['test'] for pred in predictions]\n }", "def reduce_run():", "def timing_gemm(trials, k):\n # values to test\n vals = (100, 300, 500, 1000, 1500, 2000, 2500, 3000)\n dtypes = ('float64', 'float32')\n trans_tuple = ('n', 't')\n bp_total = 0.0\n np_total = 0.0\n\n for n in vals:\n # test all combinations of all possible values\n for (dtype, trans_a, trans_b) in product(dtypes, trans_tuple, trans_tuple):\n bp_time, np_time = timing_test(dtype, trans_a, trans_b, n, k, trials)\n bp_total += bp_time\n np_total += np_time\n\n print(\"\\nk: %d, m=n: %d, BLASpy Average: %.5fs, NumPy Average: %.5fs\"\n % (k, n, bp_total / 8, np_total / 8))", "def train(n):\n\n ai = NimAI()\n\n print(f\"Play {n} training games\")\n for _ in range(n): \n game = Nim()\n\n # Keep track of last move made by either player\n last = {\n 0: {\"state\": None, \"action\": None},\n 1: {\"state\": None, \"action\": None}\n }\n\n # Game loop\n while True:\n\n # Keep track of current state and action\n state = game.piles.copy()\n action = ai.chooseAction(game.piles)\n\n # Keep track of last state and action\n last[game.player][\"state\"] = state\n last[game.player][\"action\"] = action\n\n # Make move and switch players\n game.move(action)\n new_state = game.piles.copy()\n\n # When game is over, update Q values with rewards\n if game.winner is not None:\n # The game is over when a player just made a move that lost him the game.\n # The move from the previous player was therefore game winning.\n # Both events are used to update the AI.\n # new_state is [0, 0, 0, 0] here and its used to update the AI, because\n # future rewards should not be considered in the Q-learning formula.\n ai.update(state, action, new_state, -1)\n ai.update(\n last[game.player][\"state\"],\n last[game.player][\"action\"],\n new_state,\n 1\n )\n break\n\n # If game is continuing, no rewards yet\n elif last[game.player][\"state\"] is not None:\n ai.update(\n last[game.player][\"state\"],\n last[game.player][\"action\"],\n new_state,\n 0\n )\n\n print(\"Done training\")\n\n # Return the trained AI\n return ai", "def run(self, n):\n new_trajectories = self.enumerate_trajectories(self.gpm.Graph, n, self.source, self.target, max_iter=self.max_iter)\n self._trajectories += new_trajectories", "def main(num_trials, num_actions):\n\tfor i in xrange(int(num_trials)):\n\t\ttrial(i+1, int(num_actions))", "def value_iteration(vision, n_tongs):\n q_value = np.zeros([11, 11, 4, 3], dtype=np.float32)\n for _ in range(1000):\n delta = 0\n for px in range(11):\n for py in range(11):\n item = recognize_item(px, py, vision)\n # Not update for locations collecting items, to avoid endless loops.\n if (item == 'diamond' and n_tongs > 0) or item == 'tongs' or item == 'jellybean':\n continue\n q_new = np.zeros([4, 3], dtype=np.float32)\n for d in range(4):\n for a in range(3):\n px_next, py_next, d_next, r = simulate_step(px, py, d, a, vision, n_tongs)\n q_new[d, a] = r\n 
if 0 <= px_next < 11 and 0 <= py_next < 11: # next position in vision\n                            q_new[d, a] += np.max(q_value[px_next, py_next, d_next])\n                delta = max(delta, norm(q_new - q_value[px, py], ord=np.inf))\n                q_value[px, py] = q_new\n        if delta < 1e-3:\n            break\n    return q_value", "def combination(n, k):\n    if (k > n) or (n < 0) or (k < 0):\n        return 0\n    val = 1\n    for j in range(min(k, n - k)):\n        val = (val * (n - j)) // (j + 1)\n    return val", "def split_simsplit_3epochs_iter4(params, ns):\n    #24 parameters \n    nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n    fs = moments.Spectrum(sts)\n    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n    fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n    ## Population function and migration matrix for T1\n    nu_T1 = [nu1a, nu2a, nu3a]\n    mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n    fs.integrate(nu_T1, T1, m=mig1)\n    ## Population function and migration matrix for T2\n    nu_T2 = [nu1b, nu2b, nu3b]\n    mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n    fs.integrate(nu_T2, T2, m=mig2)\n    ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n    nu_T3 = [nu1c, nu2c, nu3c]\n    fs.integrate(nu_T3, T3) \n    return fs", "def train_rounds(self, epochs, rounds, sim_per_round, batch_size, n_obs, **kwargs):\n\n    # Make sure n_obs is fixed, otherwise not working \n    assert type(n_obs) is int,\\\n    'Round-based training currently only works with fixed n_obs. Use online learning for variable n_obs or fix n_obs to an integer value.'\n\n    losses = dict()\n    for r in range(1, rounds+1):\n        \n        # Data generation step\n        if r == 1:\n            # Simulate initial data\n            print('Simulating initial {} data sets...'.format(sim_per_round))\n            params, sim_data = self._forward_inference(sim_per_round, n_obs, **kwargs)\n        else:\n            # Simulate further data\n            print('Simulating new {} data sets and appending to previous...'.format(sim_per_round))\n            print('New total number of simulated data sets: {}'.format(sim_per_round * r))\n            params_r, sim_data_r = self._forward_inference(sim_per_round, n_obs, **kwargs)\n\n            # Add new simulations to previous data\n            params = np.concatenate((params, params_r), axis=0)\n            sim_data = np.concatenate((sim_data, sim_data_r), axis=0)\n\n        # Train offline with generated stuff\n        losses_r = self.train_offline(epochs, batch_size, params, sim_data)\n        losses[r] = losses_r\n\n    return losses", "def fn(n, k):\n    if n == k: return 1\n    if k == 0: return 0\n    return ((n-1)*fn(n-1, k) + fn(n-1, k-1)) % 1_000_000_007", "def batch_anneal(self, times=10):\n    for i in range(1, times + 1):\n        print(f\"Iteration {i}/{times} -------------------------------\")\n        self.T = self.T_save\n        self.iteration = 1\n        self.cur_solution, self.cur_fitness = self.initial_solution()\n        self.anneal()", "def partial_permutations(n, k):\n    return int((factorial(n) / factorial(n - k)) % 1000000)", "def refugia_adj_5_simsplit_4epochs_iter5 (params, ns):\n    #28 parameters \n    nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n    fs = moments.Spectrum(sts)\n    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n    fs = 
moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def iterate(rk):\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk", "def runcount(test_keys, sigma, sigma_max, sigma_step,\n npoints_min, npoints_max, npoints_step):\n run = 1\n for key in test_keys:\n if key:\n while sigma < sigma_max:\n npoints = npoints_min\n while npoints < npoints_max:\n npoints += npoints_step\n run += 1\n sigma += sigma_step\n return run", "def run(self, n: int, verbose: bool = False):\n self.__start_generation()\n self.__calculate_fitness()\n for i in range(n):\n self.__next_generation()\n self.__calculate_fitness()\n if verbose:\n self.__show(i)\n return self.population.fittest_two_individual(self.population.individuals)", "def refugia_adj_5_simsplit_4epochs_iter1 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def tournament(population, k=3, elitism=0, kw=None, **kwargs):\n _population = sorted(population, reverse=True, key=itemgetter('fitness'))\n\n # pop off the N best individuals where N is elitism\n pool = map(_population.pop, [0] * elitism)\n\n # update the value of k to reflect the elitism count\n _k = min(k, len(_population)) + elitism\n\n while len(pool) < _k:\n pool.append(random.choice(_population))\n pool = [dict(t) for t in set([tuple(d.items()) for d in pool])]\n return sorted(pool, key=lambda x: x['fitness'], reverse=True)", "def 
refugia_adj_5_simsplit_4epochs_iter2 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def split_simsplit_3epochs_iter3(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. 
- avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x", "def task_3():\n threshold = [0.86, 0.87, 0.88, 0.89]\n for t in threshold: \n # Create a list to store the number of iteration that DE converge \n # @ given threshold for p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5) \n iter_p1 = []\n iter_p2 = [] \n iter_p3 = [] \n iter_p4 = []\n \n # Create a list to store the cost at the end of the DE \n # p1(5, 40), p2(10, 20), p3(20, 10), p4(40, 5)\n cost_p1 = []\n cost_p2 = []\n cost_p3 = []\n cost_p4 = [] \n \n # Run the experiment and record the result for the given threshold\n experiment(t)\n \n # Loop over experiment and record the number of iteration of each set of param\n # for 30 times\n for i in range(30):\n record = experiment(t) # replace the argument with the testing threshold\n iter_p1.append(record[0][0])\n iter_p2.append(record[1][0])\n iter_p3.append(record[2][0])\n iter_p4.append(record[3][0])\n \n cost_p1.append(record[0][1])\n cost_p2.append(record[1][1])\n cost_p3.append(record[2][1])\n cost_p4.append(record[3][1])\n \n # Convert the result into a dictionary then transform it to a pandas DataFrame\n iteration_dict = {\"iteration(5,40)\":iter_p1, \"iteration(10,20)\":iter_p2, \n \"iteration(20,10)\": iter_p3, \"iteration(40,5)\": iter_p4}\n cost_dict = {\"cost(5,40)\": cost_p1, \"cost(10,20)\": cost_p2, \n \"cost(20,10)\": cost_p3, \"cost(40,5)\":cost_p4}\n df_iteration = pd.DataFrame.from_dict(iteration_dict) \n df_iteration.to_csv(\"iteration_\" + str(t) + \".csv\")\n \n df_cost = pd.DataFrame.from_dict(cost_dict)\n df_cost.to_csv(\"cost_\" + str(t) + \".csv\")", "def experiment(improved):\n\n N_list = [5 ,10 ,20]\n K_list = [3 , 7 ,9]\n P_list = [0.3 , 0.4 , 0.5 ,0.6 ,0.7]\n\n data = pandas.read_csv('train.csv')\n\n avg_list = []\n for i in range(0,len(N_list) * len(K_list) * len(P_list)):\n avg_list.append([0 , None])\n\n kf = KFold(n_splits=5, shuffle=True, random_state=209418441)\n rotation_index = 1\n for train_index, test_index in kf.split(data):\n\n train = data.iloc[train_index]\n test = data.iloc[test_index]\n index = 0\n for n in N_list:\n for k in K_list:\n for p in P_list:\n\n #print('testing for N= ',n,', K = ',k, 'P = ',p)\n KNN = forest.KNN_forest(N=n, K=k, P=p, data = train , improved=improved)\n success_rate = utls.tests.succ_rate_test.test(test,KNN.Classify)\n avg_list[index][0] += success_rate\n avg_list[index][1] = (n,k,p)\n #print(' rate is: ',avg_list[index][0]/rotation_index)\n index += 1\n rotation_index +=1\n\n\n\n best_option = max(avg_list,key= lambda x:x[0])\n #print(' ****** DONE ******')\n #print('best n,k,p are : ' , best_option[1] , ' with success rate: ' , best_option[0])\n\n return best_option[1]", "def train(self,env, iter_n=2000):\n\n\t\tfor i in range(iter_n):\n\t\t\tif i > 50:\n\t\t\t\tif all(reward > 195 for reward in self.step_count[-10:]):\n\t\t\t\t\tprint('solved at episode {}'.format(i))\n\t\t\t\t\tbreak\n\t\t\tstate = 
self.env.reset()\n\t\t\tstate = np.reshape(state, [1, self.state_size])\n\n\t\t\tepisode_complete = False\n\t\t\tstep = 0\n\t\t\twhile not episode_complete and (step < self.max_steps):\n\t\t\t\taction = self.define_action(state)\n\t\t\t\tnew_state, reward, episode_complete, info = env.step(action)\n\t\t\t\tnew_state = np.reshape(new_state, [1, self.state_size])\n\n\t\t\t\tself.memory.append((state, action, reward, new_state, episode_complete))\n\t\t\t\tself.round_reward += reward\n\t\t\t\tstate = new_state\n\t\t\t\tstep += 1\n\t\t\t\tif episode_complete:\n\t\t\t\t\tself.round_reward += -10\n\t\t\t\t\tself.update_target_model()\n\t\t\t\t\tself.print_results(i, iter_n, step)\n\t\t\t\t\tif i != 0: # Update totals in memory if not the first run\n\t\t\t\t\t\tself.update_totals(i, step)\n\t\t\t\tif len(self.memory) > self.training_iter:\n\t\t\t\t\tself.replay()\n\t\t\tif self.epsilon > self.epsilon_min:\n\t\t\t\tself.epsilon *= self.epsilon_decay\n\n\t\treturn self.all_iterations, self.all_rewards, self.step_count", "def nth_iteration(Iterations, Moves_ahead, GA_iterations, n_samples,\n current_gen_spectra, next_gen_conc, x_test,\n conc_array_actual, spectra_array_actual, seed,\n median_fitness_list, max_fitness_list,\n iteration, mutation_rate_list, fitness_multiplier_list):\n set_seed(seed)\n mutation_rate, fitness_multiplier, best_move, best_move_turn, \\\n max_fitness, surrogate_score, desired_1, current_gen_spectra_1, \\\n best_conc_array, \\\n dictionary_of_moves = MCTS(Iterations, Moves_ahead,\n GA_iterations, current_gen_spectra,\n next_gen_conc, x_test, conc_array_actual,\n spectra_array_actual, seed, n_samples)\n print('The best move has a fitness value of', max_fitness)\n print('The best move occurs in', best_move_turn, 'turns.')\n print()\n print('The surrogate model has a score of:', surrogate_score)\n print()\n mutation_rate_list.append(mutation_rate)\n fitness_multiplier_list.append(fitness_multiplier)\n current_gen_spectra = current_gen_spectra.T\n current_gen_spectra = MinMaxScaler().fit(current_gen_spectra). 
\\\n transform(current_gen_spectra).T\n next_gen_conc, median_fitness, max_fitness = perform_iteration(\n current_gen_spectra, next_gen_conc, x_test, 20,\n n_samples, mutation_rate, fitness_multiplier)\n best_conc_array = \\\n best_conc_array[np.argsort(best_conc_array[:, -1])][-1, :]\n print(next_gen_conc)\n return mutation_rate, fitness_multiplier, mutation_rate_list, \\\n fitness_multiplier_list, best_move, best_move_turn, \\\n max_fitness, surrogate_score, next_gen_conc, \\\n best_conc_array, dictionary_of_moves", "def main(n):\n return sum(f(i) for i in xrange(n))", "def infer(self, n_iter=150):\n if self.ppm:\n print(\"Running infer is forbidden for principled predictive model.\")\n return\n if DEBUG:\n # fix some variables to their true values\n self._fix_post_assigns(self.ground_truth['true_omega'], self.ground_truth['true_beta'])\n\n with self.sess.as_default():\n for i in range(n_iter):\n\n # users\n start_time = time.time()\n self.sess.run(self.u_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.u_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # items\n if not(self.fix_item_params):\n start_time = time.time()\n self.sess.run(self.i_update_one, feed_dict={self.edge_idx: self.edge_idx_d})\n self.sess.run(self.i_update_two, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # edges\n start_time = time.time()\n if self.simple_graph:\n for sg_edge_param_update in self.sg_edge_param_update:\n self.sess.run(sg_edge_param_update, feed_dict={self.edge_idx: self.edge_idx_d})\n else:\n for lphi_update in self.lphi_update:\n self.sess.run(lphi_update, feed_dict={self.edge_idx: self.edge_idx_d})\n\n # mean degree (caching)\n start_time = time.time()\n self.sess.run(self.deg_update, feed_dict={self.edge_vals: self.edge_vals_d, self.edge_idx: self.edge_idx_d})\n\n ### Print the total item and user mass ###\n if np.mod(i, 30) == 0:\n self._logging(i)\n print(\"appx_elbo: {}\".format(self.sess.run(self.appx_elbo,\n feed_dict={self.edge_idx: self.edge_idx_d})))\n\n ## DONE TRAINING\n self.user_affil_est = to_prob(self.theta_shp / self.theta_rte).eval()\n self.item_affil_est = to_prob(self.beta_shp / self.beta_rte).eval()\n if DEBUG: \n self.true_user_affil = to_prob(self.ground_truth['true_theta']).eval()\n self.true_item_affil = to_prob(self.ground_truth['true_beta']).eval()\n\n # User params\n gam_shp, gam_rte, theta_shp, theta_rte, g = self.sess.run([self.gam_shp, self.gam_rte, self.theta_shp, self.theta_rte, self.g])\n\n # Item params\n omega_shp, omega_rte, beta_shp, beta_rte, w = self.sess.run([self.omega_shp, self.omega_rte, self.beta_shp, self.beta_rte, self.w])\n\n return gam_shp, gam_rte, theta_shp, theta_rte, g, omega_shp, omega_rte, beta_shp, beta_rte, w", "def hyper_parameter_test(elements, args):\n\n greedy_factors = np.linspace(\n args.greedy_start, args.greedy_end, args.greedy_num_samples\n )\n k_values = np.arange(args.k_start - 1, args.k_end, args.k_step) + 1\n k_values = [int(k) for k in k_values]\n if args.gc_prune_test:\n gc_prune = [True, False]\n else:\n gc_prune = [True]\n if args.forest:\n forest = [True, False]\n else:\n forest = [False]\n \n all_runs = {}\n factors = [p for p in product(forest, greedy_factors, k_values, gc_prune)]\n for factor in factors:\n all_runs[factor] = []\n all_signatures_used = []\n splitter = RepeatedKFold(args.n_split, args.n_repeat, random_state = args.random_seed)\n i = 0\n for tree_indexes, search_indexes in splitter.split(elements):\n print(\"current run number:\", i)\n i+=1\n tree_elems = 
elements[tree_indexes]\n search_elems = elements[search_indexes]\n \n if args.forest:\n forest = VPForest(\n tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size\n )\n tree = VPTree(tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size)\n tree_elem_names = [elem.identifier for elem in tree_elems]\n search_elem_names = [elem.identifier for elem in search_elems]\n all_signatures_used.append((tree_elem_names, search_elem_names))\n start = time.time()\n for factor in factors:\n if factor[0]:\n run_NNS = one_nn_search_run(forest, search_elems, factor, args.parallel)\n else:\n run_NNS = one_nn_search_run(tree, search_elems, factor, args.parallel)\n all_runs[factor].append(run_NNS)\n\n print(\"search time:\", time.time()-start)\n data = NNData(all_runs, all_signatures_used, factors)\n with open(args.o, \"wb\") as f:\n pickle.dump(data, f)", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def _get_k_most_influential_helper(\n influence_src_dataloader: DataLoader,\n influence_batch_fn: Callable,\n inputs: Tuple[Any, ...],\n k: int = 5,\n proponents: bool = True,\n show_progress: bool = False,\n desc: Optional[str] = None,\n) -> Tuple[Tensor, Tensor]:\n # For each test instance, maintain the best indices and corresponding distances\n # initially, these will be empty\n topk_indices = torch.Tensor().long()\n topk_tracin_scores = torch.Tensor()\n\n multiplier = 1.0 if proponents else -1.0\n\n # needed to map from relative index in a batch fo index within entire `dataloader`\n num_instances_processed = 0\n\n # if show_progress, create progress bar\n total: Optional[int] = None\n if show_progress:\n try:\n total = len(influence_src_dataloader)\n except AttributeError:\n pass\n influence_src_dataloader = progress(\n influence_src_dataloader,\n desc=desc,\n total=total,\n )\n\n for batch in influence_src_dataloader:\n\n # calculate tracin_scores for the batch\n batch_tracin_scores = influence_batch_fn(inputs, batch)\n batch_tracin_scores *= multiplier\n\n # get the top-k indices and tracin_scores for the batch\n batch_size = batch_tracin_scores.shape[1]\n batch_topk_tracin_scores, batch_topk_indices = torch.topk(\n batch_tracin_scores, min(batch_size, k), dim=1\n )\n batch_topk_indices = batch_topk_indices + num_instances_processed\n num_instances_processed += batch_size\n\n # combine the top-k for the batch with those for previously seen batches\n topk_indices = torch.cat(\n [topk_indices.to(batch_topk_indices.device), batch_topk_indices], dim=1\n )\n topk_tracin_scores = torch.cat(\n [\n topk_tracin_scores.to(batch_topk_tracin_scores.device),\n batch_topk_tracin_scores,\n ],\n dim=1,\n )\n\n # retain only the top-k in terms of tracin_scores\n topk_tracin_scores, topk_argsort = torch.topk(\n topk_tracin_scores, min(k, topk_indices.shape[1]), dim=1\n )\n topk_indices = torch.gather(topk_indices, dim=1, index=topk_argsort)\n\n # if seeking opponents, we were actually keeping track of negative tracin_scores\n topk_tracin_scores *= multiplier\n\n return topk_indices, topk_tracin_scores", "def split_simsplit_3epochs_iter1(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, 
m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def iteration(self):\n T = self.generate_T()\n R = self.reproduce(T)\n self.P = self.choose_mi_best(R)\n #print(self.P)", "def step(self, closure: Callable = None):\n loss = self.optimizer.step(closure)\n self._k_counter += 1\n\n if self._k_counter >= self.k:\n self._k_counter = 0\n # Lookahead and cache the current optimizer parameters\n for group in self.optimizer.param_groups:\n for p in group[\"params\"]:\n param_state = self.state[p]\n p.data.mul_(self.alpha).add_(\n param_state[\"slow_params\"], alpha=1.0 - self.alpha\n )\n param_state[\"slow_params\"].copy_(p.data)\n return loss", "def make_parallel(self, n):\n return super().make_parallel(n, True)", "def marbles(n: int, k: int) -> int:\n # return (n-1) Choose (k - 1)\n # which is the number of possibilities with the given constraints.\n return n_choose_k(n - 1, k - 1)", "def refugia_adj_5_simsplit_4epochs_iter4 (params, ns):\n #28 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, nu1d, nu2d, nu3d, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m3_12, m3_13, m3_21, m3_23, m3_31, m3_32, T1, T2, T3, T4 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1 (to reflect sum effect of all previous glacial-interglacial cycles)\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2 (to reflect period of isolation during last glacial)\n nu_T2 = [nu1b, nu2b, nu3b]\n fs.integrate(nu_T2, T2)\n ## Population function and migration matrix for T3 (to reflect inter-glacial expansion)\n nu_T3 = [nu1c, nu2c, nu3c]\n mig3 = numpy.array([[0, m3_12, m3_13],[m3_21, 0, m3_23], [m3_31, m3_32, 0]]) \n fs.integrate(nu_T3, T3, m=mig3)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T4 = [nu1d, nu2d, nu3d]\n fs.integrate(nu_T4, T4) \n return fs", "def fit_several_times(self,n_repetitions=1,forget_current_sol=False):\n\n # Initialization\n if forget_current_sol: \n # start from scratch\n best_sol, best_sol_cost = None, np.inf \n else:\n # keep current solution as candidate\n best_sol, best_sol_cost = self.current_sol, self.current_sol_cost\n \n # Main loop, perform independent trials\n for i_repetition in range(n_repetitions):\n self.fit_once(random_restart=True)\n self.update_current_sol_and_cost()\n \n if 
self.current_sol_cost < best_sol_cost:\n best_sol, best_sol_cost = self.current_sol, self.current_sol_cost\n \n # Set the current sol to the best one we found\n self.current_sol, self.current_sol_cost = best_sol, best_sol_cost\n return self.current_sol", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def run_mcts(self, runs_per_round):\n for i in range(runs_per_round):\n self.select(self.env, 'r')\n self.env_reset()\n counts = [self.Nsa[('r', a)] for a in range(self.actions)]\n # print(\"counts \", counts)\n # print(\"Q-values\", [self.Qsa[('r', a)] for a in range(self.actions)])\n # print()\n return np.argmax(counts)", "def simulate_trajectories(kav):\n print \"Simulating \"+str(kav)\n wt_trajectories = []\n avp_trajectories = []\n vip_trajectories = []\n for tn in range(100):\n # get random initial condition\n # initial phases\n init_conditions_AV = [single_osc.lc(wt_T*np.random.rand()) \n for i in range(AVPcells+VIPcells)]\n init_conditions_NAV = [single_osc.lc(wt_T*np.random.rand())[:-1]\n for i in range(NAVcells)]\n y0_random = np.hstack(init_conditions_AV+init_conditions_NAV)\n\n # do the simulation\n model = GonzeModelManyCells(param, kav=kav, \n initial_values=y0_random)\n wt_trajectories.append(model.run(show_labels=False, seed=0))\n\n # avp bmalko\n avp_model = GonzeModelManyCells(param, bmalko='AVP', kav=kav, \n initial_values=y0_random)\n avp_trajectories.append(avp_model.run(show_labels=False, seed=0))\n\n # vip bmalko\n vip_model = GonzeModelManyCells(param, bmalko='VIP', kav=kav, 
\n initial_values=y0_random)\n vip_trajectories.append(vip_model.run(show_labels=False, seed=0))\n\n # save results\n with open(\"Data/params/wt_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(wt_trajectories, output_file)\n with open(\"Data/params/avp_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(avp_trajectories, output_file)\n with open(\"Data/params/vip_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(vip_trajectories, output_file)\n\n return {'wt': wt_trajectories,\n 'avp': avp_trajectories,\n 'vip': vip_trajectories}", "def run(self, N, return_proposal=False):\n n = 0\n Zp = []\n Xp = []\n Ep = []\n Jp = []\n Z = []\n X = []\n E = []\n J = []\n while n < N:\n sample_s, sample_z, sample_x, sample_e, sample_J = self._propose_batch()\n Zp.append(sample_z)\n Xp.append(sample_x)\n Ep.append(sample_e)\n Jp.append(sample_J)\n acc_s, acc_z, acc_x, acc_e, acc_J = self._accept_batch(sample_s, sample_z, sample_x, sample_e, sample_J)\n Z.append(acc_z)\n X.append(acc_x)\n E.append(acc_e)\n J.append(acc_J)\n n += sample_e.size\n Zp = np.vstack(Zp)[:N]\n Xp = np.vstack(Xp)[:N]\n Ep = np.concatenate(Ep)[:N]\n Jp = np.concatenate(Jp)[:N]\n Z = np.vstack(Z)[:N]\n X = np.vstack(X)[:N]\n E = np.concatenate(E)[:N]\n J = np.concatenate(J)[:N]\n #return Zp, Xp, Ep, Jp\n if return_proposal:\n return Zp, Xp, Ep, Jp, Z, X, E, J\n else:\n return Z, X, E, J", "def simulate(self, n, dt=None):\n for _ in range(n):\n self.step(dt)", "def optimize(self,s,max_steps,td_n):\r\n \r\n s1, d1, a1, r1, s2, done = self.ram.sample(self.batch_size)\r\n s1 = torch.from_numpy(s1).float()\r\n #print(s1.shape)\r\n d1 = torch.from_numpy(d1).float()\r\n d1 = d1.unsqueeze(1)\r\n d1.requires_grad = True\r\n d1_copy = d1.detach().cpu().numpy()\r\n self.mse_logic.append(d1_copy[-1][-1])\r\n a1 = torch.from_numpy(a1).float()\r\n #print(a1.shape)\r\n r1 = torch.from_numpy(r1).float()\r\n r1 = torch.squeeze(r1)\r\n #print(r1.size())\r\n s2 = torch.from_numpy(s2).float()\r\n done = torch.from_numpy(done).float()\r\n self.t = s\r\n self.T = max_steps\r\n self.td_n = td_n\r\n self.td_tau = self.t-self.td_n+1\r\n if self.td_tau >= 0:\r\n for i in range(self.td_tau+1, min(self.td_tau+self.td_n, self.T)):\r\n self.G = self.gamma**(i-self.td_tau-1)*(r1)\r\n if self.td_tau+self.td_n < self.T:\r\n #-----------optimize critic -----------------------------------\r\n a2 = self.target_actor.forward(s2).detach()\r\n noise = a1.data.normal_(0,0.2)\r\n noise = noise.clamp(-0.5,0.5)\r\n a2 = (a2+noise).clamp(-1,1)\r\n next_val = torch.squeeze(self.target_critic.forward(s2,a2).detach())\r\n y_expected = self.G + self.gamma**(self.td_n)*next_val*(1-done)\r\n y_predicted = torch.squeeze(self.critic.forward(s1, a1))\r\n # compute critic loss, and update the critic\r\n loss_critic = self.l_loss(y_predicted, y_expected.detach()).unsqueeze(0)\r\n self.critic_optimizer.zero_grad()\r\n loss_critic.backward()\r\n self.critic_optimizer.step()\r\n \r\n #-----------optimize actor --------------------------------------\r\n pred_a1 = self.actor.forward(s1)\r\n pred_a1_copy = pred_a1.detach().numpy()\r\n self.actor_logic.append(pred_a1_copy[-1][-1])\r\n loss_actor = -self.critic.forward(s1, pred_a1).mean()\r\n #entropy = torch.mean(pred_a1*torch.log(pred_a1))\r\n loss_policy = loss_actor\r\n self.loss_l1_list.append(loss_policy.item())\r\n mse_policy = self.target_critic(s1,d1).mean()\r\n loss_mse = self.mse(loss_actor, mse_policy)\r\n self.loss_mse_list.append(loss_mse.item())\r\n loss = 
sum([(1-self.lambda_mse)*loss_policy, self.lambda_mse*loss_mse])\r\n self.loss_final_list.append(loss.item())\r\n self.actor_optimizer.zero_grad()\r\n loss.backward()\r\n self.actor_optimizer.step()\r\n \r\n soft_update(self.target_actor, self.actor, self.tau)\r\n soft_update(self.target_critic, self.critic, self.tau)\r\n \r\n #self.actor.state_dict(self.target_actor.state_dict())\r\n #self.critic.state_dict(self.target_critic.state_dict())\r", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def split_simsplit_3epochs_iter2(params, ns):\n #24 parameters \n nu1a, nu2a, nu3a, nu1b, nu2b, nu3b, nu1c, nu2c, nu3c, m1_12, m1_13, m1_21, m1_23, m1_31, m1_32, m2_12, m2_13, m2_21, m2_23, m2_31, m2_32, T1, T2, T3 = params\n sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1] + ns[2])\n fs = moments.Spectrum(sts)\n fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1] + ns[2]) \n fs = moments.Manips.split_2D_to_3D_2(fs, ns[1], ns[2])\n ## Population function and migration matrix for T1\n nu_T1 = [nu1a, nu2a, nu3a]\n mig1 = numpy.array([[0, m1_12, m1_13],[m1_21, 0, m1_23], [m1_31, m1_32, 0]]) \n fs.integrate(nu_T1, T1, m=mig1)\n ## Population function and migration matrix for T2\n nu_T2 = [nu1b, nu2b, nu3b]\n mig2 = numpy.array([[0, m2_12, m2_13],[m2_21, 0, m2_23], [m2_31, m2_32, 0]]) \n fs.integrate(nu_T2, T2, m=mig2)\n ## Population function and migration matrix for T3 (bottleneck to capture single population representation of lineage)\n nu_T3 = [nu1c, nu2c, nu3c]\n fs.integrate(nu_T3, T3) \n return fs", "def update_kkrimp_params(self):\n\n decrease_mixing_fac = False\n switch_agressive_mixing = False\n switch_higher_accuracy = False\n initial_settings = False\n\n # only do something other than simple mixing after first kkr run\n if 
self.ctx.loop_count != 1:\n # first determine if previous step was successful (otherwise try to find some rms value and decrease mixing to try again)\n if not self.ctx.kkr_step_success:\n decrease_mixing_fac = True\n message = 'INFO: last KKR calculation failed. Trying decreasing mixfac'\n self.report(message)\n\n convergence_on_track = self.convergence_on_track()\n\n # check if calculation was on its way to converge\n if not convergence_on_track:\n decrease_mixing_fac = True\n message = 'INFO: Last KKR did not converge. Trying decreasing mixfac'\n self.report(message)\n # reset last_remote to last successful calculation\n last_calcs_list = list(range(len(self.ctx.calcs))) # needs to be list to support slicing\n if len(last_calcs_list) > 1:\n last_calcs_list = array(last_calcs_list)[::-1] # make sure to go from latest calculation backwards\n for icalc in last_calcs_list:\n message = f\"INFO: last calc success? {icalc} {self.ctx.KKR_steps_stats['success'][icalc]}\"\n self.report(message)\n if self.ctx.KKR_steps_stats['success'][icalc]:\n if self.ctx.KKR_steps_stats['last_rms'][icalc] < self.ctx.KKR_steps_stats['first_rms'][icalc]:\n self.ctx.last_remote = self.ctx.calcs[icalc].outputs.remote_folder\n break # exit loop if last_remote was found successfully\n else:\n self.ctx.last_remote = None\n else:\n self.ctx.last_remote = None\n # now cover case when last_remote needs to be set to initial remote folder (from input)\n if self.ctx.last_remote is None:\n if 'kkrimp_remote' in self.inputs:\n messager = 'INFO: no successful and converging calculation to take RemoteData from. Reuse RemoteData from input instead.'\n self.report(message)\n self.ctx.last_remote = self.inputs.kkrimp_remote\n elif 'impurity_info' in self.inputs or 'remote_data' in self.inputs:\n self.ctx.last_remote = None\n # check if last_remote has finally been set and abort if this is not the case\n if self.ctx.last_remote is None:\n messager = 'ERROR: last remote not found'\n self.report(message)\n return self.exit_codes.ERROR_SETTING_LAST_REMOTE # pylint: disable=no-member\n\n # check if mixing strategy should be changed\n last_mixing_scheme = self.ctx.last_params.get_dict()['IMIX']\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n if convergence_on_track:\n last_rms = self.ctx.last_rms_all[-1]\n if last_rms < self.ctx.threshold_aggressive_mixing and last_mixing_scheme == 0:\n switch_agressive_mixing = True\n message = 'INFO: rms low enough, switch to agressive mixing'\n self.report(message)\n\n # check if switch to higher accuracy should be done\n if not self.ctx.kkr_higher_accuracy:\n if self.ctx.kkr_converged: # or last_rms < self.ctx.threshold_switch_high_accuracy:\n switch_higher_accuracy = True\n\n\n# self.report(\"INFO: rms low enough, switch to higher accuracy settings\")\n else:\n initial_settings = True\n self.ctx.kkr_step_success = True\n\n if self.ctx.loop_count > 1:\n last_rms = self.ctx.last_rms_all[-1]\n\n # extract values from host calculation\n host_GF_calc = self.inputs.remote_data.get_incoming(node_class=CalcJobNode).first().node\n host_GF_outparams = host_GF_calc.outputs.output_parameters.get_dict()\n host_GF_inparams = host_GF_calc.inputs.parameters.get_dict()\n nspin = host_GF_outparams.get('nspin')\n non_spherical = host_GF_inparams.get('INS')\n if non_spherical is None:\n non_spherical = kkrparams.get_KKRcalc_parameter_defaults()[0].get('INS')\n self.ctx.spinorbit = host_GF_outparams.get('use_newsosol')\n\n # if needed update parameters\n if decrease_mixing_fac or switch_agressive_mixing or 
switch_higher_accuracy or initial_settings or self.ctx.mag_init:\n if initial_settings:\n label = 'initial KKR scf parameters'\n description = 'initial parameter set for scf calculation'\n else:\n label = ''\n description = ''\n\n # step 1: extract info from last input parameters and check consistency\n para_check = kkrparams(params_type='kkrimp')\n para_check.get_all_mandatory()\n message = 'INFO: get kkrimp keywords'\n self.report(message)\n\n # init new_params dict where updated params are collected\n new_params = {}\n\n # step 1.2: check if all mandatory keys are there and add defaults if missing\n missing_list = para_check.get_missing_keys(use_aiida=True)\n if missing_list != []:\n kkrdefaults = kkrparams.get_KKRcalc_parameter_defaults()[0]\n kkrdefaults_updated = []\n for key_default, val_default in list(kkrdefaults.items()):\n if key_default in missing_list:\n new_params[key_default] = kkrdefaults.get(key_default)\n kkrdefaults_updated.append(key_default)\n if len(kkrdefaults_updated) > 0:\n self.report('ERROR: no default param found')\n return self.exit_codes.ERROR_MISSING_PARAMS # pylint: disable=no-member\n else:\n message = f'updated KKR parameter node with default values: {kkrdefaults_updated}'\n self.report(message)\n\n # step 2: change parameter (contained in new_params dictionary)\n last_mixing_scheme = para_check.get_value('IMIX')\n if last_mixing_scheme is None:\n last_mixing_scheme = 0\n\n strmixfac = self.ctx.strmix\n aggrmixfac = self.ctx.aggrmix\n nsteps = self.ctx.nsteps\n\n # TODO: maybe add decrease mixing factor option as in kkr_scf wc\n # step 2.1 fill new_params dict with values to be updated\n if decrease_mixing_fac:\n if last_mixing_scheme == 0:\n self.report(f'(strmixfax, mixreduce)= ({strmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(strmixfax, mixreduce)= {type(strmixfac)} {type(self.ctx.mixreduce)}')\n strmixfac = strmixfac * self.ctx.mixreduce\n self.ctx.strmix = strmixfac\n label += f'decreased_mix_fac_str (step {self.ctx.loop_count})'\n description += f'decreased STRMIX factor by {self.ctx.mixreduce}'\n else:\n self.report(f'(aggrmixfax, mixreduce)= ({aggrmixfac}, {self.ctx.mixreduce})')\n self.report(f'type(aggrmixfax, mixreduce)= {type(aggrmixfac)} {type(self.ctx.mixreduce)}')\n aggrmixfac = aggrmixfac * self.ctx.mixreduce\n self.ctx.aggrmix = aggrmixfac\n label += 'decreased_mix_fac_bry'\n description += f'decreased AGGRMIX factor by {self.ctx.mixreduce}'\n\n if switch_agressive_mixing:\n last_mixing_scheme = self.ctx.type_aggressive_mixing\n label += ' switched_to_agressive_mixing'\n description += f' switched to agressive mixing scheme (IMIX={last_mixing_scheme})'\n\n # add number of scf steps, spin\n new_params['SCFSTEPS'] = nsteps\n new_params['NSPIN'] = nspin\n new_params['INS'] = non_spherical\n\n # add ldos runoption if dos_run = True\n if self.ctx.dos_run:\n if self.ctx.lmdos:\n runflags = new_params.get('RUNFLAG', []) + ['lmdos']\n else:\n runflags = new_params.get('RUNFLAG', []) + ['ldos']\n new_params['RUNFLAG'] = runflags\n new_params['SCFSTEPS'] = 1\n\n # turn on Jij calculation if jij_run == True\n if self.ctx.jij_run:\n new_params['CALCJIJMAT'] = 1\n\n # add newsosol\n if self.ctx.spinorbit:\n testflags = new_params.get('TESTFLAG', []) + ['tmatnew']\n new_params['TESTFLAG'] = testflags\n new_params['SPINORBIT'] = 1\n new_params['NCOLL'] = 1\n # TODO add deprecation warning and remove these lines (can be set with params_overwrite instead)\n if self.ctx.mesh_params.get('RADIUS_LOGPANELS', None) is not None:\n 
new_params['RADIUS_LOGPANELS'] = self.ctx.mesh_params['RADIUS_LOGPANELS']\n if self.ctx.mesh_params.get('NCHEB', None) is not None:\n new_params['NCHEB'] = self.ctx.mesh_params['NCHEB']\n if self.ctx.mesh_params.get('NPAN_LOG', None) is not None:\n new_params['NPAN_LOG'] = self.ctx.mesh_params['NPAN_LOG']\n if self.ctx.mesh_params.get('NPAN_EQ', None) is not None:\n new_params['NPAN_EQ'] = self.ctx.mesh_params['NPAN_EQ']\n new_params['CALCORBITALMOMENT'] = 1\n else:\n new_params['SPINORBIT'] = 0\n new_params['NCOLL'] = 0\n new_params['CALCORBITALMOMENT'] = 0\n new_params['TESTFLAG'] = []\n\n # set mixing schemes and factors\n if last_mixing_scheme > 2:\n new_params['ITDBRY'] = self.ctx.broyden_num\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = aggrmixfac\n new_params['NSIMPLEMIXFIRST'] = self.ctx.nsimplemixfirst\n elif last_mixing_scheme == 0:\n new_params['IMIX'] = last_mixing_scheme\n new_params['MIXFAC'] = strmixfac\n\n # add mixing scheme to context\n self.ctx.last_mixing_scheme = last_mixing_scheme\n\n if switch_higher_accuracy:\n self.ctx.kkr_higher_accuracy = True\n\n # add convergence settings\n if self.ctx.loop_count == 1 or self.ctx.last_mixing_scheme == 0:\n new_params['QBOUND'] = self.ctx.threshold_aggressive_mixing\n else:\n new_params['QBOUND'] = self.ctx.convergence_criterion\n\n # initial magnetization\n if initial_settings and self.ctx.mag_init:\n if self.ctx.hfield[0] <= 0.0 or self.ctx.hfield[1] == 0:\n self.report(\n '\\nWARNING: magnetization initialization chosen but hfield is zero. Automatically change back to default value (hfield={})\\n'\n .format(self._wf_default['hfield'])\n )\n self.ctx.hfield = self._wf_default['hfield']\n new_params['HFIELD'] = self.ctx.hfield\n elif self.ctx.mag_init and self.ctx.mag_init_step_success: # turn off initialization after first (successful) iteration\n new_params['HFIELD'] = [0.0, 0]\n elif not self.ctx.mag_init:\n self.report(\"INFO: mag_init is False. Overwrite 'HFIELD' to '0.0' and 'LINIPOL' to 'False'.\")\n # reset mag init to avoid resinitializing\n new_params['HFIELD'] = [0.0, 0]\n\n # set nspin to 2 if mag_init is used\n if self.ctx.mag_init:\n nspin_in = nspin\n if nspin_in is None:\n nspin_in = 1\n if nspin_in < 2:\n self.report('WARNING: found NSPIN=1 but for maginit needs NPIN=2. 
Overwrite this automatically')\n new_params['NSPIN'] = 2\n message = f'new_params: {new_params}'\n self.report(message)\n\n # overwrite values from additional input node\n if 'params_overwrite' in self.inputs:\n print('use params_overwrite', self.inputs.params_overwrite.get_dict())\n self._overwrite_parameters_from_input(new_params)\n\n # step 2.2 update values\n try:\n for key, val in new_params.items():\n para_check.set_value(key, val, silent=True)\n except:\n message = 'ERROR: failed to set some parameters'\n self.report(message)\n return self.exit_codes.ERROR_PARAMETER_UPDATE # pylint: disable=no-member\n\n # step 3:\n message = f'INFO: update parameters to: {para_check.get_set_values()}'\n self.report(message)\n updatenode = Dict(para_check.get_dict())\n updatenode.label = label\n updatenode.description = description\n paranode_new = updatenode #update_params_wf(self.ctx.last_params, updatenode)\n self.ctx.last_params = paranode_new\n else:\n message = 'INFO: reuse old settings'\n self.report(message)\n\n message = 'INFO: done updating kkr param step'\n self.report(message)", "def run_timesteps(self, nsteps=1):\n if not self.initialized:\n raise RuntimeError(\"OversetSimulation has not been initialized\")\n\n wclabels = \"Pre Conn Solve Post\".split()\n tstart = self.last_timestep + 1\n tend = self.last_timestep + 1 + nsteps\n self.printer.echo(\"Running %d timesteps starting from %d\"%(nsteps, tstart))\n for nt in range(tstart, tend):\n with self.timer(\"Pre\", incremental=True):\n for ss in self.solvers:\n ss.pre_advance_stage1()\n\n with self.timer(\"Conn\", incremental=True):\n if self._do_connectivity(nt):\n self.perform_overset_connectivity()\n\n with self.timer(\"Pre\", incremental=False):\n for ss in self.solvers:\n ss.pre_advance_stage2()\n\n with self.timer(\"Conn\"):\n self.exchange_solution()\n\n with self.timer(\"Solve\"):\n for ss in self.solvers:\n ss.advance_timestep()\n\n with self.timer(\"Post\"):\n for ss in self.solvers:\n ss.post_advance()\n\n self.comm.Barrier()\n wctime = self.timer.get_timings(wclabels)\n wctime_str = ' '.join(\"%s: %.4f\"%(k, v) for k, v in wctime.items())\n self.printer.echo(\"WCTime:\", \"%5d\"%nt, wctime_str, \"Total:\",\n \"%.4f\"%sum(wctime.values()))\n self.last_timestep = tend", "def number_of_iterations(self) -> int:\n pass", "def run_optimisation(model_path, tank1_outflow, tank2_outflow, tank3_outflow,\n h1_final, h2_final, h3_final, max_control, sim_control,\n h10=20.0, h20=20.0, h30=20.0, alpha1=0.5, alpha2=0.5,\n alpha3=0.5, ipopt_tolerance=1e-3,\n t_start=0, t_final=50.0, elements_number=50):\n # 2. Compute initial guess trajectories by means of simulation\n # Compile the optimization initialization model\n init_sim_fmu = compile_fmu(\"TanksPkg.ThreeTanks\", model_path)\n # Load the model\n simulation_model = load_fmu(init_sim_fmu)\n set_model_parameters(simulation_model,\n {'u': sim_control, \"h10\": h10, \"h20\": h20, \"h30\": h30,\n \"C1\": tank1_outflow, \"C2\": tank2_outflow,\n \"C3\": tank3_outflow, \"alpha1\": alpha1,\n \"alpha2\": alpha2, \"alpha3\": alpha3})\n init_result = simulation_model.simulate(start_time=t_start,\n final_time=t_final)\n # 3. 
Solve the optimal control problem\n # Compile and load optimization problem\n optimisation_model = \"TanksPkg.three_tanks_time_optimal\"\n op = transfer_optimization_problem(optimisation_model, model_path)\n # Set parameters\n set_model_parameters(op, {\"h10\": h10, \"h20\": h20, \"h30\": h30,\n 'h1_final': h1_final, 'h2_final': h2_final,\n 'h3_final': h3_final, \"C1\": tank1_outflow,\n \"C2\": tank2_outflow, \"C3\": tank3_outflow,\n \"alpha1\": alpha1, \"alpha2\": alpha2,\n \"alpha3\": alpha3, 'u_max': max_control})\n\n # Set options\n opt_options = op.optimize_options()\n opt_options['n_e'] = elements_number\n opt_options['variable_scaling'] = False\n opt_options['init_traj'] = init_result\n opt_options['IPOPT_options']['tol'] = ipopt_tolerance\n opt_options['verbosity'] = 1\n # Solve the optimal control problem\n res = op.optimize(options=opt_options)\n opt_result = {\"h1\": res['h1'], \"h2\": res['h2'], \"h3\": res['h3'],\n \"u\": res['u'], \"time\": res['time']}\n return opt_result", "def WarpStep(iters=5):\n MSG(\"WarpStep\")\n for j in range(iters):\n warp.step()\n return", "def multiple_eval_for_loops_v2():", "def compute_profiling_time(key, expected_num_spikes, rate, t_stop, n,\n winlen, binsize, num_rep=10):\n\n time_fast_fca = 0.\n time_fpgrowth = 0.\n for rep in range(num_rep):\n # Generating artificial data\n data = []\n for i in range(n):\n np.random.seed(0)\n data.append(stg.homogeneous_poisson_process(\n rate=rate, t_start=0*pq.s, t_stop=t_stop))\n\n # Extracting Closed Frequent Itemset with FP-Growth\n t0 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = spade._build_context(binary_matrix,\n winlen)\n # Applying FP-Growth\n fim_results = [i for i in spade._fpgrowth(\n transactions,\n rel_matrix=rel_matrix,\n winlen=winlen)]\n time_fpgrowth += time.time() - t0\n\n # Extracting Closed Frequent Itemset with Fast_fca\n t1 = time.time()\n # Binning the data and clipping (binary matrix)\n binary_matrix = conv.BinnedSpikeTrain(data, binsize).to_bool_array()\n # Computing the context and the binary matrix encoding the relation\n # between objects (window positions) and attributes (spikes,\n # indexed with a number equal to neuron idx*winlen+bin idx)\n context, transactions, rel_matrix = \\\n spade._build_context(binary_matrix, winlen)\n # Applying FP-Growth\n fim_results = spade._fast_fca(context, winlen=winlen)\n time_fast_fca += time.time() - t1\n\n time_profiles = {'fp_growth': time_fpgrowth/num_rep,\n 'fast_fca': time_fast_fca/num_rep}\n\n # Storing data\n res_path = '../results/{}/{}/'.format(key, expected_num_spikes)\n # Create path is not already existing\n path_temp = './'\n for folder in split_path(res_path):\n path_temp = path_temp + '/' + folder\n mkdirp(path_temp)\n\n np.save(res_path + '/profiling_results.npy', {'results': time_profiles,\n 'parameters': {'rate': rate, 't_stop': t_stop, 'n': n,\n 'winlen': winlen, 'binsize': binsize}})", "def stirling(k, r) :\n\n return sum((-1)**(r-i)*binomial(r, i)*i**k for i in range(r+1)) / math.factorial(r)", "def test_multiple_games(self, iteration=10):\n # TODO: multithread?\n for i in range(iteration):\n self.test_one_game()" ]
[ "0.62938315", "0.6189227", "0.6007014", "0.5923211", "0.58506715", "0.58172965", "0.58066565", "0.58060586", "0.57466394", "0.5720371", "0.5709016", "0.57055277", "0.56977755", "0.56941396", "0.5682223", "0.5663299", "0.56584656", "0.5658233", "0.56204224", "0.5607117", "0.5597962", "0.5591526", "0.5574413", "0.5569978", "0.55536854", "0.55532277", "0.55487543", "0.5547276", "0.5525577", "0.55245984", "0.55070615", "0.54981446", "0.5478564", "0.547515", "0.5465812", "0.546369", "0.5462137", "0.5458883", "0.54559344", "0.5449383", "0.5447858", "0.5447472", "0.5444142", "0.544302", "0.5435536", "0.54269147", "0.54236454", "0.5417142", "0.5415845", "0.54075265", "0.54059887", "0.5403671", "0.5394145", "0.53840816", "0.53757805", "0.53756803", "0.537525", "0.5370207", "0.5366103", "0.5351621", "0.53463364", "0.5344487", "0.5343568", "0.5339152", "0.5338284", "0.533811", "0.5337357", "0.53365123", "0.5324911", "0.5322658", "0.53176653", "0.53165627", "0.5313058", "0.53067327", "0.53011084", "0.52989864", "0.5298362", "0.5297127", "0.5295268", "0.5294496", "0.5291052", "0.52866673", "0.52835923", "0.52826554", "0.5281601", "0.52797174", "0.5279139", "0.52719253", "0.5267691", "0.5265336", "0.52650464", "0.52606475", "0.5255125", "0.52526575", "0.52505577", "0.52456754", "0.5245088", "0.5242701", "0.524079", "0.5227807", "0.52224237" ]
0.0
-1
Runs optimization over n rounds of k parallel trials, i.e. k x n trials in total. This asks Ax to run up to max_parallelism_cap trials in parallel by submitting them to the scheduler at the same time.
def test_run_experiment_locally_in_batches(self) -> None: parallelism = 2 rounds = 3 experiment = Experiment( name="torchx_booth_parallel_demo", search_space=SearchSpace(parameters=self._parameters), optimization_config=OptimizationConfig(objective=self._objective), runner=self._runner, is_test=True, properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True}, ) scheduler = Scheduler( experiment=experiment, generation_strategy=( choose_generation_strategy( search_space=experiment.search_space, max_parallelism_cap=parallelism, ) ), options=SchedulerOptions( run_trials_in_batches=True, total_trials=(parallelism * rounds) ), ) try: scheduler.run_all_trials() # TorchXMetric always returns trial index; hence the best experiment # for min objective will be the params for trial 0. scheduler.report_results() except FailureRateExceededError: pass # TODO(ehotaj): Figure out why this test fails in OSS. # Nothing to assert, just make sure experiment runs.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
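A minimal sketch of the rounds-of-parallel-trials pattern that the query/document pair above describes. The Booth objective, the sampling bounds, and the ThreadPoolExecutor stand-in for Ax's Scheduler are illustrative assumptions, not part of the Ax API; only the control flow (n rounds, each submitting k trials at once) mirrors the example.

import random
from concurrent.futures import ThreadPoolExecutor


def objective(params):
    # Booth function, minimised at (1, 3) -- matches the torchx_booth demo above.
    x1, x2 = params["x1"], params["x2"]
    return (x1 + 2 * x2 - 7) ** 2 + (2 * x1 + x2 - 5) ** 2


def run_in_batches(parallelism=2, rounds=3):
    # n rounds; each round submits `parallelism` trials at the same time.
    best_params, best_value = None, float("inf")
    with ThreadPoolExecutor(max_workers=parallelism) as pool:
        for _ in range(rounds):
            # Hypothetical search space: uniform sampling over [-10, 10]^2.
            batch = [
                {"x1": random.uniform(-10.0, 10.0), "x2": random.uniform(-10.0, 10.0)}
                for _ in range(parallelism)
            ]
            # Evaluate the whole batch in parallel, then fold in the results.
            for params, value in zip(batch, pool.map(objective, batch)):
                if value < best_value:
                    best_params, best_value = params, value
    return best_params, best_value


if __name__ == "__main__":
    print(run_in_batches(parallelism=2, rounds=3))

In the actual record, Ax's Scheduler achieves the same batching via SchedulerOptions(run_trials_in_batches=True, total_trials=parallelism * rounds), with max_parallelism_cap on the generation strategy capping how many trials each batch may contain.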
[ "def run(self, n_steps: int, n_parallel: int = 1, **kwargs):\n batch_size = kwargs.get(\"batch_size\", 1)\n n_parallel = min(n_parallel, batch_size)\n with self.scheduler(n_parallel=n_parallel) as scheduler:\n for i in range(n_steps):\n samples = self.optimiser.run_step(\n batch_size=batch_size,\n minimise=kwargs.get(\"minimise\", False)\n )\n jobs = [\n self._job(task=self.objective, args=s.as_dict())\n for s in samples\n ]\n scheduler.dispatch(jobs)\n evaluations = [\n r.data for r in scheduler.collect(\n n_results=batch_size, timeout=self._timeout\n )\n ]\n self.optimiser.update(samples, evaluations)\n for s, e, j in zip(samples, evaluations, jobs):\n self.reporter.log((s, e), meta={\"job_id\": j.id})", "def monte_carlo_trials(nb_trials, nb_ok, lock):\n\n # First perform the trials\n # Do not use shared resource because other processes doesn't need to know\n # about computation step\n nb_in_quarter_results = 0\n for i in range(nb_trials):\n x = random.uniform(0, 1)\n y = random.uniform(0, 1)\n if x * x + y * y <= 1.0:\n nb_in_quarter_results += 1\n\n # Finally update shared resource\n # Do it only once, then processes doesn't struggle with each other to\n # update it\n with lock:\n nb_ok.value += nb_in_quarter_results", "def test_parallel_run():\n def delay(sec):\n \"\"\"delay test func\"\"\"\n if isinstance(sec, (float, int)):\n time.sleep(sec)\n else:\n for s in sec:\n time.sleep(s)\n\n times = [0.01 for i in range(100)]\n serial_time = reduce(lambda x, y: x + y, times, 0)\n parallel_time = times[-1]\n num_parallel = 4\n\n num_batches = len(times) // num_parallel + 1\n\n ideal_time = serial_time / num_parallel\n\n t0 = time.time()\n retval_queue = parallel_run(\n func=delay, kind='threads', num_parallel=num_parallel,\n divided_args_mask=None, divided_kwargs_names=['sec'],\n scalar_func=False, sec=times\n )\n logging.trace('entry pulled from queue: %s', retval_queue.get())\n runtime = time.time() - t0\n\n speedup = serial_time / runtime\n logging.trace('serial runtime = %.3f s', serial_time)\n logging.trace('ideal runtime = %.3f s', ideal_time)\n logging.trace('actual runtime = %.3f s', runtime)\n\n logging.trace('ideal speedup = %.3f', serial_time / ideal_time)\n logging.trace('actual speedup = %.3f', speedup)\n\n relative_speedup = ideal_time / runtime\n logging.trace('speedup/ideal = %.3f', relative_speedup)\n assert relative_speedup >= 0.3, 'rel speedup = %.4f' % relative_speedup\n logging.info('<< PASS : test_parallel_run >>')", "def _kshape(x, k, n_init=1, max_iter=100, n_jobs = 1, random_state=None,normalize=True ):\r\n #print \"n jobs run in parallel: \" + str(cpu_count() ) \r\n random_state = check_random_state(random_state)\r\n best_tot_dist,best_centroids,best_idx = None,None,None\r\n \r\n if n_jobs ==1:\r\n\r\n for i_init in range(n_init): \r\n # n_init is the number of random starting points\r\n # pdb.set_trace()\r\n \r\n idx, centroids,tot_dist = _kshape_single(x, k, max_iter=max_iter, random_state= random_state,normalize=normalize) \r\n if best_tot_dist is None or tot_dist < best_tot_dist:\r\n best_idx = idx.copy()\r\n best_centroids = centroids.copy()\r\n best_tot_dist = tot_dist\r\n else: # n_jobs not =1 # if -1, all CPUs are used\r\n # parallelisation of kshape runs\r\n seeds = random_state.randint(np.iinfo(np.int32).max,size=n_init)\r\n results = Parallel(n_jobs=n_jobs, verbose=0)(\r\n delayed(_kshape_single)(x,k,max_iter=max_iter, random_state=seed, normalize=normalize)\r\n for seed in seeds )\r\n # Get results with the lowest distances\r\n idx, 
centroids,tot_dist, iterations = zip(*results)\r\n best = np.argmin(tot_dist) \r\n best_idx = idx[best]\r\n best_centroids = centroids[best]\r\n best_tot_dist = tot_dist[best]\r\n sys.stdout.write(\"Done: k=\"+str(k)+\"\\n\")\r\n return {'centroids':best_centroids, 'labels':best_idx, 'distance':best_tot_dist,'centroids_all':centroids,'labels_all':idx,'distance_all':tot_dist,'iterations':iterations}", "def num_step_reach(rr, a, b, k, num_steps = 5):\n\n # Time this function\n start_time = time.time()\n\n # Help us divide and conquer with multiple threads\n pool = ThreadPool(processes = 2)\n\n # Create our BDD variables, matching those exactly in rr (except zz's)\n xx_list = [bddvar(\"xx{}\".format(i)) for i in range(k)]\n yy_list = [bddvar(\"yy{}\".format(i)) for i in range(k)]\n zz_list = [bddvar(\"zz{}\".format(i)) for i in range(k)]\n\n # Compose for each step\n # NOTE: Very slow for many nodes and edges; may want divide and conquer here\n hh = rr\n yyzz_compose_dict = {a:b for a, b in zip(yy_list, zz_list)}\n xxzz_compose_dict = {a:b for a, b in zip(xx_list, zz_list)}\n for i in range(0, num_steps - 1):\n\n # Kickoff threads on async composition\n yyzz_async_result = pool.apply_async(hh.compose, [yyzz_compose_dict])\n xxzz_async_result = pool.apply_async(rr.compose, [xxzz_compose_dict])\n\n # Block: get results from threads\n yyzz_compose = yyzz_async_result.get()\n xxzz_compose = xxzz_async_result.get()\n\n # Conjunct them and do smoothing\n hh = (yyzz_compose & xxzz_compose).smoothing(set(zz_list))\n\n print(\"\\tComposed for step\", i)\n\n print(\"Completed in {} seconds\".format(round(time.time() - start_time)))\n print(\"Number of satisfiable variables\", len(list(hh.satisfy_all())))\n\n # See if a and b can reach eachother in the given number of steps\n restrict_dict = {c:d for c, d in zip(set(xx_list), to_bin(a, k))}\n restrict_dict.update({c:d for c, d in zip(set(yy_list), to_bin(b, k))})\n return hh.restrict(restrict_dict)", "def run(self, n_trials=10):\n # Create study object\n if self.study is None:\n self.study = optuna.create_study(\n direction=\"minimize\",\n sampler=optuna.samplers.RandomSampler(seed=123)\n )\n # Run trials\n self.study.optimize(\n lambda x: self.objective(x),\n n_trials=n_trials,\n n_jobs=-1\n )", "def _run_parallel(parameters):\n\n # make parallel context global\n global pc\n\n print parameters\n # create parallel context instance\n pc = h.ParallelContext()\n\n print 'i am', pc.id(), 'of', pc.nhost()\n # start workers, begins an infinitely loop where master workers posts jobs and workers pull jobs until all jobs are finished\n pc.runworker()\n \n # print len(parameters)\n # # # distribute experiment and parameters to workers\n for param in parameters:\n # print len(parameters)\n # print param\n pc.submit(_f_parallel, param)\n # print param\n\n # # continue runnning until all workers are finished\n while pc.working():\n print pc.id(), 'is working'\n\n # # close parallel context \n pc.done()", "def forqs_parallel(configs):\n pool = Pool(21)\n pool.map(forqs_sim, configs)\n pool.close()\n pool.join()", "def parallel_run():\n from IPython.parallel import Client\n\n c = Client() # here is where the client establishes the connection\n lv = c.load_balanced_view() # this object represents the engines (workers)\n\n\n rays = []\n maxs=25\n bounding = AABA(xmin=0, ymin=0, zmin=0, xmax=maxs, ymax=maxs, zmax=maxs,)\n gridd = np.zeros((maxs,maxs,maxs))\n # spectrum for red to nir leaves\n red_nir_leaves = spectrum(np.array([0.5, 0.85]), np.array([0.1, 0.6]), 
np.array([0.5, 0.1]))\n # spectrum for soil\n red_nir_soil = spectrum(np.array([0.5, 0.85]), np.array([0.3, 0.4]), np.array([0.0, 0.0]))\n\n\n # scattering setup\n scatt = BRDSF(red_nir_leaves, 0.0)\n lf = leaf(55.0, 0.8) # leaf angle distribution and leaf area density\n\n\n tasks = []\n for x in xrange(maxs):\n for y in xrange(maxs):\n tasks.append(lv.apply(prun, x,y, maxs, gridd, scatt, red_nir_soil, bounding, lf))\n\n result = [task.get() for task in tasks] # blocks until all results are back\n\n return results", "def SkoptPaperStats(maxIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90)]\n\n # Use the same seed list as previously.\n seedList = [572505, 357073, 584216, 604873, 854690, 573165, 298975, 650770, 243921, 191168]\n\n # The target for each algorithm. This was determined by using the values in the literature, so there is clearly some deviation either due to the detuning or computation.\n globalFoM = 1.033\n\n if rank == 0:\n timeList = []\n iterationList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"GP\", n_initial_points = int(np.ceil(maxIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n timeElapsed = None\n iterationSuccess = None\n\n # Start optimisation.\n for iteration in range(maxIters):\n\n # Make one suggestion.\n nextParams = optimiser.ask()\n\n # Check what FoM this gives. Go negative as this is a minimisation routine.\n fEval = FitnessSkopt(nextParams)\n\n # Update best FoM.\n if abs(fEval) >= globalFoM:\n # The algorithm has managed to surpass or equal the paper value.\n iterationSuccess = iteration\n timeElapsed = time.time() - startTime\n \n if rank == 0:\n iterationList.append(iterationSuccess)\n timeList.append(timeElapsed)\n\n break\n \n # Tell the optimiser about the result.\n optimiser.tell(nextParams, fEval)\n\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(iterationSuccess, dest = 0, tag = 2)\n\n # Wait for all the processes to end.\n comm.Barrier()\n\n if rank == 0:\n # Aggregate the data.\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualIter = None\n individualIter = comm.recv(individualIter, source = process + 1, tag = 2)\n\n if individualIter is not None:\n # Both values must therefore be non-null.\n iterationList.append(individualIter)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgIters = np.average(iterationList)\n try:\n\n fastestTime = np.min(timeList)\n\n except ValueError:\n \n # List is empty.\n fastestTime = float('NaN')\n\n numSuccess = len(iterationList)\n successRate = numSuccess/numRuns\n\n print(\"Bayesian optimisation paper testing complete! 
Here are the stats:\")\n print(\"Number of successful runs: \" + str(numSuccess) + \" (Success rate of \" + str(successRate) + \")\")\n print(\"Average iterations required for success: \" + str(avgIters))\n print(\"Average time required for success: \" + str(avgRuntime))\n print(\"Fastest convergence time: \" + str(fastestTime))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def run_optimization(config,\n blackbox_optimizer,\n init_current_input,\n init_best_input,\n init_best_core_hyperparameters,\n init_best_value,\n init_iteration,\n workers,\n log_bool=False):\n current_input = init_current_input\n best_input = init_best_input\n best_core_hyperparameters = init_best_core_hyperparameters\n best_value = [init_best_value]\n iteration = init_iteration\n\n while True:\n print(iteration)\n sys.stdout.flush()\n success, current_input = run_step_rpc_blackbox_optimizer(\n config, current_input, blackbox_optimizer, workers, iteration,\n best_input, best_core_hyperparameters, best_value, log_bool)\n if success:\n iteration += 1\n if iteration == config.nb_iterations:\n break", "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def 
optimize_restarts(self, num_restarts=10, **kwargs):\n size = self.size\n rank = self.rank\n comm = self.comm\n my_num_restarts = num_restarts / size\n if my_num_restarts == 0:\n my_num_restarts = 1\n num_restarts = my_num_restarts * size\n if self.verbosity >= 2:\n print '> optimizing hyper-parameters using multi-start'\n print '> num available cores:', size\n print '> num restarts:', num_restarts\n print '> num restarts per core:', my_num_restarts\n # Let everybody work with its own data\n self.randomize()\n super(Parallelizer, self).optimize_restarts(num_restarts=my_num_restarts,\n verbose=self.verbosity>=2,\n **kwargs)\n if self.use_mpi:\n best_x_opt, log_like = reduce_max(self.optimizer_array.copy(),\n self.log_likelihood(),\n comm=comm)\n if self.verbosity >= 2:\n print '> best hyperparameters:', best_x_opt\n self.optimizer_array = best_x_opt", "def optimize_restarts(self, Nrestarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):\n initial_parameters = self._get_params_transformed()\n\n if parallel:\n try:\n jobs = []\n pool = mp.Pool(processes=num_processes)\n for i in range(Nrestarts):\n self.randomize()\n job = pool.apply_async(opt_wrapper, args = (self,), kwds = kwargs)\n jobs.append(job)\n\n pool.close() # signal that no more data coming in\n pool.join() # wait for all the tasks to complete\n except KeyboardInterrupt:\n print \"Ctrl+c received, terminating and joining pool.\"\n pool.terminate()\n pool.join()\n\n for i in range(Nrestarts):\n try:\n if not parallel:\n self.randomize()\n self.optimize(**kwargs)\n else:\n self.optimization_runs.append(jobs[i].get())\n\n if verbose:\n print(\"Optimization restart {0}/{1}, f = {2}\".format(i+1, Nrestarts, self.optimization_runs[-1].f_opt))\n except Exception as e:\n if robust:\n print(\"Warning - optimization restart {0}/{1} failed\".format(i+1, Nrestarts))\n else:\n raise e\n\n if len(self.optimization_runs):\n i = np.argmin([o.f_opt for o in self.optimization_runs])\n self._set_params_transformed(self.optimization_runs[i].x_opt)\n else:\n self._set_params_transformed(initial_parameters)", "def test_workon_with_parallel_backend(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n import joblib\n\n with joblib.parallel_backend(\"loky\"):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 5\n\n with joblib.parallel_backend(\"loky\", n_jobs=-1):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=3, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 3", "def work(i, kfolds, alphas):\n\t# load data\n\tTheta = np.loadtxt('Regression_Data/Theta.txt')\n\tdadt = np.loadtxt('Regression_Data/a_dot.txt')\n\tnsamples, nfeatures = Theta.shape\n\tnn = dadt.shape[1]\n \n\t# average mean square error across the folds\n\tMSE_mean = np.zeros(len(alphas))\n\tMSE_std = np.zeros(len(alphas))\n\tMSE_full = np.zeros(len(alphas))\n\tMSE_full_rel = np.zeros(len(alphas))\n\n\t# number of nonzero coefficients\n\tnnz = np.zeros(len(alphas))\n\tcomm = MPI.COMM_WORLD\n\t# coefficients\n\tcoeffs = np.zeros((len(alphas), nfeatures))\n\n\tfor j, alpha in enumerate(alphas):\n\t\tmodel = linear_model.LassoCV(cv=kfolds,\n\t\t\t\t\t\talphas=[alpha],\n\t\t\t\t\t\tfit_intercept=False,\n\t\t\t\t\t\tmax_iter=3000,\n\t\t\t\t\t\ttol=1e-4).fit(Theta, dadt[:, i])\n \n\t\n\t\tprint('Worker %d :: doing 
alpha=%.2e :: completed %.2f %%\\n' % (comm.Get_rank(), model.alpha_, 100.0*float(j+1)/len(alphas)))\n\n\t\tsys.stdout.flush()\n\t\t# apparently this mse_path is already taking into\n\t\t# account the whole dataset, so we do not need to multiply by kfolds\n\t\tcoeffs[j] = model.coef_\n\t\tMSE_mean[j] = np.sqrt(nsamples*np.mean(model.mse_path_))\n\t\tMSE_std[j] = np.sqrt(np.std(nsamples*model.mse_path_))\n\n\t\t#MSE_full_rel[j] = np.mean(((np.dot(Theta, model.coef_) - dadt[:, i])**2)/np.linalg.norm(dadt[:, i])**2)\n\t\tMSE_full_rel[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i])/np.linalg.norm(dadt[:, i]))\t\t\n\t\t\n\t\t#MSE_full[j] = np.mean((np.dot(Theta, model.coef_) - dadt[:, i])**2)\t\t\n\t\tMSE_full[j] = np.mean(np.linalg.norm(np.dot(Theta, model.coef_) - dadt[:, i]))\n\t\t\n\t\tnnz[j] = np.count_nonzero(model.coef_)\n\n\t\t# save data\n\t\ttry:\n\t\t\t#shutil.rmtree('Regression_Results')\n\t\t\tos.mkdir('Regression_Results')\n\t\texcept OSError:\n\t\t\tpass\n\n\t\t\n\t\tnp.savetxt('Regression_Results/MSE_mean_%03d' % i, MSE_mean,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_std_%03d' % i, MSE_std,delimiter=' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_%03d' % i, MSE_full,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/MSE_full_rel_%03d' % i, MSE_full_rel,delimiter= ' ')\n\t\tnp.savetxt('Regression_Results/coeffs_%03d' % i, coeffs,delimiter = ' ')\n\t\tnp.savetxt('Regression_Results/nnz_%03d' % i, nnz,delimiter = ' ')\n\n\t\tprint('Done i = %03d\\n' % i)\n\treturn True", "def IB(px,py,pyx_c,maxbeta=5,numbeta=30,iterations=100,restarts=3,parallel = False):\n pm_size = px.size\n bs = np.linspace(0.01,maxbeta,numbeta) #value of beta\n if parallel != False:\n pool = mp.Pool(processes=parallel)\n results = [pool.apply_async(beta_iter,args=(b,px,py,pyx_c,pm_size,restarts,iterations,)) for b in bs]\n pool.close()\n results = [p.get() for p in results]\n ips = [x[0] for x in results]\n ifs = [x[1] for x in results]\n #Values of beta may not be sorted appropriately, code below sorts ipast and ifuture according to their corresponding value of beta, and in correct order\n b_s = [x[2] for x in results] \n ips = [x for _, x in sorted(zip(b_s,ips))]\n ifs = [x for _, x in sorted(zip(b_s,ifs))]\n elif parallel == False:\n\t ips = np.zeros(bs.size)\n\t ifs = np.zeros(bs.size)\n\t for bi in range(bs.size):\n\t\t candidates = []\n\t\t for r in range(restarts):\n\t\t\t # initialize distribution for bottleneck variable\n\t\t\t pm = np.random.rand(pm_size)+1\n\t\t\t pm /= pm.sum()\n\t\t\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t\t\t pym_c /= pym_c.sum(axis=0)\n\t\t\t\t# iterate the BA algorithm\n\t\t\t for i in range(iterations):\n\t\t\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,bs[bi])\n\t\t\t\t pm = p_m(pmx_c,px)\n\t\t\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t\t\t break\n\t\t\t\t pmx_c_old = pmx_c\n\t\t\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t\t\t# among the restarts, select the result that gives the minimum\n\t\t\t# value for the functional we're actually minimizing (eq 29 in\n\t\t\t# Tishby et al 2000).\n\t\t selected_candidate = min(candidates, key=lambda c: c['functional'])\n\t\t ips[bi] = 
selected_candidate['past_info']\n\t\t ifs[bi] = selected_candidate['future_info']\n # restrict the returned values to those that, at each value of\n # beta, actually increase (for Ipast) and do not decrease (for\n # Ifuture) the information with respect to the previous value of\n # beta. This is to avoid confounds from cases where the BA\n # algorithm gets stuck in a local minimum.\n ub, bs = compute_upper_bound(ips, ifs, bs)\n ips = np.squeeze(ub[:,0])\n ifs = np.squeeze(ub[:,1])\n return ips, ifs, bs", "def run_with_time(self, runtime=1, n_iterations=float(\"inf\"), min_n_workers=1, iteration_kwargs = {},):\n\n self.wait_for_workers(min_n_workers)\n\n iteration_kwargs.update({'result_logger': self.result_logger})\n\n if self.time_ref is None:\n self.time_ref = time.time()\n self.config['time_ref'] = self.time_ref\n \n self.logger.info('HBMASTER: starting run at %s'%(str(self.time_ref)))\n\n self.thread_cond.acquire()\n\n start_time = time.time()\n\n while True:\n\n self._queue_wait()\n\n # Check if timelimit is reached\n if (runtime < time.time() - start_time):\n self.logger.info('HBMASTER: Timelimit reached: wait for remaining %i jobs'%self.num_running_jobs)\n break\n \n next_run = None\n # find a new run to schedule\n for i in self.active_iterations():\n next_run = self.iterations[i].get_next_run()\n if not next_run is None: break\n\n if next_run is not None:\n self.logger.debug('HBMASTER: schedule new run for iteration %i'%i)\n self._submit_job(*next_run)\n continue\n elif n_iterations > 0:\n next_HPB_iter = len(self.iterations) + (self.iterations[0].HPB_iter if len(self.iterations) > 0 else 0)\n self.iterations.append(self.get_next_iteration(next_HPB_iter, iteration_kwargs))\n n_iterations -= 1\n continue\n\n # at this point there is no immediate run that can be scheduled,\n # so wait for some job to finish if there are active iterations\n if self.active_iterations():\n self.thread_cond.wait()\n else:\n break\n\n # clean up / cancel remaining iteration runs\n next_run = True\n n_canceled = 0\n while next_run is not None:\n next_run = None\n for i in self.active_iterations():\n next_run = self.iterations[i].get_next_run()\n if not next_run is None: \n config_id, config, budget = next_run\n job = Job(config_id, config=config, budget=budget, working_directory=self.working_directory)\n self.iterations[job.id[0]].register_result(job) # register dummy job - will be interpreted as canceled job\n n_canceled += 1\n break\n\n self.logger.debug('HBMASTER: Canceled %i remaining runs'%n_canceled)\n\n # wait for remaining jobs\n while self.num_running_jobs > 0:\n self.thread_cond.wait(60)\n self.logger.debug('HBMASTER: Job finished: wait for remaining %i jobs'%self.num_running_jobs)\n\n self.thread_cond.release()\n \n for i in self.warmstart_iteration:\n i.fix_timestamps(self.time_ref)\n \n ws_data = [i.data for i in self.warmstart_iteration]\n \n return Result([copy.deepcopy(i.data) for i in self.iterations] + ws_data, self.config)", "def measure_mp_speedup():\n modes = [\n # name, function\n ('dSMC', ana.d_smc),\n ('dAMC', ana.d_amc),\n ('EDF-VD', ana.d_edf_vd),\n ('pSMC', ana.p_smc),\n ('pAMC-BB', ana.p_amc_bb),\n ('pAMC-BB+', ft.partial(ana.p_amc_bb, ignore_hi_mode=True))\n ]\n times_seq = {}\n task_sets_list = pickle.load(open(task_sets_path + 'task_sets_fairgen', 'rb'))\n start_total_seq = time()\n for name, func in modes:\n start_mode_seq = time()\n rates = []\n for task_sets in task_sets_list:\n results = []\n for task_set in task_sets:\n results.append(func(task_set))\n rates.append(100 * 
np.average(results))\n stop_mode_seq = time()\n times_seq[name] = stop_mode_seq - start_mode_seq\n stop_total_seq = time()\n times_seq['Overall'] = stop_total_seq - start_total_seq\n\n times_par = {}\n start_total_par = time()\n pool = mp.Pool()\n for name, func in modes:\n start_mode_par = time()\n rates = []\n for task_sets in task_sets_list:\n rates.append(100 * np.average(pool.map(func, task_sets)))\n stop_mode_par = time()\n times_par[name] = stop_mode_par - start_mode_par\n stop_total_par = time()\n times_par['Overall'] = stop_total_par - start_total_par\n\n speedups = {}\n for name, _ in modes:\n speedups[name] = times_seq[name] / times_par[name]\n speedups['Overall'] = times_seq['Overall'] / times_par['Overall']\n\n print(\"PERFORMANCE MEASUREMENTS\")\n print(\"Number of cores: %d\" % mp.cpu_count())\n print(\"Scheme: Sequential time / Parallel time / Speedup\")\n for name, _ in modes:\n print(\"%s: %.3fs / %.3fs / %.3f\" % (name, times_seq[name], times_par[name], speedups[name]))\n print(\"Overall: %.3fs / %.3fs / %.3f\" % (times_seq['Overall'], times_par['Overall'], speedups['Overall']))", "def run_simulation(n, experiments, iterations, budget, recovery_count, performance_factor, current_top,\r\n precomputed=True, dataset=None):\r\n T = 0\r\n k = 5\r\n delta = 0.1\r\n range_m = 1*np.arange(1, budget+1)\r\n scores, true_top = init(n, precomputed=precomputed, dataset=dataset)\r\n true_ranks = get_ranks(scores)\r\n\r\n for exp in tqdm.tqdm(range(experiments), desc=\"experiments\"):\r\n for itr in tqdm.tqdm(range(iterations), desc=\"iterations\"):\r\n t = 1\r\n count = 0\r\n A = np.arange(n)\r\n P = np.zeros((n, n))\r\n S = np.zeros(n)\r\n r_count = 0\r\n data = []\r\n for b in range(budget):\r\n m = range_m[b]\r\n\r\n est, t, A, P, S, count, r_count, data = sparse_borda(n, t, T, k, A, P, S, delta, scores, m*n, count, r_count, data)\r\n\r\n ranking, ranks, top = get_ranking(n, est)\r\n if(top == true_top):\r\n recovery_count[b][exp] += 1\r\n performance_factor[b][exp] += ranks[true_top]\r\n current_top[b][exp] += true_ranks[top]\r\n\r\n current_top /= iterations\r\n performance_factor /= iterations\r\n\r\n return ranking, ranks, data, scores, true_top, est, recovery_count, performance_factor, current_top", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def tune(runner, kernel_options, device_options, tuning_options):\n\n #Bayesian Optimization strategy seems to need some hyper parameter tuning to\n #become better than random sampling for auto-tuning GPU kernels.\n\n #alpha, normalize_y, and n_restarts_optimizer are options to\n #https://scikit-learn.org/stable/modules/generated/sklearn.gaussian_process.GaussianProcessRegressor.html\n #defaults used by Baysian Optimization are:\n # alpha=1e-6, #1e-3 recommended for very noisy or discrete search spaces\n # n_restarts_optimizer=5,\n # normalize_y=True,\n\n #several exploration friendly settings are: (default is acq=\"ucb\", kappa=2.576)\n # acq=\"poi\", xi=1e-1\n # acq=\"ei\", xi=1e-1\n # acq=\"ucb\", kappa=10\n\n if not bayes_opt_present:\n raise ImportError(\"Error: optional dependency Bayesian Optimization not installed\")\n\n #defaults as used by Bayesian Optimization Python package\n acq = tuning_options.strategy_options.get(\"method\", \"poi\")\n kappa = tuning_options.strategy_options.get(\"kappa\", 2.576)\n xi = tuning_options.strategy_options.get(\"xi\", 0.0)\n init_points = 
tuning_options.strategy_options.get(\"popsize\", 5)\n n_iter = tuning_options.strategy_options.get(\"maxiter\", 25)\n\n tuning_options[\"scaling\"] = True\n\n results = []\n\n #function to pass to the optimizer\n def func(**kwargs):\n args = [kwargs[key] for key in tuning_options.tune_params.keys()]\n return -1.0 * minimize._cost_func(args, kernel_options, tuning_options, runner, results)\n\n bounds, _, _ = minimize.get_bounds_x0_eps(tuning_options)\n pbounds = OrderedDict(zip(tuning_options.tune_params.keys(),bounds))\n\n verbose=0\n if tuning_options.verbose:\n verbose=2\n\n optimizer = BayesianOptimization(f=func, pbounds=pbounds, verbose=verbose)\n\n optimizer.maximize(init_points=init_points, n_iter=n_iter, acq=acq, kappa=kappa, xi=xi)\n\n if tuning_options.verbose:\n print(optimizer.max)\n\n return results, runner.dev.get_environment()", "def parallel_irs(i,mu_grid,l_tmp,max_runs,s_ray,theta_boundaries,start_time,state):\n beta_boundary = state['beta_boundary']\n beta_res = state['beta_res']\n m = state['m']\n zeta = state['zeta']\n # Drawing images locations theta\n theta = random_image_draw(l_tmp, theta_boundaries[0], theta_boundaries[1])\n # Calculating locations of sources and corresponding magnitudes\n beta = af.img2src(theta, m, zeta, state)\n # Binning sources magnification\n beta_grid_h, beta_grid_v, mu_grid_temp = af.mag_binning(beta, s_ray, beta_boundary, beta_res)\n mu_grid += mu_grid_temp\n temp_t = time.time() - start_time\n if i % (max(1, int(max_runs / 4000))) == 0:\n print('Finished ' + str(round((i + 1) * 100 / max_runs, 5)) + '% in ' + str(round(temp_t)) +\n 's; ~' + str(round(temp_t * (max_runs / (i + 1) - 1))) + 's remaining')\n return 0", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def test_optimalk(parallel_backend, n_jobs, n_clusters):\n import numpy as np\n from sklearn.datasets.samples_generator import make_blobs\n from gap_statistic import OptimalK\n\n # Create data\n X, y = make_blobs(n_samples=int(1e3), n_features=2, centers=3)\n\n for algo in ['kmeans', 'kmeans2', 'skl-kmeans', 'sph-kmeans']:\n # Create optimalK instance\n optimalK = OptimalK(parallel_backend=parallel_backend, n_jobs=n_jobs, algo=algo)\n\n suggested_clusters = optimalK(X, n_refs=3, cluster_array=np.arange(1, 10))\n\n 
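# note (added descriptive comment): np.allclose(a, b, 2) passes 2 as rtol, so this is a loose relative check rather than a strict \"within 2 clusters\" bound; the assert message below states the intended meaning.\n        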
assert np.allclose(suggested_clusters, n_clusters, 2), \\\n ('Correct clusters is {}, OptimalK suggested {}'.format(n_clusters, suggested_clusters))", "def block_strategize(\n upper_limit: \"compute up to this dimension (inclusive)\",\n lower_limit: \"\"\"compute starting at this dimension,\n if ``None`` lowest unknown dimension is chosen.\"\"\" = None,\n c: \"overshoot parameter\" = 0.25,\n strategies_and_costs: \"previously computed strategies and costs to extend\" = None,\n lattice_type: \"one of 'qary' or 'qary-lv'\" = \"qary\",\n dump_filename: \"\"\"results are regularly written to this filename, if ``None``\n then ``data/fplll-block-simulations-{lattice_type}.sobj`` is used.\"\"\" = None,\n ncores: \"number of cores to use in parallel\" = 4,\n gh_factor: \"set target_norm^2 to gh_factor * gh^2\" = 1.00,\n rb: \"compute pruning parameters for `GH^(i/rb)` for `i in -rb, …, rb`\" = 1,\n greedy: \"use Greedy pruning strategy\" = False,\n sd: \"use self-dual strategy\" = False,\n preproc_loops: \"number of preprocessing tours\" = 2,\n ignore_preproc_cost: \"assume all preprocessing has the cost of LLL regardless of block size\" = False,\n):\n\n dump_filename, strategies, costs, lower_limit = _prepare_parameters(\n dump_filename,\n c,\n strategies_and_costs,\n lower_limit,\n lattice_type,\n preproc_loops,\n greedy,\n sd,\n ignore_preproc_cost,\n )\n\n if ncores > 1:\n workers = Pool(ncores)\n\n from cost import sample_r, _pruner_precision\n\n for d in range(lower_limit, upper_limit + 1):\n D = int((1 + c) * d + 1)\n r = sample_r(D, lattice_type=lattice_type)\n\n float_type = _pruner_precision(d, greedy)\n\n try:\n start = max(strategies[d - 1].preprocessing_block_sizes[-1], 2)\n except IndexError:\n start = 2\n\n if d < 60:\n stop = d\n else:\n stop = min(start + max(8, ncores), d)\n\n best = None\n\n for giant_step in range(start, stop, ncores):\n jobs, results = [], []\n for baby_step in range(giant_step, min(stop, giant_step + ncores)):\n opts = {\n \"greedy\": greedy,\n \"sd\": sd,\n \"gh_factor\": gh_factor,\n \"float_type\": float_type,\n \"radius_bound\": rb,\n \"preproc_loops\": preproc_loops,\n \"ignore_preproc_cost\": ignore_preproc_cost,\n }\n jobs.append((r, d, c, baby_step, strategies, costs, opts))\n\n if ncores == 1:\n for job in jobs:\n results.append(cost_kernel(job))\n else:\n results = workers.map(cost_kernel, jobs)\n\n do_break = False\n for cost, strategy in results:\n logging.debug(\n \"%3d :: C: %5.1f, P: %5.1f c: %.2f, %s\"\n % (d, log(cost[\"total cost\"], 2), log(cost[\"preprocessing\"], 2), cost[\"c\"], strategy)\n )\n if best is None or cost[\"total cost\"] < best[0][\"total cost\"]:\n best = cost, strategy\n if cost[\"total cost\"] > 1.1 * best[0][\"total cost\"]:\n do_break = True\n break\n if do_break:\n break\n\n costs.append(best[0])\n strategies.append(best[1])\n logging.info(\n \"%3d :: C: %5.1f, P: %5.1f c: %.2f, %s\"\n % (d, log(costs[-1][\"total cost\"], 2), log(costs[-1][\"preprocessing\"], 2), costs[-1][\"c\"], strategies[-1])\n )\n pickle.dump((strategies, costs), open(dump_filename, \"wb\"))\n dump_strategies_json(dump_filename.replace(\".sobj\", \"-strategies.json\"), strategies)\n\n return strategies, costs", "def run_parallel(heritability, x_start_i, x_stop_i, cluster='usc'):\n\trun_id = 'corr_trait_sim'\n\tjob_id = ' % s_ % d_ % d' % (run_id, x_start_i, x_stop_i)\n\tfile_prefix = env.env['results_dir'] + run_id + '_' + str(x_start_i) + '_' + str(x_stop_i)\n\n\t#Cluster specific parameters\t\n\tif cluster == 'gmi': #GMI cluster.\n\t\tshstr = 
'#!/bin/sh\\n'\n\t\tshstr += '#$ -N %s\\n' % job_id\n\t\tshstr += \"#$ -q q.norm@blade*\\n\"\n\t\tshstr += '#$ -o %s.log\\n' % job_id\n\t\t#shstr += '#$ -cwd /home/GMI/$HOME\\n'\n\t\t#shstr += '#$ -M bjarni.vilhjalmsson@gmi.oeaw.ac.at\\n\\n'\n\n\telif cluster == 'usc': #USC cluster.\n\t\tshstr = \"#!/bin/csh\\n\"\n\t\tshstr += \"#PBS -l walltime=%s \\n\" % '72:00:00'\n\t\tshstr += \"#PBS -l mem=%s \\n\" % '1950mb'\n\t\tshstr += \"#PBS -q cmb\\n\"\n\t\tshstr += \"#PBS -N p%s \\n\" % job_id\n\n\tshstr += \"(python %scorr_trait_sim.py %s %d %d \" % (env.env['script_dir'], heritability, x_start_i, x_stop_i)\n\n\tshstr += \"> \" + file_prefix + \"_job.out) >& \" + file_prefix + \"_job.err\\n\"\n\tprint '\\n', shstr, '\\n'\n\tscript_file_name = run_id + \".sh\"\n\tf = open(script_file_name, 'w')\n\tf.write(shstr)\n\tf.close()\n\n\t#Execute qsub script\n\tos.system(\"qsub \" + script_file_name)", "def fast():\n # Need a minimum of 10 total_timesteps for adversarial training code to pass\n # \"any update happened\" assertion inside training loop.\n total_timesteps = 10\n n_expert_demos = 1\n n_episodes_eval = 1\n algorithm_kwargs = dict(\n shared=dict(\n demo_batch_size=1,\n n_disc_updates_per_round=4,\n ),\n )\n gen_batch_size = 2\n parallel = False # easier to debug with everything in one process\n max_episode_steps = 5\n # SB3 RL seems to need batch size of 2, otherwise it runs into numeric\n # issues when computing multinomial distribution during predict()\n num_vec = 2\n init_rl_kwargs = dict(batch_size=2)", "def make_parallel(self, n):\n return super().make_parallel(n, True)", "def grid_evaluation(param_list_one, param_list_two, param_eval, n_trials=16, \n aggr_method=np.mean, save_dir='data/', file_name='grid evaluation',\n save_to_disk=True, save_each=1000, chunksize=1.):\n \n \n if not list(param_list_two): # If `param_list_two` is empty\n params = param_list_one\n grid_shape = (len(param_list_one),)\n is_really_grid = False\n \n else:\n params = list(itertools.product(param_list_one, param_list_two))\n grid_shape = (len(param_list_one), len(param_list_two))\n is_really_grid = True\n \n def grid_fun(point): # Function to compute for each grid point\n \n trial_out = np.nan * np.ones((n_trials,))\n \n for i in np.arange(n_trials):\n \n if is_really_grid:\n trial_out[i] = param_eval(point[0], point[1])\n else: # If `param_list_two` is empty\n trial_out[i] = param_eval(point)\n \n return aggr_method(trial_out)\n \n n_grid_pts = len(params)\n \n # Recording procedure\n def record_experiment(grid):\n now = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n save_path = save_dir + now + ' ' + file_name + '.pkl'\n experiment = {\n 'date': now,\n 'rows': param_list_one,\n 'cols': param_list_two,\n 'n_trials': n_trials,\n 'grid': np.reshape(grid, grid_shape),\n 'path': save_path\n }\n if save_to_disk:\n utils.save_obj(experiment, save_path)\n return experiment\n \n # Set a pool of workers\n nb_workers = min(mp.cpu_count(), 24)\n print('Working with {} processes.'.format(nb_workers))\n pool = mp.Pool(nb_workers)\n \n # Iterate `grid_fun` across workers\n it = pool.imap(grid_fun, params, chunksize=chunksize)\n grid = np.nan * np.ones((n_grid_pts,))\n\n for idx, val in enumerate(tqdm(it, total=n_grid_pts)):\n grid[idx] = val\n \n # Make sure that we save after each couple of iterations\n if (idx >= save_each) and (idx % save_each == 0): \n experiment = record_experiment(grid)\n \n # Close pool\n pool.close()\n pool.join()\n \n experiment = record_experiment(grid)\n \n return experiment", "def 
optimize_restarts(self, num_restarts=10, robust=False, verbose=True, parallel=False, num_processes=None, **kwargs):\r\n initial_parameters = self._get_params_transformed()\r\n\r\n if parallel:\r\n try:\r\n jobs = []\r\n pool = mp.Pool(processes=num_processes)\r\n for i in range(num_restarts):\r\n self.randomize()\r\n job = pool.apply_async(opt_wrapper, args=(self,), kwds=kwargs)\r\n jobs.append(job)\r\n\r\n pool.close() # signal that no more data coming in\r\n pool.join() # wait for all the tasks to complete\r\n except KeyboardInterrupt:\r\n print \"Ctrl+c received, terminating and joining pool.\"\r\n pool.terminate()\r\n pool.join()\r\n\r\n for i in range(num_restarts):\r\n try:\r\n if not parallel:\r\n self.randomize()\r\n self.optimize(**kwargs)\r\n else:\r\n self.optimization_runs.append(jobs[i].get())\r\n\r\n if verbose:\r\n print(\"Optimization restart {0}/{1}, f = {2}\".format(i + 1, num_restarts, self.optimization_runs[-1].f_opt))\r\n except Exception as e:\r\n if robust:\r\n print(\"Warning - optimization restart {0}/{1} failed\".format(i + 1, num_restarts))\r\n else:\r\n raise e\r\n\r\n if len(self.optimization_runs):\r\n i = np.argmin([o.f_opt for o in self.optimization_runs])\r\n self._set_params_transformed(self.optimization_runs[i].x_opt)\r\n else:\r\n self._set_params_transformed(initial_parameters)", "def run(self,kRange=None,sigmaRange=None,chunks=None):\n\n ## run spectral clustering parameter search\n totalCores = cpu_count()\n totalCores = totalCores - 1\n\n ## specify the ranges\n if not kRange:\n kRange = np.array([int(round(i)) for i in np.linspace(20,500,15)])\n elif type(kRange) == type([]):\n kRange = np.array(kRange)\n\n ## different sigma ranges are appropriate for different GO aspects\n if sigmaRange:\n pass\n elif self.aspect == 'biological_process':\n sigmaRange = np.linspace(0.01,1.0,15)\n elif self.aspect == 'molecular_function':\n sigmaRange = np.linspace(1.0,2.0,15)\n elif self.aspect == 'cellular_component':\n sigmaRange = np.linspace(0.05,1.0,15)\n else:\n raise Exception(\"invalid aspect provided\")\n\n ## prepare outfiles\n outFid1 = open(self.resultsPath1,'wa')\n self.writer1 = csv.writer(outFid1)\n header1 = ['k','sigma','silvalue']\n self.writer1.writerow(header1)\n \n outFid2 = open(self.resultsPath2,'wa')\n self.writer2 = csv.writer(outFid2)\n header2 = ['k','sigma']+range(kRange.max())\n self.writer2.writerow(header2)\n\n ## limit each iteration to keep memory usage down \n if chunks:\n pass\n else:\n chunks = int(round((np.log(self.M.shape[0]))))\n print(\"chunks = %s\"%chunks)\n\n toRun = []\n for k in kRange:\n toRun += [(k,sigma,self.distancePath,self.dtype) for sigma in sigmaRange]\n\n stopPoints = np.arange(0,len(toRun),chunks)\n if stopPoints[-1] < len(toRun):\n stopPoints = np.hstack([stopPoints[1:],np.array([len(toRun)])])\n\n begin = 0\n\n if chunks == 1:\n self._run_sc(toRun)\n else:\n for i,chunk in enumerate(range(stopPoints.size)):\n stop = stopPoints[chunk]\n print('...running %s-%s/%s'%(begin,stop,len(toRun)))\n self.run_sc(toRun,begin,stop)\n begin = stop\n\n print(\"complete.\")\n outFid1.close()\n outFid2.close()", "def test_batch_execute_parallel(mock_run_batch):\n mock_run_batch.return_value = TASK_BATCH\n dev = _aws_device(wires=4, foo=\"bar\", parallel=True)\n assert dev.parallel is True\n\n with QuantumTape() as circuit:\n qml.Hadamard(wires=0)\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=[0])\n qml.expval(qml.PauliX(1))\n qml.var(qml.PauliY(2))\n qml.sample(qml.PauliZ(3))\n\n circuits = [circuit, circuit]\n batch_results = 
dev.batch_execute(circuits)\n for results in batch_results:\n assert np.allclose(\n results[0], RESULT.get_value_by_result_type(result_types.Probability(target=[0]))\n )\n assert np.allclose(\n results[1],\n RESULT.get_value_by_result_type(\n result_types.Expectation(observable=Observable.X(), target=1)\n ),\n )\n assert np.allclose(\n results[2],\n RESULT.get_value_by_result_type(\n result_types.Variance(observable=Observable.Y(), target=2)\n ),\n )\n assert np.allclose(\n results[3],\n RESULT.get_value_by_result_type(\n result_types.Sample(observable=Observable.Z(), target=3)\n ),\n )\n\n mock_run_batch.assert_called_with(\n [CIRCUIT, CIRCUIT],\n s3_destination_folder=(\"foo\", \"bar\"),\n shots=SHOTS,\n max_parallel=None,\n max_connections=AwsQuantumTaskBatch.MAX_CONNECTIONS_DEFAULT,\n poll_timeout_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_TIMEOUT,\n poll_interval_seconds=AwsQuantumTask.DEFAULT_RESULTS_POLL_INTERVAL,\n foo=\"bar\",\n )", "def dopri853core(\n n, func, x, t, hmax, h, rtol, atol, nmax, safe, beta, fac1, fac2, pos_neg, args\n):\n # array to store the result\n result = numpy.zeros((len(t), n))\n\n # initial preparations\n facold = 1.0e-4\n expo1 = 1.0 / 8.0 - beta * 0.2\n facc1 = 1.0 / fac1\n facc2 = 1.0 / fac2\n\n k1 = numpy.array(func(x, t[0], *args))\n hmax = numpy.fabs(hmax)\n iord = 8\n\n if h == 0.0: # estimate initial time step\n h, k1, k2, k3 = hinit(func, x, t, pos_neg, k1, iord, hmax, rtol, atol, args)\n\n reject = 0\n t_current = t[\n 0\n ] # store current integration time internally (not the current time wanted by user!!)\n t_old = t[0]\n finished_user_t_ii = 0 # times indices wanted by user\n\n result[0, :] = x\n\n # basic integration step\n while (\n finished_user_t_ii < len(t) - 1\n ): # check if the current computed time indices less than total inices needed\n # keep time step not too small\n h = pos_neg * numpy.max([numpy.fabs(h), 1e3 * uround])\n\n # the twelve stages\n xx1 = x + h * a21 * k1\n k2 = numpy.array(func(xx1, t_current + c2 * h, *args))\n\n xx1 = x + h * (a31 * k1 + a32 * k2)\n k3 = numpy.array(func(xx1, t_current + c3 * h, *args))\n\n xx1 = x + h * (a41 * k1 + a43 * k3)\n k4 = numpy.array(func(xx1, t_current + c4 * h, *args))\n\n xx1 = x + h * (a51 * k1 + a53 * k3 + a54 * k4)\n k5 = numpy.array(func(xx1, t_current + c5 * h, *args))\n\n xx1 = x + h * (a61 * k1 + a64 * k4 + a65 * k5)\n k6 = numpy.array(func(xx1, t_current + c6 * h, *args))\n\n xx1 = x + h * (a71 * k1 + a74 * k4 + a75 * k5 + a76 * k6)\n k7 = numpy.array(func(xx1, t_current + c7 * h, *args))\n\n xx1 = x + h * (a81 * k1 + a84 * k4 + a85 * k5 + a86 * k6 + a87 * k7)\n k8 = numpy.array(func(xx1, t_current + c8 * h, *args))\n\n xx1 = x + h * (a91 * k1 + a94 * k4 + a95 * k5 + a96 * k6 + a97 * k7 + a98 * k8)\n k9 = numpy.array(func(xx1, t_current + c9 * h, *args))\n\n xx1 = x + h * (\n a101 * k1\n + a104 * k4\n + a105 * k5\n + a106 * k6\n + a107 * k7\n + a108 * k8\n + a109 * k9\n )\n k10 = numpy.array(func(xx1, t_current + c10 * h, *args))\n\n xx1 = x + h * (\n a111 * k1\n + a114 * k4\n + a115 * k5\n + a116 * k6\n + a117 * k7\n + a118 * k8\n + a119 * k9\n + a1110 * k10\n )\n k2 = numpy.array(func(xx1, t_current + c11 * h, *args))\n\n xx1 = x + h * (\n a121 * k1\n + a124 * k4\n + a125 * k5\n + a126 * k6\n + a127 * k7\n + a128 * k8\n + a129 * k9\n + a1210 * k10\n + a1211 * k2\n )\n\n t_old_older = numpy.copy(t_old)\n t_old = numpy.copy(t_current)\n t_current += h\n\n k3 = numpy.array(func(xx1, t_current, *args))\n\n k4 = (\n b1 * k1\n + b6 * k6\n + b7 * k7\n + b8 * k8\n + b9 * k9\n + b10 
* k10\n + b11 * k2\n + b12 * k3\n )\n k5 = x + h * k4\n\n # error estimation\n sk = atol + rtol * numpy.max([numpy.fabs(x), numpy.fabs(k5)], axis=0)\n erri = k4 - bhh1 * k1 - bhh2 * k9 - bhh3 * k3\n err2 = numpy.sum(numpy.square(erri / sk), axis=0)\n erri = (\n er1 * k1\n + er6 * k6\n + er7 * k7\n + er8 * k8\n + er9 * k9\n + er10 * k10\n + er11 * k2\n + er12 * k3\n )\n err = numpy.sum(numpy.square(erri / sk), axis=0)\n\n deno = err + 0.01 * err2\n deno = 1.0 if deno <= 0.0 else deno\n err = numpy.fabs(h) * err * numpy.sqrt(1.0 / (deno * n))\n\n # computation of hnew\n fac11 = numpy.power(err, expo1)\n\n # Lund-stabilization\n fac = fac11 / pow(facold, beta)\n\n # we require fac1 <= hnew / h <= fac2\n fac = numpy.max([facc2, numpy.min([facc1, fac / safe])])\n hnew = h / fac\n\n if err <= 1.0:\n # step accepted\n facold = numpy.max([err, 1.0e-4])\n k4 = numpy.array(func(k5, t_current, *args))\n\n # final preparation for dense output\n rcont1 = numpy.copy(x)\n xdiff = k5 - x\n rcont2 = xdiff\n bspl = h * k1 - xdiff\n rcont3 = numpy.copy(bspl)\n rcont4 = xdiff - h * k4 - bspl\n rcont5 = (\n d41 * k1\n + d46 * k6\n + d47 * k7\n + d48 * k8\n + d49 * k9\n + d410 * k10\n + d411 * k2\n + d412 * k3\n )\n rcont6 = (\n d51 * k1\n + d56 * k6\n + d57 * k7\n + d58 * k8\n + d59 * k9\n + d510 * k10\n + d511 * k2\n + d512 * k3\n )\n rcont7 = (\n d61 * k1\n + d66 * k6\n + d67 * k7\n + d68 * k8\n + d69 * k9\n + d610 * k10\n + d611 * k2\n + d612 * k3\n )\n rcont8 = (\n d71 * k1\n + d76 * k6\n + d77 * k7\n + d78 * k8\n + d79 * k9\n + d710 * k10\n + d711 * k2\n + d712 * k3\n )\n\n # the next three function evaluations\n xx1 = x + h * (\n a141 * k1\n + a147 * k7\n + a148 * k8\n + a149 * k9\n + a1410 * k10\n + a1411 * k2\n + a1412 * k3\n + a1413 * k4\n )\n k10 = numpy.array(func(xx1, t_old + c14 * h, *args))\n xx1 = x + h * (\n a151 * k1\n + a156 * k6\n + a157 * k7\n + a158 * k8\n + a1511 * k2\n + a1512 * k3\n + a1513 * k4\n + a1514 * k10\n )\n k2 = numpy.array(func(xx1, t_old + c15 * h, *args))\n xx1 = x + h * (\n a161 * k1\n + a166 * k6\n + a167 * k7\n + a168 * k8\n + a169 * k9\n + a1613 * k4\n + a1614 * k10\n + a1615 * k2\n )\n k3 = numpy.array(func(xx1, t_old + c16 * h, *args))\n\n # final preparation\n rcont5 = h * (rcont5 + d413 * k4 + d414 * k10 + d415 * k2 + d416 * k3)\n rcont6 = h * (rcont6 + d513 * k4 + d514 * k10 + d515 * k2 + d516 * k3)\n rcont7 = h * (rcont7 + d613 * k4 + d614 * k10 + d615 * k2 + d616 * k3)\n rcont8 = h * (rcont8 + d713 * k4 + d714 * k10 + d715 * k2 + d716 * k3)\n\n k1 = numpy.copy(k4)\n x = numpy.copy(k5)\n\n # loop for dense output in this time slot\n while (finished_user_t_ii < len(t) - 1) and (\n pos_neg * t[finished_user_t_ii + 1] < pos_neg * t_current\n ):\n result[finished_user_t_ii + 1, :] = dense_output(\n t[finished_user_t_ii + 1],\n t_old,\n h,\n [rcont1, rcont2, rcont3, rcont4, rcont5, rcont6, rcont7, rcont8],\n )\n finished_user_t_ii += 1\n\n if numpy.fabs(hnew) > hmax:\n hnew = pos_neg * hmax\n if reject:\n hnew = pos_neg * numpy.min([numpy.fabs(hnew), numpy.fabs(h)])\n\n reject = 0\n else:\n # step rejected since error too big\n hnew = h / numpy.min([facc1, fac11 / safe])\n reject = 1\n\n # reverse time increment since error rejected\n t_current = numpy.copy(t_old)\n t_old = numpy.copy(t_old_older)\n\n h = numpy.copy(hnew) # current h\n\n return result", "def run():\n if am_i_root():\n\n print(\"*** initializing...\")\n\n # Print parameters\n print(\"N_DIMS = \" + str(N_DIMS))\n print(\"LAMBDA_OVER_DX = \" + str(LAMBDA_OVER_DX))\n print(\"R_DT = \" + str(R_DT))\n 
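# note (added descriptive comment): POLICY == -1 loads a saved model from MODEL_PATH; any other POLICY uses STEPS_AHEAD-step lookahead planning (see the branch below).\n        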
print(\"MU0_POISSON = \" + str(MU0_POISSON))\n print(\"NORM_POISSON = \" + NORM_POISSON)\n print(\"N_GRID = \" + str(N_GRID))\n print(\"N_HITS = \" + str(N_HITS))\n print(\"POLICY = \" + str(POLICY))\n if POLICY == -1:\n print(\"MODEL_PATH = \" + str(MODEL_PATH))\n else:\n print(\"STEPS_AHEAD = \" + str(STEPS_AHEAD))\n print(\"EPSILON = \" + str(EPSILON))\n print(\"STOP_t = \" + str(STOP_t))\n print(\"STOP_p = \" + str(STOP_p))\n print(\"N_PARALLEL = \" + str(N_PARALLEL))\n print(\"WITH_MPI = \" + str(WITH_MPI))\n print(\"ADAPTIVE_N_RUNS = \" + str(ADAPTIVE_N_RUNS))\n print(\"REL_TOL = \" + str(REL_TOL))\n print(\"MAX_N_RUNS = \" + str(MAX_N_RUNS))\n print(\"N_RUNS(input) = \" + str(N_RUNS))\n sys.stdout.flush()\n\n # Perform runs\n if am_i_root():\n print(\"*** generating episodes...\")\n\n N_runs = N_RUNS\n if ADAPTIVE_N_RUNS or WITH_MPI:\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n N_runso = 0\n\n if WITH_MPI:\n cdf_t_tot_loc = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot_loc = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_loc = np.nan * np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n failed_loc = - np.ones(MAX_N_RUNS // N_PARALLEL, dtype=float)\n else:\n cdf_t_tot = np.zeros(LEN_CDF_T, dtype=float)\n cdf_h_tot = np.zeros(LEN_CDF_H, dtype=float)\n mean_t_episodes = np.nan * np.ones(MAX_N_RUNS, dtype=float)\n failed_episodes = - np.ones(MAX_N_RUNS, dtype=float)\n\n while True:\n if WITH_MPI: # MPI\n if N_runs % N_PARALLEL != 0:\n raise Exception(\"N_runs must be multiple of N_PARALLEL with MPI\")\n COMM.Barrier()\n # Decomposition\n Nepisodes = N_runs // N_PARALLEL\n episode_list = range(N_runso + ME, N_runs, N_PARALLEL)\n # Run episodes and reduce locally\n ind = N_runso // N_PARALLEL\n for episode in episode_list:\n cdf_t, cdf_h, mean_t_loc[ind], failed_loc[ind] = Worker(episode)\n cdf_t_tot_loc += cdf_t\n cdf_h_tot_loc += cdf_h\n ind += 1\n\n # Reduce globally the mean_t and failed\n mean_t_episodes = np.empty([N_runs], dtype=float)\n failed_episodes = np.empty([N_runs], dtype=float)\n COMM.Barrier()\n COMM.Allgather([mean_t_loc[:ind], Nepisodes, MPI.DOUBLE], [mean_t_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Allgather([failed_loc[:ind], Nepisodes, MPI.DOUBLE], [failed_episodes, Nepisodes, MPI.DOUBLE])\n COMM.Barrier()\n elif N_PARALLEL > 1: # multiprocessing\n # Run episodes in parallel\n pool = multiprocessing.Pool(N_PARALLEL)\n result = pool.map(Worker, range(N_runso, N_runs))\n pool.close()\n pool.join()\n # Reduce\n ind = N_runso\n for cdf_t, cdf_h, mean_t, failed in result:\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n elif N_PARALLEL == 1: # sequential\n ind = N_runso\n for episode in range(N_runso, N_runs):\n cdf_t, cdf_h, mean_t, failed = Worker(episode)\n cdf_t_tot += cdf_t\n cdf_h_tot += cdf_h\n mean_t_episodes[ind] = mean_t\n failed_episodes[ind] = failed\n ind += 1\n else:\n raise Exception(\"Problem with N_PARALLEL: must be an int >= 1\")\n\n # estimate of the error\n mean_ep = np.mean(mean_t_episodes[:N_runs])\n sigma_ep = np.std(mean_t_episodes[:N_runs])\n std_error_mean = sigma_ep / np.sqrt(N_runs)\n rel_std_error_mean = std_error_mean / mean_ep\n\n # break clause\n if not ADAPTIVE_N_RUNS:\n break\n else:\n if rel_std_error_mean < REL_TOL:\n break\n elif N_runs >= MAX_N_RUNS:\n break\n else:\n N_runso = N_runs\n N_runs = int(np.ceil(1.05 * (sigma_ep / mean_ep / 
REL_TOL) ** 2))\n N_runs = min(N_runs, MAX_N_RUNS)\n N_runs = int(N_PARALLEL * (np.ceil(N_runs / N_PARALLEL))) # make it multiple of N_PARALLEL\n if am_i_root():\n print(\"N_RUNS(current) = \" + str(N_runs))\n sys.stdout.flush()\n\n if am_i_root():\n print(\"N_RUNS(performed) = \" + str(N_runs))\n sys.stdout.flush()\n\n # Reduce\n if am_i_root():\n print(\"*** post-processing...\")\n if WITH_MPI:\n # locally\n cdf_t_tot_loc /= N_runs\n cdf_h_tot_loc /= N_runs\n # Reduce globally\n cdf_t_tot = np.empty([LEN_CDF_T], dtype=float)\n cdf_h_tot = np.empty([LEN_CDF_H], dtype=float)\n COMM.Barrier()\n COMM.Allreduce(cdf_t_tot_loc, cdf_t_tot, op=MPI.SUM)\n COMM.Allreduce(cdf_h_tot_loc, cdf_h_tot, op=MPI.SUM)\n COMM.Barrier()\n else:\n cdf_t_tot /= N_runs\n cdf_h_tot /= N_runs\n mean_t_episodes = mean_t_episodes[:N_runs]\n failed_episodes = failed_episodes[:N_runs]\n\n # Further post-processing, save and plot\n if am_i_root():\n\n # from cdf to pdf\n pdf_t_tot = cdf_to_pdf(cdf_t_tot)\n pdf_h_tot = cdf_to_pdf(cdf_h_tot)\n\n # compute stats of number of steps and number of hits\n t_bins = np.arange(BIN_START_T, BIN_END_T, BIN_SIZE_T) + 0.5 * BIN_SIZE_T\n mean_t, sigma_t, skew_t, kurt_t, p_found = stats_from_pdf(t_bins, pdf_t_tot)\n p25_t, p50_t, p75_t, p90_t, p95_t, p99_t, _ = stats_from_cdf(t_bins, cdf_t_tot)\n\n h_bins = np.arange(BIN_START_H, BIN_END_H, BIN_SIZE_H) + 0.5 * BIN_SIZE_H\n mean_h, sigma_h, skew_h, kurt_h, _ = stats_from_pdf(h_bins, pdf_h_tot)\n p25_h, p50_h, p75_h, p90_h, p95_h, p99_h, _ = stats_from_cdf(h_bins, cdf_h_tot)\n\n print(\"probability that the source is never found : %.10f\" % (1.0 - p_found, ))\n print(\"mean number of steps to find the source : %.3f +/- %.3f\" % (mean_t, 1.96 * std_error_mean))\n print(\"number of steps to find the source with 50 %% probability: %.3f\" % p50_t)\n print(\"number of steps to find the source with 99 %% probability: %.3f\" % p99_t)\n nb_failed = np.sum(failed_episodes)\n if np.any(failed_episodes < 0):\n nb_failed = -1\n print(\"problem while recording failures\")\n else:\n print(\"number of failed episodes : %d / %d (%f %%)\"\n % (nb_failed, N_runs, nb_failed / N_runs * 100))\n sys.stdout.flush()\n\n # save all parameters to txt file\n inputs = {\n \"N_DIMS\": N_DIMS,\n \"LAMBDA_OVER_DX\": LAMBDA_OVER_DX,\n \"R_DT\": R_DT,\n \"MU0_POISSON\": MU0_POISSON,\n \"NORM_POISSON\": NORM_POISSON,\n \"N_GRID\": N_GRID,\n \"N_HITS\": N_HITS,\n \"POLICY\": POLICY,\n \"STEPS_AHEAD\": STEPS_AHEAD,\n \"MODEL_PATH\": MODEL_PATH,\n \"STOP_t\": STOP_t,\n \"STOP_p\": STOP_p,\n \"ADAPTIVE_N_RUNS\": ADAPTIVE_N_RUNS,\n \"REL_TOL\": REL_TOL,\n \"MAX_N_RUNS\": MAX_N_RUNS,\n \"N_RUNS_PERFORMED\": N_runs,\n \"BIN_START_T\": BIN_START_T,\n \"BIN_END_T\": BIN_END_T,\n \"BIN_SIZE_T\": BIN_SIZE_T,\n \"BIN_START_H\": BIN_START_H,\n \"BIN_END_H\": BIN_END_H,\n \"BIN_SIZE_H\": BIN_SIZE_H,\n \"EPSILON\": EPSILON,\n }\n param_txt_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_parameters\" + \".txt\"))\n with open(param_txt_file, 'w') as out:\n for key, val in inputs.items():\n print(key + \" = \" + str(val), file=out)\n\n # save stats\n stats_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_statistics\" + \".txt\"))\n with open(stats_file, \"w\") as sfile:\n sfile.write(\"p_not_found\\t%+.4e\\n\" % (1 - p_found,))\n for varname in \\\n ('mean_t', 'sigma_t', 'skew_t', 'kurt_t', 'p25_t', 'p50_t', 'p75_t', 'p90_t', 'p95_t', 'p99_t'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n for varname in \\\n ('mean_h', 'sigma_h', 'skew_h', 'kurt_h', 'p25_h', 
'p50_h', 'p75_h', 'p90_h', 'p95_h', 'p99_h'):\n sfile.write(\"%s\\t\\t%+.4e\\n\" % (varname, locals()[varname]))\n\n # save CDF of number of steps\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nsteps\" + \".npy\"))\n np.save(table_file, np.vstack((t_bins, cdf_t_tot)))\n\n # save CDF of number of hits\n table_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_table_CDF_nhits\" + \".npy\"))\n np.save(table_file, np.vstack((h_bins, cdf_h_tot)))\n\n # create and save figures\n if POLICY == -1:\n specifics = \"MODEL = \" + os.path.basename(MODEL_PATH)\n else:\n specifics = \"STEPS_AHEAD = \" + str(STEPS_AHEAD)\n subtitle = (\n \"N_DIMS = \"\n + str(N_DIMS)\n + \", \"\n + \"LAMBDA_OVER_DX = \"\n + str(LAMBDA_OVER_DX)\n + \", \"\n + \"R_DT = \"\n + str(R_DT)\n + \", \"\n + \"POLICY = \"\n + str(POLICY)\n + \", \"\n + specifics\n + \", \"\n + \"N_GRID = \"\n + str(N_GRID)\n + \", \"\n + \"N_HITS = \"\n + str(N_HITS)\n + \", \"\n + \"N_RUNS = \"\n + str(N_runs)\n + \"\\n\"\n )\n\n # plot PDF(nsteps), CDF(nsteps), PDF(nhits), CDF(nhits)\n fig, ax = plt.subplots(2, 2, figsize=(12, 10))\n plt.subplots_adjust(left=0.08, bottom=0.06, right=0.96, top=0.92, hspace=0.35, wspace=0.30)\n kwargs = {'xycoords': 'axes fraction', 'fontsize': 8, 'ha': \"right\"}\n for row, varname in enumerate([\"number of steps\", \"number of hits\"]):\n if varname == \"number of steps\":\n bins = t_bins\n cdf_tot = cdf_t_tot\n pdf_tot = pdf_t_tot\n mean = mean_t\n sigma = sigma_t\n skew = skew_t\n kurt = kurt_t\n p50 = p50_t\n p75 = p75_t\n p90 = p90_t\n p99 = p99_t\n filesuffix = 'nsteps'\n color = \"tab:blue\"\n else:\n bins = h_bins\n cdf_tot = cdf_h_tot\n pdf_tot = pdf_h_tot\n mean = mean_h\n sigma = sigma_h\n skew = skew_h\n kurt = kurt_h\n p50 = p50_h\n p75 = p75_h\n p90 = p90_h\n p99 = p99_h\n filesuffix = 'nhits'\n color = \"tab:orange\"\n max_x = bins[np.nonzero(pdf_tot)[0][-1]]\n for col, fct in enumerate([\"PDF\", \"CDF\"]):\n if fct == \"PDF\":\n ydata = pdf_tot\n ylim = (0.0, 1.02 * np.max(pdf_tot))\n elif fct == \"CDF\":\n ydata = cdf_tot\n ylim = (0.0, 1.0)\n\n ax[row, col].plot(bins, ydata, \"-o\", color=color, markersize=2, linewidth=1)\n ax[row, col].set_title(fct + \" of \" + varname)\n ax[row, col].set_xlabel(varname + \" to find the source\")\n ax[row, col].set_xlim((0, max_x))\n ax[row, col].set_ylim(ylim)\n\n if fct == \"PDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"mean = \" + \"{:.3e}\".format(mean), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"std = \" + \"{:.3e}\".format(sigma), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"skew = \" + \"{:.3e}\".format(skew), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"ex. 
kurt = \" + \"{:.3e}\".format(kurt), xy=(0.98, 0.44), **kwargs)\n elif fct == \"CDF\":\n ax[row, col].annotate(\"p_not_found = \" + \"{:.3e}\".format(1.0 - p_found), xy=(0.98, 0.60), **kwargs)\n ax[row, col].annotate(\"P50 = \" + \"{:.3e}\".format(p50), xy=(0.98, 0.56), **kwargs)\n ax[row, col].annotate(\"P75 = \" + \"{:.3e}\".format(p75), xy=(0.98, 0.52), **kwargs)\n ax[row, col].annotate(\"P90 = \" + \"{:.3e}\".format(p90), xy=(0.98, 0.48), **kwargs)\n ax[row, col].annotate(\"P99 = \" + \"{:.3e}\".format(p99), xy=(0.98, 0.44), **kwargs)\n plt.grid(False)\n plt.figtext(0.5, 0.985, subtitle, fontsize=7, ha=\"center\", va=\"top\")\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_distributions.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # plot mean nb steps vs number of episodes\n number_episodes = range(1, N_runs + 1)\n cum_mean_t_episodes = np.cumsum(mean_t_episodes) / number_episodes\n if N_runs >= 100:\n number_episodes = number_episodes[20:]\n cum_mean_t_episodes = cum_mean_t_episodes[20:]\n fig, ax = plt.subplots()\n ax.plot(number_episodes, cum_mean_t_episodes, color=\"r\")\n ax.set_title(\"Convergence of the mean number of steps\")\n ax.set_xlabel(\"number of episodes\")\n ax.set_ylabel(\"mean number of steps\")\n plt.figtext(0.5, 0.985, subtitle, fontsize=5, ha=\"center\", va=\"top\")\n plt.grid(False)\n plt.draw()\n figure_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_figure_convergence.pdf\"))\n fig.savefig(figure_file)\n plt.close(fig)\n\n # save monitoring information (concatenate episodes files)\n monitoring_episodes_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_episodes.txt\"))\n filenames = [os.path.join(DIR_TMP, str(\"monitoring_episode_\" + str(episode) + \".txt\")) for episode in range(N_runs)]\n with open(monitoring_episodes_file, \"w\") as mfile:\n mfile.write(\"# episode\\thit_init\\tstop_flag\\tboundary_flag\\t\"\n \"p_not_found\\t\\tmean_nsteps\\t\\ttime_elapsed(sec)\\n\")\n for fname in filenames:\n if os.path.isfile(fname):\n with open(fname) as infile:\n mfile.write(infile.read())\n os.remove(fname)\n else:\n print(\"Unexpected: Missing episode file: \" + str(fname))\n\n # clean up tmp dirs\n if len(os.listdir(DIR_TMP)) != 0:\n print(\"Unexpected: The directory '\" + DIR_TMP\n + \"' is not removed, because it should be empty but is not.\")\n else:\n os.rmdir(DIR_TMP)\n if len(os.listdir(PARENT_DIR_TMP)) == 0:\n os.rmdir(PARENT_DIR_TMP)\n\n # summary\n monitoring_file = os.path.join(DIR_OUTPUTS, str(RUN_NAME + \"_monitoring_summary\" + \".txt\"))\n with open(monitoring_file, \"w\") as mfile:\n mfile.write(\"*** initial hit ***\\n\")\n first_hit = np.loadtxt(monitoring_episodes_file, usecols=1, dtype='int')\n hit_max = np.max(first_hit)\n hit_hist, _ = np.histogram(first_hit, bins=np.arange(0.5, hit_max + 1.5), density=True)\n for h in range(1, hit_max + 1):\n mfile.write(\"hit=%1d: %6.2f %% \\n\" % (h, hit_hist[h - 1] * 100))\n\n mfile.write(\"\\n*** stats convergence ***\\n\")\n mfile.write(\"number of episodes simulated : %d\\n\" % N_runs)\n mfile.write(\"standard error of the mean (estimate): %.4e = %5.2f %%\\n\"\n % (std_error_mean, rel_std_error_mean * 100))\n\n stopping_reason = np.loadtxt(monitoring_episodes_file, usecols=2, dtype='int')\n stop_max = np.max(stopping_reason)\n stopping_hist, _ = np.histogram(stopping_reason, bins=np.arange(0.5, stop_max + 1.5), density=True)\n mfile.write(\"\\n*** reason for stopping (1 is success, anything else is failure) ***\\n\")\n for stop in range(1, 
stop_max + 1):\n mfile.write(\"stop=%1d: %6.2f %% \\n\" % (stop, stopping_hist[stop - 1] * 100))\n\n mfile.write(\"\\n*** probability that the source is not found at the end of the episodes ***\\n\")\n p_not_found = np.loadtxt(monitoring_episodes_file, usecols=4)\n p_gtr_stop = p_not_found[p_not_found > STOP_p]\n p_not_found_max = np.max(p_not_found)\n mfile.write(\"criteria (STOP_p): %.5e\\n\" % STOP_p)\n mfile.write(\"max(p) : %.5e\\n\" % p_not_found_max)\n mfile.write(\"number of episodes where p > STOP_p: %7d (%8.4f %%)\\n\"\n % (len(p_gtr_stop), len(p_gtr_stop) / N_runs * 100))\n\n near_boundaries = np.loadtxt(monitoring_episodes_file, usecols=3, dtype='int')\n near_boundaries = np.count_nonzero(near_boundaries)\n mfile.write(\"\\n*** agent near boundaries ***\\n\")\n mfile.write(\"number of episodes where it happened: %7d (%8.4f %%)\\n\"\n % (near_boundaries, near_boundaries / N_runs * 100))\n\n episode_elapsed = np.loadtxt(monitoring_episodes_file, usecols=5)\n mfile.write(\"\\n*** computational cost per episode ***\\n\")\n mfile.write(\"avg elapsed seconds per episode: %.5e\\n\" % (np.mean(episode_elapsed)))\n mfile.write(\"max elapsed seconds per episode: %.5e\\n\" % (np.max(episode_elapsed)))\n\n elapsed_time_0 = (time.monotonic() - start_time_0) / 3600.0\n mfile.write(\"\\n*** computational cost ***\\n\")\n mfile.write(\"N_PARALLEL = %d\\n\" % N_PARALLEL)\n mfile.write(\"total elapsed hours : %.5e\\n\" % elapsed_time_0)\n mfile.write(\"cost in hours = total elapsed time * N_PARALLEL: %.5e\\n\" % (elapsed_time_0 * N_PARALLEL))\n\n print(\">>> Results saved in the directory: \" + DIR_OUTPUTS)\n\n sys.stdout.flush()", "def model_multiprocess(reservoir_dicts, dual_lists, root, run_dict,\n perm_tups=None, cores=2, machine='laptop',\n parallel=False):\n sys.setrecursionlimit(5000000)\n if parallel:\n Parallel(n_jobs=cores)(\n delayed(NM08_model_loop)(root, run_dict, res_dict, dual_list,\n perm_tup, machine, 100, k+j+m)\n for j, res_dict in enumerate(reservoir_dicts)\n for k, dual_list in enumerate(dual_lists)\n for m, perm_tup in enumerate(perm_tups)\n )\n else:\n for r_dict in reservoir_dicts:\n NM08_model_loop(root, run_dict, r_dict, machine)\n return", "def parallelize(cores=None, fork=True, flatten=False, info=False, infoclass=InfoThreadProgressBar, init=None, *args, **kwargs):\n\tif cores == None:\n\t\tcores = multiprocessing.cpu_count()\n\tdef wrapper(f):\n\t\tdef execute(*multiargs):\n\t\t\tresults = []\n\t\t\tlen(list(zip(*multiargs)))\n\t\t\tN = len(multiargs[0])\n\t\t\tif info:\n\t\t\t\tprint(\"running %i jobs on %i cores\" % (N, cores))\n\t\t\ttaskQueue = queue.Queue(len(multiargs[0]))\n\t\t\t#for timenr in range(times):\n\t\t\t#\ttaskQueue.put(timenr)\n\t\t\tfor tasknr, _args in enumerate(zip(*multiargs)):\n\t\t\t\ttaskQueue.put((tasknr, list(_args)))\n\t\t\t#for timenr in range(times):\n\t\t\t#\tresult = f(*args, **kwargs)\n\t\t\t#\tresults.append(result)\n\t\t\texecutions = [Execution(taskQueue, fork, f, init, corenr, args, kwargs) for corenr in range(cores)]\n\t\t\tif info:\n\t\t\t\tinfoobj = infoclass(len(multiargs[0]), executions)\n\t\t\t\tinfoobj.start()\n\t\t\tfor i, execution in enumerate(executions):\n\t\t\t\texecution.setName(\"T-%d\" % i)\n\t\t\t\texecution.start()\n\t\t\t#if 1:\n\t\t\t#\twatchdog = Watchdog(executions)\n\t\t\t#\twatchdog.start()\n\t\t\terror = False\n\t\t\tfor execution in executions:\n\t\t\t\tlog(\"joining:\",execution.getName())\n\t\t\t\ttry:\n\t\t\t\t\texecution.join()\n\t\t\t\texcept BaseException:\n\t\t\t\t\terror = 
True\n\t\t\t\tresults.extend(execution.results)\n\t\t\t\tif execution.error:\n\t\t\t\t\terror = True \n\t\t\tif info:\n\t\t\t\tinfoobj.join()\n\t\t\tif error:\n\t\t\t\tprint(\"error\", file=sys.stderr)\n\t\t\t\tresults = None\n\t\t\t\traise Exception(\"error in one or more of the executors\")\n\t\t\telse:\n\t\t\t\tresults.sort(cmp=lambda a, b: cmp(a[0], b[0]))\n\t\t\t\tresults = [k[1] for k in results]\n\t\t\t\t#print \"bla\", results\n\t\t\t\tif flatten:\n\t\t\t\t\tflatresults = []\n\t\t\t\t\tfor result in results:\n\t\t\t\t\t\tflatresults.extend(result)\n\t\t\t\t\tresults = flatresults\n\t\t\treturn results\n\t\treturn execute\n\treturn wrapper", "def test_scaled_parallel_transport(self):\n \n for k in (1/11, -1/11, 11, -2):\n self._test_parallel_transport(k=k)", "def Skopt5DStats(numIters, numRuns):\n\n comm = MPI.COMM_WORLD\n rank = comm.Get_rank()\n\n assert comm.Get_size() == numRuns, \"Please ensure there is one process running per run i.e \" + str(numRuns) + \" processes.\"\n \n # Define the problem bounds.\n skoptBounds = [(10, 1300), (40, 230), (0, 90), (0, 90), (0, 90)]\n\n # Use the seedlist from the other runs.\n seedList = [843484, 61806, 570442, 867402, 192390, 60563, 899483, 732848, 243267, 439621] \n\n if rank == 0:\n timeList = []\n bestFoMList = []\n\n # Define which solver will be used.\n optimiser = skopt.Optimizer(skoptBounds, base_estimator = \"RF\", n_initial_points = int(np.ceil(numIters/10)), random_state = seedList[rank])\n\n # Start timing.\n startTime = time.time()\n bestFoM = 0\n\n # Start optimisation.\n for iteration in range(numIters):\n\n # Find out which point to sample next.\n nextParams = optimiser.ask()\n\n # Evaluate the objective function.\n nextFoM = FitnessSkopt5D(nextParams)\n\n if abs(nextFoM) > bestFoM:\n bestFoM = abs(nextFoM)\n \n # Update the model.\n optimiser.tell(nextParams, nextFoM)\n\n # One run complete.\n timeElapsed = time.time() - startTime\n # Run complete. Send results to main process. Tags are unique identifiers.\n if rank != 0:\n comm.send(timeElapsed, dest = 0, tag = 1)\n comm.send(bestFoM, dest = 0, tag = 2)\n \n # Wait for all the processes to end.\n comm.Barrier()\n \n if rank == 0:\n # Add own data first.\n bestFoMList.append(bestFoM)\n timeList.append(timeElapsed)\n\n for process in range(comm.Get_size() - 1):\n # Get the data.\n individualTime = None\n individualTime = comm.recv(individualTime, source = process + 1, tag = 1)\n\n individualFoM = None\n individualFoM = comm.recv(individualFoM, source = process + 1, tag = 2)\n\n bestFoMList.append(individualFoM)\n timeList.append(individualTime)\n\n avgRuntime = np.average(timeList)\n avgFoM = np.average(bestFoMList)\n avgFoMPerTime = np.average(np.divide(bestFoMList, timeList))\n avgFoMPerIter = np.average(np.divide(bestFoMList, numIters))\n absBestFoM = np.max(bestFoMList)\n\n print(\"Bayesian optimisation 5D testing complete! 
Here are the stats:\")\n print(\"Average runtime per run (s): \" + str(avgRuntime))\n print(\"Average FoM: \" + str(avgFoM))\n print(\"Average FoM per unit time: \" + str(avgFoMPerTime))\n print(\"Average FoM per unit iteration: \" + str(avgFoMPerIter))\n print(\"Absolute best FoM determined: \" + str(absBestFoM))\n print(\"------------------------------------------------------------------------------------------------------------------\")\n \n return", "def speedup(n0,l,ntarray=np.arange(100),marray=np.arange(100)):\n\n# initialise variables\n\n Sup_m2 = np.zeros(np.size(marray))\n Sup_m3 = np.zeros(np.size(marray))\n Sup_m4 = np.zeros(np.size(marray))\n Sup_nt2 = np.zeros(np.size(ntarray))\n Sup_nt3 = np.zeros(np.size(ntarray))\n Sup_nt4 = np.zeros(np.size(ntarray))\n\n# fix nt at the meadian value of ntarray\n# run test_stats_omp over the range of marray to collect the walltimes for one and two threads\n# calculate the speed up and store it in Sup_mi where i is the number of threads\n\n nt = int(np.around(np.mean(ntarray)))\n for m in np.arange(1,np.size(marray)+1):\n wall_1thread = ns.test_stats_omp(n0,l,nt,m,1)\n wall_2thread = ns.test_stats_omp(n0,l,nt,m,2)\n wall_3thread = ns.test_stats_omp(n0,l,nt,m,3)\n wall_4thread = ns.test_stats_omp(n0,l,nt,m,4)\n Sup_m2[m-1] = wall_1thread/wall_2thread\n Sup_m3[m-1] = wall_1thread/wall_3thread \n Sup_m4[m-1] = wall_1thread/wall_4thread \n \n# fix m at the median value of marray\n# run test_stats_omp over the range of ntarray to collect the walltimes for one and two threads\n# calculate the speed up and store it in Sup_nti where i is the number of threads\n\n m = int(np.around(np.median(marray)))\n for nt in np.arange(1,np.size(ntarray)+1):\n wall_1thread = ns.test_stats_omp(n0,l,nt,m,1)\n wall_2thread = ns.test_stats_omp(n0,l,nt,m,2)\n wall_3thread = ns.test_stats_omp(n0,l,nt,m,3)\n wall_4thread = ns.test_stats_omp(n0,l,nt,m,4)\n Sup_nt2[nt-1] = wall_1thread/wall_2thread\n Sup_nt3[nt-1] = wall_1thread/wall_3thread\n Sup_nt4[nt-1] = wall_1thread/wall_4thread\n\n# make sure marray and ntarray are suitable to create a plot\n\n m = np.arange(1,np.size(marray)+1)\n nt = np.arange(1,np.size(ntarray)+1)\n\n# plot Sup_nti against nt \n\n plt.figure()\n plt.plot(m, Sup_nt2, 'b', label ='2 Threads')\n plt.plot(nt, Sup_nt3,'r', label ='3 Threads')\n plt.plot(nt, Sup_nt4, 'g', label ='4 Threads')\n plt.xlabel('number of realizations')\n plt.ylabel('speedup')\n plt.title('plot of speedup vs number of realizations')\n plt.legend(loc='best')\n plt.show()\n\n# plot Sup_mi against m\n\n plt.figure()\n plt.plot(m, Sup_m2, 'b', label ='2 Threads')\n plt.plot(nt, Sup_m3,'r', label ='3 Threads')\n plt.plot(nt, Sup_m4, 'g', label ='4 Threads')\n plt.xlabel('number of new nodes')\n plt.ylabel('speedup')\n plt.title('plot of speedup vs number of new nodes')\n plt.legend(loc='best')\n plt.show()\n \n# plot Sup_nt4 and Sup_m4 against nt and m to compare which has the greater effect\n \n plt.figure()\n plt.plot(nt, Sup_nt4, 'b', label='varying nt 4 threads')\n plt.plot(m, Sup_m4, 'r', label='varying m 4 threads')\n plt.xlabel('number of realizations/new nodes')\n plt.ylabel('speedup')\n plt.title('comparison of speedup when varying m to speed up when varying nt')\n plt.legend(loc='best')", "def run_optimisation(model_path, tank1_outflow, tank2_outflow, tank3_outflow,\n h1_final, h2_final, h3_final, max_control, sim_control,\n h10=20.0, h20=20.0, h30=20.0, alpha1=0.5, alpha2=0.5,\n alpha3=0.5, ipopt_tolerance=1e-3,\n t_start=0, t_final=50.0, elements_number=50):\n # 2. 
Compute initial guess trajectories by means of simulation\n # Compile the optimization initialization model\n init_sim_fmu = compile_fmu(\"TanksPkg.ThreeTanks\", model_path)\n # Load the model\n simulation_model = load_fmu(init_sim_fmu)\n set_model_parameters(simulation_model,\n {'u': sim_control, \"h10\": h10, \"h20\": h20, \"h30\": h30,\n \"C1\": tank1_outflow, \"C2\": tank2_outflow,\n \"C3\": tank3_outflow, \"alpha1\": alpha1,\n \"alpha2\": alpha2, \"alpha3\": alpha3})\n init_result = simulation_model.simulate(start_time=t_start,\n final_time=t_final)\n # 3. Solve the optimal control problem\n # Compile and load optimization problem\n optimisation_model = \"TanksPkg.three_tanks_time_optimal\"\n op = transfer_optimization_problem(optimisation_model, model_path)\n # Set parameters\n set_model_parameters(op, {\"h10\": h10, \"h20\": h20, \"h30\": h30,\n 'h1_final': h1_final, 'h2_final': h2_final,\n 'h3_final': h3_final, \"C1\": tank1_outflow,\n \"C2\": tank2_outflow, \"C3\": tank3_outflow,\n \"alpha1\": alpha1, \"alpha2\": alpha2,\n \"alpha3\": alpha3, 'u_max': max_control})\n\n # Set options\n opt_options = op.optimize_options()\n opt_options['n_e'] = elements_number\n opt_options['variable_scaling'] = False\n opt_options['init_traj'] = init_result\n opt_options['IPOPT_options']['tol'] = ipopt_tolerance\n opt_options['verbosity'] = 1\n # Solve the optimal control problem\n res = op.optimize(options=opt_options)\n opt_result = {\"h1\": res['h1'], \"h2\": res['h2'], \"h3\": res['h3'],\n \"u\": res['u'], \"time\": res['time']}\n return opt_result", "def compile_sim(self, n_jobs=(1 + mp.cpu_count()//2), args=[]):\n\t\tsp.call(['make', '-j', str(n_jobs), '-f', self.makefile_fn] + args)", "def crossValidate(k, epochs, hyperparams, data, trainFunc, testFunc, report=None):\n \n if not (report == None):\n tabs = '\\t' * report;\n print tabs, 'Performing %d-fold cross validation...' 
% k;\n \n # create vars to save the best hyperparameters and their performance\n bestTheta = None;\n bestRate = float(\"-inf\");\n \n # create queue for worker threads to post results to\n queue = mp.Queue();\n \n # create train/test folds\n numPerFold = len(data) // k;\n numLeftOver = len(data) % k;\n folds = [data[i*numPerFold:i*numPerFold+numPerFold] for i in range(0, k)];\n if numLeftOver > 0:\n folds[-1].extend(data[-numLeftOver:]); \n \n # create a list of tuples; each tuple defining a unique assignment of hyperparameters \n thetas = list(itertools.product(*hyperparams));\n \n # create worker threads try all combinations of hyperparameters \n workers = []; \n for theta in thetas: \n p = mp.Process(target=cvWorker, args=(epochs, theta, folds, trainFunc, \\\n testFunc, report, queue));\n workers.append(p)\n \n # start worker threads and wait for them to finish\n for p in workers:\n p.start();\n for p in workers:\n p.join()\n \n if not (report == None):\n print tabs, 'All worker threads have terminated.';\n \n # read results out of queue \n while not queue.empty():\n [theta, rate] = queue.get();\n if rate > bestRate:\n bestTheta = theta\n bestRate = rate;\n \n return bestTheta;", "def perform_mult_steps(self, num_steps, tc, batch_size):\n\n self.sample_v()\n self.cli.empty().print(self.titlestr % (\"k\", \"objective\", *[f\"lm_{i} x constr_{i}\" for i in range(len(self.lagrange_mults))]))\n\n # perform a min step\n for s in range(num_steps):\n batch = tc.sample(batch_size, ['add_act', 'grad_ranking', 'add_pi'])\n self.perform_step(s, batch)", "def run_experiment(out_root, num_procs, trials_per_setting, **kwargs):\n # TODO: UPDATE DOCS!\n param_dicts = product_of_dict_lists(kwargs)\n out_file = (\n out_root\n + \"-\".join(\n [\n \"{}={}\".format(key, value[0])\n for key, value in kwargs.items()\n if len(kwargs[key]) == 1\n ]\n )\n + \".csv\"\n )\n\n # build list of parameters\n parameters = []\n for param_idx in range(len(param_dicts)):\n params = param_dicts[param_idx]\n params[\"param_idx\"] = param_idx\n for trial_idx in range(trials_per_setting):\n t_params = dict(params) # copy so that can vary trial number\n t_params[\"trial_idx\"] = trial_idx\n parameters.append(t_params)\n # parameters = [params for _ in range(trials_per_setting)]\n # send work to pool, wrapped in a progress bar\n procs = Pool(num_procs)\n data = pd.concat(\n list(\n tqdm.tqdm(procs.imap(run_trial_from_kw, parameters), total=len(parameters))\n ),\n ignore_index=True,\n )\n # write output\n data.to_csv(out_file)", "def inter_op_dp(\n n_layers: int,\n n_devices: int,\n n_microbatches: int,\n submesh_shapes: List[Tuple[int, int]],\n intra_compute_costs,\n max_n_succ_stages,\n):\n min_cost = np.inf\n best_solution = None\n prev_intra_cost = 0.0\n gap = 1e-6\n\n submesh_sizes: list = NumbaList()\n for n, m in submesh_shapes:\n submesh_sizes.append(n * m)\n\n for intra_cost in np.sort(np.unique(intra_compute_costs)):\n if intra_cost - prev_intra_cost < gap:\n continue\n if intra_cost * n_microbatches >= min_cost:\n break\n\n # Optimization that lifts a check for stage_cost <= t_max_stage_cost\n # out of the inner dp loop (see alpa/~/stage_construction.py#L121).\n # This yields a ~100-200x improvement over the baseline implementation.\n valid_cost_idxs = np.transpose((intra_compute_costs <= intra_cost).nonzero())\n # This corresponds to the i of k <= i <= K from eqn. 
3 in the alpa paper.\n valid_cost_idxs = valid_cost_idxs[\n valid_cost_idxs[:, 0] <= valid_cost_idxs[:, 1]\n ]\n valid_costs = intra_compute_costs[tuple(valid_cost_idxs.T)]\n valid_idxs_costs = np.hstack([valid_cost_idxs, valid_costs[:, np.newaxis]])\n\n F, F_stage_max, F_argmin = inter_op_dp_inner_loop(\n n_layers,\n n_devices,\n submesh_sizes,\n valid_idxs_costs,\n max_n_succ_stages,\n )\n\n best_n_stages = F[:, 0, n_devices].argmin()\n all_stages_cost = F[best_n_stages, 0, n_devices]\n slowest_stage_cost = F_stage_max[best_n_stages, 0, n_devices]\n if np.isinf(all_stages_cost):\n continue\n slowest_stage_total_cost = (n_microbatches - 1) * slowest_stage_cost\n\n if all_stages_cost + slowest_stage_total_cost < min_cost:\n min_cost = all_stages_cost + slowest_stage_total_cost\n best_solution = best_n_stages, F_argmin\n prev_intra_cost = intra_cost\n\n assert best_solution is not None\n best_n_stages, F_argmin = best_solution\n optimal_layer_submesh_assignments = get_optimal_submesh_assignments(\n best_n_stages, F_argmin, n_devices, n_layers, submesh_sizes\n )\n return optimal_layer_submesh_assignments", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n 
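# load the serialized contents back into the freshly created Model object\n 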
serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the 
protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,pickled_finer_model,computational_time", "def test_increment_input_with_threads():\r\n a = [0]\r\n Parallel(n_jobs=2, backend=\"threading\")(\r\n delayed(increment_input)(a) for _ in range(5))\r\n nose.tools.assert_equal(a, [5])", "def arcSubmit(model_list, config,rootDir, verbose=False, resubmit=None, runCode=None):\r\n\tjobID = []\r\n\tfor model in model_list:\r\n\t\t# put some dummy data in the ouput file\r\n\t\tmodelSubmitName=model.submit()\r\n\t\tif verbose: print \"Submitting \",modelSubmitName\r\n\t\twith cd(model.dirPath):\r\n\t\t\tjID = subprocess.check_output(\"sbatch -J %s --export=ALL %s\" % (model.name(), modelSubmitName), shell=True) # submit the script (change devel after, and shouldn't have to ssh in)\r\n\t\tjobID.append(jID[20:-1])\r\n\t\t\r\n\tjobIDstr=':$'.join(jobID) # make single string appropriately formatted of job ids..\r\n\t# now re-run this entire script so that the next iteration in the algorithm.\r\n\t# can be run\r\n\tif resubmit is not None:\r\n\t\t# Submit the next job in the iteration. runOptimise is very quick so no need to submit to ARC again - just run on the front end.\r\n\t\t\r\n\t\tjobName='RE'+config.name()\r\n\t\t# TODO move to better python syntax for var printing. Think can use named vars in...\r\n\t\tcmd = [\"sbatch -p devel --export=ALL --time=10 --dependency=afterany:%s -J %s \"%(jobIDstr,jobName)]\r\n\t\tcmd.extend(resubmit) # add the arguments in including the programme to run..\r\n\t\t#cmd = resubmit\r\n\t\tcmd=' '.join(cmd) # convert to one string.\r\n\t\tcmd = cmd + \" &>progressResubmit.txt\"\r\n\t\tif verbose: print \"Next iteration cmd is \", cmd\r\n\t\tjid = subprocess.check_output(cmd, shell=True) # submit the script. 
Good to remove shell=True \r\n\t\t#subprocess.check_output(cmd, shell=True)\r\n\t\tif verbose: print \"Job ID for next iteration is %s\"%jid[20:-1]\r\n\r\n\treturn True", "def main():\n for opt in optimizations:\n compile_command = [\"g++\", \"main.cpp\", f\"-O{opt}\", \"-lpthread\"]\n run(compile_command, check=True)\n for threads in num_threads:\n print(f\"{opt=}, {threads=}\", end=\"\")\n stdout.flush()\n test_command = ['./a.out', str(iterations), str(threads)]\n total = 0\n for samples in range(1, repeats_for_average + 1):\n print(\".\", end=\"\")\n stdout.flush()\n output = run(test_command, check=True, capture_output=True).stdout\n total += int(output.split()[-2]) / 1000\n print(f\"\\t{total / samples:.03f}\")", "def tune(runner, kernel_options, device_options, tuning_options):\n\n if not bayes_opt_present:\n raise ImportError(\"Error: optional dependency Bayesian Optimization not installed\")\n n_iter = tuning_options.strategy_options.get(\"max_fevals\", 100)\n tuning_options[\"scaling\"] = True\n results = []\n\n #function to pass to the optimizer\n def func(params):\n print(params)\n param_config = list(params.values())\n print(param_config)\n if not util.config_valid(param_config, tuning_options, runner.dev.max_threads):\n return {\n 'status': STATUS_FAIL\n }\n return minimize._cost_func(param_config, kernel_options, tuning_options, runner, results)\n\n minimize.get_bounds_x0_eps(tuning_options) # necessary to have EPS set\n tune_params = tuning_options.tune_params\n space = dict()\n for tune_param in tune_params.keys():\n space[tune_param] = hp.choice(tune_param, tune_params[tune_param])\n\n trials = base.Trials()\n fmin(func, space, algo=tpe.suggest, max_evals=n_iter, trials=trials)\n\n return results, runner.dev.get_environment()", "def train(self):\n\n # Step 1 - Obtain optimized weights for final model ------------------------------------------------------------\n\n t0 = time()\n\n # Check the training data for potential hazardous problems\n self.check_training_samples()\n\n opt_results = pd.DataFrame()\n kf_opt = StratifiedKFold(n_splits=self.kfold_cv, shuffle=True)\n rep_str, opt_str = '', ''\n\n if self.verbose:\n print('\\n\\n__ TRAINING STEP 1/2 \\_______________________________')\n print(' \\ Train with reverse %d-fold CV - %d time(s) /\\n' % (self.kfold_cv, self.n_repeat))\n\n for i_rep in range(self.n_repeat):\n\n if self.verbose:\n rep_str = '\\n_/--- Rep %d/%d' % (i_rep + 1, self.n_repeat)\n\n # Sample clf-net parameters to test\n param = [\n np.random.normal(loc=self.n_estimators,\n scale=self.n_estimators*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_impurity_decrease,\n scale=self.min_impurity_decrease*self.param_tune_scale,\n size=self.kfold_cv),\n np.random.normal(loc=self.min_sample_leaf,\n scale=np.ceil(self.min_sample_leaf*self.param_tune_scale),\n size=self.kfold_cv),\n ]\n scores = list()\n\n for j_fold, (opt_idxs, cv_train_idxs) in enumerate(kf_opt.split(\n X=self.datas[self.train_idx].nidx_train,\n y=self.datas[self.train_idx].gen_labels(condense_labels=True))):\n\n if self.verbose:\n print(rep_str + ' - CV %d/%d ---\\_____\\n' % (j_fold + 1, self.kfold_cv))\n\n # set clf-net parameters\n self.n_estimators = param[0][j_fold]\n self.min_impurity_decrease = param[1][j_fold]\n self.min_sample_leaf = param[2][j_fold]\n self.clf_net = self.gen_rfc()\n\n # Split data\n opt_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i in opt_idxs])\n cv_train_nidxs = np.array([self.datas[self.train_idx].nidx_train[i] for i 
in cv_train_idxs])\n\n # Partition train/eval nidx for reverse k-fold CV training\n _, _, opt_eval_nidxs, opt_train_nidxs = train_test_split(\n np.zeros(len(opt_nidxs)),\n opt_nidxs,\n test_size=1/(self.kfold_cv - 1),\n shuffle=True,\n stratify=self.datas[self.train_idx].gen_labels(nidxs=opt_nidxs, condense_labels=True))\n\n # Train clfs\n if self.verbose:\n print('\\n> Training base classifiers ...')\n self._train_clfs(train_nidxs=cv_train_nidxs)\n\n # Evaluate train with cv_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_train partition ...')\n self.clfs_predict(nidxs_target=cv_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n eval_idx=self.train_idx)\n\n # Evaluate pre-optimization with opt_train data\n if self.verbose:\n print('\\n> Evaluating base classifiers with cv_eval partition ...')\n cv_res = self.clfs_predict(nidxs_target=opt_train_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n\n # Train clf-opt with opt_train partition results\n if self.verbose:\n print('\\n> Training clf-opt ...')\n self._train_clf_opt(predictions=cv_res)\n\n # Evaluate clf-opt with opt_eval partition\n if self.verbose:\n print('\\n> Evaluating optimized classifier with opt_test partition ...')\n opt_res = self.clfs_predict(nidxs_target=opt_eval_nidxs, data=self.datas[self.train_idx], to_eval=True,\n nidxs_train=cv_train_nidxs, eval_idx=self.train_idx)\n opt_results = opt_results.append(opt_res, ignore_index=True)\n\n # Append score to optimize clf-net parameter\n r = self.scores(opt_res['ytruth'], opt_res['ynet'])\n if not self.aim:\n scores.append(r['aucroc'])\n else:\n aim = self.aim.replace('hard', '')\n scores.append(r[aim])\n\n # reset link2featidx\n self.datas[self.train_idx].link2featidx = {}\n\n # Aggregate results from clf-net parameter search\n self._set_clf_net_param(param, scores)\n\n # STEP 2 - Train final model -----------------------------------------------------------------------------------\n # .clf_opt is already trained through previous iterations by using warm_start\n\n if self.verbose:\n print('\\n__ TRAINING STEP 2/2 \\_______________________________')\n print(' \\ Train final model with all train data /\\n')\n\n # Train clfs with all the data\n self._train_clfs()\n\n # Evaluate final clf-opt with all data\n print('\\n> Evaluating final classifier ...')\n self.clfs_predict(nidxs_target=self.datas[self.train_idx].nidx_train, to_eval=True, eval_idx=self.train_idx)\n print('** Because this is evaluating with the training data, classifier performances should be very high.')\n\n # Assign model ID - this is here so that if retrained, it would be known that it is not the same model anymore\n self.id = 'm_%s' % gen_id()\n\n if self.verbose:\n te = (time() - t0) / 60\n print('\\n Training took %.1f minutes on %d processors' % (te, os.cpu_count()))\n print('\\n__ __________')\n print(' \\ Training complete! 
/\\n')\n\n return opt_results", "def _update_parallel_coef_constraints(self, x):\n n_features = x.shape[1]\n xi_final = np.zeros((n_features, n_features))\n\n # Todo: parallelize this for loop with Multiprocessing/joblib\n if self.model_subset is None:\n self.model_subset = range(n_features)\n elif np.max(np.abs(self.model_subset)) >= n_features:\n raise ValueError(\n \"A value in model_subset is larger than the number \"\n \"of features in the candidate library\"\n )\n for i in self.model_subset:\n print(\"Model \", i)\n xi = cp.Variable(n_features)\n # Note that norm choice below must be convex,\n # so thresholder must be L1 or L2\n if (self.thresholder).lower() in (\"l1\", \"weighted_l1\"):\n if self.thresholds is None:\n cost = cp.sum_squares(x[:, i] - x @ xi) + self.threshold * cp.norm1(\n xi\n )\n else:\n cost = cp.sum_squares(x[:, i] - x @ xi) + cp.norm1(\n self.thresholds[i, :] @ xi\n )\n if (self.thresholder).lower() in (\"l2\", \"weighted_l2\"):\n if self.thresholds is None:\n cost = (\n cp.sum_squares(x[:, i] - x @ xi)\n + self.threshold * cp.norm2(xi) ** 2\n )\n else:\n cost = (\n cp.sum_squares(x[:, i] - x @ xi)\n + cp.norm2(self.thresholds[i, :] @ xi) ** 2\n )\n prob = cp.Problem(\n cp.Minimize(cost),\n [xi[i] == 0.0],\n )\n try:\n prob.solve(\n max_iter=self.max_iter,\n eps_abs=self.tol,\n eps_rel=self.tol,\n verbose=self.verbose_cvxpy,\n )\n if xi.value is None:\n warnings.warn(\n \"Infeasible solve on iteration \"\n + str(i)\n + \", try changing your library\",\n ConvergenceWarning,\n )\n xi_final[:, i] = xi.value\n # Annoying error coming from L2 norm switching to use the ECOS\n # solver, which uses \"max_iters\" instead of \"max_iter\", and\n # similar semantic changes for the other variables.\n except TypeError:\n prob.solve(\n max_iters=self.max_iter,\n abstol=self.tol,\n reltol=self.tol,\n verbose=self.verbose_cvxpy,\n )\n if xi.value is None:\n warnings.warn(\n \"Infeasible solve on iteration \"\n + str(i)\n + \", try changing your library\",\n ConvergenceWarning,\n )\n xi_final[:, i] = xi.value\n except cp.error.SolverError:\n print(\"Solver failed on model \", str(i), \", setting coefs to zeros\")\n xi_final[:, i] = np.zeros(n_features)\n return xi_final", "def expected_performance(x):\n pm_pars = {'alpha': x[0], 'beta': x[1]}\n rm_pars = {'theta': x[2]}\n \n T = 100 #number of trials\n n_b = 2 #number of bandits\n rho = 0.01 #switch probability of the arm-reward contingencies\n \n n_env = 100 #number of environments\n n_blocks = 1 #number of experimental blocks\n ep = 0 # expected performance\n \n for n in range(n_env):\n #generate n_env mutli-armed bandit environmets\n env = MultiArmedBandit(T, rho = rho, n_b = n_b)\n pm = RescorlaWagner(env, n_b)\n for m in range(n_blocks):\n #in each environment repeat the experiment n_blocks times\n rm = SoftMaxResponses([], pm, d_r)\n rm.get_responses(pm_pars, rm_pars)\n #For each block compute the expected performance.\n ep += env.expected_performance()\n \n return ep/(n_env*n_blocks)", "def run_in_parallel(n_proc, target, all_args):\n curr_item = Counter()\n def worker():\n index = curr_item.return_and_increment()\n while index < len(all_args):\n args = all_args[index]\n target(*args)\n index = curr_item.return_and_increment()\n return\n\n fork_and_wait(n_proc, worker)", "def greedy_alignment(embed1, embed2, top_k, nums_threads, metric, normalize, csls_k, accurate):\n t = time.time()\n sim_mat = sim(embed1, embed2, metric=metric, normalize=normalize, csls_k=csls_k)\n num = sim_mat.shape[0]\n if nums_threads > 1:\n hits = 
[0] * len(top_k)\n mr, mrr = 0, 0\n alignment_rest = set()\n rests = list()\n search_tasks = task_divide(np.array(range(num)), nums_threads)\n pool = multiprocessing.Pool(processes=len(search_tasks))\n for task in search_tasks:\n mat = sim_mat[task, :]\n rests.append(pool.apply_async(calculate_rank, (task, mat, top_k, accurate, num)))\n pool.close()\n pool.join()\n for rest in rests:\n sub_mr, sub_mrr, sub_hits, sub_hits1_rest = rest.get()\n mr += sub_mr\n mrr += sub_mrr\n hits += np.array(sub_hits)\n alignment_rest |= sub_hits1_rest\n else:\n mr, mrr, hits, alignment_rest = calculate_rank(list(range(num)), sim_mat, top_k, accurate, num)\n assert len(alignment_rest) == num\n hits = np.array(hits) / num * 100\n for i in range(len(hits)):\n hits[i] = round(hits[i], 3)\n cost = time.time() - t\n if accurate:\n if csls_k > 0:\n print(\"accurate results with csls: csls={}, hits@{} = {}%, mr = {:.3f}, mrr = {:.6f}, time = {:.3f} s \".\n format(csls_k, top_k, hits, mr, mrr, cost))\n else:\n print(\"accurate results: hits@{} = {}%, mr = {:.3f}, mrr = {:.6f}, time = {:.3f} s \".\n format(top_k, hits, mr, mrr, cost))\n else:\n if csls_k > 0:\n print(\"quick results with csls: csls={}, hits@{} = {}%, time = {:.3f} s \".format(csls_k, top_k, hits, cost))\n else:\n print(\"quick results: hits@{} = {}%, time = {:.3f} s \".format(top_k, hits, cost))\n hits1 = hits[0]\n del sim_mat\n gc.collect()\n return alignment_rest, hits1, mr, mrr", "def main():\n model = sys.argv[1]\n maxfun = int(sys.argv[2])\n n_threads = int(sys.argv[3])\n\n # Validate input.\n assert maxfun >= 0, \"Maximum number of function evaluations cannot be negative.\"\n assert n_threads >= 1 or n_threads == -1, (\n \"Use -1 to impose no restrictions on maximum number of threads or choose a \"\n \"number higher than zero.\"\n )\n\n # Set number of threads\n os.environ[\"NUMBA_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"MKL_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"OMP_NUM_THREADS\"] = f\"{n_threads}\"\n os.environ[\"NUMEXPR_NUM_THREADS\"] = f\"{n_threads}\"\n\n # Late import of respy to ensure that environment variables are read by Numpy, etc..\n import respy as rp\n\n # Get model\n params, options = rp.get_example_model(model, with_data=False)\n\n # Simulate the data\n simulate = rp.get_simulate_func(params, options)\n df = simulate(params)\n\n # Get the criterion function and the parameter vector.\n crit_func = rp.get_log_like_func(params, options, df)\n\n # Run the estimation\n start = dt.datetime.now()\n\n for _ in range(maxfun):\n crit_func(params)\n\n end = dt.datetime.now()\n\n # Aggregate information\n output = {\n \"model\": model,\n \"maxfun\": maxfun,\n \"n_threads\": n_threads,\n \"start\": str(start),\n \"end\": str(end),\n \"duration\": str(end - start),\n }\n\n # Save time to file\n with open(\"scalability_results.txt\", \"a+\") as file:\n file.write(json.dumps(output))\n file.write(\"\\n\")", "def timing_gemm(trials, k):\n # values to test\n vals = (100, 300, 500, 1000, 1500, 2000, 2500, 3000)\n dtypes = ('float64', 'float32')\n trans_tuple = ('n', 't')\n bp_total = 0.0\n np_total = 0.0\n\n for n in vals:\n # test all combinations of all possible values\n for (dtype, trans_a, trans_b) in product(dtypes, trans_tuple, trans_tuple):\n bp_time, np_time = timing_test(dtype, trans_a, trans_b, n, k, trials)\n bp_total += bp_time\n np_total += np_time\n\n print(\"\\nk: %d, m=n: %d, BLASpy Average: %.5fs, NumPy Average: %.5fs\"\n % (k, n, bp_total / 8, np_total / 8))", "def preprocessing(pairs, nb=4):\n generated = 
Parallel(n_jobs=nb, verbose=5)(delayed(_load_brick)(*p) for p in pairs)\n return generated", "def parallel_calculate_axyzc(\n molecules,\n options=None,\n n_cores=-1,\n show_progress=True,\n scr=None,\n cmd=XTB_CMD,\n):\n\n if scr is None:\n scr = \"_tmp_xtb_parallel_\"\n\n if n_cores == -1:\n n_cores = env.get_available_cores()\n\n # Ensure scratch directories\n pathlib.Path(scr).mkdir(parents=True, exist_ok=True)\n\n if show_progress:\n pbar = tqdm(\n total=len(molecules),\n desc=f\"XTB Parallel({n_cores})\",\n **constants.TQDM_OPTIONS,\n )\n\n # Pool\n xtb_options = {\"scr\": scr, \"cmd\": cmd, \"options\": options}\n\n # TODO Add this worker test to test_xtb\n # TEST\n # properties = _worker_calculate_axyzc(\n # molecules[0],\n # debug=True,\n # super_debug=True,\n # **options\n # )\n # print(properties)\n # assert False\n\n func = functools.partial(_worker_calculate_axyzc, **xtb_options)\n p = multiprocessing.Pool(processes=n_cores)\n\n try:\n results_iter = p.imap(func, molecules, chunksize=1)\n results = []\n for result in results_iter:\n\n if COLUMN_ENERGY not in result:\n results[COLUMN_ENERGY] = np.float(\"nan\")\n\n # Update the progress bar\n if show_progress:\n pbar.update(1)\n\n results.append(result)\n\n except KeyboardInterrupt:\n misc.eprint(\"got ^C while running pool of XTB workers...\")\n p.terminate()\n\n except Exception as e:\n misc.eprint(\"got exception: %r, terminating the pool\" % (e,))\n p.terminate()\n\n finally:\n p.terminate()\n\n # End the progress\n if show_progress:\n pbar.close()\n\n # TODO Clean scr dir for parallel folders, is the parallel folders needed\n # if we use tempfile?\n\n return results", "def process(data, cluster_criteria, method = \"PP\", \\\n min_height = 0, pixel_size = 0, \\\n relax = 0, stop = 0, \\\n verbose = True, interactive = False,\n n_jobs = 1, nsteps = 1 ):\n\n#==============================================================================#\n \"\"\"\n Initial prep of key variables\n \"\"\"\n\n self = Acorns()\n start = time.time()\n\n # User input information\n self.cluster_criteria = cluster_criteria\n\n if np.size(relax) == 1:\n self.relax = relax if (relax != 0) else -1.0\n relaxcond = True if (relax != 0) else False\n else:\n self.relax = relax\n relaxcond = True\n\n if method == \"PP\":\n self.method = 0\n elif method == \"PPV\":\n self.method = 1\n elif method == \"PPP\":\n self.method = 2\n else:\n raise ValueError('method {0:s} unknown'.format(method))\n method = str(method)\n\n # Generate some important information:\n self.minnpix_cluster = get_minnpix(self, pixel_size, self.cluster_criteria[0])\n self.min_height = min_height\n self.max_dist = get_maxdist(self, pixel_size)\n self.cluster_criteria[0] = self.max_dist\n self.min_sep = 2.*self.cluster_criteria[0]\n self.nsteps = nsteps\n # Prime the acorns information:\n # cluster_arr will be updated with the indices of new clusters\n self.cluster_arr = gen_cluster_arr(self, data, stop)\n self.clusters = {}\n self.forest = {}\n\n#==============================================================================#\n \"\"\"\n Main controlling routine for acorns\n \"\"\"\n\n # Get the unassigned data array\n find_unassigned_data(self, data, stop)\n\n # Gen KDTree\n tree = generate_kdtree(self)\n\n # Generate the unassigned data array\n unassigned_array_length = len(self.unassigned_data[0,:])\n\n count= 0.0\n if verbose:\n progress_bar = print_to_terminal(self, 0, data, count, \\\n unassigned_array_length, method)\n\n # Cycle through the unassigned array\n starthierarchy = 
time.time()\n for i in range(0, unassigned_array_length):\n\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n\n # Extract the current data point\n data_point = np.array(self.unassigned_data[:,i])\n # Retrieve this data point's location in the data array\n data_idx = get_data_index(self, data, data_point)\n self.cluster_arr[0,i] = int(data_idx)\n\n # Every data point begins as a new cluster\n self.cluster_idx = i\n bud_cluster = Cluster(data_point, data_idx, idx=self.cluster_idx, acorns=self)\n\n # Calculate distances between all data points\n link = get_links(self, i, i, tree, n_jobs)\n\n # Find clusters that are closely associated with the current data\n # point\n linked_clusters = find_linked_clusters(self, data, i, bud_cluster, link)\n\n if (self.method==1) & (len(linked_clusters) >= 1):\n linked_clusters = check_other_components(self, i, i, data_idx, data, linked_clusters, bud_cluster, tree, n_jobs, re=False)\n\n \"\"\"\n\n Notes\n -----\n\n Now try and merge this cluster with surrounding linked_clusters.\n From this point on there are three options for that data_point:\n\n 1. If no linked clusters are found - add the bud cluster to the\n cluster dictionary.\n 2. If a single linked cluster is found - merge the two.\n 3. If multiple linked clusters are found, check the validity of each\n cluster and either merge non-independent clusters or form a\n branch.\n\n This philosophy follows that of agglomerative hierarchical\n clustering techniques. The basic principle is discussed here:\n http://scikit-learn.org/stable/modules/clustering.html under\n \"2.3.6. Hierarchical Clustering\".\n\n A single link measure is used to connect clusters. The strategy is\n adapted from the general methods of:\n\n astrodendro:\n https://github.com/dendrograms/astrodendro\n Copyright (c) 2013 Thomas P. Robitaille, Chris Beaumont, Braden\n MacDonald, and Erik Rosolowsky\n quickclump:\n https://github.com/vojtech-sidorin/quickclump\n Copyright (c) 2016 Vojtech Sidorin\n\n When linking using the \"PPV\" methodology, single link measures may\n be insufficient and additional connectivity constraints are applied.\n Specifically - it is imposed that no two spectral features extracted\n from the same location can be merged into the same cluster.\n\n Additionally, an additional linking strategy is implemented which\n takes into account of the variance in the properties of the linked\n clusters (specifically those selected by the user). 
This is only\n implemented when trying to resolve ambiguities and is used as a way\n of establishing the \"strongest\" links when multiple spectral\n features have been detected.\n\n \"\"\"\n\n if not linked_clusters:\n add_to_cluster_dictionary(self, bud_cluster)\n elif len(linked_clusters) == 1:\n merge_into_cluster(self, data, linked_clusters[0], bud_cluster)\n else:\n resolve_ambiguity(self, data, linked_clusters, bud_cluster)\n\n if verbose:\n progress_bar.progress = 100\n progress_bar.show_progress()\n print('')\n print('')\n\n # Remove insignificant clusters from the clusters dictionary and update\n # the unassigned array\n cluster_list, cluster_indices = update_clusters(self, data)\n\n # Take a second pass at the data without relaxing the linking criteria\n # to pick up any remaining stragglers not linked during the first pass\n if (np.size(self.unassigned_data_updated)>1):\n cluster_list, cluster_indices = relax_steps(self, 0, data, method, verbose, tree, n_jobs, second_pass=True)\n endhierarchy = time.time()-starthierarchy\n\n#==============================================================================#\n \"\"\"\n Secondary controlling routine for acorns implemented if the linking\n criteria are relaxed by the user\n\n \"\"\"\n\n if relaxcond and (not interactive) and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n inc = self.relax/self.nsteps\n cluster_criteria_original = cluster_criteria\n for j in range(1, self.nsteps+1):\n self.cluster_criteria = get_relaxed_cluster_criteria(j*inc, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n endrelax = time.time()-startrelax\n\n elif interactive and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n cluster_criteria_original = cluster_criteria\n #plotting.plot_scatter(self)\n stop = True\n while (not stop): #stop != False:\n self.relax = np.array(eval(input(\"Please enter relax values in list format: \")))\n print('')\n self.cluster_criteria = get_relaxed_cluster_criteria(self.relax, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n #plotting.plot_scatter(self)\n s = str(input(\"Would you like to continue? 
\"))\n print('')\n stop = s in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']\n endrelax = time.time()-startrelax\n\n else:\n startrelax = time.time()\n endrelax = time.time()-startrelax\n\n#==============================================================================#\n \"\"\"\n Tidy everything up for output\n\n \"\"\"\n\n cluster_list, cluster_indices = update_clusters(self, data)\n io.reshape_cluster_array(self, data)\n get_forest(self, verbose)\n\n end = time.time()-start\n\n if verbose:\n print('acorns took {0:0.1f} seconds for completion.'.format(end))\n print('Primary clustering took {0:0.1f} seconds for completion.'.format(endhierarchy))\n if relaxcond==True:\n print('Secondary clustering took {0:0.1f} seconds for completion.'.format(endrelax))\n print('')\n print('acorns found a total of {0} clusters.'.format(len(self.clusters)))\n print('')\n print('A total of {0} data points were used in the search.'.format(len(self.unassigned_data[0,:])))\n print('A total of {0} data points were assigned to clusters.'.format(num_links(self)))\n if (np.size(self.unassigned_data_relax)>1):\n print('A total of {0} data points remain unassigned to clusters.'.format(len(self.unassigned_data_relax[0,:])))\n else:\n print('A total of 0 data points remain unassigned to clusters.')\n print('')\n\n io.housekeeping(self)\n\n return self", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. 
- avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x", "def _fit_multiple(self, X, y, configurations, bracket_num):\n device_used = self.device\n if device_used == 'cuda':\n device_used += f':{self.gpu_ids[bracket_num % self.n_device]}'\n list_toTrain_model = []\n best_config_by_round = []\n\n for i in tqdm(range(bracket_num + 1), desc=f'Bracket {bracket_num}', position=(self.max_rounds-bracket_num), leave=True):\n for contender in range(self.brackets[bracket_num][i]['ni']):\n self.brackets[bracket_num][i]['contenders'][contender] = dict.fromkeys([\n 'hparams', 'score'])\n self.brackets[bracket_num][i]['contenders'][contender]['hparams'] = configurations[contender]['hparams']\n model = self.create_model(\n self.estimator,\n random_state=self.random_state,\n epoch=self.brackets[bracket_num][i]['ri'],\n device=device_used,\n log_path=self.log_path,\n **configurations[contender]['hparams']\n )\n verbose = 0\n list_toTrain_model.append(\n (model, X, y, self.scoring, self.cv, self.n_jobs_cv, verbose))\n\n torch.multiprocessing.set_start_method('spawn', force=True)\n with MyPool(self.n_jobs_model) as p:\n list_toTrain_score = p.starmap(\n self.get_mean_cv_score, list_toTrain_model)\n\n for contender in range(self.brackets[bracket_num][i]['ni']):\n self.brackets[bracket_num][i]['contenders'][contender]['score'] = list_toTrain_score[contender]\n\n configurations = self.get_top_k(\n self.brackets[bracket_num][i]['contenders'],\n k=max(math.floor(\n self.brackets[bracket_num][i]['ni']/self.factor), 1)\n )\n\n best_config = configurations[0].copy()\n best_config_by_round.append({\n 'bracket': bracket_num,\n 'round': i,\n 'epoch': int(self.brackets[bracket_num][i]['ri']),\n **best_config\n })\n\n return best_config_by_round", "def train_q(n=1000):\n for i in range(50):\n p1_strategy = strategies.QStrategy('X')\n p2_strategy = strategies.QStrategy('O')\n p1 = player.Player('X', p1_strategy)\n p2 = player.Player('O', p2_strategy)\n board = tictactoe.Board()\n game = rl_game.Game(p1, p2, board)\n game.play_many(n)\n p1.strategy.save_q()\n p2.strategy.save_q()", "def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if 
self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1", "def _Train(self, limit):\n if len(self.Memory)>BATCH_SIZE: \n # Limit of Agents to Train\n for i in range(limit): \n # 'n' number of rounds to train \n for _ in range(50):\n # Get Batch Data\n experiances = self.Memory.sample()\n # Train Models\n self._Learn(self.Actor[i], self.ActorTarget, self.actorOpt[i], experiances)", "def threadsInBatches_run(l_threadAnalysis):\n index = 1\n if self.numThreads > total:\n self.numThreads = total\n threadFullLoops = int(total / self.numThreads)\n threadRem = total % self.numThreads\n alreadyRunCount = thread_batch(\n l_threadAnalysis,\n threadFullLoops,\n self.numThreads,\n 0)\n nextRunCount = thread_batch(\n l_threadAnalysis,\n 1,\n threadRem,\n alreadyRunCount)", "def map(initial_aerosol_pops, params):\n from parcel_model.parcel import ParcelModelError\n import time\n\n ## Pull model settings from params\n T0, S0, P0 = [params[s] for s in ('T0', 'S0', 'P0')]\n z_top, dt, max_steps = params['z_top'], params['dt'], params['max_steps']\n\n ## Helper method for re-submitting jobs which fail.\n def resubmit(ps, initial_aerosols, dt, max_steps):\n x = time.time()\n\n alpha, gamma, V = ps\n\n while dt >= 0.001:\n ## Try to run the model\n activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n ## If it didn't work, report his and cut the timestep in half\n if not activation_results:\n print \"resubmitting %r with dt=%1.2e\" % (ps, dt/2.,)\n dt = dt/2.\n max_steps = int(max_steps*3.)\n ## If it did work, we're done\n else:\n break\n ## If we still don't have a good result after cutting dt several times,\n ## then report this.\n elapsed = time.time() - x\n if not activation_results:\n print \"FAILED (%1.2e seconds) %r\" % (elapsed, ps)\n else:\n print \"SUCCESS (%1.2e seconds) %r\" % (elapsed, ps)\n return activation_results\n\n results = []\n n, initial_aerosol_pops = initial_aerosol_pops\n n_runs = len(initial_aerosol_pops)\n for i, (initial_aerosols, ps) in enumerate(initial_aerosol_pops):\n print \"EXECUTING RUN %d/%d\" % (i+1, n_runs)\n component_results = {}\n\n ## INDIVIDUAL SPECIES\n param_fail = False\n for aerosol in initial_aerosols:\n species = aerosol.species\n\n #activation_results = RunParcelModels.simulation_pair(ps, [aerosol, ], V, T0, S0, P0, z_top, dt, max_steps)\n activation_results = resubmit(ps, [aerosol, ], dt, max_steps)\n\n if not activation_results:\n results.append((ps, None))\n param_fail = True\n break\n else:\n component_results[species] = activation_results\n\n if not param_fail:\n ## FULL MIXTURE\n #activation_results = RunParcelModels.simulation_pair(ps, initial_aerosols, V, T0, S0, P0, z_top, dt, max_steps)\n activation_results = resubmit(ps, initial_aerosols, dt, max_steps)\n\n if not activation_results:\n results.append((ps, None))\n continue\n\n component_results['mixture'] = activation_results\n results.append((ps, component_results))\n yield (n, results)", "def test_n_jobs(self):\n for n_jobs in [1, 6]:\n with self.subTest(input='list', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data)))\n\n with self.subTest(input='numpy', n_jobs=n_jobs):\n self.assertEqual(get_n_chunks(self.test_data_numpy, iterable_len=None, chunk_size=None, n_splits=None,\n n_jobs=n_jobs), min(4 * n_jobs, len(self.test_data_numpy)))", "def 
parratt(\n q,\n layers,\n scale=1.0,\n bkg=0,\n threads=0,\n) -> np.array:\n qvals = np.asfarray(q)\n flatq = qvals.ravel()\n\n nlayers = layers.shape[0] - 2\n npnts = flatq.size\n\n kn = np.zeros((npnts, nlayers + 2), np.complex128)\n sld = np.zeros(nlayers + 2, np.complex128)\n\n # addition of TINY is to ensure the correct branch cut\n # in the complex sqrt calculation of kn.\n sld[1:] += (\n (layers[1:, 1] - layers[0, 1]) + 1j * (np.abs(layers[1:, 2]) + TINY)\n ) * 1.0e-6\n\n # calculate wavevector in each layer, for each Q point.\n # kn.shape = (npnts, nlayers)\n kn[:] = np.sqrt(flatq[:, np.newaxis] ** 2.0 / 4.0 - 4.0 * np.pi * sld)\n\n # reflectances for each layer\n # rj.shape = (npnts, nlayers + 1)\n rj = kn[:, :-1] - kn[:, 1:]\n rj /= kn[:, :-1] + kn[:, 1:]\n rj *= np.exp(-2.0 * kn[:, :-1] * kn[:, 1:] * layers[1:, 3] ** 2)\n\n beta = np.exp(\n -2.0\n * kn[:, 1 : nlayers + 1]\n * 1j\n * np.fabs(layers[1 : nlayers + 1, 0])\n )\n beta_rj = beta * rj[:, 0:nlayers]\n\n RRJ_1 = rj[:, -1]\n for idx in range(nlayers - 1, -1, -1):\n # RRJ = (rj[:, idx] + RRJ_1 * beta[:, idx]) / (1 + rj[:, idx] * RRJ_1 * beta[:, idx])\n RRJ = (rj[:, idx] + RRJ_1 * beta[:, idx]) / (\n 1 + RRJ_1 * beta_rj[:, idx]\n )\n RRJ_1 = RRJ\n\n reflectivity = RRJ_1 * np.conj(RRJ_1)\n reflectivity *= scale\n reflectivity += bkg\n return np.real(np.reshape(reflectivity, qvals.shape))", "def run_RL_sync(mapname,n_trials = int, seed = int,alpha = 0.15, beta = 0.2, tau = 5, gamma = 0.9, max_steps = 1000, reward_size = 100):\n\n # Softmax can't be from external file, because multiprocessing messes up the seed values\n np.random.seed(seed)\n def softmax_action(action_weights = [], tau = int):\n action_indices = list(range(len(action_weights)))\n f = np.exp((action_weights - np.max(action_weights))/tau) # shift values\n action_prob = f / f.sum(axis=0)\n action_index = np.random.choice(action_indices, 1, p=action_prob)\n return action_index[0]\n\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n time0 = time.perf_counter()\n\n print(\"Running the RL model but with sync !\")\n srate = 500 #sample rate\n \n total_time = int(1.5*srate) #total timesteps or \"time the agent gets to think about moving\"\n\n # Learning Parameters\n parameters = {\"alpha\": alpha\n ,\"beta\": beta\n ,\"gamma\": gamma\n ,\"tau\": tau}\n n_steps = max_steps\n n_trials = n_trials\n \n sub_reward_size = 0 # no subgoals!\n # # # # # # # # # # # # # #\n # # Setting up the map # #\n # # # # # # # # # # # # # #\n \"\"\" The agent begins in a walled grid and has to find \n the goal to obtain a reward.\"\"\"\n # Grid #\n states = create_grid_from_file(map_file=mapname,goal_location = [10,3],reward_size=reward_size,sub_reward_size=sub_reward_size)\n state_set = list(range(int(states.shape[0]*states.shape[1]))) #index of states\n\n #set of actions\n move_name=[\"UP\", \"R-UP\", \"RIGHT\",\"R-DOWN\",\"DOWN\",\"L-DOWN\", \"LEFT\" ,\"LEFT-UP\"] \n moves = [[-1, 0],[-1, 1], [0, 1], [1, 1], [1, 0],[1, -1], [0, -1], [-1, -1]]\n action_set = list(range(len(moves))) #index list\n\n # # # # # # # # # # # # # # # # # # # # # # # #\n # # Setting up the synchronization modules # #\n # # # # # # # # # # # # # # # # # # # # # # # #\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Processing module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # Initial variables #\n\n r2_max = 1 #maximum amplitude of nodes\n drift = .8 #rate of drift between coupling parameters\n\n cg_1 = (30/srate)*np.pi #gamma 
band coupling parameter for input information\n cg_2 = cg_1 + (drift/srate)*2*np.pi #gamma band coupling parameter for actions\n \n damp = 0.3 #damping parameter\n decay = 0.9 #decay parameter\n noise = 0.5 #noise parameter\n\n # Initial matrices #\n\n n_states = len(state_set)\n n_actions= len(action_set)\n\n #Setting up phase code neurons across entire task\n S_Phase = np.zeros((2,states.shape[0],states.shape[1],total_time)) #State phase code units\n A_Phase = np.zeros((2,n_actions,total_time)) #Action phase code units\n\n #Setting up rate code neurons across entire task\n S_Rate = np.zeros((states.shape[0],states.shape[1],total_time)) #State rate code units\n A_Rate = np.zeros((n_actions,total_time)) #Action rate code units\n #State-Action Weight Matrix\n W = np.zeros((states.shape[0],states.shape[1],n_actions))#*0.001 #initial state-action weights\n V = np.zeros((states.shape[0],states.shape[1]))#*0.001 #initial state weights\n\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Control module\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n # MFC #\n # Initial variables \n r2_MFC = 0.7 #maximum amplitude MFC node\n damp_MFC = 0.03 # damping parameter MFC\n acc_slope = 10 # MFC slope parameter ---> steepness of burst probability distribution\n ct = (5/srate)*2*np.pi #theta band coupling parameter for MFC\n\n #Setting up phase code nodes for the MFC\n MFC = np.zeros((2,total_time))\n #Setting up phase code neuron for MFC -> Bernoulli rate code\n Be = 0 \n \"\"\"When the be value as the rate code of MFC\n reaches certain threshold the MFC will send a burst to coupled neurons\"\"\"\n\n # LFC #\n #Module indicating which states should be initiate action-state synchronization\n LFC = np.zeros((states.shape[0],states.shape[1],n_steps))\n\n #Module that gives the right indices to synchronize\n LFC_sync = 0\n\n\n\n # # # # # # # # # # # # # #\n # # Simulation # #\n # # # # # # # # # # # # # #\n\n # Logging dependent variables\n Hit = np.zeros((total_time,n_steps,n_trials)) #log when there is a burst from the MFC\n # Goal_reach = np.zeros((n_steps,n_trials)) #record if goal is reached \n # Move = np.zeros((n_steps,n_trials)) #record move\n # Bernoulli = np.zeros((total_time,n_steps,n_trials)) #Logging the bernoulli process variables (should be in between -.8 and .8)\n # pred_err = np.zeros((states.shape[0],states.shape[1],n_steps,n_trials)) #logging the prediction error\n trial_length = np.zeros((n_trials))\n\n # Recording sync\n sync = np.zeros((n_states,n_actions,n_steps,n_trials)) \n\n \"\"\" L O O P \"\"\"\n\n exploration = 0\n exploration_intent =0\n sync_fail=0\n greedy=0\n for trial in range(n_trials):\n \"\"\"A trial is considered as each journey the actor makes until the goal\n or until it runs out of steps\"\"\"\n at_goal = False\n start_loc = [1,int(states.shape[1]-2)] #start in the top left\n step = 0 \n S_Phase[:,:,:,0] = (2*np.random.random_sample((2,states.shape[0],states.shape[1])))-1 # random starting points processing module\n A_Phase[:,:,0] = (2*np.random.random_sample((2,n_actions)))-1 # idem\n while not at_goal:\n #starting location at first trial\n if step == 0:\n current_loc = start_loc\n else:\n S_Phase[:,:,:,0] = S_Phase[:,:,:,total_time-1] # random starting points processing module\n A_Phase[:,:,0] = A_Phase[:,:,total_time-1] # idem\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Synchronization\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n \n #phase reset\n MFC[:,0]=np.ones((2))*r2_MFC \n\n\n # LFC setting instruction per step: each state is an input\n 
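# (only this flagged cell receives MFC bursts in the trial loop below)\n 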
LFC[current_loc[0],current_loc[1],step] = 1\n\n # What we want is the lfc to indicate the state and then have the LFC sync pro actively select an action based on state action value maps\n \n action_to_sync = softmax_action(action_weights=W[current_loc[0],current_loc[1],:],tau=10)\n if action_to_sync in np.where(W[current_loc[0],current_loc[1],:]== max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=0\n else:\n exploration_intent+=1\n \n \n #Which action does LFC sync to current state\n LFC_sync = int(action_to_sync)\n LFC_desync = list(range(len(moves)))\n LFC_desync.pop(LFC_sync) \n\n # The actor makes the move #\n for t in range(total_time-1):\n\n \n #Update phase code neurons for actions and states in processing module\n #State phase code neurons \n S_Phase[:,:,:,t+1] = update_phase(nodes=S_Phase[:,:,:,t], grid = True, radius=r2_max, damp = damp, coupling = cg_1,multiple=True )\n \n #Action phase code neurons\n A_Phase[:,:,t+1] = update_phase(nodes=A_Phase[:,:,t], grid = False, radius=r2_max, damp = damp, coupling = cg_2,multiple=True )\n\n #Update phase code untis of MFC\n MFC[:,t+1] = update_phase(nodes=MFC[:,t], grid = False, radius=r2_MFC, damp=damp_MFC, coupling=ct,multiple=False)\n\n #MFC rate code neuron-> Bernoulli process\n\n Be = 1/(1 + np.exp(-acc_slope*(MFC[0,t]-1))) # Bernoulli process \n #Bernoulli[time,step,trial] = Be # logging Be value\n\n p = random.random()\n\n if p < Be:\n\n Gaussian = np.random.normal(size = [1,2]) #noise factor as normal distribution\n #Hit[tijd,step,trial] = 1\n \n \n x, y = current_loc[1], current_loc[0]\n\n #the LFC decides which state is paired with which actions\n\n if LFC[y,x,step]:\n #The state the actor is in receives a burst because it is the only input\n S_Phase[:,y,x,t+1] = decay*S_Phase[:,y,x,t] + Gaussian\n\n # and all the actions that are to be synchronized to that state receive a burst\n if type(LFC_sync) is int:\n A_Phase[:,LFC_sync,t+1] = decay*A_Phase[:,LFC_sync,t] + Gaussian\n \n # Desynchronize all other actions !\n for node in LFC_desync:\n A_Phase[:,int(node),t+1] = decay*A_Phase[:,int(node),t] - Gaussian*noise\n\n #Updating rate code units\n #Only the rate code neuron of a single state is updated because the actor can only be in one place at the same time\n S_Rate[current_loc[0],current_loc[1],t]= (1/(1+np.exp(-5*S_Phase[0,current_loc[0],current_loc[1],t]-0.6)))\n A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t]*(W[current_loc[0],current_loc[1],:]+1))*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n #A_Rate[:,t]=(S_Rate[current_loc[0],current_loc[1],t])*(1/(1+np.exp(-5*A_Phase[0,:,t]-0.6)))\n \n # select action\n action_index = int(np.argmax(np.sum(A_Rate[:,:],axis=1)))\n if action_index in np.where(W[current_loc[0],current_loc[1],:] == max(W[current_loc[0],current_loc[1],:]))[0]:\n greedy+=1\n else:\n exploration+=1\n\n if action_index != LFC_sync:\n sync_fail+=1\n \n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n Learning\n \"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\n #update location\n new_loc= update_location(grid = states, loc=current_loc,move = moves[action_index])\n\n #log coordinates for weight matrices\n coordinates = [current_loc[0], current_loc[1], new_loc[0], new_loc[1], action_index] #location coordinates\n\n #update weights according to TD-learning\n V, W, delta, at_goal = update_weights(param=parameters, index=coordinates, V=V, W=W, states=states, reward_size = reward_size)\n\n\n #update_location\n current_loc = new_loc\n step+=1\n if step ==n_steps:\n #print(\"Agent did not reach goal\")\n break\n \n trial_length[trial] = step \n \n 
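# end-of-simulation diagnostics: exploration vs. greedy moves and total steps\n 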
print(\"I took {0} exploratory steps and {1} greedy steps this simulation\".format(exploration,greedy))\n print(\"I intended to explore {} times\".format(exploration_intent))\n print(\"Sync of correct action failed {} times\".format(sync_fail))\n print(\"In this sim I took a total {} steps\".format(np.sum(trial_length)))\n \n time1 = time.perf_counter()\n print(\"For the second model I took {} minutes\".format((time1-time0)/60))\n return trial_length, V", "def backend_train_test_loop(e=None, timeout=30,\n is_compute_contributivity='True',\n is_parallelize=''):\n if is_parallelize == '':\n is_parallelize = None\n else:\n is_parallelize = strtobool(is_parallelize)\n\n from databoard.db_tools import backend_train_test_loop\n is_compute_contributivity = strtobool(is_compute_contributivity)\n backend_train_test_loop(\n e, timeout, is_compute_contributivity, is_parallelize)", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def run_grid_experiment(self):\n # Ask for confirmation - optional.\n if self.flags.confirm:\n input('Press any key to continue')\n\n # Check max number of child processes. 
\n if self.max_concurrent_runs <= 0: # We need at least one proces!\n max_processes = len(os.sched_getaffinity(0))\n else: \n # Take into account the minimum value.\n max_processes = min(len(os.sched_getaffinity(0)), self.max_concurrent_runs)\n self.logger.info('Spanning experiments using {} CPU(s) concurrently.'.format(max_processes))\n\n # Run in as many threads as there are CPUs available to the script.\n with ThreadPool(processes=max_processes) as pool:\n func = partial(GridTesterCPU.run_experiment, self, prefix=\"\")\n pool.map(func, self.experiments_list)\n\n self.logger.info('Grid test experiments finished.')", "def paralll_worker(rank, size,\n target_function=None,\n batch=None,\n fixed_args=None,\n output_queue=None):\n for input in batch:\n print(\"This is process {} out of {} operating on {}\".format(rank, size, input))\n result = target_function(*input, *fixed_args)\n if output_queue is not None:\n output_queue.put((input, result))", "def hyper_parameter_test(elements, args):\n\n greedy_factors = np.linspace(\n args.greedy_start, args.greedy_end, args.greedy_num_samples\n )\n k_values = np.arange(args.k_start - 1, args.k_end, args.k_step) + 1\n k_values = [int(k) for k in k_values]\n if args.gc_prune_test:\n gc_prune = [True, False]\n else:\n gc_prune = [True]\n if args.forest:\n forest = [True, False]\n else:\n forest = [False]\n \n all_runs = {}\n factors = [p for p in product(forest, greedy_factors, k_values, gc_prune)]\n for factor in factors:\n all_runs[factor] = []\n all_signatures_used = []\n splitter = RepeatedKFold(args.n_split, args.n_repeat, random_state = args.random_seed)\n i = 0\n for tree_indexes, search_indexes in splitter.split(elements):\n print(\"current run number:\", i)\n i+=1\n tree_elems = elements[tree_indexes]\n search_elems = elements[search_indexes]\n \n if args.forest:\n forest = VPForest(\n tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size\n )\n tree = VPTree(tree_elems, random=args.random_vp, max_leaf_size=args.leaf_size)\n tree_elem_names = [elem.identifier for elem in tree_elems]\n search_elem_names = [elem.identifier for elem in search_elems]\n all_signatures_used.append((tree_elem_names, search_elem_names))\n start = time.time()\n for factor in factors:\n if factor[0]:\n run_NNS = one_nn_search_run(forest, search_elems, factor, args.parallel)\n else:\n run_NNS = one_nn_search_run(tree, search_elems, factor, args.parallel)\n all_runs[factor].append(run_NNS)\n\n print(\"search time:\", time.time()-start)\n data = NNData(all_runs, all_signatures_used, factors)\n with open(args.o, \"wb\") as f:\n pickle.dump(data, f)", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def rkStep(ebitParams, mySpecies, species, tstep, populationAtT0, populationAtTtstep):\r\n # longer function param calls yes but it speeds it up calculateK by 10%...\r\n # print(\"\\nRunning an RK step... 
\")\r\n\r\n # mySpecies.k1, mySpecies.r1 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, mySpecies.r1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n # mySpecies.k2, mySpecies.r2 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, mySpecies.r2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n # mySpecies.k3, mySpecies.r3 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, mySpecies.r3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n # mySpecies.k4, mySpecies.r4 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, mySpecies.r4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n mySpecies.k1 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n mySpecies.k2 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n mySpecies.k3 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n mySpecies.k4 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n\r\n # print(\"k values for q=1:\")\r\n # print(\"k1 %s\"%mySpecies.k1[1])\r\n # print(\"k2 %s\"%mySpecies.k2[1])\r\n # print(\"k3 %s\"%mySpecies.k3[1])\r\n # print(\"k4 %s\"%mySpecies.k4[1])\r\n\r\n # Updates the population of each charge state in the species.\r\n for qindex in range(0, mySpecies.Z + 1):\r\n # new energy value = ( kT(q-1)(pop gained by q-1) - kT(q)(lost by q) + kT(q+1)(gained by q+1) ) / total change in population \r\n # populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (sum(mySpecies.r1[qindex]) + (2 * sum(mySpecies.r2[qindex]) + sum(mySpecies.r3[qindex]) ) + sum(mySpecies.r4[qindex]) ))\r\n populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (mySpecies.k1[qindex] + (2 * (mySpecies.k2[qindex] + mySpecies.k3[qindex])) + mySpecies.k4[qindex]) )\r\n\r\n # New calculation of time stepped energy\r\n # deltaPop = [loss by q(i) from EI, gain by q(i) from CX or RR]\r\n # for q in range(0, mySpecies.Z+1):\r\n # deltaPop = [(mySpecies.r1[q][i] + (2 * (mySpecies.r2[q][i] + mySpecies.r3[q][i])) + mySpecies.r4[q][i])/6 for i in range(0,2)]\r\n # # print(\"DeltaPop for q=%s\"%q+\": %s\"%deltaPop)\r\n # if q==0:\r\n # try:\r\n # #this one is with gain only...\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1])\r\n # #this one is with gain and loss... 
caused problems\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # elif q==mySpecies.Z:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # try:\r\n # #gain only\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # # gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # else:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # # print(\"lowerQ: %s\"%lowerQ)\r\n # try:\r\n # #gain\r\n # # print(\"energyAtT0[q-1] = %s\"%energyAtT0[q-1] + \", lowerQ[0] = %s\"%lowerQ[0]+\", populationAtT0[q]=%s\"%populationAtT0[q]+\", deltaPop[1]=%s\"%deltaPop[1])\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # #gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n\r\n \r\n # print(\"Initial pop: %s\"%populationAtT0 + \",\\nfinal pop: %s\"%populationAtTtstep)\r\n # print(\"Initial temp: %s\"%energyAtT0 + \",\\nfinal temp: %s\"%energyAtTtstep)\r\n return", "def run_in_parallel(self):\n\t\tfor p in self.parallel_threads:\n\t\t\tp.start()\n\t\tfor p in self.parallel_threads:\n\t\t\tp.join()", "def grid_visibilities_parallel(self, visibilities,min_attenuation = 1e-10, N = 120):\n\n #Find out the number of frequencies to process per thread\n nfreq = len(self.frequencies)\n numperthread = int(np.ceil(nfreq/self.n_obs))\n offset = 0\n nfreqstart = np.zeros(self.n_obs,dtype=int)\n nfreqend = np.zeros(self.n_obs,dtype=int)\n infreq = np.zeros(self.n_obs,dtype=int)\n for i in range(self.n_obs):\n nfreqstart[i] = offset\n nfreqend[i] = offset + numperthread\n\n if(i==self.n_obs-1):\n infreq[i] = nfreq - offset\n else:\n infreq[i] = numperthread\n\n offset+=numperthread\n\n # Set the last process to the number of frequencies\n nfreqend[-1] = nfreq\n\n processes = []\n\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv +1 ) # +1 because these are bin edges.\n \n centres = (ugrid[1:] + ugrid[:-1]) / 2\n \n visgrid = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)), dtype=np.complex128)\n\n\n if(os.path.exists(self.datafile[0][:-4]+\".kernel_weights.npy\")):\n kernel_weights = np.load(self.datafile[0][:-4]+\".kernel_weights.npy\")\n else:\n kernel_weights=None\n \n if kernel_weights is None:\n weights = np.zeros((self.n_uv, self.n_uv, len(self.frequencies)))\n\n visgrid_buff_real = []\n visgrid_buff_imag = []\n weights_buff = []\n\n #Lets split this array up into chunks\n for i in range(self.n_obs):\n\n visgrid_buff_real.append(multiprocessing.RawArray(np.sctype2char(visgrid.real),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_buff_imag.append(multiprocessing.RawArray(np.sctype2char(visgrid.imag),visgrid[:,:,nfreqstart[i]:nfreqend[i]].size))\n visgrid_tmp_real = np.frombuffer(visgrid_buff_real[i])\n 
visgrid_tmp_imag = np.frombuffer(visgrid_buff_imag[i])\n visgrid_tmp_real = visgrid[:,:,nfreqstart[i]:nfreqend[i]].real.flatten()\n visgrid_tmp_imag = visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag.flatten()\n\n\n if(kernel_weights is None):\n weights_buff.append(multiprocessing.RawArray(np.sctype2char(weights),weights[:,:,nfreqstart[i]:nfreqend[i]].size))\n weights_tmp = np.frombuffer(weights_buff[i])\n weights_tmp = weights[:,:,nfreqstart[i]:nfreqend[i]]\n else:\n weights_buff.append(None)\n\n processes.append(multiprocessing.Process(target=self._grid_visibilities_buff,args=(self.n_uv,visgrid_buff_real[i],visgrid_buff_imag[i],weights_buff[i], visibilities[:,nfreqstart[i]:nfreqend[i]],self.frequencies[nfreqstart[i]:nfreqend[i]],self.baselines,centres,self._instr_core.sigma(self.frequencies[nfreqstart[i]:nfreqend[i]]),min_attenuation, N) ))\n\n for p in processes:\n p.start()\n\n for p in processes:\n p.join()\n\n for i in range(self.n_obs):\n\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].real = np.frombuffer(visgrid_buff_real[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n visgrid[:,:,nfreqstart[i]:nfreqend[i]].imag = np.frombuffer(visgrid_buff_imag[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if(kernel_weights is None):\n weights[:,:,nfreqstart[i]:nfreqend[i]] = np.frombuffer(weights_buff[i]).reshape(self.n_uv,self.n_uv,nfreqend[i]-nfreqstart[i])\n\n if kernel_weights is None:\n kernel_weights = weights\n \n visgrid[kernel_weights!=0] /= kernel_weights[kernel_weights!=0]\n\n return visgrid,kernel_weights", "def run(data, params):\n start_time = time.process_time()\n\n # 'n' is the number of candidates, also the number of ranks\n n = params['n']\n # 'N' is the total number of voters\n N = params['N']\n # 's0' is the optional ground truth full ranking of the candidates\n # (distribution is drawn off this full ranking)\n s0 = params['s0']\n\n # Order candidates by non-decreasing pair-wise contest wins \n # (ascending order with lexicographic tie-breaking)\n precedenceMatrix = utils.precedenceMatrix(data, n)\n\n # Credits to Sayan-Paul for starter code for merge sort\n # See: https://github.com/Sayan-Paul/Sort-Library-in-Python/blob/master/sortlib.py\n def mergesort(ar):\n if len(ar)<=1:\n return ar\n middle=len(ar)/2\n left =ar[:middle]\n right=ar[middle:]\n left=mergesort(left)\n right=mergesort(right)\n res=merge(left,right)\n return res\n\n def merge(left,right):\n res=[]\n while len(left)+len(right):\n if len(left)*len(right):\n if precedenceMatrix[left[0],right[0]]<=precedenceMatrix[right[0],left[0]]:\n res.append(left[0])\n left=left[1:]\n else:\n res.append(right[0])\n right=right[1:]\n elif len(left):\n res.append(left[0])\n left=left[1:]\n elif len(right):\n res.append(right[0])\n right=right[1:]\n return res\n\n candidates = [i for i in range(n)]\n sortedCandidates = mergesort(candidates)\n\n sigma = tuple(sortedCandidates)\n\n time_elapsed = (time.process_time() - start_time) * 1000\n\n return ALGORITHM_NAME, utils.generalizedKendallTauDistance(data, sigma, n, N, s0), time_elapsed, sigma", "def sim_run(ini_resource=0.0002, ini_density=(1e4, 1e4), min_size=(1.5e1, 1.5e4), max_size=(2.5e1, 2.5e4),\n spp_names=('Aa', 'Bb'), dilution_rate=0.0, volume=1.0, nsi_spp=(500, 500), nsi_min=200,\n nsi_max=2000, num_sc=(100, 100), time_end=30, time_step=1 / 24, print_time_step=1,\n n_procs=2, n_threads=1, mem_lim=2e9):\n cluster = LocalCluster(n_workers=n_procs, threads_per_worker=n_threads, memory_limit=mem_lim)\n client = Client(cluster)\n\n sbm_out = []\n sbmc = 
dask.delayed(SBMc)(ini_resource=ini_resource, ini_density=ini_density, min_size=min_size, max_size=max_size,\n spp_names=spp_names, num_sc=num_sc, time_end=time_end,\n dilution_rate=dilution_rate, volume=volume)\n sbmi = dask.delayed(SBMi)(ini_resource=ini_resource, ini_density=ini_density, min_size=min_size, max_size=max_size,\n spp_names=spp_names, nsi_spp=nsi_spp, nsi_min=nsi_min, nsi_max=nsi_max, volume=volume,\n time_step=time_step, time_end=time_end, print_time_step=print_time_step,\n dilution_rate=dilution_rate)\n sbm_out.append(sbmc)\n sbm_out.append(sbmi)\n\n with ProgressBar(), dask.config.set(scheduler='processes'):\n output = dask.compute(sbm_out)\n\n client.close()\n cluster.close()\n return output", "def cache_thetas_multicore(eta, povs, n_cores, start=0, stop=(len(adjusted_adj_list_4.keys())-1)):\n import os\n import multiprocessing\n processes=[]\n for x in range(n_cores):\n theta_lists = adjusted_adj_list_4.keys()[start:stop]\n theta_lists = np.array_split(theta_lists, n_cores)\n p = multiprocessing.Process(target=cache_thetas,\n args = [eta, povs,theta_lists[x][0],\n theta_lists[x][-1]])\n p.start()\n processes.append(p)\n for p in processes:\n p.join()", "def run_and_evaluate():\n tsp_problems = read_all_problems()\n # Empty list of metrics\n results = []\n for problem in tqdm.tqdm(tsp_problems):\n # As random factors are involved repeat experiments a couple of times\n best_routes_base = []\n best_routes_af = []\n best_routes_ms = []\n base_times = []\n af_times = []\n ms_times = []\n for i in range(10):\n # Base solution\n start_time = timeit.default_timer()\n best_route_base = solve_tsp_basic(problem)\n base_time = timeit.default_timer() - start_time\n best_routes_base.append(Fitness(route=best_route_base).route_distance())\n base_times.append(base_time)\n\n # AF clustering solution\n start_time = timeit.default_timer()\n best_route_af = solve_tsp_affinity_propagation(problem)\n af_time = timeit.default_timer() - start_time\n best_routes_af.append(Fitness(route=best_route_af).route_distance())\n af_times.append(af_time)\n\n # MS solution\n start_time = timeit.default_timer()\n best_route_ms = solve_mean_shift(problem)\n ms_time = timeit.default_timer() - start_time\n best_routes_ms.append(Fitness(route=best_route_ms).route_distance())\n ms_times.append(ms_time)\n\n results.append(\n {\n \"problem name\": problem.name,\n \"optimal solution\": find_route_optimal_route_length(problem),\n \"baseline tour length\": mean(best_routes_base),\n \"af clustering tour length\": mean(best_routes_af),\n \"ms clustering tour length\": mean(best_routes_ms),\n \"baseline algorithm time\": mean(base_times),\n \"af clustering algorithm time\": mean(af_times),\n \"ms clustering algorithm time\": mean(ms_times),\n }\n )\n # Create dataframe and safe results\n df = pd.DataFrame(results)\n df.to_csv(\"results.csv\", index=False)\n return df", "def multi_run(replications: int, iters: List, n: int):\n global call_count\n kwargs = {\n # 'alpha': 0.75,\n # 'rho': 'VaR',\n 'alpha': 0.75,\n 'rho': 'CVaR',\n 'x0': 2,\n 'n0': n,\n 'mu_1': -15,\n 'mu_2': 10,\n 'sigma_1': 4,\n 'sigma_2': 2\n }\n\n out_dict = {\n 'SA': dict(),\n 'SA_SAA': dict(),\n 'NM': dict(),\n 'NM_SAA': dict(),\n 'LBFGS': dict(),\n 'LBFGS_SAA': dict(),\n 'EI': dict(),\n 'EI_SAA': dict()\n }\n total_calls = dict()\n for key in out_dict.keys():\n total_calls[key] = dict()\n for it_count in iters:\n kwargs['iter_count'] = it_count\n for key in out_dict.keys():\n out_dict[key][it_count] = dict()\n total_calls[key][it_count] = 0\n i = 
0\n while i < replications:\n try:\n out_dict['SA'][it_count][i] = SA_run(seed=i, **kwargs)\n total_calls['SA'][it_count] += call_count\n call_count = 0\n out_dict['SA_SAA'][it_count][i] = SA_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['SA_SAA'][it_count] += call_count\n call_count = 0\n out_dict['NM'][it_count][i] = NM_run(seed=i, **kwargs)\n total_calls['NM'][it_count] += call_count\n call_count = 0\n out_dict['NM_SAA'][it_count][i] = NM_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['NM_SAA'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS'][it_count][i] = LBFGS_run(seed=i, **kwargs)\n total_calls['LBFGS'][it_count] += call_count\n call_count = 0\n out_dict['LBFGS_SAA'][it_count][i] = LBFGS_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['LBFGS_SAA'][it_count] += call_count\n call_count = 0\n out_dict['EI'][it_count][i] = EI_run(seed=i, **kwargs)\n total_calls['EI'][it_count] += call_count\n call_count = 0\n out_dict['EI_SAA'][it_count][i] = EI_run(seed=i, **kwargs, SAA_seed=i)\n total_calls['EI_SAA'][it_count] += call_count\n call_count = 0\n i += 1\n except:\n continue\n np.save('call_counts_cvar_%d.npy' % n, total_calls)\n evaluate(out_dict, n)", "def test_parallel_kwargs():\r\n lst = range(10)\r\n for n_jobs in (1, 4):\r\n yield (nose.tools.assert_equal,\r\n [f(x, y=1) for x in lst],\r\n Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)\r\n )", "def zoo_loop(sgf_dir=None, max_jobs=40):\n desired_pairs = restore_pairs() or []\n random.shuffle(desired_pairs)\n last_model_queued = restore_last_model()\n\n if sgf_dir:\n sgf_dir = os.path.abspath(sgf_dir)\n\n api_instance = get_api()\n toggle = True\n try:\n while True:\n last_model = fsdb.get_latest_pb()[0]\n if last_model_queued < last_model:\n print(\"Adding models {} to {} to be scheduled\".format(\n last_model_queued+1, last_model))\n for m in reversed(range(last_model_queued+1, last_model+1)):\n desired_pairs += make_pairs_for_model(m)\n last_model_queued = last_model\n save_last_model(last_model)\n\n cleanup(api_instance)\n random.shuffle(desired_pairs)\n r = api_instance.list_job_for_all_namespaces()\n if r.items:\n tasks = sum([item.spec.completions for item in r.items])\n else:\n tasks = 0\n if tasks < MAX_TASKS:\n if len(desired_pairs) == 0:\n if sgf_dir:\n if tasks > MIN_TASKS:\n time.sleep(60)\n continue\n print(\"Out of pairs! Syncing new eval games...\")\n ratings.sync(sgf_dir)\n print(\"Updating ratings and getting suggestions...\")\n if toggle:\n print(\"Pairing the top of the table.\")\n add_top_pairs()\n else:\n print(\"Pairing the least-known models.\")\n add_uncertain_pairs()\n toggle = not toggle\n for modelnum, rate in ratings.top_n():\n print(\"{:>30}: {:0.3f} ({:0.3f})\".format(modelnum, rate[0], rate[1]))\n desired_pairs = restore_pairs() or []\n else:\n print(\"Out of pairs. Sleeping ({} remain)\".format(len(r.items)))\n time.sleep(600)\n continue\n\n\n next_pair = desired_pairs.pop()\n failed = maybe_enqueue([next_pair])\n if failed != []:\n desired_pairs.extend(failed)\n save_pairs(sorted(desired_pairs))\n save_last_model(last_model)\n time.sleep(1)\n\n else:\n print(\"{}\\t {} finished / {} requested. 
\"\n \"({} jobs, {} pairs to be scheduled)\".format(\n time.strftime(\"%I:%M:%S %p\"),\n sum([i.status.succeeded or 0 for i in r.items]),\n tasks, len(r.items), len(desired_pairs)))\n time.sleep(60)\n except:\n print(\"Unfinished pairs:\")\n print(sorted(desired_pairs))\n save_pairs(sorted(desired_pairs))\n save_last_model(last_model)\n raise", "def OSK_Q_MultiProcess_Ave(ave_times=20,\n learning_rate=0.1,\n eligibility_factor=0.9,\n observation_dim=4,\n mu_1=0.04,\n mu_2=0.08):\n # Learning Parameter\n discount_factor = 0.9\n discount_of_learning_rate = 0.999\n epsilon = 0.1\n\n # Parameter OSK-Q\n sigma = 1\n\n # Macro\n NUM_EPISODE = 600\n AVE_TIMES = ave_times\n REWARD_THREASHOLD = 40\n # Definition of dependencies\n env = gym.make('CartPole-v0')\n\n # Run algorithm\n for ave_times in range(AVE_TIMES):\n learning_agent_OSKQ = OSKQ(\n mu_1,\n mu_2,\n learning_rate,\n discount_factor,\n eligibility_factor,\n [0, 1],\n observation_dim,\n sigma\n )\n learning_agent = learning_agent_OSKQ\n\n Qfunc_error_history_3 = []\n total_reward_episode_3 = []\n time_history_3 = []\n max_reward = -float(\"inf\")\n for i_episode in range(NUM_EPISODE):\n time_start = time.clock()\n observation = env.reset()\n action = learning_agent._m_GreedyPolicy(observation, epsilon)\n\n total_reward = 0\n done = False\n step = 0\n while not done:\n step += 1\n observation_bar, step_reward, done, info = env.step(action)\n\n action_bar = learning_agent._m_GreedyPolicy(\n observation_bar,\n epsilon\n )\n\n learning_agent._m_Learn(\n observation,\n action,\n observation_bar,\n action_bar,\n step_reward\n )\n\n observation = observation_bar\n action = action_bar\n total_reward += step_reward\n print \"Episode finished after {} timesteps in OSK-Q(lambda)\".format(step), \"in \", ave_times + 1, \"times\"\n time_end = time.clock()\n time_consumed = time_end - time_start\n time_history_3.append(time_consumed)\n\n if total_reward > max_reward:\n if total_reward > REWARD_THREASHOLD:\n epsilon *= 0.999\n max_reward = total_reward\n\n total_reward_episode_3.append(total_reward) # Add total reward to reward history\n\n if i_episode % 10 == 0:\n print i_episode, \"th episode completed\"\n print \"Maximal reward is\", max_reward, \"\\n\"\n\n total_reward_episode_3 = np.array(total_reward_episode_3)\n if 'total_reward_episode_ave_3' not in locals():\n total_reward_episode_ave_3 = total_reward_episode_3\n else:\n total_reward_episode_ave_3 = total_reward_episode_ave_3 + (total_reward_episode_3 - total_reward_episode_ave_3) / (ave_times * 1.0)\n\n time_history_3 = np.array(time_history_3)\n if 'time_history_ave_3' not in locals():\n time_history_ave_3 = time_history_3\n else:\n time_history_ave_3 = time_history_ave_3 + (time_history_3 - time_history_ave_3) / (ave_times * 1.0)\n\n total_reward_episode_3 = total_reward_episode_ave_3\n time_history_3 = time_history_ave_3\n\n with open(\n path + \"total_reward_OSKQ-\" + str(learning_rate) + \"-\" +\n str(eligibility_factor) + \"-\" + str(mu_1) + \"-\" + str(mu_2), 'wb') as f:\n pickle.dump(total_reward_episode_3, f)\n with open(\n path + \"time_history_OSKQ-\" + str(learning_rate) + \"-\" +\n str(eligibility_factor) + \"-\" + str(mu_1) + \"-\" + str(mu_2), 'wb') as f:\n pickle.dump(time_history_3, f)", "def evaluate_tasks(self,parameters,potential,max_time_per_simulation=100):\n \n _sleep_time = 0.1\n _max_time_per_simulation = max_time_per_simulation\n\n # initialize results dictions\n self.results = OrderedDict()\n\n # each task requires potential information and parameter information 
provided in a dictionary\n _configuration = OrderedDict()\n _configuration['potential'] = potential\n _configuration['parameters'] = parameters\n \n _start_time = time.time()\n while not self.__all_simulations_finished(self.obj_Task):\n \n # if the maximum time has been exceeded for this parameter set, we are going to kill\n # off all the subprocesses which maybe running simulations in each of the tasks.\n _time_elapsed = time.time() - _start_time\n if _time_elapsed > _max_time_per_simulation:\n for k_task,o_task in self.obj_Task.items():\n # kill off process\n # https://www.programcreek.com/python/example/11892/os.getpgid\n # https://stackoverflow.com/questions/4789837/how-to-terminate-a-python-subprocess-launched-with-shell-true/4791612#4791612\n # https://www.codeday.top/2017/06/28/25301.html\n try:\n o_task.process.kill()\n #pid = o_task.process.pid\n #pgid = os.getpgid(pid)\n #if pgid == pid:\n # os.killpg(pgid,signal.SIGTERM)\n #else:\n # os.kill(pgid,signal.SIGTERM)\n except: \n pass\n raise PypospackTaskManagerError('simulation time exceeded',parameters=parameters)\n \n # iterate over each task, and try to progress the status\n # INIT -> CONFIG\n # CONFIG -> READY\n # READY -> RUNNING\n # RUNNING -> POST\n # POST -> FINISHED\n for k_task,o_task in self.obj_Task.items():\n assert isinstance(o_task.configuration,OrderedDict)\n o_task.update_status()\n if o_task.status == 'INIT':\n\n _configuration = OrderedDict()\n _configuration['potential'] = potential\n _configuration['parameters'] = parameters\n if 'bulk_structure' in self.tasks[k_task]:\n _structure_name = self.tasks[k_task]['bulk_structure']\n _structure_filename = os.path.join(\n self.structures['structure_directory'],\n self.structures['structures'][_structure_name])\n _configuration['bulk_structure'] = _structure_name\n _configuration['bulk_structure_filename'] = _structure_filename\n \n o_task.on_init(configuration=_configuration)\n\n elif o_task.status == 'CONFIG':\n try:\n o_task.on_config(\n configuration=_configuration,\n results=self.results)\n except TypeError as e:\n o_task.on_config(configuration=_configuration)\n elif o_task.status == 'READY':\n try:\n o_task.on_ready(results=self.results)\n except TypeError as e:\n print(\"Error with {}:{}\".format(k_task,type(o_task)))\n raise\n elif o_task.status == 'RUNNING':\n o_task.on_running()\n elif o_task.status == 'POST':\n o_task.on_post()\n _results = o_task.results\n try:\n for k,v in o_task.results.items():\n self.results[k] = v\n except AttributeError as e:\n print('k_task:{}'.format(k_task))\n print('o_task:{}'.format(o_task))\n raise\n\n elif o_task.status == 'FINISHED':\n o_task.on_finished()\n elif o_task.status == 'ERROR':\n raise ValueError\n else:\n raise ValueError\n \n time.sleep(_sleep_time)", "def run_several_iterations(iterations, means, horizon):\n\n # Initializing the results vector.\n results = [0]*horizon\n\n for iteration in range(iterations):\n\n # The current cumulative regret.\n results = np.add(results, run_sparring_algorithm(means[:, iteration], horizon))\n\n # Returning the average cumulative regret.\n return results/(iterations +.0)", "def main():\n run_simulation(spectral=False, ml=False, num_procs=1)\n run_simulation(spectral=True, ml=False, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=1)\n run_simulation(spectral=True, ml=True, num_procs=1)\n run_simulation(spectral=False, ml=True, num_procs=10)\n run_simulation(spectral=True, ml=True, num_procs=10)", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n 
backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def time_run(fnk):\n xval = []\n yval = []\n for n in range(10, 1000, 10):\n xval.append(n)\n graph = gdc.make_upa_graph(n, 5)\n c_time = time.time()\n fnk(graph)\n time_passed = time.time() - c_time\n yval.append(time_passed)\n return xval, yval", "def paramap(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=None, func_args=None, func_kwargs=None,\n **kwargs):\n\n func_args = func_args or []\n func_kwargs = func_kwargs or {}\n\n if engine == \"joblib\":\n if not has_joblib:\n raise joblib()\n if backend is None:\n backend = \"loky\"\n pp = joblib.Parallel(\n n_jobs=n_jobs, backend=backend,\n **kwargs)\n dd = joblib.delayed(func)\n d_l = [dd(ii, *func_args, **func_kwargs) for ii in in_list]\n results = pp(tqdm(d_l))\n\n elif engine == \"dask\":\n if not has_dask:\n raise dask()\n if backend is None:\n backend = \"threading\"\n\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n pp = partial(func, *func_args, **func_kwargs)\n dd = [dask.delayed(pp)(ii) for ii in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*dd, scheduler=\"processes\",\n workers=n_jobs, **kwargs)\n elif backend == \"threading\":\n results = dask.compute(*dd, scheduler=\"threads\",\n workers=n_jobs, **kwargs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n if engine == \"ray\":\n if not has_ray:\n raise ray()\n\n func = ray.remote(func)\n results = ray.get([func.remote(ii, *func_args, **func_kwargs)\n for ii in in_list])\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results", "def __call__(self, x, u, k):\n first_time_through = True\n for cost, arg, weight in zip(self._costs, self._args, self._weights):\n if arg == \"x\":\n cost_input = x\n else:\n cost_input = u[arg]\n\n current_term = weight * cost(cost_input, k)\n if current_term > 1e8:\n print(\"Warning: cost %s is %f\" % (cost._name, current_term))\n print(\"Input is: \", cost_input)\n\n# if cost._name[:4] == \"bike\":\n# print(cost._name, \": \", current_term)\n\n if first_time_through:\n total_cost = current_term\n else:\n total_cost += current_term\n\n first_time_through = False\n\n return total_cost", "def tune_model(\n self,\n ds_x,\n ds_y,\n folds,\n eval_rounds=100,\n groups=None,\n trials=None,\n mon_cons=None,\n categorical=None,\n ):\n # Create hyperopt Trials object\n if trials is None:\n trials = Trials()\n additional_evals = eval_rounds\n else:\n additional_evals = len(trials.losses()) + eval_rounds\n\n # Create the loss function\n loss_func = self.create_loss_func(ds_x, ds_y, folds, groups)\n\n # Find optimal hyperparameters\n parameters = self.optimize(\n trials, loss_func, additional_evals, mon_cons, categorical\n )\n\n self.params = parameters\n self.trials = trials\n\n return parameters", "def simulate_trajectories(kav):\n print \"Simulating \"+str(kav)\n wt_trajectories = []\n avp_trajectories = []\n vip_trajectories = []\n for tn in range(100):\n # get random initial condition\n # initial phases\n init_conditions_AV = [single_osc.lc(wt_T*np.random.rand()) \n for i in range(AVPcells+VIPcells)]\n 
init_conditions_NAV = [single_osc.lc(wt_T*np.random.rand())[:-1]\n for i in range(NAVcells)]\n y0_random = np.hstack(init_conditions_AV+init_conditions_NAV)\n\n # do the simulation\n model = GonzeModelManyCells(param, kav=kav, \n initial_values=y0_random)\n wt_trajectories.append(model.run(show_labels=False, seed=0))\n\n # avp bmalko\n avp_model = GonzeModelManyCells(param, bmalko='AVP', kav=kav, \n initial_values=y0_random)\n avp_trajectories.append(avp_model.run(show_labels=False, seed=0))\n\n # vip bmalko\n vip_model = GonzeModelManyCells(param, bmalko='VIP', kav=kav, \n initial_values=y0_random)\n vip_trajectories.append(vip_model.run(show_labels=False, seed=0))\n\n # save results\n with open(\"Data/params/wt_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(wt_trajectories, output_file)\n with open(\"Data/params/avp_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(avp_trajectories, output_file)\n with open(\"Data/params/vip_\"+str(kav)+\".pickle\", \"wb\") as output_file:\n pickle.dump(vip_trajectories, output_file)\n\n return {'wt': wt_trajectories,\n 'avp': avp_trajectories,\n 'vip': vip_trajectories}", "def Run(k):\n po = Pool()\n#---q1theta---------------------------------------------------------------------\n#---Priors for the theta (model parameters)--------------------\n r = lhs.lhs(stats.uniform, [2, 4], k)\n p0 = lhs.lhs(stats.uniform,[0,5],k)\n q1theta = (r, p0)\n#-------------------------------------------------------------------------------\n phi=zeros(k, float)\n #print r.shape, p0.shape\n for i in xrange(k):\n re = po.apply_async(model,(r[i], p0[i]))\n phi[i] = re.get()[-1]#model(r[i], p0[i])[-1] # Sets phi[i] to the last point of the simulation\n \n \n return phi, q1theta", "def run(bench, budget):\n\n # Get the set of hypeparameter configuration space possible in this benchmark\n cs = bench.get_configuration_space()\n\n ##############################################################################\n # Begin implementation\n ##############################################################################\n popsize=5\n cmaes = CMAES(cs, pop=popsize)\n\n for i in range(int(budget/popsize)):\n pool = cmaes.sample()\n evals = []\n for i in pool:\n eval = bench.objective_function(cs[i])\n evals.append(eval)\n print(\"Sample:\", eval)\n cmaes.fit_predict(evals)\n\n ##############################################################################\n # End implementation\n ##############################################################################\n # This needs to be called at the end of a run\n bench.done()" ]
[ "0.58023125", "0.5677729", "0.55972874", "0.55697495", "0.5547481", "0.5508865", "0.54776615", "0.5470444", "0.5464197", "0.53902745", "0.53773487", "0.53718054", "0.53490144", "0.53486687", "0.5348121", "0.5320192", "0.5305906", "0.52872264", "0.5229914", "0.52269125", "0.5218901", "0.5164598", "0.51629525", "0.5160731", "0.51356715", "0.5114451", "0.51079905", "0.5104619", "0.5096821", "0.50828", "0.5080234", "0.5063031", "0.5058328", "0.50576776", "0.5056964", "0.5055922", "0.50515336", "0.50471735", "0.50393844", "0.50303054", "0.501446", "0.5006115", "0.5004222", "0.50012904", "0.49876794", "0.4985717", "0.4979539", "0.4970186", "0.49698326", "0.49666864", "0.49657536", "0.4957961", "0.49553183", "0.49517936", "0.4950754", "0.49487907", "0.4948227", "0.4946263", "0.4938256", "0.4937653", "0.49352145", "0.49216598", "0.49021855", "0.48987666", "0.48987255", "0.48982057", "0.4898067", "0.48942828", "0.48924297", "0.48777753", "0.48748043", "0.48663932", "0.4865827", "0.48657578", "0.4865148", "0.4859921", "0.48552418", "0.48545593", "0.4843358", "0.48389", "0.4838738", "0.48374578", "0.48372194", "0.4833537", "0.4827322", "0.48150524", "0.4813941", "0.48129067", "0.48112562", "0.4811247", "0.48088586", "0.47998634", "0.47983196", "0.47922373", "0.4787383", "0.47862712", "0.4782557", "0.47776935", "0.47596404", "0.47593457" ]
0.54965633
6
Test the popxl simple addition example
def test_documentation_popxl_addition(self):
        filename = "simple_addition.py"
        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_add_two_numbers(self):\n self.assertEqual(add(5, 9), 14)", "def test_add2(self):\n self.assertEqual(5, add(10 , -5), \"should be 5\")", "def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")", "def test_add4(self):\n self.assertEqual(-15, add(-10 , -5), \"should be -15\")", "def test_add_numbers():\n assert add(3, 8) == 11", "def test_add_numbers(self):\n self.assertEqual(addNums(3, 8), 11)", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add3(self):\n self.assertEqual(-5, add(-10 , 5), \"should be -5\")", "def test_getSum_twoNumbers(self):\r\n self.assertEqual(17, Arith().add(8, 9))", "def test_add_numbers(self):\n a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_add_int(self):\n self.assertEqual(operations.add(3,4), 7)", "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def test_add(self):\n print('test_add')\n \n self.assertEqual(120, add(100, 20))\n self.assertNotEqual(3, add(10, 10))", "def test_two_plus_two():\n assert add.add(2, 2) == 4", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def test_addition(l1, l2):\n result = addTwoNumbers(l1, l2)\n assert result.val == '5'\n assert result.next.val == '8'\n assert result.next.next.val == '0'\n assert result.next.next.next.val == '1'", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def test_arithmetic(self):\n for test in [\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Int(5), right = sir.Int(6)), SymbolType.Integer),\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Bytes('05'), right = sir.Bytes('06')), SymbolType.Integer),\n ]:\n self._test(test)", "def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_addition(self):\n\n a1 = points.Point(3, -2, 5)\n a2 = vectors.Vector(-2, 3, 1)\n\n a3 = a1 + a2\n\n self.assertEqual(a3,\n tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 1, 6, 1))\n self.assertEqual(a3, points.Point(1, 1, 6))", "def test_add(self):\n\n for i in range(1, 200 + 1):\n\n for j in range(1, 200 + 1):\n\n for k in 
range(1, 200 + 1):\n\n value = i + j + k\n assert value == add(i, j, k)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def add_numbers(x, y):\r\n return x + y", "def test_add(self):\n self.assertEqual(3, add(1, 2))\n self.assertNotEqual(3, add(2, 2))", "def test_calculate_adding_in_bracket(self):\n result = self.calcuate.calcuate('(2+1)')\n expected_result = \"3\"\n self.assertEqual(expected_result, result)", "def add_numbers(x,y):\n return x + y", "def test_point_positive_add(self):\n p1 = Point(x=3, y=5)\n p2 = Point(5, 3)\n p = p1 + p2\n self.assertEqual(str(p), '(8.0, 8.0)',\n 'Test of Point(x=3, y=5) + Point(5, 3) failed. 
Returned value != (8.0, 8.0)')", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert c.x == 4\n assert c.y == 6", "def test_add(self):\n self.assertEqual(3, foo.add(1, 2))\n self.assertNotEqual(3, foo.add(2, 2))", "def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def testadd_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracX_fracY = fracX + fracY\r\n\t\t\tself.assertEqual ( add_fracX_fracY.toString ().split ()[0], dictAdd ['X+Y'] )", "def add_numbers(x, y):\n return x + y", "def add(x, y):\n\n return x + y", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i + j, adder.calc(j, i))", "def test_add_to_stock(add):\n assert STOCK[0]['quantity'] == 20\n add[0].add_to_stock(10)\n assert STOCK[0]['quantity'] == 30\n STOCK[0]['quantity'] = 20", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_add(x, y, expected):\n\n assert add(x, y) == pytest.approx(add(y, x)) == pytest.approx(expected)", "def main():\n print('1 + 2 = ', add(1, 2), '. Yay! This test works! 
:D', sep='')", "def test_add():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value + 1\n num_a.value += 1\n assert num_a.value == new_value", "def test_calculate_addition(self):\n result = self.calcuate.calcuate('1+4')\n expected_result = \"5\"\n self.assertEqual(expected_result, result)", "def test_calculator_add(clear_history):\n assert Calculator.add_number(1, 2) == 3\n assert Calculator.add_number(2, 2) == 4\n assert Calculator.add_number(3, 2) == 5\n assert Calculator.add_number(4, 2) == 6\n assert Calculator.history_count() == 4\n assert Calculator.get_result_of_last_calculation_added_to_history() == 6\n pprint.pprint(Calculator.history)", "def test_calculate_bracket_at_the_beginning_and_multiplication(self):\n result = self.calcuate.calcuate('(2+1)x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def test_add_strings(self):\n print(\"---running test_add_strings\")\n result = some_math.add('abc', 'def')\n assert result == 'abcdef'", "def testadd_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracY_fracX = fracY + fracX\r\n\t\t\tself.assertEqual ( add_fracY_fracX.toString ().split ()[0], dictAdd ['Y+X'] )", "def add(a,b):\n\treturn a+b", "def test_calculate_addition_of_four_elements(self):\n result = self.calcuate.calcuate('15+4+10+3')\n expected_result = \"32\"\n self.assertEqual(expected_result, result)", "def add_numbers(a,b):\r\n return a+ b", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def test_list_int(self):\n result = add(2, 4)\n self.assertEqual(result, 6)", "def add(a,b):\r\n result=a+b\r\n return result", "def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def add(a,b):\r\n return a+b", "def add(x, y):\n return x + y" ]
[ "0.65502137", "0.65502137", "0.65502137", "0.64431727", "0.63226366", "0.6308587", "0.628632", "0.6280519", "0.6253776", "0.6226606", "0.62078625", "0.61861867", "0.61754084", "0.6170412", "0.61692727", "0.615465", "0.6137472", "0.61353207", "0.6135082", "0.6111764", "0.6055483", "0.6052546", "0.60271466", "0.60074615", "0.5998734", "0.5931518", "0.59088784", "0.59009635", "0.5900211", "0.5895206", "0.5849179", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.5836547", "0.58297294", "0.57552624", "0.57550156", "0.5727744", "0.5726815", "0.57230824", "0.57154185", "0.5711889", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.56958455", "0.5691856", "0.56589377", "0.56558585", "0.5648774", "0.56455535", "0.56455535", "0.56455535", "0.564103", "0.5637016", "0.5634641", "0.56305695", "0.5619661", "0.5615862", "0.55849606", "0.55824924", "0.55765074", "0.5572857", "0.5566707", "0.55660546", "0.55660546", "0.5564602", "0.5536528", "0.553451", "0.5527909", "0.5527909", "0.55057436", "0.55026287" ]
0.70790774
0
Test the popxl simple addition example
def test_documentation_popxl_addition_variable(self):
        filename = "tensor_addition.py"
        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_add_numbers(self):\n self.assertEqual(add(3, 8), 11)", "def test_and_numbers(self):\n self.assertEqual(add(3,8), 11)", "def test_add_two_numbers(self):\n self.assertEqual(add(5, 9), 14)", "def test_add2(self):\n self.assertEqual(5, add(10 , -5), \"should be 5\")", "def test_add1(self):\n self.assertEqual(15, add(10 , 5), \"should be 15\")", "def test_add_integers(self):\n print(\"---running test_add_integers\")\n result = some_math.add(1, 2)\n assert result == 3", "def test_add(self):\n self.assertEqual(add(1, 1), 2, \"Wrong answer\")\n self.assertEqual(add(10, 1), 11, \"Wrong answer\")\n self.assertEqual(add(15, 15), 30, \"Wrong answer\")", "def test_add4(self):\n self.assertEqual(-15, add(-10 , -5), \"should be -15\")", "def test_add_numbers():\n assert add(3, 8) == 11", "def test_add_numbers(self):\n self.assertEqual(addNums(3, 8), 11)", "def test_addition():\n assert calculator.add(7, 3) == 10\n assert calculator.add(7.0, 3.0) == 10.0\n assert calculator.add(7, -3) == 4\n assert calculator.add(7.0, -3.0) == 4.0", "def test_add3(self):\n self.assertEqual(-5, add(-10 , 5), \"should be -5\")", "def test_getSum_twoNumbers(self):\r\n self.assertEqual(17, Arith().add(8, 9))", "def test_add_numbers(self):\n a, b = 5, 6\n expected = a + b\n # check for equality, real vs expected\n self.assertEqual(add(a, b), expected)", "def test_add_integer(self):\n assert cr.add(3, 2) == 3 + 2", "def test_add_int(self):\n self.assertEqual(operations.add(3,4), 7)", "def test_add_returns_correct_result(self):\n result = self.calc.add(2, 2)\n self.assertEqual(4, result)", "def test_add(self):\n print('test_add')\n \n self.assertEqual(120, add(100, 20))\n self.assertNotEqual(3, add(10, 10))", "def test_two_plus_two():\n assert add.add(2, 2) == 4", "def test_add(self):\r\n operation = Operation(3, 4)\r\n result = operation.add()\r\n self.assertEqual(result, 7)", "def test_addition(l1, l2):\n result = addTwoNumbers(l1, l2)\n assert result.val == '5'\n assert result.next.val == '8'\n assert result.next.next.val == '0'\n assert result.next.next.next.val == '1'", "def test_calculate_multiplication_and_adding(self):\n result = self.calcuate.calcuate('1+2x3')\n expected_result = \"7\"\n self.assertEqual(expected_result, result)", "def test_add():\n l = [1, 2, 3, 4]\n assert s7.add(*l) == sum(l)\n assert s7.add(100, 200) == 300\n assert s7.add(1.0, 2.0, 100.0) == 103.0", "def test_arithmetic(self):\n for test in [\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Int(5), right = sir.Int(6)), SymbolType.Integer),\n TypeTest(sir.BinOpCode(name = 'OP_ADD', left = sir.Bytes('05'), right = sir.Bytes('06')), SymbolType.Integer),\n ]:\n self._test(test)", "def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3", "def test_add(self):\n self.assertEqual(work_file.add(10, 5), 15)\n self.assertEqual(work_file.add(-1, 1), 0)\n self.assertEqual(work_file.add(-1, -1), -2)", "def test_addition(self):\n\n a1 = points.Point(3, -2, 5)\n a2 = vectors.Vector(-2, 3, 1)\n\n a3 = a1 + a2\n\n self.assertEqual(a3,\n tuples.Tuple([\"x\", \"y\", \"z\", \"w\"], 1, 1, 6, 1))\n self.assertEqual(a3, points.Point(1, 1, 6))", "def test_add(self):\n\n for i in range(1, 200 + 1):\n\n for j in range(1, 200 + 1):\n\n for k in range(1, 
200 + 1):\n\n value = i + j + k\n assert value == add(i, j, k)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.assertEqual(1 + 1, 2)", "def test_calculate_order_multiplication_subtraction_adding(self):\n result = self.calcuate.calcuate('11-2+4x3')\n expected_result = \"21\"\n self.assertEqual(expected_result, result)", "def add_numbers(x, y):\r\n return x + y", "def test_add(self):\n self.assertEqual(3, add(1, 2))\n self.assertNotEqual(3, add(2, 2))", "def test_calculate_adding_in_bracket(self):\n result = self.calcuate.calcuate('(2+1)')\n expected_result = \"3\"\n self.assertEqual(expected_result, result)", "def add_numbers(x,y):\n return x + y", "def test_point_positive_add(self):\n p1 = Point(x=3, y=5)\n p2 = Point(5, 3)\n p = p1 + p2\n self.assertEqual(str(p), '(8.0, 8.0)',\n 'Test of Point(x=3, y=5) + Point(5, 3) failed. 
Returned value != (8.0, 8.0)')", "def test_add(self):\n a = Vector(1, 2)\n b = Vector(3, 4)\n c = a + b\n assert c.x == 4\n assert c.y == 6", "def test_add(self):\n self.assertEqual(3, foo.add(1, 2))\n self.assertNotEqual(3, foo.add(2, 2))", "def test_our_add(self):\n\n # arrange\n x = 2\n y = 3\n expected_result = 5\n\n # act; assert\n self.assertEqual(self.our_module.add(x, y), expected_result)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.assertEqual(1 + 1, 2)", "def testadd_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracX_fracY = fracX + fracY\r\n\t\t\tself.assertEqual ( add_fracX_fracY.toString ().split ()[0], dictAdd ['X+Y'] )", "def add_numbers(x, y):\n return x + y", "def add(x, y):\n\n return x + y", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i + j, adder.calc(j, i))", "def test_add_to_stock(add):\n assert STOCK[0]['quantity'] == 20\n add[0].add_to_stock(10)\n assert STOCK[0]['quantity'] == 30\n STOCK[0]['quantity'] = 20", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\n self.failUnlessEqual(1 + 1, 2)", "def test_add(x, y, expected):\n\n assert add(x, y) == pytest.approx(add(y, x)) == pytest.approx(expected)", "def main():\n print('1 + 2 = ', add(1, 2), '. Yay! This test works! 
:D', sep='')", "def test_add():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n new_value = value + 1\n num_a.value += 1\n assert num_a.value == new_value", "def test_calculate_addition(self):\n result = self.calcuate.calcuate('1+4')\n expected_result = \"5\"\n self.assertEqual(expected_result, result)", "def test_calculator_add(clear_history):\n assert Calculator.add_number(1, 2) == 3\n assert Calculator.add_number(2, 2) == 4\n assert Calculator.add_number(3, 2) == 5\n assert Calculator.add_number(4, 2) == 6\n assert Calculator.history_count() == 4\n assert Calculator.get_result_of_last_calculation_added_to_history() == 6\n pprint.pprint(Calculator.history)", "def test_calculate_bracket_at_the_beginning_and_multiplication(self):\n result = self.calcuate.calcuate('(2+1)x3')\n expected_result = \"9\"\n self.assertEqual(expected_result, result)", "def test_add_strings(self):\n print(\"---running test_add_strings\")\n result = some_math.add('abc', 'def')\n assert result == 'abcdef'", "def testadd_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tadd_fracY_fracX = fracY + fracX\r\n\t\t\tself.assertEqual ( add_fracY_fracX.toString ().split ()[0], dictAdd ['Y+X'] )", "def add(a,b):\n\treturn a+b", "def test_calculate_addition_of_four_elements(self):\n result = self.calcuate.calcuate('15+4+10+3')\n expected_result = \"32\"\n self.assertEqual(expected_result, result)", "def add_numbers(a,b):\r\n return a+ b", "def add(x,y):\n return x + y", "def add(x,y):\n return x + y", "def test_list_int(self):\n result = add(2, 4)\n self.assertEqual(result, 6)", "def add(a,b):\r\n result=a+b\r\n return result", "def test_add():\n z = Complex(1, -2)\n w = Complex(1, 1)\n assert (z + w) == Complex(2, -1)\n assert (z + (1+1j)) == Complex(2, -1)\n assert (z + 2) == Complex(3, -2)\n assert (z + 2.0) == Complex(3, -2)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def test_basic_addition(self):\r\n self.failUnlessEqual(1 + 1, 2)", "def add(a,b):\r\n return a+b", "def add(x, y):\n return x + y" ]
[ "0.70790774", "0.65502137", "0.65502137", "0.65502137", "0.64431727", "0.63226366", "0.6308587", "0.628632", "0.6280519", "0.6253776", "0.6226606", "0.62078625", "0.61861867", "0.61754084", "0.6170412", "0.61692727", "0.615465", "0.6137472", "0.61353207", "0.6135082", "0.6111764", "0.6055483", "0.6052546", "0.60271466", "0.60074615", "0.5931518", "0.59088784", "0.59009635", "0.5900211", "0.5895206", "0.5849179", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.58489525", "0.5836547", "0.58297294", "0.57552624", "0.57550156", "0.5727744", "0.5726815", "0.57230824", "0.57154185", "0.5711889", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.5697257", "0.56958455", "0.5691856", "0.56589377", "0.56558585", "0.5648774", "0.56455535", "0.56455535", "0.56455535", "0.564103", "0.5637016", "0.5634641", "0.56305695", "0.5619661", "0.5615862", "0.55849606", "0.55824924", "0.55765074", "0.5572857", "0.5566707", "0.55660546", "0.55660546", "0.5564602", "0.5536528", "0.553451", "0.5527909", "0.5527909", "0.55057436", "0.55026287" ]
0.5998734
25
Test the popxl basic subgraph example
def test_documentation_popxl_basic_subgraph(self): filename = "basic_graph.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def populate_graph(self):", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', 
values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def test_extract_graph(default_plugin_resolver):\n dpr = default_plugin_resolver\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n desired_nodes = {2, 5, 6}\n nx_extracted_graph = nx.Graph()\n nx_extracted_graph.add_weighted_edges_from([(2, 5, 5), (5, 6, 10), (6, 2, 11)])\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n desired_nodes_wrapped = dpr.wrappers.NodeSet.PythonNodeSet(desired_nodes)\n extracted_graph = dpr.wrappers.Graph.NetworkXGraph(nx_extracted_graph)\n MultiVerify(\n dpr, \"subgraph.extract_subgraph\", graph, desired_nodes_wrapped\n ).assert_equals(extracted_graph)", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_setup(self):\n self.setup()\n print(\"Nodes in graph\")\n for node in self.graph.graph.nodes:\n print(node)\n print(\"Edges in graph\")\n for edge in self.graph.graph.edges(data=True):\n print(edge)", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def graph(self):\n ...", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> 
'.join([f\"{v}\" for v in p]))", "def test_get_vertex_from_subvertex(self):\n subvertices = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n\n subvert1 = PartitionedVertex(None, \"\")\n subvert2 = PartitionedVertex(None, \"\")\n\n graph_mapper = GraphMapper()\n vert = TestVertex(10, \"Some testing vertex\")\n\n vertex_slice = Slice(0, 1)\n graph_mapper.add_subvertex(subvert1, vertex_slice, vert)\n vertex_slice = Slice(2, 3)\n graph_mapper.add_subvertex(subvert2, vertex_slice, vert)\n\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert1))\n self.assertEqual(\n vert, graph_mapper.get_vertex_from_subvertex(subvert2))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[0]))\n self.assertEqual(\n None, graph_mapper.get_vertex_from_subvertex(subvertices[1]))", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def test_dot(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_Tree():", "def main(dot_file):\n global SUBGRAPHS, PARENTS\n graph = graph_from_dot(dot_file)\n SUBGRAPHS = {}\n PARENTS = {}\n extract_subgraphs([graph])\n \n for (name, subgraph) in SUBGRAPHS.items():\n nodes = extract_nodes(subgraph)\n for node in nodes:\n (name_function, result, function_call_line) = analyse_label_function_calls(node)\n if name_function is not None:\n (label_node1, label_node2, bb) = create_labels(node, result, function_call_line)\n node.set_label(label_node1)\n nodes_to_update = get_nodes_to_update(subgraph, graph.get_name())\n update_nodes(nodes_to_update, bb)\n nodes.append(create_new_node(subgraph, node, label_node2, bb))\n update_edges(subgraph, graph.get_name(), bb)\n create_new_edge(graph, node.get_name(), SUBGRAPHS[name_function])\n recreate_subgraphs_name()\n export_graph(graph, \"main_output\", \"png\")\n export_graph(graph, \"main_output\", \"dot\")\n return graph", "def testGraphExtract(self):\n graph = Graph2()\n graph.parseFile(TESTFILE)", "def __init__(self, prefix, downstream, upstream, root):\n super(SubGraph, self).__init__(prefix, downstream, upstream, root)", "def test_k_core(default_plugin_resolver):\n dpr = default_plugin_resolver\n k = 2\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n nx_k_core_graph = nx.Graph()\n nx_k_core_graph.add_weighted_edges_from(\n [(2, 5, 5), (5, 6, 10), (6, 2, 11),]\n )\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n k_core_graph = dpr.wrappers.Graph.NetworkXGraph(nx_k_core_graph)\n 
MultiVerify(dpr, \"subgraph.k_core\", graph, k).assert_equals(k_core_graph)", "def __test(graph): \n \n if not isinstance(graph, basegraph):\n raise TypeError(\"Expected type was Graph.\")\n \n print \"### iPATH TEST DATA STRUCTURE\"\n print \"### Data Type: Graph ({})\".format(str(graph.__class__.__bases__[0].__name__))\n print \"### Implementation: {}\".format(str(graph.__class__.__name__))\n \n print \"\\n*** ADD NODE ***\\n\" \n for i in range(10):\n print \"add_node({})\".format(str(i)) \n graph.add_node(i) \n \n print \"\\n*** ADD ARC ***\\n\" \n for i in range(10):\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 1), str(2 * (i + 1)))\n graph.add_arc(i, i + 1, 2 * (i + 1))\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 2), str(2 * (i + 2)))\n graph.add_arc(i, i + 2, 2 * (i + 2))\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE NODE ***\\n\" \n print \"remove_node(5)\"\n graph.remove_node(5)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE ARC ***\\n\" \n print \"remove_arc(7, 8)\" \n graph.remove_arc(7, 8)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** INCIDENT ARCS ***\\n\" \n for node in graph.get_nodes():\n print \"Incident Arcs of {}\\t{}\\n\".format(str(node), str(graph.get_incident_arcs(node._id)))\n \n print \"\\n*** ADJACENCY ***\\n\" \n for i in range(10):\n for j in range(10):\n if graph.are_adjacent(i, j) == True:\n print \"Adjacency Between ({}, {}): True\\n\".format(str(i), str(j))\n \n print \"\\n*** NODES ***\\n\" \n print \"numNodes: {}\\n\".format(str(graph.get_num_nodes())) \n print \"Nodes: {}\\n\".format(str(graph.get_nodes())) \n \n print \"\\n*** ARCS ***\\n\" \n print \"numArcs: {}\\n\".format(str(graph.get_num_arcs())) \n print \"Arcs: {}\\n\".format(str(graph.get_arcs())) \n \n print \"\\n*** SEARCH BFS ***\\n\" \n for i in range(10): \n print \"bfs({})\".format(str(i))\n Lbfs = graph.bfs(i)\n for n in Lbfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n*** SEARCH DFS ***\\n\" \n for i in range(9):\n print \"dfs({})\".format(str(i))\n Ldfs = graph.dfs(i)\n for n in Ldfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n### END OF TEST ###\\n\"", "def sub_graph_merging(self):\n raise NotImplementedError()", "def main():\n\n bGraph = DiGraph()\n bGraph + Vertex(\"alpha\", 20, deltaInherent=5)\n bGraph[\"beta\"] = Vertex(\"beta\", 13)\n bGraph.add_edge(bGraph[\"alpha\"], bGraph[\"beta\"], \"aa_lin\", [1])\n print(bGraph[\"alpha\"])\n print(bGraph)\n bGraph + Vertex(\"gamma\", 4, randomFlag=True, randomInfo=(0, 10))\n bGraph.apply_floating_deltas()\n for vert in bGraph:\n print(vert)", "def test_ExplorePath( self ):\n links = []\n n1 = graph.Node( 10, 10 )\n n2 = graph.Node( 10, 20 )\n n3 = graph.Node( 10, 30 )\n n4a = graph.Node( 5, 40 )\n n4b = graph.Node( 15, 40 )\n n5a = graph.Node( 5, 50 )\n n5b = graph.Node( 15, 50 )\n n6a = graph.Node( 5, 60 )\n n6b = graph.Node( 15, 60 )\n n7 = graph.Node( 10, 70 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n4a ) )\n links.append( graph.Link( n3, n4b ) )\n links.append( graph.Link( n4a, n5a ) )\n links.append( graph.Link( n4b, n5b ) )\n links.append( graph.Link( n5a, n6a ) )\n links.append( graph.Link( n6a, n7 ) )\n links.append( graph.Link( n5b, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, n1, n1 )\n expected = [ n1, n2, n3, n4b, n5b, 
n7 ]\n self.assertEqual( expected, actual )", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def main(\n num_sampled=[3, 3],\n max_depth=2,\n num_iters=1000,\n do_graph=False,\n # These are for checking stats on smaller data\n subsample=False,\n plot=False,\n 
# Generates a random matrix for comparison\n random=False,\n # Visualise the connection matrix\n vis_connect=False,\n subsample_vis=False,\n # Generate final graphs\n final=False,\n # Analyse\n analyse=False,\n only_exp=False,\n # Which regions are considered here\n # A_name, B_name = \"MOp\", \"SSP-ll\"\n A_name=\"VISp\",\n B_name=\"VISl\",\n desired_depth=1,\n desired_samples=79,\n):\n np.random.seed(42)\n\n if random:\n AB, BA, AA, BB = gen_random_matrix(150, 50, 0, 0.04, 0, 0.0)\n matrix_vis(AB, BA, AA, BB, 10, name=\"test_vis.png\")\n\n os.makedirs(os.path.dirname(pickle_loc), exist_ok=True)\n convert_mouse_data(A_name, B_name)\n to_use = [True, True, True, True]\n mc, args_dict = load_matrix_data(to_use, A_name, B_name)\n print(\"{} - {}, {} - {}\".format(A_name, B_name, mc.num_a, mc.num_b))\n\n result = {}\n result[\"matrix_stats\"] = print_args_dict(args_dict, out=False)\n\n if only_exp:\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict)\n mpf_val = [\n mpf_res[\"expected\"],\n mpf_res[\"expected\"] / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n to_add = np.mean(graph_res[\"full_results\"][\"Connections\"].values)\n graph_val = [\n to_add,\n to_add / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n return mpf_val, graph_val\n return mpf_val, None\n\n # Convert to a pickle\n # if not os.path.isfile(pickle_loc):\n # print(\"Converting matrix\")\n # gc.collect()\n # mc.create_connections()\n # print(\"Finished conversion\")\n # graph = mc.graph\n # to_write = [mc.num_a, mc.num_b]\n # del mc\n # gc.collect()\n\n # handle_pickle(graph, \"graph.pickle\", \"w\")\n # handle_pickle(reverse(graph), \"r_graph.pickle\", \"w\")\n # handle_pickle(to_write, \"graph_size.pickle\", \"w\")\n\n if vis_connect:\n if subsample_vis:\n print(\"Plotting subsampled matrix vis\")\n new_mc = mc.subsample(int(mc.num_a / 10), int(mc.num_b / 10))\n matrix_vis(\n new_mc.ab,\n new_mc.ba,\n new_mc.aa,\n new_mc.bb,\n 15,\n name=\"mc_mat_vis_sub10.pdf\",\n )\n else:\n o_name = \"mc_mat_vis_{}_to_{}.pdf\".format(A_name, B_name)\n print(\"Plotting full matrix vis\")\n matrix_vis(mc.ab, mc.ba, mc.aa, mc.bb, 150, name=o_name)\n print(\"done vis\")\n\n print(mc, print_args_dict(args_dict, out=False))\n\n result = None\n if subsample:\n result = check_stats(mc, 1000, 1, 20000, 1, plot)\n if final:\n result = {}\n\n # For different depths and number of samples\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n ns_2 = [ns] * 2\n mpf_res = mpf_connectome(mc, ns_2, depth, args_dict)\n result[\"mpf_{}_{}\".format(depth, ns)] = mpf_res\n\n # Save this for plotting\n cols = [\"Number of samples\", \"Proportion of connections\", \"Max distance\"]\n depth_name = [None, \"Direct synapse\", \"Two synapses\", \"Three synapses\"]\n vals = []\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n this = result[\"mpf_{}_{}\".format(depth, ns)]\n val = [ns, this[\"expected\"] / ns, depth_name[depth]]\n vals.append(val)\n df = pd.DataFrame(vals, columns=cols)\n os.makedirs(os.path.join(here, \"..\", \"results\"), exist_ok=True)\n 
df.to_csv(\n os.path.join(\n here, \"..\", \"results\", \"{}_to_{}_depth.csv\".format(A_name, B_name)\n ),\n index=False,\n )\n\n cols = [\"Number of sampled connected neurons\", \"Probability\"]\n total_pmf = result[\"mpf_{}_{}\".format(desired_depth, desired_samples)][\"total\"]\n vals = []\n for k, v in total_pmf.items():\n vals.append([k, float(v)])\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_{}_{}.csv\".format(\n A_name, B_name, desired_depth, desired_samples\n ),\n ),\n index=False,\n )\n if analyse:\n result = {}\n result[\"matrix_stats\"] = args_dict\n\n mpf_res = mpf_connectome(\n mc,\n num_sampled,\n max_depth,\n args_dict,\n clt_start=30,\n sr=None,\n mean_estimate=True,\n )\n result[\"mean\"] = mpf_res\n\n vals = []\n cols = [\"Number of connected neurons\", \"Probability\", \"Calculation\"]\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Mean estimation\"])\n\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict, clt_start=30)\n result[\"mpf\"] = mpf_res\n\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Statistical estimation\"])\n\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n\n result[\"difference\"] = (\n dist_difference(mpf_res[\"total\"], graph_res[\"dist\"]),\n )\n result[\"graph\"] = graph_res\n\n for k, v in graph_res[\"dist\"].items():\n vals.append([k, float(v), \"Monte Carlo simulation\"])\n\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_final_{}_{}.csv\".format(\n A_name, B_name, max_depth, num_sampled[0]\n ),\n ),\n index=False,\n )\n\n if result is not None:\n with open(os.path.join(here, \"..\", \"results\", \"mouse.txt\"), \"w\") as f:\n pprint(result, width=120, stream=f)\n\n return result", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n 
temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def do_printgraph(self, args):\n self.currentGraph.printGraph()", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def main():\n GRAPH = lambda_graph()\n GRAPH.save_graph(\"pylon\")\n meshName = \"pylon.mesh\"\n cmd = \"./population/linuxShow \"+meshName\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n process.communicate()\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()", "def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"", "def test_spec(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_subkey(man):\n errors = []\n\n G = man.writeTest()\n\n G.addVertex(\"Work\", \"Thing\", {})\n G.addVertex(\"Workflow\", \"Thing\", {})\n G.addVertex(\"Other\", \"Thing\", {})\n G.addVertex(\"OtherGuy\", \"Thing\", {})\n\n G.addEdge(\"Work\", \"Other\", \"edge\")\n G.addEdge(\"Workflow\", \"OtherGuy\", \"edge\")\n\n count = 0\n for i in G.query().V(\"Work\").out():\n count += 1\n if count != 1:\n errors.append(\"Incorrect outgoing vertex count %d != %d\" % (count, 1))\n\n count = 0\n for i in G.query().V(\"Work\").outE():\n count += 1\n if count != 1:\n errors.append(\"Incorrect outgoing edge count %d != %d\" % (count, 1))\n\n count = 0\n for i in G.query().V(\"Other\").inE():\n count += 1\n if count != 1:\n errors.append(\"Incorrect incoming edge 
count %d != %d\" % (count, 1))\n\n return errors", "def test_generator8(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = (xpb.foo.bar | xpb.x.y).parenthesize()\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() & xpb.c\n xp2 = b() & xpb.d\n xp1_exp = '(/foo/bar or /x/y) and /c'\n xp2_exp = '(/foo/bar or /x/y) and /d'\n base_exp = '(/foo/bar or /x/y)'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 
'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_get_subvertices_from_vertex(self):\n subvertices = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n subvert1 = PartitionedVertex(None, \"\")\n subvert2 = PartitionedVertex(None, \"\")\n\n subedges = list()\n subedges.append(MultiCastPartitionedEdge(subvertices[0],\n subvertices[1]))\n subedges.append(MultiCastPartitionedEdge(subvertices[1],\n subvertices[1]))\n\n graph_mapper = GraphMapper()\n vert = TestVertex(4, \"Some testing vertex\")\n\n vertex_slice = Slice(0, 1)\n graph_mapper.add_subvertex(subvert1, vertex_slice, vert)\n vertex_slice = Slice(2, 3)\n graph_mapper.add_subvertex(subvert2, vertex_slice, vert)\n\n returned_subverts = graph_mapper.get_subvertices_from_vertex(vert)\n\n self.assertIn(subvert1, returned_subverts)\n self.assertIn(subvert2, returned_subverts)\n for sub in subvertices:\n self.assertNotIn(sub, returned_subverts)", "def test_tree_splay() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\")),\n }\n\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_3\", \"ds_3\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_4\", \"ds_4\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", 
\"ds_4\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\n \"dr_2:ds_2\": {\"f1 -> f1\"},\n \"dr_3:ds_3\": {\"f1 -> f1\"},\n \"dr_4:ds_4\": {\"f1 -> f1\"},\n \"dr_5:ds_5\": {\"f1 -> f1\"},\n },\n },\n \"dr_2:ds_2\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_3:ds_3\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_4:ds_4\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_5:ds_5\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert set(terminators) == {\n CollectionAddress(\"dr_2\", \"ds_2\"),\n CollectionAddress(\"dr_3\", \"ds_3\"),\n CollectionAddress(\"dr_4\", \"ds_4\"),\n CollectionAddress(\"dr_5\", \"ds_5\"),\n }", "def test_build_poset_lattice():\n lattice = build_poset_lattice(all_games_gen(2))\n assert len(lattice.edges()) == 36", "def test_get_related_nodes(self):\n pass", "def sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def test_get_grid_edge_nodes(flopy_dis_mf6):\n mf6 = flopy_dis_mf6[1]\n mf6.initialize()\n\n with pytest.raises(NotImplementedError):\n mf6.get_grid_edge_nodes(1, np.zeros((1, 1)))", "def show_custom_graph(self):\n pass", "def gen_graph(self):", "def slice_graph_fwd( startea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\tstartnode = slice_node( startea, 0, reg )\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\ttgt_reg = currslice.get_target_reg()\r\n\t\tif tgt_reg == \"END\":\r\n\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.endea != currslice.get_lines()[-1][0]):\r\n\t\t\t# Nothing much happening here, just proceed to parent bocks\r\n\t\t\tif ua_mnem( currslice.endea ) == \"call\":\r\n\t\t\t\txrefs = get_short_crefs_from( currslice.endea )\r\n\t\t\telse:\r\n\t\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\telse:\r\n\t\t\t# Register was modified, use new register\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode 
)\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\treturn [ graph, data_bib ]", "def test_dot() -> None:\n # Given\n source_files: Iterator[SourceFile] = iter([SIMPLE_FILE])\n drawer = GraphDrawer(Graph(\"tests/graph.svg\"))\n use_case = DrawGraphUC(drawer, PARSER, source_files)\n\n # When\n use_case.run()\n\n # Then\n with open(\"/tmp/graph.dot\", encoding=\"utf-8\") as dot:\n lines = sorted(dot.readlines())\n\n assert lines == sorted(\n [\n \"digraph G {\\n\",\n \"splines=true;\\n\",\n f\"node[shape=box fontname=Arial style=filled fillcolor={drawer.graph.node_color}];\\n\",\n f\"bgcolor={drawer.graph.background_color}\\n\",\n \"\\n\",\n '\"simple_module\" -> \"module\"\\n',\n '\"simple_module\" -> \"module.inside.module\"\\n',\n '\"simple_module\" -> \"amodule\"\\n',\n \"}\\n\",\n ]\n )", "def test_input_valid_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.swap([0, dim], nx.empty_graph(dim))", "def test_general_subset_level():\n pass", "def print_graph() -> None:\n raise NotImplementedError", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def subplot_2(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n if (val[1]==1):\n des = \"greater\"\n else:\n des = \"lower\"\n print(\"\\t\"*(n_tabs+1),\"feature threashold :\", val[0],\" \",des)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_2(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2), \"prediction :\",sub_graph)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 3)\n self.small_tree.add_edge(4, 3)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(0, 1) # deg(0) = 1\n\n self.deterministic_graph.add_edge(1, 2) # deg(1) = 2\n\n self.deterministic_graph.add_edge(2, 3)\n self.deterministic_graph.add_edge(2, 4) # deg(2) = 3\n\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(3, 6) # deg(3) = 4\n\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(4, 6)\n self.deterministic_graph.add_edge(4, 7) # deg(4) = 5\n\n self.deterministic_graph.add_edge(5, 6)\n self.deterministic_graph.add_edge(5, 7)\n self.deterministic_graph.add_edge(5, 8)\n self.deterministic_graph.add_edge(5, 9) # deg(5) = 6\n\n self.deterministic_graph.add_edge(6, 7)\n self.deterministic_graph.add_edge(6, 8)\n self.deterministic_graph.add_edge(6, 9) # deg(6) = 6\n\n 
self.deterministic_graph.add_edge(7, 8)\n self.deterministic_graph.add_edge(7, 9) # deg(7) = 5\n\n self.deterministic_graph.add_edge(8, 9) # deg(8) = 4", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"black\", \"label\": lab}\n # print tree.nodes()\n\n if only_tree is True:\n tree_new = tree # Alter the input tree\n else:\n #tree_new = tree.subgraph(tree.nodes()) # nx < 2.0\n tree_new = tree.copy() # nx < 2.0\n\n #print(nocopy)\n #old_G = trilearn.graph.junction_tree.get_graph(tree)\n #(subtree, old_separators, probtree) = glib.random_subtree(tree, alpha, beta)\n\n # plotGraph(subtree, directory+\"subtree_\"+str(i)+\".eps\")\n # for n in subtree.nodes():\n # tree_old.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n # if n in tree.nodes():\n # tree.node[n] = {\"color\": \"blue\", \"label\": tuple(n)}\n\n # plotGraph(tree_old.subgraph(tree_old.nodes()),\n # directory + \"tree(\" + str(i-1) + \")p.eps\")\n\n (_, subtree_nodes, subtree_edges, subtree_adjlist,\n old_separators, prob_subtree) = ss.random_subtree(tree, alpha, beta, i)\n\n (old_cliques,\n new_cliques,\n new_separators,\n P,\n neig) = sample_cond_on_subtree_nodes(i, tree_new, subtree_nodes, subtree_edges, subtree_adjlist)\n\n if only_tree is True:\n return tree_new\n #conn_nodes = set()\n #for clique in new_cliques:\n # conn_nodes |= clique\n\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\"+str(list(n)[0])+\")\"\n # if n in new_cliques:\n # tree.node[n] = {\"color\": \"red\", \"label\": lab}\n # plotGraph(tree.subgraph(tree.nodes()), directory+\"tree(\"+str(i)+\").eps\")\n\n #G = trilearn.graph.junction_tree.get_graph(tree)\n # G.node[i] = {\"color\": \"red\"}\n # for n in old_G:\n # if n in conn_nodes:\n # old_G.node[n] = {\"color\": \"blue\"}\n # G.node[n] = {\"color\": \"blue\"}\n\n # plotGraph(G, directory+\"G\"+str(i)+\".eps\")\n # plotGraph(old_G, directory+\"G\"+str(i-1)+\"p.eps\")\n\n # Proposal kernel\n K_st = None\n if len(subtree_nodes) == 1:\n # There might be two possible subtrees so\n # we calculate the probabilities for these explicitly\n K_st = pdf(tree, tree_new, alpha, beta, i)\n else:\n K_st = prob_subtree\n for c in P:\n K_st *= P[c] * neig[c]\n return tree_new, K_st, old_cliques, old_separators, new_cliques, new_separators", "def test_treewidth_complete_graphs():\n\n def test_kn(size):\n \"\"\"Test on complete graphs.\"\"\"\n graph = Graph()\n for one in range(size):\n for two in range(one + 1, size):\n graph.add_edge(one, two)\n eq_(size-1, graph.approx_treewidth())\n for size in range(2, 6):\n test_kn(size)", "def test_restore_with_subgraph(self):\n subgraph = self._subgraph()\n task = self._remote_task()\n subgraph['id'] = 15\n task['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task])\n assert len(graph.tasks) == 2\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n assert len(subgraphs) == 1\n assert len(remote_tasks) == 1\n\n assert len(subgraphs[0].tasks) == 1\n assert remote_tasks[0].containing_subgraph is subgraphs[0]", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def test_vertex_cover_basic(self):\n G = dnx.chimera_graph(1, 2, 2)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n 
self.vertex_cover_check(G, cover)\n\n G = nx.path_graph(5)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n self.vertex_cover_check(G, cover)\n\n for __ in range(10):\n G = nx.gnp_random_graph(5, .5)\n cover = dnx.min_vertex_cover(G, ExactSolver())\n self.vertex_cover_check(G, cover)", "def test_graphid_construction():\n _ = _ir.GraphId(\"g\")", "def test_hierarchical_register_and_contain(self):\n space = Space()\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo.nested\", categories, shape=2)\n space.register(dim)\n dim = Integer(\"yolo2.nested\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim)\n\n trial = Trial(\n params=[\n {\"name\": \"yolo.nested\", \"value\": [\"asdfa\", 2], \"type\": \"categorical\"},\n {\"name\": \"yolo2.nested\", \"value\": 1, \"type\": \"integer\"},\n {\"name\": \"yolo3\", \"value\": 0.5, \"type\": \"real\"},\n ]\n )\n\n assert \"yolo\" in trial.params\n assert \"nested\" in trial.params[\"yolo\"]\n assert \"yolo2\" in trial.params\n assert \"nested\" in trial.params[\"yolo2\"]\n assert \"yolo3\" in trial.params\n\n assert trial in space", "def create_nodes(self):", "def explore(self, *args):", "def test_graph(self):\n with Graph('g') as graph:\n a = Node('a')\n self.assertEqual(graph.nodes, [a])", "def test_tree_graph_creation(self):\n # There is little to test here other than simple creation\n # Whether it comes out OK or not ... ¯\\_(ツ)_/¯\n model = FairModel(name='Test')\n model.input_data('Loss Magnitude', mean=50, stdev=5)\n model.input_data('Loss Event Frequency', low=10, mode=20, high=30)\n metamodel = FairMetaModel(name='Test Meta', models=[model, model])\n with warnings.catch_warnings(record=False):\n warnings.simplefilter(\"ignore\")\n fvp = FairViolinPlot(metamodel)\n _, _ = fvp.generate_image()", "def test_wp_association_bp(self):\n test_graph = wikipathways_to_bel(WP2359, self.hgnc_manager)\n\n self.assertEqual(type(test_graph), BELGraph, msg='Error with graph type')\n\n self.assertEqual(test_graph.summary_dict()['Number of Nodes'], 2)\n self.assertEqual(test_graph.summary_dict()['Number of Edges'], 1)\n self.assertEqual(count_relations(test_graph)['regulates'], 1)", "def test_case1(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n\n val1 = graph.getStudents(\"supervisor1\")\n val2 = graph.getSupervisors(\"student1\")\n\n expected1 = [\"student1\"]\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((val1,val2),(expected1,expected2))", "def rectangle_graph():\n scaled = scale((200, 200, 200), 2)\n print scaled\n pylon_graph = graph.graph()\n base = rectangle(ORIGIN, WIDTH, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n pylon_graph.connect_neighbours(base_ids, WIDTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n pylon_graph.connect_neighbours(all_ids, WIDTH)\n return pylon_graph", "def test_case2(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n graph.addEdge(\"supervisor2\",\"student4\")\n graph.addEdge(\"supervisor3\",\"student3\")\n\n val1 = graph.getSupervisorDegree(\"supervisor1\")\n\n graph.addEdge(\"supervisor1\",\"student2\")\n\n curr = graph.getSupervisorDegree(\"supervisor1\")\n val2 = graph.getSupervisors(\"student2\")\n expected2 = 
[\"supervisor1\"]\n\n self.assertEqual((curr-1,expected2),(val1,val2))", "def test_branches_and_nodes_troubling():\n traces = tests.troubling_traces\n areas = tests.sample_areas\n snap_threshold = 0.001\n branches, nodes = branches_and_nodes.branches_and_nodes(\n traces, areas, snap_threshold, allowed_loops=10, already_clipped=False\n )\n assert isinstance(branches, gpd.GeoDataFrame)\n assert isinstance(nodes, gpd.GeoDataFrame)", "def test_get_child():\n \n root_ts = TrackSegment(flow_dict=flow_dict) \n \n # ROOT MODULE\n start = root_ts.get_child(\"start\")\n # check depth\n assert(start.depth == root_ts.depth+1)\n # check parent\n assert(start.parent is root_ts)\n # check module_id\n assert(start.module_id == 'start')\n \n # CHILD (1,2,3)\n root_ts_get_child_result = root_ts.get_child((1,2,3))\n # check depth\n assert(root_ts_get_child_result.depth == root_ts.depth+1)\n # check parent\n assert(root_ts_get_child_result.parent is root_ts)\n \n # CHILD (2,3,4)\n root_ts_get_child_result2 = root_ts_get_child_result.get_child((2,3,4))\n # check depth\n assert(root_ts_get_child_result2.depth == root_ts.depth+2)\n # check parent \n assert(root_ts_get_child_result2.parent is root_ts_get_child_result)\n \n print(\"TEST GET_CHILD: success!\")", "def test_generator6(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = xpb.a.b.c.join(b())\n xp2 = xpb.test.join(b())\n xp1_exp = '/a/b/c/base/foo/bar'\n xp2_exp = '/test/base/foo/bar'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def non_pol_neighbours_graph():\n data = pd.read_csv(\"/Users/emg/GitHub/thesis/output/2019_01/1000_residuals_output_utf8.csv\", index_col=0)\n\n labelled = label_subs(data)\n labelled['resid_rank'] = labelled.resid.rank(pct=True)\n top = subset_df(labelled, 'resid', q=0.95)\n\n edges = top.copy()[['source','target','resid']]\n edges_rev = edges.copy()\n edges_rev.columns = ['target','source','resid']\n directed_edges = pd.concat([edges,edges_rev], sort=True)\n directed_edges['resid_rank'] = directed_edges['resid'].rank(pct=True)\n\n df = label_subs(directed_edges)\n\n pol_subs = load_pol_subs()\n pol_names = pol_subs.subreddit.str.replace('\\\\','')\n pol_subs.subreddit=pol_subs.subreddit.str.replace('\\\\','')\n\n pol_neighbours = df[df['source'].isin(pol_names)].sort_values('resid', ascending=False)\n\n top_pol_neigh = pol_neighbours.groupby('source').head(10).sort_values(['source','resid'], ascending=[True,False])\n \n x = top_pol_neigh[~top_pol_neigh.target.isin(pol_names)][['source','target']]\n\n col_dict = pol_subs.set_index('subreddit').col.to_dict()\n for sub in x.target.unique():\n col_dict[sub] = 'gray'\n\n G = nx.from_pandas_edgelist(x)\n nx.set_node_attributes(G, col_dict, 'col')\n\n f = plt.figure(1)\n ax = f.add_subplot(1,1,1)\n\n colors = dict(G.nodes(data='col')).values()\n\n pos = nx.spring_layout(G, k=0.2)\n nx.draw_networkx(G, pos=pos, with_labels=False, node_color=colors, alpha=0.3)\n #nx.draw_networkx_labels(G, pos=pos, with_labels=True)\n\n plt.axis('off')\n f.set_facecolor('w')\n \n f.tight_layout()\n 
plt.savefig(figures_path(f\"{date}/non_pol_neighbours_graph.png\"))\n plt.close()", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def subgraph(self, v):\n\n from pygraph.algorithms.traversal import NodeVisitor, dfs\n\n\n class Visitor(NodeVisitor):\n def __init__(self, result):\n self.result = result\n\n def visit_node(self, dg, v):\n self.result.add_node(v)\n\n def visit_edge(self, dg, v, w):\n self.result.add_edge(v, w)\n\n dg = Digraph()\n visitor = Visitor(dg)\n\n dfs(self, v, visitor)\n return dg", "def test_generator5(self):\n xpb = XPathBuilder()\n xp = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp = b().join(xpb.a.b.c[3])\n exp = '/base/foo/bar/a/b/c[3]'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp.tostring(), exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_generator7(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.foo.bar & xpb.x.y\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() | xpb.c\n xp2 = b() | xpb.d\n xp1_exp = '/foo/bar and /x/y or /c'\n xp2_exp = '/foo/bar and /x/y or /d'\n base_exp = '/foo/bar and /x/y'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_removes_empty_subgraph(self):\n ctx = MockWorkflowContext()\n g = TaskDependencyGraph(ctx)\n\n # sg1 is just empty, no tasks inside it\n sg1 = g.subgraph(ctx)\n # sg2 contains only a NOPTask\n sg2 = g.subgraph(ctx)\n sg2.add_task(tasks.NOPLocalWorkflowTask(ctx))\n\n # sg3 contains sg4, 
which is empty because it only contains a NOPTask\n sg3 = g.subgraph(ctx)\n sg4 = g.subgraph(ctx)\n sg4.add_task(tasks.NOPLocalWorkflowTask(ctx))\n sg3.add_task(sg4)\n\n # sg5 is a subgraph that contains a real task! it is not removed\n sg5 = g.subgraph(ctx)\n real_task = tasks.WorkflowTask(ctx)\n sg5.add_task(real_task)\n\n assert set(g.tasks) > {sg1, sg2, sg3, sg4, sg5, real_task}\n g.optimize()\n assert set(g.tasks) == {sg5, real_task}", "def testExtractSpanningTree(self):\n prevNewick1 = NXNewick().writeString(self.mcTree1)\n # Check a dead-simple spanning tree with 3 closely related leaves.\n spanHCB = self.mcTree1.extractSpanningTree([\"HUMAN\", \"CHIMP\", \"BABOON\"])\n # Check that the existing tree hasn't been modified (OK, a bit\n # silly, but just in case).\n self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)\n # Check the actual spanning tree.\n self.assertEqual(NXNewick().writeString(spanHCB), \"((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.025291,BABOON:0.044568)Anc3;\")\n\n # Now test a more complicated tree, where we should remove as\n # many of the ancestors as possible (they will add extra\n # losses for no reason!).\n spanHCC = self.mcTree1.extractSpanningTree([\"HUMAN\", \"CHIMP\", \"CAT\"])\n self.assertEqual(NXNewick().writeString(self.mcTree1), prevNewick1)\n self.assertEqual(NXNewick().writeString(spanHCC), \"((HUMAN:0.006969,CHIMP:0.009727)Anc7:0.158551,CAT:0.197381)Anc0;\")", "def setUp(self):\n self.G = nx.DiGraph()", "def di_subgraph(self, nbunch):\n\t\t\n\t\t# create new subgraph.\n\t\tg = nx.DiGraph()\n\t\t\n\t\t# add edges and nodes.\n\t\tg.add_edges_from( self.edges(nbunch=nbunch) )\n\t\t\n\t\treturn g", "def visualize(self, branch=None, level=0):\n #print('dog')\n if not branch:\n branch = self.tree\n\n branch = self.tree\n self._visualize_helper(branch, level)\n\n for branch in branch.branches:\n branch.visualize(branch, level+1)", "def test_graph1():\n mol_graph = DGLGraph([(0, 1), (0, 2), (1, 2)])\n node_feats = torch.arange(mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph = get_complete_graph(mol_graph.number_of_nodes())\n atom_pair_feats = torch.arange(complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return mol_graph, node_feats, edge_feats, complete_graph, atom_pair_feats", "def test_case6(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n graph.removeEdge(\"supervisor1\",\"student1\")\n\n val1 = graph.getStudentDegree(\"student1\")\n val2 = graph.getSupervisorDegree(\"supervisor1\")\n\n self.assertEqual((val1,val2),(0,0))", "def test_xcomarg_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1_arg = XComArg(op1, \"test_key\")\n op1_arg.set_downstream(op2, Label(\"Label 1\"))\n op1.set_downstream([op3, op4])\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op1.task_id, op4.task_id) == {}", "def tree():\n nobv.visual_tree()", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n 
Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def test_dag_preserves_superrep(dimension, conversion):\n qobj = conversion(rand_super_bcsz(dimension))\n assert qobj.superrep == qobj.dag().superrep", "def plot_graph(self) -> None:", "def subgraph(self, nodes, relabel_nodes=False, output_device=None):\n raise NotImplementedError(\"subgraph is not implemented yet\")", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def update_edges(subgraph, graph_name, bb):\n top_subgraph = get_top_parent(subgraph, graph_name)\n edges = extract_edges(top_subgraph)\n for edge in edges:\n if(edge.get_style() is not None):\n style = edge.get_style()\n if(edge.get_color() is not None):\n color = edge.get_color()\n if(edge.get_label() is not None):\n label = edge.get_label()\n node_head = edge.get_source()\n node_tail = edge.get_destination()\n bb_head = get_bb(node_head)\n bb_tail = get_bb(node_tail)\n if(bb_head >= bb or bb_tail > bb):\n top_subgraph.del_edge(node_head, node_tail, 0)\n if bb_head >= bb:\n if bb_tail > bb:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), update_edge_node_name\n (node_tail, bb_tail), style=style, color=color, label=label)\n else:\n add_edge(top_subgraph, update_edge_node_name(node_head, bb_head), node_tail, style=style, \n color=color, label=label)\n else:\n add_edge(top_subgraph, node_head, update_edge_node_name(node_tail, bb_tail), \n style=style, color=color, label=label)\n \n #if bb_n < bb and bb_s <= bb, leave it untouched\n #otherwise\n # if bb_n >= bb:\n # if bb_s >= bb:\n # create edge (n+1, s+1)\n # else:\n # create edge (n+1, s)\n # else:\n # if bb_s > bb:\n # create edge (n, s+1)" ]
[ "0.75176567", "0.62960446", "0.60611916", "0.6042744", "0.6038377", "0.5965643", "0.5938931", "0.58985436", "0.587293", "0.5843227", "0.58157665", "0.5796282", "0.5735608", "0.5665891", "0.5604234", "0.5579183", "0.5560783", "0.5559308", "0.5536412", "0.5530756", "0.5525885", "0.548392", "0.5461244", "0.545128", "0.5449947", "0.5442631", "0.542125", "0.54199004", "0.5406626", "0.54005134", "0.53776157", "0.5341316", "0.53378004", "0.53361857", "0.53324676", "0.5327177", "0.5319751", "0.53178716", "0.53124017", "0.530629", "0.53003323", "0.52878445", "0.52822715", "0.52784204", "0.52777195", "0.5276823", "0.5274939", "0.52494884", "0.5248355", "0.5245921", "0.52390796", "0.5235919", "0.52331007", "0.52292764", "0.5226305", "0.5225457", "0.5217939", "0.52007425", "0.52007425", "0.5194335", "0.5191487", "0.51865816", "0.5181808", "0.51718736", "0.51671666", "0.5166562", "0.5164996", "0.5164405", "0.5158946", "0.514659", "0.512748", "0.5127456", "0.5125569", "0.5118922", "0.51178974", "0.5114518", "0.5111858", "0.51114285", "0.5105787", "0.5101167", "0.50931644", "0.5083224", "0.50763416", "0.5074007", "0.5073463", "0.50719976", "0.50641453", "0.5062677", "0.5047206", "0.504715", "0.50459135", "0.5045248", "0.5044492", "0.5042785", "0.50384533", "0.5029401", "0.5027554", "0.50246865", "0.50244147", "0.5022018" ]
0.8505168
0
Test the popxl replication example
def test_documentation_popxl_replication(self):
        filename = "replication.py"
        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_replicate_pg_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def test_replicate_mariadb_to_pg(self):\n # TODO - Real and more complex e2e tests will be added here\n assert True", "def test_11_clone(self):\n # Test basic operation of cloning repo which contains one\n # publisher to repo which contains same publisher\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that packages in dst which are not in src get removed.\n self.pkgsend_bulk(self.durl2, (self.amber30))\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone reports publishers not in the dest repo.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0}\".format(self.dpath2), exit=1)\n\n # Test that clone adds new publishers if requested.\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2\".format(self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\", \n \"index\", \"-x\", \"trans\", self.dpath1,\n self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone removes all packages if source is empty\n self.pkgrecv(self.durl3, \"--clone -d {0}\".format(self.dpath2))\n self.pkgrepo(\"-s {0} list -H -p test2\".format(self.dpath2))\n self.assertEqualDiff(\"\", self.output)\n\n # Test that clone works fine with mulitple publishers\n amber = self.amber10.replace(\"open \", \"open pkg://test2/\")\n self.pkgsend_bulk(self.durl1, amber)\n\n path = os.path.join(self.dpath2, \"publisher/test1\")\n shutil.rmtree(path)\n path = os.path.join(self.dpath2, \"publisher/test2\")\n shutil.rmtree(path)\n self.pkgrecv(self.durl1, \"--clone -d {0} -p test2 -p test1\".format(\n self.dpath2))\n ret = subprocess.call([\"/usr/bin/gdiff\", \"-Naur\", \"-x\",\n \"index\", \"-x\", \"trans\", self.dpath1, self.dpath2])\n self.assertTrue(ret==0)\n\n # Test that clone fails if --raw is specified.\n self.pkgrecv(self.durl1, \"--raw --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -c is specified.\n self.pkgrecv(self.durl1, \"-c /tmp/ --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if -a is specified.\n self.pkgrecv(self.durl1, \"-a --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)\n\n # Test that clone fails if --newest is specified.\n self.pkgrecv(self.durl1, \"--newest --clone -d {0} -p test2\".format(\n self.dpath2), exit=2)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py 
--replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_deployment(self):\n pass", "def test_backup_restore_with_xdcr(self):\n rest_src = RestConnection(self.backupset.cluster_host)\n rest_dest = RestConnection(self.servers[1])\n\n try:\n rest_src.remove_all_replications()\n rest_src.remove_all_remote_clusters()\n kwargs = {}\n if self.input.param(\"enforce_tls\", False):\n kwargs[\"demandEncryption\"] = 1\n trusted_ca = rest_dest.get_trusted_CAs()[-1][\"pem\"]\n kwargs[\"certificate\"] = trusted_ca\n rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port, self.backupset.cluster_host_username,\n self.backupset.cluster_host_password, \"C2\", **kwargs)\n rest_dest.create_bucket(bucket='default', ramQuotaMB=512)\n self.sleep(10)\n repl_id = rest_src.start_replication('continuous', 'default', \"C2\")\n if repl_id is not None:\n self.log.info(\"Replication created successfully\")\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n tasks = self._async_load_all_buckets(self.master, gen, \"create\", 0)\n\n reps = rest_src.get_replications()\n start_time = datetime.datetime.now()\n while reps[0][\"status\"] != \"running\" or reps[0][\"changesLeft\"] > 0:\n if (datetime.datetime.now() - start_time).total_seconds() > 600:\n self.fail(\"Timed out waiting for replications\")\n self.sleep(10, \"Waiting for replication...\")\n reps = rest_src.get_replications()\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\"<=\")\n for task in tasks:\n task.result()\n finally:\n rest_dest.delete_bucket()", "def test_clone_scenario(self):\n pass", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_clone_system(self):\n pass", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_mnist(self):\n filename = \"mnist.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_check_replication_ok(self, mock_timestamp):\n base_url = 'http://localhost:6000/recon/'\n jdata = b'{\"replication_last\": 1493299546.629282, ' \\\n b'\"replication_stats\": {\"no_change\": 0, \"rsync\": 0, ' \\\n b'\"success\": 0, \"failure\": 0, \"attempted\": 0, \"ts_repl\": 0, ' \\\n b'\"remove\": 0, \"remote_merge\": 0, \"diff_capped\": 0, ' \\\n b'\"start\": 1493299546.621624, \"hashmatch\": 0, \"diff\": 0, ' \\\n b'\"empty\": 0}, \"replication_time\": 0.0076580047607421875}'\n pmock_jdata = PropertyMock(return_value=jdata)\n mock_timestamp.return_value = (MagicMock(days=0, seconds=0), 0)\n with patch('urllib.request.urlopen') as mock_urlopen:\n mock_urlopen.return_value = MagicMock(read=pmock_jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result, [(STATUS_OK, 'OK')])", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def 
test_redis_increase_replica_count_usual_case():", "def test_clone_repository(m_check):\n m_check.return_value = 0\n assert clone_repository(\"test\", \"test\", \"test\") == 0", "def replication(self, replication):\n self._replication = replication", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)", "def test_connection_duplication():", "def test_ipcrm():\n IPCComm.ipcrm()", "def test_multihop_intermediate_replica_lifecycle(vo, did_factory, root_account, core_config_mock, caches_mock, metrics_mock):\n src_rse1_name = 'XRD1'\n src_rse1_id = rse_core.get_rse_id(rse=src_rse1_name, vo=vo)\n src_rse2_name = 'XRD2'\n src_rse2_id = rse_core.get_rse_id(rse=src_rse2_name, vo=vo)\n jump_rse_name = 'XRD3'\n jump_rse_id = rse_core.get_rse_id(rse=jump_rse_name, vo=vo)\n dst_rse_name = 'XRD4'\n dst_rse_id = rse_core.get_rse_id(rse=dst_rse_name, vo=vo)\n\n all_rses = [src_rse1_id, src_rse2_id, jump_rse_id, dst_rse_id]\n did = did_factory.upload_test_file(src_rse1_name)\n\n # Copy replica to a second source. To avoid the special case of having a unique last replica, which could be handled in a special (more careful) way\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=src_rse2_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n replica = __wait_for_replica_transfer(dst_rse_id=src_rse2_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rse_core.set_rse_limits(rse_id=jump_rse_id, name='MinFreeSpace', value=1)\n rse_core.set_rse_usage(rse_id=jump_rse_id, source='storage', used=1, free=0)\n try:\n rule_core.add_rule(dids=[did], account=root_account, copies=1, rse_expression=dst_rse_name, grouping='ALL', weight=None, lifetime=3600, locked=False, subscription_id=None)\n\n # Submit transfers to FTS\n # Ensure a replica was created on the intermediary host with epoch tombstone\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n request = request_core.get_request_by_did(rse_id=jump_rse_id, **did)\n assert request['state'] == RequestState.SUBMITTED\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['tombstone'] == datetime(year=1970, month=1, day=1)\n assert replica['state'] == ReplicaState.COPYING\n\n request = request_core.get_request_by_did(rse_id=dst_rse_id, **did)\n # Fake an existing unused source with ranking of 0 for the second source.\n # The ranking of this source should remain at 0 till the end.\n\n @transactional_session\n def __fake_source_ranking(*, session=None):\n models.Source(request_id=request['id'],\n scope=request['scope'],\n name=request['name'],\n rse_id=src_rse2_id,\n dest_rse_id=request['dest_rse_id'],\n ranking=0,\n bytes=request['bytes'],\n url=None,\n is_using=False). 
\\\n save(session=session, flush=False)\n\n __fake_source_ranking()\n\n # The intermediate replica is protected by its state (Copying)\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses=jump_rse_name, exclude_rses=None)\n replica = replica_core.get_replica(rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.COPYING\n\n # Wait for the intermediate replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=jump_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n # ensure that the ranking was correct for all sources and intermediate rses\n assert __get_source(request_id=request['id'], src_rse_id=src_rse1_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=jump_rse_id, **did).ranking == 0\n assert __get_source(request_id=request['id'], src_rse_id=src_rse2_id, **did).ranking == 0\n # Only group_bulk=1 part of the path was submitted.\n # run submitter again to copy from jump rse to destination rse\n submitter(once=True, rses=[{'id': rse_id} for rse_id in all_rses], partition_wait_time=0, transfertype='single', filter_transfertool=None)\n\n # Wait for the destination replica to become ready\n replica = __wait_for_replica_transfer(dst_rse_id=dst_rse_id, **did)\n assert replica['state'] == ReplicaState.AVAILABLE\n\n rucio.daemons.reaper.reaper.REGION.invalidate()\n reaper(once=True, rses=[], include_rses='test_container_xrd=True', exclude_rses=None)\n\n with pytest.raises(ReplicaNotFound):\n replica_core.get_replica(rse_id=jump_rse_id, **did)\n\n # 3 request: copy to second source + 2 hops (each separately)\n # Use inequalities, because there can be left-overs from other tests\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_poller_update_request_state_total', labels={'updated': 'True'}) >= 3\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_common_submit_transfer_total') >= 3\n # at least the failed hop\n assert metrics_mock.get_sample_value('rucio_daemons_conveyor_finisher_handle_requests_total') > 0\n finally:\n\n @transactional_session\n def _cleanup_all_usage_and_limits(rse_id, *, session=None):\n session.query(models.RSELimit).filter_by(rse_id=rse_id).delete()\n session.query(models.RSEUsage).filter_by(rse_id=rse_id, source='storage').delete()\n\n _cleanup_all_usage_and_limits(rse_id=jump_rse_id)", "def test_documentation_popxl_autodiff(self):\n filename = \"autodiff.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def testExecute(self):\n client = ICSClientFactory(self.config, 'slave', 'master') \n\n #Test reading and writing coils\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == False \n assert reply[1] == False\n reply = client.execute(cst.WRITE_SINGLE_COIL, 10, 1, output_value=1)\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == True\n assert reply[1] == False\n reply = client.execute(cst.WRITE_MULTIPLE_COILS, 10, 2,\n output_value=[0,1])\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[1] == True\n assert reply[0] == False\n \n #Test reading and writing input regs\n #reply = client.execute(cst.READ_INPUT_REGISTERS, 30002, 1)\n #print \"BRDEBUG: Reply: \", reply\n #assert reply[0] == 17\n\n #Test reading and setting holding regs\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 2)\n #print \"BRDEBUG: Reply\", reply\n assert reply[0] == 16752 \n assert reply[1] == 0 \n reply = client.execute(cst.WRITE_SINGLE_REGISTER, 40003, 1,\n output_value = 
10)\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 1)\n #print \"BRDEBUG: Reply\", reply\n assert reply[0] == 10", "def empty_test_case():\n # Mirror server\n empty_test_path = os.path.dirname(os.path.realpath(__file__)) + \"/empty.rpl\"\n test_config = {'ROOT_ADDR': '127.0.0.10',\n '_SOCKET_FAMILY': socket.AF_INET}\n return scenario.parse_file(empty_test_path)[0], test_config", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_backup_restore_with_ops(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n initial_gen = copy.deepcopy(gen)\n initial_keys = []\n for x in initial_gen:\n initial_keys.append(x[0])\n self.log.info(\"Start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.log.info(\"Create backup repo \")\n self.backup_create()\n for i in range(1, self.backupset.number_of_backups + 1):\n self._backup_restore_with_ops()\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n\n if self.compact_backup and self.ops_type == \"delete\":\n self.log.info(\"Start to compact backup \")\n self.backup_compact_validate()\n self.log.info(\"Validate deleted keys\")\n self.backup_compact_deleted_keys_validation(initial_keys)\n\n self.log.info(\"start restore cluster \")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n self.backupset.start = start\n self.backupset.end = end\n self._backup_restore_with_ops(backup=False, compare_function=\">=\")\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def replicate(self, source, target, **params):\n replicator = cloudant.replicator.Replication(self.cloudant_client)\n source_db = Database(self.cloudant_client, source)\n target_db = Database(self.cloudant_client, target)\n return replicator.create_replication(source_db, target_db, **params)", "def test_package(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n db = connect()\n engine = db.connect() \n init_db(engine)\n update(engine)\n assert True", "def connect_to_master():", "def test_basic_partial_duplicate(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n for i in range(0, 7):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.add_remote_duplicate_entry(ip)\n\n time.sleep(2)\n\n for i in range(4, 9):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg')\n\n time.sleep(2)\n for i in range(4, 9):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' 
+ str(i)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_check_replication_unknown_valueerror2(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def testExecute(self):\n client = ICSClientFactory(self.config, 'slave', 'master') \n\n #Test reading and writing coils\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == False \n assert reply[1] == False\n reply = client.execute(cst.WRITE_SINGLE_COIL, 10, 1, output_value=1)\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[0] == True\n assert reply[1] == False\n reply = client.execute(cst.WRITE_MULTIPLE_COILS, 10, 2,\n output_value=[0,1])\n reply = client.execute(cst.READ_COILS, 10, 2)\n assert reply[1] == True\n assert reply[0] == False\n \n #Test reading and writing input regs\n #reply = client.execute(cst.READ_INPUT_REGISTERS, 30002, 1)\n #print \"BRDEBUG: Reply: \", reply\n #assert reply[0] == 17\n\n #Test reading and setting holding regs\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 1)\n assert reply[0] == 15.0\n reply = client.execute(cst.WRITE_SINGLE_REGISTER, 40003, 1,\n output_value = 10)\n reply = client.execute(cst.READ_HOLDING_REGISTERS, 40003, 1)\n assert reply[0] == 10.0", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def pure_replicate(self):\n\n from os import mkdir, chdir\n from shutil import copy\n from subprocess import call\n\n # make a folder to store the liquid box move into it and run the insert molecule command.\n mkdir('pure_liquid')\n copy(self.pdb, 'pure_liquid')\n chdir('pure_liquid')\n # run the command into a log file as we need to check the right number of molecule were put in the box.\n with open('log.txt', 'w+')as log:\n call(f'gmx insert-molecules -ci {self.pdb} -box {self.box_length} {self.box_length} {self.box_length} -nmol {self.relicas} -o box.pdb', shell=True, stdout=log)\n\n for line in log:\n if 'Added' in line and 'molecules' in line:\n added = int(line.split()[1])\n if added == self.relicas:\n solvated = True\n break\n else:\n solvated = False\n chdir('../')\n\n # return the success of the function\n # if it fails remove the folder increase the length and call it again till it returns true.\n return solvated", "def tenpar_subset_test():\n model_d = \"ies_10par_xsec\"\n test_d = os.path.join(model_d, \"master_subset_test\")\n template_d = os.path.join(model_d, \"test_template\")\n if not os.path.exists(template_d):\n raise Exception(\"template_d {0} not found\".format(template_d))\n if os.path.exists(test_d):\n shutil.rmtree(test_d)\n # shutil.copytree(base_d,test_d)\n pst = pyemu.Pst(os.path.join(template_d, \"pest.pst\"))\n pst.control_data.noptmax = 3\n\n # first without subset\n pst.pestpp_options = {}\n pst.pestpp_options[\"ies_num_reals\"] = 50\n pst.pestpp_options[\"ies_lambda_mults\"] = \"1.0\"\n pst.pestpp_options[\"ies_accept_phi_fac\"] = 100.0\n pst.write(os.path.join(template_d, \"pest.pst\"))\n pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", num_slaves=10,\n slave_root=model_d, master_dir=test_d)\n df_base = pd.read_csv(os.path.join(test_d, 
\"pest.phi.meas.csv\"),index_col=0)\n\n pst.pestpp_options = {}\n pst.pestpp_options[\"ies_num_reals\"] = 50\n pst.pestpp_options[\"ies_lambda_mults\"] = \"1.0\"\n pst.pestpp_options[\"ies_subset_size\"] = 15\n pst.pestpp_options[\"ies_accept_phi_fac\"] = 100.0\n\n pst.write(os.path.join(template_d, \"pest.pst\"))\n pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", num_slaves=10,\n slave_root=model_d, master_dir=test_d)\n df_sub = pd.read_csv(os.path.join(test_d, \"pest.phi.meas.csv\"),index_col=0)\n diff = (df_sub - df_base).apply(np.abs)\n print(diff.max())\n print(df_sub.iloc[-1,:])\n print(df_base.iloc[-1,:])\n assert diff.max().max() == 0.0", "def test1 (self, testStore, dbh, dbn, dbu, dbp):", "def test_cherrypick_simple(self):\n import evoware.fileutil as F\n \n options = {'i': F.testRoot('targetlist_PCR.xls'), \n 'src': [F.testRoot('primers.xls'), \n F.testRoot('partslist_simple.xls')],\n 'o': 'cherrypicking_simple.gwl',\n 'p': self.f_project,\n 'columns' : ['primer1', 'primer2', 'template']\n }\n self.generictest(options)", "def test_7_recv_multipublisher(self):\n\n # Setup a repository with packages from multiple publishers.\n amber = self.amber10.replace(\"open \", \"open //test2/\")\n self.pkgsend_bulk(self.durl3, amber)\n self.pkgrecv(self.durl1, \"-d {0} amber@1.0 bronze@1.0\".format(\n self.durl3))\n\n # Now attempt to receive from a repository with packages from\n # multiple publishers and verify entry exists only for test1.\n self.pkgrecv(self.durl3, \"-d {0} bronze\".format(self.durl4))\n self.pkgrecv(self.durl3, \"--newest\")\n self.assertNotEqual(self.output.find(\"test1/bronze\"), -1)\n self.assertEqual(self.output.find(\"test2/bronze\"), -1)\n\n # Now retrieve amber, and verify entries exist for both pubs.\n self.wait_repo(self.dcs[4].get_repodir())\n self.wait_repo(self.dcs[3].get_repodir())\n self.pkgrecv(self.durl3, \"-d {0} amber\".format(self.durl4))\n self.pkgrecv(self.durl4, \"--newest\")\n self.assertNotEqual(self.output.find(\"test1/amber\"), -1)\n self.assertNotEqual(self.output.find(\"test2/amber\"), -1)\n\n # Verify attempting to retrieve a non-existent package fails\n # for a multi-publisher repository.\n self.pkgrecv(self.durl3, \"-d {0} nosuchpackage\".format(self.durl4),\n exit=1)", "def test_clone(self):\n mock_query = MagicMock(return_value=\"\")\n with patch(\n \"salt.cloud.clouds.proxmox._get_properties\", MagicMock(return_value=[])\n ), patch(\"salt.cloud.clouds.proxmox.query\", mock_query):\n vm_ = {\n \"technology\": \"qemu\",\n \"name\": \"new2\",\n \"host\": \"myhost\",\n \"clone\": True,\n \"clone_from\": 123,\n }\n\n # CASE 1: Numeric ID\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/myhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}\n\n # CASE 2: host:ID notation\n mock_query.reset_mock()\n vm_[\"clone_from\"] = \"otherhost:123\"\n result = proxmox.create_node(vm_, ANY)\n mock_query.assert_called_once_with(\n \"post\",\n \"nodes/otherhost/qemu/123/clone\",\n {\"newid\": ANY},\n )\n assert result == {}", "def test_documentation_popxl_adv_get_write(self):\n filename = \"tensor_get_write_adv.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_postfix(self):\n test_sensordef = {\n \"kind\": self.test_postfix.get_kind(),\n \"name\": \"Postfix Mailqueue\",\n \"description\": \"Monitors the mailqueue of a postfix server\",\n \"help\": \"Monitors the mailqueue of a postfix server for active, deferred, hold or corrupt 
mail\",\n \"tag\": \"mppostfixsensor\",\n \"fields\": [],\n \"groups\": []\n }\n assert_equal(self.test_postfix.get_sensordef(), test_sensordef)", "def test_return_to_assigned_master(\n mm_failover_master_1_salt_cli,\n mm_failover_master_2_salt_cli,\n salt_mm_failover_minion_1,\n salt_mm_failover_minion_2,\n run_salt_cmds,\n):\n returns = run_salt_cmds(\n [mm_failover_master_1_salt_cli, mm_failover_master_2_salt_cli],\n [salt_mm_failover_minion_1, salt_mm_failover_minion_2],\n )\n\n assert len(returns) == 2\n assert (mm_failover_master_1_salt_cli, salt_mm_failover_minion_1) in returns\n assert (mm_failover_master_2_salt_cli, salt_mm_failover_minion_2) in returns", "def setUp(self):\n cherrypy.request.user = \"test\"\n self.validRSEs = ['rse1', 'rse2']\n msConfig = {'reqmgr2Url': 'http://localhost',\n 'rucioAccount': 'wmcore_mspileup',\n 'rucioUrl': 'http://cms-rucio-int.cern.ch',\n 'rucioAuthUrl': 'https://cms-rucio-auth-int.cern.ch',\n 'mongoDB': 'msPileupDB',\n 'mongoDBCollection': 'msPileupDBCollection',\n 'mongoDBServer': 'mongodb://localhost',\n 'mongoDBReplicaSet': '',\n 'mongoDBUser': None,\n 'mongoDBPassword': None,\n 'validRSEs': self.validRSEs,\n 'mockMongoDB': True}\n self.mgr = MSPileup(msConfig)\n\n self.pname = '/lksjdflksdjf/kljsdklfjsldfj/PREMIX'\n expectedRSEs = self.validRSEs\n fullReplicas = 0\n campaigns = ['c1', 'c2']\n data = {\n 'pileupName': self.pname,\n 'pileupType': 'classic',\n 'expectedRSEs': expectedRSEs,\n 'currentRSEs': expectedRSEs,\n 'fullReplicas': fullReplicas,\n 'campaigns': campaigns,\n 'containerFraction': 0.0,\n 'replicationGrouping': \"ALL\",\n 'active': True,\n 'pileupSize': 0,\n 'ruleIds': []}\n\n obj = MSPileupObj(data, validRSEs=self.validRSEs)\n for key in ['insertTime', 'lastUpdateTime', 'activatedOn', 'deactivatedOn']:\n self.assertNotEqual(obj.data[key], 0)\n self.assertEqual(obj.data['expectedRSEs'], expectedRSEs)\n self.assertEqual(obj.data['fullReplicas'], fullReplicas)\n self.assertEqual(obj.data['campaigns'], campaigns)\n self.doc = obj.getPileupData()", "def test_basic_partial_duplicate(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n for i in range(0, 7):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out2', 'intersite-testsuite-app-epg'))\n self.add_remote_duplicate_entry(ip)\n\n time.sleep(2)\n\n for i in range(4, 9):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg')\n\n time.sleep(2)\n for i in range(4, 9):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' 
+ str(i)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def run(prefix):\n # run_tests.assert_folder_is_empty(prefix=prefix)\n xrs_good,xrs_poor,f_obs,r_free_flags = run_tests.setup_helix_example()\n # pdb_inp = os.path.join(qr_unit_tests,\"data_files\",\"2lvr.pdb\")\n r = run_tests.run_cmd(prefix,\n args = [\"restraints=cctbx\",\"mode=gtest\",\"g_scan=20\",\"g_mode=1\"],\n pdb_name = 'm00_poor.pdb', mtz_name='')\n assert os.path.isfile('1-20.npy')", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def test_basic_duplicate(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg'))\n self.add_remote_duplicate_entry(ip)\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg')\n\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote', 'l3out', 'intersite-testsuite-app-epg'))", "def test_initialization_of_TCRsubset_alpha_beta_case_plus_motif_finding():\n import pytest\n import pandas as pd\n from tcrregex.subset import TCRsubset\n from tcrregex.tests.my_test_subset import dist_a_subset, dist_b_subset, clone_df_subset \n from tcrregex.cdr3_motif import TCRMotif\n\n assert isinstance(dist_a_subset, pd.DataFrame)\n assert isinstance(dist_b_subset, pd.DataFrame)\n assert isinstance(clone_df_subset, pd.DataFrame)\n df = clone_df_subset.iloc[0:20, :].copy()\n db = dist_b_subset.iloc[0:20, 0:20]\n da = dist_a_subset.iloc[0:20, 0:20]\n ts=TCRsubset(clone_df = df, \n organism = \"mouse\",\n epitopes = [\"PA\"] ,\n epitope = \"PA\",\n chains = [\"A\",\"B\"],\n dist_a = da,\n dist_b = db)\n motif_df = ts.find_motif()\n assert isinstance(motif_df, pd.DataFrame)\n assert isinstance(ts.motif_df, pd.DataFrame)", "def test_restore_backup():", "def test_basic_deletion(self):\n args = self.get_args()\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config = self.create_config_file()\n\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n # Create the \"stale\" entry on the remote site\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n tag = IntersiteTag('intersite-testsuite', 'app', 'epg', 'Site1')\n remote_tenant = Tenant('intersite-testsuite')\n remote_l3out = OutsideL3('l3out', remote_tenant)\n remote_epg = OutsideEPG('intersite-testsuite-app-epg', remote_l3out)\n remote_ep = OutsideNetwork(ip, remote_epg)\n remote_ep.ip = ip + '/32'\n remote_tenant.push_to_apic(site2)\n\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_backup_restore_misc(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n 
self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backupset.name = \"!@#$%^&\"\n output, error = self.backup_create()\n self.assertTrue(\"Backup `!@#$%^` created successfully\" in output[0],\n \"Backup could not be created with special characters\")\n self.log.info(\"Backup created with special characters\")\n self.backupset.name = \"backup\"\n self.backup_create()\n self.backup_cluster()\n conn = RemoteMachineShellConnection(self.backupset.backup_host)\n command = \"ls -tr {0}/{1}/{2} | tail\".format(self.backupset.directory, self.backupset.name, self.backups[0])\n o, e = conn.execute_command(command)\n data_dir = o[0]\n conn.execute_command(\"dd if=/dev/zero of=/tmp/entbackup/backup/\" +\n str(self.backups[0]) +\n \"/\" + data_dir + \"/data/shard_0.sqlite\" +\n \" bs=1024 count=100 seek=10 conv=notrunc\")\n output, error = self.backup_restore()\n self.assertTrue(\"Restore failed due to an internal issue, see logs for details\" in output[-1],\n \"Expected error not thrown when file is corrupt\")\n self.log.info(\"Expected error thrown when file is corrupted\")\n conn.execute_command(\"mv /tmp/entbackup/backup /tmp/entbackup/backup2\")\n conn.disconnect()\n output, error = self.backup_restore()\n self.assertTrue(\"Backup Repository `backup` not found\" in output[-1], \"Expected error message not thrown\")\n self.log.info(\"Expected error message thrown\")", "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def test_basic_duplicate(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.add_remote_duplicate_entry(ip)\n\n time.sleep(2)\n self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg')\n mac2 = '00:11:22:33:33:44'\n ip2 = '3.4.3.44'\n self.add_endpoint(mac2, ip2, 'intersite-testsuite-local', 'app', 'epg')\n\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite-remote',\n 'l3out1', 'intersite-testsuite-app-epg'))\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out2', 'intersite-testsuite-app-epg'))\n 
self.assertTrue(self.verify_remote_site_has_entry(mac2, ip2, 'intersite-testsuite-remote',\n 'l3out2', 'intersite-testsuite-app-epg'))", "def test_base_replica_repair_with_contention(self):\n self._base_replica_repair_test(fail_mv_lock=True)", "def test_get_genome_4(self):\n self.tkt1.data_add = set([\"subcluster\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"A2\")", "def _standby_clone():\n # manually:\n # $ mkdir -p /var/lib/postgresql/9.1/testscluster/\n # $ rsync -avz --rsh='ssh -p2222' root@12.34.56.789:/var/lib/postgresql/9.1/testscluster/ /var/lib/postgresql/9.1/testscluster/\n\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n puts(green('Start cloning the master'))\n repmgr_clone_command = 'repmgr -D %(slave_pgdata_path)s -d %(sync_db)s -p %(cluster_port)s -U %(sync_user)s -R postgres --verbose standby clone %(pgmaster_ip)s' % env\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n res = sudo(repmgr_clone_command, user='postgres')\n if 'Can not connect to the remote host' in res or 'Connection to database failed' in res:\n puts(\"-\" * 40)\n puts(green(repmgr_clone_command))\n puts(\"-\" * 40)\n puts(\"Master server is %s reachable.\" % red(\"NOT\"))\n puts(\"%s you can try to CLONE the slave manually [%s]:\" % (green(\"BUT\"), red(\"at your own risk\")))\n puts(\"On the slave server:\")\n puts(\"$ sudo -u postgres rsync -avz --rsh='ssh -p%(master_ssh_port)s' postgres@%(pgmaster_ip)s:%(master_pgdata_path)s %(slave_pgdata_path)s --exclude=pg_xlog* --exclude=pg_control --exclude=*.pid\" % env)\n puts(\"Here:\")\n puts(\"$ fab <cluster_task_name> finish_configuring_slave\")\n abort(\"STOP...\")", "def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. 
Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def test_catchup_store(self):\n cluster = HaCluster(self, 2)\n sn = cluster[0].connect().session()\n s1 = sn.sender(\"q1;{create:always,node:{durable:true}}\")\n for m in [\"foo\",\"bar\"]: s1.send(Message(m, durable=True))\n s2 = sn.sender(\"q2;{create:always,node:{durable:true}}\")\n sk2 = sn.sender(\"ex/k2;{create:always,node:{type:topic, durable:true, x-declare:{type:'direct'}, x-bindings:[{exchange:ex,key:k2,queue:q2}]}}\")\n sk2.send(Message(\"hello\", durable=True))\n # Wait for backup to catch up.\n cluster[1].assert_browse_backup(\"q1\", [\"foo\",\"bar\"]) \n cluster[1].assert_browse_backup(\"q2\", [\"hello\"])\n\n # Make changes that the backup doesn't see\n cluster.kill(1, promote_next=False)\n r1 = cluster[0].connect().session().receiver(\"q1\")\n for m in [\"foo\", \"bar\"]: self.assertEqual(r1.fetch().content, m)\n r1.session.acknowledge()\n for m in [\"x\",\"y\",\"z\"]: s1.send(Message(m, durable=True))\n # Use old connection to unbind\n us = cluster[0].connect_old().session(str(uuid4()))\n us.exchange_unbind(exchange=\"ex\", binding_key=\"k2\", queue=\"q2\")\n us.exchange_bind(exchange=\"ex\", binding_key=\"k1\", queue=\"q1\")\n # Restart both brokers from store to get inconsistent sequence numbering.\n cluster.bounce(0, promote_next=False)\n cluster[0].promote()\n cluster[0].wait_status(\"active\")\n cluster.restart(1)\n cluster[1].wait_status(\"ready\")\n\n # Verify state\n cluster[0].assert_browse(\"q1\", [\"x\",\"y\",\"z\"])\n cluster[1].assert_browse_backup(\"q1\", [\"x\",\"y\",\"z\"])\n sn = cluster[0].connect().session() # FIXME aconway 2012-09-25: should fail over!\n sn.sender(\"ex/k1\").send(\"boo\")\n cluster[0].assert_browse_backup(\"q1\", [\"x\",\"y\",\"z\", \"boo\"])\n cluster[1].assert_browse_backup(\"q1\", [\"x\",\"y\",\"z\", \"boo\"])\n sn.sender(\"ex/k2\").send(\"hoo\") # q2 was unbound so this should be dropped.\n sn.sender(\"q2\").send(\"end\") # mark the end of the queue for assert_browse\n cluster[0].assert_browse(\"q2\", [\"hello\", \"end\"])\n cluster[1].assert_browse_backup(\"q2\", [\"hello\", \"end\"])", "def test_single_repotest(self):\n self.Mokes.add_repo_to_pi()\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit/2\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n self.assertEqual(len(index.select('div.alert')), 0, \"There is no error\")\n self.assertEqual(index.select('dd[aria-label=\"Coverage\"]')[0].text, \"99.85\")\n self.assertEqual(index.select('dd[aria-label=\"Text Count\"]')[0].text, \"636/637\")\n self.assertEqual(index.select('dd[aria-label=\"Metadata Count\"]')[0].text, \"718/720\")\n self.assertEqual(index.select('dd[aria-label=\"Citation Nodes\"]')[0].text, \"113179\")\n self.assertEqual(index.select('dd[aria-label=\"Words in eng\"]')[0].text, \"125\")\n self.assertEqual(index.select('dd[aria-label=\"Words in lat\"]')[0].text, \"1050\")\n self.assertEqual(index.select('dd[aria-label=\"Words in ger\"]')[0].text, \"1088\")\n self.assertEqual(index.select('dd[aria-label=\"Travis Build\"] a')[0][\"href\"],\n \"https://travis-ci.org/PerseusDl/canonical-latinLit/builds/216262555\")\n self.assertEqual(index.select('dd[aria-label=\"Github Comment\"] a')[0][\"href\"],\n \"https://github.com/PerseusDL/canonical-latinLit/commit/7d3d6a0b62f0d244b684843\"\n 
\"c7546906d742013fd#all_commit_comments\")\n self.assertEqual(\n len(index.select(\"div.dl-horizontal.card div.left.success\")), 1,\n \"Success class should be applied\"\n )\n\n self.Mokes.make_new_latinLit_test(session=self.db.session, coverage=75.01)\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit/3\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n self.assertEqual(\n len(index.select(\"div.dl-horizontal.card div.left.acceptable\")), 1,\n \"Acceptable class should be applied\"\n )\n\n self.Mokes.make_new_latinLit_test(session=self.db.session, coverage=74.99)\n response = self.client.get(\"/repo/PerseusDl/canonical-latinLit/4\").data.decode()\n index = BeautifulSoup(response, 'html.parser')\n self.assertEqual(\n len(index.select(\"div.dl-horizontal.card div.left.failed\")), 1,\n \"Failure class should be applied\"\n )", "def subdDuplicateAndConnect(*args, **kwargs)->None:\n pass", "def test_ipcs():\n IPCComm.ipcs()", "def test_generate_sample_sheet(self):\n pass", "def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_original_samplesheet(self):\n run_dir = 'data/nanopore_data/run2/done_sequencing/20200102_1412_MN19414_AAU642_68125dc2'\n run = MinIONqc(run_dir, None, None)\n run._get_anglerfish_samplesheet()\n expected_sample_sheet = 'data/nanopore_samplesheets/2020/QC_SQK-LSK109_AAU642_Samplesheet_22-594126.csv'\n self.assertEqual(run.lims_samplesheet, expected_sample_sheet)", "def test_make_pop(self, pop_size, cell_number, microcell_number):\n for i in [0, 1]:\n pe.Parameters.instance().use_ages = i\n # Population is initialised with no households\n pop_params = {\"population_size\": pop_size,\n \"cell_number\": cell_number,\n \"microcell_number\": microcell_number}\n test_pop = ToyPopulationFactory.make_pop(pop_params)\n\n total_people = 0\n count_non_empty_cells = 0\n for cell in test_pop.cells:\n for microcell in cell.microcells:\n total_people += len(microcell.persons)\n if len(cell.persons) > 0:\n count_non_empty_cells += 1\n # Test there is at least one non-empty cell\n self.assertTrue(count_non_empty_cells >= 1)\n # Test that everyone in the population has been assigned a\n # microcell\n self.assertEqual(total_people, pop_size)\n\n # Test a population class object is returned\n self.assertIsInstance(test_pop, pe.Population)", "def test_13_output(self):\n\n # Now attempt to receive from a repository.\n self.pkgrepo(\"create {0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"-d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Clean up for next test.\n shutil.rmtree(self.tempdir)\n\n # Now attempt to receive from a repository to a package archive.\n self.pkgrecv(self.dpath1, \"-a -d {0} -n -v \\*\".format(self.tempdir))\n expected = \"\"\"\\\nArchiving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Now attempt to clone a repository.\n self.pkgrepo(\"create 
{0}\".format(self.tempdir))\n self.pkgrecv(self.dpath1, \"--clone -d {0} -p \\* -n -v\" \\\n .format(self.tempdir))\n expected = \"\"\"\\\nRetrieving packages (dry-run) ...\n Packages to add: 9\n Files to retrieve: 17\nEstimated transfer size: 528.00 B\n\"\"\"\n self.assert_(expected in self.output, self.output)\n for s in self.published:\n self.assert_(fmri.PkgFmri(s).get_fmri(anarchy=True,\n include_scheme=False) in self.output)\n\n # Test that output is correct if -n is not specified.\n self.pkgrecv(self.dpath1, \"-d {0} -v \\*\".format(self.tempdir))\n self.assert_(\"dry-run\" not in self.output)", "def test_backup_merge(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self._take_n_backups(n=self.backupset.number_of_backups)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n \"\"\" remove last 6 chars of offset time in backup name\"\"\"\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n if backup_name in self.backups:\n backup_count += 1\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n self.assertEqual(backup_count, len(self.backups), \"Initial number of backups did not match\")\n self.log.info(\"Initial number of backups matched\")\n self.backupset.start = randrange(1, self.backupset.number_of_backups)\n self.backupset.end = randrange(self.backupset.start + 1, self.backupset.number_of_backups + 1)\n status, output, message = self.backup_merge(check_for_panic=True)\n if not status:\n self.fail(message)\n status, output, message = self.backup_list()\n if not status:\n self.fail(message)\n backup_count = 0\n if output and output[0]:\n bk_info = json.loads(output[0])\n bk_info = bk_info[\"repos\"][0]\n else:\n return False, \"No output content\"\n if bk_info[\"backups\"]:\n for i in range(0, len(bk_info[\"backups\"])):\n backup_name = bk_info[\"backups\"][i][\"date\"]\n if self.debug_logs:\n print(\"backup name \", backup_name)\n print(\"backup set \", self.backups)\n backup_count += 1\n if backup_name in self.backups:\n self.log.info(\"{0} matched in info command output\".format(backup_name))\n else:\n self.fail(\"Didn't expect backup date {0} from the info command output\" \\\n \" to be in self.backups (the list of exepected backup dates\" \\\n \" after a merge)\".format(backup_name))\n\n self.assertEqual(backup_count, len(self.backups), \"Merged number of backups did not match\")\n self.log.info(\"Merged number of backups matched\")", "def run(self):\n try:\n source_conn = self.setup_source_db()\n target_conn = self.setup_target_db()\n target_collection = 'oplog' + self._replica_set\n\n while True:\n try:\n # Induce an operation on the replication test database\n db_name = 'ReplTest_' + self._replica_set.upper()\n source_conn[db_name]['operation'].replace_one({'replica': self._replica_set}, {\n 'replica': self._replica_set, 'ts': int(time.time())}, upsert=True)\n\n # Wait a bit for it to replicate\n time.sleep(10)\n\n # check latest oplog of source\n entry = source_conn['local'][\n 'oplog.rs'].find().sort('$natural', -1).limit(1)\n source_oplog = 
entry[0]['ts'].time\n\n # get latest oplog from connector target oplog collection\n entry = target_conn['__mongo_connector'][\n target_collection].find().sort('_ts', -1).limit(1)\n target_oplog = entry[0]['_ts'] >> 32\n\n lag = source_oplog - target_oplog\n self._stat_client.gauge(self._lag_key, lag)\n\n time.sleep(self._poll_interval)\n except Exception as ex:\n logger.exception('Connection Failed, retrying..')\n time.sleep(5)\n\n except Exception as ex:\n logger.exception('Critical Error, bailing out..')", "def test_backup_restore_sanity(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self.log.info(\"*** start to load items to all buckets\")\n self._load_all_buckets(self.master, gen, \"create\", self.expires)\n self.log.info(\"*** done to load items to all buckets\")\n self.ops_type = self.input.param(\"ops-type\", \"update\")\n self.expected_error = self.input.param(\"expected_error\", None)\n if self.auto_failover:\n self.log.info(\"Enabling auto failover on \" + str(self.backupset.cluster_host))\n rest_conn = RestConnection(self.backupset.cluster_host)\n rest_conn.update_autofailover_settings(self.auto_failover, self.auto_failover_timeout)\n self.backup_create_validate()\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.ops_type == \"update\":\n self.log.info(\"*** start to update items in all buckets\")\n self._load_all_buckets(self.master, gen, \"update\", self.expires)\n self.log.info(\"*** done update items in all buckets\")\n elif self.ops_type == \"delete\":\n self.log.info(\"*** start to delete items in all buckets\")\n self._load_all_buckets(self.master, gen, \"delete\", self.expires)\n self.log.info(\"*** done to delete items in all buckets\")\n self.sleep(10)\n self.log.info(\"*** start to validate backup cluster\")\n self.backup_cluster_validate()\n self.targetMaster = True\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n self.log.info(\"*** start to restore cluster\")\n restored = {\"{0}/{1}\".format(start, end): \"\"}\n for i in range(1, self.backupset.number_of_backups + 1):\n if self.reset_restore_cluster:\n self.log.info(\"\\n*** start to reset cluster\")\n self.backup_reset_clusters(self.cluster_to_restore)\n cmd_init = 'node-init'\n if self.same_cluster:\n self.log.info(\"Same cluster\")\n self._initialize_nodes(Cluster(), self.servers[:self.nodes_init])\n if self.hostname and self.master.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.master.ip\n shell = RemoteMachineShellConnection(self.master)\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init,\n options=options,\n cluster_host=\"localhost\",\n user=self.master.rest_username,\n password=self.master.rest_password)\n shell.disconnect()\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n else:\n self.log.info(\"Different cluster\")\n shell = RemoteMachineShellConnection(self.backupset.restore_cluster_host)\n shell.enable_diag_eval_on_non_local_hosts()\n rest = RestConnection(self.backupset.restore_cluster_host)\n rest.force_eject_node()\n rest.init_node()\n if self.hostname and self.backupset.restore_cluster_host.ip.endswith(\".com\"):\n options = '--node-init-hostname ' + self.backupset.restore_cluster_host.ip\n output, _ = shell.execute_couchbase_cli(cli_command=cmd_init, options=options,\n 
cluster_host=\"localhost\",\n user=self.backupset.restore_cluster_host.rest_username,\n password=self.backupset.restore_cluster_host.rest_password)\n if not self._check_output(\"SUCCESS: Node initialize\", output):\n raise(\"Failed to set hostname\")\n shell.disconnect()\n self.log.info(\"\\n*** Done reset cluster\")\n self.sleep(10)\n\n \"\"\" Add built-in user cbadminbucket to second cluster \"\"\"\n self.add_built_in_server_user(node=self.input.clusters[0][:self.nodes_init][0])\n\n self.backupset.start = start\n self.backupset.end = end\n self.log.info(\"*** start restore validation\")\n self.backup_restore_validate(compare_uuid=False,\n seqno_compare_function=\">=\",\n expected_error=self.expected_error)\n if self.backupset.number_of_backups == 1:\n continue\n while \"{0}/{1}\".format(start, end) in restored:\n start = randrange(1, self.backupset.number_of_backups + 1)\n if start == self.backupset.number_of_backups:\n end = start\n else:\n end = randrange(start, self.backupset.number_of_backups + 1)\n restored[\"{0}/{1}\".format(start, end)] = \"\"", "def test_get_backup_command(self):\n mb = self.maria_backup\n mock = mock_open(read_data='')\n with patch('builtins.open', mock): # skip reading the port list section\n self.assertEqual(mb.get_backup_cmd('test_dir'),\n ['xtrabackup', '--backup',\n '--target-dir', 'test_dir/test',\n '--datadir', '/srv/sqldata',\n '--socket', '/run/mysqld/mysqld.sock'])", "def test_master(self):\n m = self.d.master(4242)\n self.assertEqual(len(m.tracklist), 4)", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n 
time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def testEveryNodeRepliesWithNoFaultyNodes(looper, client1, replied1):\n\n def chk():\n receivedReplies = getRepliesFromClientInbox(client1.inBox,\n replied1.reqId)\n print(receivedReplies)\n assert len(receivedReplies) == nodeCount\n\n looper.run(eventually(chk))", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def test_setting_state_parallel(self):\n no_replicates = 25\n\n replicate(experiment, no_replicates, parallel=True, no_processes=2)\n for i in range(no_replicates):\n self.assertIn('result', state[SUBSTATE_KEY_PATTERN % i])\n self.assertEqual(state[SUBSTATE_KEY_PATTERN % i]['result'], \"bla\")", "def test_seed_initial_population():\n # Test if a initial population can be read in from CSV\n i_population = owp.seed_initial_population('initial_populations.csv')\n # Test if a new population can be generated with i_population as the base\n pop_size = 30\n population = sga.generate_population(pop_size, i_population)\n assert type(i_population) is list\n assert len(population) == pop_size", "def test_execute_deployment(self):\n pass", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def quickstart():\n snmp.quickstart()\n return 0", "def test_get_genome_2(self):\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")", "def test_clone_repository(koan, assert_cloned_repo_exists):\n koan.shell('')", "def test_03_publisher(self):\n\n robj = publisher.Repository(\n collection_type=publisher.REPO_CTYPE_SUPPLEMENTAL,\n description=\"Provides only the best BobCat packages!\",\n legal_uris=[\n \"http://legal1.example.com\",\n \"http://legal2.example.com\"\n ],\n mirrors=[\n \"http://mirror1.example.com/\",\n \"http://mirror2.example.com/\"\n ],\n name=\"First Repository\",\n origins=[\n \"http://origin1.example.com/\",\n \"http://origin2.example.com/\"\n ],\n refresh_seconds=70000,\n registered=True,\n registration_uri=\"http://register.example.com/\",\n related_uris=[\n \"http://related1.example.com\",\n \"http://related2.example.com\"\n ],\n sort_policy=publisher.URI_SORT_PRIORITY,\n )\n\n r2obj = copy.copy(robj)\n r2obj.origins = [\"http://origin3.example.com\"]\n r2obj.name = \"Second Repository\"\n r2obj.reset_mirrors()\n\n pprops = {\n \"alias\": \"cat\",\n \"client_uuid\": \"2c6a8ff8-20e5-11de-a818-001fd0979039\",\n \"disabled\": True,\n \"meta_root\": os.path.join(self.test_root, \"bobcat\"),\n \"repository\": r2obj,\n }\n\n # Check that all properties can be set at construction time.\n pobj = publisher.Publisher(\"bobcat\", **pprops)\n\n # Verify that all properties provided at construction time were\n # set as 
expected.\n for p in pprops:\n self.assertEqual(pprops[p], getattr(pobj, p))\n\n # Verify that a copy matches its original.\n cpobj = copy.copy(pobj)\n for p in pprops:\n if p == \"repository\":\n # These attributes can't be directly compared.\n continue\n self.assertEqual(getattr(pobj, p), getattr(cpobj, p))\n\n # Assume that if the origins match, we have the right selected\n # repository.\n self.assertEqual(cpobj.repository.origins,\n r2obj.origins)\n\n # Compare the source_object_id of the copied repository object\n # with the id of the source repository object.\n self.assertEqual(id(pobj), cpobj._source_object_id)\n\n cpobj = None\n\n # Verify that individual properties can be set.\n pobj = publisher.Publisher(\"tomcat\")\n pobj.prefix = \"bobcat\"\n self.assertEqual(pobj.prefix, \"bobcat\")\n\n for p in pprops:\n if p == \"repositories\":\n for r in pprops[p]:\n pobj.add_repository(r)\n else:\n setattr(pobj, p, pprops[p])\n self.assertEqual(getattr(pobj, p), pprops[p])\n\n pobj.repository = robj\n self.assertEqual(pobj.repository, robj)\n\n # An invalid value shouldn't be allowed.\n self.assertRaises(api_errors.UnknownRepository, setattr,\n pobj, \"repository\", -1)\n\n pobj.reset_client_uuid()\n self.assertNotEqual(pobj.client_uuid, None)\n self.assertNotEqual(pobj.client_uuid, pprops[\"client_uuid\"])\n\n pobj.create_meta_root()\n self.assertTrue(os.path.exists(pobj.meta_root))\n\n pobj.remove_meta_root()\n self.assertFalse(os.path.exists(pobj.meta_root))", "def test_crossrefbot_2(self, mock_method):\n r_id = 27634736\n ec = Client()\n with open('data/trialpubs_rtrial_5.csv', 'rb') as csvfile:\n spamreader = csv.reader(csvfile)\n for row in spamreader:\n article = ec.efetch(db='pubmed', id=row[1])\n for a in article:\n crud.pubmedarticle_to_db(a, 'trial_publications')\n crud.publication_trial(row[1], row[0], 1)\n article = ec.efetch(db='pubmed', id=r_id)\n for i, a in enumerate(article):\n crud.pubmedarticle_to_db(a, 'systematic_reviews')\n pmids = {24491689, 23741057, 15265849, 12409541, 26673558, 23616602, 21080835, 21444883, 21931078, 26984864,\n 26857383, 25131977, 23680885, 21080836, 9921604, 22433752, 21187258, 21315441, 26560249, 25286913,\n 18342224, 12598066, 20176990, 25921522, 21906250, 26874388, 20562255, 18794390, 27207191}\n content = collections.namedtuple('ids', ['pmids', 'nctids'])\n mock_method.return_value = content(list(pmids), None)\n bot.check_citations(r_id)\n conn = psycopg2.connect(**self.postgresql.dsn())\n cursor = conn.cursor()\n cursor.execute(\"SELECT DISTINCT trialpub_id from review_trialpubs where review_id = %s;\", (r_id,))\n db_ids = cursor.fetchall()\n pmids1 = set([int(pmid[0]) for pmid in db_ids])\n self.assertEqual(pmids1, {21315441, 25131977, 25286913, 26560249, 26857383})\n cursor.execute(\"SELECT DISTINCT nct_id from review_rtrial where review_id = %s;\", (r_id,))\n db_ids = cursor.fetchall()\n conn.close()\n nct_ids_1 = set([nct[0] for nct in db_ids])\n self.assertEqual(nct_ids_1, {'NCT00531661', 'NCT00538356', 'NCT00531661', 'NCT01360203'})", "def test_basic_multiple_duplicate(self):\n args = self.get_args()\n config = self.create_config_file()\n self.write_config_file(config, args)\n execute_tool(args, test_mode=True)\n\n for i in range(0, 5):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' 
+ str(i)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out', 'intersite-testsuite-app-epg'))\n self.add_remote_duplicate_entry(ip)\n\n time.sleep(2)\n\n for i in range(0, 5):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.add_endpoint(mac, ip, 'intersite-testsuite-local', 'app', 'epg')\n\n time.sleep(2)\n for i in range(0, 5):\n mac = '00:11:22:33:33:3' + str(i)\n ip = '3.4.3.' + str(i)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite-remote',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_notebook():\n jupyter_notebooks = os.getenv('PYNQ_JUPYTER_NOTEBOOKS')\n\n # Try and find the notebook\n if os.path.isdir(f\"{jupyter_notebooks}/pynq-helloworld\"):\n if os.path.isfile(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\"): \n result = run_notebook(f\"{jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld notebook, expecting it at {jupyter_notebooks}/pynq-helloworld/resizer_pl.ipynb\")\n else:\n raise CannotFindNotebook(f\"unable to locate the helloworld directory, expecting it at {jupyter_notebooks}/pynq-helloworld\")", "def runtest(self):", "def test_get_subgrid_snapshots():\n subgrid_snapshots = get_subgrid_snapshots(nTime=200)\n\n subgrid_snapshots_corr = np.load('./preprocessing/tests/test_data/\\\nsubgrid_snapshots.npy')\n\n assert (subgrid_snapshots == subgrid_snapshots_corr).all()", "def test_cbrestoremgr_should_not_change_replica_count_in_restore_bucket(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=10000)\n if not self.new_replicas:\n self.fail(\"This test needs to pass param 'new-replicas' to run\")\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n self.log.info(\"Start backup cluster\")\n self.backup_cluster_validate()\n self.backup_restore_validate()\n\n self.log.info(\"replicas from backup bucket: {0}\".format(self.num_replicas))\n self.log.info(\"replica in restore bucket should be {0} after restore\"\\\n .format(self.new_replicas))\n rest_r = RestConnection(self.backupset.restore_cluster_host)\n for bucket in self.buckets:\n bucket_stats = rest_r.get_bucket_json(bucket.name)\n if self.new_replicas != bucket_stats[\"replicaNumber\"]:\n self.fail(\"replia number in bucket {0} did change after restore\"\\\n .format(bucket.name))\n self.log.info(\"Verified replica in bucket {0}: {1}\"\\\n .format(bucket.name,\n bucket_stats[\"replicaNumber\"]))", "def test_documentation_popxl_call_with_info(self):\n filename = \"call_with_info.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def c_test_population_function(self, function):\r\n return 1", "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_TCRrep_gamma_delta():\n # sant.csv was generated from Sant et al. 2019\n # Using\n # 1. sant_et_all_clean_stables.py and \n # 2. 
clean.py \n df = pd.read_csv(os.path.join('tcrdist','test_files_compact','sant.csv'), sep = \",\")\n tr = TCRrep(cell_df = df, organism = \"human\", chains = ['gamma','delta'], db_file='gammadelta_db.tsv') \n\n tr.infer_cdrs_from_v_gene(chain = 'gamma', imgt_aligned=True)\n tr.infer_cdrs_from_v_gene(chain = 'delta', imgt_aligned=True)\n\n tr.index_cols = ['clone_id', 'subject', 'v_g_gene', \"v_d_gene\", \n 'cdr3_g_aa', 'cdr3_d_aa',\n 'cdr1_g_aa', 'cdr2_g_aa', 'pmhc_g_aa',\n 'cdr1_d_aa', 'cdr2_d_aa', 'pmhc_d_aa']\n\n tr.deduplicate()\n tr.clone_df\n\n # AttributeError: 'TCRrep' object has no attribute '_tcrdist_legacy_method_gamma_delta'\n tr._tcrdist_legacy_method_gamma_delta()", "def run_UQ_sample(config,**args):\n update_environment(args)\n with_config(config)\n execute(put_configs,config)\n job(dict(script='run_UQ_sample', job_wall_time='0:15:0', memory='2G'),args)", "def test_calls(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n ex.nreps = nreps\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"name\", m, n, \"X_%d\" % idx, m, \"Y\", m, \"Z\", n], cmds)" ]
[ "0.6849515", "0.5858241", "0.567002", "0.5621691", "0.5603418", "0.5590965", "0.55826133", "0.55337054", "0.55278075", "0.5499861", "0.5477127", "0.539578", "0.53619903", "0.5334775", "0.5324242", "0.53177917", "0.53116417", "0.53056175", "0.52885664", "0.5232968", "0.5212479", "0.518361", "0.5143064", "0.51113", "0.50958025", "0.5082429", "0.500379", "0.5002982", "0.4990406", "0.49888358", "0.49705225", "0.4943006", "0.4935964", "0.49313387", "0.4912955", "0.49086156", "0.49036604", "0.48971668", "0.48851386", "0.48774794", "0.487055", "0.48579472", "0.48473096", "0.48421592", "0.48294148", "0.4827458", "0.48261", "0.48249656", "0.481571", "0.4809564", "0.48077482", "0.48012403", "0.4800839", "0.47968394", "0.47866544", "0.47838303", "0.47711164", "0.47684988", "0.4764695", "0.4755708", "0.47548333", "0.47518173", "0.47517842", "0.47323915", "0.47297686", "0.47257552", "0.47209194", "0.47206122", "0.47170967", "0.471481", "0.46942738", "0.46940008", "0.46926028", "0.4692231", "0.46883085", "0.46882662", "0.46804425", "0.4675823", "0.46675903", "0.46667168", "0.46633548", "0.46626103", "0.46620715", "0.46614206", "0.46587196", "0.46575195", "0.46571454", "0.46532333", "0.46495724", "0.46490064", "0.4643782", "0.46434027", "0.46376005", "0.4626197", "0.461858", "0.46155638", "0.46115053", "0.46073925", "0.46068594", "0.460432" ]
0.8200137
0
Test the popxl create multiple subgraphs example
def test_documentation_popxl_create_multi_subgraph(self):
        filename = "create_multi_graphs_from_same_func.py"
        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
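The document above only invokes the shipped example file; the contents of create_multi_graphs_from_same_func.py are not included in this record. As a minimal sketch of what such an example typically looks like with the public PopXL API (the increment_fn name, tensor value, and dtype below are illustrative assumptions, not the file's actual contents):

import popxl
import popxl.ops as ops

# A minimal sketch, assuming the public PopXL API: build two independent
# subgraphs from the same Python function and call each from the main graph.
ir = popxl.Ir()

def increment_fn(x: popxl.Tensor) -> popxl.Tensor:
    return x + 1  # hypothetical body; any tensor computation works here

with ir.main_graph:
    x = popxl.variable(1, popxl.float32)
    # Each create_graph call produces a distinct Graph in the IR,
    # even though both are built from the same Python function.
    graph_a = ir.create_graph(increment_fn, x)
    graph_b = ir.create_graph(increment_fn, x)
    y, = ops.call(graph_a, x)
    z, = ops.call(graph_b, y)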
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def populate_graph(self):", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def create_nodes(self):", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def gen_graph(self):", "def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = 
Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, 
{4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert 
pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def graph(self):\n ...", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def generate(self):\n self.graph_repl = self.master.graph_repl", "def main(dot_file):\n global SUBGRAPHS, PARENTS\n graph = graph_from_dot(dot_file)\n SUBGRAPHS = {}\n PARENTS = {}\n extract_subgraphs([graph])\n \n for (name, subgraph) in SUBGRAPHS.items():\n nodes = extract_nodes(subgraph)\n for node in nodes:\n (name_function, result, function_call_line) = analyse_label_function_calls(node)\n if name_function is not None:\n (label_node1, label_node2, bb) = create_labels(node, result, function_call_line)\n node.set_label(label_node1)\n nodes_to_update = get_nodes_to_update(subgraph, graph.get_name())\n update_nodes(nodes_to_update, bb)\n nodes.append(create_new_node(subgraph, node, label_node2, bb))\n update_edges(subgraph, graph.get_name(), bb)\n create_new_edge(graph, node.get_name(), SUBGRAPHS[name_function])\n recreate_subgraphs_name()\n export_graph(graph, \"main_output\", \"png\")\n export_graph(graph, \"main_output\", 
\"dot\")\n return graph", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def create_four_subplots():\n pass", "def sub_graph_merging(self):\n raise NotImplementedError()", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n 
Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def test_build_poset_lattice():\n lattice = build_poset_lattice(all_games_gen(2))\n assert len(lattice.edges()) == 36", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))", "def generate_pristine_graphene(x_dim, y_dim, filename1):\n y_number = round(y_dim / 1.228)\n x_number = int(x_dim / 2.127)\n x_addition = (x_dim / 2.127 ) % 1\n list_of_coords = []\n a = 0\n b = 0\n c = 0\n list_of_coords = fill_row(list_of_coords, y_number, a,b,c, [], 5, prev = False)\n for i in range(1,x_number):\n if (i == x_number-1):\n if (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n fill_hexagon(list_of_coords, -1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n if (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, b, c, [0, 1, 3, 4, 5], full=6, prev=False)\n elif (i % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (i % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n list_x_steps = [0, 0.33, 0.66, 1]\n x_step = min(list_x_steps, key=lambda x:abs(x-x_addition))\n if (x_step == 0.33):\n list_of_coords = fill_row(list_of_coords, y_number, 0, 0, 0, [], 6, prev = False)\n fill_hexagon(list_of_coords, y_number*1.228, 0, 0, [0, 1, 2, 3, 4], full=6, prev=False)\n elif (x_step == 0.66):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [2], 6, prev = False)\n elif (x_step == 1):\n if (x_number % 2 == 1):\n a += 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = True)\n elif (x_number % 2 == 0):\n a -= 1.228\n b += 2.127\n list_of_coords = fill_row(list_of_coords, y_number, a, b, c, [], 6, prev = False)\n writepdb3(list_of_coords, filename1)\n print('done.')\n return list_of_coords", "def generateGraph(mids, chaptersField, labelsField):\n output = \"digraph G { \\n\"\n # On ne traite que les chapitres qui ont actives le graphe\n chapts = chapters.graphChapters()\n # le dico nodes contient une liste pour chaque chapitre. 
Chaque liste\n # contient tous les neuds (un par note) presents dans ce chapitre, et\n # representes par des tuples (noteId, label)\n nodes = {}\n for mid in mids:\n chapterField = chaptersField[mid]\n labelField = labelsField[mid]\n for id, flds in mw.col.db.execute(\"\"\"\n SELECT id, flds FROM notes WHERE mid=%d\n \"\"\" % mid):\n fields = splitFields(flds)\n chapter = fields[chapterField]\n if not chapter in chapts:\n continue\n label = fields[labelField]\n if(not chapter in nodes):\n nodes[chapter] = []\n nodes[chapter].append((id, label))\n # On genere les noeuds, dans des clusters (un par chapitre)\n notes = []\n for chap in nodes:\n output += \"\"\"subgraph cluster_%d {\n node [style=filled];\n label = \"%s\";\n color=blue;\n \"\"\" % (chapts[chap], chap)\n for n in nodes[chap]:\n output += \"\"\"n%d [label=\"%s\", URL=\"%d\"];\\n\"\"\" % (n[0], n[1], n[0])\n notes.append(n)\n output += \"\"\"\n }\\n\"\"\"\n # Puis on ajoute tous les liens ..\n for n in notes:\n for nid in mw.col.db.execute(\"\"\"SELECT N.noteId FROM `PATH.links` AS L\n JOIN `PATH.match` AS M ON M.id = L.matchId\n JOIN `PATH.nodes` AS N ON M.nodeId = N.id\n WHERE L.noteId = %d\"\"\" % (n[0])):\n output += \"\"\"n%d -> n%d;\\n\"\"\" % (nid[0], n[0])\n output += \"}\"\n generateGraphImage(output)", "def main(\n num_sampled=[3, 3],\n max_depth=2,\n num_iters=1000,\n do_graph=False,\n # These are for checking stats on smaller data\n subsample=False,\n plot=False,\n # Generates a random matrix for comparison\n random=False,\n # Visualise the connection matrix\n vis_connect=False,\n subsample_vis=False,\n # Generate final graphs\n final=False,\n # Analyse\n analyse=False,\n only_exp=False,\n # Which regions are considered here\n # A_name, B_name = \"MOp\", \"SSP-ll\"\n A_name=\"VISp\",\n B_name=\"VISl\",\n desired_depth=1,\n desired_samples=79,\n):\n np.random.seed(42)\n\n if random:\n AB, BA, AA, BB = gen_random_matrix(150, 50, 0, 0.04, 0, 0.0)\n matrix_vis(AB, BA, AA, BB, 10, name=\"test_vis.png\")\n\n os.makedirs(os.path.dirname(pickle_loc), exist_ok=True)\n convert_mouse_data(A_name, B_name)\n to_use = [True, True, True, True]\n mc, args_dict = load_matrix_data(to_use, A_name, B_name)\n print(\"{} - {}, {} - {}\".format(A_name, B_name, mc.num_a, mc.num_b))\n\n result = {}\n result[\"matrix_stats\"] = print_args_dict(args_dict, out=False)\n\n if only_exp:\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict)\n mpf_val = [\n mpf_res[\"expected\"],\n mpf_res[\"expected\"] / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n to_add = np.mean(graph_res[\"full_results\"][\"Connections\"].values)\n graph_val = [\n to_add,\n to_add / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n return mpf_val, graph_val\n return mpf_val, None\n\n # Convert to a pickle\n # if not os.path.isfile(pickle_loc):\n # print(\"Converting matrix\")\n # gc.collect()\n # mc.create_connections()\n # print(\"Finished conversion\")\n # graph = mc.graph\n # to_write = [mc.num_a, mc.num_b]\n # del mc\n # gc.collect()\n\n # handle_pickle(graph, \"graph.pickle\", \"w\")\n # 
handle_pickle(reverse(graph), \"r_graph.pickle\", \"w\")\n # handle_pickle(to_write, \"graph_size.pickle\", \"w\")\n\n if vis_connect:\n if subsample_vis:\n print(\"Plotting subsampled matrix vis\")\n new_mc = mc.subsample(int(mc.num_a / 10), int(mc.num_b / 10))\n matrix_vis(\n new_mc.ab,\n new_mc.ba,\n new_mc.aa,\n new_mc.bb,\n 15,\n name=\"mc_mat_vis_sub10.pdf\",\n )\n else:\n o_name = \"mc_mat_vis_{}_to_{}.pdf\".format(A_name, B_name)\n print(\"Plotting full matrix vis\")\n matrix_vis(mc.ab, mc.ba, mc.aa, mc.bb, 150, name=o_name)\n print(\"done vis\")\n\n print(mc, print_args_dict(args_dict, out=False))\n\n result = None\n if subsample:\n result = check_stats(mc, 1000, 1, 20000, 1, plot)\n if final:\n result = {}\n\n # For different depths and number of samples\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n ns_2 = [ns] * 2\n mpf_res = mpf_connectome(mc, ns_2, depth, args_dict)\n result[\"mpf_{}_{}\".format(depth, ns)] = mpf_res\n\n # Save this for plotting\n cols = [\"Number of samples\", \"Proportion of connections\", \"Max distance\"]\n depth_name = [None, \"Direct synapse\", \"Two synapses\", \"Three synapses\"]\n vals = []\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n this = result[\"mpf_{}_{}\".format(depth, ns)]\n val = [ns, this[\"expected\"] / ns, depth_name[depth]]\n vals.append(val)\n df = pd.DataFrame(vals, columns=cols)\n os.makedirs(os.path.join(here, \"..\", \"results\"), exist_ok=True)\n df.to_csv(\n os.path.join(\n here, \"..\", \"results\", \"{}_to_{}_depth.csv\".format(A_name, B_name)\n ),\n index=False,\n )\n\n cols = [\"Number of sampled connected neurons\", \"Probability\"]\n total_pmf = result[\"mpf_{}_{}\".format(desired_depth, desired_samples)][\"total\"]\n vals = []\n for k, v in total_pmf.items():\n vals.append([k, float(v)])\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_{}_{}.csv\".format(\n A_name, B_name, desired_depth, desired_samples\n ),\n ),\n index=False,\n )\n if analyse:\n result = {}\n result[\"matrix_stats\"] = args_dict\n\n mpf_res = mpf_connectome(\n mc,\n num_sampled,\n max_depth,\n args_dict,\n clt_start=30,\n sr=None,\n mean_estimate=True,\n )\n result[\"mean\"] = mpf_res\n\n vals = []\n cols = [\"Number of connected neurons\", \"Probability\", \"Calculation\"]\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Mean estimation\"])\n\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict, clt_start=30)\n result[\"mpf\"] = mpf_res\n\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Statistical estimation\"])\n\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n\n result[\"difference\"] = (\n dist_difference(mpf_res[\"total\"], graph_res[\"dist\"]),\n )\n result[\"graph\"] = graph_res\n\n for k, v in graph_res[\"dist\"].items():\n vals.append([k, float(v), \"Monte Carlo simulation\"])\n\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_final_{}_{}.csv\".format(\n A_name, B_name, max_depth, num_sampled[0]\n ),\n ),\n index=False,\n )\n\n if result is not None:\n with 
open(os.path.join(here, \"..\", \"results\", \"mouse.txt\"), \"w\") as f:\n pprint(result, width=120, stream=f)\n\n return result", "def test_extract_graph(default_plugin_resolver):\n dpr = default_plugin_resolver\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n desired_nodes = {2, 5, 6}\n nx_extracted_graph = nx.Graph()\n nx_extracted_graph.add_weighted_edges_from([(2, 5, 5), (5, 6, 10), (6, 2, 11)])\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n desired_nodes_wrapped = dpr.wrappers.NodeSet.PythonNodeSet(desired_nodes)\n extracted_graph = dpr.wrappers.Graph.NetworkXGraph(nx_extracted_graph)\n MultiVerify(\n dpr, \"subgraph.extract_subgraph\", graph, desired_nodes_wrapped\n ).assert_equals(extracted_graph)", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def test_generator8(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = (xpb.foo.bar | xpb.x.y).parenthesize()\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() & xpb.c\n xp2 = b() & xpb.d\n xp1_exp = '(/foo/bar or /x/y) and /c'\n xp2_exp = '(/foo/bar or /x/y) and /d'\n base_exp = '(/foo/bar or /x/y)'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def __init__(self, firstParent, secondParent):\n CrossOver.__init__(self, \"Group Point CrossOver\", firstParent, secondParent)", "def test_graph1():\n mol_graph = DGLGraph([(0, 1), (0, 2), (1, 2)])\n node_feats = torch.arange(mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph = get_complete_graph(mol_graph.number_of_nodes())\n atom_pair_feats = torch.arange(complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return mol_graph, node_feats, edge_feats, complete_graph, atom_pair_feats", 
"def generate(self, diagram):", "def plot_graphic_file_flow_subgrid(n_sg=3, savefig=\"png\"):\n\n # initialize graph\n # (note: can't do dictionary for nodes like plot_graphic_model.py, because\n # we need to exert more control over which things are lined up)\n graph = Digraph(node_attr={\"shape\": \"box\"})\n\n # first layer\n with graph.subgraph() as sg:\n sg.attr(rank=\"same\")\n sg.node(\"spec\", \"SpecGrid\")\n sg.node(\"phot\", \"phot\")\n sg.node(\"fake\", \"phot_fake\")\n\n # additional layers\n sg1 = Digraph(node_attr={\"shape\": \"box\"})\n sg2 = Digraph(node_attr={\"shape\": \"box\"})\n sg3 = Digraph(node_attr={\"shape\": \"box\"})\n sg4 = Digraph(node_attr={\"shape\": \"box\"})\n sg5 = Digraph(node_attr={\"shape\": \"box\"})\n sg6 = Digraph(node_attr={\"shape\": \"box\"})\n _ = [x.attr(rank=\"same\") for x in [sg1, sg2, sg3, sg4, sg5, sg6]]\n\n # place phot/fake\n # sg1.node('phot','phot')\n # sg1.node('fake','phot_fake')\n\n # items in last layer\n sg6.node(\"stats\", \"stats_all\")\n sg6.node(\"pdf1d\", \"pdf1d_all\")\n sg6.node(\"pdf2d\", \"pdf2d_all\")\n sg6.node(\"lnp\", \"lnp_all\")\n\n # initialize dict of edges\n edges = defaultdict(list)\n # initialize dict of invisible edges\n # these are used to force the order:\n # https://stackoverflow.com/questions/44274518/how-can-i-control-within-level-node-order-in-graphvizs-dot/44274606\n edges_invis = defaultdict(list)\n\n # iterate through subgrids\n for s in range(n_sg):\n\n curr_sg = f\"SG{s}\"\n\n # -- nodes --\n sg1.node(f\"spec{s}\", f\"SpecGrid_{curr_sg}\")\n sg2.node(f\"sed{s}\", f\"SEDgrid_{curr_sg}\")\n sg2.node(f\"obs{s}\", f\"obsmodel_{curr_sg}\")\n sg3.node(f\"sed{s}t\", f\"SEDgrid_{curr_sg}_trim\")\n sg3.node(f\"obs{s}t\", f\"obsmodel_{curr_sg}_trim\")\n sg4.node(f\"lnps_{s}\", f\"likelihoods\\nfor {curr_sg}\")\n sg5.node(f\"stat{s}\", f\"stats_{curr_sg}\")\n sg5.node(f\"pdf1d{s}\", f\"pdf1d_{curr_sg}\")\n sg5.node(f\"pdf2d{s}\", f\"pdf2d_{curr_sg}\")\n sg5.node(f\"lnp{s}\", f\"lnp_{curr_sg}\")\n\n # -- edges --\n # spec to spec subgrid\n edges[\"spec\"].append(f\"spec{s}\")\n # spec subgrid to SED subgrid\n edges[f\"spec{s}\"].append(f\"sed{s}\")\n # phot_fake to obsmodel\n edges[\"fake\"].append(f\"obs{s}\")\n # SED to trimmed SED\n edges[f\"sed{s}\"].append(f\"sed{s}t\")\n # obsmodel to trimmed obsmodel\n edges[f\"obs{s}\"].append(f\"obs{s}t\")\n # photometry + trimmed SED + trimmed obsmodel to likelihoods\n edges[\"phot\"].append(f\"lnps_{s}\")\n edges[f\"sed{s}t\"].append(f\"lnps_{s}\")\n edges[f\"obs{s}t\"].append(f\"lnps_{s}\")\n # likelihoods to output files\n edges[f\"lnps_{s}\"] += [f\"stat{s}\", f\"pdf1d{s}\", f\"pdf2d{s}\", f\"lnp{s}\"]\n # output files to combined files\n edges[f\"stat{s}\"].append(\"stats_all\")\n edges[f\"pdf1d{s}\"].append(\"pdf1d_all\")\n edges[f\"pdf2d{s}\"].append(\"pdf2d_all\")\n edges[f\"lnp{s}\"].append(\"lnp_all\")\n\n # -- invisible edges --\n # group subgrids together\n edges_invis[f\"sed{s}\"].append(f\"obs{s}\")\n edges_invis[f\"sed{s}t\"].append(f\"obs{s}t\")\n if s < n_sg - 1:\n edges_invis[f\"obs{s}\"].append(f\"sed{s+1}\")\n edges_invis[f\"obs{s}t\"].append(f\"sed{s+1}t\")\n edges_invis[f\"lnps_{s}\"].append(f\"lnps_{s+1}\")\n # output files\n edges_invis[f\"stat{s}\"].append(f\"pdf1d{s}\")\n edges_invis[f\"pdf1d{s}\"].append(f\"pdf2d{s}\")\n edges_invis[f\"pdf2d{s}\"].append(f\"lnp{s}\")\n\n # add the edges\n for ckey in edges.keys():\n for cval in edges[ckey]:\n if \"phot\" in ckey:\n graph.edge(ckey, cval, color=\"#FDB62D\")\n elif \"fake\" in ckey or \"obs\" in 
ckey:\n graph.edge(ckey, cval, color=\"#D7576B\")\n elif \"spec\" in ckey or \"sed\" in ckey:\n graph.edge(ckey, cval, color=\"#4A02A0\")\n else:\n graph.edge(ckey, cval)\n # add invisible edges (to force ordering)\n for ckey in edges_invis.keys():\n for cval in edges_invis[ckey]:\n graph.edge(ckey, cval, style=\"invis\")\n\n # append subgraphs\n graph.subgraph(sg1)\n graph.subgraph(sg2)\n graph.subgraph(sg3)\n graph.subgraph(sg4)\n graph.subgraph(sg5)\n\n # make the graph flow horizontally\n graph.graph_attr[\"rankdir\"] = \"LR\"\n\n # save it\n graph.render(\"beast-graphic-file-flow-subgrid\", format=savefig)", "def test_setup(self):\n self.setup()\n print(\"Nodes in graph\")\n for node in self.graph.graph.nodes:\n print(node)\n print(\"Edges in graph\")\n for edge in self.graph.graph.edges(data=True):\n print(edge)", "def test_hierarchical_register_and_contain(self):\n space = Space()\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo.nested\", categories, shape=2)\n space.register(dim)\n dim = Integer(\"yolo2.nested\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim)\n\n trial = Trial(\n params=[\n {\"name\": \"yolo.nested\", \"value\": [\"asdfa\", 2], \"type\": \"categorical\"},\n {\"name\": \"yolo2.nested\", \"value\": 1, \"type\": \"integer\"},\n {\"name\": \"yolo3\", \"value\": 0.5, \"type\": \"real\"},\n ]\n )\n\n assert \"yolo\" in trial.params\n assert \"nested\" in trial.params[\"yolo\"]\n assert \"yolo2\" in trial.params\n assert \"nested\" in trial.params[\"yolo2\"]\n assert \"yolo3\" in trial.params\n\n assert trial in space", "def test_tree_graph_creation(self):\n # There is little to test here other than simple creation\n # Whether it comes out OK or not ... 
¯\\_(ツ)_/¯\n model = FairModel(name='Test')\n model.input_data('Loss Magnitude', mean=50, stdev=5)\n model.input_data('Loss Event Frequency', low=10, mode=20, high=30)\n metamodel = FairMetaModel(name='Test Meta', models=[model, model])\n with warnings.catch_warnings(record=False):\n warnings.simplefilter(\"ignore\")\n fvp = FairViolinPlot(metamodel)\n _, _ = fvp.generate_image()", "def _test_multiple(self, fin_graphzip, fin_insts, T, n=None):\n for t in range(T+1)[1:]:\n self._test_graphzip_subgen(fin_graphzip, fin_insts, n)", "def test_tree_splay() -> None:\n t = generate_graph_resources(5)\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_2\", \"ds_2\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_3\", \"ds_3\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_4\", \"ds_4\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).references.append(\n (FieldAddress(\"dr_5\", \"ds_5\", \"f1\"), \"to\")\n )\n field(t, (\"dr_1\", \"ds_1\", \"f1\")).identity = \"email\"\n traversal = Traversal(DatasetGraph(*t), {\"email\": \"X\"})\n\n assert incoming_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(\n FieldAddress(\"__ROOT__\", \"__ROOT__\", \"email\"),\n FieldAddress(\"dr_1\", \"ds_1\", \"f1\"),\n )\n }\n assert outgoing_edges(traversal, CollectionAddress(\"dr_1\", \"ds_1\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\")),\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\")),\n }\n\n assert outgoing_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == set()\n assert incoming_edges(traversal, CollectionAddress(\"dr_2\", \"ds_2\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_2\", \"ds_2\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_3\", \"ds_3\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_3\", \"ds_3\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_4\", \"ds_4\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_4\", \"ds_4\", \"f1\"))\n }\n assert incoming_edges(traversal, CollectionAddress(\"dr_5\", \"ds_5\")) == {\n Edge(FieldAddress(\"dr_1\", \"ds_1\", \"f1\"), FieldAddress(\"dr_5\", \"ds_5\", \"f1\"))\n }\n traversal_map, terminators = traversal.traversal_map()\n assert traversal_map == {\n \"__ROOT__:__ROOT__\": {\"from\": {}, \"to\": {\"dr_1:ds_1\": {\"email -> f1\"}}},\n \"dr_1:ds_1\": {\n \"from\": {\"__ROOT__:__ROOT__\": {\"email -> f1\"}},\n \"to\": {\n \"dr_2:ds_2\": {\"f1 -> f1\"},\n \"dr_3:ds_3\": {\"f1 -> f1\"},\n \"dr_4:ds_4\": {\"f1 -> f1\"},\n \"dr_5:ds_5\": {\"f1 -> f1\"},\n },\n },\n \"dr_2:ds_2\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_3:ds_3\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_4:ds_4\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n \"dr_5:ds_5\": {\"from\": {\"dr_1:ds_1\": {\"f1 -> f1\"}}, \"to\": {}},\n }\n\n assert set(terminators) == {\n CollectionAddress(\"dr_2\", \"ds_2\"),\n CollectionAddress(\"dr_3\", \"ds_3\"),\n CollectionAddress(\"dr_4\", \"ds_4\"),\n CollectionAddress(\"dr_5\", \"ds_5\"),\n }", "def 
sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def test_plot_graphs(self):\n\n # Graphs who are not embedded, i.e., have no coordinates.\n COORDS_NO = {\n 'Graph',\n 'BarabasiAlbert',\n 'ErdosRenyi',\n 'FullConnected',\n 'RandomRegular',\n 'StochasticBlockModel',\n }\n\n # Coordinates are not in 2D or 3D.\n COORDS_WRONG_DIM = {'ImgPatches'}\n\n Gs = []\n for classname in set(graphs.__all__) - COORDS_NO - COORDS_WRONG_DIM:\n Graph = getattr(graphs, classname)\n\n # Classes who require parameters.\n if classname == 'NNGraph':\n Xin = np.arange(90).reshape(30, 3)\n Gs.append(Graph(Xin))\n elif classname in ['ImgPatches', 'Grid2dImgPatches']:\n Gs.append(Graph(img=self._img, patch_shape=(3, 3)))\n elif classname == 'LineGraph':\n Gs.append(Graph(graphs.Sensor(20, seed=42)))\n else:\n Gs.append(Graph())\n\n # Add more test cases.\n if classname == 'TwoMoons':\n Gs.append(Graph(moontype='standard'))\n Gs.append(Graph(moontype='synthesized'))\n elif classname == 'Cube':\n Gs.append(Graph(nb_dim=2))\n Gs.append(Graph(nb_dim=3))\n elif classname == 'DavidSensorNet':\n Gs.append(Graph(N=64))\n Gs.append(Graph(N=500))\n Gs.append(Graph(N=128))\n\n for G in Gs:\n self.assertTrue(hasattr(G, 'coords'))\n self.assertEqual(G.N, G.coords.shape[0])\n\n signal = np.arange(G.N) + 0.3\n\n G.plot(backend='pyqtgraph')\n G.plot(backend='matplotlib')\n G.plot(signal, backend='pyqtgraph')\n G.plot(signal, backend='matplotlib')\n plotting.close_all()", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def test_Tree():", "def test07_add_type_triples(self):\n r = LDPRS('http://ex.org/abc')\n g = Graph()\n r.add_type_triples(g)\n self.assertEqual(len(g), 2)", "def main():\n GRAPH = lambda_graph()\n GRAPH.save_graph(\"pylon\")\n meshName = \"pylon.mesh\"\n cmd = \"./population/linuxShow \"+meshName\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n process.communicate()\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()", "def test_generator6(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = xpb.a.b.c.join(b())\n xp2 = xpb.test.join(b())\n xp1_exp = '/a/b/c/base/foo/bar'\n xp2_exp = '/test/base/foo/bar'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * 
len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def create_data_sample(self) -> DataNode:\n root = DataNode(\"Grandpa\")\n\n node1 = DataNode(\"Node1\", parent=root)\n node2 = DataNode(\"Node2\", parent=root)\n node3 = DataNode(\"Node3\", parent=root)\n\n child1_1 = DataNode(\"Child1_1\", parent=node1)\n child1_2 = DataNode(\"Child1_2\", parent=node1)\n\n child2_1 = DataNode(\"Child2_1\", parent=node2)\n child2_2 = DataNode(\"Child2_2\", parent=node2)\n child2_3 = DataNode(\"Child2_3\", parent=node2)\n\n child3_1 = DataNode(\"Child3_1\", parent=node3)\n\n grandchild1_1_1 = DataNode(\"Grandchild1_1_1\", parent=child1_1)\n grandchild1_1_2 = DataNode(\"Grandchild1_1_2\", parent=child1_1)\n\n grandchild1_2_1 = DataNode(\"Grandchild1_2_1\", parent=child1_2)\n\n grandchild2_1_1 = DataNode(\"Grandchild2_1_1\", parent=child2_1)\n grandchild2_1_2 = DataNode(\"Grandchild2_1_2\", parent=child2_1)\n grandchild2_1_3 = DataNode(\"Grandchild2_1_3\", parent=child2_1)\n\n grandchild2_2_1 = DataNode(\"Grandchild2_2_1\", parent=child2_2)\n grandchild2_2_2 = DataNode(\"Grandchild2_2_2\", parent=child2_2)\n\n grandchild2_3_1 = DataNode(\"Grandchild2_3_1\", parent=child2_3)\n grandchild2_3_2 = DataNode(\"Grandchild2_3_2\", parent=child2_3)\n grandchild2_3_3 = DataNode(\"Grandchild2_3_3\", parent=child2_3)\n grandchild2_3_4 = DataNode(\"Grandchild2_3_4\", parent=child2_3)\n\n grandchild3_1_1 = DataNode(\"Grandchild3_1_1\", parent=child3_1)\n grandchild3_1_2 = DataNode(\"Grandchild3_1_2\", parent=child3_1)\n grandchild3_1_3 = DataNode(\"Grandchild3_1_3\", parent=child3_1)\n grandchild3_1_4 = DataNode(\"Grandchild3_1_4\", parent=child3_1)\n grandchild3_1_5 = DataNode(\"Grandchild3_1_5\", parent=child3_1)\n\n return root", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def _build_graph(self):\n pass", "def testGraphExtract(self):\n graph = Graph2()\n graph.parseFile(TESTFILE)", "def test_multiple_task_groups_dag(\n self, test_multiple_taskgroups_dag, multiple_taskgroups_dag_expected_edges\n ):\n (\n dag,\n group1,\n group2,\n group3,\n (\n group1_emp1,\n group1_emp2,\n group1_emp3,\n group2_emp1,\n group2_emp2,\n group2_emp3,\n group2_op1,\n group2_op2,\n group3_emp1,\n group3_emp2,\n group3_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n ) = test_multiple_taskgroups_dag\n\n group1_emp1 >> Label(\"label group1.group1_emp1 <=> group1.group1_emp2\") >> group1_emp3\n\n emp_in1 >> group1\n emp_in2 >> Label(\"label emp_in2 <=> group1\") >> 
group1\n [emp_in3, emp_in4] >> Label(\"label emp_in3/emp_in4 <=> group1\") >> group1\n XComArg(op_in1, \"test_key\") >> Label(\"label op_in1 <=> group1\") >> group1\n\n (\n [group2_emp1, group2_emp2]\n >> Label(\"label group2.group2_emp1/group2.group2_emp2 <=> group2.group2_emp3\")\n >> group2_emp3\n )\n (\n group2_emp1\n >> Label(\"label group2.group2_emp1 <=> group2.group2_emp2/group2.group2_emp3\")\n >> [group2_emp2, group2_emp3]\n )\n group2_emp3 >> Label(\"label group2.group2_emp3 <=> group3\") >> group3\n\n (\n XComArg(group2_op1, \"test_key\")\n >> Label(\"label group2.group2_op1 <=> group2.group2_op2\")\n >> XComArg(group2_op2, \"test_key\")\n )\n XComArg(group2_op2, \"test_key\") >> Label(\"label group2.group2_op2 <=> group3\") >> group3\n\n group3 >> emp_out1\n group3 >> Label(\"label group3 <=> emp_out2\") >> emp_out2\n group3 >> Label(\"label group3 <=> emp_out3/emp_out4\") >> [emp_out3, emp_out4]\n group3 >> Label(\"label group3 <=> op_out1\") >> XComArg(op_out1, \"test_key\")\n\n group1 >> Label(\"label group1 <=> group2\") >> group2\n\n compare_dag_edges(dag_edges(dag), multiple_taskgroups_dag_expected_edges)", "def test_generator7(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.foo.bar & xpb.x.y\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() | xpb.c\n xp2 = b() | xpb.d\n xp1_exp = '/foo/bar and /x/y or /c'\n xp2_exp = '/foo/bar and /x/y or /d'\n base_exp = '/foo/bar and /x/y'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def explore(self, *args):", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def test_treewidth_complete_graphs():\n\n def test_kn(size):\n \"\"\"Test on complete graphs.\"\"\"\n graph = Graph()\n for one in range(size):\n for two in range(one + 1, size):\n graph.add_edge(one, two)\n eq_(size-1, graph.approx_treewidth())\n for size in range(2, 6):\n test_kn(size)", "def __init__(self, prefix, downstream, upstream, root):\n super(SubGraph, self).__init__(prefix, downstream, upstream, root)", "def test_multiple_triples(self):\n self.graph.add((artis, RDF.type, zoo))\n self.graph.add((artis, RDF.type, org))\n self.graph.add((berlin_zoo, RDF.type, zoo))\n self.assertEquals(len(list(self.graph.triples((None, None, None)))), 3)\n\n self.assertEquals(len(list(self.graph.triples((artis, None, None)))), 2)\n 
self.assertEquals(len(list(self.graph.triples((None, RDF.type, None)))), 3)\n self.assertEquals(len(list(self.graph.triples((None, None, zoo)))), 2)\n self.assertEquals(len(list(self.graph.triples((None, None, org)))), 1)", "def testFlatSeries(self):\n self.AddToChart(self.chart, [5, 5, 5])\n self.assertEqual(self.Param('chd'), 's:AAA')\n self.chart.left.min = 0\n self.chart.left.max = 5\n self.assertEqual(self.Param('chd'), 's:999')\n self.chart.left.min = 5\n self.chart.left.max = 15\n self.assertEqual(self.Param('chd'), 's:AAA')", "def main():\n filenames = sys.argv[1]\n fdir = sys.argv[2]\n filenames = filenames.split(',')\n\n # print (filenames)\n graph = PGraph(fdir, filenames, \"Multi-Source Foraging\")\n # graph = PGraph(fdir, filenames, \"Cooperative Transport\")\n # graph = PGraph(fdir, filenames, \"Nest Maintenance\")\n # graph = PGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n graph.gen_plot()\n\n # box = BoxGraph(fdir, filenames, \"Single-Source Foraging\")\n # box = BoxGraph(fdir, filenames, False, (-1, 100), \"Multi-Source Foraging\")\n box = BoxGraph(fdir, filenames, False, (-1, 120), \"Nest Maintenance with Handcoded behaviors\")\n # box = BoxGraph(\n # fdir, filenames, \"Nest Maintenance \\n with \\n Handcoded behaviors\")\n box.gen_plot()", "def _setup_graph(self):\n pass", "def _setup_graph(self):\n pass", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def test_wp_association_bp(self):\n test_graph = wikipathways_to_bel(WP2359, self.hgnc_manager)\n\n self.assertEqual(type(test_graph), BELGraph, msg='Error with graph type')\n\n self.assertEqual(test_graph.summary_dict()['Number of Nodes'], 2)\n self.assertEqual(test_graph.summary_dict()['Number of Edges'], 1)\n self.assertEqual(count_relations(test_graph)['regulates'], 1)", "def generate_graph(data, x=\"Red blood Cells\", y=\"Paletes\", z=\"Leukocytes\", graph_type='scatter', selected_points=[], *args, **kwargs):\n\n # Return an empty figure if the input is empty\n if x == [] and y==[]:\n return go.Figure()\n \n # Transform the data into wide format\n melted = pd.melt(data, value_vars=data.columns)\n\n if graph_type==\"histogram\":\n if len(x) > 1:\n return generate_histogram(melted, x, y, \"wide\", **kwargs)\n elif len(x) == 0:\n return go.Figure()\n return generate_histogram(data, x, y, \"long\", **kwargs)\n\n elif graph_type==\"scatter\":\n if len(x) == 1 and len(y) == 1:\n return generate_scatter(data, x[0], y[0], \"long\", selected_points=selected_points, **kwargs)\n\n elif len(x) > 1 or len(y) > 1:\n return generate_scatter_matrix(data, x, y, data_format=\"long\", **kwargs)\n\n return generate_scatter(melted, x, y, \"wide\")\n\n elif graph_type==\"box\":\n return generate_box_plot(data, 
x, \"long\", **kwargs)\n\n elif graph_type==\"heatmap\":\n if len(x) > 0 and len(y) > 0:\n return generate_heatmap(data, x, y, \"long\", **kwargs)\n return go.Figure()\n\n elif graph_type==\"par_coords\":\n return generate_parallel_coords(data, x, y, \"long\", **kwargs)\n\n elif graph_type==\"strip\":\n if len(x) > 0 and len(y) > 0:\n return generate_strip(data, x, y, \"long\", **kwargs)\n return go.Figure()\n\n elif graph_type==\"ternary\":\n if len(x) > 0 and len(y) > 0 and len(z) > 0:\n return generate_ternary(data, x, y, z, \"long\", **kwargs)\n return go.Figure()", "def project_pop(self):\n M = self.N[0:2]\n for x in range(10):\n M.append(self.run_step(M))\n split_N = split_list(M)\n \n fig = self.make_figure(split_N)\n fig.update_layout(title='Projected Fish Population')\n\n return fig", "def generate_test_graph(sameDomain = False):\n num = 100\n\n urls = []\n emails = []\n nodes={}\n if sameDomain:\n domain = generate_domainname()\n else:\n domain = None\n for i in range(num):\n urls.append(generate_url(domain))\n emails.append(generate_email())\n \n used_urls = set()\n used_emails = set()\n for u in urls:\n l = random.choices(urls, k = floor(num/4))\n #l = [u for u in urls]\n e = random.choices(emails, k = floor(num/10))\n #e = [e for e in emails]\n used_urls.update(l)\n used_emails.update(e)\n nodes[u] = testNode(u, l, e)\n nodes[u].generate_page()\n \n return nodes, urls, emails", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 3)\n self.small_tree.add_edge(4, 3)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(0, 1) # deg(0) = 1\n\n self.deterministic_graph.add_edge(1, 2) # deg(1) = 2\n\n self.deterministic_graph.add_edge(2, 3)\n self.deterministic_graph.add_edge(2, 4) # deg(2) = 3\n\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(3, 6) # deg(3) = 4\n\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(4, 6)\n self.deterministic_graph.add_edge(4, 7) # deg(4) = 5\n\n self.deterministic_graph.add_edge(5, 6)\n self.deterministic_graph.add_edge(5, 7)\n self.deterministic_graph.add_edge(5, 8)\n self.deterministic_graph.add_edge(5, 9) # deg(5) = 6\n\n self.deterministic_graph.add_edge(6, 7)\n self.deterministic_graph.add_edge(6, 8)\n self.deterministic_graph.add_edge(6, 9) # deg(6) = 6\n\n self.deterministic_graph.add_edge(7, 8)\n self.deterministic_graph.add_edge(7, 9) # deg(7) = 5\n\n self.deterministic_graph.add_edge(8, 9) # deg(8) = 4", "def main():\n\n bGraph = DiGraph()\n bGraph + Vertex(\"alpha\", 20, deltaInherent=5)\n bGraph[\"beta\"] = Vertex(\"beta\", 13)\n bGraph.add_edge(bGraph[\"alpha\"], bGraph[\"beta\"], \"aa_lin\", [1])\n print(bGraph[\"alpha\"])\n print(bGraph)\n bGraph + Vertex(\"gamma\", 4, randomFlag=True, randomInfo=(0, 10))\n bGraph.apply_floating_deltas()\n for vert in bGraph:\n print(vert)", "def makeQuadSubplots(df_rad_obs, \n df_dir_obs, \n df_rad_sen, \n df_dir_sen, \n suptitle='Big title',\n eps=3, \n min_samples=50):\n fig, axs = plt.subplots(2, 2, \n figsize=(10,10)\n )\n\n fig.suptitle('Clustering Output', fontsize=20)\n\n populateSubPlot(df=df_rad_obs,\n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, 
\n row=0, \n col=0, title='Observer Wards Radiant')\n\n\n populateSubPlot(df=df_dir_obs, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=0, \n col=1, title='Observer Wards Dire')\n\n\n populateSubPlot(df=df_rad_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=0, title='Sentry Wards Radiant')\n\n populateSubPlot(df=df_dir_sen, \n eps=eps, \n min_samples=min_samples,\n fig=fig, \n axs=axs, \n row=1, \n col=1, title='Sentry Wards Dire')\n \n \n return fig, axs", "def di_subgraph(self, nbunch):\n\t\t\n\t\t# create new subgraph.\n\t\tg = nx.DiGraph()\n\t\t\n\t\t# add edges and nodes.\n\t\tg.add_edges_from( self.edges(nbunch=nbunch) )\n\t\t\n\t\treturn g", "def create_hierarchy(self):\n\t\tpass", "def test_xcomarg_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1_arg = XComArg(op1, \"test_key\")\n op1_arg.set_downstream(op2, Label(\"Label 1\"))\n op1.set_downstream([op3, op4])\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op1.task_id, op4.task_id) == {}", "def rectangle_graph():\n scaled = scale((200, 200, 200), 2)\n print scaled\n pylon_graph = graph.graph()\n base = rectangle(ORIGIN, WIDTH, LENGTH)\n base_ids = pylon_graph.add_nodes(base, \"base\")\n pylon_graph.connect_neighbours(base_ids, LENGTH)\n pylon_graph.connect_neighbours(base_ids, WIDTH)\n all_ids = []\n for i in range(LEVELS):\n level = offset(base, LENGTH * i, \"z\")\n level_ids = pylon_graph.add_nodes(level, \"level\" + str(i))\n all_ids.extend(level_ids)\n pylon_graph.connect_neighbours(all_ids, LENGTH)\n pylon_graph.connect_neighbours(all_ids, WIDTH)\n return pylon_graph", "def subplot_2(self, Graph, n_tabs):\n # The code below does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n if (val[1]==1):\n des = \"greater\"\n else:\n des = \"lower\"\n print(\"\\t\"*(n_tabs+1),\"feature threshold :\", val[0],\" \",des)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_2(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2), \"prediction :\",sub_graph)", "def recreate_subgraphs_name():\n global SUBGRAPHS\n for (name, subgraph) in SUBGRAPHS.items():\n subgraph.set_name(\"\\\"cluster_\" + subgraph.get_name() + \"\\\"\")", "def show_custom_graph(self):\n pass", "def test_dot(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_sizergrid():\n regular_grid(8, 3)\n mpl.show()", "def test_get_grid_edge_nodes(flopy_dis_mf6):\n mf6 = flopy_dis_mf6[1]\n mf6.initialize()\n\n with pytest.raises(NotImplementedError):\n mf6.get_grid_edge_nodes(1, np.zeros((1, 1)))", "def create_subgrid(self)->list:\n return [subgrid.Subgrid(i) for i in range(0, 9)]", "def test_get_related_nodes(self):\n pass", "def test_graphid_construction():\n _ = _ir.GraphId(\"g\")", "def discover_structure_from_pops(self, pops, data):\n\n def create_maximal_pgm(pops):\n pgm = nx.DiGraph()\n pgm.add_nodes_from(pops) # create 
nodes\n for node in pops:\n edges = [(parent, node) for parent in pops.get(node) if\n node.rsplit('_', 1)[0] != parent.rsplit('_', 1)[0]]\n pgm.add_edges_from(edges) # add edges\n return pgm\n\n def markov_blanket(graph, parent_node, node):\n mb = set(pa for pa in graph.predecessors(node)) # add parent nodes\n mb |= set(ch for ch in graph.successors(node)) # add child nodes\n for child in graph.successors(node): # add parents of children\n mb |= set(pa for pa in graph.predecessors(child))\n if node in mb: # remove node\n mb.remove(node)\n if parent_node in mb: # remove parent_node\n mb.remove(parent_node)\n return mb\n\n max_pgm = create_maximal_pgm(pops)\n\n if self.draw:\n plt.title('Maximal PGM (only intra-edges)')\n signal_pos_map = {}\n pos = {}\n for node in max_pgm.nodes:\n if node.rsplit('_', 1)[0] not in signal_pos_map:\n signal_pos_map.update({node.rsplit('_', 1)[0]: len(signal_pos_map)})\n x_coordinate = int(node[-1:])\n y_coordinate = signal_pos_map.get(node.rsplit('_', 1)[0])\n pos.update({node: [x_coordinate, y_coordinate]})\n nx.draw(max_pgm, pos=pos, with_labels=True)\n plt.show()\n pass\n\n if isinstance(data, ADTree):\n cb_estimator = GSquareEstimator(adtree=data)\n else:\n cb_estimator = BaseEstimator(data=data, complete_samples_only=False)\n # procedure similar to PC algorithm\n pgm = max_pgm.copy()\n condition_set_size = 0\n L().log.debug('---------------------------------------------------')\n L().log.debug('---- Conditional Independence Tests ---------------')\n L().log.debug('---------------------------------------------------')\n\n #\n if self.optimization_chi_square:\n import scipy.stats as scs\n def chi_square_of_df_cols(df, col1, col2):\n df_col1, df_col2 = df[col1], df[col2]\n categories_2 = list(df_col2.unique())\n categories_1 = list(df_col1.unique())\n result = [[sum((df_col1 == cat1) & (df_col2 == cat2))\n for cat2 in categories_2]\n for cat1 in categories_1]\n\n chi = scs.chi2_contingency(result)\n\n\n return chi\n\n remove_edges = []\n for (source, target) in pgm.edges():\n # check how correlated those two edges are / independent of MB and all the other stuff\n dat = chi_square_of_df_cols(self.data, source, target) # 1 = more corr. 0 = less corr.\n chi2, p, sufficient_data = dat[0], dat[1], dat[2]\n #print(\"%s Chi = %s, p=%s\" % (str([source, target]), str(chi2), str(p)))\n\n if chi2 < self.chi_square_thresh and pgm.has_edge(source, target):\n L().log.debug('remove edge ' + str((source, target)))\n remove_edges.append((source, target))\n pgm.remove_edges_from(remove_edges)\n #import sys\n #sys.exit(0)\n\n\n # additionally remove edges which are conditionally independent\n # e.g. 
given a-> b c->b and given a, c is independent of b, then I can remove c!!!\n remove_edges = []\n for (source, target) in pgm.edges():\n condition_set = [a for a in pgm.predecessors(target) if a != source]\n if not condition_set:continue\n _, p_val, _ = cb_estimator.test_conditional_independence(source, target, list(condition_set))\n if p_val > self.alpha:\n if pgm.has_edge(source, target):\n L().log.debug('remove edge ' + str((source, target)))\n remove_edges.append((source, target))\n pgm.remove_edges_from(remove_edges)\n\n else:\n while True:\n cont = False\n remove_edges = []\n for (source, target) in pgm.edges():\n mb = markov_blanket(pgm, target, source)\n if len(mb) >= condition_set_size:\n L().log.debug('testing ' + source + ' --> ' + target)\n L().log.debug('markov blanket of ' + source + ' is ' + str(mb))\n for condition_set in combinations(mb, condition_set_size):\n L().log.debug(\n 'independence test of ' + source + ' and ' + target + ' with subset ' + str(condition_set))\n _, p_val, _ = cb_estimator.test_conditional_independence(source, target, list(condition_set))\n #if isnan(p_val): # pgmpy CI test returns NaN instead of 1\n # p_val = 1\n L().log.debug('p_val = ' + str(p_val))\n if p_val > self.alpha:\n if pgm.has_edge(source, target):\n L().log.debug('remove edge ' + str((source, target)))\n remove_edges.append((source, target))\n break\n pass\n cont = True\n pass\n condition_set_size += 1\n pgm.remove_edges_from(remove_edges)\n if cont is False:\n break\n if condition_set_size > self.max_reach:\n break\n\n if self.draw:\n plt.title('PGM after CI tests (only inter-edges)')\n signal_pos_map = {}\n pos = {}\n for node in pgm.nodes:\n if node.rsplit('_', 1)[0] not in signal_pos_map:\n signal_pos_map.update({node.rsplit('_', 1)[0]: len(signal_pos_map)})\n x_coordinate = int(node[-1:])\n y_coordinate = signal_pos_map.get(node.rsplit('_', 1)[0])\n pos.update({node: [x_coordinate, y_coordinate]})\n nx.draw(pgm, pos=pos, with_labels=True)\n plt.show()\n pass\n\n nodes = list(pops.keys())\n edges = [list(edge) for edge in pgm.edges]\n return nodes, edges", "def test_ExplorePath( self ):\n links = []\n n1 = graph.Node( 10, 10 )\n n2 = graph.Node( 10, 20 )\n n3 = graph.Node( 10, 30 )\n n4a = graph.Node( 5, 40 )\n n4b = graph.Node( 15, 40 )\n n5a = graph.Node( 5, 50 )\n n5b = graph.Node( 15, 50 )\n n6a = graph.Node( 5, 60 )\n n6b = graph.Node( 15, 60 )\n n7 = graph.Node( 10, 70 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n4a ) )\n links.append( graph.Link( n3, n4b ) )\n links.append( graph.Link( n4a, n5a ) )\n links.append( graph.Link( n4b, n5b ) )\n links.append( graph.Link( n5a, n6a ) )\n links.append( graph.Link( n6a, n7 ) )\n links.append( graph.Link( n5b, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, n1, n1 )\n expected = [ n1, n2, n3, n4b, n5b, n7 ]\n self.assertEqual( expected, actual )", "def create_graph_domain():\n \n \"\"\"\n Fetch data\n \"\"\"\n \n from input.read_input import read_item_data\n df = read_item_data()\n df['item_id'] = df.index\n dct_title = df['title'].to_dict()\n dct_domain = df['domain_id'].to_dict()\n dct_cat= df['category_id'].to_dict()\n \n dct_price = df['price'].to_dict()\n \n \"\"\" Ratio stuff \"\"\" \n from input.create_ratio import get_ratio\n dct_ratio_dom = get_ratio(which='domain_id')\n \n ratio_df = get_ratio(which='item_id',full=True)\n ratio_df['popularity'] = 100.0*ratio_df['bought'] + ratio_df['searched']\n dct_ratio_item_b = 
ratio_df['popularity'].to_dict()\n \n \n \n \"\"\"\n JSON\n \n \"\"\"\n check = lambda x: x <= np.round(413163*0.8).astype(np.int32)\n \n DATA_PATH = path.join(DATA_DIR,'train_dataset.jl')\n line_i = 0\n \n \n\n \"\"\"\n Create graph vertices\n \"\"\"\n g = ig.Graph() \n from input.read_input import get_mappings\n counter, f_map_func, r_map_func = get_mappings()\n \n num_items = df.shape[0]\n for k in dct_title.keys():\n g.add_vertex(value=k,deg=dct_ratio_item_b[k],domain_id=dct_domain[k],price=dct_price[k],cat='item_id')\n\n \"\"\" ['item_id','domain_id','category_id','product_id'] \"\"\"\n \n for k in pd.unique(df['domain_id']):\n g.add_vertex(value=k,cat='domain_id')\n\n\n for k in pd.unique(df['category_id']):\n g.add_vertex(value=k,cat='category_id')\n\n\n for k in pd.unique(df['product_id']):\n g.add_vertex(value=k,cat='product_id')\n\n \n \n \"\"\"\n Create edges\n \"\"\"\n E1 = []\n E2 = []\n \n with jsonlines.open(DATA_PATH) as reader:\n for line_i, obj in enumerate(reader):\n if check(line_i):\n print(line_i)\n L = []\n for h in obj['user_history']:\n if h['event_type'] == 'view':\n #print(\"Viewed {}\".format(dct[h['event_info']]))\n L.append(h['event_info'])\n elif h['event_type'] == 'search':\n #print(\"Searched {}\".format(h['event_info']))\n pass\n L_domain = [dct_domain[k] for k in L]\n L_domain = pd.unique(L_domain)\n L_cat = [dct_cat[k] for k in L]\n L_cat = pd.unique(L_cat)\n \n for i in range(len(L)):\n E1.append(dct_domain[L[i]])\n E2.append(dct_domain[obj['item_bought']] )\n\n \n \n E1 = f_map_func['domain_id'](E1)\n E2 = f_map_func['domain_id'](E2)\n \n \n E = pd.Series(list(zip(E1,E2))).value_counts()\n g.add_edges(E.index)\n g.es[\"weight\"] = E.values\n \n \n g.write_pickle(fname=path.join(DATA_DIR,'graph_domain_to_domain.pkl'))", "def test_graph2():\n mol_graph1 = DGLGraph([(0, 1), (0, 2), (1, 2)])\n mol_graph2 = DGLGraph([(0, 1), (1, 2), (1, 3), (1, 4)])\n batch_mol_graph = dgl.batch([mol_graph1, mol_graph2])\n node_feats = torch.arange(batch_mol_graph.number_of_nodes()).float().reshape(-1, 1)\n edge_feats = torch.arange(2 * batch_mol_graph.number_of_edges()).float().reshape(-1, 2)\n\n complete_graph1 = get_complete_graph(mol_graph1.number_of_nodes())\n complete_graph2 = get_complete_graph(mol_graph2.number_of_nodes())\n batch_complete_graph = dgl.batch([complete_graph1, complete_graph2])\n atom_pair_feats = torch.arange(batch_complete_graph.number_of_edges()).float().reshape(-1, 1)\n\n return batch_mol_graph, node_feats, edge_feats, batch_complete_graph, atom_pair_feats", "def build_drop_fullgraphs(self, do_subgraph=False, graph_lib='pygraphviz'):\n if 'pygraphviz' == graph_lib:\n G = pgv.AGraph(strict=True, directed=True)\n else:\n G = nx.Graph()\n do_subgraph = False\n subgraph_dict = defaultdict(list) # k - node-ip, v - a list of graph nodes\n oid_gnid_dict = dict()\n\n for i, oid in enumerate(self.pg_spec.keys()):\n oid_gnid_dict[oid] = str(i)\n logger.info(\"oid to gid mapping done\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n ip = dropspec['node']\n subgraph_dict[ip].append(gid)\n if (dropspec['type'] == 'app'):\n G.add_node(gid, shape='rect', label='')#, fixedsize=True, hight=.05, width=.05)\n elif (dropspec['type'] == 'plain'): #parallelogram\n G.add_node(gid, shape='circle', label='')#, fixedsize=True, hight=.05, width=.05)\n logger.info(\"Graph nodes added\")\n\n for dropspec in self.pg_spec.itervalues():\n gid = oid_gnid_dict[dropspec['oid']]\n if (dropspec['type'] == 'app'):\n ds_kw = 'outputs' #down 
stream key word\n elif (dropspec['type'] == 'plain'):\n ds_kw = 'consumers'\n else:\n ds_kw = 'None'\n if (ds_kw in dropspec):\n for doid in dropspec[ds_kw]:\n G.add_edge(gid, oid_gnid_dict[doid])\n logger.info(\"Graph edges added\")\n\n if (do_subgraph):\n for i, subgraph_nodes in enumerate(subgraph_dict.values()):\n # we don't care about the subgraph label or rank\n subgraph = G.add_subgraph(subgraph_nodes, label='%d' % i, name=\"cluster_%d\" % i, rank=\"same\")\n subgraph.graph_attr['rank']='same'\n logger.info(\"Subgraph added\")\n\n return G", "def slice_graph_fwd( startea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\tstartnode = slice_node( startea, 0, reg )\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\ttgt_reg = currslice.get_target_reg()\r\n\t\tif tgt_reg == \"END\":\r\n\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.endea != currslice.get_lines()[-1][0]):\r\n\t\t\t# Nothing much happening here, just proceed to parent bocks\r\n\t\t\tif ua_mnem( currslice.endea ) == \"call\":\r\n\t\t\t\txrefs = get_short_crefs_from( currslice.endea )\r\n\t\t\telse:\r\n\t\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\telse:\r\n\t\t\t# Register was modified, use new register\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\treturn [ graph, data_bib ]", "def SimpleReferenceGrid(min_x,min_y,max_x,max_y,x_divisions,y_divisions,\n color=(0.5,1.0,0.5,1.0),xoff=-0.15,yoff=-0.04,\n label_type=None,shapes_name=\"Grid\"):\n\n shps=gview.GvShapes(name=shapes_name)\n gview.undo_register( shps )\n shps.add_field('position','string',20)\n\n if os.name == 'nt':\n font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n else:\n #font=\"-adobe-helvetica-medium-r-*-*-12-*-*-*-*-*-*-*\"\n #font=\"-urw-helvetica-medium-r-normal-*-9-*-*-*-p-*-iso8859-2\"\n font=\"-adobe-helvetica-medium-r-normal-*-8-*-*-*-p-*-iso10646-1\"\n 
#font=\"-misc-fixed-medium-r-*-*-9-*-*-*-*-*-*-*\"\n\n\n lxoff=(max_x-min_x)*xoff # horizontal label placement\n lyoff=(max_y-min_y)*yoff # vertical label placement\n\n hspc=(max_x-min_x)/x_divisions\n vspc=(max_y-min_y)/y_divisions\n\n for hval in numpy.arange(min_x,max_x+hspc/100.0,hspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(hval,max_y,0,0)\n nshp.set_node(hval,min_y,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(hval,min_y+lyoff)\n pshp.set_property('position',\"%.1f\" % hval)\n shps.append(pshp)\n\n for vval in numpy.arange(min_y,max_y+vspc/100.0,vspc):\n nshp=gview.GvShape(type=gview.GVSHAPE_LINE)\n nshp.set_node(min_x,vval,0,0)\n nshp.set_node(max_x,vval,0,1)\n shps.append(nshp)\n pshp=gview.GvShape(type=gview.GVSHAPE_POINT)\n pshp.set_node(min_x+lxoff,vval)\n pshp.set_property('position',\"%.1f\" % vval)\n shps.append(pshp)\n\n cstr=gvogrfs.gv_to_ogr_color(color)\n if len(cstr) < 9:\n cstr=cstr+\"FF\"\n clstr=str(color[0])+' '+str(color[1])+' '+str(color[2])+' '+str(color[3])\n\n layer=gview.GvShapesLayer(shps)\n layer.set_property('_line_color',clstr)\n layer.set_property('_point_color',clstr)\n # Set antialias property so that lines look nice\n # when rotated.\n layer.set_property('_gl_antialias','1')\n layer.set_property('_gv_ogrfs_point',\n 'LABEL(t:{position},f:\"'+font+'\",c:'+cstr+')')\n layer.set_read_only(True) \n\n return layer", "def __test(graph): \n \n if not isinstance(graph, basegraph):\n raise TypeError(\"Expected type was Graph.\")\n \n print \"### iPATH TEST DATA STRUCTURE\"\n print \"### Data Type: Graph ({})\".format(str(graph.__class__.__bases__[0].__name__))\n print \"### Implementation: {}\".format(str(graph.__class__.__name__))\n \n print \"\\n*** ADD NODE ***\\n\" \n for i in range(10):\n print \"add_node({})\".format(str(i)) \n graph.add_node(i) \n \n print \"\\n*** ADD ARC ***\\n\" \n for i in range(10):\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 1), str(2 * (i + 1)))\n graph.add_arc(i, i + 1, 2 * (i + 1))\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 2), str(2 * (i + 2)))\n graph.add_arc(i, i + 2, 2 * (i + 2))\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE NODE ***\\n\" \n print \"remove_node(5)\"\n graph.remove_node(5)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE ARC ***\\n\" \n print \"remove_arc(7, 8)\" \n graph.remove_arc(7, 8)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** INCIDENT ARCS ***\\n\" \n for node in graph.get_nodes():\n print \"Incident Arcs of {}\\t{}\\n\".format(str(node), str(graph.get_incident_arcs(node._id)))\n \n print \"\\n*** ADJACENCY ***\\n\" \n for i in range(10):\n for j in range(10):\n if graph.are_adjacent(i, j) == True:\n print \"Adjacency Between ({}, {}): True\\n\".format(str(i), str(j))\n \n print \"\\n*** NODES ***\\n\" \n print \"numNodes: {}\\n\".format(str(graph.get_num_nodes())) \n print \"Nodes: {}\\n\".format(str(graph.get_nodes())) \n \n print \"\\n*** ARCS ***\\n\" \n print \"numArcs: {}\\n\".format(str(graph.get_num_arcs())) \n print \"Arcs: {}\\n\".format(str(graph.get_arcs())) \n \n print \"\\n*** SEARCH BFS ***\\n\" \n for i in range(10): \n print \"bfs({})\".format(str(i))\n Lbfs = graph.bfs(i)\n for n in Lbfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n*** SEARCH DFS ***\\n\" \n for i in range(9):\n print \"dfs({})\".format(str(i))\n Ldfs = 
graph.dfs(i)\n for n in Ldfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n### END OF TEST ###\\n\"", "def do_printgraph(self, args):\n self.currentGraph.printGraph()" ]
[ "0.79100144", "0.65580076", "0.64052117", "0.63540345", "0.6348667", "0.61608034", "0.6133355", "0.5854858", "0.5819954", "0.5817296", "0.5729944", "0.57231635", "0.5651493", "0.5646747", "0.5616531", "0.5610424", "0.56057763", "0.5573422", "0.55470526", "0.5529936", "0.55223936", "0.548946", "0.54719025", "0.5463685", "0.542794", "0.54104775", "0.5367909", "0.5331161", "0.53173673", "0.531653", "0.53138065", "0.5299587", "0.529403", "0.5286369", "0.5277277", "0.5274089", "0.5259896", "0.52557755", "0.5255154", "0.5250453", "0.52455384", "0.5243805", "0.52431357", "0.5232331", "0.5225946", "0.52239835", "0.522327", "0.5223267", "0.5208987", "0.520408", "0.52030396", "0.5198093", "0.5191159", "0.5181923", "0.5181079", "0.51773447", "0.5159624", "0.51575315", "0.5149096", "0.51418734", "0.5138336", "0.51333183", "0.5131424", "0.5124564", "0.51216453", "0.5116607", "0.51041955", "0.5098653", "0.5098653", "0.5097246", "0.5088556", "0.5087094", "0.5084827", "0.508432", "0.50815696", "0.5080257", "0.5074737", "0.5064817", "0.5062849", "0.50582373", "0.50573325", "0.50557715", "0.50545186", "0.5053927", "0.5051967", "0.5051801", "0.50475746", "0.5039426", "0.5038892", "0.5036758", "0.50346255", "0.5032037", "0.5031954", "0.502397", "0.5022857", "0.5020285", "0.50197035", "0.5015002", "0.50122994", "0.500538" ]
0.8254092
0
Test the popxl create multiple callsites for a subgraph input example
def test_documentation_popxl_multi_callsites_graph_input(self): filename = "multi_call_graph_input.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def _test_multiple(self, fin_graphzip, fin_insts, T, n=None):\n for t in range(T+1)[1:]:\n self._test_graphzip_subgen(fin_graphzip, fin_insts, n)", "def sub_graph_merging(self):", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def test_generator8(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = (xpb.foo.bar | xpb.x.y).parenthesize()\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() & xpb.c\n xp2 = b() & xpb.d\n xp1_exp = '(/foo/bar or /x/y) and /c'\n xp2_exp = '(/foo/bar or /x/y) and /d'\n base_exp = '(/foo/bar or /x/y)'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_generator7(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.foo.bar & xpb.x.y\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() | xpb.c\n xp2 = b() | xpb.d\n xp1_exp = '/foo/bar and /x/y or /c'\n xp2_exp = '/foo/bar and /x/y or /d'\n base_exp = '/foo/bar and /x/y'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is 
None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def test_xcomarg_set(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1_arg = XComArg(op1, \"test_key\")\n op1_arg.set_downstream(op2, Label(\"Label 1\"))\n op1.set_downstream([op3, op4])\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op1.task_id, op4.task_id) == {}", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def test_generator6(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = xpb.a.b.c.join(b())\n xp2 = xpb.test.join(b())\n xp1_exp = '/a/b/c/base/foo/bar'\n xp2_exp = '/test/base/foo/bar'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def populate_graph(self):", "def test_multiple_task_groups_dag(\n self, test_multiple_taskgroups_dag, multiple_taskgroups_dag_expected_edges\n ):\n (\n dag,\n group1,\n group2,\n group3,\n (\n group1_emp1,\n group1_emp2,\n group1_emp3,\n group2_emp1,\n group2_emp2,\n group2_emp3,\n group2_op1,\n group2_op2,\n group3_emp1,\n group3_emp2,\n group3_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n ) = test_multiple_taskgroups_dag\n\n group1_emp1 >> Label(\"label group1.group1_emp1 <=> group1.group1_emp2\") >> group1_emp3\n\n emp_in1 >> group1\n emp_in2 >> Label(\"label emp_in2 <=> group1\") >> group1\n [emp_in3, emp_in4] >> Label(\"label emp_in3/emp_in4 <=> group1\") >> group1\n XComArg(op_in1, \"test_key\") >> Label(\"label op_in1 <=> group1\") >> group1\n\n (\n [group2_emp1, group2_emp2]\n >> Label(\"label group2.group2_emp1/group2.group2_emp2 <=> group2.group2_emp3\")\n >> group2_emp3\n )\n (\n group2_emp1\n >> Label(\"label group2.group2_emp1 <=> group2.group2_emp2/group2.group2_emp3\")\n >> [group2_emp2, group2_emp3]\n )\n group2_emp3 >> Label(\"label group2.group2_emp3 <=> group3\") >> group3\n\n (\n XComArg(group2_op1, \"test_key\")\n >> Label(\"label group2.group2_op1 <=> group2.group2_op2\")\n >> XComArg(group2_op2, \"test_key\")\n )\n XComArg(group2_op2, \"test_key\") >> Label(\"label group2.group2_op2 <=> group3\") >> group3\n\n group3 >> emp_out1\n group3 >> Label(\"label group3 <=> emp_out2\") >> emp_out2\n group3 >> Label(\"label group3 <=> emp_out3/emp_out4\") >> [emp_out3, emp_out4]\n group3 >> Label(\"label group3 <=> op_out1\") >> XComArg(op_out1, \"test_key\")\n\n group1 >> Label(\"label group1 <=> group2\") >> group2\n\n compare_dag_edges(dag_edges(dag), multiple_taskgroups_dag_expected_edges)", "def test_path7():\n path = [(0, 0, 1)]\n path += [\n [('A', 3, 0)],\n (0, 1, 1),\n [('A', 2, 0)],\n (np.pi/2, 1, 1),\n 
[('B',3,0)],\n (0, 1, 1),\n [('B',2,0)],\n (np.pi/2, 1, 1),\n [('C',3,0)],\n (0, 1, 1),\n [('C',2,0)],\n (np.pi/2, 1, 1),\n [('D', 3, 0)],\n (0, 1, 1),\n [('D', 2,0)],\n (np.pi/2, 1, 1),\n ] * 4\n execute_path(path,True)", "def explore(self, *args):", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def setUp(self):\n\n singleLabels = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_2 = linkoCreate.Linkograph(\n [({'A'}, set(), {1,2}),\n ({'D'}, {0}, set()),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_1 = linkoCreate.Linkograph(\n [({'A'}, set(), {1}),\n ({'D'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko0_0 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_2 = linkoCreate.Linkograph(\n [({'D'}, set(), set()),\n ({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko1_1 = linkoCreate.Linkograph(\n [({'D'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n trivialLinkograph = linkoCreate.Linkograph(\n [], ['A', 'B', 'C', 'D'])\n\n\n singleSubLinko1_4 = linkoCreate.Linkograph(\n [({'D'}, set(), {2,3}),\n ({'A'}, set(), {3}),\n ({'C'}, {0}, {3}),\n ({'A'}, {0,1,2}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko2_4 = linkoCreate.Linkograph(\n [({'A'}, set(), {2}),\n ({'C'}, set(), {2}),\n ({'A'}, {0,1}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko3_4 = linkoCreate.Linkograph(\n [({'C'}, set(), {1}),\n ({'A'}, {0}, set())],\n ['A', 'B', 'C', 'D'])\n\n singleSubLinko4_4 = linkoCreate.Linkograph(\n [({'A'}, set(), set())],\n ['A', 'B', 'C', 'D'])\n\n simpleLinko = linkoCreate.Linkograph(\n [({'A', 'B', 'C'}, set(), {1,2,3}),\n ({'D'}, {0}, {3,4}),\n ({'A'}, {0}, {4}),\n ({'B', 'C'}, {0,1}, {4}),\n ({'A'}, {1,2,3}, set())],\n ['A', 'B', 'C', 'D'])\n\n if self.id().split('.')[-1] == 'test_createSubLinkographWithoutCommands':\n self.testParams = [\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': None,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 
'upperBound': 5,\n 'ExpectedLinkograph': singleLabels},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': None,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko0_2},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko0_1},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': 0,\n 'ExpectedLinkograph': singleSubLinko0_0},\n\n {'linko': singleLabels,\n 'lowerBound': 0,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 2,\n 'ExpectedLinkograph': singleSubLinko1_2},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 1,\n 'ExpectedLinkograph': singleSubLinko1_1},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 0,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': -1,\n 'upperBound': -1,\n 'ExpectedLinkograph': trivialLinkograph},\n\n {'linko': singleLabels,\n 'lowerBound': 1,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko1_4},\n\n {'linko': singleLabels,\n 'lowerBound': 2,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko2_4},\n\n {'linko': singleLabels,\n 'lowerBound': 3,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko3_4},\n\n {'linko': singleLabels,\n 'lowerBound': 4,\n 'upperBound': 4,\n 'ExpectedLinkograph': singleSubLinko4_4},\n\n ]", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"", "def test_calls(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n ex.nreps = nreps\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"name\", m, n, \"X_%d\" % idx, m, \"Y\", m, \"Z\", n], cmds)", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def main(\n num_sampled=[3, 3],\n max_depth=2,\n num_iters=1000,\n do_graph=False,\n # These are for checking stats on smaller data\n subsample=False,\n plot=False,\n # Generates a random 
matrix for comparison\n random=False,\n # Visualise the connection matrix\n vis_connect=False,\n subsample_vis=False,\n # Generate final graphs\n final=False,\n # Analyse\n analyse=False,\n only_exp=False,\n # Which regions are considered here\n # A_name, B_name = \"MOp\", \"SSP-ll\"\n A_name=\"VISp\",\n B_name=\"VISl\",\n desired_depth=1,\n desired_samples=79,\n):\n np.random.seed(42)\n\n if random:\n AB, BA, AA, BB = gen_random_matrix(150, 50, 0, 0.04, 0, 0.0)\n matrix_vis(AB, BA, AA, BB, 10, name=\"test_vis.png\")\n\n os.makedirs(os.path.dirname(pickle_loc), exist_ok=True)\n convert_mouse_data(A_name, B_name)\n to_use = [True, True, True, True]\n mc, args_dict = load_matrix_data(to_use, A_name, B_name)\n print(\"{} - {}, {} - {}\".format(A_name, B_name, mc.num_a, mc.num_b))\n\n result = {}\n result[\"matrix_stats\"] = print_args_dict(args_dict, out=False)\n\n if only_exp:\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict)\n mpf_val = [\n mpf_res[\"expected\"],\n mpf_res[\"expected\"] / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n to_add = np.mean(graph_res[\"full_results\"][\"Connections\"].values)\n graph_val = [\n to_add,\n to_add / num_sampled[1],\n \"{}_{}\".format(A_name, B_name),\n \"Statistical estimation\",\n ]\n return mpf_val, graph_val\n return mpf_val, None\n\n # Convert to a pickle\n # if not os.path.isfile(pickle_loc):\n # print(\"Converting matrix\")\n # gc.collect()\n # mc.create_connections()\n # print(\"Finished conversion\")\n # graph = mc.graph\n # to_write = [mc.num_a, mc.num_b]\n # del mc\n # gc.collect()\n\n # handle_pickle(graph, \"graph.pickle\", \"w\")\n # handle_pickle(reverse(graph), \"r_graph.pickle\", \"w\")\n # handle_pickle(to_write, \"graph_size.pickle\", \"w\")\n\n if vis_connect:\n if subsample_vis:\n print(\"Plotting subsampled matrix vis\")\n new_mc = mc.subsample(int(mc.num_a / 10), int(mc.num_b / 10))\n matrix_vis(\n new_mc.ab,\n new_mc.ba,\n new_mc.aa,\n new_mc.bb,\n 15,\n name=\"mc_mat_vis_sub10.pdf\",\n )\n else:\n o_name = \"mc_mat_vis_{}_to_{}.pdf\".format(A_name, B_name)\n print(\"Plotting full matrix vis\")\n matrix_vis(mc.ab, mc.ba, mc.aa, mc.bb, 150, name=o_name)\n print(\"done vis\")\n\n print(mc, print_args_dict(args_dict, out=False))\n\n result = None\n if subsample:\n result = check_stats(mc, 1000, 1, 20000, 1, plot)\n if final:\n result = {}\n\n # For different depths and number of samples\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n ns_2 = [ns] * 2\n mpf_res = mpf_connectome(mc, ns_2, depth, args_dict)\n result[\"mpf_{}_{}\".format(depth, ns)] = mpf_res\n\n # Save this for plotting\n cols = [\"Number of samples\", \"Proportion of connections\", \"Max distance\"]\n depth_name = [None, \"Direct synapse\", \"Two synapses\", \"Three synapses\"]\n vals = []\n for depth in range(1, 4):\n for ns in range(1, num_sampled[0] + 1):\n this = result[\"mpf_{}_{}\".format(depth, ns)]\n val = [ns, this[\"expected\"] / ns, depth_name[depth]]\n vals.append(val)\n df = pd.DataFrame(vals, columns=cols)\n os.makedirs(os.path.join(here, \"..\", \"results\"), exist_ok=True)\n df.to_csv(\n 
os.path.join(\n here, \"..\", \"results\", \"{}_to_{}_depth.csv\".format(A_name, B_name)\n ),\n index=False,\n )\n\n cols = [\"Number of sampled connected neurons\", \"Probability\"]\n total_pmf = result[\"mpf_{}_{}\".format(desired_depth, desired_samples)][\"total\"]\n vals = []\n for k, v in total_pmf.items():\n vals.append([k, float(v)])\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_{}_{}.csv\".format(\n A_name, B_name, desired_depth, desired_samples\n ),\n ),\n index=False,\n )\n if analyse:\n result = {}\n result[\"matrix_stats\"] = args_dict\n\n mpf_res = mpf_connectome(\n mc,\n num_sampled,\n max_depth,\n args_dict,\n clt_start=30,\n sr=None,\n mean_estimate=True,\n )\n result[\"mean\"] = mpf_res\n\n vals = []\n cols = [\"Number of connected neurons\", \"Probability\", \"Calculation\"]\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Mean estimation\"])\n\n mpf_res = mpf_connectome(mc, num_sampled, max_depth, args_dict, clt_start=30)\n result[\"mpf\"] = mpf_res\n\n for k, v in mpf_res[\"total\"].items():\n vals.append([k, float(v), \"Statistical estimation\"])\n\n if do_graph:\n print(\"Converting matrix\")\n gc.collect()\n mc.create_connections()\n print(\"Finished conversion\")\n graph = mc.graph\n to_write = [mc.num_a, mc.num_b]\n del mc\n gc.collect()\n reverse_graph = reverse(graph)\n\n graph_res = graph_connectome(\n num_sampled,\n max_depth,\n graph=graph,\n reverse_graph=reverse_graph,\n to_write=to_write,\n num_iters=num_iters,\n )\n\n result[\"difference\"] = (\n dist_difference(mpf_res[\"total\"], graph_res[\"dist\"]),\n )\n result[\"graph\"] = graph_res\n\n for k, v in graph_res[\"dist\"].items():\n vals.append([k, float(v), \"Monte Carlo simulation\"])\n\n df = pd.DataFrame(vals, columns=cols)\n df.to_csv(\n os.path.join(\n here,\n \"..\",\n \"results\",\n \"{}_to_{}_pmf_final_{}_{}.csv\".format(\n A_name, B_name, max_depth, num_sampled[0]\n ),\n ),\n index=False,\n )\n\n if result is not None:\n with open(os.path.join(here, \"..\", \"results\", \"mouse.txt\"), \"w\") as f:\n pprint(result, width=120, stream=f)\n\n return result", "def examples():\r\n\r\n # get some data for a single name\r\n x = blp.bdp('BDEV LN Equity', 'px_last')\r\n print(x)\r\n print('the type of x', type(x))\r\n print('the value of x:', x.iloc[0]['px_last'])\r\n\r\n\r\n # get multiple data for a single name\r\n y = blp.bdp('BDEV LN Equity', flds=['px_bid', 'px_ask'])\r\n print(y)\r\n\r\n\r\n # get multiple data for multiple names\r\n z = blp.bdp(tickers=['BDEV LN Equity', 'BARC LN Equity'], flds=['px_bid', 'px_ask'])\r\n print(z)\r\n print('here is the bdev ask >>>', z.loc['BDEV LN Equity','px_ask'])", "def test_generator3(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo[xpb.attr('abc') == 'x']\n with base_xp as b:\n xp1 = b().bar.text() == 'foo'\n xp2 = b().x.y.z[42]\n base_exp = '/base/foo[@abc = \"x\"]'\n xp1_exp = '/base/foo[@abc = \"x\"]/bar/text() = \"foo\"'\n xp2_exp = '/base/foo[@abc = \"x\"]/x/y/z[42]'\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)", "def test_extract_graph(default_plugin_resolver):\n dpr = default_plugin_resolver\n nx_graph = nx.Graph()\n nx_graph.add_weighted_edges_from(\n [(1, 0, 2), (1, 4, 3), (2, 5, 5), (2, 7, 6), (3, 1, 7), (5, 6, 10), (6, 2, 11),]\n )\n desired_nodes = {2, 5, 6}\n nx_extracted_graph = nx.Graph()\n 
nx_extracted_graph.add_weighted_edges_from([(2, 5, 5), (5, 6, 10), (6, 2, 11)])\n graph = dpr.wrappers.Graph.NetworkXGraph(nx_graph)\n desired_nodes_wrapped = dpr.wrappers.NodeSet.PythonNodeSet(desired_nodes)\n extracted_graph = dpr.wrappers.Graph.NetworkXGraph(nx_extracted_graph)\n MultiVerify(\n dpr, \"subgraph.extract_subgraph\", graph, desired_nodes_wrapped\n ).assert_equals(extracted_graph)", "def test_generator4(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.where(xpb.attr('abc').equals('x'))\n with base_xp as b:\n xp1 = b().bar.text().equals('foo')\n xp2 = b().x.y.z.where(42)\n base_exp = '/base/foo[@abc = \"x\"]'\n xp1_exp = '/base/foo[@abc = \"x\"]/bar/text() = \"foo\"'\n xp2_exp = '/base/foo[@abc = \"x\"]/x/y/z[42]'\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)", "def test_dummy6(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n xp = xpb.bar | xp\n exp = '/bar'\n self.assertEqual(xp.tostring(), exp)", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))", "def test_createData():\n\n sys = LVsystem.Ecosystem()\n\n sys.addSpecies('rabbit')\n sys.setInteraction('rabbit', 'hen', 0)\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInitialCond('rabbit', 30)\n sys.setGrowthRate('rabbit', 0.09)\n sys.setCarrCap('rabbit', 10000)\n sys.setChangeRate('rabbit', 400)\n\n sys.addSpecies('hen')\n sys.setInteraction('hen', 'rabbit', 0)\n sys.setInteraction('hen', 'fox', -1)\n sys.setInitialCond('hen', 10)\n sys.setGrowthRate('hen', 0.07)\n sys.setCarrCap('hen', 10000)\n sys.setChangeRate('hen', 500)\n\n sys.addSpecies('fox')\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInteraction('fox', 'hen', 1)\n sys.setInitialCond('fox', 20)\n sys.setGrowthRate('fox', -0.06)\n sys.setCarrCap('fox', 1)\n sys.setChangeRate('fox', 250)\n\n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n \n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def test_example(self):\n\n solution = Solution()\n\n nums = [1, 2, 3]\n\n expected_output = [\n (3,),\n (1,),\n (2,),\n (1, 2, 3),\n (1, 3),\n (2, 3),\n (1, 2),\n ()\n 
]\n actual_output = solution.subsets(nums)\n\n for ss in expected_output:\n self.assertIn(ss, actual_output)", "def test_generator5(self):\n xpb = XPathBuilder()\n xp = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp = b().join(xpb.a.b.c[3])\n exp = '/base/foo/bar/a/b/c[3]'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp.tostring(), exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_multi_sink(self):\n with self.assertRaises(ValidationError):\n with Graph('g') as graph:\n pike.glob('a', '*')\n pike.glob('b', '*')", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_case1(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n\n val1 = graph.getStudents(\"supervisor1\")\n val2 = graph.getSupervisors(\"student1\")\n\n expected1 = [\"student1\"]\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((val1,val2),(expected1,expected2))", "def test_parent_with_iterables(self):\n def makeCubesAndGrp():\n cmds.file(new=1, f=1)\n cubes = []\n for x in range(10):\n cubes.append(pm.polyCube()[0])\n group = pm.group(empty=True)\n return cubes, group\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4] + [group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], cubes[2], cubes[3], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], [cubes[2], cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent([cubes[0], cubes[1]], cubes[2], [cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)", "def test_programs():\n yield 4, 4, 1\n yield 16, 12, 2", "def tenpar_subset_test():\n model_d = \"ies_10par_xsec\"\n test_d = os.path.join(model_d, \"master_subset_test\")\n template_d = os.path.join(model_d, \"test_template\")\n if not os.path.exists(template_d):\n raise Exception(\"template_d {0} not found\".format(template_d))\n if os.path.exists(test_d):\n shutil.rmtree(test_d)\n # shutil.copytree(base_d,test_d)\n pst = pyemu.Pst(os.path.join(template_d, \"pest.pst\"))\n pst.control_data.noptmax = 3\n\n # first without subset\n pst.pestpp_options = {}\n pst.pestpp_options[\"ies_num_reals\"] = 50\n pst.pestpp_options[\"ies_lambda_mults\"] = 
\"1.0\"\n pst.pestpp_options[\"ies_accept_phi_fac\"] = 100.0\n pst.write(os.path.join(template_d, \"pest.pst\"))\n pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", num_slaves=10,\n slave_root=model_d, master_dir=test_d)\n df_base = pd.read_csv(os.path.join(test_d, \"pest.phi.meas.csv\"),index_col=0)\n\n pst.pestpp_options = {}\n pst.pestpp_options[\"ies_num_reals\"] = 50\n pst.pestpp_options[\"ies_lambda_mults\"] = \"1.0\"\n pst.pestpp_options[\"ies_subset_size\"] = 15\n pst.pestpp_options[\"ies_accept_phi_fac\"] = 100.0\n\n pst.write(os.path.join(template_d, \"pest.pst\"))\n pyemu.helpers.start_slaves(template_d, exe_path, \"pest.pst\", num_slaves=10,\n slave_root=model_d, master_dir=test_d)\n df_sub = pd.read_csv(os.path.join(test_d, \"pest.phi.meas.csv\"),index_col=0)\n diff = (df_sub - df_base).apply(np.abs)\n print(diff.max())\n print(df_sub.iloc[-1,:])\n print(df_base.iloc[-1,:])\n assert diff.max().max() == 0.0", "def test_path1():\n path = [(0,0,1)]\n path.append([('A',4,0)])\n path.append((0,1,1))\n path.append([('A',3,0)])\n path.append((0,1,1))\n path.append([('A',2,0)])\n path.append((0,1,1))\n path.append([('A',1,0)])\n\n execute_path(path, True)", "def test_multiple_triples(self):\n self.graph.add((artis, RDF.type, zoo))\n self.graph.add((artis, RDF.type, org))\n self.graph.add((berlin_zoo, RDF.type, zoo))\n self.assertEquals(len(list(self.graph.triples((None, None, None)))), 3)\n\n self.assertEquals(len(list(self.graph.triples((artis, None, None)))), 2)\n self.assertEquals(len(list(self.graph.triples((None, RDF.type, None)))), 3)\n self.assertEquals(len(list(self.graph.triples((None, None, zoo)))), 2)\n self.assertEquals(len(list(self.graph.triples((None, None, org)))), 1)", "def test_EnumerateTestSet():\n protos = list(random_networkx_generator.EnumerateTestSet())\n assert len(protos) == 100", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. 
Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def main(dot_file):\n global SUBGRAPHS, PARENTS\n graph = graph_from_dot(dot_file)\n SUBGRAPHS = {}\n PARENTS = {}\n extract_subgraphs([graph])\n \n for (name, subgraph) in SUBGRAPHS.items():\n nodes = extract_nodes(subgraph)\n for node in nodes:\n (name_function, result, function_call_line) = analyse_label_function_calls(node)\n if name_function is not None:\n (label_node1, label_node2, bb) = create_labels(node, result, function_call_line)\n node.set_label(label_node1)\n nodes_to_update = get_nodes_to_update(subgraph, graph.get_name())\n update_nodes(nodes_to_update, bb)\n nodes.append(create_new_node(subgraph, node, label_node2, bb))\n update_edges(subgraph, graph.get_name(), bb)\n create_new_edge(graph, node.get_name(), SUBGRAPHS[name_function])\n recreate_subgraphs_name()\n export_graph(graph, \"main_output\", \"png\")\n export_graph(graph, \"main_output\", \"dot\")\n return graph", "def run_generations(init_len):\n num_graphs = 0\n current_gen = [nx.path_graph(init_len)]\n complete_graph_list = current_gen.copy()\n while len(current_gen) and current_gen[0].size() < (3*init_len - 7):\n current_gen = generation_next(current_gen)\n num_graphs += show_graph_list(current_gen)\n complete_graph_list.extend(filter_bridge_case(current_gen))\n print(num_graphs)\n return complete_graph_list", "def test_general_subset_level():\n pass", "def test_clips_deftemplate_parse_smoke(self):\n G = CreateTestCarpetingGraph()\n \n nlpGraphProcessor = NLPResultGraphParser()\n res = BLNlpClipsRuleBase()\n seen = []\n nlpGraphProcessor.ParseObject(G, Strings.ndStep, seen, res)\n nlpGraphProcessor.ParseObject(G, \"CarpetingPattern\", seen, res)\n nlpGraphProcessor.ParseObject(G, \"ProcessSubStep\", seen, res)\n\n # Deftemplate with part relation and extra fields\n template = next(t for t in res.Deftemplates if t.TemplateName == Strings.ndStep)\n self.assertTrue(template != None)\n self.assertEqual(len(template.Slots), 4)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"Id\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"ProcessId\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == Strings.after) != None)\n\n # Deftemplate with several has relations\n template = next(t for t in res.Deftemplates if t.TemplateName == \"CarpetingPattern\")\n self.assertTrue(template != None)\n self.assertEqual(len(template.Slots), 4)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"Id\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"RotationAngle\") != None)\n 
self.assertTrue(next(s for s in template.Slots if s.Name == \"ShiftDirection\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"ShiftRatio\") != None)\n\n # Deftemplate with in relation\n template = next(t for t in res.Deftemplates if t.TemplateName == \"ProcessSubStep\")\n self.assertTrue(template != None)\n self.assertEqual(len(template.Slots), 7)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"Id\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"Instruction\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"ProcessStepId\") != None)\n self.assertTrue(next(s for s in template.Slots if s.Name == \"StepNumber\") != None)", "def test07_add_type_triples(self):\n r = LDPRS('http://ex.org/abc')\n g = Graph()\n r.add_type_triples(g)\n self.assertEqual(len(g), 2)", "def test_multiple_task_groups_reversed_dag(\n self, test_multiple_taskgroups_dag, multiple_taskgroups_dag_expected_edges\n ):\n (\n dag,\n group1,\n group2,\n group3,\n (\n group1_emp1,\n group1_emp2,\n group1_emp3,\n group2_emp1,\n group2_emp2,\n group2_emp3,\n group2_op1,\n group2_op2,\n group3_emp1,\n group3_emp2,\n group3_emp3,\n emp_in1,\n emp_in2,\n emp_in3,\n emp_in4,\n emp_out1,\n emp_out2,\n emp_out3,\n emp_out4,\n op_in1,\n op_out1,\n ),\n ) = test_multiple_taskgroups_dag\n\n group1_emp3 << Label(\"label group1.group1_emp1 <=> group1.group1_emp2\") << group1_emp1\n\n group1 << emp_in1\n group1 << Label(\"label emp_in2 <=> group1\") << emp_in2\n group1 << Label(\"label emp_in3/emp_in4 <=> group1\") << [emp_in3, emp_in4]\n group1 << Label(\"label op_in1 <=> group1\") << XComArg(op_in1, \"test_key\")\n\n (\n group2_emp3\n << Label(\"label group2.group2_emp1/group2.group2_emp2 <=> group2.group2_emp3\")\n << [group2_emp1, group2_emp2]\n )\n (\n [group2_emp2, group2_emp3]\n << Label(\"label group2.group2_emp1 <=> group2.group2_emp2/group2.group2_emp3\")\n << group2_emp1\n )\n group3 << Label(\"label group2.group2_emp3 <=> group3\") << group2_emp3\n\n (\n XComArg(group2_op2, \"test_key\")\n << Label(\"label group2.group2_op1 <=> group2.group2_op2\")\n << XComArg(group2_op1, \"test_key\")\n )\n group3 << Label(\"label group2.group2_op2 <=> group3\") << XComArg(group2_op2, \"test_key\")\n\n emp_out1 << group3\n emp_out2 << Label(\"label group3 <=> emp_out2\") << group3\n [emp_out3, emp_out4] << Label(\"label group3 <=> emp_out3/emp_out4\") << group3\n XComArg(op_out1, \"test_key\") << Label(\"label group3 <=> op_out1\") << group3\n\n group2 << Label(\"label group1 <=> group2\") << group1\n\n compare_dag_edges(dag_edges(dag), multiple_taskgroups_dag_expected_edges)", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n 
counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def test_xcomarg_shift(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1_arg = XComArg(op1, \"test_key\")\n op1_arg >> Label(\"Label 1\") >> [op2, op3]\n op1_arg >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op1.task_id, op4.task_id) == {}", "def generate_test():\n o = []\n pos = [384, 288]\n note_group_size = GAN_PARAMS[\"note_group_size\"]\n generate_set(begin=3 * note_group_size, start_pos=pos,\n length_multiplier=dist_multiplier, group_id=3, plot_map=True)", "def test_build_poset_lattice():\n lattice = build_poset_lattice(all_games_gen(2))\n assert len(lattice.edges()) == 36", "def create_nodes(self):", "def __test(graph): \n \n if not isinstance(graph, basegraph):\n raise TypeError(\"Expected type was Graph.\")\n \n print \"### iPATH TEST DATA STRUCTURE\"\n print \"### Data Type: Graph ({})\".format(str(graph.__class__.__bases__[0].__name__))\n print \"### Implementation: {}\".format(str(graph.__class__.__name__))\n \n print \"\\n*** ADD NODE ***\\n\" \n for i in range(10):\n print \"add_node({})\".format(str(i)) \n graph.add_node(i) \n \n print \"\\n*** ADD ARC ***\\n\" \n for i in range(10):\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 1), str(2 * (i + 1)))\n graph.add_arc(i, i + 1, 2 * (i + 1))\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 2), str(2 * (i + 2)))\n graph.add_arc(i, i + 2, 2 * (i + 2))\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE NODE ***\\n\" \n print \"remove_node(5)\"\n graph.remove_node(5)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE ARC ***\\n\" \n print \"remove_arc(7, 8)\" \n graph.remove_arc(7, 8)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** INCIDENT ARCS ***\\n\" \n for node in graph.get_nodes():\n print \"Incident Arcs of {}\\t{}\\n\".format(str(node), str(graph.get_incident_arcs(node._id)))\n \n print \"\\n*** ADJACENCY ***\\n\" \n for i in range(10):\n for j in range(10):\n if graph.are_adjacent(i, j) == True:\n print \"Adjacency Between ({}, {}): True\\n\".format(str(i), str(j))\n \n print \"\\n*** NODES ***\\n\" \n print \"numNodes: {}\\n\".format(str(graph.get_num_nodes())) \n print \"Nodes: {}\\n\".format(str(graph.get_nodes())) \n \n print \"\\n*** ARCS ***\\n\" \n print \"numArcs: {}\\n\".format(str(graph.get_num_arcs())) \n print \"Arcs: {}\\n\".format(str(graph.get_arcs())) \n \n print \"\\n*** SEARCH BFS ***\\n\" \n for i in range(10): \n print \"bfs({})\".format(str(i))\n Lbfs = graph.bfs(i)\n for n in Lbfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n*** SEARCH DFS ***\\n\" \n for i in 
range(9):\n print \"dfs({})\".format(str(i))\n Ldfs = graph.dfs(i)\n for n in Ldfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n### END OF TEST ###\\n\"", "def gen_graph(self):", "def test_pytree(self):\n\n # Arguments are of the form [([x00, x01], [x10]), dict(a=ya, b=yb)]\n def add_all_jax(x_pair_of_list, y_dict):\n x_list_0, x_list_1 = x_pair_of_list\n return functools.reduce(operator.add,\n x_list_0 + x_list_1 + [y_dict[\"a\"], y_dict[\"b\"]])\n\n self.CheckShapePolymorphism(\n add_all_jax,\n input_signature=[([tf.TensorSpec([None]), tf.TensorSpec([None])],\n [tf.TensorSpec([None])]),\n dict(a=tf.TensorSpec([None]), b=tf.TensorSpec([None]))],\n in_shapes=[([\"(v,)\", \"(v,)\"], [(\"v,\")]),\n dict(a=\"(v,)\", b=\"(v,)\")],\n expected_output_signature=tf.TensorSpec([None]))\n\n # Now partial in_shapes; the parts of the in_shapes that are not specified\n # must have full input_signatures.\n self.CheckShapePolymorphism(\n add_all_jax,\n input_signature=[([tf.TensorSpec([4]), tf.TensorSpec([4])],\n [tf.TensorSpec([4])]),\n dict(a=tf.TensorSpec([4]), b=tf.TensorSpec([4]))],\n in_shapes=[([\"(4,)\", \"(_,)\"], [(\"4,\")]),\n dict(a=\"(_,)\", b=\"(4,)\")],\n expected_output_signature=tf.TensorSpec([4]))", "def test_generator1(self):\n xpb = XPathBuilder()\n xp = xpb.foo\n xp = xp.bar\n xp = xp.baz[xpb.attr('x') == 'y']\n xp = xp[1]\n exp = '/foo/bar/baz[@x = \"y\"][1]'\n self.assertEqual(xp.tostring(), exp)", "def create_four_subplots():\n pass", "def test_hierarchical_register_and_contain(self):\n space = Space()\n\n categories = {\"asdfa\": 0.1, 2: 0.2, 3: 0.3, 4: 0.4}\n dim = Categorical(\"yolo.nested\", categories, shape=2)\n space.register(dim)\n dim = Integer(\"yolo2.nested\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim)\n\n trial = Trial(\n params=[\n {\"name\": \"yolo.nested\", \"value\": [\"asdfa\", 2], \"type\": \"categorical\"},\n {\"name\": \"yolo2.nested\", \"value\": 1, \"type\": \"integer\"},\n {\"name\": \"yolo3\", \"value\": 0.5, \"type\": \"real\"},\n ]\n )\n\n assert \"yolo\" in trial.params\n assert \"nested\" in trial.params[\"yolo\"]\n assert \"yolo2\" in trial.params\n assert \"nested\" in trial.params[\"yolo2\"]\n assert \"yolo3\" in trial.params\n\n assert trial in space", "def test_make_pop(self, pop_size, cell_number, microcell_number):\n for i in [0, 1]:\n pe.Parameters.instance().use_ages = i\n # Population is initialised with no households\n pop_params = {\"population_size\": pop_size,\n \"cell_number\": cell_number,\n \"microcell_number\": microcell_number}\n test_pop = ToyPopulationFactory.make_pop(pop_params)\n\n total_people = 0\n count_non_empty_cells = 0\n for cell in test_pop.cells:\n for microcell in cell.microcells:\n total_people += len(microcell.persons)\n if len(cell.persons) > 0:\n count_non_empty_cells += 1\n # Test there is at least one non-empty cell\n self.assertTrue(count_non_empty_cells >= 1)\n # Test that everyone in the population has been assigned a\n # microcell\n self.assertEqual(total_people, pop_size)\n\n # Test a population class object is returned\n self.assertIsInstance(test_pop, pe.Population)", "def test_path4():\n path = [\n (0, 0, 1),\n [('A', 3, 0)],\n (0, 1, 1),\n [('A', 2, 0)],\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n (0, 1, 1),\n (np.pi/2, 1, 1),\n ] * 4\n execute_path(path,True)", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = 
np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def main(showSamples=True, showConfusion=True):\n ndigit = 10\n elambda = [0.4, 0.6, 0.8]\n for i in elambda:\n test(ndigit, i, showSamples, showConfusion)\n if showSamples:\n pltmulti('graphs.pdf')", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def test_get_grid_edge_nodes(flopy_dis_mf6):\n mf6 = flopy_dis_mf6[1]\n mf6.initialize()\n\n with pytest.raises(NotImplementedError):\n mf6.get_grid_edge_nodes(1, np.zeros((1, 1)))", "def generate_graph(data, x=\"Red blood Cells\", y=\"Paletes\", z=\"Leukocytes\", graph_type='scatter', selected_points=[], *args, **kwargs):\n\n # Return an empty figure if the input is empty\n if x == [] and y==[]:\n return go.Figure()\n \n # Transform the data into wide format\n melted = pd.melt(data, value_vars=data.columns)\n\n if graph_type==\"histogram\":\n if len(x) > 1:\n return generate_histogram(melted, x, y, \"wide\", **kwargs)\n elif len(x) == 0:\n return go.Figure()\n return generate_histogram(data, x, y, \"long\", **kwargs)\n\n elif graph_type==\"scatter\":\n if len(x) == 1 and len(y) 
== 1:\n return generate_scatter(data, x[0], y[0], \"long\", selected_points=selected_points, **kwargs)\n\n elif len(x) > 1 or len(y) > 1:\n return generate_scatter_matrix(data, x, y, data_format=\"long\", **kwargs)\n\n return generate_scatter(melted, x, y, \"wide\")\n\n elif graph_type==\"box\":\n return generate_box_plot(data, x, \"long\", **kwargs)\n\n elif graph_type==\"heatmap\":\n if len(x) > 0 and len(y) > 0:\n return generate_heatmap(data, x, y, \"long\", **kwargs)\n return go.Figure()\n\n elif graph_type==\"par_coords\":\n return generate_parallel_coords(data, x, y, \"long\", **kwargs)\n\n elif graph_type==\"strip\":\n if len(x) > 0 and len(y) > 0:\n return generate_strip(data, x, y, \"long\", **kwargs)\n return go.Figure()\n\n elif graph_type==\"ternary\":\n if len(x) > 0 and len(y) > 0 and len(z) > 0:\n return generate_ternary(data, x, y, z, \"long\", **kwargs)\n return go.Figure()", "def test_subkey(man):\n errors = []\n\n G = man.writeTest()\n\n G.addVertex(\"Work\", \"Thing\", {})\n G.addVertex(\"Workflow\", \"Thing\", {})\n G.addVertex(\"Other\", \"Thing\", {})\n G.addVertex(\"OtherGuy\", \"Thing\", {})\n\n G.addEdge(\"Work\", \"Other\", \"edge\")\n G.addEdge(\"Workflow\", \"OtherGuy\", \"edge\")\n\n count = 0\n for i in G.query().V(\"Work\").out():\n count += 1\n if count != 1:\n errors.append(\"Incorrect outgoing vertex count %d != %d\" % (count, 1))\n\n count = 0\n for i in G.query().V(\"Work\").outE():\n count += 1\n if count != 1:\n errors.append(\"Incorrect outgoing edge count %d != %d\" % (count, 1))\n\n count = 0\n for i in G.query().V(\"Other\").inE():\n count += 1\n if count != 1:\n errors.append(\"Incorrect incoming edge count %d != %d\" % (count, 1))\n\n return errors", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def test_dummy4(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.log_not() is xp)", "def test_duplicate(self):\n test_file = os.path.join(INPUT_HYPM_PATH, 'unit_364-2013-225-1-0.mdd')\n\n mdd.procall([test_file])\n \n self.compare_node58()", "def test_general_subset_all():\n pass", "def main():\n dims = params['dims']\n\n for d 
in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def sub_graph_merging(self):\n raise NotImplementedError()", "def test_get_dependencies_subgraph_by_dfs(\n self, source_node, expected_nodes_in, expected_nodes_out\n ):\n graph = nx.DiGraph()\n graph.add_node(\"pack1\")\n graph.add_node(\"pack2\")\n graph.add_node(\"pack3\")\n graph.add_node(\"pack4\")\n graph.add_edge(\"pack1\", \"pack2\")\n graph.add_edge(\"pack2\", \"pack3\")\n dfs_graph = PackDependencies.get_dependencies_subgraph_by_dfs(\n graph, source_node\n )\n for i in expected_nodes_in:\n assert i in dfs_graph.nodes()\n for i in expected_nodes_out:\n assert i not in dfs_graph.nodes()", "def test_zip_batch(self):\n assert self.design.layout.layers[0].name == 'top'", "def test_Tree():", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with 
open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def setUp(self):\n\n\n # InverseLabeling\n invLabeling0 = {'L0': [0, 1, 2]}\n\n invLabeling1 = {'L0' : [0, 2],\n 'L1' : [1]}\n\n invLabeling2 = {\n 'L0' : [0],\n 'L1' : [1],\n 'L2' : [2]\n }\n\n invLabeling3 = {\n 'L1' : [0, 1],\n 'L2' : [2]\n }\n\n invLabeling4 = {\n 'L0' : [0,1],\n 'L1' : [0],\n 'L2' : [2]\n }\n\n invLabeling5 = {\n 'L0': [0, 1, 2],\n 'L1': []\n }\n \n # Create some ontologies\n ontology0 = {'L0': ['L0']}\n\n ontology1 = {}\n\n ontology2 = {'L0': ['L1']}\n\n ontology3 = {'L0': ['L1', 'L2'],\n 'L1': ['L2'],\n 'L2': ['L0']}\n\n if self.id().split('.')[-1] == 'test_createLinkograph':\n self.testParams = [\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1, 2}),\n ({'L0'}, {0}, {2}),\n ({'L0'}, {0,1}, set())] \n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology0,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {2}),\n ({'L1'}, set(), set()),\n ({'L0'}, {0}, set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology1,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L1'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology2,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling0,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling1,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1}),\n ({'L1'}, {0}, set()),\n ({'L0'}, set(), set())]\n )},\n\n {'inverseLabeling': invLabeling2,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), {1,2}),\n ({'L1'}, {0}, {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling3,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L1'}, set(), {2}),\n ({'L1'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling4,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0', 'L1'}, set(), {2}),\n ({'L0'}, set(), {2}),\n ({'L2'}, {0, 1}, set())]\n )},\n\n {'inverseLabeling': invLabeling5,\n 'ontology': ontology3,\n 'ExpectedLinkograph':\n linkoCreate.Linkograph(\n [({'L0'}, set(), set()),\n ({'L0'}, set(), set()),\n 
({'L0'}, set(), set())]\n             )},\n\n            ]", "def test_input_valid_subgraph(self, dim):\r\n        with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n            clique.swap([0, dim], nx.empty_graph(dim))", "def test_dummy5(self):\n        xpb = XPathBuilder()\n        xp = xpb.dummy()\n        xp = xpb.foo & xp\n        exp = '/foo'\n        self.assertEqual(xp.tostring(), exp)", "def test6():\n    setLogLevel(\"info\")\n    info(\"Experimental unit configuration\")\n    \"\"\" 1 -> Topology definition \"\"\"\n    t1 = Topologia1()\n    ue1 = UnidadExperimental()\n    ue1.setTopo(t1)\n    ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Case for normal traffic only\n    ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n    info(\"Experiment configuration\")\n    \"\"\" 3. Experiment configuration \"\"\"\n    exp1 = Experimento()\n    exp1.configureParams(ue1)\n    exp1.configurarTrafico('ataque')\n    \"\"\" 4. Start of the experiment \"\"\"\n    exp1.startTest()\n    \"\"\" 5. Running the tests \"\"\"\n    exp1.trafico.iperfMeasure()\n    exp1.trafico.iperfMeasure(filename='iperf_ataque_test.log')\n    \"\"\" 6. End of the experiment \"\"\"\n    exp1.endTest()\n    info(\"Removing the topology\\n\")\n    exp1.killTest()\n    info(\"Removing the controller\\n\")\n    exp1.killController() # Without this the controller is not shut down", "def test_ExplorePath( self ):\n        links = []\n        n1 = graph.Node( 10, 10 )\n        n2 = graph.Node( 10, 20 )\n        n3 = graph.Node( 10, 30 )\n        n4a = graph.Node( 5, 40 )\n        n4b = graph.Node( 15, 40 )\n        n5a = graph.Node( 5, 50 )\n        n5b = graph.Node( 15, 50 )\n        n6a = graph.Node( 5, 60 )\n        n6b = graph.Node( 15, 60 )\n        n7 = graph.Node( 10, 70 )\n\n        links.append( graph.Link( n1, n2 ) )\n        links.append( graph.Link( n2, n3 ) )\n        links.append( graph.Link( n3, n4a ) )\n        links.append( graph.Link( n3, n4b ) )\n        links.append( graph.Link( n4a, n5a ) )\n        links.append( graph.Link( n4b, n5b ) )\n        links.append( graph.Link( n5a, n6a ) )\n        links.append( graph.Link( n6a, n7 ) )\n        links.append( graph.Link( n5b, n7 ) )\n        roots = [n1]\n        actual = nodes.explorePath( links, n1, n1 )\n        expected = [ n1, n2, n3, n4b, n5b, n7 ]\n        self.assertEqual( expected, actual )", "def par_test_8(self):\n\n        for i in range(4):\n            self.XY_par_factor.setMaxDepth(i)\n            self.XY_par_factor.setMaxDepth(i)\n\n        res = self.XY_factor.mult(self.XY_factor)\n        par_res = self.XY_par_factor.mult(self.XY_par_factor)\n        assert res.rand_vars == par_res.rand_vars and res.values == par_res.values", "def test_construct_subcircuit_layers(self):\r\n        dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n        def circuit(params):\r\n            # section 1\r\n            qml.RX(params[0], wires=0)\r\n            # section 2\r\n            qml.RY(params[1], wires=0)\r\n            qml.CNOT(wires=[0, 1])\r\n            qml.CNOT(wires=[1, 2])\r\n            # section 3\r\n            qml.RX(params[2], wires=0)\r\n            qml.RY(params[3], wires=1)\r\n            qml.RZ(params[4], wires=2)\r\n            qml.CNOT(wires=[0, 1])\r\n            qml.CNOT(wires=[1, 2])\r\n            # section 4\r\n            qml.RX(params[5], wires=0)\r\n            qml.RY(params[6], wires=1)\r\n            qml.RZ(params[7], wires=2)\r\n            qml.CNOT(wires=[0, 1])\r\n            qml.CNOT(wires=[1, 2])\r\n            return qml.expval(qml.PauliX(0)), qml.expval(qml.PauliX(1)), qml.expval(qml.PauliX(2))\r\n\r\n        circuit = qml.QNode(circuit, dev)\r\n\r\n        params = np.ones([8])\r\n        tapes = circuit.metric_tensor(params, only_construct=True)\r\n\r\n        # this circuit should split into 4 independent\r\n        # sections or layers when constructing subcircuits\r\n        assert len(tapes) == 4\r\n\r\n        # first layer subcircuit\r\n        assert len(tapes[0].operations) == 1\r\n        assert isinstance(tapes[0].operations[0], qml.Hadamard) # PauliX decomp\r\n\r\n        # second 
layer subcircuit\r\n assert len(tapes[1].operations) == 4\r\n assert isinstance(tapes[1].operations[0], qml.RX)\r\n # PauliY decomp\r\n assert isinstance(tapes[1].operations[1], qml.PauliZ)\r\n assert isinstance(tapes[1].operations[2], qml.S)\r\n assert isinstance(tapes[1].operations[3], qml.Hadamard)\r\n\r\n # # third layer subcircuit\r\n assert len(tapes[2].operations) == 8\r\n assert isinstance(tapes[2].operations[0], qml.RX)\r\n assert isinstance(tapes[2].operations[1], qml.RY)\r\n assert isinstance(tapes[2].operations[2], qml.CNOT)\r\n assert isinstance(tapes[2].operations[3], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[2].operations[4], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[2].operations[5], qml.PauliZ)\r\n assert isinstance(tapes[2].operations[6], qml.S)\r\n assert isinstance(tapes[2].operations[7], qml.Hadamard)\r\n\r\n # # fourth layer subcircuit\r\n assert len(tapes[3].operations) == 13\r\n assert isinstance(tapes[3].operations[0], qml.RX)\r\n assert isinstance(tapes[3].operations[1], qml.RY)\r\n assert isinstance(tapes[3].operations[2], qml.CNOT)\r\n assert isinstance(tapes[3].operations[3], qml.CNOT)\r\n assert isinstance(tapes[3].operations[4], qml.RX)\r\n assert isinstance(tapes[3].operations[5], qml.RY)\r\n assert isinstance(tapes[3].operations[6], qml.RZ)\r\n assert isinstance(tapes[3].operations[7], qml.CNOT)\r\n assert isinstance(tapes[3].operations[8], qml.CNOT)\r\n # PauliX decomp\r\n assert isinstance(tapes[3].operations[9], qml.Hadamard)\r\n # PauliY decomp\r\n assert isinstance(tapes[3].operations[10], qml.PauliZ)\r\n assert isinstance(tapes[3].operations[11], qml.S)\r\n assert isinstance(tapes[3].operations[12], qml.Hadamard)", "def test_get_child():\n \n root_ts = TrackSegment(flow_dict=flow_dict) \n \n # ROOT MODULE\n start = root_ts.get_child(\"start\")\n # check depth\n assert(start.depth == root_ts.depth+1)\n # check parent\n assert(start.parent is root_ts)\n # check module_id\n assert(start.module_id == 'start')\n \n # CHILD (1,2,3)\n root_ts_get_child_result = root_ts.get_child((1,2,3))\n # check depth\n assert(root_ts_get_child_result.depth == root_ts.depth+1)\n # check parent\n assert(root_ts_get_child_result.parent is root_ts)\n \n # CHILD (2,3,4)\n root_ts_get_child_result2 = root_ts_get_child_result.get_child((2,3,4))\n # check depth\n assert(root_ts_get_child_result2.depth == root_ts.depth+2)\n # check parent \n assert(root_ts_get_child_result2.parent is root_ts_get_child_result)\n \n print(\"TEST GET_CHILD: success!\")", "def test_get_subvertices_from_vertex(self):\n subvertices = list()\n subvertices.append(PartitionedVertex(None, \"\"))\n subvertices.append(PartitionedVertex(None, \"\"))\n subvert1 = PartitionedVertex(None, \"\")\n subvert2 = PartitionedVertex(None, \"\")\n\n subedges = list()\n subedges.append(MultiCastPartitionedEdge(subvertices[0],\n subvertices[1]))\n subedges.append(MultiCastPartitionedEdge(subvertices[1],\n subvertices[1]))\n\n graph_mapper = GraphMapper()\n vert = TestVertex(4, \"Some testing vertex\")\n\n vertex_slice = Slice(0, 1)\n graph_mapper.add_subvertex(subvert1, vertex_slice, vert)\n vertex_slice = Slice(2, 3)\n graph_mapper.add_subvertex(subvert2, vertex_slice, vert)\n\n returned_subverts = graph_mapper.get_subvertices_from_vertex(vert)\n\n self.assertIn(subvert1, returned_subverts)\n self.assertIn(subvert2, returned_subverts)\n for sub in subvertices:\n self.assertNotIn(sub, returned_subverts)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n 
self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def __init__(self, firstParent, secondParent):\n CrossOver.__init__(self, \"Group Point CrossOver\", firstParent, secondParent)", "def testFlatSeries(self):\n self.AddToChart(self.chart, [5, 5, 5])\n self.assertEqual(self.Param('chd'), 's:AAA')\n self.chart.left.min = 0\n self.chart.left.max = 5\n self.assertEqual(self.Param('chd'), 's:999')\n self.chart.left.min = 5\n self.chart.left.max = 15\n self.assertEqual(self.Param('chd'), 's:AAA')", "def generate_test_graph(sameDomain = False):\n num = 100\n\n urls = []\n emails = []\n nodes={}\n if sameDomain:\n domain = generate_domainname()\n else:\n domain = None\n for i in range(num):\n urls.append(generate_url(domain))\n emails.append(generate_email())\n \n used_urls = set()\n used_emails = set()\n for u in urls:\n l = random.choices(urls, k = floor(num/4))\n #l = [u for u in urls]\n e = random.choices(emails, k = floor(num/10))\n #e = [e for e in emails]\n used_urls.update(l)\n used_emails.update(e)\n nodes[u] = testNode(u, l, e)\n nodes[u].generate_page()\n \n return nodes, urls, emails", "def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )", "def genNextPop(prevPop, masterList, popSize):\n parSize = int(popSize / 10) #top 10%\n parentPop = [Team(prevPop[i].roster) for i in range(parSize)]\n parentPop = getStats(parentPop, masterList) # inefficent\n rosterSize = len(parentPop[0].roster)\n newPop = []\n #parentsList = [] #debug\n for i in range(popSize):\n chromosome = doCrossover(parentPop, parSize, rosterSize)\n newPop.append(Team(chromosome))\n getStats(newPop, masterList)\n #showStats(newPop, masterList, \"all\")\n #debug code\n #for playerInd in chromosome:\n # print(masterList[playerInd].pos)\n\n return newPop", "def test_population_movements_with_compilation(self):\n self._pystepx = PySTEPXIsland(nb_islands=4, init_script=init_script)\n print self._pystepx._rc[0]['gp_engine']\n self._pystepx._rc[0].execute('elems = gp_engine.get_evolver().select_and_remove_individuals(0.01)',\n block=True)\n print self._pystepx._rc[0]['elems']", "def project_pop(self):\n M = self.N[0:2]\n for x in range(10):\n M.append(self.run_step(M))\n split_N = split_list(M)\n \n fig = self.make_figure(split_N)\n fig.update_layout(title='Projected Fish Population')\n\n return fig" ]
[ "0.81625026", "0.75757235", "0.63863367", "0.63754267", "0.61888385", "0.5910345", "0.5906325", "0.568147", "0.56611234", "0.5634181", "0.5622761", "0.5596353", "0.5533244", "0.5507174", "0.54756355", "0.5458438", "0.5447056", "0.53142405", "0.5302135", "0.5267153", "0.5253093", "0.5245071", "0.5240114", "0.5235194", "0.5217268", "0.5196521", "0.51842874", "0.51788044", "0.51737154", "0.5170908", "0.5161676", "0.514867", "0.514599", "0.5143633", "0.5143085", "0.513967", "0.51278925", "0.5127106", "0.51228756", "0.51074904", "0.5107393", "0.5107199", "0.50886023", "0.50847566", "0.508385", "0.50830364", "0.5071429", "0.50665736", "0.5062188", "0.50593823", "0.5051964", "0.5051814", "0.5043247", "0.5042272", "0.50368017", "0.5021848", "0.5012966", "0.5008378", "0.4992401", "0.4990757", "0.49878663", "0.49834293", "0.49813193", "0.49793318", "0.49724782", "0.4965399", "0.496524", "0.49537122", "0.49523303", "0.49405652", "0.49405012", "0.4940013", "0.49369767", "0.49287248", "0.4925167", "0.49217826", "0.49160716", "0.4912953", "0.491277", "0.49070102", "0.49031508", "0.49011728", "0.49007982", "0.48968747", "0.48965114", "0.48953682", "0.4891352", "0.48869544", "0.48806882", "0.48784497", "0.48766583", "0.4876114", "0.48691735", "0.48690647", "0.4867235", "0.48659053", "0.48625904", "0.48558822", "0.4853836", "0.48494703" ]
0.69611263
2
Test the code loading example
def test_documentation_popxl_code_loading(self): filename = "code_loading.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_example(decorated_example):\n import visual_coding_2p_analysis", "def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scripts = os.listdir(example_dir)\n for script in example_scripts:\n if script.endswith('.py'):\n fname = os.path.join(example_dir, script)\n try:\n subprocess.check_call(['python', fname])\n except subprocess.CalledProcessError:\n raise AssertionError('Example {} failed'.format(fname))", "def test_load_simple_module():\n loader = Loader()\n main_fname = loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def test_script(self) -> None:\n main()", "def test_module(self):\n pass", "def test_load_quality_codes():\n assert len(code_reader.load_quality_codes()) > 0", "def _test():\n import doctest", "def test_Demo(self):\n self._run(self._example_scenarios, \"Demo\")", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def test_documentation_popxl_nested_code_loading(self):\n filename = \"code_loading_nested.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test():\n import doctest\n from . 
import locate\n return doctest.testmod(locate)", "def test_main():\n # Setup\n # Exercise\n # Verify", "def __main() :\n launchTests()", "def runtest(self):", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test():\n pass", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)", "def test(self):\n pass", "def tests():", "def test():\n loader = unittest.TestLoader()\n suite = loader.discover(os.path.dirname(__file__))\n runner = unittest.TextTestRunner()\n runner.run(suite)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_functional(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n self.assertEqual(\n data['items'][0]['fullName'],\n 'example.example'\n )", "def setUp(self):\n self.example = Example()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_examples(fname):\n app = use_app()\n app.start_timer(0, app.quit)\n if \"OLD\" in fname:\n with pytest.warns(FutureWarning):\n runpy.run_path(fname)\n else:\n try:\n runpy.run_path(fname)\n except ImportError as e:\n if \"Numpy required to use images\" in str(e):\n pytest.skip(\"numpy unavailable: skipping image example\")", "def test_sample():\n print(os.getcwd())\n run_validator(SAMPLE_FILE_PATH)", "def test_examples():\n argv = [\"py.test\", \"-examples\"]\n assert get_sargs(argv) is None", "def main():\n print(\"It works!!! ;-)\")\n ###TODO### do something with the various methods/functions of this file", "def main():\n doctest.testmod()\n game()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_examples_for_real(platforms, path):\n if 'TRAVIS' in os.environ:\n return #: Doesn't work on travis\n\n #: Pretty hackish but whatever\n prepare_new_app(config)\n\n #: Load the code\n dir_path = os.path.abspath(os.path.split(os.path.dirname(__file__))[0])\n enaml_file = os.path.join(dir_path, 'examples', os.path.normpath(path))\n\n with open(enaml_file, 'rb') as f:\n source = f.read()\n\n #: Trigger a reload\n r = requests.post(\"http://localhost:8888/\", json={\n \"type\": \"reload\",\n \"files\": {'view.enaml': source},\n }).json()\n assert r['ok'], \"Failed to reload {}!\".format(enaml_file)\n\n #: TODO need a way to know when everything is done...\n #: should read the log unil it stops\n time.sleep(5)\n #: Flush logcat\n\n #: Save it\n stats = parse_stats(sh.adb('logcat', '-d'))\n config['stats'][enaml_file] = stats\n\n #: Save it\n data = json.dumps(config,indent=2)\n with open('tmp/stats.json', 'w') as f:\n f.write(data)\n\n #: TODO: Now compare it to the baseline", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def Cpp_test():\n pass", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = 
unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def _test():\n import doctest\n doctest.testmod(verbose=1)", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def _test():\n import doctest\n doctest.testmod()", "def _test():\n import doctest\n doctest.testmod()", "def test(): \n\treturn [\"vice.yields.ccsne.import\", \n\t\t[ \n\t\t\ttest_LC18_import(), \n\t\t\ttest_CL13_import(), \n\t\t\ttest_CL04_import(), \n\t\t\ttest_WW95_import(), \n\t\t\ttest_NKT13_import(), \n\t\t\ttest_S16_import() \n\t\t] \n\t]", "def main():\n run_test_all()", "def _test(): # pragma: no cover\r\n print('Starting doctest')\r\n doctest.testmod()\r\n print('Completed doctest')", "def unitary_test():", "def test():", "def test():", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def test_loading_document(self):", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def fixture_example_data():\n import_example_data()", "def main(source):\n pass", "def test_execute_code_sample():\n hello = Hello()\n assert hello.out() == 'Hello, world!'", "def test_get_run(self):\n pass", "def test_library_and_test_code(self):\n\n def do_check(path):\n \"\"\"The contents of the .iml file should certain sourceFolder entries:\n\n <sourceFolder url=\".../testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib\" isTestSource=\"true\" />\n \"\"\"\n found = set()\n iml_file = os.path.join(path, 'project.iml')\n self.assertTrue(os.path.exists(iml_file))\n dom = minidom.parse(iml_file)\n for sourceFolder in self._get_sourceFolders(dom):\n url = sourceFolder.getAttribute('url')\n is_test_source = sourceFolder.getAttribute('isTestSource')\n type_attr = sourceFolder.getAttribute('type')\n url = re.sub(r'^.*/testprojects/', 'testprojects/', url)\n found.add(url)\n if url == 'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib':\n self.assertEquals('', type_attr)\n self.assertEquals('True', is_test_source)\n\n self.assertEquals(set([\n 
'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib',\n ]), found)\n\n self._idea_test([\n 'testprojects/tests/java/org/pantsbuild/testproject/ideatestsandlib::'\n ], check_func=do_check)", "def lua_test():\n pass", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_begin(self):", "def test_basic_execution(self):", "def runTests(self):\n \n pass", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def test():#Test functions\n python = ProgrammingLanguage(\"Java\", \"Dynamic\", True, 1987)\n print(python)", "def run_playground():\n from .playground import playground\n playground()", "def test_compute_glycemic_load(self):\n pass", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def startTestRun(self):", "def test_py_compile_basic(self):\n self._test_py_compile('basic')", "def test(self):", "def test(self):" ]
[ "0.7195194", "0.7059802", "0.6890149", "0.6869557", "0.6869557", "0.6869557", "0.6869557", "0.67875314", "0.6767249", "0.6749942", "0.67483723", "0.67235684", "0.6716485", "0.6689319", "0.66765344", "0.66539055", "0.66304076", "0.6627514", "0.65707266", "0.6563701", "0.6548684", "0.65423787", "0.6523631", "0.6505159", "0.6482269", "0.6477886", "0.64554393", "0.6444179", "0.64405406", "0.6438618", "0.64318645", "0.6426611", "0.6425933", "0.6419005", "0.63982403", "0.637443", "0.6365926", "0.6361171", "0.63493687", "0.63487035", "0.63462895", "0.6342258", "0.6342258", "0.6342258", "0.6337438", "0.6337438", "0.6331677", "0.6327746", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.63256466", "0.6324298", "0.6318961", "0.63129354", "0.62995124", "0.62995124", "0.62937677", "0.62843126", "0.62792987", "0.626754", "0.62627316", "0.62627316", "0.625056", "0.6241495", "0.62375814", "0.6233974", "0.6232294", "0.62254655", "0.6212669", "0.62099594", "0.62049687", "0.62000775", "0.6196725", "0.6179676", "0.6176563", "0.61540693", "0.6145558", "0.6138003", "0.61320925", "0.61221206", "0.61221206", "0.6121171", "0.61206573", "0.6116493", "0.61063963", "0.6105194", "0.6101723", "0.60957116", "0.60957116" ]
0.74833703
0
Test the nested code loading example
def test_documentation_popxl_nested_code_loading(self): filename = "code_loading_nested.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_example(decorated_example):\n import visual_coding_2p_analysis", "def inner_test():\n pass", "def inner_test():\n pass", "def test_documentation_popxl_code_loading(self):\n filename = \"code_loading.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test_CL04_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL04 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL04\", test]", "def test_load_testcase(self):\n tests = self.loader.load(\"tests.sampletest.hellotest.HelloTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest.hellotest import HelloTest\n\n self.assertEqual(type(tests[0]), HelloTest)", "def test_loading_document(self):", "def test_NKT13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import NKT13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.NKT13\", test]", "def test_simple(self):\n bento_info = \"\"\"\\\nName: foo\n\nLibrary:\n Packages: foo, foo.bar\n Modules: fubar\n\"\"\"\n self._test_run(bento_info)", "def test_module(self):\n pass", "def test_CL13_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. import CL13 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.CL13\", test]", "def _test():\n import doctest", "def test_001(settings, inspector):\n sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')\n\n inspector.inspect(sourcepath)\n\n inspector.reset()\n\n assert inspector._CHILDREN_MAP == {}\n assert inspector._PARENTS_MAP == {}\n assert inspector.children(sourcepath) == set([])\n assert inspector.parents(sourcepath) == set([])", "def test_LC18_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import LC18 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.LC18\", test]", "def main():\n\n print(\"=\" * 80)\n print(\"DATA STRUCTURE TESTS\")\n test_module(structs.tests)\n test_module(structs.regularization)\n\n print(\"=\" * 80)\n print(\"END-TO-END TESTS\")\n test_module(globals())", "def test_module(self):\n data = (\n os.path.join(\n _CURRENT_DIRECTORY,\n \"fake_project\",\n \"_modules\",\n \"fake_project\",\n \"nested_folder\",\n \"another.html\",\n ),\n \"\",\n )\n\n content = self._get_fake_project_nested_module()\n\n expected = textwrap.dedent(\n '''\\\n #!/usr/bin/env python\n # -*- coding: utf-8 -*-\n\n \"\"\"A module that shows every type of documentable class / method / function.\n\n Attributes:\n ATTRIBUTE_VALUE (float):\n Some number.\n\n \"\"\"\n\n\n ATTRIBUTE_VALUE = 14.3\n\n\n class MyKlass(object):\n \"\"\"A class that does something.\n\n Multi-line information here.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"asdfasdf\"\n\n def __init__(self, value):\n \"\"\"Create this instance.\"\"\"\n # A comment that should show up in the unittest's results\n super(MyKlass, self).__init__()\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 8\n\n\n class ParentClass(object):\n \"\"\"The outter class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"tttt\"\n\n class NestedClass(object):\n \"\"\"A class within a class.\n\n Attributes:\n attribute_value (str):\n Some string.\n\n \"\"\"\n\n attribute_value = \"zzzzzzzzzzzzz\"\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 5\n\n @staticmethod\n def get_staticmethod():\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n @classmethod\n def get_classmethod(cls):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n def get_method(self):\n \"\"\"int: Get some value.\"\"\"\n return 6\n\n\n def _set_private_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 1\n\n\n def set_function_thing(value, another):\n \"\"\"Do something here.\"\"\"\n # Do something with these values\n # and more comment text, here.\n #\n if value:\n return 2\n\n # Another comment\n return 1'''\n )\n\n self._test(data, content, expected) # pylint: disable=no-value-for-parameter", "def test_documentation_popxl_nested_session_contexts(self):\n filename = \"nested_session_contexts.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test(self):\n pass", "def test_load_testcase_in_module(self):\n tests = self.loader.load(\"tests.sampletest.InitTest\")\n self.assertEqual(len(tests), 1)\n from tests.sampletest import InitTest\n\n self.assertEqual(type(tests[0]), InitTest)", "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_simple_module():\n loader = Loader()\n main_fname = 
loader.load(\"https://gist.githubusercontent.com/miohtama/80391980c2e73b285cfe/raw/dd89a55497ba33a6014453d9bb7432ab424c01cf/kivyhello.py#main\")\n mod = path_to_mod_name(main_fname)\n result = loader.run(mod, \"hello\")\n assert result == \"Hello there\"\n loader.close()", "def test_main():\n # Setup\n # Exercise\n # Verify", "def fixture_example_data():\n import_example_data()", "def test_001_basic(settings, inspector):\n sources = [\n os.path.join(settings.sample_path, 'main_basic.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),\n os.path.join(settings.sample_path, 'main_with_subimports.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n ]\n sourcepath = os.path.join(settings.sample_path, 'main_basic.scss')\n\n inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)\n\n parents = inspector.parents(sourcepath)\n assert parents == set([\n os.path.join(settings.sample_path, 'main_depth_import-1.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-2.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),\n os.path.join(settings.sample_path, 'main_with_subimports.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n ])", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def runtest(self):", "def test_compute_glycemic_load(self):\n pass", "def test_functional(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.example.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n self.assertEqual(\n data['items'][0]['fullName'],\n 'example.example'\n )", "def test_pep8_conformance_example(self):\n\n print(\"\\r\\n\")\n\n # Get the path to current directory\n path = os.path.dirname(os.path.realpath(__file__))\n path += \"/../docs/examples/\"\n\n # Find all the examples files\n file_paths = []\n for root, dirnames, filenames in os.walk(path):\n for file_path in fnmatch.filter(filenames, '*.py'):\n file_paths.append(os.path.join(root, file_path))\n\n for path in file_paths:\n self.run_check(path)", "def test_include():\n from bst import BST", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_load_case_objects(cli_args_fixture, tmp_path):\n # cli_args_fixture fixture is called but not used simply in order to mock it in render_json.dump()\n tmp_file = os.path.join(tmp_path, 'stub-load.json')\n # - protocol\n proto_ref, proto_name, proto_blocking, proto_is_database = ('foo', 'bar', True, False)\n # - crawl strategy\n cs_description, cs_name, cs_providers, cs_provider_args, cs_child_provider, cs_filter, cs_rewrites = \\\n ('foo', 'bar', ['baz'], {'buzz': 'buzzbuzz'}, {'qux': True}, {'quux': True}, {'quz': True})\n # - node\n node_ref, node_prov, node_mux, node_hint, node_address, node_service_name, node_children, node_warn, node_err = \\\n ('a_ref', 'a_prov', 'a_mux', True, 'an_add', 'a_name', {'foo': 'child'}, {'bar': True}, {'baz': True})\n stub_json = f\"\"\"\n{{\n \"args\": {{\n \"max_depth\": 0,\n \"skip_nonblocking_grandchildren\": false\n }},\n \"tree\": {{\n \"{node_ref}\": {{\n \"__type__\": \"Node\",\n \"provider\": \"{node_prov}\",\n \"protocol_mux\": \"{node_mux}\",\n \"from_hint\": {str(node_hint).lower()},\n \"address\": \"{node_address}\",\n \"service_name\": \"{node_service_name}\",\n \"children\": {json.dumps(node_children)},\n \"warnings\": {json.dumps(node_warn)},\n \"errors\": {json.dumps(node_err)},\n \"crawl_strategy\": {{\n \"__type__\": 
\"CrawlStrategy\",\n \"description\": \"{cs_description}\",\n \"name\": \"{cs_name}\",\n \"providers\": {json.dumps(cs_providers)},\n \"provider_args\": {json.dumps(cs_provider_args)},\n \"child_provider\": {json.dumps(cs_child_provider)},\n \"service_name_filter\": {json.dumps(cs_filter)},\n \"service_name_rewrites\": {json.dumps(cs_rewrites)},\n \"protocol\": {{\n \"__type__\": \"Protocol\",\n \"ref\": \"{proto_ref}\",\n \"name\": \"{proto_name}\",\n \"blocking\": {str(proto_blocking).lower()},\n \"is_database\": {str(proto_is_database).lower()}\n }}\n }},\n \"protocol\": {{\n \"__type__\": \"Protocol\",\n \"ref\": \"{proto_ref}\",\n \"name\": \"{proto_name}\",\n \"blocking\": {str(proto_blocking).lower()},\n \"is_database\": {str(proto_is_database).lower()}\n }}\n }}\n }}\n}}\n\"\"\"\n with open(tmp_file, 'w') as f:\n f.write(stub_json)\n\n # act\n tree = render_json.load(tmp_file)\n\n # assert\n # - Node()\n assert isinstance(tree[node_ref], node.Node)\n loaded_node = tree[node_ref]\n assert loaded_node.provider == node_prov\n assert loaded_node.protocol_mux == node_mux\n assert loaded_node.from_hint == node_hint\n assert loaded_node.address == node_address\n assert loaded_node.service_name == node_service_name\n assert loaded_node.children == node_children\n assert loaded_node.warnings == node_warn\n assert loaded_node.errors == node_err\n # - CrawlStrategy()\n assert isinstance(loaded_node.crawl_strategy, charlotte.CrawlStrategy)\n loaded_cs = loaded_node.crawl_strategy\n assert loaded_cs.description == cs_description\n assert loaded_cs.name == cs_name\n assert loaded_cs.providers == cs_providers\n assert loaded_cs.provider_args == cs_provider_args\n assert loaded_cs.child_provider == cs_child_provider\n assert loaded_cs.service_name_filter == cs_filter\n assert loaded_cs.service_name_rewrites == cs_rewrites\n # - Protocol\n assert isinstance(loaded_cs.protocol, charlotte_web.Protocol)\n loaded_protocol = loaded_cs.protocol\n assert loaded_protocol.ref == proto_ref\n assert loaded_protocol.name == proto_name\n assert loaded_protocol.blocking == proto_blocking\n assert loaded_protocol.is_database == proto_is_database\n assert isinstance(loaded_node.protocol, charlotte_web.Protocol)", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test():\n pass", "def test_examples(self):\n with sphinx_build('pyexample'):\n with open('_build/text/docfx_yaml/example.nap.Base.yml') as yml_file:\n data = yaml.safe_load(yml_file)\n for item in data['items']:\n if item['uid'] == 'example.nap.Base.ref':\n self.assertEqual(\n item['example'].split('\\n')[2],\n \"\"\">>> print('docblock 1')\"\"\"\n )\n self.assertEqual(\n item['example'].split('\\n')[7],\n \"\"\">>> print('docblock 2')\"\"\"\n )", "def test_loaders():\n\n tempdir = tempfile.mkdtemp()\n\n loader = \"\"\"\nfrom mindbender import api\n\nclass DemoLoader(api.Loader):\n def process(self, asset, subset, version, representation):\n pass\n\n\"\"\"\n\n with open(os.path.join(tempdir, \"my_loader.py\"), \"w\") as f:\n f.write(loader)\n\n try:\n pipeline.register_loaders_path(tempdir)\n loaders = pipeline.discover_loaders()\n\n assert \"DemoLoader\" in list(\n L.__name__ for L in loaders\n ), \"Loader not found in %s\" % \", \".join(\n l.__name__ for l in loaders)\n\n finally:\n shutil.rmtree(tempdir)", "def unitary_test():", "def test_get_run(self):\n pass", "def test_component_resolution_different_file():\n\n assert snippet_eval(ComponentSnippet(modulea.ComponentResolutionViaModule())) == \"hi from module b\\n\"", "def 
main():\n example()", "def main():\n example()", "def main():\n example()", "def main():\n example()", "def test_Tree():", "def tests():", "def test_basic_execution(self):", "def test_module_doc():\r\n\r\n for fname in os.listdir('.'):\r\n if fname.endswith('.py'):\r\n f = fname.split('.')[0]\r\n print 'Executing ', fname\r\n execfile(fname, locals())", "def test(self, parent, block):\r\n pass", "def test_2():", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_3():", "def test_003_library(settings, inspector):\n sources = [\n os.path.join(settings.sample_path, 'main_syntax.scss'),\n os.path.join(settings.sample_path, 'main_commented.scss'),\n os.path.join(settings.sample_path, 'main_basic.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-1.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-2.scss'),\n os.path.join(settings.sample_path, 'main_depth_import-3.scss'),\n os.path.join(settings.sample_path, 'main_with_subimports.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n os.path.join(settings.sample_path, 'main_circular_0.scss'),\n os.path.join(settings.sample_path, 'main_circular_1.scss'),\n os.path.join(settings.sample_path, 'main_circular_2.scss'),\n os.path.join(settings.sample_path, 'main_circular_3.scss'),\n os.path.join(settings.sample_path, 'main_circular_4.scss'),\n os.path.join(settings.sample_path, 'main_circular_bridge.scss'),\n os.path.join(settings.sample_path, 'main_circular_5.scss'),\n ]\n sourcepath = os.path.join(settings.lib1_path, 'components/_panels.scss')\n\n inspector.inspect(*sources, library_paths=settings.libraries_fixture_paths)\n\n parents = inspector.parents(sourcepath)\n assert parents == set([\n os.path.join(settings.lib1_path, 'library_1_fullstack.scss'),\n os.path.join(settings.sample_path, 'main_using_libs.scss'),\n ])", "def test():\n import doctest\n from . 
import locate\n return doctest.testmod(locate)", "def test(self):", "def test(self):", "def test_simple_extension(self):\n bento_info = \"\"\"\\\nName: foo\n\nLibrary:\n Packages: foo, foo.bar\n Modules: fubar\n Extension: foo\n Sources: src/foo.c\n\"\"\"\n self._test_run(bento_info)", "def test_example(self):\n self.assertEqual(self.example.get_example(), True)", "def test_4():", "def test():", "def test():", "def test_partial_twice_dependent_object_import(self):\n pass", "def test_subsystems(self):\n pass", "def test_examples():\n import airconics\n # pytest runs test files in ./__pycache__: need to go up two levels\n example_dir = os.path.abspath(\n os.path.join(__file__, '..', '..', 'examples', 'core'))\n example_scripts = os.listdir(example_dir)\n for script in example_scripts:\n if script.endswith('.py'):\n fname = os.path.join(example_dir, script)\n try:\n subprocess.check_call(['python', fname])\n except subprocess.CalledProcessError:\n raise AssertionError('Example {} failed'.format(fname))", "def test_nested_template_source_generation(self):\n sources = [source for source in self.loader.get_template_sources('component.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/child.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('deeply.nested.component.and.child.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/deeply/nested/component/and/child/child.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/deeply/nested/component/and/child/child.html')\n\n sources = [source for source in self.loader.get_template_sources('component.child/another.html')]\n self.assertEqual(len(sources), 2)\n self.assertEqual(sources[0], 'MOCK_BASE_DIR/component/child/another.html')\n self.assertEqual(sources[1], 'MOCK_BASE_DIR_2/component/child/another.html')", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load_model_data(self):\n add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def test_T01():", "def main():\n doctest.testmod()\n game()", "def test_bed(self):\n #TODO write bed tests", "def test_1():", "def less_nested_example_vanilla():\n return", "def test_S16_import(): \n\tdef test(): \n\t\ttry: \n\t\t\tfrom .. 
import S16 \n\t\texcept: \n\t\t\treturn False \n\t\treturn True \n\treturn [\"vice.yields.ccsne.S16\", test]", "def test_get_imports(self):\n pass", "def test_example_runs(self):\n run_example(\n verbose=False,\n testapp=self.testapp,\n )", "def test_twice_dependent_object_import(self):\n pass", "def test_definition_loading(self):\r\n\r\n modulestore = XMLModuleStore(DATA_DIR, course_dirs=['toy', 'two_toys'])\r\n\r\n location = Location(\"edX\", \"toy\", \"2012_Fall\", \"video\", \"Welcome\", None)\r\n toy_video = modulestore.get_item(location)\r\n location_two = Location(\"edX\", \"toy\", \"TT_2012_Fall\", \"video\", \"Welcome\", None)\r\n two_toy_video = modulestore.get_item(location_two)\r\n self.assertEqual(toy_video.youtube_id_1_0, \"p2Q6BrNhdh8\")\r\n self.assertEqual(two_toy_video.youtube_id_1_0, \"p2Q6BrNhdh9\")", "def test_read_namespaced_build(self):\n pass", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def test_load_file_contents():\n\n file_name = 'test_fooof_all'\n loaded_data = load_json(file_name, TEST_DATA_PATH)\n\n # Check settings\n for setting in OBJ_DESC['settings']:\n assert setting in loaded_data.keys()\n\n # Check results\n for result in OBJ_DESC['results']:\n assert result in loaded_data.keys()\n\n # Check results\n for datum in OBJ_DESC['data']:\n assert datum in loaded_data.keys()", "def inner_test(param: dict):\n pass", "def test_data_dump_and_load():\n raw_data = {\"A\": 1, \"B\": 2}\n\n # Dump json\n json_file = os.path.join(tempfile.gettempdir(), \"jade-unit-test-file.json\")\n dump_data(raw_data, json_file)\n assert os.path.exists(json_file)\n\n # Load json\n json_data = load_data(json_file)\n assert json_data == raw_data\n\n if os.path.exists(json_file):\n os.remove(json_file)\n\n # Dump toml\n toml_file = os.path.join(tempfile.gettempdir(), \"jade-unit-test-file.toml\")\n dump_data(raw_data, toml_file)\n assert os.path.exists(toml_file)\n\n # Load toml\n toml_data = load_data(toml_file)\n assert toml_data == raw_data\n\n if os.path.exists(toml_file):\n os.remove(toml_file)\n\n # Re-enable if we add support again.\n # Dump yaml\n # yaml_file = os.path.join(tempfile.gettempdir(), \"jade-unit-test-file.yaml\")\n # dump_data(raw_data, yaml_file)\n # assert os.path.exists(yaml_file)\n\n ## Load yaml\n # yaml_data = load_data(yaml_file)\n # assert yaml_data == raw_data\n\n # if os.path.exists(yaml_file):\n # os.remove(yaml_file)", "def test_cms_imported_course_walkthrough(self):\r\n def test_get_html(handler):\r\n # Helper function for getting HTML for a page in Studio and\r\n # checking that it does not error.\r\n resp = self.client.get_html(\r\n get_url(handler, course_key, 'course_key_string')\r\n )\r\n self.assertEqual(resp.status_code, 200)\r\n _test_no_locations(self, resp)\r\n\r\n _, course_items = import_from_xml(modulestore('direct'), 'common/test/data/', ['simple'])\r\n course_key = course_items[0].id\r\n\r\n resp = self._show_course_overview(course_key)\r\n self.assertEqual(resp.status_code, 200)\r\n self.assertContains(resp, 'Chapter 2')\r\n\r\n # go to various pages\r\n test_get_html('import_handler')\r\n test_get_html('export_handler')\r\n test_get_html('course_team_handler')\r\n test_get_html('course_info_handler')\r\n test_get_html('checklists_handler')\r\n test_get_html('assets_handler')\r\n test_get_html('tabs_handler')\r\n test_get_html('settings_handler')\r\n 
test_get_html('grading_handler')\r\n test_get_html('advanced_settings_handler')\r\n test_get_html('textbooks_list_handler')\r\n\r\n # go look at a subsection page\r\n subsection_key = course_key.make_usage_key('sequential', 'test_sequence')\r\n resp = self.client.get_html(get_url('subsection_handler', subsection_key))\r\n self.assertEqual(resp.status_code, 200)\r\n _test_no_locations(self, resp)\r\n\r\n # go look at the Edit page\r\n unit_key = course_key.make_usage_key('vertical', 'test_vertical')\r\n resp = self.client.get_html(get_url('unit_handler', unit_key))\r\n self.assertEqual(resp.status_code, 200)\r\n _test_no_locations(self, resp)\r\n\r\n def delete_item(category, name):\r\n \"\"\" Helper method for testing the deletion of an xblock item. \"\"\"\r\n item_key = course_key.make_usage_key(category, name)\r\n resp = self.client.delete(get_url('xblock_handler', item_key))\r\n self.assertEqual(resp.status_code, 204)\r\n _test_no_locations(self, resp, status_code=204, html=False)\r\n\r\n # delete a component\r\n delete_item(category='html', name='test_html')\r\n\r\n # delete a unit\r\n delete_item(category='vertical', name='test_vertical')\r\n\r\n # delete a unit\r\n delete_item(category='sequential', name='test_sequence')\r\n\r\n # delete a chapter\r\n delete_item(category='chapter', name='chapter_2')", "def test_syntax(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\tfrom .context import code\n\t\t\tfrom code import restart_service\n\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\tassert theResult", "def _test():\n import doctest\n doctest.testmod(verbose=1)", "def runTest(self):\n self.setUp()\n self.test_JupyterNotebooks1()", "def test_modul1(self):\n\n self.delayDisplay(\"Starting the test\")\n #\n # first, get some data\n #\n import urllib\n downloads = (\n ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\n )\n\n for url,name,loader in downloads:\n filePath = slicer.app.temporaryPath + '/' + name\n if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n logging.info('Requesting download %s from %s...\\n' % (name, url))\n urllib.urlretrieve(url, filePath)\n if loader:\n logging.info('Loading %s...' % (name,))\n loader(filePath)\n self.delayDisplay('Finished with download and loading')\n\n volumeNode = slicer.util.getNode(pattern=\"FA\")\n logic = modulLogic()\n self.assertTrue( logic.hasImageData(volumeNode) )\n self.delayDisplay('Test passed!')", "def __main() :\n launchTests()", "def runTest(self):\n pagename = self._tester.create_wiki_page(content=\"\"\"\n{{{\n#!rst\n.. 
code-block:: python\n\n print \"123\"\n}}}\n\"\"\")\n self._tester.go_to_wiki(pagename)\n tc.notfind(\"code-block\")\n tc.find('print')\n tc.find('\"123\"')", "def test_basic_workflow(self):\n\n test_data_dir = os.path.join(os.path.dirname(__file__), 'testdata')\n sys.path.append(test_data_dir)\n import basic\n tmpdir = tempfile.mkdtemp()\n package_path = os.path.join(tmpdir, 'workflow.tar.gz')\n try:\n compiler.Compiler().compile(basic.save_most_frequent_word, package_path)\n with open(os.path.join(test_data_dir, 'basic.yaml'), 'r') as f:\n golden = yaml.load(f)\n compiled = self._get_yaml_from_tar(package_path)\n\n self.maxDiff = None\n # Comment next line for generating golden yaml.\n self.assertEqual(golden, compiled)\n finally:\n # Replace next line with commented line for gathering golden yaml.\n shutil.rmtree(tmpdir)\n # print(tmpdir)", "def test_script(self) -> None:\n main()", "def test_quick_build(self):\n pass", "def test_dag_load(self):\n\n with ObservatoryEnvironment().create():\n dag_file = os.path.join(module_file_path(\"academic_observatory_workflows.dags\"), \"openalex_telescope.py\")\n self.assert_dag_load(\"openalex\", dag_file)" ]
[ "0.6656562", "0.6570933", "0.6570933", "0.6489614", "0.6259519", "0.6246904", "0.61843383", "0.61584216", "0.61110884", "0.61107814", "0.6097114", "0.6087612", "0.60835487", "0.60752654", "0.60712975", "0.6059714", "0.60356414", "0.60127175", "0.60122466", "0.601197", "0.5992272", "0.5991382", "0.59841245", "0.59821856", "0.59772164", "0.5974786", "0.5959991", "0.5928582", "0.59175533", "0.58958703", "0.5859042", "0.5843219", "0.5843219", "0.5843219", "0.5843219", "0.5843219", "0.58368576", "0.5821035", "0.581782", "0.5816487", "0.5814812", "0.5812459", "0.58077496", "0.5795895", "0.5795067", "0.5795067", "0.5795067", "0.5795067", "0.5789442", "0.5777511", "0.57676333", "0.57657003", "0.575902", "0.57486725", "0.57480097", "0.57480097", "0.57480097", "0.5747713", "0.574615", "0.57360345", "0.5731538", "0.5731538", "0.571834", "0.57139224", "0.5712714", "0.57122034", "0.57122034", "0.57086134", "0.57066816", "0.5705689", "0.5696143", "0.5695092", "0.5695092", "0.5692772", "0.5689019", "0.56836814", "0.5677645", "0.5670156", "0.56617427", "0.56610835", "0.5644757", "0.5643017", "0.56429493", "0.56378627", "0.56332856", "0.56295615", "0.56271815", "0.5623159", "0.5620006", "0.561445", "0.5614354", "0.56114227", "0.5608165", "0.5606964", "0.56052727", "0.5603394", "0.5603033", "0.560051", "0.5599641", "0.5585677" ]
0.83801514
0
Test the nested Session contexts example
def test_documentation_popxl_nested_session_contexts(self): filename = "nested_session_contexts.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_set_session():", "def test_resource(data_manager):\n sessions = set([])\n with data_manager.dal():\n context1 = current_context._get_current_object()\n session = context1.sqlalchemy\n assert isinstance(session, orm.Session)\n sessions.add(session)\n\n with data_manager.dal():\n context2 = current_context._get_current_object()\n assert context2 != context1\n session = context2.sqlalchemy\n assert isinstance(session, orm.Session)\n sessions.add(session)\n\n # Make sure we have two unique sessions\n assert len(sessions) == 2", "def test_existing_session_cookie(self):\n\n with self.app_sess1 as c:\n ret1 = c.get('/')\n ret2 = c.get('/')\n self.assertEqual(ret1.data, ret2.data)", "def test_session_promotion(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 0)\")\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def session(db):\n db.session.begin_nested()\n\n yield db.session\n\n db.session.rollback()", "def test_new_session_promotion(self):\r\n cursor = self.db.cursor()\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session.save()\r\n\r\n cursor.execute(\"SELECT sid,authenticated FROM session\")\r\n self.assertEqual(('john', 1), cursor.fetchone())\r\n self.assertEqual(None, cursor.fetchone())", "def test_newSession(self):\n session = self.mdk.session()\n session2 = self.mdk.session()\n self.assertSessionHas(session, session._context.traceId, [0])\n self.assertSessionHas(session2, session2._context.traceId, [0])\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)", "def test_childSession(self):\n session = self.mdk.session()\n session.setProperty(\"other\", 123)\n session._context.tick()\n session._context.tick()\n session._context.tick()\n session.setTimeout(13.0)\n session2 = self.mdk.derive(session.externalize())\n self.assertNotEqual(session._context.traceId,\n session2._context.traceId)\n self.assertEqual(session2.getRemainingTime(), None)\n self.assertSessionHas(session2, session2._context.traceId, [1],\n other=123)", "def db_subsession(session):\n try:\n with session.begin_nested():\n yield\n except:\n logger.exception(\"Problem with DB sub-session, rolling back.\")", "def test_do_login(self):\r\n\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n self.assertNotIn(CURR_USER_KEY, session)\r\n do_login(u1)\r\n self.assertEqual(session[CURR_USER_KEY], u1.id)", "def session(self):", "def test_distinct_sessions_cookie(self):\n\n sess1 = None\n sess2 = None\n with self.app_sess1 as c:\n sess1 = c.get('/').data\n\n with self.app_sess2 as c:\n sess2 = c.get('/').data\n\n self.assertNotEqual(sess1, sess2)", "def test_client_custom_session():\n c_session = requests.Session()\n client = ConfigureClients(custom_session=c_session)\n assert client.session == c_session", "def test_joinSession(self):\n session = self.mdk.session()\n session.setProperty(\"key\", 456)\n 
session.setProperty(\"key2\", [456, {\"zoo\": \"foo\"}])\n session2 = self.mdk.join(session.externalize())\n self.assertSessionHas(session2, session._context.traceId, [1, 0],\n key=456, key2=[456, {\"zoo\": \"foo\"}])", "def session(request):\n session = get_test_db_session()\n request.cls.session = session\n return session", "def test_session_auth_token(self):\n\n sess1 = None\n sess2 = None\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret = c.get('/', headers=test_header)\n sess1 = ret.data\n\n with self.app_sess2 as c:\n ret = c.get('/', headers=test_header)\n sess2 = ret.data\n\n self.assertEqual(sess1, sess2)", "def test_find_where_multiple_infos(server, session):\n\n for session in server.sessions:\n session_id = session.get('session_id')\n session_name = session.get('session_name')\n find_where = server.find_where(\n {'session_id': session_id, 'session_name': session_name}\n )\n\n assert find_where == session\n assert isinstance(find_where, Session)\n\n # session.find_where\n for window in session.windows:\n window_id = window.get('window_id')\n window_index = window.get('window_index')\n\n find_where = session.find_where(\n {'window_id': window_id, 'window_index': window_index}\n )\n\n assert find_where == window\n assert isinstance(find_where, Window)\n\n # window.find_where\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n pane_tty = pane.get('pane_tty')\n\n find_where = window.find_where(\n {'pane_id': pane_id, 'pane_tty': pane_tty}\n )\n\n assert find_where == pane\n assert isinstance(find_where, Pane)", "def test_get_session(self):\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.db'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.cache'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.file'):\n self._run_expired_session_test_for_engine()\n with self.settings(SESSION_ENGINE='django.contrib.sessions.backends.cached_db'):\n self._run_expired_session_test_for_engine()", "def session(self, context: InjectionContext = None) -> \"ProfileSession\":", "def test_modify_detached_session(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n session = DetachedSession(self.env, 'john')\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\")\r\n self.assertEqual('baz', cursor.fetchone()[0])", "def test_sessions():\n CHECKS = (check_correct_usage, check_expiration, check_bad_cookie, check_various_session_sizes)\n for no_datastore in (False, True):\n if no_datastore:\n test_db = 'without'\n else:\n test_db = 'with'\n for cot in (0, 10*1024, 2**30):\n if cot == 0:\n test_cookie = 'no data stored in cookies'\n elif cot == 2**30:\n test_cookie = 'data only stored in cookies'\n else:\n test_cookie = 'store data in cookies when its encoded size<=%dB' % cot\n for check in CHECKS:\n logger.debug('\\n\\n' + '*'*50)\n logger.debug('Running %s %s datastore and %s' % (check.__name__, test_db, test_cookie))\n yield check, no_datastore, cot", "def test_set_session_id(self, context):\n context.set_session_id(b\"abc\")", "def test_server_get_session(self):\n server, client = loopback()\n session = server.get_session()\n 
assert isinstance(session, Session)", "def session_context(pytestconfig, request, tmp_env):\n ctx = Context.only()\n\n # Temporary, empty local directory for local data\n session_tmp_dir = Path(request.config._tmp_path_factory.mktemp(\"data\"))\n\n # Set the cache path according to whether pytest --local-cache was given. If True,\n # pick up the existing setting from the user environment. If False, use a pytest-\n # managed cache directory that persists across test sessions.\n ctx.cache_path = (\n ctx.local_data.joinpath(\"cache\")\n if request.config.option.local_cache\n # TODO use pytestconfig.cache.mkdir() when pytest >= 6.3 is available\n else Path(pytestconfig.cache.makedir(\"cache\"))\n )\n\n # Other local data in the temporary directory for this session only\n ctx.local_data = session_tmp_dir\n\n platform_name = \"message-ix-models\"\n\n # Add a platform connected to an in-memory database\n # NB cannot call Config.add_platform() here because it does not support supplying a\n # URL for a HyperSQL database.\n # TODO add that feature upstream.\n ixmp_config.values[\"platform\"][platform_name] = {\n \"class\": \"jdbc\",\n \"driver\": \"hsqldb\",\n \"url\": f\"jdbc:hsqldb:mem://{platform_name}\",\n }\n\n # Launch Platform and connect to testdb (reconnect if closed)\n mp = Platform(name=platform_name)\n mp.open_db()\n\n ctx.platform_info[\"name\"] = platform_name\n\n yield ctx\n\n ctx.close_db()\n ixmp_config.remove_platform(platform_name)", "def load_session(session):\n def inner():\n web.ctx.session = session\n return inner", "def test_client_get_session(self):\n server, client = loopback()\n session = client.get_session()\n assert isinstance(session, Session)", "def test_get_activities_from_recursive_contexts(self):\n from .mockers import context_query\n from .mockers import create_context\n from .mockers import subscribe_contextA, create_contextA, user_status_contextA\n from .mockers import subscribe_contextB, create_contextB, user_status_contextB\n username = 'messi'\n username_not_me = 'xavi'\n self.create_user(username)\n self.create_user(username_not_me)\n self.create_context(create_context, permissions=dict(read='public', write='restricted', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextA, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.create_context(create_contextB, permissions=dict(read='subscribed', write='subscribed', subscribe='restricted', invite='restricted'))\n self.admin_subscribe_user_to_context(username, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextA)\n self.admin_subscribe_user_to_context(username_not_me, subscribe_contextB)\n self.create_activity(username, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextA)\n self.create_activity(username_not_me, user_status_contextB)\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 2)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n 
self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n\n res = self.testapp.get('/contexts/%s/activities' % (context_query['context']), '', oauth2Header(username_not_me), status=200)\n result = json.loads(res.text)\n self.assertEqual(len(result), 3)\n self.assertEqual(result[0].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[0].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[0].get('contexts', None)[0]['url'], subscribe_contextB['object']['url'])\n self.assertEqual(result[1].get('actor', None).get('username'), 'xavi')\n self.assertEqual(result[1].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[1].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])\n self.assertEqual(result[2].get('actor', None).get('username'), 'messi')\n self.assertEqual(result[2].get('object', None).get('objectType', None), 'note')\n self.assertEqual(result[2].get('contexts', None)[0]['url'], subscribe_contextA['object']['url'])", "def test_create_session(self):\n study_id = self.storage.create_study(sample_study_spec())\n\n session = sample_session(study_id=study_id)\n self.storage.create_session(session)\n\n self.assertEqual(self.storage.get_session(study_id, session.id), session)", "def test_scoring_logic():\n app = create_ctfd()\n with app.app_context():\n admin = login_as_user(app, name=\"admin\", password=\"password\")\n\n register_user(app, name=\"user1\", email=\"user1@ctfd.io\", password=\"password\")\n client1 = login_as_user(app, name=\"user1\", password=\"password\")\n register_user(app, name=\"user2\", email=\"user2@ctfd.io\", password=\"password\")\n client2 = login_as_user(app, name=\"user2\", password=\"password\")\n\n chal1 = gen_challenge(app.db)\n flag1 = gen_flag(app.db, chal=chal1.id, flag='flag')\n chal1_id = chal1.id\n\n chal2 = gen_challenge(app.db)\n flag2 = gen_flag(app.db, chal=chal2.id, flag='flag')\n chal2_id = chal2.id\n\n # user1 solves chal1\n with freeze_time(\"2017-10-3 03:21:34\"):\n with client1.session_transaction() as sess:\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client1.post('/chal/{}'.format(chal1_id), data=data)\n\n # user1 is now on top\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user1'\n\n # user2 solves chal1 and chal2\n with freeze_time(\"2017-10-4 03:30:34\"):\n with client2.session_transaction() as sess:\n # solve chal1\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client2.post('/chal/{}'.format(chal1_id), data=data)\n # solve chal2\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client2.post('/chal/{}'.format(chal2_id), data=data)\n\n # user2 is now on top\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user2'\n\n # user1 solves chal2\n with freeze_time(\"2017-10-5 03:50:34\"):\n with client1.session_transaction() as sess:\n data = {\n \"key\": 'flag',\n \"nonce\": sess.get('nonce')\n }\n r = client1.post('/chal/{}'.format(chal2_id), data=data)\n\n # user2 should still be on top because they solved chal2 first\n scores = get_scores(admin)\n assert scores[0]['team'] == 'user2'\n destroy_ctfd(app)", "def test_find_where(server, session):\n # server.find_where\n for session in server.sessions:\n session_id = session.get('session_id')\n\n assert server.find_where({'session_id': session_id}) == session\n assert isinstance(server.find_where({'session_id': session_id}), Session)\n\n # session.find_where\n for window in 
session.windows:\n window_id = window.get('window_id')\n\n assert session.find_where({'window_id': window_id}) == window\n assert isinstance(session.find_where({'window_id': window_id}), Window)\n\n # window.find_where\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n\n assert window.find_where({'pane_id': pane_id}) == pane\n assert isinstance(window.find_where({'pane_id': pane_id}), Pane)", "def test_get_db_session(initialized_db_url):\n with utils.get_db_session(initialized_db_url) as db:\n assert isinstance(db, Session)", "def test_get_context():\n\n application_services.add_context('context4', 'value4')\n assert application_services.get_context('context4') == 'value4'", "def check_correct_usage(no_datastore, cookie_only_threshold):\n def minitest_divider(test):\n logger.debug('\\n\\n' + '-'*50)\n logger.debug(test + ' (nd=%s cot=%s)' % (no_datastore, cookie_only_threshold))\n\n st = SessionTester(no_datastore=no_datastore, cookie_only_threshold=cookie_only_threshold)\n expected_num_sessions_in_db_if_db_used = lambda a,b=0 : generic_expected_num_sessions_in_db_if_db_used(st, no_datastore, cookie_only_threshold, a, b)\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('try doing nothing (no session should be started)')\n st.noop()\n st.verify_active_sessions_in_db(0)\n\n minitest_divider('start a session with a single write')\n st.start_request()\n str(st)\n assert st.get_expiration()==0, \"no session yet => no expiration yet\"\n assert st.is_active() is False\n st['x'] = 7\n assert st.is_active() is True\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider('start another session')\n st2 = SessionTester(st=st)\n st2.start_request()\n assert not st2.is_active()\n assert st2.get('x') is None, \"shouldn't get other session's data\"\n assert not st2.is_active(), \"still shouldn't be active - nothing set yet\"\n st2['x'] = 'st2x'\n assert st2.is_active()\n st2.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider('each session should get a unique sid')\n assert st2.ss.sid != st.ss.sid\n\n minitest_divider('we should still have the values we set earlier')\n st.start_request()\n str(st)\n assert_equal(st['x'], 7)\n st.finish_request_and_check()\n st2.start_request()\n assert_equal(st2['x'], 'st2x')\n st2.finish_request_and_check()\n\n minitest_divider(\"check get session by sid, save(True), and terminate()\")\n if cookie_only_threshold == 0:\n data1 = st.ss.data\n data2 = st2.ss.data\n else:\n # data is being stored in cookie-only form => won't be in the db\n data1 = data2 = {}\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data1)\n resp = st2.get_url('/get_by_sid?sid=%s' % st2.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), data2)\n expected_num_sessions_in_db_if_db_used(2)\n st.start_request()\n st['y'] = 9 # make the session dirty\n st.save(True) # force it to persist to the db even though it normally wouldn't\n st.finish_request_and_check()\n\n # now the data should be in the db\n resp = st.get_url('/get_by_sid?sid=%s' % st.ss.sid)\n assert_equal(pickle.loads(b64decode(resp.body)), st.ss.data)\n expected_num_sessions_in_db_if_db_used(2, 1)\n st.start_request()\n st.terminate() # remove it from the db\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(1)\n\n minitest_divider(\"should be able to terminate() and then start a new session all in one request\")\n st.start_request()\n st['y'] = 'yy'\n 
assert_equal(st.get('y'), 'yy')\n st.terminate()\n assert_raises(KeyError, st.__getitem__, 'y')\n st['x'] = 7\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n st.regenerate_id()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(initial_expir, st._get_expiration(), \"expiration should not change\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"regenerating SID test w/new expiration time\")\n initial_sid = st.ss.sid\n st.start_request()\n initial_expir = st.get_expiration()\n new_expir = initial_expir + 120 # something new\n st.regenerate_id(expiration_ts=new_expir)\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n assert_not_equal(initial_sid, st.ss.sid, \"regenerated sid should be different\")\n assert_equal(new_expir, st._get_expiration(), \"expiration should be what we asked for\")\n st.start_request()\n assert_equal(st['x'], 7, \"data should not be affected\")\n st.finish_request_and_check()\n expected_num_sessions_in_db_if_db_used(2)\n\n minitest_divider(\"check basic dictionary operations\")\n st.start_request()\n st['s'] = 'aaa'\n st['i'] = 99\n st['f'] = 4.37\n assert_equal(st.pop('s'), 'aaa')\n assert_equal(st.pop('s'), None)\n assert_equal(st.pop('s', 'nil'), 'nil')\n assert st.has_key('i')\n assert not st.has_key('s')\n assert_equal(st.get('i'), 99)\n assert_equal(st.get('ii'), None)\n assert_equal(st.get('iii', 3), 3)\n assert_equal(st.get('f'), st['f'])\n del st['f']\n assert_raises(KeyError, st.__getitem__, 'f')\n assert 'f' not in st\n assert 'i' in st\n assert_equal(st.get('x'), 7)\n st.clear()\n assert 'i' not in st\n assert 'x' not in st\n st.finish_request_and_check()\n\n minitest_divider(\"add complex data (models and objects) to the session\")\n st.start_request()\n st['model'] = make_entity(0)\n st['dict'] = dict(a='alpha', c='charlie', e='echo')\n st['list'] = ['b', 'd', 'f']\n st['set'] = set([2, 3, 5, 7, 11, 13, 17, 19])\n st['tuple'] = (7, 7, 1985)\n st.finish_request_and_check()\n st.start_request()\n st.clear()\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: basic usage\")\n st.start_request()\n st.set_quick('msg', 'mc only!')\n assert_equal('mc only!', st['msg'])\n st.finish_request_and_check()\n st.start_request()\n assert_equal('mc only!', st.pop_quick('msg'))\n assert_raises(KeyError, st.__getitem__, 'msg')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache (value will be lost if not using cookies)\")\n st.start_request()\n st.set_quick('a', 1)\n st.set_quick('b', 2)\n st.finish_request_and_check()\n st.flush_memcache()\n st.start_request()\n if cookie_only_threshold > 0:\n assert_equal(st['a'], 1)\n assert_equal(st['b'], 2)\n else:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'b')\n st.finish_request_and_check()\n\n minitest_divider(\"test quick methods: flush memcache should have no impact if another mutator is also used (and this ISNT memcache-only)\")\n st.start_request()\n st['x'] = 24\n st.set_quick('a', 1)\n st.finish_request_and_check()\n st.flush_memcache()\n 
st.start_request()\n if no_datastore and cookie_only_threshold == 0:\n assert_raises(KeyError, st.__getitem__, 'a')\n assert_raises(KeyError, st.__getitem__, 'x')\n else:\n assert_equal(st['a'], 1)\n assert_equal(st['x'], 24)\n st.set_quick('msg', 'hello')\n st['z'] = 99\n st.finish_request_and_check()", "def _setup_app_context_for_test():\n ctx = application.app_context()\n ctx.push()\n yield # tests will run here\n ctx.pop()", "def test_index_in_context(self):\n path = '/'\n with self.app.test_client() as client:\n client.get(path)\n self.assertContext('current_page', self.app.get_page(path))", "def test_context_creation_and_retrieval(self):\n tracer_id = 'd551573a-01dc-41b2-b197-ea8afb7fbac1'.replace('-', '')\n\n with new_context(tracer_id=tracer_id):\n context = get_context()\n nose.tools.eq_(tracer_id, str(context.tracer_id))", "def setUp(self):\n\n app.config['TESTING'] = True\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1", "def test_connection_session_switch(self):\n\n connection.set_session(self.session1)\n sync_table(TestConnectModel)\n TCM1 = TestConnectModel.create(id=1, keyspace=self.keyspace1)\n connection.set_session(self.session2)\n sync_table(TestConnectModel)\n TCM2 = TestConnectModel.create(id=1, keyspace=self.keyspace2)\n connection.set_session(self.session1)\n self.assertEqual(1, TestConnectModel.objects.count())\n self.assertEqual(TestConnectModel.objects.first(), TCM1)\n connection.set_session(self.session2)\n self.assertEqual(1, TestConnectModel.objects.count())\n self.assertEqual(TestConnectModel.objects.first(), TCM2)", "def setUp(self):\n\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"ABC\"\n self.client = app.test_client()\n\n # Connect to test database\n connect_to_db(app)\n db.drop_all()\n db.create_all()\n load_test()\n\n # Put user1 into session.\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"current_user\"] = 1", "def test_create_Context(self):\n client = IPythonClient()\n dac = Context(client)\n self.assertIs(dac.client, client)\n dac.close()\n client.close()", "def test_write_load(self, req_session):\n session = PoorSession(SECRET_KEY)\n session.load(req_session.cookies)\n assert session.data == {'test': True}", "def test_add_authenticated_session_var(self):\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n session['foo'] = 'bar'\r\n session.save()\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT value FROM session_attribute WHERE sid='john'\"\r\n \"AND name='foo'\") \r\n self.assertEqual('bar', cursor.fetchone()[0])", "def test_status_persisted(self):\n storage = SessionStorage()\n session1 = storage['key']\n session1['value'] = 'example'\n\n session2 = storage['key']\n self.assertEquals('example', session2['value'])", "def test_modify_authenticated_session_var(self):\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('john', 1, 0)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('john', 1, 'foo', 'bar')\")\r\n\r\n req = Mock(authname='john', base_path='/', incookie=Cookie())\r\n session = Session(self.env, req)\r\n self.assertEqual('bar', session['foo'])\r\n session['foo'] = 'baz'\r\n session.save()\r\n cursor.execute(\"SELECT value FROM session_attribute \"\r\n \"WHERE sid='john' AND name='foo'\") \r\n self.assertEqual('baz', cursor.fetchone()[0])", "def setUp(self):\n\n # Get the Flask test client.\n self.client = 
app.test_client()\n app.config[\"TESTING\"] = True\n app.config[\"SECRET_KEY\"] = \"key\"\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess[\"dietitian_id\"] = 2", "def test_im_chat_sessions(self):\n pass", "def pytest_sessionstart(session):\n\n db = Database()\n for a in range(1, 4):\n data = {\n 'author': f'test-author-{a}',\n 'text': f'test-text-{a}'\n }\n phrase = PhraseInput(**data)\n db.add(phrase)\n print('created:')\n print(list(db.items.keys()))", "def test_authenticated_session(self):\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='john', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n self.assertEqual('john', session.sid)\r\n session['foo'] = 'bar'\r\n session.save()\r\n self.assertEquals(0, outcookie['trac_session']['expires'])", "def test_client_set_session(self):\n key = load_privatekey(FILETYPE_PEM, server_key_pem)\n cert = load_certificate(FILETYPE_PEM, server_cert_pem)\n ctx = Context(TLSv1_2_METHOD)\n ctx.use_privatekey(key)\n ctx.use_certificate(cert)\n ctx.set_session_id(b\"unity-test\")\n\n def makeServer(socket):\n server = Connection(ctx, socket)\n server.set_accept_state()\n return server\n\n originalServer, originalClient = loopback(server_factory=makeServer)\n originalSession = originalClient.get_session()\n\n def makeClient(socket):\n client = loopback_client_factory(socket)\n client.set_session(originalSession)\n return client\n\n resumedServer, resumedClient = loopback(\n server_factory=makeServer, client_factory=makeClient\n )\n\n # This is a proxy: in general, we have no access to any unique\n # identifier for the session (new enough versions of OpenSSL expose\n # a hash which could be usable, but \"new enough\" is very, very new).\n # Instead, exploit the fact that the master key is re-used if the\n # session is re-used. 
As long as the master key for the two\n # connections is the same, the session was re-used!\n assert originalServer.master_key() == resumedServer.master_key()", "def Session(self):\n return self.context", "def test_existing_session_auth_token(self):\n\n test_header = {'X-Auth-Token': 'pretend_token'}\n\n with self.app_sess1 as c:\n ret1 = c.get('/', headers=test_header)\n ret2 = c.get('/', headers=test_header)\n self.assertEqual(ret1.data, ret2.data)", "def test_session_is_accessed(self):\n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")", "def test_mongo_context_prodmode(self):\n if not helpers.can_connect_to_mongo(helpers.TEST_CONFIG):\n pytest.xfail('no mongo credentials')\n\n mongo_context = schema_utils.MongoContextManager(\n helpers.TEST_CONFIG,\n )\n\n with mongo_context as mongo:\n mongo['test_collection'].insert(self.demo_data)\n\n with mongo_context as _:\n data = mongo['test_collection'].find_one({'butts': True})\n\n assert data['many'] == 10", "def test_session( uri ):\n\n # basic setup to handle cookies (all in-memory)\n cj = cookielib.CookieJar()\n opener = urllib2.build_opener( urllib2.HTTPCookieProcessor( cj ) )\n urllib2.install_opener( opener )\n\n # request to get the initial session state\n request = urllib2.Request( uri + '?fetch=true' )\n handle = urllib2.urlopen( request )\n initial_id = get_cookie_value( cj, 'session_test' )\n initial = json.load( handle )\n\n # request to change the session state\n request = urllib2.Request( uri + '?ckey=tkey&cval=tval' )\n handle = urllib2.urlopen( request )\n setup_id = get_cookie_value( cj, 'session_test' )\n setup = json.load( handle )\n\n # request to get the final session state\n request = urllib2.Request( uri + '?fetch=true' )\n handle = urllib2.urlopen( request )\n final_id = get_cookie_value( cj, 'session_test' )\n final = json.load( handle )\n\n # tests consistency of reported session ID\n if initial_id != final_id:\n print 'FAILED: session ID mismatch: %s != %s' % (\n initial_id, final_id\n )\n\n # verifies the host resource isn't cheating\n elif 'tkey' in initial[ 'session' ]:\n print 'FAILED: \"tkey\" present in initial session'\n\n # verifies the updated state has a valid entry\n elif 'tkey' not in final[ 'session' ]:\n print 'FAILED: \"tkey\" not present in final session'\n\n # verifies the updated entry is correct\n elif final[ 'session' ][ 'tkey' ] != 'tval':\n print 'FAILED: \"tkey\" incorrect value \"tval\" != \"%s\"' % (\n final[ 'session' ][ 'tkey' ],\n )\n\n # all checks verified\n else:\n print 'PASSED: \"tkey\" set to \"%s\" in session %s' % (\n final[ 'session' ][ 'tkey' ],\n get_cookie_value( cj, 'session_test' )\n )", "def setUp(self):\n\n app.config['TESTING'] = True\n app.config['SECRET_KEY'] = 'key'\n self.client = app.test_client()\n\n with self.client as c:\n with c.session_transaction() as sess:\n sess['user_id'] = 1", "def test_new_session(self):\r\n cookie = Cookie()\r\n req = Mock(incookie=Cookie(), outcookie=cookie, authname='anonymous',\r\n base_path='/')\r\n session = Session(self.env, req)\r\n self.assertEqual(session.sid, cookie['trac_session'].value)\r\n cursor = self.db.cursor()\r\n cursor.execute(\"SELECT COUNT(*) FROM session\")\r\n self.assertEqual(0, cursor.fetchone()[0])", "def test_execute_with_context(self):\n pass", "def test_sessionProperties(self):\n session = self.mdk.session()\n value = [\"123\", {\"12\": 123}]\n session.setProperty(\"key\", value)\n session.setProperty(\"key2\", \"hello\")\n 
self.assertEqual((session.getProperty(\"key\"), session.getProperty(\"key2\"),\n session.hasProperty(\"key\"), session.hasProperty(\"key2\"),\n session.hasProperty(\"nope\")),\n (value, \"hello\", True, True, False))", "def test_add_context():\n\n application_services.add_context('context1', 'value1')\n assert application_services.get_context('context1') == 'value1'", "def test_fixture_simple_patch_with_session(testdir):\n\n # create a temporary pytest test module\n testdir.makepyfile(\n \"\"\"\n import requests\n from requests.sessions import Session\n\n def test_simple_with_session(requests_mock):\n with requests_mock.patch('/api/test') as patch:\n patch.returns = requests_mock.good('hello')\n with Session() as s:\n response = s.get('https://test.api/api/test')\n assert response.text == 'hello'\n \"\"\"\n )\n\n # run pytest with the following cmd args\n result = testdir.runpytest(\"-v\")\n\n # fnmatch_lines does an assertion internally\n result.stdout.fnmatch_lines([\"*::test_simple_with_session PASSED*\"])\n\n # make sure that that we get a '0' exit code for the testsuite\n assert result.ret == 0", "def test_context_data(self):\n mixin = mixins.SiteMixin(site=Site(domain='test', name='test'))\n ret = mixin.get_context_data()\n # make sure data is just updated, not replaced\n assert ret['builder'] is mixin\n # check if 'site' is in context and if it's valid\n assert ret['site'].domain == 'test' and ret['site'].name == 'test'", "def setUp(self):\n\n self.client = server.app.test_client()\n server.app.config['TESTING'] = True\n server.app.config['SECRET_KEY'] = \"123\"\n\n # Connect to test database\n model.connect_to_db(server.app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n model.db.create_all()\n # example_data()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 1\n session['username'] = 'j'\n session['name'] = 'l'\n session['cal_id'] = 1", "def test_context_data(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue('form' in context)\n self.assertTrue('trait' in context)\n self.assertEqual(context['trait'], self.trait)", "def test_context_data(self):\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertTrue('form' in context)\n self.assertTrue('trait' in context)\n self.assertEqual(context['trait'], self.trait)", "def test_admin_survey_session(self):\n\n # =======================\n # CREATE SURVEY TEST DATA\n # =======================\n\n test_start_time = fields.Datetime.now()\n\n survey_session = self.env['survey.survey'].create({\n 'title': 'User Session Survey',\n 'access_token': 'b137640d-14d4-4748-9ef6-344caaaaafe',\n 'state': 'open',\n 'access_mode': 'public',\n 'users_can_go_back': False,\n 'questions_layout': 'page_per_question',\n 'scoring_type': 'scoring_without_answers'\n })\n\n nickname_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Nickname',\n 'save_as_nickname': True,\n 'sequence': 1,\n 'question_type': 'char_box',\n })\n text_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Text Question',\n 'sequence': 2,\n 'question_type': 'char_box',\n })\n date_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Date Question',\n 'sequence': 3,\n 'question_type': 'date',\n })\n datetime_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 
'Datetime Question',\n 'sequence': 4,\n 'question_type': 'datetime',\n })\n simple_choice_answer_1 = self.env['survey.question.answer'].create({\n 'value': 'First'\n })\n simple_choice_answer_2 = self.env['survey.question.answer'].create({\n 'value': 'Second'\n })\n simple_choice_answer_3 = self.env['survey.question.answer'].create({\n 'value': 'Third'\n })\n simple_choice_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Regular Simple Choice',\n 'sequence': 5,\n 'question_type': 'simple_choice',\n 'suggested_answer_ids': [\n (4, simple_choice_answer_1.id),\n (4, simple_choice_answer_2.id),\n (4, simple_choice_answer_3.id)],\n })\n scored_choice_answer_1 = self.env['survey.question.answer'].create({\n 'value': 'Correct',\n 'is_correct': True,\n 'answer_score': 30\n })\n scored_choice_answer_2 = self.env['survey.question.answer'].create({\n 'value': 'Incorrect 1'\n })\n scored_choice_answer_3 = self.env['survey.question.answer'].create({\n 'value': 'Incorrect 2'\n })\n scored_choice_answer_4 = self.env['survey.question.answer'].create({\n 'value': 'Incorrect 3'\n })\n scored_choice_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Scored Simple Choice',\n 'sequence': 6,\n 'question_type': 'simple_choice',\n 'suggested_answer_ids': [\n (4, scored_choice_answer_1.id),\n (4, scored_choice_answer_2.id),\n (4, scored_choice_answer_3.id),\n (4, scored_choice_answer_4.id)],\n })\n timed_scored_choice_answer_1 = self.env['survey.question.answer'].create({\n 'value': 'Correct',\n 'is_correct': True,\n 'answer_score': 30\n })\n timed_scored_choice_answer_2 = self.env['survey.question.answer'].create({\n 'value': 'Also correct but less points',\n 'is_correct': True,\n 'answer_score': 10\n })\n timed_scored_choice_answer_3 = self.env['survey.question.answer'].create({\n 'value': 'Incorrect',\n 'answer_score': -40\n })\n timed_scored_choice_question = self.env['survey.question'].create({\n 'survey_id': survey_session.id,\n 'title': 'Timed Scored Multiple Choice',\n 'sequence': 6,\n 'question_type': 'multiple_choice',\n 'is_time_limited': True,\n 'time_limit': 1,\n 'suggested_answer_ids': [\n (4, timed_scored_choice_answer_1.id),\n (4, timed_scored_choice_answer_2.id),\n (4, timed_scored_choice_answer_3.id)],\n })\n\n # =======================\n # PART 1 : CREATE SESSION\n # =======================\n\n self.start_tour('/web', 'test_survey_session_create_tour', login='admin')\n\n # tricky part: we only take into account answers created after the session_start_time\n # the create_date of the answers we just saved is set to the beginning of the test.\n # but the session_start_time is set after that.\n # So we cheat on the session start date to be able to count answers properly.\n survey_session.write({'session_start_time': test_start_time - relativedelta(minutes=10)})\n\n attendee_1 = survey_session._create_answer()\n attendee_2 = survey_session._create_answer()\n attendee_3 = survey_session._create_answer()\n all_attendees = [attendee_1, attendee_2, attendee_3]\n\n self.assertEqual('ready', survey_session.session_state)\n self.assertTrue(all(attendee.is_session_answer for attendee in all_attendees),\n \"Created answers should be within the session.\")\n self.assertTrue(all(attendee.state == 'new' for attendee in all_attendees),\n \"Created answers should be in the 'new' state.\")\n\n # =========================================\n # PART 2 : OPEN SESSION AND CHECK ATTENDEES\n # 
=========================================\n\n self.start_tour('/web', 'test_survey_session_start_tour', login='admin')\n\n self.assertEqual('in_progress', survey_session.session_state)\n self.assertTrue(bool(survey_session.session_start_time))\n\n # ========================================\n # PART 3 : CREATE ANSWERS & MANAGE SESSION\n # ========================================\n\n # create a few answers beforehand to avoid having to back and forth too\n # many times between the tours and the python test\n\n attendee_1.save_lines(nickname_question, 'xxxTheBestxxx')\n attendee_2.save_lines(nickname_question, 'azerty')\n attendee_3.save_lines(nickname_question, 'nicktalope')\n self.assertEqual('xxxTheBestxxx', attendee_1.nickname)\n self.assertEqual('azerty', attendee_2.nickname)\n self.assertEqual('nicktalope', attendee_3.nickname)\n\n attendee_1.save_lines(text_question, 'Attendee 1 is the best')\n attendee_2.save_lines(text_question, 'Attendee 2 rulez')\n attendee_3.save_lines(text_question, 'Attendee 3 will crush you')\n attendee_1.save_lines(date_question, '2010-10-10')\n attendee_2.save_lines(date_question, '2011-11-11')\n attendee_2.save_lines(datetime_question, '2010-10-10 10:00:00')\n attendee_3.save_lines(datetime_question, '2011-11-11 15:55:55')\n attendee_1.save_lines(simple_choice_question, simple_choice_answer_1.id)\n attendee_2.save_lines(simple_choice_question, simple_choice_answer_1.id)\n attendee_3.save_lines(simple_choice_question, simple_choice_answer_2.id)\n attendee_1.save_lines(scored_choice_question, scored_choice_answer_1.id)\n attendee_2.save_lines(scored_choice_question, scored_choice_answer_2.id)\n attendee_3.save_lines(scored_choice_question, scored_choice_answer_3.id)\n attendee_1.save_lines(timed_scored_choice_question,\n [timed_scored_choice_answer_1.id, timed_scored_choice_answer_3.id])\n attendee_2.save_lines(timed_scored_choice_question,\n [timed_scored_choice_answer_1.id, timed_scored_choice_answer_2.id])\n attendee_3.save_lines(timed_scored_choice_question,\n [timed_scored_choice_answer_2.id])\n\n self.start_tour('/web', 'test_survey_session_manage_tour', login='admin')\n\n self.assertFalse(bool(survey_session.session_state))\n self.assertTrue(all(answer.state == 'done' for answer in all_attendees))", "def test_session_creation(self):\n response = self.client.get(reverse('index'))\n self.assertEqual(self.client.session['liked_user_ids'], [])", "def test_session_state_for_used_flash(self):\n self.response = self.client.get(reverse(views.set_flash_var))\n self.response = self.client.get(reverse(views.render_template))\n self.assertTrue(_SESSION_KEY in self.client.session)\n\n # Flash scope should be removed from the session\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse(_SESSION_KEY in self.client.session)", "def test_do_logout(self):\r\n with app.test_request_context():\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n do_login(u1)\r\n self.assertIn(CURR_USER_KEY, session)\r\n do_logout()\r\n self.assertNotIn(CURR_USER_KEY, session)", "def test_run_container2(client):\n with client.application.app_context():\n pytest.skip(\"Not implemented\")", "def session(get_session):\n return get_session()", "def test_database_sessions(web_app):\n db = DB(connect_url=TEST_URL, web_app=web_app, echo=web_app)\n db.init()\n with db.session() as session:\n archive = Archive(archive_id='0000', name='test')\n session.add(archive)\n with pytest.raises(IntegrityError):\n with db.session() as session:\n archive = 
Archive(archive_id='0000', name='test')\n session.add(archive)\n with pytest.raises(ValueError):\n with db.session() as session:\n archive = Archive(archive_id='0001', name='test')\n session.add(archive)\n raise ValueError('test')\n db.close()", "def setUp(self):\n\n self.client = server.app.test_client()\n server.app.config['TESTING'] = True\n server.app.config['SECRET_KEY'] = \"123\"\n\n # Connect to test database\n model.connect_to_db(server.app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n model.db.create_all()\n # example_data()\n\n with self.client as c:\n with c.session_transaction() as session:\n session['user_id'] = 33\n session['username'] = 'balloonicorn'\n session['name'] = 'balloonicorn'", "def __init__(self, session):\n self._session = session", "def test_init_with_request():\n serializers.SessionCreationSerializer(context={\"request\": \"foo\"})", "def session_context(func):\r\n def wrapper(*args, **kwargs):\r\n self = args[0]\r\n with self._create_db_session() as db:\r\n self.db = db\r\n return func(*args, **kwargs)\r\n return wrapper", "def test_some_page_in_context(self):\n path = '/about/history'\n with self.app.test_client() as client:\n client.get(path)\n self.assertContext('current_page', self.app.get_page(path))", "def test_session(self, k8sconfig):\n # Basic.\n config = k8sconfig._replace(token=None)\n sess = k8s.session(config)\n assert \"authorization\" not in sess.headers\n\n # With access token.\n config = k8sconfig._replace(token=\"token\")\n sess = k8s.session(config)\n assert sess.headers[\"authorization\"] == \"Bearer token\"\n\n # With access token and client certificate.\n ccert = k8s.K8sClientCert(crt=\"foo\", key=\"bar\")\n config = k8sconfig._replace(token=\"token\", client_cert=ccert)\n sess = k8s.session(config)\n assert sess.headers[\"authorization\"] == \"Bearer token\"\n assert sess.cert == (\"foo\", \"bar\")", "def test_user_get_topteams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/top/10')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_api_livesession_read_token_lti_admin_instruct_record_course_diff(self):\n other_consumer_site = ConsumerSiteFactory()\n # livesession with consumer_site\n livesession = LiveSessionFactory(\n consumer_site=other_consumer_site,\n email=\"admin@openfun.fr\", # explicit to be found in response\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\", # explicit to be found in response\n lti_id=\"Maths\", # explicit to be found in response\n )\n\n # token with context_id leading to another consumer site\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=livesession.video.playlist,\n context_id=f\"{livesession.video.playlist.lti_id}_diff\",\n # consumer_site is not other_consumer_site\n consumer_site=str(livesession.video.playlist.consumer_site.id),\n user__email=livesession.email,\n user__id=livesession.lti_user_id,\n user__username=livesession.username,\n )\n\n response = self.client.get(\n self._get_url(livesession.video, livesession),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(),\n {\n \"anonymous_id\": None,\n \"consumer_site\": str(other_consumer_site.id),\n \"display_name\": None,\n \"email\": \"admin@openfun.fr\",\n \"id\": str(livesession.id),\n \"is_registered\": False,\n \"language\": \"en\",\n \"live_attendance\": None,\n \"lti_user_id\": \"56255f3807599c377bf0e5bf072359fd\",\n \"lti_id\": \"Maths\",\n 
\"should_send_reminders\": True,\n \"username\": None,\n \"video\": str(livesession.video.id),\n },\n )", "def test_distinct_sessions_auth_token(self):\n\n sess1 = None\n sess2 = None\n\n with self.app_sess1 as c:\n ret = c.get('/', headers={'X-Auth-Token': 'pretend_token'})\n sess1 = ret.data\n\n with self.app_sess2 as c:\n ret = c.get('/', headers={'X-Auth-Token': 'another_pretend_token'})\n sess2 = ret.data\n\n self.assertNotEqual(sess1, sess2)", "def test_where(server, session):\n\n window = session.attached_window\n window.split_window() # create second pane\n\n for session in server.sessions:\n session_id = session.get('session_id')\n session_name = session.get('session_name')\n where = server.where({'session_id': session_id, 'session_name': session_name})\n\n assert len(where) == 1\n assert isinstance(where, list)\n assert where[0] == session\n assert isinstance(where[0], Session)\n\n # session.where\n for window in session.windows:\n window_id = window.get('window_id')\n window_index = window.get('window_index')\n\n where = session.where(\n {'window_id': window_id, 'window_index': window_index}\n )\n\n assert len(where) == 1\n assert isinstance(where, list)\n assert where[0] == window\n assert isinstance(where[0], Window)\n\n # window.where\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n pane_tty = pane.get('pane_tty')\n\n where = window.where({'pane_id': pane_id, 'pane_tty': pane_tty})\n\n assert len(where) == 1\n assert isinstance(where, list)\n assert where[0] == pane\n assert isinstance(where[0], Pane)", "async def session(self,ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send(\"The current main session is \" + \"```\" + await self.config.sessions.main() + \"```\")", "def test_get_by_id(server, session):\n\n window = session.attached_window\n\n window.split_window() # create second pane\n\n for session in server.sessions:\n session_id = session.get('session_id')\n get_by_id = server.get_by_id(session_id)\n\n assert get_by_id == session\n assert isinstance(get_by_id, Session)\n assert server.get_by_id('$' + next(namer)) is None\n\n # session.get_by_id\n for window in session.windows:\n window_id = window.get('window_id')\n\n get_by_id = session.get_by_id(window_id)\n\n assert get_by_id == window\n assert isinstance(get_by_id, Window)\n\n assert session.get_by_id('@' + next(namer)) is None\n\n # window.get_by_id\n for pane in window.panes:\n pane_id = pane.get('pane_id')\n\n get_by_id = window.get_by_id(pane_id)\n\n assert get_by_id == pane\n assert isinstance(get_by_id, Pane)\n assert window.get_by_id('%' + next(namer)) is None", "def session_context(self):\n session = self.Session()\n try:\n yield session\n session.commit()\n except: # noqa E722\n session.rollback()\n raise\n finally:\n session.close()", "def test_inner_live_setup(self):\n mocktable = self.classes.MockTable\n session = Session(bind=self.bind)\n session.add(mocktable(test=5))\n session.commit()\n session.close()\n\n res = session.query(mocktable).all()\n session.close()\n expected = (res[0].test, len(res))\n\n assert expected == (5, 1)", "def setUp(self):\n\n # Get the Flask test client\n self.client = app.test_client()\n\n # Show Flask errors that happen during tests\n app.config['TESTING'] = True\n\n #To test sessions we need to set Secret key \n app.config['SECRET_KEY'] = 'key'\n\n # Connect to test database\n connect_to_db(app, \"postgresql:///testdb\")\n\n # Create tables and add sample data\n db.create_all()\n users()\n reviews()\n\n with self.client as c:\n with 
c.session_transaction() as sess:\n sess['user_id'] = 1", "def test_update_session(self):\r\n now = time.time()\r\n\r\n # Make sure the session has data so that it doesn't get dropped\r\n cursor = self.db.cursor()\r\n cursor.execute(\"INSERT INTO session VALUES ('123456', 0, 1)\")\r\n cursor.execute(\"INSERT INTO session_attribute VALUES \"\r\n \"('123456', 0, 'foo', 'bar')\")\r\n\r\n incookie = Cookie()\r\n incookie['trac_session'] = '123456'\r\n outcookie = Cookie()\r\n req = Mock(authname='anonymous', base_path='/', incookie=incookie,\r\n outcookie=outcookie)\r\n session = Session(self.env, req)\r\n session.save() # updating should not require modifications\r\n\r\n self.assertEqual(PURGE_AGE, outcookie['trac_session']['expires'])\r\n\r\n cursor.execute(\"SELECT last_visit FROM session WHERE sid='123456' AND \"\r\n \"authenticated=0\")\r\n self.assertAlmostEqual(now, int(cursor.fetchone()[0]), -1)", "def test_api_livesession_read_token_lti_admin_instruct_record_consumer_diff(self):\n other_consumer_site = ConsumerSiteFactory()\n # livesession with consumer_site\n livesession = LiveSessionFactory(\n consumer_site=other_consumer_site,\n is_from_lti_connection=True,\n email=\"admin@openfun.fr\", # explicit to be found in response\n lti_user_id=\"56255f3807599c377bf0e5bf072359fd\", # explicit to be found in response\n )\n\n # token with context_id leading to another consumer site\n jwt_token = InstructorOrAdminLtiTokenFactory(\n playlist=livesession.video.playlist,\n context_id=str(livesession.video.playlist.lti_id),\n # consumer_site is not other_consumer_site\n consumer_site=str(livesession.video.playlist.consumer_site.id),\n )\n\n response = self.client.get(\n self._get_url(livesession.video, livesession),\n HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\",\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(),\n {\n \"anonymous_id\": None,\n \"consumer_site\": str(other_consumer_site.id),\n \"display_name\": None,\n \"email\": \"admin@openfun.fr\",\n \"id\": str(livesession.id),\n \"is_registered\": False,\n \"language\": \"en\",\n \"live_attendance\": None,\n \"lti_user_id\": \"56255f3807599c377bf0e5bf072359fd\",\n \"lti_id\": str(livesession.video.playlist.lti_id),\n \"should_send_reminders\": True,\n \"username\": None,\n \"video\": str(livesession.video.id),\n },\n )", "def setUp(self):\n self.hello_url = \"http://localhost:7000\"\n self.store_url = self.hello_url + \"/store\"\n self.session = requests.session()", "def iter_sessions():\n return iter(_session_stack)", "def test_multiple_requests(self):\n s = self.api.session()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/bar\").end()\n s.request(\"1.2.3.4\", \"mozilla\", \"/foo/blah\").end()\n s.end()\n data = self.connector.transcription()\n assert len(data) == 2\n assert data[0].get('action') == \"session_start\"\n assert data[1].get('action') == \"session_end\"", "def getSession():\n return call(\"getSession\")", "def test_homepage_logged_in(self):\r\n u1 = User.query.filter_by(username='testuser').one()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = u1.id\r\n response = c.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'Virginia News', response.data)", "def test_new_session_create_with_auth_json(self):\n\n with self.app_sess1 as c:\n data = {\n \"token\": \"pretend_token\"\n }\n ret1 = c.post('/', data=json.dumps(data), headers={'Content-Type': 'application/json'})\n ret2 = c.get('/', headers={'X-Auth-Token': 
'pretend_token'})\n\n self.assertEqual(ret1.data, ret2.data)", "def client(app):\n conn = engine.connect()\n transaction = conn.begin()\n app.db_session = create_session(conn)\n yield app.test_client()\n # No need to retain the junk we\n # put in the DB for testing.\n transaction.rollback()\n conn.close()\n app.db_session.remove()", "def establish_session(func, db):\n def inner(*args, **kwargs):\n Session = sessionmaker(bind=db)\n session = Session()\n kwargs['session'] = session\n res = func(*args, **kwargs)\n return res\n\n return inner", "def test_context_data(self):\n tagged_traits = TaggedTraitFactory.create_batch(2, trait=self.trait)\n response = self.client.get(self.get_url(self.trait.pk))\n context = response.context\n self.assertIn('source_trait', context)\n self.assertEqual(context['source_trait'], self.trait)\n self.assertIn('tagged_traits_with_xs', context)\n self.assertEqual([el[0] for el in context['tagged_traits_with_xs']],\n list(self.trait.all_taggedtraits.non_archived()))\n self.assertIn('user_is_study_tagger', context)\n self.assertFalse(context['user_is_study_tagger'])\n self.assertIn('is_deprecated', context)\n self.assertIn('show_removed_text', context)\n self.assertIn('new_version_link', context)", "def loop_context():\n loop = setup_test_loop()\n yield loop\n # teardown_test_loop(loop)", "def test_mongo_context_testmode(self, tmpdir):\n mongo_context = schema_utils.MongoContextManager(\n helpers.TEST_CONFIG,\n _testmode=True,\n _testmode_filepath=tmpdir,\n )\n\n with mongo_context as t_mongo:\n t_mongo['test_collection'].insert(self.demo_data)\n\n with mongo_context as t_mongo:\n data = t_mongo['test_collection'].find_one({'butts': True})\n\n assert data['many'] == 10", "def _context():\n global _trident_context\n if _trident_context is None:\n _trident_context = _Context()\n return _trident_context" ]
[ "0.67018175", "0.6668321", "0.6427891", "0.63258743", "0.62819177", "0.6264992", "0.6244206", "0.6197762", "0.6128129", "0.60963386", "0.59784377", "0.59195083", "0.5882602", "0.58783644", "0.5873574", "0.58591706", "0.5849194", "0.584282", "0.5837098", "0.58088905", "0.58034945", "0.5788038", "0.5782187", "0.5770325", "0.5761921", "0.57494193", "0.5720195", "0.5711418", "0.5710981", "0.56825006", "0.56716037", "0.56655604", "0.56559104", "0.5636023", "0.5634938", "0.5630474", "0.56059533", "0.5595552", "0.55857", "0.5585573", "0.5581559", "0.55664426", "0.5557496", "0.5534353", "0.5529654", "0.55266625", "0.5508452", "0.5500517", "0.54963416", "0.54885745", "0.54804724", "0.5476506", "0.54731405", "0.54723406", "0.5462534", "0.54206276", "0.541661", "0.54102975", "0.5402018", "0.54007375", "0.53992075", "0.53959405", "0.5386174", "0.5386174", "0.5378541", "0.53755707", "0.53733563", "0.5362562", "0.53600085", "0.5356902", "0.5354855", "0.53510857", "0.5349936", "0.5342898", "0.5333078", "0.531519", "0.53082657", "0.5297952", "0.52938706", "0.5278837", "0.52777827", "0.52648354", "0.5263927", "0.52626866", "0.52568936", "0.52558583", "0.5254524", "0.52526283", "0.5251947", "0.524848", "0.52481824", "0.524413", "0.5243311", "0.52384037", "0.52333266", "0.5224298", "0.52218133", "0.52213496", "0.52186054", "0.521327" ]
0.8149533
0
Test the popxl call_with_info example
def test_documentation_popxl_call_with_info(self):
        filename = "call_with_info.py"
        self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hxlinfo():\n run_script(hxlinfo_main)", "def test_get_info(self):\n pass", "def test_application_info(self, mocked_serial, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertEqual({'name': 'appli'}, rpc.get_application_info('dummy'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('dummy')], mocked_serial.call_args_list)", "def verify_call(obj):\n\tassert obj.tag == 'OMOBJ'\n\tattr = obj[0]\n\t\n\tassert attr.tag == 'OMATTR'\n\tpairs, application = attr\n\t\n\tassert application.tag == 'OMA'\n\tsymbol, args = application\n\t\n\tassert symbol.tag == 'OMS'\n\tassert symbol.get('cd') == \"scscp1\"\n\tassert symbol.get('name') == \"procedure_call\"\n\t\n\tassert args.tag == 'OMA'\n\tassert len(args) > 0\n\tname_symbol = args[0]\n\t\n\tassert name_symbol.tag == 'OMS'\n\tcd = name_symbol.get('cd')\n\tproc_name = name_symbol.get('name')\n\t\n\t#2. Now handle the extra information\n\tassert pairs.tag == 'OMATP'\n\tassert len(pairs) % 2 == 0\n\t\n\textras = {}\n\tcall_id = None\n\treturn_type = None\n\t\n\tfor i in range(0, len(pairs), 2):\n\t\tsymbol = pairs[i]\n\t\tassert symbol.tag == 'OMS'\n\t\tassert symbol.get('cd') == \"scscp1\"\n\t\tname = symbol.get('name')\n\t\textras[name] = pairs[i+1]\n\t\t\n\t\tif name == 'call_id':\n\t\t\tassert call_id is None\n\t\t\tcall_id = pairs[i+1].text\n\t\t\tprint(call_id)\n\t\telif name.startswith('option_return_'):\n\t\t\tassert return_type is None\n\t\t\treturn_type = ReturnTypes[name[14:]]\n\t\n\t#Some information is mandatory\n\tassert call_id is not None\n\tassert return_type is not None\n\t\n\treturn cd, proc_name, call_id, return_type, args[1:], extras", "def info(self, *args, **kwargs):", "def test_process_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test first RPC call with process namespec\n self.assertEqual([{'name': 'proc'}], rpc.get_process_info('appli:proc'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:proc')], mocked_get.call_args_list)\n # reset patches\n mocked_check.reset_mock()\n mocked_get.reset_mock()\n # test second RPC call with group namespec\n self.assertEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_process_info('appli:*'))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call('appli:*')], mocked_get.call_args_list)", "def get_info(self, info):\r\n pass", "def setInfo(*args):", "def getInfo():", "def info() -> None:", "def test_get_info_function() -> None:\n current_directory = Path.cwd()\n with zipfile.ZipFile(\n current_directory / 'app' / 'tests' / 'files' / 'oneFile.zip') as zip_object:\n res = get_info_about_file(zip_object, 'dotnetfx.exe')\n assert res == {'path': 'dotnetfx.exe', 'size': 21823560}", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def hxlinfo_main(args, stdin=STDIN, stdout=sys.stdout, stderr=sys.stderr):\n parser = make_args(\n 'Display JSON-formatted metadata for a data source (does not have to be HXLated).',\n hxl_output=False\n )\n\n args = parser.parse_args(args)\n\n do_common_args(args)\n\n json.dump(hxl.input.info(args.infile or stdin, make_input_options(args)), stdout, indent=2, ensure_ascii=False)\n\n return EXIT_OK", "def test_print_info(clarisse):\n info 
= \"test print info\"\n assert bool(clarisse.print_info(info)) is False", "def test_get_cell_info(self):\n expected = { 'Experiment': \"20220101_EGS1_12345AA\",\n 'Cell': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef',\n 'Pool': '12345AA0018',\n 'Date': '20220101',\n 'Number': '1234',\n 'Slot': '1-A1-A1',\n 'CellID': 'AAA66666',\n 'Checksum': 'deadbeef',\n 'Project': '12345',\n 'Base': '12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef/'\n '20220101_EGS1_12345AA_12345AA0018_AAA66666_deadbeef',\n 'Files in pass': 'unknown',\n 'Files in fail': 1,\n 'Files in fast5 fail': 1,\n '_counts': [\n {'_barcode': '.', '_label': 'All passed reads', '_part': 'pass', 'total_reads': 200},\n {'_barcode': '.', '_label': 'Passed and lambda-filtered reads', '_part': 'nolambda'},\n {'_barcode': '.', '_label': 'All failed reads', '_part': 'fail'} ],\n '_blobs': ['../../__blob__'],\n '_duplex' : [ ['Duplex pairs', 1],\n ['from total passing reads', 200],\n ['% of passing reads', '1.00%'] ],\n '_filter_type': 'none',\n '_final_summary': {'is_rna': False},\n '_nanoplot': '../../__nanoplot__',\n }\n\n\n got = get_cell_info( experiment = \"20220101_EGS1_12345AA\",\n cell = \"12345AA0018/20220101_1234_1-A1-A1_AAA66666_deadbeef\",\n cell_content = { '.': dict( fast5_pass = ['x.fast5'],\n fastq_fail = ['y.fastq'],\n fast5_fail = ['y.fast5'] ) },\n counts = { ('.','pass'): dict(total_reads = 200),\n ('.','fail'): dict(),\n ('.','nolambda'): dict() },\n fin_summary = dict(is_rna = False),\n blobs = ['__blob__'],\n nanoplot = '__nanoplot__',\n duplex = 1,\n fast5_meta = dict() )\n\n if VERBOSE:\n pprint(got)\n\n self.assertEqual( type(got), OrderedDict )\n self.assertEqual( dict(got), expected )", "def test_all_process_info(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.processes = {\n 'proc_1': Mock(**{'serial.return_value': {'name': 'proc_1'}}),\n 'proc_2': Mock(**{'serial.return_value': {'name': 'proc_2'}})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'proc_1'}, {'name': 'proc_2'}],\n rpc.get_all_process_info())\n self.assertEqual([call()], mocked_check.call_args_list)", "def _handle_info_response(self, resp, info, prev_info):\r\n if info.line_num != prev_info.line_num:\r\n return\r\n\r\n if resp['calltip']:\r\n info.editor.show_calltip('Arguments', resp['calltip'],\r\n signature=True,\r\n at_position=prev_info.position)\r\n\r\n if resp['name']:\r\n self.send_to_inspector.emit(\r\n resp['name'], resp['argspec'],\r\n resp['note'], resp['docstring'],\r\n not prev_info.auto)", "def test_get_patch_info_returns(self):\n # This test assumes IIQ isn't installed, thus the pile of errors that'll\n # occur shouldn't prevent us from getting a PatchInfo object\n fake_log = MagicMock()\n patch_info = versions.get_patch_info('bogus-patch.tgz', fake_log)\n\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))\n self.assertEqual(patch_info.iiq_dir, '')", "def rpc_info():", "def test_all_applications_info(self, mocked_get, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.applications = {\n 'dummy_1': None, 'dummy_2': None}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call\n self.assertItemsEqual([{'name': 'appli_1'}, {'name': 'appli_2'}],\n rpc.get_all_applications_info())\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertItemsEqual([call('dummy_1'), 
call('dummy_2')],\n mocked_get.call_args_list)", "def svn_info_invoke_receiver(svn_info_receiver_t__obj, void_baton, char_path, svn_info_t_info, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def after_call_used(self, function_info, subscript, call_code, return_value, code_reference):\n # pylint: disable=too-many-arguments\n if function_info == ('pandas.io.parsers', 'read_csv'):\n operator_context = OperatorContext(OperatorType.DATA_SOURCE, function_info)\n return_value = self.execute_inspection_visits_no_parents(operator_context, code_reference,\n return_value, function_info)\n if function_info == ('pandas.core.groupby.generic', 'agg'):\n operator_context = OperatorContext(OperatorType.GROUP_BY_AGG, function_info)\n return_value = self.execute_inspection_visits_no_parents(operator_context, code_reference,\n return_value.reset_index(), function_info)\n elif function_info == ('pandas.core.frame', 'dropna'):\n operator_context = OperatorContext(OperatorType.SELECTION, function_info)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif function_info == ('pandas.core.frame', '__getitem__'):\n # TODO: Can this also be a select\n if self.select:\n self.select = False\n # Gets converted to Selection later?\n operator_context = OperatorContext(OperatorType.SELECTION, function_info)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif isinstance(return_value, MlinspectDataFrame):\n operator_context = OperatorContext(OperatorType.PROJECTION, function_info)\n return_value['mlinspect_index'] = range(1, len(return_value) + 1)\n return_value = execute_inspection_visits_unary_operator_df(self, operator_context, code_reference,\n self.input_data[-1],\n self.input_data[-1].annotations,\n return_value)\n elif isinstance(return_value, MlinspectSeries):\n operator_context = OperatorContext(OperatorType.PROJECTION, function_info)\n return_value = self.execute_inspection_visits_unary_operator_series(operator_context, code_reference,\n return_value,\n function_info)\n elif function_info == ('pandas.core.frame', 'groupby'):\n description = self.code_reference_to_description[code_reference]\n return_value.name = description # TODO: Do not use name here but something else to transport the value\n if function_info == ('pandas.core.frame', 'merge'):\n operator_context = OperatorContext(OperatorType.JOIN, function_info)\n return_value = self.execute_inspection_visits_join_operator_df(operator_context, code_reference,\n return_value,\n function_info)\n\n self.input_data.pop()\n\n return return_value", "def test_address_info(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.addresses = {\n '10.0.0.1': Mock(**{'serial.return_value': 'address_info'})}\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test with known address\n self.assertEqual('address_info', rpc.get_address_info('10.0.0.1'))\n # test with unknown address\n with self.assertRaises(RPCError) as exc:\n rpc.get_address_info('10.0.0.0')\n self.assertEqual(Faults.BAD_ADDRESS, exc.exception.code)\n self.assertEqual('BAD_ADDRESS: address 10.0.0.0 unknown in Supvisors',\n exc.exception.text)", "def test_info_get(self):\n response = self.client.open(\n '/info',\n method='GET')\n self.assert200(response,\n 
'Response body is : ' + response.data.decode('utf-8'))", "def info(): # noqa: E501\n return 'do some magic!'", "def print_info(*args):\n print(CGREEN2 + str(*args) + CEND)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)", "def test_ctcpQuery_USERINFO(self):\n self.client.userinfo = \"info\"\n self.client.ctcpQuery_USERINFO(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods, [(\"ctcpMakeReply\", (\"Wolf\", [(\"USERINFO\", \"info\")]))]\n )", "def test_rpcCall(self):\n pass", "def info():\n print __doc__\n sys.exit(1)", "def process_info(self, info):\n return info", "def test_info(manager):\n manager.test_window(\"one\")\n manager.c.sync()\n info = manager.c.window.info()\n assert info[\"name\"] == \"one\"\n assert info[\"group\"] == \"a\"\n assert info[\"wm_class\"][0] == \"TestWindow\"\n assert \"x\" in info\n assert \"y\" in info\n assert \"width\" in info\n assert \"height\" in info\n assert \"id\" in info", "def test_get_orderbook_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_orderbook_info(pair_id=1)\n\n assert actual_data == expected_data", "def test_call_command(self):\n p = call('whoami', shell=False, executable=None, path=None)\n self.assertIsInstance(p, Response)\n self.assertIsInstance(p.stdout, str)\n self.assertIsInstance(p.stderr, str)", "def getCallTip(self, command='', *args, **kwds):\n return ('', '', '')", "def return_info(self):\n\t\treturn self.info", "def get_info():\r\n\r\n path = \"data.xlsx\" # change path depending on the name and location of the file\r\n xl_book = xlrd.open_workbook(path)\r\n xl_sheet = xl_book.sheet_by_index(0) # selects the first sheet in the spreadsheet\r\n emails = xl_sheet.col_values(1, 1) # emails are in second column\r\n names = xl_sheet.col_values(0, 1) # client names are in first column\r\n return emails, names", "def test_get_pair_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_pair_info(pair_id=1)\n\n assert actual_data == expected_data", "def testGetInfoRemote(self):\n self.stream_start(mode='client',\n plugins=['xep_0030'])\n\n events = set()\n\n def handle_disco_info(iq):\n events.add('disco_info')\n\n\n self.xmpp.add_event_handler('disco_info', handle_disco_info)\n\n\n self.xmpp.wrap(self.xmpp['xep_0030'].get_info('user@localhost', 'foo'))\n self.wait_()\n\n self.send(\"\"\"\n <iq type=\"get\" to=\"user@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"foo\" />\n </iq>\n \"\"\")\n\n self.recv(\"\"\"\n <iq type=\"result\" to=\"tester@localhost\" id=\"1\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"foo\">\n <identity category=\"client\" type=\"bot\" />\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")\n\n self.assertEqual(events, {'disco_info'},\n \"Disco info event was not triggered: %s\" % events)", "def call(self, callee: \"SIPPhoneTemplate\") -> None:", "def getInfoFromExcel(excel_path):\n book = xlrd.open_workbook(excel_path)\n field = book.sheets()[0].row_values(0)\n insid_index = field.index('id')\n tester_index = field.index('Responsible Tester')\n #tester_index = field.index('Tester')\n insid = 
book.sheets()[0].col_values(insid_index)\n tester = book.sheets()[0].col_values(tester_index)\n insid.pop(0) #delete 'id'\n tester.pop(0) #delete 'tester'\n #print insid\n #print tester\n info = {}\n for i in range(len(insid)):\n info[int(insid[i])] = str(tester[i])\n \n return info", "def test_info():\n mock_getgrall = [grp.struct_group((\"foo\", \"*\", 20, [\"test\"]))]\n with patch(\"grp.getgrall\", MagicMock(return_value=mock_getgrall)):\n ret = {\"passwd\": \"*\", \"gid\": 20, \"name\": \"foo\", \"members\": [\"test\"]}\n assert mac_group.info(\"foo\") == ret", "def __xcall__(self, cmd):\n\n return call_pkgconfig(cmd + [self.name], self.paths)", "def printinfo(name, age=35): # create function printinfo with arguments name, and age(defualt to 35 if not specified)\n print(\"Name: \", name) # print name\n print(\"Age \", age) # print age, will print 35 unless specified in the call statement\n return # exit the function printinfo()", "def getInfo(notification):", "def test_info(config):\n conventional_commits = ConventionalCommitsCz(config)\n info = conventional_commits.info()\n assert isinstance(info, str)", "def get_xcom(**context):\n ti = context['ti']\n data = ti.xcom_pull(task_ids='xcom_from_bash', key='return_value')\n logging.info(data)", "def test_add_batch_info(self):\n resp = self.query_with_token(\n self.access_token, batch_info_query.format(**self.batch_data))\n self.assertIn('data', resp)\n self.assertEqual(\n resp['data']['createBatchInfo']['batchInfo']['supplier']['name'],\n self.supplier.name)", "def chain2Info(self, action, args=(), kwargs={}, actionReturnsExitCode=False ):\r\n if not isinstance( action, str):\r\n objToCall=action\r\n else:\r\n objToCall = getattr( self, action )\r\n result = objToCall(*args,**args)\r\n resultInfo = ReturnInfo( value = result, exitCode=None ) \r\n return reultInfo, self", "def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called()\n\n # pop object default value\n batch.pop_object()\n mock_obj.pop.assert_called_with(-1)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop object at index\n batch.pop_object(10)\n mock_obj.pop.assert_called_with(10)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference default value\n batch.pop_reference()\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(-1)\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference at index\n batch.pop_reference(9)\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(9)", "def test_name_and_value_arguments(self):\n mock_call = Call(\"some_name\", 1024, 3.1415, \"hello\")\n assert_that(str(mock_call), equal_to(\"Call(some_name, 1024, 3.1415, hello)\"))", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def info(self):", "def info(self):", "def test_basiccall(self):\n ex = self.ex\n i = self.i\n\n sig = (\"name\", \"char*\", \"int*\", \"double*\", \"double*\")\n ex.call = BasicCall(sig, \"N\", 100, 1.5, [10000])\n cmds = ex.generate_cmds()\n self.assertIn([\"name\", \"N\", 100, 1.5, [10000]], cmds)\n\n # now with a range\n lenrange = random.randint(1, 10)\n ex.range = 
[i, range(lenrange)]\n ex.call = BasicCall(sig, \"N\", i - 1, 1.5, [i * i])\n cmds = ex.generate_cmds()\n idx = random.randint(0, lenrange - 1)\n self.assertIn([\"name\", \"N\", idx - 1, 1.5, [idx * idx]], cmds)", "def info_cmd(args):\n livebox_info()", "def request_info(self):\r\n if self.use_http():\r\n self.enqueue_http_request(\"money/info\", {}, \"info\")\r\n else:\r\n self.send_signed_call(\"private/info\", {}, \"info\")", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def get_pex_info(entry_point):\r\n from . import pex_info\r\n\r\n pex_info_content = read_pex_info_content(entry_point)\r\n if pex_info_content:\r\n return pex_info.PexInfo.from_json(pex_info_content)\r\n raise ValueError('Invalid entry_point: %s' % entry_point)", "def prep_info_callset(df, info):\n df = df.copy()\n df = df[df.ID.isin(info.ID_original.tolist())]\n return df", "def test_user_info(self):\n fake_fetchall = MagicMock()\n fake_fetchall.return_value = [(1234, 5678)]\n db = database.Database()\n db._cursor.fetchall = fake_fetchall\n\n info = db.user_info('sally')\n expected = (1234, 5678)\n\n self.assertEqual(info, expected)", "def test_get_cell(workbook):\n assert workbook.get_cell(3,1) == '507906000030242007'", "def batch_info():\n return BatchInfo(\"Applitools Demo Visual Tests\")", "def get_info(self):\n pass", "def get_info(self):\n pass", "def on_info_click(self, event):\n def on_close(event, wind):\n wind.Close()\n wind.Destroy()\n event.Skip()\n wind = wx.PopupTransientWindow(self, wx.RAISED_BORDER)\n if self.auto_save.GetValue():\n info = \"'auto-save' is currently selected. Temperature bounds will be saved when you click 'next' or 'back'.\"\n else:\n info = \"'auto-save' is not selected. 
Temperature bounds will only be saved when you click 'save'.\"\n text = wx.StaticText(wind, -1, info)\n box = wx.StaticBox(wind, -1, 'Info:')\n boxSizer = wx.StaticBoxSizer(box, wx.VERTICAL)\n boxSizer.Add(text, 5, wx.ALL | wx.CENTER)\n exit_btn = wx.Button(wind, wx.ID_EXIT, 'Close')\n wind.Bind(wx.EVT_BUTTON, lambda evt: on_close(evt, wind), exit_btn)\n boxSizer.Add(exit_btn, 5, wx.ALL | wx.CENTER)\n wind.SetSizer(boxSizer)\n wind.Layout()\n wind.Popup()", "def info(self) -> int:", "def test_1_variantcall(install_test_files, data_dir):\n with make_workdir() as workdir:\n cl = [\"bcbio_nextgen.py\",\n get_post_process_yaml(data_dir, workdir),\n os.path.join(data_dir, os.pardir, \"100326_FC6107FAAXX\"),\n os.path.join(data_dir, \"run_info-variantcall.yaml\")]\n subprocess.check_call(cl)", "def info(self, msg, *args, **kwargs):\n pass", "def test_get_info(self):\n self.addCleanup(self.sdkapi.guest_delete, self.userid)\n\n self.sdkapi.guest_create(self.userid, 1, 1024, disk_list=self.disks)\n self.sdkapi.guest_deploy(self.userid, self.image_name)\n\n # get info in shutdown state\n info_off = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_off['power_state'], 'off')\n self.assertEquals(info_off['mem_kb'], 0)\n self.assertEquals(info_off['cpu_time_us'], 0)\n\n # get info in active state\n self.sdkapi.guest_start(self.userid)\n self.assertTrue(self.sdkutils.wait_until_guest_in_power_state(\n self.userid, 'on'))\n time.sleep(1)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)\n\n # get info in paused state\n self.sdkapi.guest_pause(self.userid)\n info_on = self.sdkapi.guest_get_info(self.userid)\n self.assertEquals(info_on['power_state'], 'on')\n self.assertNotEqual(info_on['cpu_time_us'], 0)\n self.assertNotEqual(info_on['mem_kb'], 0)", "def _call(*, call, method, hub_token_header, data=None, **kwargs):\n response = None\n headers = {}\n if 'headers' in kwargs: # pragma: no cover\n raise ValueError('Headers already defined: {}'.format(kwargs['headers']))\n if hub_token_header:\n if 'hub_token' not in kwargs:\n raise AttributeError('Asked to do a call to the hub but no hub_token provided.')\n headers['Authorization'] = kwargs['hub_token']\n if data is not None:\n data = json.dumps(data)\n headers['content-type'] = 'application/json'\n\n if 'remote' in kwargs and kwargs['remote']: # remote call\n if 'cloud_token' not in kwargs:\n raise AttributeError('Asked to do remote call but no cloud_token provided.')\n headers['Authorization'] = kwargs['cloud_token']\n headers['X-Hub-Key'] = kwargs['hub_token']\n response = cloud_api.remote(apicall=call, data=data, headers=headers)\n else: # local call\n if 'host' not in kwargs or not kwargs['host']:\n raise AttributeError(\n 'Local call but no hostname was provided. Either set keyword remote or host.')\n try:\n response = method(_getBase(**kwargs) + call, headers=headers, data=data, timeout=5)\n except requests.exceptions.RequestException as e: # pragma: no cover\n raise ConnectionError(str(e)) from None\n\n # evaluate response, wether it was remote or local\n if response.status_code == 200:\n return response.json()\n elif response.status_code == 410: # pragma: no cover\n raise APIError(\n response.status_code, 'API version outdated. Update python-cozify. 
%s - %s - %s' %\n (response.reason, response.url, response.text))\n else: # pragma: no cover\n raise APIError(response.status_code,\n '%s - %s - %s' % (response.reason, response.url, response.text))", "def test_get_details7(self):\n pass", "def before_call_used_args(self, function_info, subscript, call_code, args_code, code_reference, store, args_values):\n # pylint: disable=too-many-arguments\n if function_info == ('pandas.core.frame', 'merge'):\n assert isinstance(args_values[0], MlinspectDataFrame)\n args_values[0]['mlinspect_index_y'] = range(1, len(args_values[0]) + 1)\n self.df_arg = args_values[0]\n elif function_info == ('pandas.core.frame', '__getitem__') and isinstance(args_values, MlinspectSeries):\n self.select = True\n self.before_call_used_args_add_description(args_values, code_reference, function_info, args_code)", "def call(self, procedure: str, *args: aiowamp.WAMPType,\n kwargs: aiowamp.WAMPDict = None,\n receive_progress: bool = None,\n call_timeout: float = None,\n cancel_mode: aiowamp.CancelMode = None,\n disclose_me: bool = None,\n resource_key: str = None,\n options: aiowamp.WAMPDict = None) -> aiowamp.CallABC:\n ...", "def get_info(self) -> types.NestedArray:\n raise NotImplementedError('No support of get_info for this environment.')", "def got_info(self, cloud_obj):", "def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_info_cli(self, no):\n #\n os.system(\"toilet -f smblock --filter border:metal -w 50 'Indo Phone Number Checker - ( IPNC )'\")\n print()\n os.system(\"toilet -f smblock ' by : @danrfq' --filter gay\")\n print()\n\n for data in self.get_info(no):\n if \"message\" in data:\n print(c.fg.red+\"[ ERROR ]\\nFormat Nomor Yang Anda Masukkan Salah!\"+c.end+\"\\n\"+c.fg.lightgreen+\"Contoh Nomor : +6281291718019\"+c.end)\n else:\n print(c.fg.yellow+\"\"\"╔ [ {} Information ]\n╠\n╠ International : {}\n╠ National : {}\n╠ Provider : {}\n╠ Type : {}\n╠ Location : {}\n╠ Timezones : {}\n╠\n╚ [ Finish ]\"\"\".\n format(no,\n data[\"international\"],\n data[\"national\"],\n data['provider'],\n data[\"type\"].replace(\"_\",\" \").title(),\n data[\"location\"],\n \", \".join(data[\"timezone\"]))+c.end\n )", "def InfoCall(connection, functionno, rc):\n\n ssl_logging = logging.getLogger('SSL_InfoCall')\n\n ssl_logging.debug('In InfoCall')\n ssl_logging.debug('State : %s' % connection.state_string())\n ssl_logging.debug('Fuction Number: %s' % functionno)\n ssl_logging.debug('Return Code : %s' % rc)\n return 0", "def pex_info_name(entry_point):\r\n return os.path.join(entry_point, 'PEX-INFO')", "def test_ws_getItemInfosWithExtraInfosRequest(self):\n self.changeUser('pmCreator1')\n self.failUnless(len(self.portal.portal_catalog(portal_type='MeetingItemPga')) == 0)\n # prepare data for a default item\n req = self._prepareCreationData()\n # add one annex\n data = {'title': 'My annex 1', 'filename': 'smallTestFile.pdf', 'file': 'smallTestFile.pdf'}\n req._creationData._annexes = [self._prepareAnnexInfo(**data)]\n # create the item\n newItem, reponse = self._createItem(req)\n newItemUID = newItem.UID()\n # get informations about the item, by default 'showExtraInfos' is False\n resp = self._getItemInfos(newItemUID, showExtraInfos=True)\n extraInfosFields = SOAPView(self.portal, req)._getExtraInfosFields(newItem)\n # check that every field considered as extra informations is returned in the response\n for extraInfosField in 
extraInfosFields:\n self.failUnless(extraInfosField.getName() in resp)", "def call_method(method, meta):\n try:\n return method()\n except (OSError, NotImplementedError) as e:\n ometa = meta.copy()\n ometa[MK_PAYLOAD] = False\n ometa[MK_ERROR] = type(e).__name__\n return ometa.data, None", "def test_ctcpQuery_CLIENTINFO(self):\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"\")\n self.client.ctcpQuery_CLIENTINFO(self.user, self.channel, \"PING PONG\")\n info = (\n \"ACTION CLIENTINFO DCC ERRMSG FINGER PING SOURCE TIME \" \"USERINFO VERSION\"\n )\n self.assertEqual(\n self.client.methods,\n [\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", info)])),\n (\"ctcpMakeReply\", (\"Wolf\", [(\"CLIENTINFO\", None)])),\n ],\n )", "def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)", "def svn_info_dup(svn_info_t_info, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def df_info(df: pd.DataFrame, return_info=False, shape=True, cols=True, info_prefix=''):\n info = info_prefix\n if shape:\n info = f'{info}Shape = {df.shape}'\n if cols:\n info = f'{info} , Cols = {df.columns.tolist()}'\n print(info)\n if return_info:\n return info", "def testOverrideGlobalInfoHandler(self):\n self.stream_start(mode='component',\n jid='tester.localhost',\n plugins=['xep_0030'])\n\n def dynamic_global(jid, node, ifrom, iq):\n result = self.xmpp['xep_0030'].stanza.DiscoInfo()\n result['node'] = node\n result.add_identity('component', 'generic', name='Dynamic Info')\n return result\n\n self.xmpp['xep_0030'].set_node_handler('get_info',\n handler=dynamic_global)\n\n self.xmpp['xep_0030'].restore_defaults(jid='user@tester.localhost',\n node='testing')\n\n self.xmpp['xep_0030'].add_feature(jid='user@tester.localhost',\n node='testing',\n feature='urn:xmpp:ping')\n\n self.recv(\"\"\"\n <iq type=\"get\" id=\"test\"\n to=\"user@tester.localhost\"\n from=\"tester@localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\" />\n </iq>\n \"\"\")\n\n self.send(\"\"\"\n <iq type=\"result\" id=\"test\"\n to=\"tester@localhost\"\n from=\"user@tester.localhost\">\n <query xmlns=\"http://jabber.org/protocol/disco#info\"\n node=\"testing\">\n <feature var=\"urn:xmpp:ping\" />\n </query>\n </iq>\n \"\"\")", "def test_fax_inbound_automation_get(self):\n pass", "def info(msg, *args):\n if args:\n msg %= args\n click.echo(msg, file=sys.stdout)", "def process_info(self, info):\n info['result'] = info['result'].value\n return info", "def test_info(self):\n self.run_function(\"group.add\", [self._group], gid=self._gid)\n self.run_function(\"user.add\", [self._user])\n self.run_function(\"group.adduser\", [self._group, self._user])\n group_info = self.run_function(\"group.info\", [self._group])\n\n self.assertEqual(group_info[\"name\"], self._group)\n self.assertEqual(group_info[\"gid\"], self._gid)\n self.assertIn(self._user, str(group_info[\"members\"]))", "def printinfo(arg1, *vartuple):\n print('This is the output:')\n print(arg1)\n\n for var in vartuple:\n print(var)\n\n return", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def handle_info(self, api, command):\n return self.handle_log(api, command, level=logging.INFO)", "def test_get_invoice_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_info(invoice_id=1)\n\n assert actual_data == 
expected_data", "def test_cell_list_detail_success(self, mock_list):\n self.shell('cell-list -r 1 --detail')\n mock_list.assert_called_once_with(detail=True)", "def test_patch_info(self):\n patch_info = versions.PatchInfo(iiq_dir='/some/path',\n patches_dir='/another/path',\n is_installed=False,\n specific_patch='this_patch',\n readme='data from readme',\n all_patches=('patch1', 'patch2'))\n self.assertTrue(isinstance(patch_info, versions._PatchInfo))", "def meta_info(environ, start_response, logger, handle):\n pass", "def show_info(title, message):\n\n pass" ]
[ "0.6383911", "0.5714342", "0.55663514", "0.5375583", "0.532167", "0.52700174", "0.5258182", "0.5240253", "0.51907164", "0.5187251", "0.51472026", "0.50939924", "0.5084021", "0.50806963", "0.507654", "0.5028689", "0.50142485", "0.49954024", "0.4964053", "0.49308", "0.49162632", "0.4889757", "0.48548278", "0.48462722", "0.4829606", "0.48205596", "0.48201934", "0.48180994", "0.47867846", "0.4786442", "0.47680753", "0.47460273", "0.47422758", "0.4729599", "0.47082764", "0.47072533", "0.47055408", "0.4705246", "0.46863875", "0.46861896", "0.4680918", "0.4650679", "0.464461", "0.46310905", "0.46120685", "0.46037155", "0.4588301", "0.45876884", "0.4587433", "0.45860124", "0.45793778", "0.45778674", "0.45753038", "0.45551494", "0.45551494", "0.45543382", "0.4554075", "0.45531243", "0.4545149", "0.45408547", "0.45366028", "0.45054495", "0.45013094", "0.44972938", "0.44949204", "0.44949204", "0.44941145", "0.44751275", "0.4473639", "0.4472719", "0.44713157", "0.44683638", "0.44672975", "0.44637358", "0.4456193", "0.4455615", "0.44464648", "0.44366753", "0.44359055", "0.44345355", "0.44344303", "0.44277602", "0.44249442", "0.44221655", "0.4418834", "0.44068232", "0.44046128", "0.4400749", "0.4400578", "0.4398876", "0.43983194", "0.4390595", "0.4386194", "0.43792915", "0.43773672", "0.43747872", "0.43745047", "0.4364967", "0.43648437", "0.43604663" ]
0.82054126
0
Test the popxl basic repeat example
def test_documentation_popxl_repeat_0(self):
    filename = "repeat_graph_0.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_in_sequence(self):\n filename = \"in_sequence.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(x, repeats, axis=None):\r\n return RepeatOp(axis=axis)(x, repeats)", "async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def populate(self, pop_size):\n for _ in range(pop_size):\n sample = next(self._exp_source)\n self._add(sample)", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(self):\n return self._repeat", "async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def repeat(self, count):\n return self.Sequence((self,) * count)", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'poprve = ', element\n\n for element in mygenerator:\n print 'podruhe = ', element", "def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_repeat_seq():\n\n out_file = \"out.txt\"\n try:\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n rv, out = getstatusoutput(f'{prg} -f {repeat}')\n assert rv == 0\n expected = (' 1: amigo_repeat.txt\\n'\n 'Wrote 5 gene IDs from 1 file to file \"out.txt\"')\n assert out == expected\n assert os.path.isfile(out_file)\n exp_repeat = '\\n'.join(\n sorted(\"\"\"\n AT4G14690 AT5G41340 AT5G03720 AT5G12020 AT2G22360\n \"\"\".split()))\n assert open(out_file).read().strip() == exp_repeat.strip()\n\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.1:\r\n CURRENTFOXPOP -= 1", "def test_x_repeating(name, ipset_x_repeating):\n with pytest.raises(ValueError):\n interpolation.interpolate(*ipset_x_repeating, kind=name, **IPARGS.get(name, {}))", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def foxGrowth():\r\n # you need these lines for modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n if random.random() <= (CURRENTRABBITPOP/MAXRABBITPOP):\r\n CURRENTRABBITPOP -= 1\r\n # fox reproducing\r\n if random.random() <= (1/3):\r\n 
CURRENTFOXPOP += 1\r\n else:\r\n # fox dying\r\n if random.random() <= 0.9:\r\n CURRENTFOXPOP -= 1", "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def rabbitGrowth():\r\n # you need this line for modifying global variables\r\n global CURRENTRABBITPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTRABBITPOP):\r\n if random.random() <= (1 - (CURRENTRABBITPOP/MAXRABBITPOP)):\r\n CURRENTRABBITPOP += 1", "def rabbitGrowth():\r\n # you need this line for modifying global variables\r\n global CURRENTRABBITPOP\r\n\r\n # TO DO\r\n #pass\r\n for i in range(CURRENTRABBITPOP):\r\n if random.random() <= (1 - (CURRENTRABBITPOP/MAXRABBITPOP)):\r\n CURRENTRABBITPOP += 1", "def repeat(self, repeats):\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)", "def test_op_repeat(self) -> None:\n op_base = OpIncrForTest()\n kwargs_per_step_to_add = [\n dict(key_in=\"data.val.a\", key_out=\"data.val.b\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.c\"),\n dict(key_in=\"data.val.b\", key_out=\"data.val.d\"),\n dict(key_in=\"data.val.d\", key_out=\"data.val.d\"),\n ]\n op_repeat = OpRepeat(op_base, kwargs_per_step_to_add)\n sample_dict = NDict({})\n sample_dict[\"data.val.a\"] = 5\n sample_dict = op_repeat(sample_dict, \"_.test_repeat\", incr_value=3)\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 14)\n\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.d\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n\n sample_dict[\"data.val.e\"] = 48\n op_repeat.reverse(\n sample_dict,\n key_to_follow=\"data.val.d\",\n key_to_reverse=\"data.val.e\",\n op_id=\"_.test_repeat\",\n )\n self.assertEqual(sample_dict[\"data.val.a\"], 5)\n self.assertEqual(sample_dict[\"data.val.b\"], 8)\n self.assertEqual(sample_dict[\"data.val.c\"], 11)\n self.assertEqual(sample_dict[\"data.val.d\"], 8)\n self.assertEqual(sample_dict[\"data.val.e\"], 42)", "def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)", "def test_make_pop(self, pop_size, cell_number, microcell_number):\n for i in [0, 1]:\n pe.Parameters.instance().use_ages = i\n # Population is initialised with no households\n pop_params = {\"population_size\": pop_size,\n \"cell_number\": cell_number,\n \"microcell_number\": microcell_number}\n test_pop = ToyPopulationFactory.make_pop(pop_params)\n\n total_people = 0\n count_non_empty_cells = 0\n for cell in test_pop.cells:\n for microcell in cell.microcells:\n total_people += len(microcell.persons)\n if len(cell.persons) > 0:\n count_non_empty_cells += 1\n # Test there is at least one non-empty cell\n self.assertTrue(count_non_empty_cells >= 1)\n # Test that everyone in the population has been assigned a\n # microcell\n self.assertEqual(total_people, pop_size)\n\n # Test a population class object is returned\n self.assertIsInstance(test_pop, pe.Population)", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def MakeRepeat1(self,content):\n return self.register(Repeat1(content,reg=self))", "def test_pop(self):\n sched = Schedule()\n inst_map = 
InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 100, sched)\n self.assertEqual(inst_map.pop(\"tmp\", 100), sched)\n self.assertFalse(inst_map.has(\"tmp\", 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(\"tmp\"), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def runSimulation(numSteps):\n\n rabbit_pop = []\n fox_pop = [] \n \n for steps in range(numSteps):\n rabbitGrowth()\n foxGrowth()\n rabbit_pop.append(CURRENTRABBITPOP)\n fox_pop.append(CURRENTFOXPOP)\n \n return (rabbit_pop, fox_pop)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)", "def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called()\n\n # pop object default value\n batch.pop_object()\n mock_obj.pop.assert_called_with(-1)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop object at index\n batch.pop_object(10)\n mock_obj.pop.assert_called_with(10)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference default value\n batch.pop_reference()\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(-1)\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference at index\n batch.pop_reference(9)\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(9)", "def test_pop_gate(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(XGate(), 100, sched)\n self.assertEqual(inst_map.pop(XGate(), 100), sched)\n self.assertFalse(inst_map.has(XGate(), 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(XGate()), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def generate_RC_codebook(cube_dim, reps, permutate=1, item_num=0):\r\n\r\n\timport numpy as np\r\n\t\r\n\tcodebook = []\r\n\tcodeindex = []\r\n\t\r\n\tcube_size = np.prod(cube_dim)\r\n\tif item_num == 0 or item_num > cube_size:\r\n\t\tntargets = cube_size\r\n\telse:\r\n\t\tntargets = item_num\r\n\t\r\n\tindices = np.arange(cube_size)\r\n\tcube = indices.reshape(cube_dim)\r\n\t\r\n\tlen_codebook = sum(cube_dim)#6col + 6row =12\r\n\t\r\n\tfor rep in range(reps):\r\n\t\t# random permute the codebook for one rep\r\n\t\tind_code = np.random.permutation(len_codebook)\r\n\t\t\r\n\t\t# avoid consecutive appearance between reps\r\n\t\tif rep > 0:\r\n\t\t\twhile ind_code[0]==last_ind_code:\r\n\t\t\t\tind_code = np.random.permutation(len_codebook)\r\n\t\tlast_ind_code = ind_code[-1]\r\n\t\t\r\n\t\tfor i in ind_code:\r\n\t\t\tif i < cube_dim[0]:\r\n\t\t\t\tcode_slice = cube[i,:]\r\n\t\t\t\tcode_slice = code_slice[np.nonzero(code_slice<ntargets)]\r\n\t\t\t\tcodebook.append(code_slice.tolist())\r\n\t\t\telse:\r\n\t\t\t\tcode_slice = cube[:,i-cube_dim[0]]\r\n\t\t\t\tcode_slice = code_slice[np.nonzero(code_slice<ntargets)]\r\n\t\t\t\tcodebook.append(code_slice.tolist())\r\n\t\tcodeindex.extend(ind_code)\t\r\n\r\n\treturn cube, codebook, codeindex", "def foxGrowth():\r\n # you need these lines for 
modifying global variables\r\n global CURRENTRABBITPOP\r\n global CURRENTFOXPOP\r\n\r\n \r\n \r\n for fox in range(CURRENTFOXPOP):\r\n if CURRENTRABBITPOP > 10:\r\n prob_fox_eats_rabbit = float(CURRENTRABBITPOP)/MAXRABBITPOP\r\n if prob_fox_eats_rabbit > random.random():\r\n CURRENTRABBITPOP -= 1\r\n if 1.0/3.0 > random.random():\r\n CURRENTFOXPOP += 1\r\n elif CURRENTFOXPOP > 10:\r\n if 0.9 > random.random():\r\n CURRENTFOXPOP -= 1", "def poprve():\n mylist = [1, 2, 3]\n for element in mylist:\n print element", "def repeat(self):\n return self._get('repeat')", "def test_calls(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n ex.nreps = nreps\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"name\", m, n, \"X_%d\" % idx, m, \"Y\", m, \"Z\", n], cmds)", "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def generate_pops(target_reg, exclude_regs=[], count=1, allow_dups=True):\n\n random_regs = []\n\n for _ in range(0, count-1):\n random_reg = get_random_register(exclude_regs=exclude_regs)\n\n random_regs.append(random_reg)\n\n pops = ''\n\n for reg in random_regs:\n pops += f'pop {reg}; '\n\n pops += f'pop {target_reg}; '\n\n return pops", "def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))", "def project_pop(self):\n M = self.N[0:2]\n for x in range(10):\n M.append(self.run_step(M))\n split_N = split_list(M)\n \n fig = self.make_figure(split_N)\n fig.update_layout(title='Projected Fish Population')\n\n return fig", "def test_raises(self):\n no_replicates = 25\n try:\n replicate(experiment3, no_replicates)\n except RuntimeError as err:\n self.assertEqual(err, FAKE_ERROR)\n else:\n assert False", "def run_memory_exp(nb_repeat, name, cmd):\n # Create the required folders\n if not os.path.exists(\"../logs\"):\n os.mkdir(\"../logs\")\n if not os.path.exists(f\"../logs/{name}\"):\n os.mkdir(f\"../logs/{name}\")\n\n # Log the configuration\n with open(f\"../logs/{name}/cmd_template.txt\", \"w\") as cmd_log:\n cmd_log.write(cmd)\n\n # Repeat the simulation\n for i in range(nb_repeat):\n print(\"[Memory experience] 
Progression: {}%\".format(i * 100 // nb_repeat))\n itr_cmd = cmd + \" --output {}/itr_{}\".format(name, i)\n args = get_args(itr_cmd.split(\" \"))\n sim = Sim()\n sim.from_args(args)\n print(\"[Memory experience] Done !\")", "def _generate_pileups(self):\n pass", "def test_INVOKE_repeat(self, propose):\n self.rep.proposals[1] = PROPOSAL1\n self.failIf(propose.called)", "def foxGrowth():\n # you need these lines for modifying global variables\n global CURRENTRABBITPOP\n global CURRENTFOXPOP\n \n for fox in range(CURRENTFOXPOP):\n fox_eat_prob = float(CURRENTRABBITPOP) / MAXRABBITPOP\n if random.random() < fox_eat_prob and CURRENTRABBITPOP > 10:\n CURRENTRABBITPOP -= 1\n if random.random() < (1.0 / 3.0):\n CURRENTFOXPOP += 1\n else: \n if random.random() < (1.0 / 10.0) and CURRENTFOXPOP > 10:\n CURRENTFOXPOP -= 1", "def Repeat(dataset, count=None):\n return dataset.repeat(count=count)", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def test_repeatable(self):\n\n def run(seed, ModelClass=Model):\n \"\"\"Return the history of a run\"\"\"\n model = ModelClass(random_seed=seed)\n return model.one_trial(1, 10)\n\n self.assertEqual(run(0, ModelClass=Model).data, run(0, ModelClass=Model).data)\n self.assertEqual(run(0, ModelClass=ReplicatedModel).data, run(0, ModelClass=ReplicatedModel).data)", "def repeat(self, min=0, max=None):\n raise NotImplemented()", "def rabbitGrowth():\n # you need this line for modifying global variables\n global CURRENTRABBITPOP\n\n for rabbit in range(CURRENTRABBITPOP):\n rabbit_reproduction_prob = 1.0 - (float(CURRENTRABBITPOP) / MAXRABBITPOP)\n if random.random() < rabbit_reproduction_prob and CURRENTRABBITPOP < 1000:\n CURRENTRABBITPOP += 1", "def population(params, n=100):\r\n pops = []\r\n for i in range(n):\r\n pop = []\r\n for param in params:\r\n pop.append(np.random.choice(param))\r\n\r\n individuale = Individuale(pop)\r\n pops.append(individuale)\r\n # print(\"No.{} : {} : {}\".format(i, individuale, individuale.x))\r\n return pops", "def runSimulation(numSteps):\r\n rabbit_populations = []\r\n fox_populations = []\r\n for step in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbit_populations.append(CURRENTRABBITPOP)\r\n fox_populations.append(CURRENTFOXPOP)\r\n return (rabbit_populations, fox_populations)", "def run_example1():\r\n #outcomes = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n #outcomes = set(['Heads','Tails'])\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n outcomes = set([\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"])\r\n \r\n length = 7\r\n seq_outcomes = gen_permutations(outcomes,length)\r\n print \"Computed\", len(seq_outcomes), \"sequences of\", str(length), \"outcomes\"\r\n #print \"Sequences were\", seq_outcomes\r", "def podruhe():\n mylist = [x for x in range(3)]\n for element in mylist:\n print element", "def repeat(fn):\n def repeated():\n i = 0\n while i < random_test_iterations:\n fn()\n i += 1\n # nosetest runs functions that start with 'test_'\n repeated.__name__ = fn.__name__\n return repeated", "def repeat(word, repetitions):\n return word * repetitions", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return 
wrapper_repeat\n\n return decorator_repeat", "def repeat(s):\r\n\r\n return s", "def testspec(arr: list[int]) -> None:\n\n print(50*'-')\n print(arr)\n print_rem(arr)\n rev_dupes(arr)\n print(arr)", "def repeat(a, repeats, axis=None):\n return afnumpy.asarray(a).repeat(repeats, axis=axis)", "def test_pop_no_args(self):\r\n msg_list = messages.MessageList()\r\n # Adds 5 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n msg_list.push(messages.StringMessage(\"d\"))\r\n msg_list.push(messages.StringMessage(\"e\"))\r\n\r\n self.assertEqual(msg_list.length(), 5)\r\n popped = msg_list.pop()\r\n self.assertEqual(msg_list.length(), 4)\r\n self.assertEqual(popped.msg, \"e\")\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n self.assertRaises(IndexError, msg_list.pop)", "def generatesack(nmbrofitems):\n while nmbrofitems != 0:\n newitem = Sackitem(random.randint(1,10),random.randint(1,20))\n itemlist.append(newitem)\n nmbrofitems -= 1\n return itemlist", "def initialPop(popSize,rangeMin,rangeMax,genLength):\n\t\n\tpop=[]\n\n\tfor i in range(popSize):\n\t\tgenome=[]\n\t\tfor j in range(genLength):\n\t\t\tparam=random.uniform(rangeMin,rangeMax)\n\t\t\tgenome.append(param)\n\t\tpop.append(Gen(genome)) #add each random genome to the pop\n\t\t\t\t\n\treturn pop", "def fillLoop():\n print('Now looping spotfill (press enter to quit).')\n x = 1\n while x != 0:\n x = spotfill()\n print('Ending spotfill.')\n return", "def test_generate_sample_sheet(self):\n pass", "def alert_pet(self, reps=3):\n for x in range(0,reps):\n time.sleep(1)\n GPIO.output(self.alert_pin, 0)\n time.sleep(1)\n GPIO.output(self.alert_pin, 1)\n return", "def test_reset(sim):\n repeats = 3\n dt = 1\n sim.setup(timestep=dt, min_delay=dt)\n p = sim.Population(1, sim.IF_curr_exp(i_offset=0.1))\n p.record('v')\n\n for i in range(repeats):\n sim.run(10.0)\n sim.reset()\n data = p.get_data(clear=False)\n sim.end()\n\n assert len(data.segments) == repeats\n for segment in data.segments[1:]:\n assert_array_almost_equal(segment.analogsignals[0],\n data.segments[0].analogsignals[0], 10)", "def test_remainder(self):\n alp = list(range(5))\n targets = generate_targets(alp, 12)\n\n counts = Counter(targets)\n for item in alp:\n self.assertGreaterEqual(counts[item], 2)\n self.assertLessEqual(counts[item], 3)", "def simulate_fish_population(fish, days):\n for _ in range(days):\n new_fish = []\n for fish in fish:\n if fish == 0:\n new_fish.append(6)\n new_fish.append(8)\n else:\n new_fish.append(fish - 1)\n fish = new_fish\n return fish", "def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if 
sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def test_push_pop(values):\n test_stack = stack.Stack()\n\n for value in values:\n test_stack.push(value)\n\n for expected_value in reversed(values):\n value = test_stack.pop()\n assert value == expected_value\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.pop()", "def run_simulation(env, pop, nr):\n pop_f = []\n pop_pl = []\n pop_el = []\n\n\n for individual in pop:\n fitness, player_life, enemy_life, time = env.play(pcont=individual)\n pop_f.append(fitness)\n pop_pl.append(player_life)\n pop_el.append(enemy_life)\n\n return pop_f, pop_pl, pop_el", "def reproduce(population:list):\n new_gen = []\n probs = []\n for p in population:\n probs.append(p[3])\n while len(new_gen) != len(probs):\n parents = selection(probs)\n son,eval_son,daughter,eval_daughter = xo(population[parents[0]][0],population[parents[0]][1], population[parents[1]][0],population[parents[1]][1],2)\n new_gen.append([son,eval_son])\n new_gen.append([daughter,eval_daughter])\n # mutation\n # lets say 5% of the population gets mutated\n how_many_to_mutate = int(NUM_OF_CHROMOZOMS * (1/100))\n t = [i for i in range(NUM_OF_CHROMOZOMS)]\n # choose percent of the population randomly, uniformly\n indices_to_mutate = choice(t, how_many_to_mutate, replace=False)\n for i in range(len(indices_to_mutate)):\n mutate(new_gen[indices_to_mutate[i]])\n\n evaluateAll(new_gen)\n return new_gen", "def crossover(population, kw=None, **kwargs):\n future_population = []\n while len(future_population) < len(population):\n p1, p2 = random.choice(population)['notes'], random.choice(population)['notes']\n split = random.randint(1, len(p1) - 1)\n map(future_population.append, [p1[:split] + p2[split:], p2[:split] + p1[split:]])\n return future_population", "def genPopulation(self):\r\n self.population_list = []\r\n for i in xrange(0, self.pop_size):\r\n individual = bitarray(self.indv_size)\r\n # Loop for randomizing the 'individual' string.\r\n for j in xrange(0, self.board_size):\r\n vert_pos = random.randint(0, self.board_size-1)\r\n vert_pos_bitnum = toBitArray(vert_pos, self.pos_bits_size)\r\n # print \"\\t\\t\", j, vert_pos_bitnum, vert_pos\r\n for k in range(0, self.pos_bits_size):\r\n individual[j * self.pos_bits_size + k] = vert_pos_bitnum[k]\r\n self.population_list.append(individual)\r\n # print \"\\t\", i, individual\r", "def repeat_expt(epsilon, gamma,\n result_nonprivate, \n repetitions=10,\n outfile_singles=None, outfile_aggregates=None, \n data_blocker=1, windsorized=False):\n \n \n blocker = 
gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n # 1 NaiveDataBlocker\n # 2 ResamplingDataBlockerConstantSize \n # 3 ResamplingDataBlockerConstantBlocks\n\n if not windsorized:\n DP_mode=\"standard_DP\"\n else:\n DP_mode=\"windsorized_DP\"\n\n logger.info(\"Running %d repetitions with data_blocker=%s\" % (repetitions, blocker))\n logger.info(\"epsilon=%s gamma=%s, in mode %s\" % (epsilon, gamma, DP_mode))\n \n results, starttime = [], time.clock()\n \n # results = pickle.load( open( \"res.pickle\", \"rb\" ))\n \n \n for i in range(repetitions):\n\n # TODO: Perhaps they DO or DO NOT have to be recreated in each run?\n blocker = gupt.GuptRunTime.get_data_blockers()[data_blocker-1]\n reader = censusdatadriver.get_reader()\n runtime = gupt.GuptRunTime(MeanComputer, reader, epsilon, \n blocker_name=blocker, blocker_args=gamma)\n # end TODO\n\n if not windsorized:\n res=runtime.start()\n else:\n res=runtime.start_windsorized()\n \n # artificial 2nd dimension, just for testing these routines:\n # res = res + res\n \n print report_results(res, result_nonprivate, DP_mode, blocker, \n epsilon, gamma, outfile_singles)\n sleep_short()\n \n results.append(res)\n\n # pickle.dump(results, open( \"res.pickle\", \"wb\" ) )\n \n \n duration = time.clock() - starttime\n logger.info(\"%d repetitions took %.2f seconds\" % (repetitions, duration))\n \n mean, std = analyze_results(results) # , result_nonprivate)\n \n print report_results_repeated(mean, std, DP_mode, blocker,\n epsilon, gamma, repetitions,\n outfile=outfile_aggregates)", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def pop():", "def run_expt_many_times(result_nonprivate, repetitions=10,\n data_blocker=1, windsorized=False,\n outfile_singles=None, outfile_aggregates=None):\n for epsilon in numpy.arange(0.1, 2.2, 0.2):#(0.2, 10, 0.2):\n for gamma in range(1, 10, 1): # (1, 7, 1):\n \n repeat_expt(epsilon, gamma,\n data_blocker=data_blocker, windsorized=windsorized,\n result_nonprivate=result_nonprivate,\n repetitions=repetitions,\n outfile_singles=outfile_singles, \n outfile_aggregates=outfile_aggregates)", "def reset_next_population(self):\n self.next_population = []", "def repeat_value(value: Any = None, repeat_count: int = None) -> ObservableBase:\n from ..operators.observable.repeat import repeat_value\n return repeat_value(value, repeat_count)", "def init_pop(self):\n genes = np.random.randn( self.population_size * self.individual.gene_count )\n self.population = genes.reshape((self.population_size, -1))\n #print(self.population)", "def genNextPop(prevPop, masterList, popSize):\n parSize = int(popSize / 10) #top 10%\n parentPop = [Team(prevPop[i].roster) for i in range(parSize)]\n parentPop = getStats(parentPop, masterList) # inefficent\n rosterSize = len(parentPop[0].roster)\n newPop = []\n #parentsList = [] #debug\n for i in range(popSize):\n chromosome = doCrossover(parentPop, parSize, rosterSize)\n newPop.append(Team(chromosome))\n getStats(newPop, masterList)\n #showStats(newPop, masterList, \"all\")\n #debug code\n #for playerInd in chromosome:\n # print(masterList[playerInd].pos)\n\n return newPop", "def test_pop(self):\n test_list = DoubleLinkedList()\n test_list.push(15)\n test_list.push(150)\n test_list.push(13)\n test_list.push(155)\n test_list.push(1)\n test_list.pop()\n self.assertEqual(test_list.last().get_elem(), 155)", "def rand_pop(l: list):\n i = randrange(len(l)) \n l[i], l[-1] = l[-1], l[i] \n return l.pop()", "def rep_pops(self):\n return 
_PopFlag(self.rep_pop_flag)", "def repeated(self, *args, **kwargs):\n return self.rep.RepeatBorders(self._trg, *args, **kwargs)", "def test_make_pop_exception(self, patch_log, patch_random):\n patch_random.side_effect = ValueError\n # Population is initialised with no households\n pop_params = {\"population_size\": 10, \"cell_number\": 1,\n \"microcell_number\": 1}\n ToyPopulationFactory.make_pop(pop_params)\n patch_log.assert_called_once_with(\"ValueError in ToyPopulation\"\n + \"Factory.make_pop()\")", "def get_random_population():\r\n return [ get_random_individual() for _ in range(POPULATION_COUNT) ]", "def test_shift(doctest):", "def test_solo_cell():\n cell = c6.Cell(loc=[1, 1])\n for i in range(10):\n cell.step()", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def runSimulation(numSteps):\r\n\r\n # TO DO\r\n #pass\r\n rabbits = []\r\n foxes = []\r\n for i in range(numSteps):\r\n rabbitGrowth()\r\n foxGrowth()\r\n rabbits.append(CURRENTRABBITPOP)\r\n foxes.append(CURRENTFOXPOP)\r\n return rabbits, foxes", "def popmany(self, num=1):\n return [self.next() for i in range(num)]", "def ipset_x_repeating():\n x = np.linspace(0, 10, 11)\n x[5] = x[4]\n return IPSet(x=x, y=np.linspace(-1, 1, 11), x_new=np.linspace(2, 5, 7))", "def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '0% [....................]')", "def play():\n decks = make_decks()\n\n deck_pulls = {deck: [] for deck in decks}\n for i in range(100):\n deck = random.choice(decks)\n deck_pulls[deck].append(deck.pull())\n\n return decks, deck_pulls", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")" ]
[ "0.6505946", "0.6401242", "0.58164656", "0.54440105", "0.53839743", "0.5373807", "0.53707576", "0.53479195", "0.5337674", "0.5326383", "0.5316677", "0.53121865", "0.53121865", "0.5300285", "0.5297226", "0.52901715", "0.5276409", "0.5268856", "0.5196304", "0.5196304", "0.5173073", "0.51597667", "0.51544595", "0.5149219", "0.51400036", "0.51352286", "0.5117778", "0.5116364", "0.51089984", "0.51067406", "0.50602293", "0.5053365", "0.5052107", "0.50432307", "0.50402075", "0.5036144", "0.5017667", "0.50152904", "0.5014456", "0.5009249", "0.5007194", "0.49938065", "0.49811885", "0.49721584", "0.49535653", "0.49454007", "0.49449232", "0.4931532", "0.49272776", "0.49220848", "0.49019122", "0.48983014", "0.48686028", "0.4855172", "0.48511168", "0.4837923", "0.48372665", "0.48229888", "0.48168114", "0.48092508", "0.48020446", "0.47946322", "0.47930422", "0.4788318", "0.47862095", "0.47860527", "0.47813416", "0.47792202", "0.4778324", "0.47759062", "0.47694802", "0.47679576", "0.47674227", "0.4765635", "0.47529393", "0.47511858", "0.4750886", "0.4748555", "0.47464865", "0.473931", "0.4717066", "0.47106412", "0.4705226", "0.47019133", "0.4699689", "0.46994394", "0.4684794", "0.46788332", "0.4677337", "0.46694157", "0.46677655", "0.46671188", "0.46560803", "0.46535814", "0.46535814", "0.4648632", "0.46437395", "0.463997", "0.46368384", "0.46349326" ]
0.6540377
0
Test the popxl subgraph-in-parent repeat example
def test_documentation_popxl_repeat_1(self):
    filename = "repeat_graph_1.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def test_documentation_popxl_repeat_2(self):\n filename = \"repeat_graph_2.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def test_lacking_parent(self):\n pass", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def test_parent_with_iterables(self):\n def makeCubesAndGrp():\n cmds.file(new=1, f=1)\n cubes = []\n for x in range(10):\n cubes.append(pm.polyCube()[0])\n group = pm.group(empty=True)\n return cubes, group\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4] + [group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], cubes[2], cubes[3], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], [cubes[2], cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent([cubes[0], cubes[1]], cubes[2], [cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 
'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def reproduce(self, popDensity, activeDrugs):\r\n # TODO\r\n\r\n resistAll = True\r\n for drug in activeDrugs:\r\n if (self.resistances[drug]==False):resistAll = False\r\n \r\n\r\n\r\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\r\n \r\n \r\n if (resistAll and random.random() < maxReproduceProb):\r\n childResistances = {}\r\n for drug in self.resistances:\r\n if random.random() < self.mutProb:\r\n childResistances[drug] = (not self.resistances[drug])\r\n else:childResistances[drug] = (self.resistances[drug])\r\n\r\n \r\n childOfVirus = ResistantVirus(self.maxBirthProb, self.clearProb,childResistances,self.mutProb)\r\n return childOfVirus\r\n \r\n else: raise NoChildException('Child not created!')", "def test_generator8(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = (xpb.foo.bar | xpb.x.y).parenthesize()\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() & xpb.c\n xp2 = b() & xpb.d\n xp1_exp = '(/foo/bar or /x/y) and /c'\n xp2_exp = '(/foo/bar or /x/y) and /d'\n base_exp = '(/foo/bar or /x/y)'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, 
len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def mutate(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n children = []\n k = rand(len(pop), len(pop[0])) > self.fracMutation * rand()\n childn1 = cp.copy(permutation(pop))\n childn2 = cp.copy(permutation(pop))\n r = rand()\n for j in range(0, len(pop), 1):\n n = np.array(childn1[j] - childn2[j])\n stepSize = r * n * varID + (n * intDiscID).astype(int)\n tmp = (pop[j] + stepSize * k[j, :]) * varID + (pop[j] + stepSize * k[j, :]) * intDiscID % (self.ub + 1 - self.lb)\n children.append(simple_bounds(tmp, self.lb, self.ub))\n\n return children", "def test_aggregation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.aggregation_default = 'a'\n cfg.genome.aggregation_options = {'a': 1, 'b': 2}\n gene1, gene2 = get_simple_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n if gene3.aggregation == gene1.aggregation:\n p1 = True\n elif gene3.aggregation == gene2.aggregation:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(gene3.aggregation, gene1.aggregation)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.aggregation, gene2.aggregation)", "def test_get_child():\n \n root_ts = TrackSegment(flow_dict=flow_dict) \n \n # ROOT MODULE\n start = root_ts.get_child(\"start\")\n # check depth\n assert(start.depth == root_ts.depth+1)\n # check parent\n assert(start.parent is root_ts)\n # check module_id\n assert(start.module_id == 'start')\n \n # CHILD (1,2,3)\n root_ts_get_child_result = root_ts.get_child((1,2,3))\n # check depth\n assert(root_ts_get_child_result.depth == root_ts.depth+1)\n # check parent\n assert(root_ts_get_child_result.parent is root_ts)\n \n # CHILD (2,3,4)\n root_ts_get_child_result2 = root_ts_get_child_result.get_child((2,3,4))\n # check depth\n assert(root_ts_get_child_result2.depth == root_ts.depth+2)\n # check parent \n assert(root_ts_get_child_result2.parent is root_ts_get_child_result)\n \n print(\"TEST GET_CHILD: success!\")", "def 
test_has_children_property(mock_amg):\n\n # split a cell so we can be sure it should have children\n mock_amg.cells[4].split()\n\n assert mock_amg.cells[4].has_children()\n assert not mock_amg.cells[1].has_children()\n assert not mock_amg.cells[4].children['bl'].has_children()", "def test_aggregation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.aggregation_default = 'a'\n cfg.genome.aggregation_options = {'a': 1, 'b': 2}\n gene1, gene2 = get_output_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n if gene3.aggregation == gene1.aggregation:\n p1 = True\n elif gene3.aggregation == gene2.aggregation:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(gene3.aggregation, gene1.aggregation)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.aggregation, gene2.aggregation)", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def parents_loop(self):\r\n while 
len(self.parents) > 0:\r\n children = 0\r\n self.parent1 = random.choice(self.parents)\r\n index = self.parents.index(self.parent1)\r\n del self.parents[index]\r\n\r\n self.parent2 = random.choice(self.parents)\r\n index = self.parents.index(self.parent2)\r\n del self.parents[index]\r\n\r\n while children < 2:\r\n self.child = copy.deepcopy(self.parent1)\r\n \r\n self.battery_loop()\r\n\r\n childsolution = random_algo.Random(self.child, self.cable_cost, self.battery_cost)\r\n childsolution.change_battery_or_house('change_battery')\r\n childsolution.change_battery_or_house('change_house')\r\n\r\n if (self.child.valid_solution() and self.child not in self.district_population\r\n and self.child not in self.best_districts and self.child not in self.worst_districts):\r\n self.district_population.append(self.child)\r\n self.cost_populations.append(self.child.total_cost(self.battery_cost, self.cable_cost))\r\n children += 1", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def slice_graph_fwd( startea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\tstartnode = slice_node( startea, 0, reg )\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\ttgt_reg = currslice.get_target_reg()\r\n\t\tif tgt_reg == \"END\":\r\n\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.endea != currslice.get_lines()[-1][0]):\r\n\t\t\t# Nothing much happening here, just proceed to parent bocks\r\n\t\t\tif ua_mnem( currslice.endea ) == \"call\":\r\n\t\t\t\txrefs = get_short_crefs_from( currslice.endea )\r\n\t\t\telse:\r\n\t\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\telse:\r\n\t\t\t# Register was modified, use new register\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ 
newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\treturn [ graph, data_bib ]", "def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )", "def test_generator7(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.foo.bar & xpb.x.y\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() | xpb.c\n xp2 = b() | xpb.d\n xp1_exp = '/foo/bar and /x/y or /c'\n xp2_exp = '/foo/bar and /x/y or /d'\n base_exp = '/foo/bar and /x/y'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def populate_graph(self):", "def test_dag_preserves_superrep(dimension, conversion):\n qobj = conversion(rand_super_bcsz(dimension))\n assert qobj.superrep == qobj.dag().superrep", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_fpy_parent():\n\n data = \"\"\"\n<depletion_chain>\n <nuclide name=\"U235\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"193405400.0\"/>\n <neutron_fission_yields>\n <energies>0.0253</energies>\n <fission_yields energy=\"0.0253\">\n <products>Te134 Zr100 Xe138</products>\n <data>0.062155 0.0497641 0.0481413</data>\n </fission_yields>\n </neutron_fission_yields>\n </nuclide>\n <nuclide name=\"U238\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"200.0e6\"/>\n <neutron_fission_yields parent=\"U235\"/>\n </nuclide>\n</depletion_chain>\n \"\"\"\n\n root = ET.fromstring(data)\n elems = root.findall('nuclide')\n u235 = nuclide.Nuclide.from_xml(elems[0], root)\n u238 = nuclide.Nuclide.from_xml(elems[1], root)\n\n # Make sure U238 yield is same as U235\n assert np.array_equal(u238.yield_data.energies, u235.yield_data.energies)\n assert np.array_equal(u238.yield_data.yield_matrix, u235.yield_data.yield_matrix)\n\n # Make sure XML element created has single attribute\n elem = u238.to_xml_element()\n fpy_elem = elem.find('neutron_fission_yields')\n assert fpy_elem.get('parent') == 'U235'\n assert len(fpy_elem) == 0\n\n data = \"\"\"\n<depletion_chain>\n <nuclide name=\"U235\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"193405400.0\"/>\n </nuclide>\n <nuclide name=\"U238\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"200.0e6\"/>\n <neutron_fission_yields parent=\"U235\"/>\n </nuclide>\n</depletion_chain>\n \"\"\"\n\n # U235 yields are missing, so we should get an exception\n root = ET.fromstring(data)\n 
elems = root.findall('nuclide')\n with pytest.raises(ValueError, match=\"yields\"):\n u238 = nuclide.Nuclide.from_xml(elems[1], root)", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def test_generator6(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = xpb.a.b.c.join(b())\n xp2 = xpb.test.join(b())\n xp1_exp = '/a/b/c/base/foo/bar'\n xp2_exp = '/test/base/foo/bar'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_grandchildren():\n\n # note c.upto(\"status\").desired.grandchildren\n # this is the same as *c.upto(\"status\").desired in python3.5+\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto(\"status\").desired.grandchildren))\n assert \"type\" in res\n assert \"reason\" in res\n assert \"version\" in res\n assert \"image\" in res\n assert \"force\" in res", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def hasSiblings():", "def test_get_related_nodes(self):\n pass", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def test_Tree():", "def reproduce(self, popDensity, activeDrugs):\n\n # Checks the resistance of the mother virus to all drugs in activeDrugs list\n # if mother virus is resistant to all the drugs, reproduction will proceed\n for drug in activeDrugs:\n if not self.isResistantTo(drug):\n raise NoChildException()\n\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\n\n if random.random() < maxReproduceProb:\n resistance_trait = {}\n # Calculate the transfer of resistances property to child virus\n for drug in self.resistances.keys():\n if random.random() <= (1 - self.mutProb):\n resistance_trait[drug] = self.resistances[drug]\n else:\n resistance_trait[drug] = not 
self.resistances[drug]\n\n childOfVirus = ResistantVirus(self.maxBirthProb, self.clearProb, resistance_trait, self.mutProb)\n return childOfVirus\n else:\n raise NoChildException()", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def test_tree_mode4(self):\n 
xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def __init__(self, firstParent, secondParent):\n CrossOver.__init__(self, \"Group Point CrossOver\", firstParent, secondParent)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def test_1():\n for _ in range(100):\n G = NetworkTopo()\n flows = MulticastFlows(G, 10, 40)\n\n spt = ShortestPathTree(G, flows)\n\n for T in spt.multicast_trees:\n assert len(nx.cycle_basis(T)) == 0", "def reproduce(self, popDensity):\r\n\r\n # Does the virus reproduce? 
\r\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\r\n \r\n if random.random() < maxReproduceProb:\r\n childOfVirus = SimpleVirus(self.maxBirthProb, self.clearProb)\r\n return childOfVirus\r\n \r\n else: raise NoChildException('Child not created!')", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def test_input_not_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.grow([dim + 1], nx.empty_graph(dim))", "def crossover(parent: list):\n ### GENERAL METHOD\n child1 = []\n child2 = []\n child = []\n geneA = int(random.random() * len(parent[0]))\n geneB = int(random.random() * len(parent[0]))\n startgene = min(geneA, geneB)\n endgene = max(geneA, geneB)\n for i in range(startgene, endgene):\n child1.append(parent[0][i])\n child2 = [item for item in parent[1] if item not in child1]\n child = child2[:startgene] + child1 + child2[startgene:]\n return child", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def test_10_parents_as_minus_1_0_0_1_2_3_3_4_6_6(self):\n parents = [ -1, 0, 0, 1, 2, 3, 3, 4, 6, 6 ]\n self.init(parents)\n self.assertEqual(5, self.method_under_test())", "def reproduce(self,prob=None):\n for parent in self.agents:\n if parent.fledged(prob):\n fledgling = Agent()\n self._fledgling_move(fledgling,parent.hex)\n self.agents.append(fledgling)\n self.pop_size += 1", "def genNextPop(prevPop, masterList, popSize):\n parSize = int(popSize / 10) #top 10%\n parentPop = [Team(prevPop[i].roster) for i in range(parSize)]\n parentPop = getStats(parentPop, masterList) # inefficent\n rosterSize = len(parentPop[0].roster)\n newPop = []\n #parentsList = [] #debug\n for i in range(popSize):\n chromosome = doCrossover(parentPop, parSize, rosterSize)\n newPop.append(Team(chromosome))\n getStats(newPop, masterList)\n #showStats(newPop, masterList, \"all\")\n #debug code\n #for playerInd in chromosome:\n # print(masterList[playerInd].pos)\n\n return newPop", "def getChildren():", "def test_restore_with_subgraph(self):\n subgraph = self._subgraph()\n task = self._remote_task()\n subgraph['id'] = 15\n 
task['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task])\n assert len(graph.tasks) == 2\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n assert len(subgraphs) == 1\n assert len(remote_tasks) == 1\n\n assert len(subgraphs[0].tasks) == 1\n assert remote_tasks[0].containing_subgraph is subgraphs[0]", "def test_removes_empty_subgraph(self):\n ctx = MockWorkflowContext()\n g = TaskDependencyGraph(ctx)\n\n # sg1 is just empty, no tasks inside it\n sg1 = g.subgraph(ctx)\n # sg2 contains only a NOPTask\n sg2 = g.subgraph(ctx)\n sg2.add_task(tasks.NOPLocalWorkflowTask(ctx))\n\n # sg3 contains sg4, which is empty behcause it only contains a NOPTask\n sg3 = g.subgraph(ctx)\n sg4 = g.subgraph(ctx)\n sg4.add_task(tasks.NOPLocalWorkflowTask(ctx))\n sg3.add_task(sg4)\n\n # sg5 is a subgraph that contains a real task! it is not removed\n sg5 = g.subgraph(ctx)\n real_task = tasks.WorkflowTask(ctx)\n sg5.add_task(real_task)\n\n assert set(g.tasks) > {sg1, sg2, sg3, sg4, sg5, real_task}\n g.optimize()\n assert set(g.tasks) == {sg5, real_task}", "def test_dot(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def test_dummy4(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.log_not() is xp)", "def test_child_link(self):\n def compare_func(obj, node):\n child_nodes = self.get_children_of_node(node)\n\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n # parent-child link (children must have obj as their parent)\n self.assertEqual(child_obj.parent, obj)\n\n self.recursively_compare_tree_against_html(compare_func)", "def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from graph_matcher.subgraph_isomorphisms_iter()", "def sub_graph_merging(self):\n raise NotImplementedError()", "def three_opt(self, pop):\n children, used = ([] for i in range(2))\n for i in range(0, self.population, 1):\n tmp = []\n breaks = np.sort(rand(3) * len(pop[i]) // 1)\n while breaks[1] == breaks[0] or breaks[1] == breaks[2]:\n breaks[1] = rand() * len(pop[i]) // 1\n breaks = np.sort(breaks)\n\n while breaks[2] == breaks[0]:\n breaks[2] = rand() * len(pop[i]) // 1\n\n breaks = np.sort(breaks)\n tmp[0:(int(breaks[0]))] = pop[i][0:int(breaks[0])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - breaks[1]))] = pop[i][int(breaks[1]):int(breaks[2])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[1] - breaks[0]))] = pop[i][int(breaks[0]):int(breaks[1])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - len(pop[i])))] = 
pop[i][int(breaks[2]):len(pop[i])]\n children.append(tmp)\n used.append(i)\n tmp = []\n tmp[0:(int(breaks[0]))] = pop[i][0:int(breaks[0])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[1] - breaks[0]))] = list(reversed(pop[i][int(breaks[0]):int(breaks[1])]))\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - breaks[1]))] = reversed(pop[i][int(breaks[1]):int(breaks[2])])\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - len(pop[i])))] = pop[i][int(breaks[2]):len(pop[i])]\n children.append(tmp)\n used.append(i)\n\n return (\n children, used)", "def testSetParent(self):\n for child in self.color_corrections + self.color_decisions:\n self.assertEqual(\n None,\n child.parent\n )\n\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )\n child.parent = 'banana'\n self.assertEqual(\n 'banana',\n child.parent\n )\n\n self.node.set_parentage()\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )", "def test_get_dependencies_subgraph_by_dfs(\n self, source_node, expected_nodes_in, expected_nodes_out\n ):\n graph = nx.DiGraph()\n graph.add_node(\"pack1\")\n graph.add_node(\"pack2\")\n graph.add_node(\"pack3\")\n graph.add_node(\"pack4\")\n graph.add_edge(\"pack1\", \"pack2\")\n graph.add_edge(\"pack2\", \"pack3\")\n dfs_graph = PackDependencies.get_dependencies_subgraph_by_dfs(\n graph, source_node\n )\n for i in expected_nodes_in:\n assert i in dfs_graph.nodes()\n for i in expected_nodes_out:\n assert i not in dfs_graph.nodes()", "def test_case2(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n graph.addEdge(\"supervisor2\",\"student4\")\n graph.addEdge(\"supervisor3\",\"student3\")\n\n val1 = graph.getSupervisorDegree(\"supervisor1\")\n\n graph.addEdge(\"supervisor1\",\"student2\")\n\n curr = graph.getSupervisorDegree(\"supervisor1\")\n val2 = graph.getSupervisors(\"student2\")\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((curr-1,expected2),(val1,val2))", "def test_12_parents_as_minus_1_0_0_1_1_2_3_4_4_5_7_7(self):\n parents = [ -1, 0, 0, 1, 1, 2, 3, 4, 4, 5, 7, 7 ]\n self.init(parents)\n self.assertEqual(5, self.method_under_test())", "def test_node_info_popup(self):\n def test_popup(node):\n node.details.click()\n with NodeInfo() as details:\n self.assertEqual(\n node.name.text, details.header.text,\n 'Node name')\n details.close.click()\n details.wait_until_exists()\n\n with Nodes()as n:\n test_popup(n.nodes_discovered[0])\n test_popup(n.nodes_offline[0])\n test_popup(n.nodes_error[0])", "def test_input_valid_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.swap([0, dim], nx.empty_graph(dim))", "def test_spec(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_change_parent_location(self):\n pass", "def test_4_parents_as_1_minus_1_1_0(self):\n parents = [ 1, -1, 1, 0 ]\n self.init(parents)\n self.assertEqual(3, self.method_under_test())", "def test_6_parents_as_minus_1_0_1_2_3_4(self):\n parents = [ -1, 0, 1, 2, 3, 4 ]\n self.init(parents)\n self.assertEqual(6, self.method_under_test())", "def test_7_parents_as_minus_1_0_0_1_2_3_4(self):\n parents = [ -1, 0, 0, 1, 2, 3, 4 ]\n self.init(parents)\n 
self.assertEqual(4, self.method_under_test())", "def test_generator5(self):\n xpb = XPathBuilder()\n xp = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp = b().join(xpb.a.b.c[3])\n exp = '/base/foo/bar/a/b/c[3]'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp.tostring(), exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def playout(self, state):\n node = self.root\n isTerminal = False\n depth = 0\n\n while not isTerminal and depth < self.playout_depth:\n #A = len(node.children) # num_children\n A = len(node.children[0]) + len(node.children[1]) \n if A < self.num_initActions:\n #if len(node.children[0]) < self.num_initActions:\n node, init_action_xy, init_spin = self.initChildren(node, state, depth)\n _, ShotVec = CreateShot(_ShotPos(init_action_xy[0], init_action_xy[1], init_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0)\n \n depth += 1\n break\n \n n_a = [c.n_visits for c in node.children[0].values()] + [c.n_visits for c in node.children[1].values()]\n # progressive widening\n # if chilren node has been visited much times then expand\n #if np.sqrt(sum(n_a)) >= A:\n if sum(n_a) >= 10 * A: \n # expand\n node, expanded_action_xy, expanded_spin = self.expand(node)\n _, ShotVec = CreateShot(_ShotPos(expanded_action_xy[0],expanded_action_xy[1], expanded_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n break\n\n # select\n node, selected_action_xy, selected_spin = self.ucb_select(node)\n _, ShotVec = CreateShot(_ShotPos(selected_action_xy[0], selected_action_xy[1], selected_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n\n if isTerminal:\n break\n\n if not isTerminal and depth < self.playout_depth:\n # save the rollout_state for speed.\n #if node.rollout_state is None:\n state = self.rollOut(node, state, depth)\n #node.rollout_state = state\n #else:\n # state = node.rollout_state\n \n self.update(node, state)", "def test_same_node_is_reachable(self):\n # G is an arbitrary tournament on ten nodes.\n G = DiGraph(sorted(p) for p in combinations(range(10), 2))\n assert_true(all(is_reachable(G, v, v) for v in G))", "def test_wp_association_bp(self):\n test_graph = wikipathways_to_bel(WP2359, self.hgnc_manager)\n\n self.assertEqual(type(test_graph), BELGraph, msg='Error with graph type')\n\n self.assertEqual(test_graph.summary_dict()['Number of Nodes'], 2)\n self.assertEqual(test_graph.summary_dict()['Number of Edges'], 1)\n self.assertEqual(count_relations(test_graph)['regulates'], 1)", "def test_set_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n },\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def 
test_n_path_reactions(self):\n self.assertEqual(self.Npath, 3)", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def test_ExplorePath( self ):\n links = []\n n1 = graph.Node( 10, 10 )\n n2 = graph.Node( 10, 20 )\n n3 = graph.Node( 10, 30 )\n n4a = graph.Node( 5, 40 )\n n4b = graph.Node( 15, 40 )\n n5a = graph.Node( 5, 50 )\n n5b = graph.Node( 15, 50 )\n n6a = graph.Node( 5, 60 )\n n6b = graph.Node( 15, 60 )\n n7 = graph.Node( 10, 70 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n4a ) )\n links.append( graph.Link( n3, n4b ) )\n links.append( graph.Link( n4a, n5a ) )\n links.append( graph.Link( n4b, n5b ) )\n links.append( graph.Link( n5a, n6a ) )\n links.append( graph.Link( n6a, n7 ) )\n links.append( graph.Link( n5b, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, n1, n1 )\n expected = [ n1, n2, n3, n4b, n5b, n7 ]\n self.assertEqual( expected, actual )", "def test_cycle(self):\n g = Graph(3)\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n # g.add_edge(0, 0)\n assert g.contains_cycle() is False\n g.add_edge(1, 2)\n assert g.contains_cycle() is True", "def graph(self):\n ...", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def pick_androdioecious_parents(simu, config):\n rng = simu.getRNG()\n runif = rng.randUniform\n rint = rng.randInt\n try:\n sstar = config.sstar\n def compound_generator(pop):\n \"\"\"\n Picks up parent(s) under androdioecy using a compound parameter.\n \"\"\"\n gen = -1\n while True:\n ngen = pop.dvars().gen\n if gen != ngen:\n # At the beginning of a generation, extract the\n # sex-specific subpopulations from a parental\n # population. 
The sex-specific subpopulations are used\n # throughout mating events in one generation.\n gen = ngen\n males = pop.extractSubPops(subPops=[(0, 0)])\n herms = pop.extractSubPops(subPops=[(0, 1)])\n nmale = males.popSize()\n nherm = herms.popSize()\n\n if runif() < sstar: # uniparental\n yield herms.individual(rint(nherm))\n else: # biparental\n yield [males.individual(rint(nmale)), herms.individual(rint(nherm))]\n return compound_generator\n except KeyError:\n stilde = config.stilde\n tau = config.tau\n def compound_generator(pop):\n \"\"\"\n Picks up parent(s) under androdioecy using fundamental parameters.\n \"\"\"\n gen = -1\n while True:\n ngen = pop.dvars().gen\n if gen != ngen:\n # At the beginning of a generation, extract the\n # sex-specific subpopulations from a parental\n # population. The sex-specific subpopulations are used\n # throughout mating events in one generation.\n gen = ngen\n males = pop.extractSubPops(subPops=[(0, 0)])\n herms = pop.extractSubPops(subPops=[(0, 1)])\n nmale = males.popSize()\n nherm = herms.popSize()\n\n if runif() < stilde: # proportion of self-fertlized egg\n if runif() < tau: # survival rate of a uniparental zygote rel to a biparental z.\n yield herms.individual(rint(nherm))\n else: # biparental\n yield [males.individual(rint(nmale)), herms.individual(rint(nherm))]\n return compound_generator", "def test_12_parents_as_minus_1_0_0_1_1_2_3_4_4_5_7_7_9_12(self):\n parents = [ -1, 0, 0, 1, 1, 2, 3, 4, 4, 5, 7, 7, 9, 12 ]\n self.init(parents)\n self.assertEqual(6, self.method_under_test())", "def crossover(parent1, parent2):\n path1 = random_subtree(parent1, \"\")\n path2 = random_subtree(parent2, \"\")\n parent1 = parent1.copy()\n parent2 = parent2.copy()\n loc1 = parent1\n loc1parent = parent1\n loc2 = parent2\n loc2parent = parent2\n for i in range(len(path1)):\n loc1parent = loc1\n if path1[i] == \"1\":\n loc1 = loc1parent.get_right()\n else:\n loc1 = loc1parent.get_left()\n for i in range(len(path2)):\n loc2parent = loc2\n if path2[i] == \"1\":\n loc2 = loc2parent.get_right()\n else:\n loc2 = loc2parent.get_left()\n if(len(path1)-1 >= 0):\n if path1[len(path1)-1] == \"1\":\n loc1parent.right = loc2\n else:\n loc1parent.left = loc2\n if(len(path2) - 1 >= 0):\n if path2[-1] == \"1\":\n loc2parent.right = loc1\n else:\n loc2parent.left = loc1\n return (parent1, parent2)", "def test_4_parents_as_minus_1_0_1_2(self):\n parents = [ -1, 0, 1, 2 ]\n self.init(parents)\n self.assertEqual(4, self.method_under_test())", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_5_parents_as_1_minus_1_1_0_0(self):\n parents = [ 1, -1, 1, 0, 0 ]\n self.init(parents)\n self.assertEqual(3, self.method_under_test())", "def test_3_parents_as_1_minus_1_1(self):\n parents = [ 1, -1, 1 ]\n self.init(parents)\n self.assertEqual(2, self.method_under_test())" ]
[ "0.7205235", "0.70020497", "0.62084246", "0.6038614", "0.5988837", "0.58964515", "0.58667445", "0.58020157", "0.5726468", "0.5667711", "0.5539633", "0.5539633", "0.5504206", "0.54905653", "0.5482558", "0.5460695", "0.54255486", "0.5419617", "0.5398533", "0.5355489", "0.53264356", "0.53245705", "0.5312008", "0.5307634", "0.5303363", "0.53014946", "0.5294228", "0.523673", "0.5232471", "0.5228123", "0.52248925", "0.52232295", "0.5218298", "0.52174497", "0.52111673", "0.52089536", "0.51829195", "0.51745975", "0.51708", "0.51655567", "0.5164763", "0.51581097", "0.51574516", "0.51436824", "0.5132396", "0.5131299", "0.51306385", "0.51233494", "0.5118409", "0.5116303", "0.5108168", "0.510709", "0.5106759", "0.510241", "0.50905424", "0.5089184", "0.50854236", "0.507906", "0.5076508", "0.50747967", "0.50699407", "0.5069547", "0.50572777", "0.5057174", "0.5051708", "0.50352585", "0.50333", "0.50322187", "0.50287956", "0.5022825", "0.5014624", "0.50122243", "0.50086933", "0.5006136", "0.5001657", "0.50005764", "0.49920267", "0.49919513", "0.49910122", "0.49844953", "0.49769056", "0.49724922", "0.4971587", "0.49644488", "0.49549103", "0.4952674", "0.49509016", "0.4941937", "0.49410596", "0.49405313", "0.49402314", "0.49369624", "0.4928777", "0.49261698", "0.49260587", "0.49215972", "0.4920297", "0.4917804", "0.49171856", "0.49147215" ]
document_score: 0.6087695
document_rank: 3

query: Test the popxl subgraph in parent in repeat example
document:
def test_documentation_popxl_repeat_2(self):
    filename = "repeat_graph_2.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_basic_subgraph(self):\n filename = \"basic_graph.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_create_multi_subgraph(self):\n filename = \"create_multi_graphs_from_same_func.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_dummy3(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.parenthesize() is xp)", "def test_documentation_popxl_repeat_1(self):\n filename = \"repeat_graph_1.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sub_graph_merging(self):", "def test_lacking_parent(self):\n pass", "def crossover(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n goldenRatio = (1.0 + sqrt(5)) / 2.0\n dx = np.zeros_like(pop[0])\n children = []\n used = []\n for i in range(0, int(self.fracElite * len(pop)), 1):\n r = int(rand() * self.population)\n while r in used or r == i:\n r = int(rand() * self.population)\n\n used.append(i)\n children.append(cp.deepcopy(pop[r]))\n dx = abs(pop[i] - children[i]) / goldenRatio\n children[i] = children[i] + dx * varID + np.round(dx * intDiscID)\n children[i] = simple_bounds(children[i], self.lb, self.ub)\n\n return (\n children, used)", "def test_parent_label(self):\n l = self.d.label(1)\n l2 = self.d.label(31405)\n\n self.assertTrue(l.parent_label is None)\n self.assertTrue(l2 in l.sublabels)\n self.assertEqual(l2.parent_label, l)", "def test_parent_with_iterables(self):\n def makeCubesAndGrp():\n cmds.file(new=1, f=1)\n cubes = []\n for x in range(10):\n cubes.append(pm.polyCube()[0])\n group = pm.group(empty=True)\n return cubes, group\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[:4] + [group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], cubes[2], cubes[3], group)\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent(cubes[0], cubes[1], [cubes[2], cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)\n\n cubes, group = makeCubesAndGrp()\n res1 = pm.parent([cubes[0], cubes[1]], cubes[2], [cubes[3], group])\n self.assertEqual(cubes[:4], res1)\n res2 = pm.parent(cubes, group)\n self.assertEqual(cubes[4:], res2)", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self,\n self.node.parent\n )", "def testParent(self):\n self.assertEqual(\n self.parent,\n self.mr.parent\n )", "def test_simple(self):\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'p'},\n {'edge_info': '1', 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 
'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n content = '((h,p)hp:1,g)hpg;'\n self._do_test(content, exp)\n content = '((h,[pretest]p[test][posttest])hp,g)hpg;'\n exp = [{'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'type': NewickEvents.OPEN_SUBTREE, 'comments': []},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'h'},\n {'edge_info': None, 'type': NewickEvents.TIP,\n 'comments': ['pretest', 'test', 'posttest'], 'label': 'p'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hp'},\n {'edge_info': None, 'type': NewickEvents.TIP, 'comments': [], 'label': 'g'},\n {'edge_info': None, 'type': NewickEvents.CLOSE_SUBTREE,\n 'comments': [], 'label': 'hpg'}\n ]\n self._do_test(content, exp)", "def reproduce(self, popDensity, activeDrugs):\r\n # TODO\r\n\r\n resistAll = True\r\n for drug in activeDrugs:\r\n if (self.resistances[drug]==False):resistAll = False\r\n \r\n\r\n\r\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\r\n \r\n \r\n if (resistAll and random.random() < maxReproduceProb):\r\n childResistances = {}\r\n for drug in self.resistances:\r\n if random.random() < self.mutProb:\r\n childResistances[drug] = (not self.resistances[drug])\r\n else:childResistances[drug] = (self.resistances[drug])\r\n\r\n \r\n childOfVirus = ResistantVirus(self.maxBirthProb, self.clearProb,childResistances,self.mutProb)\r\n return childOfVirus\r\n \r\n else: raise NoChildException('Child not created!')", "def test_generator8(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = (xpb.foo.bar | xpb.x.y).parenthesize()\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() & xpb.c\n xp2 = b() & xpb.d\n xp1_exp = '(/foo/bar or /x/y) and /c'\n xp2_exp = '(/foo/bar or /x/y) and /d'\n base_exp = '(/foo/bar or /x/y)'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def _cross_parents(self):\n while len(self.children_population) < self.children_count:\n parent_1, parent_2 = random.sample(self.population, k=2)\n self.children_population.extend(self.crossover.cross(parent_1, parent_2))", "def test_documentation_popxl_multi_callsites_graph_input(self):\n filename = \"multi_call_graph_input.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def inversion_crossover(self, pop):\n children, tmpNonComb, used = ([] for i in range(3))\n for i in range(0, int(len(pop) * self.fracElite), 1):\n r = int(rand() * len(pop))\n while r == i:\n r = int(rand() * len(pop))\n\n if sum(self.cID + self.dID + self.iID) != 0:\n nonComb1 = pop[i][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n nonComb2 = pop[r][:np.where(self.cID + self.dID + self.iID == 1)[0][(-1)] + 1]\n if sum(self.xID) != 0:\n comb1 = pop[i][:np.where(self.xID == 1)[0][(-1)] + 1]\n comb2 = pop[r][:np.where(self.xID == 1)[0][(-1)] + 1]\n if sum(self.cID + self.dID + self.iID) != 0:\n c = int(rand() * len(nonComb1))\n if rand() > 0.5:\n tmpNonComb.append(np.array(nonComb1[0:c + 1].tolist() + nonComb2[c + 1:].tolist()))\n else:\n tmpNonComb.append(np.array(nonComb2[0:c + 1].tolist() + nonComb1[c + 1:].tolist()))\n used.append(i)\n if sum(self.xID) != 0:\n c = int(rand() * len(comb1))\n for c1 in range(c, 
len(comb1), 1):\n d2 = (contains_sublist(comb2, comb1[c1]) + 1) % len(comb1)\n d1 = contains_sublist(comb1, comb2[d2])\n c2 = contains_sublist(comb2, comb1[((d1 + 1) % len(comb1))]) % len(comb1)\n tmp1 = cp.copy(comb1)\n if c1 < d1:\n tmp1[(c1 + 1):(d1 + 1)] = list(reversed(tmp1[c1 + 1:d1 + 1]))\n else:\n tmp1[d1:c1] = list(reversed(tmp1[d1:c1]))\n tmp2 = cp.copy(comb2)\n if c2 < d2:\n tmp2[c2:d2] = list(reversed(tmp2[c2:d2]))\n else:\n tmp2[(d2 + 1):(c2 + 1)] = list(reversed(tmp2[d2 + 1:c2 + 1]))\n if sum(self.cID + self.dID + self.iID) == 0 and sum(self.xID) != 0:\n children.append(tmp1)\n children.append(tmp2)\n elif sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) != 0:\n children.append(np.concatenate(tmpNonComb[(-1)], tmp1))\n children.append(np.concatenate(tmpNonComb[(-1)], tmp2))\n used.append(i)\n used.append(r)\n\n if sum(self.cID + self.dID + self.iID) != 0 and sum(self.xID) == 0:\n children = tmpNonComb\n return (\n children, used)", "def mutate(self, pop):\n intDiscID = self.iID + self.dID\n varID = self.cID\n children = []\n k = rand(len(pop), len(pop[0])) > self.fracMutation * rand()\n childn1 = cp.copy(permutation(pop))\n childn2 = cp.copy(permutation(pop))\n r = rand()\n for j in range(0, len(pop), 1):\n n = np.array(childn1[j] - childn2[j])\n stepSize = r * n * varID + (n * intDiscID).astype(int)\n tmp = (pop[j] + stepSize * k[j, :]) * varID + (pop[j] + stepSize * k[j, :]) * intDiscID % (self.ub + 1 - self.lb)\n children.append(simple_bounds(tmp, self.lb, self.ub))\n\n return children", "def test_aggregation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.aggregation_default = 'a'\n cfg.genome.aggregation_options = {'a': 1, 'b': 2}\n gene1, gene2 = get_simple_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n if gene3.aggregation == gene1.aggregation:\n p1 = True\n elif gene3.aggregation == gene2.aggregation:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(gene3.aggregation, gene1.aggregation)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.aggregation, gene2.aggregation)", "def test_get_child():\n \n root_ts = TrackSegment(flow_dict=flow_dict) \n \n # ROOT MODULE\n start = root_ts.get_child(\"start\")\n # check depth\n assert(start.depth == root_ts.depth+1)\n # check parent\n assert(start.parent is root_ts)\n # check module_id\n assert(start.module_id == 'start')\n \n # CHILD (1,2,3)\n root_ts_get_child_result = root_ts.get_child((1,2,3))\n # check depth\n assert(root_ts_get_child_result.depth == root_ts.depth+1)\n # check parent\n assert(root_ts_get_child_result.parent is root_ts)\n \n # CHILD (2,3,4)\n root_ts_get_child_result2 = root_ts_get_child_result.get_child((2,3,4))\n # check depth\n assert(root_ts_get_child_result2.depth == root_ts.depth+2)\n # check parent \n assert(root_ts_get_child_result2.parent is root_ts_get_child_result)\n \n print(\"TEST GET_CHILD: success!\")", "def 
test_has_children_property(mock_amg):\n\n # split a cell so we can be sure it should have children\n mock_amg.cells[4].split()\n\n assert mock_amg.cells[4].has_children()\n assert not mock_amg.cells[1].has_children()\n assert not mock_amg.cells[4].children['bl'].has_children()", "def test_aggregation(self):\n # Folder must be root to load in make_net properly\n if os.getcwd().split('\\\\')[-1] == 'tests': os.chdir('..')\n \n # Create parents\n cfg = Config()\n cfg.genome.aggregation_default = 'a'\n cfg.genome.aggregation_options = {'a': 1, 'b': 2}\n gene1, gene2 = get_output_node_gene(0, cfg.genome)\n \n # Ratio of 0.5, so possible to cross to both parents\n p1 = False\n p2 = False\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0.5)\n if gene3.aggregation == gene1.aggregation:\n p1 = True\n elif gene3.aggregation == gene2.aggregation:\n p2 = True\n else:\n raise self.failureException(\"Must be mutated to one of parent's values\")\n if p1 and p2: break\n self.assertTrue(p1 and p2)\n \n # Ratio of 1, so always inherits from first parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=1)\n self.assertEqual(gene3.aggregation, gene1.aggregation)\n \n # Ratio of 0, so always inherits from second parent\n for _ in range(100):\n gene3 = gene1.crossover(other=gene2, cfg=cfg.genome, ratio=0)\n self.assertEqual(gene3.aggregation, gene2.aggregation)", "def run_tests(g: Graph) -> None:\n print( g.nodes() , \"->\" , ', '.join([f\"{l}\" for l in g.scc()]) , f\"({g.cyclic()})\" )\n for n in g.nodes():\n for m in [m for m in g.nodes() if m != n]:\n p = g.path(n,m)\n if p is not None:\n assert p[0] == n\n assert p[-1] == m\n for i in range(1,len(p)):\n assert g.is_edge(p[i-1], p[i])\n print(\" \", n, \"->\", m, \":\", ' -> '.join([f\"{v}\" for v in p]))", "def bclone():\n node = nuke.selectedNodes()\n if len(node)==1:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(node[0].name()+\"\\nClone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(node[0].name()+\"\\nClone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n\n if len(node)==0:\n clone1 = nuke.createNode(\"NoOp\", inpanel = False)\n clone1.setName(\"Bclone\")\n clone1['label'].setValue(\"Clone_Parent\")\n clone1['tile_color'].setValue(2521651711)\n clone1['note_font_color'].setValue(1583243007)\n clone1xpos = clone1['xpos'].getValue()\n clone1ypos = clone1['ypos'].getValue()\n \n clone2 = nuke.createNode(\"NoOp\", inpanel = False)\n clone2.setName(\"Bclone\")\n clone2['label'].setValue(\"Clone\")\n clone2['hide_input'].setValue(True)\n clone2['tile_color'].setValue(2521651711)\n clone2['note_font_color'].setValue(1583243007)\n clone2['xpos'].setValue(clone1xpos)\n clone2['ypos'].setValue(clone1ypos)\n if len(node)!=0 and len(node)!=1:\n nuke.message('Just select one node to clone !')", "def show_subgraph(dfs_codes, nsupport, mapper):\n\tglobal __subgraph_count\n\n\tg = build_graph(dfs_codes)\n\tg.id = __subgraph_count\n\t__subgraph_count += 1\n\tg.gprint(nsupport, mapper)", "def parents_loop(self):\r\n while 
len(self.parents) > 0:\r\n children = 0\r\n self.parent1 = random.choice(self.parents)\r\n index = self.parents.index(self.parent1)\r\n del self.parents[index]\r\n\r\n self.parent2 = random.choice(self.parents)\r\n index = self.parents.index(self.parent2)\r\n del self.parents[index]\r\n\r\n while children < 2:\r\n self.child = copy.deepcopy(self.parent1)\r\n \r\n self.battery_loop()\r\n\r\n childsolution = random_algo.Random(self.child, self.cable_cost, self.battery_cost)\r\n childsolution.change_battery_or_house('change_battery')\r\n childsolution.change_battery_or_house('change_house')\r\n\r\n if (self.child.valid_solution() and self.child not in self.district_population\r\n and self.child not in self.best_districts and self.child not in self.worst_districts):\r\n self.district_population.append(self.child)\r\n self.cost_populations.append(self.child.total_cost(self.battery_cost, self.cable_cost))\r\n children += 1", "def test_restore_multiple_in_subgraph(self):\n subgraph = self._subgraph()\n subgraph['id'] = 15\n task1 = self._remote_task()\n task1['id'] = 1\n task2 = self._remote_task()\n task2['id'] = 2\n task1['parameters']['containing_subgraph'] = 15\n task2['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task1, task2])\n assert len(graph.tasks) == 3\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n # those are all references to the same subgraph, the subgraph was\n # NOT restored multiple times\n assert remote_tasks[0].containing_subgraph \\\n is remote_tasks[1].containing_subgraph \\\n is subgraphs[0]\n\n assert len(subgraphs[0].tasks) == 2", "def slice_graph_fwd( startea, reg ): \r\n\tgraph = vcg_Graph.vcgGraph({\"title\":'\"Slice for %s\"' % reg, \\\r\n\t\t\"manhattan_edges\":\"no\", \"layoutalgorithm\":\"maxdepth\"})\r\n\t#\r\n\t# Retrieve the name of the current basic block\r\n\t# \r\n\tworklist = []\r\n\tdata_bib = {}\r\n\tstartnode = slice_node( startea, 0, reg )\r\n\trootnode = graph.Add_Node( startnode.to_name() )\r\n\tdata_bib[ startnode.to_name() ] = startnode\r\n\tworklist.insert( 0, rootnode )\r\n\twhile len( worklist ) > 0:\r\n\t\tcurrnode = worklist.pop()\r\n\t\tcurrslice = data_bib[ currnode.get_name() ]\r\n\t\ttgt_reg = currslice.get_target_reg()\r\n\t\tif tgt_reg == \"END\":\r\n\t\t# Do not process this node any further\r\n\t\t\tpass\r\n\t\telif tgt_reg == \"\" or (( len( currslice.get_lines()) > 0) and \\\r\n\t\t\tcurrslice.endea != currslice.get_lines()[-1][0]):\r\n\t\t\t# Nothing much happening here, just proceed to parent bocks\r\n\t\t\tif ua_mnem( currslice.endea ) == \"call\":\r\n\t\t\t\txrefs = get_short_crefs_from( currslice.endea )\r\n\t\t\telse:\r\n\t\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\telse:\r\n\t\t\t# Register was modified, use new register\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, tgt_reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ 
newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\t\t\txrefs = get_crefs_from( currslice.endea )\r\n\t\t\tfor ref in xrefs:\r\n\t\t\t\tnewslice = slice_node( ref, 0, currslice.reg )\r\n\t\t\t\tif graph.Get_Node( newslice.to_name() ) == 0:\r\n\t\t\t\t\tnewnode = graph.Add_Node( newslice.to_name() )\r\n\t\t\t\t\tworklist.insert( 0, newnode )\r\n\t\t\t\t\tdata_bib[ newslice.to_name() ] = newslice\r\n\t\t\t\tgraph.Add_Link( currnode.get_name(), newslice.to_name())\r\n\treturn [ graph, data_bib ]", "def testParentage(self):\n self.assertEqual(\n self.cd,\n self.media_ref.parent\n )\n\n self.assertEqual(\n self.cd,\n self.cc.parent\n )", "def test_generator7(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.foo.bar & xpb.x.y\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = b() | xpb.c\n xp2 = b() | xpb.d\n xp1_exp = '/foo/bar and /x/y or /c'\n xp2_exp = '/foo/bar and /x/y or /d'\n base_exp = '/foo/bar and /x/y'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(base_gen._parent is None)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def populate_graph(self):", "def test_dag_preserves_superrep(dimension, conversion):\n qobj = conversion(rand_super_bcsz(dimension))\n assert qobj.superrep == qobj.dag().superrep", "def test__graph_structure():\n assert PES_GRAPH == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'),\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n assert pgraph.species(PES_GRAPH) == (\n ('CH2CH2+OH', 'CH2CH+H2O', 'C2H4OH', 'C2H5O', 'CH3CHO+H'))\n assert pgraph.channels(PES_GRAPH) == (\n (frozenset({0, 1}), frozenset({0, 2}), frozenset({2, 3}),\n frozenset({3, 4}), frozenset({1, 2})))\n print('\\npes graph')\n print(PES_GRAPH)", "def test_fpy_parent():\n\n data = \"\"\"\n<depletion_chain>\n <nuclide name=\"U235\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"193405400.0\"/>\n <neutron_fission_yields>\n <energies>0.0253</energies>\n <fission_yields energy=\"0.0253\">\n <products>Te134 Zr100 Xe138</products>\n <data>0.062155 0.0497641 0.0481413</data>\n </fission_yields>\n </neutron_fission_yields>\n </nuclide>\n <nuclide name=\"U238\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"200.0e6\"/>\n <neutron_fission_yields parent=\"U235\"/>\n </nuclide>\n</depletion_chain>\n \"\"\"\n\n root = ET.fromstring(data)\n elems = root.findall('nuclide')\n u235 = nuclide.Nuclide.from_xml(elems[0], root)\n u238 = nuclide.Nuclide.from_xml(elems[1], root)\n\n # Make sure U238 yield is same as U235\n assert np.array_equal(u238.yield_data.energies, u235.yield_data.energies)\n assert np.array_equal(u238.yield_data.yield_matrix, u235.yield_data.yield_matrix)\n\n # Make sure XML element created has single attribute\n elem = u238.to_xml_element()\n fpy_elem = elem.find('neutron_fission_yields')\n assert fpy_elem.get('parent') == 'U235'\n assert len(fpy_elem) == 0\n\n data = \"\"\"\n<depletion_chain>\n <nuclide name=\"U235\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"193405400.0\"/>\n </nuclide>\n <nuclide name=\"U238\" reactions=\"1\">\n <reaction type=\"fission\" Q=\"200.0e6\"/>\n <neutron_fission_yields parent=\"U235\"/>\n </nuclide>\n</depletion_chain>\n \"\"\"\n\n # U235 yields are missing, so we should get an exception\n root = ET.fromstring(data)\n 
elems = root.findall('nuclide')\n with pytest.raises(ValueError, match=\"yields\"):\n u238 = nuclide.Nuclide.from_xml(elems[1], root)", "def test_skirmish_parenting(self):\n root = SkirmishAction()\n a1 = SkirmishAction()\n a2 = SkirmishAction()\n self.sess.add_all([root, a1, a2])\n self.sess.commit()\n\n root.children.append(a1)\n root.children.append(a2)\n self.sess.commit()\n\n self.assertEqual(a1.parent_id, root.id)\n self.assertEqual(a2.parent_id, root.id)", "def test_generator6(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp1 = xpb.a.b.c.join(b())\n xp2 = xpb.test.join(b())\n xp1_exp = '/a/b/c/base/foo/bar'\n xp2_exp = '/test/base/foo/bar'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def test_grandchildren():\n\n # note c.upto(\"status\").desired.grandchildren\n # this is the same as *c.upto(\"status\").desired in python3.5+\n res = conf.status.conditions.choose(lambda c: (c.type, c.reason, c.upto(\"status\").desired.grandchildren))\n assert \"type\" in res\n assert \"reason\" in res\n assert \"version\" in res\n assert \"image\" in res\n assert \"force\" in res", "def subplot_1(self, Graph, n_tabs):\n # The code below walks does a pre-order traversal of the tree\n # For exact details about the structure of self.Graph refer description in init function.\n\n attr_name = list(Graph.keys())[0]\n print(\"\\t\"*(n_tabs),\"feature name :\",attr_name)\n for val in list(Graph[attr_name].keys()):\n print(\"\\t\"*(n_tabs+1),\"feature value :\",val)\n sub_graph = Graph[attr_name][val]\n if (type(sub_graph)==dict):\n self.subplot_1(sub_graph, n_tabs+2)\n else:\n print(\"\\t\"*(n_tabs+2),\"class :\", sub_graph)", "def hasSiblings():", "def test_get_related_nodes(self):\n pass", "def test_createSubLinkographWithoutCommands(self):\n self.performTestForParams()", "def crossover (self, p1, p2, p_pop, c1, c2, c_pop) :\n assert self.crossover_count < self.pop_size\n assert self.get_iteration () == self.last_gen\n self.parents.append (p1)\n self.parents.append (p2)\n self.crossover_count += 2\n if self.crossover_count == self.pop_size :\n assert (self.get_iteration () == self.last_gen)\n print (self.get_iteration ())\n sys.stdout.flush ()\n self.build_model (p_pop)\n self.sample_model (c1, c2, c_pop)\n self.crossover_count = 0\n self.parents = []\n self.children = {}\n self.last_gen += 1\n self.clear_cache ()", "def test_Tree():", "def reproduce(self, popDensity, activeDrugs):\n\n # Checks the resistance of the mother virus to all drugs in activeDrugs list\n # if mother virus is resistant to all the drugs, reproduction will proceed\n for drug in activeDrugs:\n if not self.isResistantTo(drug):\n raise NoChildException()\n\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\n\n if random.random() < maxReproduceProb:\n resistance_trait = {}\n # Calculate the transfer of resistances property to child virus\n for drug in self.resistances.keys():\n if random.random() <= (1 - self.mutProb):\n resistance_trait[drug] = self.resistances[drug]\n else:\n resistance_trait[drug] = not 
self.resistances[drug]\n\n childOfVirus = ResistantVirus(self.maxBirthProb, self.clearProb, resistance_trait, self.mutProb)\n return childOfVirus\n else:\n raise NoChildException()", "def dump_subgraph_for_debug(self):\n\n import pypipegraph2 as ppg\n\n nodes = []\n seen = set()\n edges = []\n counter = [0]\n node_to_counters = {}\n\n def descend(node):\n if node in seen:\n return\n seen.add(node)\n j = self.runner.jobs[node]\n if isinstance(j, ppg.FileInvariant):\n nodes.append(f\"Path('{counter[0]}').write_text('A')\")\n nodes.append(f\"job_{counter[0]} = ppg.FileInvariant('{counter[0]}')\")\n elif isinstance(j, ppg.ParameterInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.ParameterInvariant('{counter[0]}', 55)\"\n )\n elif isinstance(j, ppg.FunctionInvariant):\n nodes.append(\n f\"job_{counter[0]} = ppg.FunctionInvariant('{counter[0]}', lambda: 55)\"\n )\n elif isinstance(j, ppg.SharedMultiFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.SharedMultiFileGeneratingJob('{counter[0]}', {[x.name for x in j.files]!r}, dummy_smfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.TempFileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.TempFileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.FileGeneratingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.FileGeneratingJob('{counter[0]}', dummy_fg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiTempFileGeneratingJob):\n files = [counter[0] + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiTempFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.MultiFileGeneratingJob):\n files = [str(counter[0]) + \"/\" + x.name for x in j.files]\n nodes.append(\n f\"job_{counter[0]} = ppg.MultiFileGeneratingJob({files!r}, dummy_mfg, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.DataLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.DataLoadingJob('{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n elif isinstance(j, ppg.AttributeLoadingJob):\n nodes.append(\n f\"job_{counter[0]} = ppg.AttributeLoadingJob('{counter[0]}', DummyObject(), 'attr_{counter[0]}', lambda: None, depend_on_function=False)\"\n )\n else:\n raise ValueError(j)\n node_to_counters[node] = counter[0]\n counter[0] += 1\n for parent in self.runner.dag.predecessors(node):\n descend(parent)\n\n def build_edges(node):\n for parent in self.runner.dag.predecessors(node):\n edges.append(\n f\"edges.append(('{node_to_counters[node]}', '{node_to_counters[parent]}'))\"\n )\n build_edges(parent)\n\n descend(self.job_id)\n edges.append(\"edges = []\")\n build_edges(self.job_id)\n edges.extend(\n [\n \"for (a,b) in edges:\",\n \" if a in ppg.global_pipegraph.jobs and b in ppg.global_pipegraph.jobs:\",\n \" ppg.global_pipegraph.jobs[a].depends_on(ppg.global_pipegraph.jobs[b])\",\n ]\n )\n with open(\"subgraph_debug.py\", \"w\") as op:\n lines = \"\"\"\nclass DummyObject:\n pass\n\ndef dummy_smfg(files, prefix):\n Path(prefix).mkdir(exist_ok=True, parents=True)\n for f in files:\n f.write_text(\"hello\")\n\n\ndef dummy_mfg(files):\n for f in files:\n f.parent.mkdir(exist_ok=True, parents=True)\n f.write_text(\"hello\")\n\ndef dummy_fg(of):\n of.parent.mkdir(exist_ok=True, parents=True)\n of.write_text(\"fg\")\n\n\"\"\".split(\n \"\\n\"\n )\n lines += nodes\n lines += edges\n lines += [\"\", \"ppg.run()\", \"ppg.run\"]\n\n op.write(\"\\n\".join(\" \" + l for l in lines))", "def test_tree_mode4(self):\n 
xpb = XPathBuilder()\n xp_1 = xpb.foo\n xp_2 = xpb.baz\n xp_and = xp_1 & xp_2\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 2)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_and._children[1] is xp_2)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n self.assertTrue(xp_2._parent is xp_and)\n self.assertTrue(len(xp_2._children) == 0)\n xp_2.reparent(None)\n # check references after remove\n self.assertTrue(xp_and._parent is None)\n self.assertTrue(len(xp_and._children) == 1)\n self.assertTrue(xp_and._children[0] is xp_1)\n self.assertTrue(xp_1._parent is xp_and)\n self.assertTrue(len(xp_1._children) == 0)\n # xp_2's references were changed\n self.assertTrue(xp_2._parent is None)\n self.assertTrue(len(xp_2._children) == 0)", "def sample_from_subpop(instance, params, subpop):\n y = subpop\n x = np.random.choice([-1,+1], size=params['d'])\n x[instance['indices'][subpop]] = instance['values'][subpop]\n return x, y, subpop", "def __init__(self, firstParent, secondParent):\n CrossOver.__init__(self, \"Group Point CrossOver\", firstParent, secondParent)", "def setUp(self):\n self.complete = nx.Graph()\n self.complete.add_edge(1, 2)\n self.complete.add_edge(2, 3)\n self.complete.add_edge(1, 3)\n\n self.small_tree = nx.Graph()\n self.small_tree.add_edge(1, 2)\n self.small_tree.add_edge(2, 3)\n self.small_tree.add_edge(3, 4)\n self.small_tree.add_edge(1, 4)\n self.small_tree.add_edge(2, 4)\n self.small_tree.add_edge(4, 5)\n self.small_tree.add_edge(5, 6)\n self.small_tree.add_edge(5, 7)\n self.small_tree.add_edge(6, 7)\n\n self.deterministic_graph = nx.Graph()\n self.deterministic_graph.add_edge(1, 2)\n self.deterministic_graph.add_edge(1, 3)\n self.deterministic_graph.add_edge(3, 4)\n self.deterministic_graph.add_edge(2, 4)\n self.deterministic_graph.add_edge(3, 5)\n self.deterministic_graph.add_edge(4, 5)\n self.deterministic_graph.add_edge(3, 6)\n self.deterministic_graph.add_edge(5, 6)", "def test_1():\n for _ in range(100):\n G = NetworkTopo()\n flows = MulticastFlows(G, 10, 40)\n\n spt = ShortestPathTree(G, flows)\n\n for T in spt.multicast_trees:\n assert len(nx.cycle_basis(T)) == 0", "def reproduce(self, popDensity):\r\n\r\n # Does the virus reproduce? 
\r\n maxReproduceProb = self.maxBirthProb * (1 - popDensity)\r\n \r\n if random.random() < maxReproduceProb:\r\n childOfVirus = SimpleVirus(self.maxBirthProb, self.clearProb)\r\n return childOfVirus\r\n \r\n else: raise NoChildException('Child not created!')", "def create_subbasin_graph():\n subbasin_to_downstream = pd.read_csv(module_dir + '/../data/simulations_shervan/test.rvh', sep='\\s+', skiprows=7, nrows=724, names=['subbasin', 'downstream_subbasin'], usecols=[1,2])\n subbasin_to_downstream['subbasin'] = subbasin_to_downstream['subbasin']\n subbasin_to_downstream['downstream_subbasin'] = 'sub' + subbasin_to_downstream['downstream_subbasin'].astype(str)\n subbasin_to_downstream['edge'] = 1\n\n for subbasin in subbasin_to_downstream['subbasin'].unique():\n is_sink = 1 if len(subbasin_to_downstream[(subbasin_to_downstream['subbasin'] == subbasin) & subbasin_to_downstream['edge'] == 1]) == 0 else 0\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': subbasin, 'downstream_subbasin': subbasin, 'edge': is_sink}, ignore_index=True)\n subbasin_to_downstream = subbasin_to_downstream.append({'subbasin': 'sub-1', 'downstream_subbasin': 'sub-1', 'edge': 1}, ignore_index=True)\n \n adj = subbasin_to_downstream.pivot(index='subbasin', columns='downstream_subbasin', values='edge').fillna(0) \n adj = adj.sort_index(axis=0).sort_index(axis=1)\n \n G = nx.from_numpy_matrix(adj.values, parallel_edges=False, create_using=nx.DiGraph())\n label_mapping = dict(zip(range(len(adj.values)), adj.index))\n G = nx.relabel_nodes(G, label_mapping)\n \n return G", "def test_input_not_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.grow([dim + 1], nx.empty_graph(dim))", "def crossover(parent: list):\n ### GENERAL METHOD\n child1 = []\n child2 = []\n child = []\n geneA = int(random.random() * len(parent[0]))\n geneB = int(random.random() * len(parent[0]))\n startgene = min(geneA, geneB)\n endgene = max(geneA, geneB)\n for i in range(startgene, endgene):\n child1.append(parent[0][i])\n child2 = [item for item in parent[1] if item not in child1]\n child = child2[:startgene] + child1 + child2[startgene:]\n return child", "def test_build_graph(self):\n insert_good_data()\n dataframe = get_dataframe()\n results = processing.build_graph(dataframe, figure_path, False)\n # 1\n self.assertEqual(results, \"Updated html File and Opened it\")", "def test_10_parents_as_minus_1_0_0_1_2_3_3_4_6_6(self):\n parents = [ -1, 0, 0, 1, 2, 3, 3, 4, 6, 6 ]\n self.init(parents)\n self.assertEqual(5, self.method_under_test())", "def reproduce(self,prob=None):\n for parent in self.agents:\n if parent.fledged(prob):\n fledgling = Agent()\n self._fledgling_move(fledgling,parent.hex)\n self.agents.append(fledgling)\n self.pop_size += 1", "def genNextPop(prevPop, masterList, popSize):\n parSize = int(popSize / 10) #top 10%\n parentPop = [Team(prevPop[i].roster) for i in range(parSize)]\n parentPop = getStats(parentPop, masterList) # inefficent\n rosterSize = len(parentPop[0].roster)\n newPop = []\n #parentsList = [] #debug\n for i in range(popSize):\n chromosome = doCrossover(parentPop, parSize, rosterSize)\n newPop.append(Team(chromosome))\n getStats(newPop, masterList)\n #showStats(newPop, masterList, \"all\")\n #debug code\n #for playerInd in chromosome:\n # print(masterList[playerInd].pos)\n\n return newPop", "def getChildren():", "def test_restore_with_subgraph(self):\n subgraph = self._subgraph()\n task = self._remote_task()\n subgraph['id'] = 15\n 
task['parameters']['containing_subgraph'] = 15\n\n graph = self._restore_graph([subgraph, task])\n assert len(graph.tasks) == 2\n subgraphs = [op for op in graph.tasks if op.is_subgraph]\n remote_tasks = [op for op in graph.tasks if not op.is_subgraph]\n\n assert len(subgraphs) == 1\n assert len(remote_tasks) == 1\n\n assert len(subgraphs[0].tasks) == 1\n assert remote_tasks[0].containing_subgraph is subgraphs[0]", "def test_removes_empty_subgraph(self):\n ctx = MockWorkflowContext()\n g = TaskDependencyGraph(ctx)\n\n # sg1 is just empty, no tasks inside it\n sg1 = g.subgraph(ctx)\n # sg2 contains only a NOPTask\n sg2 = g.subgraph(ctx)\n sg2.add_task(tasks.NOPLocalWorkflowTask(ctx))\n\n # sg3 contains sg4, which is empty behcause it only contains a NOPTask\n sg3 = g.subgraph(ctx)\n sg4 = g.subgraph(ctx)\n sg4.add_task(tasks.NOPLocalWorkflowTask(ctx))\n sg3.add_task(sg4)\n\n # sg5 is a subgraph that contains a real task! it is not removed\n sg5 = g.subgraph(ctx)\n real_task = tasks.WorkflowTask(ctx)\n sg5.add_task(real_task)\n\n assert set(g.tasks) > {sg1, sg2, sg3, sg4, sg5, real_task}\n g.optimize()\n assert set(g.tasks) == {sg5, real_task}", "def test_dot(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_pod_valid_parent(self):\n session = self.login_to_apic()\n parent = PhysicalModel()\n pod = Pod.get(session, parent)\n children = parent.get_children()\n self.assertEqual(pod, children)", "def test_ExplorePath_Simple( self ):\n links = []\n n1 = graph.Node( 10, 50 )\n n2 = graph.Node( 10, 50 )\n n3 = graph.Node( 10, 50 )\n n7 = graph.Node( 10, 50 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, roots, n1 )\n expected = [ n1, n2, n3, n7 ]\n self.assertEqual( expected, actual )", "def test_dummy4(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.log_not() is xp)", "def test_child_link(self):\n def compare_func(obj, node):\n child_nodes = self.get_children_of_node(node)\n\n for (child_obj, child_node) in zip(obj.children, child_nodes):\n # parent-child link (children must have obj as their parent)\n self.assertEqual(child_obj.parent, obj)\n\n self.recursively_compare_tree_against_html(compare_func)", "def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from graph_matcher.subgraph_isomorphisms_iter()", "def sub_graph_merging(self):\n raise NotImplementedError()", "def three_opt(self, pop):\n children, used = ([] for i in range(2))\n for i in range(0, self.population, 1):\n tmp = []\n breaks = np.sort(rand(3) * len(pop[i]) // 1)\n while breaks[1] == breaks[0] or breaks[1] == breaks[2]:\n breaks[1] = rand() * len(pop[i]) // 1\n breaks = np.sort(breaks)\n\n while breaks[2] == breaks[0]:\n breaks[2] = rand() * len(pop[i]) // 1\n\n breaks = np.sort(breaks)\n tmp[0:(int(breaks[0]))] = pop[i][0:int(breaks[0])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - breaks[1]))] = pop[i][int(breaks[1]):int(breaks[2])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[1] - breaks[0]))] = pop[i][int(breaks[0]):int(breaks[1])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - len(pop[i])))] = 
pop[i][int(breaks[2]):len(pop[i])]\n children.append(tmp)\n used.append(i)\n tmp = []\n tmp[0:(int(breaks[0]))] = pop[i][0:int(breaks[0])]\n tmp[(len(tmp)):(int(len(tmp) + breaks[1] - breaks[0]))] = list(reversed(pop[i][int(breaks[0]):int(breaks[1])]))\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - breaks[1]))] = reversed(pop[i][int(breaks[1]):int(breaks[2])])\n tmp[(len(tmp)):(int(len(tmp) + breaks[2] - len(pop[i])))] = pop[i][int(breaks[2]):len(pop[i])]\n children.append(tmp)\n used.append(i)\n\n return (\n children, used)", "def testSetParent(self):\n for child in self.color_corrections + self.color_decisions:\n self.assertEqual(\n None,\n child.parent\n )\n\n self.node.append_children(\n self.color_corrections + self.color_decisions\n )\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )\n child.parent = 'banana'\n self.assertEqual(\n 'banana',\n child.parent\n )\n\n self.node.set_parentage()\n\n for child in self.node.all_children:\n self.assertEqual(\n self.node,\n child.parent\n )", "def test_get_dependencies_subgraph_by_dfs(\n self, source_node, expected_nodes_in, expected_nodes_out\n ):\n graph = nx.DiGraph()\n graph.add_node(\"pack1\")\n graph.add_node(\"pack2\")\n graph.add_node(\"pack3\")\n graph.add_node(\"pack4\")\n graph.add_edge(\"pack1\", \"pack2\")\n graph.add_edge(\"pack2\", \"pack3\")\n dfs_graph = PackDependencies.get_dependencies_subgraph_by_dfs(\n graph, source_node\n )\n for i in expected_nodes_in:\n assert i in dfs_graph.nodes()\n for i in expected_nodes_out:\n assert i not in dfs_graph.nodes()", "def test_case2(self):\n\n graph = BipartiteGraph()\n\n graph.addEdge(\"supervisor1\",\"student1\")\n graph.addEdge(\"supervisor2\",\"student4\")\n graph.addEdge(\"supervisor3\",\"student3\")\n\n val1 = graph.getSupervisorDegree(\"supervisor1\")\n\n graph.addEdge(\"supervisor1\",\"student2\")\n\n curr = graph.getSupervisorDegree(\"supervisor1\")\n val2 = graph.getSupervisors(\"student2\")\n expected2 = [\"supervisor1\"]\n\n self.assertEqual((curr-1,expected2),(val1,val2))", "def test_12_parents_as_minus_1_0_0_1_1_2_3_4_4_5_7_7(self):\n parents = [ -1, 0, 0, 1, 1, 2, 3, 4, 4, 5, 7, 7 ]\n self.init(parents)\n self.assertEqual(5, self.method_under_test())", "def test_node_info_popup(self):\n def test_popup(node):\n node.details.click()\n with NodeInfo() as details:\n self.assertEqual(\n node.name.text, details.header.text,\n 'Node name')\n details.close.click()\n details.wait_until_exists()\n\n with Nodes()as n:\n test_popup(n.nodes_discovered[0])\n test_popup(n.nodes_offline[0])\n test_popup(n.nodes_error[0])", "def test_input_valid_subgraph(self, dim):\r\n with pytest.raises(ValueError, match=\"Input is not a valid subgraph\"):\r\n clique.swap([0, dim], nx.empty_graph(dim))", "def test_spec(self):\n graph = graphviz.Graph(comment='The Round Table')\n graph.node('A', 'King Arthur')\n graph.node('B', 'Sir Bedevere the Wise')\n graph.edges(['AB'])\n\n st.graphviz_chart(graph)\n\n c = self.get_delta_from_queue().new_element.graphviz_chart\n self.assertEqual(hasattr(c, 'spec'), True)", "def test_change_parent_location(self):\n pass", "def test_4_parents_as_1_minus_1_1_0(self):\n parents = [ 1, -1, 1, 0 ]\n self.init(parents)\n self.assertEqual(3, self.method_under_test())", "def test_6_parents_as_minus_1_0_1_2_3_4(self):\n parents = [ -1, 0, 1, 2, 3, 4 ]\n self.init(parents)\n self.assertEqual(6, self.method_under_test())", "def test_7_parents_as_minus_1_0_0_1_2_3_4(self):\n parents = [ -1, 0, 0, 1, 2, 3, 4 ]\n self.init(parents)\n 
self.assertEqual(4, self.method_under_test())", "def test_generator5(self):\n xpb = XPathBuilder()\n xp = None\n base_xp = xpb.base.foo.bar\n base_gen = None\n with base_xp as b:\n base_gen = b\n xp = b().join(xpb.a.b.c[3])\n exp = '/base/foo/bar/a/b/c[3]'\n base_exp = '/base/foo/bar'\n # check tree structure\n self.assertTrue(base_xp._parent is None)\n self.assertTrue(len(base_xp._children[0]._children[0]._children) == 0)\n self.assertTrue(base_gen._parent is None)\n self.assertTrue(len(base_gen._children) == 0)\n # check xpath\n self.assertEqual(xp.tostring(), exp)\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(base_gen.tostring(), base_exp)", "def playout(self, state):\n node = self.root\n isTerminal = False\n depth = 0\n\n while not isTerminal and depth < self.playout_depth:\n #A = len(node.children) # num_children\n A = len(node.children[0]) + len(node.children[1]) \n if A < self.num_initActions:\n #if len(node.children[0]) < self.num_initActions:\n node, init_action_xy, init_spin = self.initChildren(node, state, depth)\n _, ShotVec = CreateShot(_ShotPos(init_action_xy[0], init_action_xy[1], init_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0)\n \n depth += 1\n break\n \n n_a = [c.n_visits for c in node.children[0].values()] + [c.n_visits for c in node.children[1].values()]\n # progressive widening\n # if chilren node has been visited much times then expand\n #if np.sqrt(sum(n_a)) >= A:\n if sum(n_a) >= 10 * A: \n # expand\n node, expanded_action_xy, expanded_spin = self.expand(node)\n _, ShotVec = CreateShot(_ShotPos(expanded_action_xy[0],expanded_action_xy[1], expanded_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n break\n\n # select\n node, selected_action_xy, selected_spin = self.ucb_select(node)\n _, ShotVec = CreateShot(_ShotPos(selected_action_xy[0], selected_action_xy[1], selected_spin))\n success, ResShot = Simulation(state, ShotVec, Config.RAND, -1)\n isTerminal = (state.ShotNum==0) # one end game\n \n depth += 1\n\n if isTerminal:\n break\n\n if not isTerminal and depth < self.playout_depth:\n # save the rollout_state for speed.\n #if node.rollout_state is None:\n state = self.rollOut(node, state, depth)\n #node.rollout_state = state\n #else:\n # state = node.rollout_state\n \n self.update(node, state)", "def test_same_node_is_reachable(self):\n # G is an arbitrary tournament on ten nodes.\n G = DiGraph(sorted(p) for p in combinations(range(10), 2))\n assert_true(all(is_reachable(G, v, v) for v in G))", "def test_wp_association_bp(self):\n test_graph = wikipathways_to_bel(WP2359, self.hgnc_manager)\n\n self.assertEqual(type(test_graph), BELGraph, msg='Error with graph type')\n\n self.assertEqual(test_graph.summary_dict()['Number of Nodes'], 2)\n self.assertEqual(test_graph.summary_dict()['Number of Edges'], 1)\n self.assertEqual(count_relations(test_graph)['regulates'], 1)", "def test_set_node_second_level_component_with_first_level_parent(self):\n payload = {\n 'data': [\n {\n 'type': 'nodes',\n 'id': self.first_level_component._id\n },\n {\n 'type': 'nodes',\n 'id': self.second_level_component._id\n }\n ]\n }\n res = self.app.post_json_api(self.url, payload, auth=self.user.auth)\n self.view_only_link.reload()\n assert_equal(res.status_code, 201)\n assert_in(self.first_level_component, self.view_only_link.nodes.all())\n assert_in(self.second_level_component, self.view_only_link.nodes.all())", "def 
test_n_path_reactions(self):\n self.assertEqual(self.Npath, 3)", "def get_subgraphs(graph):\n nodes_powerset = get_nodes_combinations(graph)\n #print(\"Doing\")\n #draw_graph(graph)\n subgraphs = []\n for nodes in nodes_powerset:\n subg = graph.subgraph(nodes)\n nodes = subg.nodes(data=True)\n if nx.is_weakly_connected(subg):\n subgraphs.append(subg)\n return subgraphs", "def test_ExplorePath( self ):\n links = []\n n1 = graph.Node( 10, 10 )\n n2 = graph.Node( 10, 20 )\n n3 = graph.Node( 10, 30 )\n n4a = graph.Node( 5, 40 )\n n4b = graph.Node( 15, 40 )\n n5a = graph.Node( 5, 50 )\n n5b = graph.Node( 15, 50 )\n n6a = graph.Node( 5, 60 )\n n6b = graph.Node( 15, 60 )\n n7 = graph.Node( 10, 70 )\n\n links.append( graph.Link( n1, n2 ) )\n links.append( graph.Link( n2, n3 ) )\n links.append( graph.Link( n3, n4a ) )\n links.append( graph.Link( n3, n4b ) )\n links.append( graph.Link( n4a, n5a ) )\n links.append( graph.Link( n4b, n5b ) )\n links.append( graph.Link( n5a, n6a ) )\n links.append( graph.Link( n6a, n7 ) )\n links.append( graph.Link( n5b, n7 ) )\n roots = [n1]\n actual = nodes.explorePath( links, n1, n1 )\n expected = [ n1, n2, n3, n4b, n5b, n7 ]\n self.assertEqual( expected, actual )", "def test_cycle(self):\n g = Graph(3)\n g.add_edge(0, 1)\n g.add_edge(0, 2)\n # g.add_edge(0, 0)\n assert g.contains_cycle() is False\n g.add_edge(1, 2)\n assert g.contains_cycle() is True", "def graph(self):\n ...", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def pick_androdioecious_parents(simu, config):\n rng = simu.getRNG()\n runif = rng.randUniform\n rint = rng.randInt\n try:\n sstar = config.sstar\n def compound_generator(pop):\n \"\"\"\n Picks up parent(s) under androdioecy using a compound parameter.\n \"\"\"\n gen = -1\n while True:\n ngen = pop.dvars().gen\n if gen != ngen:\n # At the beginning of a generation, extract the\n # sex-specific subpopulations from a parental\n # population. 
The sex-specific subpopulations are used\n # throughout mating events in one generation.\n gen = ngen\n males = pop.extractSubPops(subPops=[(0, 0)])\n herms = pop.extractSubPops(subPops=[(0, 1)])\n nmale = males.popSize()\n nherm = herms.popSize()\n\n if runif() < sstar: # uniparental\n yield herms.individual(rint(nherm))\n else: # biparental\n yield [males.individual(rint(nmale)), herms.individual(rint(nherm))]\n return compound_generator\n except KeyError:\n stilde = config.stilde\n tau = config.tau\n def compound_generator(pop):\n \"\"\"\n Picks up parent(s) under androdioecy using fundamental parameters.\n \"\"\"\n gen = -1\n while True:\n ngen = pop.dvars().gen\n if gen != ngen:\n # At the beginning of a generation, extract the\n # sex-specific subpopulations from a parental\n # population. The sex-specific subpopulations are used\n # throughout mating events in one generation.\n gen = ngen\n males = pop.extractSubPops(subPops=[(0, 0)])\n herms = pop.extractSubPops(subPops=[(0, 1)])\n nmale = males.popSize()\n nherm = herms.popSize()\n\n if runif() < stilde: # proportion of self-fertlized egg\n if runif() < tau: # survival rate of a uniparental zygote rel to a biparental z.\n yield herms.individual(rint(nherm))\n else: # biparental\n yield [males.individual(rint(nmale)), herms.individual(rint(nherm))]\n return compound_generator", "def test_12_parents_as_minus_1_0_0_1_1_2_3_4_4_5_7_7_9_12(self):\n parents = [ -1, 0, 0, 1, 1, 2, 3, 4, 4, 5, 7, 7, 9, 12 ]\n self.init(parents)\n self.assertEqual(6, self.method_under_test())", "def crossover(parent1, parent2):\n path1 = random_subtree(parent1, \"\")\n path2 = random_subtree(parent2, \"\")\n parent1 = parent1.copy()\n parent2 = parent2.copy()\n loc1 = parent1\n loc1parent = parent1\n loc2 = parent2\n loc2parent = parent2\n for i in range(len(path1)):\n loc1parent = loc1\n if path1[i] == \"1\":\n loc1 = loc1parent.get_right()\n else:\n loc1 = loc1parent.get_left()\n for i in range(len(path2)):\n loc2parent = loc2\n if path2[i] == \"1\":\n loc2 = loc2parent.get_right()\n else:\n loc2 = loc2parent.get_left()\n if(len(path1)-1 >= 0):\n if path1[len(path1)-1] == \"1\":\n loc1parent.right = loc2\n else:\n loc1parent.left = loc2\n if(len(path2) - 1 >= 0):\n if path2[-1] == \"1\":\n loc2parent.right = loc1\n else:\n loc2parent.left = loc1\n return (parent1, parent2)", "def test_4_parents_as_minus_1_0_1_2(self):\n parents = [ -1, 0, 1, 2 ]\n self.init(parents)\n self.assertEqual(4, self.method_under_test())", "def testBasic1(self):\n nodes = self.G.nodes()\n assert len(nodes) == len( set(nodes) )", "def test_5_parents_as_1_minus_1_1_0_0(self):\n parents = [ 1, -1, 1, 0, 0 ]\n self.init(parents)\n self.assertEqual(3, self.method_under_test())", "def test_3_parents_as_1_minus_1_1(self):\n parents = [ 1, -1, 1 ]\n self.init(parents)\n self.assertEqual(2, self.method_under_test())" ]
[ "0.7205235", "0.70020497", "0.62084246", "0.6087695", "0.5988837", "0.58964515", "0.58667445", "0.58020157", "0.5726468", "0.5667711", "0.5539633", "0.5539633", "0.5504206", "0.54905653", "0.5482558", "0.5460695", "0.54255486", "0.5419617", "0.5398533", "0.5355489", "0.53264356", "0.53245705", "0.5312008", "0.5307634", "0.5303363", "0.53014946", "0.5294228", "0.523673", "0.5232471", "0.5228123", "0.52248925", "0.52232295", "0.5218298", "0.52174497", "0.52111673", "0.52089536", "0.51829195", "0.51745975", "0.51708", "0.51655567", "0.5164763", "0.51581097", "0.51574516", "0.51436824", "0.5132396", "0.5131299", "0.51306385", "0.51233494", "0.5118409", "0.5116303", "0.5108168", "0.510709", "0.5106759", "0.510241", "0.50905424", "0.5089184", "0.50854236", "0.507906", "0.5076508", "0.50747967", "0.50699407", "0.5069547", "0.50572777", "0.5057174", "0.5051708", "0.50352585", "0.50333", "0.50322187", "0.50287956", "0.5022825", "0.5014624", "0.50122243", "0.50086933", "0.5006136", "0.5001657", "0.50005764", "0.49920267", "0.49919513", "0.49910122", "0.49844953", "0.49769056", "0.49724922", "0.4971587", "0.49644488", "0.49549103", "0.4952674", "0.49509016", "0.4941937", "0.49410596", "0.49405313", "0.49402314", "0.49369624", "0.4928777", "0.49261698", "0.49260587", "0.49215972", "0.4920297", "0.4917804", "0.49171856", "0.49147215" ]
0.6038614
4
Test the popxl getting / setting tensor data example
def test_documentation_popxl_get_set_tensors(self): filename = "tensor_get_write.py" self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_addition_variable(self):\n filename = \"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_predictor():", "def test_loadData():\n \n sys = LVsystem.Ecosystem()\n \n sys.loadSetup('2Prey1Predator')\n \n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n\n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)", "def test_documentation_popxl_adv_get_write(self):\n filename = \"tensor_get_write_adv.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_machine_learning():", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def setUp(self):\n self.samples = 5\n self.otus = 10\n seed(0) # this will seed numpy prng at 0 before each test", "def test_init_prediction_data(raw_data):\n prediction_data = PredictionData(**raw_data)\n assert prediction_data", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def rand_data():\n # 100 examples, with seq_len=10, each holding 300 features\n return torch.randn((100, 10, 300))", "def set_data():\r\n #if not os.path.exists(filepath):\r\n #download_data()\r\n metadata = read(filepath + flist[-1])\r\n ndata = metadata['num_cases_per_batch']\r\n ndim = metadata['num_vis']\r\n\r\n data, train, test = {}, {}, {}\r\n data['labels'] = metadata['label_names']\r\n data['ntraindata'] = metadata['num_cases_per_batch'] * (len(flist) - 2)\r\n data['ntestdata'] = metadata['num_cases_per_batch']\r\n data['ndim'] = metadata['num_vis']\r\n\r\n train['x'], train['y'] = convert_train(data['ntraindata'], data['ndim'])\r\n\r\n testdata = read(filepath + flist[-2])\r\n test['x'] = testdata['data']\r\n test['y'] = testdata['labels']\r\n\r\n data['train'], data['test'] = train, test\r\n save_pkl(data)", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n 
images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def test_add_get_tensor_2D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 2D tensors of all data types\n data_2D = mock_data.create_data((10, 10))\n add_get_arrays(dataset, data_2D)", "def test_meteo():\n test_path = tempfile.mkdtemp()\n x_train, metadata = meteo(test_path)\n try:\n assert x_train.shape == (11, 6)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_training(self):\n\t\tpass", "def setUp(self):\n output = np.zeros((1, 5, 2))\n target = np.zeros((1, 5, 2))\n # first channel\n output[0, 0] = [10, 4]\n target[0, 0] = [10, 0]\n # second channel\n output[0, 1] = [10, 18]\n target[0, 1] = [10, 10]\n # third channel\n output[0, 2] = [0, 0]\n target[0, 2] = [0, -1]\n # fourth channel\n output[0, 3] = [40, 40]\n target[0, 3] = [30, 30]\n # fifth channel\n output[0, 4] = [20, 10]\n target[0, 4] = [0, 10]\n\n gt_instances = InstanceData()\n gt_instances.keypoints = target\n gt_instances.keypoints_visible = np.array(\n [[True, True, False, True, True]])\n\n pred_instances = InstanceData()\n pred_instances.keypoints = output\n\n data = {'inputs': None}\n data_sample = {\n 'gt_instances': gt_instances.to_dict(),\n 'pred_instances': pred_instances.to_dict()\n }\n\n self.data_batch = [data]\n self.data_samples = [data_sample]", "def test_get_iris_setosa_data(self):\n iris = get_iris_setosa_data()\n self.assertEqual(len(iris.data), 150)\n self.assertEqual(len(iris.labels), 150)", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". 
put the .npy files iside that folder\n path = os.getcwd() # reads the current path\n x_train = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_train = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n x_test = np.load(path + '/files/tinyX_test.npy', 'r') # reads the input file\n x_train, y_train = shuffle(x_train, y_train)\n\n return x_train, y_train, x_test", "def test_income():\n test_path = tempfile.mkdtemp()\n x_train, metadata = income(test_path)\n try:\n assert x_train.shape == (44, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_gen():\n tpot_obj = TPOTClassifier()\n\n pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)\n\n assert len(pipeline) > 1\n assert pipeline[0].ret == Output_DF", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_dataset_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.dataset.equals(atom.mnb.dataset)\n assert check_scaling(atom.lr.dataset)", "def _get_data_for_tests():\n X = np.random.randn(100, input_dim)\n Y = np.random.randn(100, output_dim)\n X_new = np.random.randn(100, input_dim)\n return X, X_new, Y", "def setUp(self):\n self.X_train, self.y_train = load_data(\"../data/traindata.mat.tar.gz\")\n self.nn = NN_hwr([len(self.X_train[0]), 50, 10])", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_quartet():\n test_path = tempfile.mkdtemp()\n x_train, metadata = quartet(test_path)\n try:\n assert x_train.shape == (11, 6)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n 
f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def test_affect():\n test_path = tempfile.mkdtemp()\n x_train, metadata = affect(test_path)\n try:\n assert x_train.shape == (330, 20)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]", "def test_init(self):\n xtal_model_data = XtalModelData(self.params)\n\n assert xtal_model_data.pdb == self.params.input.pdb\n\n assert xtal_model_data.mtz == self.params.input.mtz\n\n # TODO Assert utilised method calls of these classes\n # Assert is innstance causses issues if called from somewhere else\n\n self.assertIsInstance(xtal_model_data.xrs, cctbx.xray.structure)\n\n self.assertIsInstance(\n xtal_model_data.inputs, mmtbx.utils.process_command_line_args\n )\n\n self.assertIsInstance(\n xtal_model_data.crystal_gridding, cctbx.maptbx.crystal_gridding\n )\n\n self.assertIsInstance(xtal_model_data.fmodel, mmtbx.f_model.f_model.manager)", "def setUp(self):\n self.dataset = get_test_dataset()", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... 
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def test_add_get_scalar(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_metadata_scalars(10)\n add_get_scalars(dataset, data)", "def test_significance_individual_timepoints(tensor_directory, analysis_name):\n\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = h5py.File(os.path.join(tensor_directory, analysis_name + \".hdf5\"), \"r\")\n activity_dataset = analysis_file[\"Data\"]\n metadata_dataset = analysis_file[\"metadata\"]\n number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)\n print(\"metadata_dataset\", np.shape(metadata_dataset))\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + \"_Trialwise_.h5\"), mode=\"r\")\n activity_dataset = analysis_file.root[\"Data\"]\n metadata_dataset = analysis_file.root[\"Trial_Details\"]\n\n # Create P and Slope Tensors\n p_value_tensor = np.ones((number_of_timepoints, number_of_pixels))\n slope_tensor = np.zeros((number_of_timepoints, number_of_pixels))\n\n for timepoint_index in tqdm(range(number_of_timepoints), position=0, desc=\"Timepoint\"):\n\n # Get Timepoint Data\n timepoint_activity = activity_dataset[timepoint_index]\n\n for pixel_index in tqdm(range(number_of_pixels), position=1, desc=\"Pixel\", leave=True):\n\n # Package Into Dataframe\n pixel_activity = timepoint_activity[:, pixel_index]\n 
pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)\n\n # Fit Mixed Effects Model\n p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)\n p_value_tensor[timepoint_index, pixel_index] = p_value\n slope_tensor[timepoint_index, pixel_index] = slope\n\n\n # Save These Tensors\n np.save(os.path.join(tensor_directory, analysis_name + \"_p_value_tensor.npy\"), p_value_tensor)\n np.save(os.path.join(tensor_directory, analysis_name + \"_slope_tensor.npy\"), slope_tensor)", "def generate_x(number_dimensions, T_train, T_test, mu, feature_model):\n number_training_obeservations = T_train.shape[0]\n number_testing_obeservations = T_test.shape[0]\n\n X_train = np.zeros((number_training_obeservations,number_dimensions))\n X_test = np.zeros((number_testing_obeservations,number_dimensions))\n\n mixture_indicator_train = generate_mixture_indicator(number_training_obeservations)\n mixture_indicator_test = generate_mixture_indicator(number_testing_obeservations)\n\n G = np.random.normal(0,1,(number_dimensions,number_dimensions))\n q, r = np.linalg.qr(G)\n\n mu1 = mu*np.ones(number_dimensions)\n mu2 = -mu*np.ones(number_dimensions)\n\n if feature_model == \"A\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n\n\n elif feature_model == \"B\":\n eigenvalues1 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues1 = np.sort(eigenvalues1, axis = 0)/np.sum(eigenvalues1)\n lambda1 = np.identity(number_dimensions)\n np.fill_diagonal(lambda1,eigenvalues1)\n cov1 = q@lambda1@q.T\n\n eigenvalues2 = np.random.uniform(0,1,(number_dimensions,1))\n eigenvalues2 = np.sort(eigenvalues2, axis = 0)[::-1]/np.sum(eigenvalues2)\n lambda2 = np.identity(number_dimensions)\n np.fill_diagonal(lambda2,eigenvalues2)\n cov2 = q@lambda2@q.T\n\n\n for i in range(number_training_obeservations):\n if T_train[i] == 0:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_train[i] == 0:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_train[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n \n for i in range(number_testing_obeservations):\n if T_test[i] == 0:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu1,cov2,1)\n else:\n if mixture_indicator_test[i] == 0:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov1,1)\n else:\n X_test[i,:] = np.random.multivariate_normal(mu2,cov2,1)\n\n train_mean = np.mean(X_train, axis = 0)\n train_std = np.std(X_train, axis = 0)\n X_train = (X_train - train_mean)/train_std\n X_test = (X_test - train_mean)/train_std\n \n return X_train, X_test", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, 
working_dir=working_dir)", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_random_data_basic(self):\n cdata = random_data(num_features=2,\n num_samples=4,\n labels=None)\n self.assertEqual(cdata.num_features, 2)\n self.assertEqual(cdata.num_samples, 4)", "def tensor(data, **context):\n raise NotImplementedError", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def setUpTestData(cls):\n data_gen.run()", "def load_data(opt=\"mnist\"):\n if opt == \"mnist\":\n train, test = tf.keras.datasets.mnist.load_data()\n \n x_train, y_train = train\n x_test, y_test = test\n \n x_train = x_train.reshape(x_train.shape[0], 28 * 28)\n x_test = x_test.reshape(x_test.shape[0], 28 * 28)\n \n y_train = y_train.astype(np.int)\n y_test = y_test.astype(np.int)\n for i in range(len(y_train)):\n y_train[i] = 1 if y_train[i] % 2 == 0 else -1\n for i in range(len(y_test)):\n y_test[i] = 1 if y_test[i] % 2 == 0 else -1\n\n elif opt == \"covertype\":\n df = pd.read_csv(\"covtype.data\", header=None)\n x = df.iloc[:, 0:54].values\n y = df[54].values\n for i in range(len(y)):\n y[i] = 1 if y[i] % 2 == 0 else -1\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3)\n \n else:\n logging.error(\"Unknown dataset!!\")\n\n logging.info(\"train data shape: {}\".format(x_train.shape))\n logging.info(\"test data shape: {}\".format(x_test.shape))\n return (x_train, y_train), (x_test, y_test)", "def test_model_sample(net, data_loader):\n net.eval()\n array = []\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n output = net(X)\n output = ToPILImage()(output)\n array.append(output)\n return array", "def load_data_set():\n\n iris = pd.read_csv('/Users/michael/Codes/bitbucket/cs584-s18-kaiyue-ma/AS2/data/iris.csv', encoding=\"gbk\")\n iris = iris.sample(frac=1.0)\n\n dummy = pd.get_dummies(iris['Species'])\n iris = pd.concat([iris, dummy], axis=1)\n\n org_x = np.array(iris.iloc[:, 1:5])\n # org_y = np.array(iris['setosa']).reshape(len(iris), 1)\n org_y = np.array(iris['setosa'])\n\n # data_arr, test_arr, label_arr, test_label = train_test_split(org_x, org_y, test_size=0.2, random_state=42)\n #\n # data_arr = data_arr.tolist()\n # test_arr = test_arr.tolist()\n # label_arr = label_arr.tolist()\n # test_label = test_label.tolist()\n #\n # data_arr = np.dot(data_arr)\n # one = np.ones(len(data_arr))\n # data_arr = np.column_stack(one , data_arr)\n # data_arr = data_arr.tolist()\n #\n # test_arr = np.dot(test_arr)\n # one = np.ones(len(test_arr))\n # test_arr = np.column_stack(one, test_arr)\n # test_arr = test_arr.tolist()\n\n x = org_x.tolist()\n y = org_y.tolist()\n x = np.mat(x)\n one = np.ones(len(x))\n x = np.column_stack((one, x))\n x = x.tolist()\n\n # print(data_arr)\n # x1_2 = data_arr[:, 1]\n # print(x1_2)\n # print('.....')\n # print(label_arr)\n # return data_arr, test_arr, label_arr, test_label\n return x, y", "def test_numpify_and_store(self):\n Nsamples = 9\n Ntimesteps = 10\n Ncolumns = 3\n X = [[[0 for a in range(Ncolumns)] for b in range(Ntimesteps)] \\\n for c in range(Nsamples)]\n y = [[0 for a in range(Ntimesteps)] for b in range(Nsamples)]\n xname = 'xname'\n yname = 'yname'\n outdatapath = os.getcwd()\n tutorial_pamap2.numpify_and_store(X, y, xname, yname, outdatapath, \\\n shuffle=True)\n filename = 
os.path.join(outdatapath, xname+ '.npy')\n test = os.path.isfile(filename)\n if test == True:\n os.remove(filename)\n os.remove(os.path.join(outdatapath, yname + '.npy'))\n assert test", "def test_morley():\n test_path = tempfile.mkdtemp()\n x_train, metadata = morley(test_path)\n try:\n assert x_train.shape == (100, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_add():\n data = io.create_sample_Dataset()\n tmp = data + data\n assert tmp[\"u\"][0, 0, 0] == 2.0", "def fixture_example_data():\n import_example_data()", "def init_tensors(self, sample, *args):\n raise NotImplementedError", "def test(self, test):\r\n self.ml_data.set_target(test[0])\r\n self.ml_data.set_features(test[1])\r\n if self.ml_data.target_type.all() == np.float64 or self.ml_data.target_type.all() == np.int64:\r\n self.model_qua.open()\r\n else:\r\n self.model_quali.open()", "def test_set_xT(self):\n s = State(substance=\"water\")\n s.xT = Q_(0.5, \"dimensionless\"), Q_(400.0, \"K\")\n # Pylance does not support NumPy ufuncs\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore\n s.xT = Q_(50, \"percent\"), Q_(400.0, \"K\")\n assert np.isclose(s.T, Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.p, Q_(245769.34557103913, \"Pa\")) # type: ignore\n assert np.isclose(s.xT[1], Q_(400.0, \"K\")) # type: ignore\n assert np.isclose(s.xT[0], Q_(0.5, \"dimensionless\")) # type: ignore\n assert np.isclose(s.u, Q_(1534461.5163075812, \"J/kg\")) # type: ignore\n assert np.isclose(s.s, Q_(4329.703956664546, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cp, Q_(4056.471547685226, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.cv, Q_(2913.7307270395363, \"J/(kg*K)\")) # type: ignore\n assert np.isclose(s.v, Q_(0.3656547423394701, \"m**3/kg\")) # type: ignore\n assert np.isclose(s.h, Q_(1624328.2430353598, \"J/kg\")) # type: ignore\n assert np.isclose(s.x, Q_(0.5, \"dimensionless\")) # type: ignore", "def run_experiments(data_set=\"\",compact=2,exp_name=\"\",x=\"\"):\n cwd = os.getcwd()\n results_path = cwd+'/data/geometric/'+exp_name+'_'\n dataset_path = cwd+'/data/geometric/'+data_set+'_'\n compact = int(compact)\n if \"m\" in x:\n # make new pxy\n pxy, Xdata, groups = gen_geometric_pxy()\n np.save(dataset_path+'Xdata',Xdata)\n np.save(dataset_path+'groups',groups)\n np.save(dataset_path+'pxy',pxy)\n else:\n # load existing pxy\n pxy = np.load(dataset_path+'pxy.npy')\n if \"r\" in x: # regular experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n 
metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise.csv')\n if \"ip\" in x: # initialization experiments - positive p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_pos.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_pos(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_pos.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_pos.csv')\n if \"in\" in x: # initialization experiments - negative p0\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_p0_neg.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_p0_neg(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_p0_neg.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_p0_neg.csv')\n if \"c\" in x: # convergence tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, 
distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_ctol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_ctol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_ctol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_ctol.csv')\n if \"z\" in x: # zeroL tolerance experiments\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_zeroLtol.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_zeroLtol(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_zeroLtol.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_zeroLtol.csv')\n if \"b\" in x: # trying proposed optimal beta\n if compact>1:\n metrics_stepwise, distributions_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps, distributions_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n elif compact>0:\n metrics_stepwise,\\\n metrics_converged, distributions_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps, distributions_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n distributions_converged.to_pickle(results_path+'distributions_converged_bestbeta.pkl')\n else:\n metrics_stepwise,\\\n metrics_converged,\\\n metrics_stepwise_allreps,\\\n metrics_converged_allreps = test_IB(pxy,compact)\n metrics_converged.to_csv(results_path+'metrics_converged_bestbeta.csv')\n 
metrics_stepwise.to_csv(results_path+'metrics_stepwise_bestbeta.csv')\n return 0", "def test(self, dataset) -> None:\n raise NotImplementedError()", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def test_createData():\n\n sys = LVsystem.Ecosystem()\n\n sys.addSpecies('rabbit')\n sys.setInteraction('rabbit', 'hen', 0)\n sys.setInteraction('rabbit', 'fox', -1)\n sys.setInitialCond('rabbit', 30)\n sys.setGrowthRate('rabbit', 0.09)\n sys.setCarrCap('rabbit', 10000)\n sys.setChangeRate('rabbit', 400)\n\n sys.addSpecies('hen')\n sys.setInteraction('hen', 'rabbit', 0)\n sys.setInteraction('hen', 'fox', -1)\n sys.setInitialCond('hen', 10)\n sys.setGrowthRate('hen', 0.07)\n sys.setCarrCap('hen', 10000)\n sys.setChangeRate('hen', 500)\n\n sys.addSpecies('fox')\n sys.setInteraction('fox', 'rabbit', 1)\n sys.setInteraction('fox', 'hen', 1)\n 
sys.setInitialCond('fox', 20)\n sys.setGrowthRate('fox', -0.06)\n sys.setCarrCap('fox', 1)\n sys.setChangeRate('fox', 250)\n\n \n data = sys.create_data()\n \n assert data[0] == 3\n assert data[1] == ['rabbit', 'hen', 'fox']\n assert data[2] == [30,10,20]\n assert data[3] == [0.09,0.07,-0.06] \n assert data[4] == [10000,10000,1]\n assert data[5] == [400,500,250]\n assert data[6][1][2] == -data[6][2][1]\n assert data[6][2][2] == 0\n \n sys.removeSpecies('rabbit')\n sys.removeSpecies('fox')\n sys.removeSpecies('hen')", "def test(batch_size=1, num_sample=16):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)", "def test(self, dataset):\n\n outputs, errors = self.use(dataset)\n\n ## PUT CODE HERE ##\n # I put the code in the \"use\" function, seems better :-)\n\n return outputs, errors", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert check_scaling(atom.lr.X_test)", "def test_op(self):\n name = \"my_mesh\"\n tensor_data = test_utils.get_random_mesh(\n 100, add_faces=True, add_colors=True\n )\n config_dict = {\"foo\": 1}\n with tf.compat.v1.Graph().as_default():\n tensor_summary = summary.op(\n name,\n tensor_data.vertices,\n faces=tensor_data.faces,\n colors=tensor_data.colors,\n config_dict=config_dict,\n )\n with self.test_session() as sess:\n proto = self.pb_via_op(tensor_summary)\n self.verify_proto(proto, name)\n plugin_metadata = metadata.parse_plugin_metadata(\n proto.value[0].metadata.plugin_data.content\n )\n self.assertEqual(\n json.dumps(config_dict, sort_keys=True),\n plugin_metadata.json_config,\n )", "def setup(self):\n (self.X, self.Y) = load_iris(problem=\"label_ranking\")", "def test_11_dataset(self, example):\n example.zonenumbers_i = np.array([100, 200, 300])\n example.groupnames_g = np.array(['Female', 'Male'], dtype='O')\n example.create_ds()\n assert isinstance(example.ds, xr.Dataset)\n for attr, dtype in example.dtypes.items():\n data_array = example.ds[attr]\n # test if the shapes are correct\n if dtype.shape:\n np.testing.assert_array_equal(dtype.get_shape(example),\n data_array.shape,\n 'shape not correct')\n else:\n # not initialized array\n assert not np.any(data_array.shape), 'shape not initialized'\n #test if the datatypes are correct\n assert np.dtype(dtype.dtype) == data_array.dtype, 'dtype not correct'\n print(example.ds)", "def test_fish():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fish(test_path)\n try:\n assert x_train.shape == (97, 20)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_active_inference_SPM_1b(self):", "def test_okun():\n test_path = tempfile.mkdtemp()\n x_train, metadata = okun(test_path)\n try:\n assert x_train.shape == (47, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_documentation_popxl_repeat_0(self):\n filename = \"repeat_graph_0.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def load_x(self):\n self.x = self.read_var(self.xvar)\n self.test_shape(self.xvar, self.x.shape, 1)", "def testingData(self):\n\n #import SampleData\n #sampleDataLogic = SampleData.SampleDataLogic()\n #mrHead = sampleDataLogic.downloadMRHead()\n #dtiBrain = sampleDataLogic.downloadDTIBrain()\n \n # w = slicer.modules.SteeredFluidRegistrationWidget\n # w.fixedSelector.setCurrentNode(mrHead)\n # w.movingSelector.setCurrentNode(dtiBrain)\n \n if not slicer.util.getNodes('testbrain1*'):\n import os\n fileName = \"C:\\\\Work\\\\testbrain1.nrrd\"\n vl = 
slicer.modules.volumes.logic()\n brain1Node = vl.AddArchetypeVolume(fileName, \"testbrain1\", 0)\n else:\n nodes = slicer.util.getNodes('testbrain1.nrrd')\n brain1Node = nodes[0]\n\n if not slicer.util.getNodes('testbrain2*'):\n import os\n fileName = \"C:\\\\Work\\\\testbrain2.nrrd\"\n vl = slicer.modules.volumes.logic()\n brain2Node = vl.AddArchetypeVolume(fileName, \"testbrain2\", 0)\n #TODO else assign from list\n\n # if not slicer.util.getNodes('movingToFixed*'):\n # # Create transform node\n # transform = slicer.vtkMRMLLinearTransformNode()\n # transform.SetName('movingToFixed')\n # slicer.mrmlScene.AddNode(transform)\n\n # transform = slicer.util.getNode('movingToFixed')\n \n # ###\n # # neutral.SetAndObserveTransformNodeID(transform.GetID())\n # ###\n \n compositeNodes = slicer.util.getNodes('vtkMRMLSliceCompositeNode*')\n for compositeNode in compositeNodes.values():\n compositeNode.SetBackgroundVolumeID(brain1Node.GetID())\n compositeNode.SetForegroundVolumeID(brain2Node.GetID())\n compositeNode.SetForegroundOpacity(0.5)\n applicationLogic = slicer.app.applicationLogic()\n applicationLogic.FitSliceToAll()", "def load_data(self):\n self.data = self.read_var(self.datavar)\n self.test_shape(self.datavar, self.data.shape, 2)", "def test(self, dataset):\n model_path = os.path.join(self.check_point, 'model.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' % model_path)\n\n self.model = torch.load(model_path)\n print(self.model)\n model_parameters = filter(lambda p: p.requires_grad, self.model.parameters())\n params = sum([np.prod(p.size()) for p in model_parameters])\n print(1.0 * params / (1000 * 1000))\n _, _, stats, outputs, names = self._check_PSNR(dataset, is_test=True)\n return stats, outputs, names", "def get_data(generator, random, bench_id):\n x_train, y_train, x_test, y_test = generator(random, bench_id)\n x_train = np.c_[np.ones(len(x_train)), x_train]\n x_test = np.c_[np.ones(len(x_test)), x_test]\n return x_train, y_train, x_test, y_test", "def test(self, dataset):\n model_path = os.path.join(self.check_point, 'model.pt')\n if not os.path.exists(model_path):\n raise Exception('Cannot find %s.' 
%model_path)\n \n self.model = torch.load(model_path)\n _, _, stats, outputs = self._check_PSNR(dataset, is_test=True)\n return stats, outputs", "def test_significance_window(tensor_directory, analysis_name, window):\n\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = h5py.File(os.path.join(tensor_directory, analysis_name + \".hdf5\"), \"r\")\n activity_dataset = analysis_file[\"Data\"]\n metadata_dataset = analysis_file[\"metadata\"]\n number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)\n print(\"metadata_dataset\", np.shape(metadata_dataset))\n \"\"\"\n\n\n # Open Analysis Dataframe\n analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + \"_Trialwise_.h5\"), mode=\"r\")\n activity_dataset = analysis_file.root[\"Data\"]\n metadata_dataset = analysis_file.root[\"Trial_Details\"]\n activity_dataset = np.array(activity_dataset)\n metadata_dataset = np.array(metadata_dataset)\n\n activity_dataset = np.nan_to_num(activity_dataset)\n\n number_of_trials, number_of_timepoints, number_of_pixels = np.shape(activity_dataset)\n\n\n print(\"Number of timepoints\", number_of_timepoints)\n print(\"number of pixels\", number_of_pixels)\n print(\"number of trials\", number_of_trials)\n\n # Create P and Slope Tensors\n p_value_tensor = np.ones(number_of_pixels)\n slope_tensor = np.zeros(number_of_pixels)\n\n # Get Timepoint Data\n timepoint_activity = activity_dataset[:, window]\n print(\"Timepoint activity shape\", np.shape(timepoint_activity))\n timepoint_activity = np.mean(timepoint_activity, axis=1)\n\n for pixel_index in tqdm(range(number_of_pixels), position=1, desc=\"Pixel\", leave=False):\n\n # Package Into Dataframe\n pixel_activity = timepoint_activity[:, pixel_index]\n pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)\n\n # Fit Mixed Effects Model\n p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)\n p_value_tensor[pixel_index] = p_value\n slope_tensor[pixel_index] = slope\n\n return p_value_tensor, slope_tensor", "def test_predict_from_examples():\n examples = []\n for i in range(len(DATA)):\n (value, attribute_map) = DATA[i]\n print 'tuple created : \\n1:' + str(value) + \"\\n2:\" + str(attribute_map)\n ex = VowpalExample(i, value)\n #Extracting the features from all of the namespaces in the map. 
\n for (featureNamespace, features) in attribute_map.items():\n #Each \"section\" is \n ex.add_section(featureNamespace, features)\n examples.append(ex)\n train = examples[:-2]\n test = examples[-2:]\n vw = Vowpal(PATH_VW, './vw.%s', {'--passes' : '10' })\n preds = vw.predict_from_examples(train, test)\n for (id, value) in preds:\n print 'prediction for %s is %s' % (id, value)", "def test_api_predictors_get(self):\n pass", "def setUp(self):\r\n self.value_for_seed = 20\r\n\r\n # Single comp.\r\n self.labels1 = ['foo', 'bar']\r\n self.dists1 = [[1, 2, 3], [7, 8]]\r\n\r\n # Multiple comps.\r\n self.labels2 = ['foo', 'bar', 'baz']\r\n self.dists2 = [[1, 2, 3], [7, 8], [9, 10, 11]]\r\n\r\n # Too few obs.\r\n self.labels3 = ['foo', 'bar', 'baz']\r\n self.dists3 = [[1], [7], [9, 10, 11]]", "def test_education():\n test_path = tempfile.mkdtemp()\n x_train, metadata = education(test_path)\n try:\n assert x_train.shape == (50, 6)\n except:\n shutil.rmtree(test_path)\n raise()", "def load_data():\n\n print('Loading and Visualizing Data ...')\n\n file_name = path.join(getcwd(), 'ex3', 'src', 'data', 'ex3data1')\n data = scipy.io.loadmat(file_name)\n\n # training data stored in arrays X, y\n # y should be a row vector of labels\n return data['X'], data['y'].T[0]", "def test_getitem():\n atom = ATOMClassifier(X_class, y_class, random_state=1)\n atom.run(\"Tree\")\n assert atom.tree[\"alcohol\"].equals(atom.dataset[\"alcohol\"])\n with pytest.raises(TypeError, match=r\".*subscriptable with type str.*\"):\n print(atom.tree[2])", "def setUp(self):\n self._m = 100\n self._n = 30\n self._k = 5\n self._increment = 20\n self._A = get_data(ExperimentType.ExampleNo2)(self._m, np.arange(2 * self._k).astype(float))\n self._approximation = random_id(self._A, self._k, self._increment)\n self._B = self._approximation.B\n self._P = np.array(self._approximation.P)\n self._A = self._A.as_numpy_arr()\n self._n = self._A.shape[1]\n self._approximation = self._approximation.as_numpy_arr()", "def setUp(self):\n self.dataset = self.dataset_cls()", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def setUp(self):\n temperature = np.array([[185.0, 260.65, 273.15, 338.15]], dtype=np.float32)\n self.temperature = set_up_variable_cube(temperature)\n humidity = np.array([[60.0, 70.0, 75.0, 80.0]], dtype=np.float32)\n self.relative_humidity = set_up_variable_cube(\n humidity, name=\"relative_humidity\", units=\"%\"\n )\n pressure = np.array([[1.0e5, 9.9e4, 9.85e4, 9.8e4]], dtype=np.float32)\n self.pressure = set_up_variable_cube(pressure, name=\"air_pressure\", units=\"Pa\")\n self.expected_wbt_data = np.array(\n [[185.0, 259.88306, 271.78006, 333.96066]], dtype=np.float32\n )", "def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }", "def setUp(self):\n self.testdatapath = os.path.join(mkdtemp())\n self.testfilenames = [os.path.join(self.testdatapath, \"0001.nc\")]\n\n self.gpis = [1, 10, 11, 12]\n self.lons = [0, 0, 1, 1]\n self.lats = [1, 1, 0, 0]\n self.cells = [1, 1, 1, 1]\n self.grid = grids.CellGrid(self.lons, self.lats, self.cells, self.gpis)", "def 
test_np_memory_layout_add_input_tensor_pystepiocallback():\n\n def _test(transposedInput, transposedOutput):\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedInput:\n input1Value = np.transpose(input1Value, [1, 0])\n output1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedOutput:\n output1Value = np.transpose(output1Value, [1, 0])\n\n with pytest.raises(\n (Exception, RuntimeError, popart.popart_exception)\n ) as e_info:\n\n # pylint: disable=unused-argument\n def input_callback(id, prefetch):\n return input1Value\n\n def input_complete_callback(_): # id is an unused parameter\n pass\n\n def output_callback(_): # id is an unused parameter\n return output1Value\n\n def output_complete_callback(_): # id is an unused parameter\n pass\n\n stepio = popart.PyStepIOCallback(\n input_callback,\n input_complete_callback,\n output_callback,\n output_complete_callback,\n )\n\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]\n\n _test(transposedInput=True, transposedOutput=False)\n _test(transposedInput=False, transposedOutput=True)", "def demonstration_examples(kind):\n\n DIR = './data/demos/'\n RNN_DEMO = \"-demo_rnn_examples\"\n EXT = '.pkl'\n\n if kind == 'rnn':\n return pd.read_pickle(DIR+RNN_DEMO+EXT)", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)" ]
[ "0.6889487", "0.65411484", "0.62041", "0.61115885", "0.60792667", "0.60625935", "0.5973008", "0.5798675", "0.579797", "0.57839084", "0.5775764", "0.5711267", "0.57093495", "0.5704983", "0.5672286", "0.5643146", "0.5622259", "0.5604334", "0.5597279", "0.5581926", "0.558076", "0.5573886", "0.55467963", "0.55462253", "0.5545013", "0.55389446", "0.5538212", "0.5531791", "0.55170566", "0.55123717", "0.5501795", "0.5493862", "0.54827535", "0.54823494", "0.54781055", "0.54713607", "0.54701394", "0.5457522", "0.5445755", "0.54388285", "0.543875", "0.5437019", "0.54244787", "0.54244757", "0.54078555", "0.54077554", "0.5405763", "0.54027545", "0.5401663", "0.5397861", "0.5397861", "0.5397861", "0.5397861", "0.5393307", "0.5366809", "0.5359045", "0.53578895", "0.53494453", "0.5341475", "0.5337517", "0.53356194", "0.5332557", "0.5316538", "0.53101254", "0.5309958", "0.5297681", "0.5292696", "0.5291052", "0.52708805", "0.5266438", "0.52641845", "0.5263294", "0.52626467", "0.52585566", "0.5251969", "0.5242423", "0.5240648", "0.5239713", "0.5237465", "0.52365184", "0.5236094", "0.5234752", "0.523056", "0.5218264", "0.52179736", "0.52177644", "0.5213502", "0.5211706", "0.5209398", "0.5209125", "0.52076596", "0.520705", "0.52041197", "0.5200524", "0.5194009", "0.51924425", "0.51919067", "0.51887023", "0.5187254", "0.5185873" ]
0.7280651
0
Test the popxl advanced getting / writing tensor data example that shows exactly when device-host transfers occur
def test_documentation_popxl_adv_get_write(self):
    filename = "tensor_get_write_adv.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_get_set_tensors(self):\n filename = \"tensor_get_write.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_remote_buffer() -> None:\n # Prepare the input and output data\n shape_1 = (1, 3, 5)\n shape_2 = (7, 11)\n d_type_1 = np.dtype(\"float32\")\n d_type_2 = np.dtype(\"float16\")\n\n data: Dict[str, np.ndarray] = {}\n\n # Store and load data for the first tensor\n data[\"store_in_1\"] = np.random.rand(*shape_1).astype(d_type_1)\n data[\"load_in_1\"] = np.zeros(shape_1).astype(d_type_1)\n data[\"load_in_1_inplace\"] = np.zeros(shape_1).astype(d_type_1)\n # Store and load data for the second tensor\n data[\"store_in_2\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_2\"] = np.zeros(shape_2).astype(d_type_2)\n # Store and load data for the third tensor\n data[\"store_in_3\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_3_inplace\"] = np.zeros(shape_2).astype(d_type_2)\n\n ir, d2h_streams = build_model(data)\n\n # Get the tensor_ids\n labels = (\n \"load_in_1\",\n \"load_in_1_inplace\",\n \"load_out_1\",\n \"load_out_1_inplace\",\n \"load_in_2\",\n \"load_in_3_inplace\",\n \"load_out_2\",\n \"load_out_3_inplace\",\n )\n tensor_d2h = {label: d2h_streams[label] for label in labels}\n\n session = popxl.Session(ir, \"ipu_model\")\n with session:\n outputs = session.run()\n\n # Assert that the tensors are correct\n remote_load_scenarios = (\n \"1\",\n \"1_inplace\",\n \"2\",\n \"3_inplace\",\n )\n for scenario in remote_load_scenarios:\n print(f\"Now asserting remote load scenario {scenario}\")\n # Get data to assert\n store_in_data = data[f\"store_in_{scenario.replace('_inplace', '')}\"]\n load_in_data_before_op_call = data[f\"load_in_{scenario}\"]\n load_in_data_after_op_call = outputs[tensor_d2h[f\"load_in_{scenario}\"]]\n load_out_data = outputs[tensor_d2h[f\"load_out_{scenario}\"]]\n shape = shape_1 if \"1\" in scenario else shape_2\n d_type = d_type_1 if \"1\" in scenario else d_type_2\n inplace = True if \"inplace\" in scenario else False\n # Assert shape and type\n assert load_in_data_after_op_call.shape == shape\n assert load_in_data_after_op_call.dtype == d_type\n assert load_out_data.shape == shape\n assert load_out_data.dtype == d_type\n\n # Assert that the data has been loaded\n assert np.allclose(store_in_data, load_out_data)\n if inplace:\n # Assert that the load in data has been overwritten\n assert np.allclose(load_in_data_after_op_call, store_in_data)\n else:\n # Assert that the load in data has not been overwritten\n assert np.allclose(load_in_data_after_op_call, load_in_data_before_op_call)", "def build_model(\n data: Dict[str, np.array]\n) -> Tuple[popxl.Ir, Dict[str, DeviceToHostStream]]:\n ir = popxl.Ir()\n main = ir.main_graph\n\n with main:\n # Placeholder for tensor ids\n tensors = {}\n # Create variable tensors from the data\n for name in data.keys():\n tensors[name] = popxl.variable(data[name], name=name)\n\n # Placeholder for device to host streams\n d2h_streams = {}\n\n # Store and load the first tensor\n remote_buffer_1 = RemoteBuffer(\n tensor_shape=tensors[\"store_in_1\"]._pb_tensor.info.shape(),\n tensor_dtype=dtype.as_dtype(\n tensors[\"store_in_1\"]._pb_tensor.info.data_type_lcase()\n ),\n entries=1,\n )\n offset_tensor_1 = popxl.constant(0, name=\"offset_1\")\n # Ensure that the ops are in the order we define them in\n with popxl.in_sequence(True):\n ops.remote_store(\n remote_buffer=remote_buffer_1,\n offset=offset_tensor_1,\n t=tensors[\"store_in_1\"],\n )\n 
tensors[\"load_out_1\"] = ops.remote_load(\n remote_buffer=remote_buffer_1, offset=offset_tensor_1, name=\"load_out_1\"\n )\n tensors[\"load_out_1_inplace\"] = ops.remote_load_(\n remote_buffer=remote_buffer_1,\n offset=offset_tensor_1,\n t=tensors[\"load_in_1_inplace\"],\n )\n # Anchor the input tensors to the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_1\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_1_inplace\")\n # Anchor the output tensors of the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_1\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_1_inplace\")\n\n # Store and load the second and third tensor using a new buffer id\n remote_buffer_2 = RemoteBuffer(\n tensor_shape=tensors[\"store_in_2\"]._pb_tensor.info.shape(),\n tensor_dtype=dtype.as_dtype(\n tensors[\"store_in_2\"]._pb_tensor.info.data_type_lcase()\n ),\n entries=2,\n )\n # Index starts at 0\n offset_tensor_2 = popxl.constant(0, name=\"offset_2\")\n offset_tensor_3 = 1 # Test that the int version of offset works\n ops.remote_store(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_2,\n t=tensors[\"store_in_2\"],\n )\n ops.remote_store(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_3,\n t=tensors[\"store_in_3\"],\n )\n tensors[\"load_out_2\"] = ops.remote_load(\n remote_buffer=remote_buffer_2, offset=offset_tensor_2, name=\"load_out_2\"\n )\n tensors[\"load_out_3_inplace\"] = ops.remote_load_(\n remote_buffer=remote_buffer_2,\n offset=offset_tensor_3,\n t=tensors[\"load_in_3_inplace\"],\n )\n\n # Anchor the input tensors to the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_2\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_in_3_inplace\")\n # Anchor the output tensors of the load operator\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_2\")\n d2h_streams = make_stream(d2h_streams, tensors, \"load_out_3_inplace\")\n\n return ir, d2h_streams", "def test_np_memory_layout_add_input_tensor_pystepiocallback():\n\n def _test(transposedInput, transposedOutput):\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedInput:\n input1Value = np.transpose(input1Value, [1, 0])\n output1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n if transposedOutput:\n output1Value = np.transpose(output1Value, [1, 0])\n\n with pytest.raises(\n (Exception, RuntimeError, popart.popart_exception)\n ) as e_info:\n\n # pylint: disable=unused-argument\n def input_callback(id, prefetch):\n return input1Value\n\n def input_complete_callback(_): # id is an unused parameter\n pass\n\n def output_callback(_): # id is an unused parameter\n return output1Value\n\n def output_complete_callback(_): # id is an 
unused parameter\n pass\n\n stepio = popart.PyStepIOCallback(\n input_callback,\n input_complete_callback,\n output_callback,\n output_complete_callback,\n )\n\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]\n\n _test(transposedInput=True, transposedOutput=False)\n _test(transposedInput=False, transposedOutput=True)", "def test_byteps_push_pull(self):\n dtypes = ['float16', 'float32', 'float64']\n dims = [1, 2, 3]\n count = 0\n ctx = self._current_context()\n shapes = [(), (17), (17, 17), (17, 17, 17)]\n for dtype, dim in itertools.product(dtypes, dims):\n # MXNet uses gpu_id as part of the seed, so to get identical seeds\n # we must set a context.\n mx.random.seed(10 + 10 * bps.rank(), ctx=ctx)\n tensor = mx.nd.random.uniform(-100, 100, shape=shapes[dim],\n ctx=ctx)\n tensor = tensor.astype(dtype)\n input = tensor.asnumpy()\n\n bps.byteps_declare_tensor(\"tensor_\" + str(count))\n bps.byteps_push_pull(tensor, name=\"tensor_\"+str(count))\n tensor.wait_to_read()\n output = tensor.asnumpy()\n assert np.allclose(input, output)\n count += 1\n\n print('test_byteps_push_pull passed')", "def test_co_transfer():\n test_path = tempfile.mkdtemp()\n x_train, metadata = co_transfer(test_path)\n try:\n assert x_train.shape == (7, 2)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_create_device_data(self):\n pass", "def test_np_memory_layout_add_input_tensor_pystepio():\n\n builder = popart.Builder()\n\n # Create a random constant and transpose it\n np.random.seed(1)\n input1 = builder.addInputTensor(\"INT32\", [2, 2])\n\n # Run a session to prove this\n output1 = builder.aiOnnx.identity([input1])\n builder.addOutputTensor(output1)\n anchorConfig = {output1: popart.AnchorReturnType(\"ALL\")}\n\n dataFlow = popart.DataFlow(1, anchorConfig)\n deviceConfig = {\"numIPUs\": 1}\n dm = popart.DeviceManager()\n device = dm.createIpuModelDevice(deviceConfig)\n session = popart.InferenceSession(\n fnModel=builder.getModelProto(), dataFlow=dataFlow, deviceInfo=device\n )\n\n # Compile graph and place weights onto it\n session.prepareDevice()\n session.weightsFromHost()\n anchors = session.initAnchorArrays()\n\n # Feed the session with a transposed (non-contiguous) tensor.\n input1Value = np.random.randint(0, 100, size=(2, 2), dtype=\"int32\")\n input1Value = np.transpose(input1Value, [1, 0])\n\n with pytest.raises((RuntimeError, popart.popart_exception)) as e_info:\n stepio = popart.PyStepIO({input1: input1Value}, anchors)\n session.run(stepio)\n\n assert \"contiguous\" in e_info.value.args[0]", "def test():\n quant_handle = QuantAndDeQuantGPU()\n import torch\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.INFO)\n tensor = torch.Tensor(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])).cuda()\n logging.info(\"Origin Data: \")\n logging.info(tensor)\n\n start_time = datetime.datetime.now()\n quant_tensor = quant_handle(tensor)\n end_time = datetime.datetime.now()\n\n logging.info(\"Quant Data: \")\n logging.info(quant_tensor)\n\n data_expected = np.array([\n 0.0000000000, 1.0000000000, 2.0000000000, 2.9536523819, 4.0000000000,\n 4.9674310684, 5.9073047638, 7.0250086784, 8.0000000000, 8.7240619659\n ])\n\n logging.info(\"Data expected: \")\n logging.info(\" \".join([str(v) for v in data_expected]))\n\n data_diff = quant_tensor.data.detach().cpu().numpy() - data_expected\n flag = \"success.\"\n for num in data_diff:\n if abs(num) > 0.000000001:\n flag = \"failed.\"\n\n run_time = end_time - start_time\n logging.info(\"QuantAndDeQuantGPU time: %s\", str(run_time))\n 
logging.info(\"QuantAndDeQuantGPU %s\", flag)", "def test_significance_individual_timepoints(tensor_directory, analysis_name):\n\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = h5py.File(os.path.join(tensor_directory, analysis_name + \".hdf5\"), \"r\")\n activity_dataset = analysis_file[\"Data\"]\n metadata_dataset = analysis_file[\"metadata\"]\n number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)\n print(\"metadata_dataset\", np.shape(metadata_dataset))\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + \"_Trialwise_.h5\"), mode=\"r\")\n activity_dataset = analysis_file.root[\"Data\"]\n metadata_dataset = analysis_file.root[\"Trial_Details\"]\n\n # Create P and Slope Tensors\n p_value_tensor = np.ones((number_of_timepoints, number_of_pixels))\n slope_tensor = np.zeros((number_of_timepoints, number_of_pixels))\n\n for timepoint_index in tqdm(range(number_of_timepoints), position=0, desc=\"Timepoint\"):\n\n # Get Timepoint Data\n timepoint_activity = activity_dataset[timepoint_index]\n\n for pixel_index in tqdm(range(number_of_pixels), position=1, desc=\"Pixel\", leave=True):\n\n # Package Into Dataframe\n pixel_activity = timepoint_activity[:, pixel_index]\n pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)\n\n # Fit Mixed Effects Model\n p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)\n p_value_tensor[timepoint_index, pixel_index] = p_value\n slope_tensor[timepoint_index, pixel_index] = slope\n\n\n # Save These Tensors\n np.save(os.path.join(tensor_directory, analysis_name + \"_p_value_tensor.npy\"), p_value_tensor)\n np.save(os.path.join(tensor_directory, analysis_name + \"_slope_tensor.npy\"), slope_tensor)", "def test_output_data():\n output_params = dict(\n type=\"geodetic\",\n format=\"GeoTIFF\",\n path=OUT_DIR,\n pixelbuffer=0,\n metatiling=1,\n bands=1,\n dtype=\"int16\"\n )\n output = gtiff.OutputData(output_params)\n assert output.path == OUT_DIR\n assert output.file_extension == \".tif\"\n tp = BufferedTilePyramid(\"geodetic\")\n tile = tp.tile(5, 5, 5)\n # get_path\n assert output.get_path(tile) == os.path.join(*[\n OUT_DIR, \"5\", \"5\", \"5\"+\".tif\"])\n # prepare_path\n try:\n temp_dir = os.path.join(*[OUT_DIR, \"5\", \"5\"])\n output.prepare_path(tile)\n assert os.path.isdir(temp_dir)\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n # profile\n assert isinstance(output.profile(tile), dict)\n # write\n try:\n tile.data = np.ones((1, ) + tile.shape)*128\n output.write(tile)\n # tiles_exist\n assert output.tiles_exist(tile)\n # read\n data = output.read(tile).data\n assert isinstance(data, np.ndarray)\n assert not data[0].mask.any()\n finally:\n shutil.rmtree(temp_dir, ignore_errors=True)\n # read empty\n data = output.read(tile).data\n assert isinstance(data, np.ndarray)\n assert data[0].mask.all()\n # empty\n empty = output.empty(tile)\n assert isinstance(empty, ma.MaskedArray)\n assert not empty.any()\n # deflate with predictor\n output_params.update(compression=\"deflate\", predictor=2)\n output = gtiff.OutputData(output_params)\n assert output.profile(tile)[\"compress\"] == \"deflate\"\n assert output.profile(tile)[\"predictor\"] == 2", "def test_add_get_tensor(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 1D tensors of all data types\n data = mock_data.create_data(10)\n add_get_arrays(dataset, data)", "def test_documentation_popxl_addition_variable(self):\n filename = 
\"tensor_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array):\n def recv_data(k, data_stream, actor_set, real_data_tasks_i):\n for real_data in data_stream:\n tmp = []\n tmp.append(real_data.state)\n tmp.append(real_data.action)\n tmp.append(real_data.reward)\n tmp.append(real_data.next_state)\n tmp.append(real_data.done)\n tmp.append(actor_set[k]['w'][real_data.idx])\n tmp.append(actor_set[k]['i'][real_data.idx])\n tmp.append(actor_set[k]['t'][real_data.idx])\n tmp.append(real_data.timestamp)\n local_dict[actor_set[k]['i'][real_data.idx]] = tmp\n cache_array[actor_set[k]['i'][real_data.idx]] |= 2**pid\n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['states'].append(decom_state) #.to(device))\n real_data_tasks_i['actions'].append(torch.LongTensor([real_data.action])) #.to(device))\n real_data_tasks_i['rewards'].append(torch.FloatTensor([real_data.reward])) #.to(device))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.next_state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['next_states'].append(decom_next_state) #.to(device))\n real_data_tasks_i['dones'].append(torch.FloatTensor([real_data.done])) #.to(device))\n real_data_tasks_i['batch_weights'].append(torch.FloatTensor([actor_set[k]['w'][real_data.idx]])) #.to(device))\n real_data_tasks_i['batch_idxes'].append(actor_set[k]['i'][real_data.idx])\n # is the data overwrited?\n real_data_tasks_i['batch_timestamp_store'].append(actor_set[k]['t'][real_data.idx])\n real_data_tasks_i['batch_timestamp_real'].append(real_data.timestamp)\n conn = grpc.insecure_channel(port_dict['replay_ip'] + ':' + port_dict['sampleDataPort'])\n client = apex_data_pb2_grpc.SampleDataStub(channel=conn)\n local_dict = {}\n while True:\n batch_timestamp_real = []\n batch_timestamp_store = []\n batch_weights = []\n batch_idxes = []\n\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n res_batch = client.Send(apex_data_pb2.SampleDataRequest(batch_size=args.batch_size, beta = args.beta))\n actor_ids, data_ids, timestamps, weights, idxes = res_batch.actor_ids, res_batch.data_ids, res_batch.timestamp, res_batch.weights, res_batch.idxes\n actor_set = {}\n cached_value = {'states':{},'actions':{},'rewards':{},'next_states':{},'dones':{},'batch_weights':{},'batch_idxes':{},'batch_timestamp_store':{},'batch_timestamp_real':{}}\n for i in range(len(actor_ids)):\n set_a = actor_set.get(actor_ids[i], False)\n if set_a == False:\n actor_set[actor_ids[i]] = {}\n set_a = actor_set[actor_ids[i]]\n set_a['d'] = []\n set_a['w'] = []\n set_a['i'] = []\n set_a['t'] = []\n cached_value['states'][actor_ids[i]] = []\n cached_value['actions'][actor_ids[i]] = []\n cached_value['rewards'][actor_ids[i]] = []\n cached_value['next_states'][actor_ids[i]] = []\n cached_value['dones'][actor_ids[i]] = []\n cached_value['batch_weights'][actor_ids[i]] = []\n cached_value['batch_idxes'][actor_ids[i]] = []\n cached_value['batch_timestamp_store'][actor_ids[i]] = []\n cached_value['batch_timestamp_real'][actor_ids[i]] = []\n cache_id = actor_ids[i]*local_size+data_ids[i]\n cache_trans = cache_array[cache_id]\n if cache_trans & 2**pid == 0:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n if cache_trans == 0 and 
local_dict.get(cache_id, False) != False:\n del local_dict[cache_id]\n else:\n try:\n state_tmp = local_dict[cache_id][0]\n action_tmp = local_dict[cache_id][1]\n reward_tmp = local_dict[cache_id][2] \n next_state_tmp = local_dict[cache_id][3] \n done_tmp = local_dict[cache_id][4] \n batch_weight_tmp = local_dict[cache_id][5] \n batch_idx_tmp = local_dict[cache_id][6] \n batch_store_tmp = local_dict[cache_id][7] \n batch_real_tmp = local_dict[cache_id][8] \n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['states'][actor_ids[i]].append(decom_state)\n cached_value['actions'][actor_ids[i]].append(torch.LongTensor([action_tmp]))\n cached_value['rewards'][actor_ids[i]].append(torch.FloatTensor([reward_tmp]))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(next_state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['next_states'][actor_ids[i]].append(decom_next_state)\n cached_value['dones'][actor_ids[i]].append(torch.FloatTensor([done_tmp]))\n cached_value['batch_weights'][actor_ids[i]].append(torch.FloatTensor([batch_weight_tmp]))\n cached_value['batch_idxes'][actor_ids[i]].append(batch_idx_tmp)\n cached_value['batch_timestamp_store'][actor_ids[i]].append(batch_store_tmp)\n cached_value['batch_timestamp_real'][actor_ids[i]].append(batch_real_tmp)\n except:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n real_data_links = {}\n real_data_tasks = {}\n for k, v in actor_set.items():\n actor_ip, data_port = actor_id_to_ip_dataport[k]\n conn_actor = grpc.insecure_channel(actor_ip + ':' + data_port)\n client_actor = apex_data_pb2_grpc.SendRealDataStub(channel=conn_actor)\n real_data_links[k] = client_actor.Send(apex_data_pb2.RealBatchRequest(idxes=v['d']))\n real_data_tasks[k] = {}\n real_data_tasks[k]['states'] = cached_value['states'][k]\n real_data_tasks[k]['actions'] = cached_value['actions'][k]\n real_data_tasks[k]['rewards'] = cached_value['rewards'][k]\n real_data_tasks[k]['next_states'] = cached_value['next_states'][k]\n real_data_tasks[k]['dones'] = cached_value['dones'][k]\n real_data_tasks[k]['batch_weights'] = cached_value['batch_weights'][k]\n real_data_tasks[k]['batch_idxes'] = cached_value['batch_idxes'][k]\n real_data_tasks[k]['batch_timestamp_store'] = cached_value['batch_timestamp_store'][k]\n real_data_tasks[k]['batch_timestamp_real'] = cached_value['batch_timestamp_real'][k]\n threads = []\n for k, v in real_data_links.items():\n t = threading.Thread(target=recv_data, args=(k, v, actor_set, real_data_tasks[k],))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n for k, v in real_data_tasks.items():\n states += v['states']\n actions += v['actions']\n rewards += v['rewards']\n next_states += v['next_states']\n dones += v['dones']\n batch_weights += v['batch_weights']\n batch_idxes += v['batch_idxes']\n batch_timestamp_real += v['batch_timestamp_real']\n batch_timestamp_store += v['batch_timestamp_store']\n\n states = torch.cat(states,0).to(device)\n actions = torch.cat(actions,0).to(device)\n rewards = torch.cat(rewards,0).to(device)\n next_states = torch.cat(next_states,0).to(device)\n dones = torch.cat(dones,0).to(device)\n batch_weights = torch.cat(batch_weights,0).to(device)\n\n batch = [states, actions, rewards, next_states, dones, batch_weights, batch_idxes]\n batch_queue.put(batch)\n data, batch = None, None", "def test(model, dataloader, params, args, 
val):\n\n # evaluation mode\n model.eval()\n\n # initialise buffers\n dice_lv_buffer = []\n dice_myo_buffer = []\n dice_rv_buffer = []\n\n mcd_lv_buffer = []\n hd_lv_buffer = []\n mcd_myo_buffer = []\n hd_myo_buffer = []\n mcd_rv_buffer = []\n hd_rv_buffer = []\n\n mean_mag_grad_detJ_buffer = []\n negative_detJ_buffer = []\n\n\n with tqdm(total=len(dataloader)) as t:\n # iterate over validation subjects\n for idx, (image_ed_batch, image_es_batch, label_ed_batch, label_es_batch) in enumerate(dataloader):\n # (data all in shape of (c, N, H, W))\n\n # extend to (N, c, H, W)\n image_ed_batch = image_ed_batch.permute(1, 0, 2, 3).to(device=args.device)\n image_es_batch = image_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n label_es_batch = label_es_batch.permute(1, 0, 2, 3).to(device=args.device)\n\n with torch.no_grad():\n # compute optical flow and warped ED images towards ES\n dvf = model(image_ed_batch, image_es_batch)\n\n # transform label mask of ES frame\n warped_label_es_batch = resample_transform(label_es_batch.float(), dvf, interp='nearest')\n\n\n \"\"\" Move data to device \"\"\"\n if args.cuda:\n # move data to cpu to calculate metrics\n # (the axis permutation is to comply with metric calculation code which takes input shape H, W, N)\n warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.cpu().numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n else:\n # CPU version of the code\n warped_label_es_batch = warped_label_es_batch.squeeze(1).numpy().transpose(1, 2, 0)\n label_ed_batch = label_ed_batch.squeeze(0).numpy().transpose(1, 2, 0)\n dvf = dvf.data.numpy().transpose(0, 2, 3, 1) # (N, H, W, 2)\n \"\"\"\"\"\"\n\n \"\"\" Calculate the metrics (only works with SAX images) \"\"\"\n # (optional) extract 3 slices (apical, mid-ventricle and basal)\n if not args.all_slices:\n num_slices = label_ed_batch.shape[-1]\n apical_idx = int(round((num_slices - 1) * 0.75)) # 75% from basal\n mid_ven_idx = int(round((num_slices - 1) * 0.5)) # 50% from basal\n basal_idx = int(round((num_slices - 1) * 0.25)) # 25% from basal\n slices_idx = [apical_idx, mid_ven_idx, basal_idx]\n\n warped_label_es_batch = warped_label_es_batch[:, :, slices_idx]\n label_ed_batch = label_ed_batch[:, :, slices_idx]\n dvf = dvf[slices_idx, :, :, :] # needed for detJac\n\n # dice\n dice_lv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=1)\n dice_myo = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=2)\n dice_rv = categorical_dice_stack(warped_label_es_batch, label_ed_batch, label_class=3)\n\n dice_lv_buffer += [dice_lv]\n dice_myo_buffer += [dice_myo]\n dice_rv_buffer += [dice_rv]\n\n # contour distances\n mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)\n mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)\n mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)\n\n # determinant of Jacobian\n mean_grad_detJ, mean_negative_detJ = detJac_stack(dvf)\n\n\n # update buffers\n mcd_lv_buffer += [mcd_lv]\n hd_lv_buffer += [hd_lv]\n mcd_myo_buffer += [mcd_myo]\n hd_myo_buffer += [hd_myo]\n mcd_rv_buffer += [mcd_rv]\n hd_rv_buffer += [hd_rv]\n\n mean_mag_grad_detJ_buffer += [mean_grad_detJ]\n negative_detJ_buffer += [mean_negative_detJ]\n\n t.update()\n\n # 
construct metrics dict\n metrics = {'dice_lv_mean': np.mean(dice_lv_buffer), 'dice_lv_std': np.std(dice_lv_buffer),\n 'dice_myo_mean': np.mean(dice_myo_buffer), 'dice_myo_std': np.std(dice_myo_buffer),\n 'dice_rv_mean': np.mean(dice_rv_buffer), 'dice_rv_std': np.std(dice_rv_buffer),\n\n 'mcd_lv_mean': np.mean(mcd_lv_buffer), 'mcd_lv_std': np.std(mcd_lv_buffer),\n 'mcd_myo_mean': np.mean(mcd_myo_buffer), 'mcd_myo_std': np.std(mcd_myo_buffer),\n 'mcd_rv_mean': np.mean(mcd_rv_buffer), 'mcd_rv_std': np.std(mcd_rv_buffer),\n\n 'hd_lv_mean': np.mean(hd_lv_buffer), 'hd_lv_std': np.std(hd_lv_buffer),\n 'hd_myo_mean': np.mean(hd_myo_buffer), 'hd_myo_std': np.std(hd_myo_buffer),\n 'hd_rv_mean': np.mean(hd_rv_buffer), 'hd_rv_std': np.std(hd_rv_buffer),\n\n 'mean_mag_grad_detJ_mean': np.mean(mean_mag_grad_detJ_buffer),\n 'mean_mag_grad_detJ_std': np.std(mean_mag_grad_detJ_buffer),\n\n 'negative_detJ_mean': np.mean(negative_detJ_buffer),\n 'negative_detJ_std': np.std(negative_detJ_buffer)\n }\n\n\n if not val:\n # testing only: save all metrics evaluated for all test subjects in pandas dataframe\n test_result_dir = os.path.join(args.model_dir, \"test_results\")\n if not os.path.exists(test_result_dir):\n os.makedirs(test_result_dir)\n\n # save metrics results mean & std\n xutils.save_dict_to_json(metrics,\n f\"{test_result_dir}/test_results_3slices_{not args.all_slices}.json\")\n\n # save accuracy metrics of every subject\n subj_id_buffer = dataloader.dataset.dir_list\n df_buffer = []\n column_method = ['DL'] * len(subj_id_buffer)\n for struct in ['LV', 'MYO', 'RV']:\n if struct == 'LV':\n ls_dice = dice_lv_buffer\n ls_mcd = mcd_lv_buffer\n ls_hd = hd_lv_buffer\n elif struct == 'MYO':\n ls_dice = dice_myo_buffer\n ls_mcd = mcd_myo_buffer\n ls_hd = hd_myo_buffer\n elif struct == 'RV':\n ls_dice = dice_rv_buffer\n ls_mcd = mcd_rv_buffer\n ls_hd = hd_rv_buffer\n\n ls_struct = [struct] * len(subj_id_buffer)\n data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'Structure': ls_struct,\n 'Dice': ls_dice,\n 'MCD': ls_mcd,\n 'HD': ls_hd}\n df_buffer += [pd.DataFrame(data=data)]\n # concatenate df and save\n metrics_df = pd.concat(df_buffer, axis=0)\n metrics_df.to_pickle(f\"{test_result_dir}/test_accuracy_results_3slices_{not args.all_slices}.pkl\")\n\n # save detJac metrics for every subject\n jac_data = {'Method': column_method,\n 'ID': subj_id_buffer,\n 'GradDetJac': mean_mag_grad_detJ_buffer,\n 'NegDetJac': negative_detJ_buffer}\n jac_df = pd.DataFrame(data=jac_data)\n jac_df.to_pickle(f\"{test_result_dir}/test_Jacobian_results_3slices{not args.all_slices}.pkl\")\n\n return metrics", "def testAllWrite(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n pts = [pt for pt in pts if pt['name']!='pressure'] #Can't write to pres\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n pointsvalues = dict(zip(ptnames, [0]*len(ptnames)))\n reply = client.writePoints(pointsvalues)\n assert reply is None, \"Write returned value other than None: \" + str(reply)\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n #assert value == reply[ptnames.index(pt)]\n if not 0 == reply[ptnames.index(pt)]: \n print pt, ' was not read properly.'", "def testAllWrite(self):\n import time,copy\n 
time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n pts = [pt for pt in pts if pt['name']!='pressure'] #Can't write to pres\n ptnames = [ pt['name'] for pt in pts ]\n pointsvalues = dict(zip(ptnames, [0]*len(ptnames)))\n reply = client.writePoints(pointsvalues)\n assert reply is None, \"Write returned value other than None: \" + str(reply)\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n #assert value == reply[ptnames.index(pt)]\n if not 0 == reply[ptnames.index(pt)]: \n print pt, ' was not read properly.'", "def test_mstgcn():\n node_count = 307\n num_classes = 10\n edge_per_node = 15\n\n num_for_predict = 12\n len_input = 12\n nb_time_strides = 1\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n node_features = 2\n nb_block = 2\n K = 3\n nb_chev_filter = 64\n nb_time_filter = 64\n batch_size = 32\n\n model = MSTGCN(\n nb_block,\n node_features,\n K,\n nb_chev_filter,\n nb_time_filter,\n nb_time_strides,\n num_for_predict,\n len_input,\n ).to(device)\n T = len_input\n x_seq = torch.zeros([batch_size, node_count, node_features, T]).to(device)\n target_seq = torch.zeros([batch_size, node_count, T]).to(device)\n edge_index_seq = []\n\n for b in range(batch_size):\n for t in range(T):\n x, edge_index = create_mock_data(node_count, edge_per_node, node_features)\n x_seq[b, :, :, t] = x.to(device)\n if b == 0:\n edge_index_seq.append(edge_index.to(device))\n target = create_mock_target(node_count, num_classes).to(device)\n target_seq[b, :, t] = target\n\n shuffle = True\n train_dataset = torch.utils.data.TensorDataset(x_seq, target_seq)\n train_loader = torch.utils.data.DataLoader(\n train_dataset, batch_size=batch_size, shuffle=shuffle\n )\n\n for batch_data in train_loader:\n encoder_inputs, _ = batch_data\n outputs1 = model(encoder_inputs, edge_index_seq)\n outputs2 = model(encoder_inputs, edge_index_seq[0])\n\n assert outputs1.shape == (batch_size, node_count, num_for_predict)\n assert outputs2.shape == (batch_size, node_count, num_for_predict)", "def test_synth_tr():\n test_path = tempfile.mkdtemp()\n x_train, metadata = synth_tr(test_path)\n try:\n assert x_train.shape == (250, 3)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_data_manipulation(self):\n target_name = self.project['target']['name']\n self.api_mock.return_value.get_metadata.return_value = [\n {'_id': '0',\n 'pid': '1',\n 'created': datetime.datetime.now(),\n 'name':'universe',\n 'originalName': 'credit-sample-200.csv',\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '1',\n 'pid': '1',\n 'name':'test',\n 'originalName': 'credit-sample-200.csv',\n 'created': datetime.datetime.now(),\n 'varTypeString': 'NN',\n 'shape': [2, 100],\n 'controls':{},\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}},\n {'_id': '2',\n 'pid': '1',\n 'name':'new',\n 'created': datetime.datetime.now(),\n 'originalName': 'credit-sample-200.csv',\n 'newdata':True,\n 'controls':{},\n 'shape': [2, 100],\n 'varTypeString': 'NN',\n 'columns': [[1,target_name,0],[3,\"age\",0]],\n 'files': ['projects/' + str(self.pid) + '/raw/' + self.testdatafile],\n 'typeConvert': {}}]\n request = 
WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1',\n 'command': 'fit', 'max_reps': 0,\n 'samplepct': 100})\n\n #target\n #this will map the target values to (0,1) because target type is Binary\n target_vector = self.dataprocessor.target_vector()\n target_series = target_vector['main']\n self.assertItemsEqual(np.unique(target_series), [0,1])\n\n #this will be none because 'holdout_pct' isn't set in the project data\n self.assertIsNone(target_vector['holdout'])\n\n #prediction dataset\n predictors = self.dataprocessor.predictors()\n pred_dataframe = predictors['1']['main']\n self.assertItemsEqual(list(pred_dataframe.columns), [\"age\"])\n self.assertEqual(self.dataprocessor.get_vartypestring_without_target('1'), \"N\")\n\n request = WorkerRequest({'pid': '1', 'uid': '1', 'dataset_id': '1', 'scoring_dataset_id': '2', 'command': 'predict', 'max_reps': 0, 'samplepct':100})\n dp2 = DataProcessor(request)\n data = dp2.request_datasets()\n self.assertEqual(data.keys(), ['1'])\n self.assertEqual(data['1'].keys(), ['scoring', 'vartypes'])\n scoring_data = data['1']['scoring']\n vartypes = data['1']['vartypes']\n self.assertEqual(list(scoring_data.columns), [\"age\"])\n self.assertEqual(vartypes, \"N\")", "def test_gen():\n tpot_obj = TPOTClassifier()\n\n pipeline = tpot_obj._gen_grow_safe(tpot_obj._pset, 1, 3)\n\n assert len(pipeline) > 1\n assert pipeline[0].ret == Output_DF", "def test_publish_path(self):\n log.debug(\"======================= creating telem data\")\n self.create_sample_data_set_dir(\n 'node59p1_step1.dat',\n TELEM_DIR,\n \"node59p1.dat\",\n copy_metadata=False\n )\n log.debug(\"======================= creating recov data\")\n self.create_sample_data_set_dir(\n \"DOS15908_1st7_step1.DAT\",\n RECOV_DIR,\n \"DOS15908.DAT\",\n copy_metadata=False\n )\n\n self.assert_initialize()\n\n # get the telemetered metadata particle\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED, 1)\n log.debug(\"***************************************************** metadata result %s\", result)\n #get the telemetered instrument particles\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED, 1)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1.txt.result.yml')\n \n \n # get the telemetered metadata particle\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_RECOVERED, 1)\n log.debug(\"***************************************************** metadata result %s\", result)\n #get the telemetered instrument particles\n result1 = self.data_subscribers.get_samples(DataParticleType.SAMPLE_RECOVERED, 1)\n result.extend(result1)\n log.debug(\"RESULT: %s\", result)\n\n # Verify values\n self.assert_data_values(result, 'test_data_1r.txt.result.yml')", "def test_significance_window(tensor_directory, analysis_name, window):\n\n \"\"\"\n # Open Analysis Dataframe\n analysis_file = h5py.File(os.path.join(tensor_directory, analysis_name + \".hdf5\"), \"r\")\n activity_dataset = analysis_file[\"Data\"]\n metadata_dataset = analysis_file[\"metadata\"]\n number_of_timepoints, number_of_trials, number_of_pixels = np.shape(activity_dataset)\n print(\"metadata_dataset\", np.shape(metadata_dataset))\n \"\"\"\n\n\n # Open Analysis Dataframe\n analysis_file = tables.open_file(os.path.join(tensor_directory, analysis_name + \"_Trialwise_.h5\"), mode=\"r\")\n activity_dataset = analysis_file.root[\"Data\"]\n metadata_dataset = analysis_file.root[\"Trial_Details\"]\n 
activity_dataset = np.array(activity_dataset)\n metadata_dataset = np.array(metadata_dataset)\n\n activity_dataset = np.nan_to_num(activity_dataset)\n\n number_of_trials, number_of_timepoints, number_of_pixels = np.shape(activity_dataset)\n\n\n print(\"Number of timepoints\", number_of_timepoints)\n print(\"number of pixels\", number_of_pixels)\n print(\"number of trials\", number_of_trials)\n\n # Create P and Slope Tensors\n p_value_tensor = np.ones(number_of_pixels)\n slope_tensor = np.zeros(number_of_pixels)\n\n # Get Timepoint Data\n timepoint_activity = activity_dataset[:, window]\n print(\"Timepoint activity shape\", np.shape(timepoint_activity))\n timepoint_activity = np.mean(timepoint_activity, axis=1)\n\n for pixel_index in tqdm(range(number_of_pixels), position=1, desc=\"Pixel\", leave=False):\n\n # Package Into Dataframe\n pixel_activity = timepoint_activity[:, pixel_index]\n pixel_dataframe = repackage_data_into_dataframe(pixel_activity, metadata_dataset)\n\n # Fit Mixed Effects Model\n p_value, slope = mixed_effects_random_slope_and_intercept(pixel_dataframe)\n p_value_tensor[pixel_index] = p_value\n slope_tensor[pixel_index] = slope\n\n return p_value_tensor, slope_tensor", "def test_active_inference_SPM_1b(self):", "def test_predictor():", "def testNanotecSharedMemory():\n\t\n\tmemoryClient = NanotecSharedMemoryClient()\n\t\n\t\n\t\n\tserialPort1 = memoryClient.sendInstruction(\"NanotecMotor,/dev/ttyACM0,17\")\n\t\n\tID1 = memoryClient.sendInstruction(\"getID,\" + serialPort1)\n\tprint(\"ID1 (actual): \" + ID1 + \", expected: 17\")\n\tprint(\"\")\n\t\n\tstartTime = time.time()\n\tnumCalls = 1000\n\tfor _ in range(numCalls):\n\t\tID1 = memoryClient.sendInstruction(\"getSerialPort,\" + serialPort1)\n\tendTime = time.time()\n\ttotalTime = endTime - startTime\n\tprint(\"\")\n\tprint(\"ID Call\")\n\tprint(\"numCalls: \" + str(numCalls))\n\tprint(\"total time: \" + str(totalTime))\n\tprint(\"time per call: \" + str(totalTime/numCalls))\n\t\n\t\n\tstartTime = time.time()\n\tnumCalls = 1000\n\tfor _ in range(numCalls):\n\t\tposition = memoryClient.sendInstruction(\"getAbsoluteAngularPosition,\" + serialPort1)\n\tendTime = time.time()\n\ttotalTime = endTime - startTime\n\tprint(\"\")\n\tprint(\"Position Call\")\n\tprint(\"numCalls: \" + str(numCalls))\n\tprint(\"total time: \" + str(totalTime))\n\tprint(\"time per call: \" + str(totalTime/numCalls))\n\t\n\treturn", "def setUp(self):\n\n serial_times = {295: '1971-07-31T01:24:11.754',\n 296: '1971-07-31T01:24:36.970',\n 297: '1971-07-31T01:25:02.243',\n 298: '1971-07-31T01:25:27.457',\n 299: '1971-07-31T01:25:52.669',\n 300: '1971-07-31T01:26:17.923'}\n self.serials = ['APOLLO15/METRIC/{}'.format(i) for i in serial_times.values()]\n\n\n x = list(range(5))\n y = list(range(5))\n pid = [0,0,1,1,1]\n idx = pid\n serials = [self.serials[0], self.serials[1], self.serials[2],\n self.serials[2], self.serials[3]]\n\n\n columns = ['x', 'y', 'idx', 'pid', 'nid']\n self.data_length = 5\n\n data = [x,y, idx, pid, serials]\n\n self.creation_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n cnet = C(data, index=columns).T\n\n io_controlnetwork.to_isis('test.net', cnet, mode='wb', targetname='Moon')\n\n self.header_message_size = 85\n self.point_start_byte = 65621", "def test_anchor_output():\n anchorDict = {\n \"ReplicationFactor\": 2,\n # Accl factor must divide batch size\n \"AccumulationFactor\": 4,\n \"Pipelining\": True,\n \"ReturnType\": \"ALL\",\n }\n label_array = np.ones([BATCH_SIZE]).astype(np.int32)\n\n micro_batch_size = BATCH_SIZE // 
(\n anchorDict[\"AccumulationFactor\"] * anchorDict[\"ReplicationFactor\"]\n )\n\n builder = popart.Builder()\n input_shape = [micro_batch_size, CHANNELS, DATA_LEN, DATA_LEN]\n\n data_shape = popart.TensorInfo(\"FLOAT\", input_shape)\n lbl_shape = popart.TensorInfo(\"INT32\", [micro_batch_size])\n w = builder.addInitializedInputTensor(\n np.random.random_sample(input_shape).astype(np.float32)\n )\n\n ip = builder.addInputTensor(data_shape)\n lb = builder.addInputTensor(lbl_shape)\n\n a = builder.aiOnnx.matmul([ip, w])\n o = builder.reshape_const(\n builder.aiOnnx, [a], [micro_batch_size, CHANNELS * DATA_LEN * DATA_LEN]\n )\n o = builder.aiOnnx.relu([o])\n o = builder.aiOnnx.softmax([o])\n nll = builder.aiGraphcore.nllloss([o, lb])\n\n GRAD = popart.reservedGradientPrefix() + w\n ACCL = popart.reservedAccumPrefix() + w\n art = popart.AnchorReturnType(\"All\")\n data_flow = popart.DataFlow(\n BATCHES_PER_STEP, {o: art, a: art, ip: art, w: art, GRAD: art, ACCL: art}\n )\n\n opts, deviceContext = return_options(anchorDict)\n with deviceContext as device:\n if device is None:\n pytest.skip(\"Test needs to run on IPU, but none are available\")\n\n session = popart.TrainingSession(\n fnModel=builder.getModelProto(),\n dataFlow=data_flow,\n loss=nll,\n optimizer=popart.ConstSGD(LEARNING_RATE),\n userOptions=opts,\n deviceInfo=device,\n )\n\n session.prepareDevice()\n\n if anchorDict[\"ReplicationFactor\"] > 1:\n input_shape = [anchorDict[\"ReplicationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"ReplicationFactor\"], -1])\n if anchorDict[\"AccumulationFactor\"] > 1:\n input_shape = [anchorDict[\"AccumulationFactor\"]] + input_shape\n label_array = label_array.reshape([anchorDict[\"AccumulationFactor\"], -1])\n if BATCHES_PER_STEP > 1:\n input_shape = [BATCHES_PER_STEP] + input_shape\n label_array = np.repeat(label_array[np.newaxis], BATCHES_PER_STEP, 0)\n\n anchors = session.initAnchorArrays()\n in_array = np.random.random_sample(input_shape).astype(np.float32)\n\n stepio = popart.PyStepIO({ip: in_array, lb: label_array}, anchors)\n session.weightsFromHost()\n\n session.run(stepio)\n\n # Returned anchors will be of shape\n # [bps, grad_accl_factor, repl_factor, micro_batch_size, channels, data_len, data_len]\n for batch in range(anchors[w].shape[0]):\n for replica in range(anchors[w].shape[1]):\n # Weights should not change over the gradient accumulation\n # dimension - only after gradAccl steps.\n assert np.allclose(\n anchors[w][batch, 0, :, :, :, :, :],\n anchors[w][batch, replica, :, :, :, :, :],\n )\n\n # Check that the accumulated gradient plus the weights for the current batch\n # equals the weights for the next batch.\n # Batch loop\n for batch in range(anchors[w].shape[0] - 1):\n calc_weight = {}\n # Replica loop.\n for replica in range(anchors[w].shape[2]):\n # For each replica in each batch, take the relevant replica's\n # last weight tensor in the accumulation loop minus\n # the sum of the accumulated gradients across replicas\n calc_weight[replica] = anchors[w][\n batch, -1, replica, :, :, :, :\n ] - np.sum(anchors[ACCL][batch, -1, :, :, :, :, :], axis=0)\n # Then compare against the last weight tensor of the next batch,\n # for the relevant replica. 
These should match.\n assert np.allclose(\n calc_weight[replica], anchors[w][batch + 1, -1, replica, :, :, :, :]\n )", "def test_add_get_tensor_2D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 2D tensors of all data types\n data_2D = mock_data.create_data((10, 10))\n add_get_arrays(dataset, data_2D)", "def test_large_import(self):\n self.create_sample_data_set_dir(\"node59p1.dat\", TELEM_DIR)\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,60)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,750,400)", "def test_case_store_data_memory( self ):\n MEM_AUX = memory.fetch_system_memory()\n def test( clk, chip_select, address, load_store, data_in, data_out ):\n count = len( MEM_AUX )\n for i in range( count+10 , count+100 ):\n chip_select.next = 1\n load_store.next = 1\n address.next = i\n data_in.next = i*32\n MEM_AUX[i]=i*32\n yield clk.posedge\n yield clk.posedge\n #chip_select.next = 1\n load_store.next = 0\n #address.next = i\n data_in.next = 0\n yield clk.posedge\n yield clk.posedge\n chip_select.next = 0\n self.assertEqual( data_out, MEM_AUX[i] )\n\n raise StopSimulation\n\n clk_s = Signal( bool( 1 ) )\n clkgen = clk_gen( clk_s )\n address, data_in, data_out =[Signal( intbv( 0 )[32:] ) for i in range( 3 )]\n chip_select, load_store = [Signal( bool( 0 ) ) for i in range( 2 )]\n\n mem_test = memory.memory( clk_s, chip_select, address, load_store, data_in, data_out )\n #check = test( clk_s, chip_select, address, load_store, data_in, data_out )\n #sim = Simulation( mem_test, check, clkgen )\n te = traceSignals(test, clk_s, chip_select, address, load_store, data_in, data_out)\n sim = Simulation( mem_test, te, clkgen )\n sim.run( quiet=1 )", "def test(args, device, test_generator, model):\n model.eval()\n\n with torch.no_grad():\n # Get inputs and labels\n inputs, inputs_prev, labels, image, _, omit = test_generator.generate_batch()\n\n # Send to device\n inputs = torch.from_numpy(inputs).to(device)\n inputs_prev = torch.from_numpy(inputs_prev).to(device)\n labels = torch.from_numpy(labels).to(device)\n\n # Initialize syn_x or hidden state\n model.syn_x = model.init_syn_x(args.batch_size).to(device)\n model.hidden = model.init_hidden(args.batch_size).to(device)\n\n output, hidden, inputs = model(inputs, inputs_prev)\n # Convert to binary prediction\n output = torch.sigmoid(output)\n pred = torch.bernoulli(output).byte()\n\n # Compute hit rate and false alarm rate\n hit_rate = (pred * (labels == 1)).sum().float().item() / \\\n (labels == 1).sum().item()\n fa_rate = (pred * (labels == -1)).sum().float().item() / \\\n (labels == -1).sum().item()\n\n # Compute dprime\n # dprime_true = dprime(hit_rate, fa_rate)\n go = (labels == 1).sum().item()\n catch = (labels == -1).sum().item()\n num_trials = (labels != 0).sum().item()\n assert (go + catch) == num_trials\n\n # dprime_true = compute_dprime(hit_rate, fa_rate, go, catch, num_trials)\n # dprime_old = dprime(hit_rate, fa_rate)\n dprime_true = dprime(hit_rate, fa_rate)\n # try:\n # assert dprime_true == dprime_old\n # except:\n # print(hit_rate, fa_rate)\n # print(dprime_true, dprime_old)\n\n return dprime_true.item(), hit_rate, fa_rate, inputs, hidden, output, pred, image, labels, omit", "def make_request(dataset, path, out, session):\n reader = tf.python_io.tf_record_iterator(\n path, options=tf.python_io.TFRecordOptions(\n compression_type=TFRecordCompressionType.GZIP))\n print >> sys.stderr, path\n for buf in reader:\n examples_tensor = 
json.loads(\n session.graph.get_collection('inputs')[0])['examples']\n output_tensor = json.loads(\n session.graph.get_collection('outputs')[0])[FLAGS.fetch]\n outputs = session.run(fetches=[output_tensor],\n feed_dict={examples_tensor: [buf]})\n example = tf.train.Example.FromString(buf)\n sample_name = example.features.feature['sample_name'].bytes_list.value[0]\n super_population = example.features.feature[\n 'super_population_string'].bytes_list.value[0]\n population = example.features.feature[\n 'population_string'].bytes_list.value[0]\n print >> out, '%s\\t%s\\t%s\\t%s\\t%s' % (\n dataset, sample_name, super_population, population, '\\t'.join(\n str(value) for value in outputs[0][0]))", "def step(self, observation, last_state):\n # We are omitting the details of network inference here.\n # ...\n feature_screen = observation[3]['feature_screen']\n feature_minimap = observation[3]['feature_minimap']\n feature_units = observation[3]['feature_units']\n feature_player = observation[3]['player']\n available_actions = observation[3]['available_actions']\n score_by_category = observation[3]['score_by_category']\n game_loop = observation[3]['game_loop']\n\n unit_type = feature_screen.unit_type\n empty_space = np.where(unit_type == 0)\n empty_space = np.vstack((empty_space[0], empty_space[1])).T\n random_point = random.choice(empty_space)\n #target = [random_point[0], random_point[1]]\n #action = [actions.FunctionCall(_BUILD_SUPPLY_DEPOT, [_NOT_QUEUED, target])]\n policy_logits = None\n new_state = None\n\n spatial_encoder_output = self.spatial_encoder(np.reshape(feature_screen, [1,128,128,27]))\n\n agent_statistics = get_agent_statistics(score_by_category)\n\n home_race = 'Terran'\n away_race = 'Terran'\n race = get_race_onehot(home_race, away_race)\n\n time = get_gameloop_obs(game_loop)\n\n upgrade_value = get_upgrade_obs(feature_units)\n if upgrade_value != -1:\n self.home_upgrade_array[np.where(upgrade_value[0] == 1)] = 1\n self.away_upgrade_array[np.where(upgrade_value[1] == 1)] = 1\n\n embedded_scalar = np.concatenate((agent_statistics, race, time, self.home_upgrade_array, self.away_upgrade_array), axis=0)\n scalar_encoder_output = self.scalar_encoder(np.reshape(embedded_scalar, [1,307]))\n embedded_feature_units = get_entity_obs(feature_units)\n entity_encoder_output = self.entity_encoder(np.reshape(embedded_feature_units, [1,512,464]))\n encoder_input = np.concatenate((spatial_encoder_output, scalar_encoder_output, entity_encoder_output), axis=1)\n\n core_input = np.reshape(encoder_input, [16, 8, 131])\n whole_seq_output, final_memory_state, final_carry_state = self.core(core_input)\n print(whole_seq_output.shape)\n print(final_memory_state.shape)\n print(final_carry_state.shape)\n\n action = [actions.FUNCTIONS.no_op()]\n\n return action, policy_logits, new_state", "def test_create_device1(self):\n pass", "def test_add_get_tensor_3D(mock_data):\n dataset = Dataset(\"test-dataset\")\n\n # 3D tensors of all datatypes\n data_3D = mock_data.create_data((10, 10, 10))\n add_get_arrays(dataset, data_3D)", "def setUp(self):\n output = np.zeros((1, 5, 2))\n target = np.zeros((1, 5, 2))\n # first channel\n output[0, 0] = [10, 4]\n target[0, 0] = [10, 0]\n # second channel\n output[0, 1] = [10, 18]\n target[0, 1] = [10, 10]\n # third channel\n output[0, 2] = [0, 0]\n target[0, 2] = [0, -1]\n # fourth channel\n output[0, 3] = [40, 40]\n target[0, 3] = [30, 30]\n # fifth channel\n output[0, 4] = [20, 10]\n target[0, 4] = [0, 10]\n\n gt_instances = InstanceData()\n gt_instances.keypoints = target\n 
gt_instances.keypoints_visible = np.array(\n            [[True, True, False, True, True]])\n\n        pred_instances = InstanceData()\n        pred_instances.keypoints = output\n\n        data = {'inputs': None}\n        data_sample = {\n            'gt_instances': gt_instances.to_dict(),\n            'pred_instances': pred_instances.to_dict()\n        }\n\n        self.data_batch = [data]\n        self.data_samples = [data_sample]", "def test5():\n    setLogLevel(\"info\")\n    info(\"Configuring the experimental unit\")\n    \"\"\" 1 -> Definition of the topology \"\"\"\n    t1 = Topologia1()\n    ue1 = UnidadExperimental()\n    ue1.setTopo(t1)\n    ue1.definirNodosClaves(A = 'h1', C='h2', V='h3') # Case for normal traffic only\n    ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n    info(\"Configuring the experiment\")\n    \"\"\" 3. Configuration of the experiment \"\"\"\n    exp1 = Experimento()\n    exp1.configureParams(ue1)\n    exp1.configurarTrafico('ataque')\n    \"\"\" 4. Start of the experiment \"\"\"\n    exp1.startTest()\n    exp1.pingAllTest() # **************** It seems the controller needs an initial warm-up run\n    # **************** so that it learns the rules before the attack.\n\n    \"\"\" 5. Running the tests \"\"\"\n    exp1.trafico.pingMeasure()\n    #exp1.trafico.pingMeasure(filename='ping_ataque_test.log')\n    \"\"\" 6. End of the experiment \"\"\"\n    exp1.endTest()\n    info(\"Removing the topology\\n\")\n    exp1.killTest()\n    info(\"Removing the controller\\n\")\n    exp1.killController() # If this is not called, the controller is never shut down", "def test_IODimensions(self):\n        tasks = [(1,1,100,True),(10,1,100,True),(1,10,100,True),(10,10,100,True),\n                 (1,1,100,False),(10,1,100,False),(1,10,100,False),(10,10,100,False)]\n        for t in tasks:\n            N_in ,N_out, N_samples, tf = t\n            X = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n            y = np.random.randn(N_samples,N_out) if N_out > 1 else np.random.randn(N_samples)\n            Xp = np.random.randn(N_samples,N_in) if N_in > 1 else np.random.randn(N_samples)\n            esn = ESN(N_in,N_out,teacher_forcing=tf)\n            prediction_tr = esn.fit(X,y)\n            prediction_t = esn.predict(Xp)\n            self.assertEqual(prediction_tr.shape,(N_samples,N_out))\n            self.assertEqual(prediction_t.shape,(N_samples,N_out))", "def gen_ep_data(self,ntrials,trlen):\n    self.resort_emat()\n    ## instruction\n    # for each trial, generate random instruction encoding sequence\n    i_encoding_input = np.array([\n      np.random.permutation(np.arange(1,self.nmaps+1)) \n      for i in range(ntrials)\n      ])\n    i_test_input = np.zeros([ntrials,trlen])\n    i_input = np.concatenate([\n      i_encoding_input,i_test_input],\n      1).astype(int).reshape(-1) # (ntrials,trlen+)\n    ## stimulus\n    x_encoding_input = i_encoding_input\n    x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n    x_input = np.concatenate([x_encoding_input,x_test_input],1)\n    # print('X',x_input)\n    ''' \n    embed x_input: \n    [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n    explicit loop required for flatten and embedd x_input\n    because if switchmaps=1, matrix is resorted between trials\n    and therefore same stimulus token integers correspond to\n    different stimulus embeddings on different trials\n    '''\n    s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n    for trialn,x_input_trial in enumerate(x_input): \n      if self.switchmaps: self.resort_emat()\n      s_input[trialn] = self.emat[x_input_trial]\n    # format output\n    i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n    s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n    yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n    return i_input,s_input,yseq", "def 
test_qpu_0_shots():\n _aws_device(wires=2, shots=0)", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def test_get_node_sensors(self):\n pass", "def Test(self):\n print('Testing:')\n # set mode eval\n torch.cuda.empty_cache()\n self.network.eval()\n transform = transforms.Compose([Rescale(params.rescale_size),\n RandomCrop(params.image_size),\n \n ToTensor()\n ])\n dataset = Cityscapes(params.dataset_root, mode='test', transforms = transform)\n test_loader = DataLoader(dataset,\n batch_size=params.test_batch,\n shuffle=params.shuffle,\n num_workers=params.dataloader_workers)\n # prepare test data\n recal = 0\n precision = 0\n F_one = 0\n IOU = 0\n accuracy_new = 0\n test_size = 1124\n if test_size % self.params.test_batch != 0:\n total_batch = test_size // self.params.test_batch + 1\n else:\n total_batch = test_size // self.params.test_batch\n\n # test for one epoch\n for batch_idx, batch in enumerate(test_loader):\n self.pb.click(batch_idx, total_batch)\n image, label, name = batch['image'], batch['label'], batch['label_name']\n image_cuda, label_cuda = image.cuda(), label.cuda()\n pred = image_cuda\n pred = pred.to(torch.device(\"cpu\"))\n pred = pred.detach()\n img_grid = pred[0]\n #img_grid = torchvision.utils.make_grid(out) \n img_grid = img_grid.numpy().transpose(1, 2, 0)*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original%d.jpg\" % batch_idx, img_grid)\n if self.params.should_split:\n image_cuda.requires_grad_()\n out = checkpoint_sequential(self.network, self.params.split, image_cuda)\n else:\n out = self.network(image_cuda)\n TP, FP, TN, FN = confusion(out, label_cuda)\n recal = recal+TP\n precision = precision+FP\n F_one = F_one +TN\n IOU = IOU+ FN \n _,predict = torch.max(out.data,1)\n predict = predict.to(torch.device(\"cpu\"))\n predict = predict.detach()\n img = predict[0]\n img = img.numpy()*255\n #img_grid = torchvision.utils.make_grid(out) \n cv2.imwrite(\"/content/drive/My Drive/Test_images/predict_label%d.png\" % batch_idx, img)\n label = label_cuda.to(torch.device(\"cpu\"))\n label = label.detach()\n label = label[0].numpy()*255\n cv2.imwrite(\"/content/drive/My Drive/Test_images/original_label%d.png\" % batch_idx, label)\n\n accuracy_final = accuracy(out, label_cuda)\n accuracy_new = accuracy_new + accuracy_final\n print(\"\\t\")\n print(recal/total_batch, precision/ total_batch, F_one/ total_batch, IOU/ total_batch)\n print(\"\\t\")\n print(accuracy_new/total_batch)", "def _sample(self, rnn_output, temperature):\n pass", "def test_teleport(self):\n self.log.info('test_teleport')\n pi = np.pi\n shots = 2000\n qr = QuantumRegister(3, 'qr')\n cr0 = ClassicalRegister(1, 'cr0')\n cr1 = ClassicalRegister(1, 'cr1')\n cr2 = ClassicalRegister(1, 'cr2')\n circuit = QuantumCircuit(qr, cr0, cr1, cr2, name='teleport')\n circuit.h(qr[1])\n circuit.cx(qr[1], qr[2])\n circuit.ry(pi/4, qr[0])\n circuit.cx(qr[0], qr[1])\n circuit.h(qr[0])\n circuit.barrier(qr)\n circuit.measure(qr[0], cr0[0])\n circuit.measure(qr[1], cr1[0])\n circuit.z(qr[2]).c_if(cr0, 1)\n circuit.x(qr[2]).c_if(cr1, 1)\n circuit.measure(qr[2], cr2[0])\n job = execute(circuit, backend=self.backend, shots=shots, seed_simulator=self.seed)\n results = job.result()\n data = results.get_counts('teleport')\n alice = {\n '00': data['0 0 0'] + data['1 0 0'],\n '01': data['0 1 0'] + data['1 1 0'],\n '10': data['0 0 1'] + data['1 0 1'],\n '11': data['0 1 1'] + data['1 1 1']\n }\n bob = {\n '0': data['0 0 0'] + data['0 1 0'] + data['0 0 
1'] + data['0 1 1'],\n '1': data['1 0 0'] + data['1 1 0'] + data['1 0 1'] + data['1 1 1']\n }\n self.log.info('test_teleport: circuit:')\n self.log.info(circuit.qasm())\n self.log.info('test_teleport: data %s', data)\n self.log.info('test_teleport: alice %s', alice)\n self.log.info('test_teleport: bob %s', bob)\n alice_ratio = 1/np.tan(pi/8)**2\n bob_ratio = bob['0']/float(bob['1'])\n error = abs(alice_ratio - bob_ratio) / alice_ratio\n self.log.info('test_teleport: relative error = %s', error)\n self.assertLess(error, 0.05)", "def testOutputs(self):\n # Remember original (correct) example outputs\n old_files = self.read_outputs()\n\n # Set up and run Xanthos\n ini = 'example/pm_abcd_mrtm.ini'\n xth = Xanthos(ini)\n res = xth.execute()\n\n # Check result dimensions\n self.assertEqual(res.Q.shape, (67420, 372))\n\n # Test that new outputs equal old outputs.\n new_files = self.read_outputs()\n for k in new_files.keys():\n pd.testing.assert_frame_equal(new_files[k], old_files[k])", "def simulatesensor_mqtt_device_demo(args):\n # [START iot_mqtt_run]\n global minimum_backoff_time\n global MAXIMUM_BACKOFF_TIME\n\n # Publish to the events or state topic based on the flag.\n sub_topic = 'events' if args.message_type == 'event' else 'state'\n\n mqtt_topic = '/devices/{}/{}'.format(args.device_id, sub_topic)\n\n jwt_iat = datetime.datetime.utcnow()\n jwt_exp_mins = args.jwt_expires_minutes\n client = get_client(\n args.project_id, args.cloud_region, args.registry_id,\n args.device_id, args.private_key_file, args.algorithm,\n args.ca_certs, args.mqtt_bridge_hostname, args.mqtt_bridge_port)\n\n # Publish num_messages messages to the MQTT bridge once per second.\n for i in range(1, args.num_messages + 1):\n client.loop()\n\n currentTime = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')\n (ram_usage, cpu_usage, number_of_cores, number_of_processes, battery_percentage) = read_sensor(i)\n\n payloadJSON = createJSON(args.registry_id, args.device_id, currentTime, ram_usage, cpu_usage, number_of_cores, number_of_processes, battery_percentage)\n\n #payload = '{}/{}-image-{}'.format(args.registry_id, args.device_id, i)\n print('Publishing message {}/: \\'{}\\''.format(\n i, payloadJSON))\n\n # Publish \"payload\" to the MQTT topic. qos=1 means at least once\n # delivery. Cloud IoT Core also supports qos=0 for at most once\n # delivery.\n client.publish(mqtt_topic, payloadJSON, qos=1)\n\n \n # Send events every second. 
State should not be updated as often\n time.sleep(1)", "def test_create_device(self):\n pass", "def test_create_device(self):\n pass", "def get_data(datapath,n=10):\n device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n input_path = datapath + \"/maze_data_test_59/inputs.npy\"\n target_path = datapath + \"/maze_data_test_59/solutions.npy\"\n data = np.load(input_path)\n target = np.load(target_path)\n a = data[:n]\n a = torch.from_numpy(a)\n input = a.to(device, dtype=torch.float)\n b = target[:n]\n t = torch.from_numpy(b)\n t = t.to(device, dtype=torch.float)\n target = t\n return input, target", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def test():\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--name', type=str, help='name of the model',\n default='model_new_o')\n 
parser.add_argument('-f', '--filename', type=str,\n help='name of the dataset (.h5 file)', default='./dataset.h5')\n parser.add_argument('-bs', '--batch-size', type=int,\n help='size of the batches of the training data', default=256)\n args = parser.parse_args()\n\n name = args.name\n filename = args.filename\n batch_size = args.batch_size\n\n out_channels = 400\n model_path = './model/' + name\n checkpoint_path = model_path + '/checkpoints'\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n for k, v in vars(args).items():\n print('{0} = \"{1}\"'.format(k, v))\n print('device = \"' + device + '\"')\n\n if not os.path.exists(checkpoint_path):\n print('Model parameters not found: ' + checkpoint_path)\n exit()\n\n # Dataset\n\n input_cols = ['camera', 'pos_x', 'pos_y', 'theta']\n target_cols = ['target_map']\n train_test_split = 11\n\n dataset = get_dataset(filename, device=device, augment=False,\n input_cols=input_cols, target_cols=target_cols)\n split_index = dataset.cumulative_sizes[train_test_split]\n\n # Model\n\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model = NN(in_channels=3, out_channels=out_channels).to(device)\n model.load_state_dict(torch.load(checkpoint_path + '/best.pth'))\n summary(model, (3, 64, 80), device=device)\n\n auc_function = MaskedAUROC()\n\n # Testing\n\n aucs = []\n for x, px, py, pt, y in dataset.batches(batch_size, start=split_index, shuffle=False):\n pose = torch.stack([px, py, pt], dim=-1).to(device)\n mask = y > -1\n\n preds = model(x)\n\n aucs.append(auc_function(preds, y, mask).cpu().numpy())\n\n auc = np.nanmean(aucs, axis=0).reshape(20, 20)\n auc = np.rot90(auc, 1)\n auc = np.fliplr(auc) * 100\n\n print('AUC: ' + str(auc.mean().item()))\n\n print(auc)\n\n rounded = (100 * coords).round(2).astype(int)\n fig, ax = plt.subplots(nrows=2, ncols=1, figsize=(7, 5.8))\n sns.distplot(auc, bins=int(np.ceil(auc.max() - auc.min())),\n ax=ax[0], kde=False, rug=False, color='red', hist_kws={'rwidth': 0.75})\n sns.heatmap(auc, cmap='gray', annot=True, cbar_kws={'shrink': .8},\n vmin=50, vmax=100, linewidths=0, ax=ax[1])\n plt.yticks(.5 + np.arange(20), np.unique(rounded[:, 0])[::-1])\n plt.xticks(.5 + np.arange(20), np.unique(rounded[:, 1]))\n plt.xlabel('Y [cm]')\n plt.ylabel('X [cm]')\n plt.setp(ax[1].xaxis.get_majorticklabels(), rotation=0)\n plt.setp(ax[1].yaxis.get_majorticklabels(), rotation=0)\n plt.axis('equal')\n plt.tight_layout()\n plt.show()", "def test_full_house_flush_ind(self):", "def test_batch(self):\n pass", "def test_op(self):\n name = \"my_mesh\"\n tensor_data = test_utils.get_random_mesh(\n 100, add_faces=True, add_colors=True\n )\n config_dict = {\"foo\": 1}\n with tf.compat.v1.Graph().as_default():\n tensor_summary = summary.op(\n name,\n tensor_data.vertices,\n faces=tensor_data.faces,\n colors=tensor_data.colors,\n config_dict=config_dict,\n )\n with self.test_session() as sess:\n proto = self.pb_via_op(tensor_summary)\n self.verify_proto(proto, name)\n plugin_metadata = metadata.parse_plugin_metadata(\n proto.value[0].metadata.plugin_data.content\n )\n self.assertEqual(\n json.dumps(config_dict, sort_keys=True),\n plugin_metadata.json_config,\n )", "def test_auto_transfer_correct_device(ray_start_4_cpus_2_gpus):\n import nvidia_smi\n\n nvidia_smi.nvmlInit()\n\n def get_gpu_used_mem(i):\n handle = nvidia_smi.nvmlDeviceGetHandleByIndex(i)\n info = nvidia_smi.nvmlDeviceGetMemoryInfo(handle)\n return info.used\n\n start_gpu_memory = get_gpu_used_mem(1)\n\n device = torch.device(\"cuda:1\")\n small_dataloader = 
[(torch.randn((1024 * 4, 1024 * 4)),) for _ in range(10)]\n wrapped_dataloader = ( # noqa: F841\n ray.train.torch.train_loop_utils._WrappedDataLoader(\n small_dataloader, device, True\n )\n )\n\n end_gpu_memory = get_gpu_used_mem(1)\n\n # Verify GPU memory usage increases on the right cuda device\n assert end_gpu_memory > start_gpu_memory", "def test_output(data,idx,model):\n x,y = data[idx]\n out = model(x)\n return y.data.cpu().numpy(), out.data.cpu().numpy()", "def gen_ep_data(self,ntrials,trlen):\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([i_encoding_input,x_test_input],1)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n \n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n if return_trial_flag:\n tr_flag = np.concatenate([i*np.ones(self.nmaps+trlen) for i in range(ntrials)])\n tr_flag = tr.unsqueeze(tr.LongTensor(tr_flag),1)\n return tr_flag,i_input,s_input,yseq,\n else:\n return i_input,s_input,yseq", "def test_get_device_template(self):\n pass", "def test_tiff_io_tensor():\n width = 560\n height = 320\n channels = 4\n\n images = []\n for filename in [\n \"small-00.png\",\n \"small-01.png\",\n \"small-02.png\",\n \"small-03.png\",\n \"small-04.png\"]:\n with open(\n os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"test_image\",\n filename), 'rb') as f:\n png_contents = f.read()\n image_v = tf.image.decode_png(png_contents, channels=channels)\n assert image_v.shape == [height, width, channels]\n images.append(image_v)\n\n filename = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"test_image\", \"small.tiff\")\n filename = \"file://\" + filename\n\n tiff = tfio.IOTensor.from_tiff(filename)\n assert tiff.keys == list(range(5))\n for i in tiff.keys:\n assert np.all(images[i].numpy() == tiff(i).to_tensor().numpy())", "def test_get_device(self):\n pass", "def test_get_device(self):\n pass", "def test(self):\n # Load the trained models.\n self.train_ev_ea()\n self.restore_model(self.test_iters)\n self.encoder_v.eval()\n # Set data loader.\n data_loader = self.data_loader\n empty = torch.FloatTensor(1, 3,self.image_size,self.image_size).to(self.device) \n empty.fill_(1)\n noise = torch.FloatTensor(self.batch_size, self.nz_num)\n noise = noise.to(self.device)\n step = 0\n data_loader.test.reinitialize_index()\n with torch.no_grad():\n while True:\n try:\n x_real, wrong_images, attributes, _, label_org = data_loader.test.next_batch_test(self.batch_size,10)\n except:\n break\n x_real = x_real.to(self.device) \n 
label_org = label_org.to(self.device)\n attributes = attributes.to(self.device)\n \n \n ev_x = self.encoder_v(x_real)\n noise.normal_(0, 1)\n ea_a = self.encoder_a(attributes, noise)\n \n out_A2B_results = [empty]\n out_A2B_results_a = [empty]\n\n for idx1 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx1:idx1+1])\n out_A2B_results_a.append(x_real[idx1:idx1+1])\n\n for idx2 in range(label_org.size(0)):\n out_A2B_results.append(x_real[idx2:idx2+1])\n out_A2B_results_a.append(x_real[idx2:idx2+1])\n \n for idx1 in range(label_org.size(0)):\n x_fake = self.decoder(self.encoder(x_real[idx2:idx2+1]), ev_x[idx1:idx1+1])\n out_A2B_results.append(x_fake)\n \n x_fake_a = self.decoder(self.encoder(x_real[idx2:idx2+1]), ea_a[idx1:idx1+1])\n out_A2B_results_a.append(x_fake_a)\n results_concat = torch.cat(out_A2B_results)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_v.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n results_concat = torch.cat(out_A2B_results_a)\n x_AB_results_path = os.path.join(self.result_dir, '{}_x_AB_results_test_a.jpg'.format(step+1)) \n save_image(self.denorm(results_concat.data.cpu()), x_AB_results_path, nrow=label_org.size(0)+1,padding=0)\n print('Saved real and fake images into {}...'.format(x_AB_results_path))\n \n step += 1", "def import_measurements():\n\n print('Receive a transfer...')", "def main():\n \"\"\"\n This is just for testing the functions\n \"\"\"\n\n x1 = np.array([1, 1, 1, 1, -1, -1, 1, 1, 1])\n x2 = np.array([1, -1, 1, 1, 1, 1, 1, -1, 1])\n x3 = np.array([-1, 1, -1, -1, 1, -1, -1, 1, -1])\n train_set = np.vstack((x1, x2))\n train_set = np.vstack((train_set, x3))\n\n\n params = {\n \"epochs\": 100,\n \"neurons\": len(x1),\n \"learn_method\": 'classic'\n }\n\n hop = hop_net.HopfieldNet(train_set, **params)\n hop.batch_train()\n show_trained(train_set)\n\n x4d = [1,1,1,1,1,1,1,1,1]\n x5d = [1,1,1,1,-1,-1,1,-1,-1]\n x45d = np.vstack((x4d, x5d))\n test_set = np.vstack((x45d, train_set))\n recalled_set = hop.recall(test_set)\n for i in range(test_set.shape[0]):\n show_tested(test_set[i], recalled_set[i])", "def test_write_broadcast(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dt = np.dtype('(3,)i')\n\n dset = f.create_dataset('x', (10,), dtype=dt)\n dset[...] 
= 42", "def simple_unet_data():\n return tf.constant(value=1.0, shape=(1, 256, 256, 1))", "def test_gpu_memory(self):\n m = pyflamegpu.ModelDescription(\"test_gpu_memory_test\")\n a = m.newAgent(\"agent\")\n a.newVariableInt(\"id\")\n p = pyflamegpu.AgentVector(a, AGENT_COUNT)\n for i in range(AGENT_COUNT):\n instance = p[i]\n instance.setVariableInt(\"id\", i)\n cm = pyflamegpu.CUDASimulation(m)\n # copy to device then back by setting and getting population data\n cm.setPopulationData(p)\n cm.getPopulationData(p)\n # check values are the same\n for i in range(AGENT_COUNT):\n instance = p[i]\n assert instance.getVariableInt(\"id\") == i", "def _creatExamplesTensorData(self, examples):\n\n images = []\n \n images2 = []\n images3 = []\n images4 = []\n images5 = [] \n labels = []\n for (img_idx, label) in examples:\n img = self.dataset[img_idx][0]\n #print(img)\n ##exit(0)\n if self.load:\n img = Image.fromarray(img)\n else:\n img = read_image(img)\n #print(img.size)\n #print(np.array(img).shape)\n #exit(0)\n if self.transform is not None:\n img1 = self.transform(img)\n\n img2 = self.transform_test(img)\n img3 = self.transform_test(img)\n img4 = self.transform_test(img)\n img5 = self.transform_test(img) \n #print((img2-img1).abs().sum(),(img3-img1).abs().sum(),(img2-img3).abs().sum())\n #print(img.shape,'located in test_loader.py at 146')\n #exit(0)\n images.append(img1)\n \n images2.append(img2)\n images3.append(img3)\n images4.append(img4)\n images5.append(img5) \n labels.append(label)\n images = torch.stack(images, dim=0)\n\n images2 = torch.stack(images2, dim=0)\n images3 = torch.stack(images3, dim=0)\n images4 = torch.stack(images4, dim=0)\n images5 = torch.stack(images5, dim=0) \n labels = torch.LongTensor(labels)\n return images, images2,images3,images4,images5,labels", "def test_flmb(self):\n self.create_sample_data_set_dir(\"node10p1.dat\", TELEM_DIR, \"node59p1.dat\")\n self.assert_initialize()\n result = self.data_subscribers.get_samples(DataParticleType.METADATA_TELEMETERED,1,30)\n result = self.data_subscribers.get_samples(DataParticleType.SAMPLE_TELEMETERED,5,30)", "def testSampleOutput(self):\n beam_width = 3\n max_decode_length = 2\n\n smart_compose_model = model.create_smart_compose_model(self.embedding_layer_param, self.empty_url, self.min_len, self.max_len,\n beam_width, max_decode_length, self.feature_type_2_name, self.min_seq_prob,\n self.length_norm_power)\n\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['bui'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'build is [PAD]', b'build source [PAD]', b'build token [PAD]']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build '])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.711434 , -2.7171993, -2.7329462]] (could vary due 
to random initialization),\n # 'predicted_texts': [[b'build function token', b'build function test', b'build function is']]\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build f'])\n }))", "def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')", "def test_tensor_network_bell(self):\n circuit = jet.Circuit(num_wires=2)\n circuit.append_gate(jet.GateFactory.create(\"H\"), wire_ids=[0])\n circuit.append_gate(jet.GateFactory.create(\"CNOT\"), wire_ids=[0, 1])\n\n tn = circuit.tensor_network()\n tensor = tn.contract()\n\n assert tensor.indices == [\"0-2\", \"1-1\"]\n assert tensor.shape == [2, 2]\n assert tensor.data == pytest.approx([1 / sqrt(2), 0, 0, 1 / sqrt(2)])", "def test_get_node_outputs(self):\n pass", "def test_verify_connection_to_a_device():", "def test_download_terrascope():\n\n s1_belgium.download(\"sigma0_cube_terrascope.nc\",format=\"NetCDF\")", "def main():\n\n # set up output directory and file\n output_file_folder = \"output/{}\".format(args.experiment_name)\n Path(output_file_folder).mkdir(parents=True, exist_ok=True)\n args.output_file_name = \"{}/{}.csv\".format(output_file_folder, args.model_name)\n args.checkpoint_name = \"{}/{}.pt\".format(output_file_folder, args.model_name + \"_best_model\")\n\n # read lcquad merged data\n if args.dataset_name == \"lcquad\":\n df_train = pd.read_csv(\"./data/lcquad/gold_db/train_gold.csv\")\n df_valid = pd.read_csv(\"./data/lcquad/gold_db/valid_gold.csv\")\n df_test = pd.read_csv(\"./data/lcquad/gold_db/lcquad_test_sorted.csv\")\n args.gold_file_name = \"lcquad/lcquad_gt_5000.csv\"\n # elif args.dataset_name == \"qald9\":\n # df_train = pd.read_csv(\"./data/qald-9/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/qald-9/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/qald-9/test_gold.csv\")\n # args.gold_file_name = \"qald/qald_data_gt.csv\"\n # elif args.dataset_name == \"webqsp\":\n # df_train = pd.read_csv(\"./data/webqsp/train_gold.csv\")\n # df_valid = pd.read_csv(\"./data/webqsp/valid_gold.csv\")\n # df_test = pd.read_csv(\"./data/webqsp/test_gold.csv\")\n # args.gold_file_name = \"webqsp/webqsp_data_gt.csv\"\n\n train_data = read_data_file(df_train, device, \"train\")\n valid_data = read_data_file(df_valid, device, \"valid\")\n test_data = read_data_file(df_test, device, \"test\")\n\n # train model and evaluate\n if args.model_name == \"pure\":\n model = PureNameLNN(args.alpha, -1, False)\n elif args.model_name == \"ctx\":\n model = None\n elif args.model_name == 'type':\n model = None\n elif args.model_name == \"pure_ctx\":\n model = None\n elif args.model_name == \"pure_type\":\n model = None\n elif args.model_name == \"ctx_type\":\n model = None\n\n model = model.to(device)\n print(\"model: \", args.model_name, args.alpha)\n\n # training\n train(model, train_data, valid_data, test_data, args.checkpoint_name, args.num_epoch, args.margin, args.learning_rate)", "def test_compute_glycemic_load(self):\n pass", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert 
x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_with_matrix_and_pointer(tmp_path):\n trans_man = _create_transform_manager()\n\n out_dir = tmp_path\n pointer_tip = None\n\n pointer_writer = pointer.BardPointerWriter(trans_man, out_dir, pointer_tip)\n\n pointer_writer.write_pointer_tip()\n\n pointer_tip = [0, 0, 100]\n\n pointer_writer = pointer.BardPointerWriter(trans_man, out_dir, pointer_tip)\n\n pointer_writer.write_pointer_tip()", "def test_meteo():\n test_path = tempfile.mkdtemp()\n x_train, metadata = meteo(test_path)\n try:\n assert x_train.shape == (11, 6)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_chunk_memory(self):\n layer = tl.Serial(tl.Dense(1024*1024), tl.Dense(128))\n chunked = tl.Chunk(layer, 256)\n x = np.random.uniform(size=(16*1024, 16))\n chunked.init(shapes.signature(x))\n y = chunked(x)\n z = tl.Accelerate(chunked)(x)\n self.assertEqual(y.shape, (16*1024, 128))\n self.assertEqual(z.shape, (16*1024, 128))", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def create_mock_transformer_xl_data(\n batch_size,\n num_heads,\n head_size,\n hidden_size,\n seq_length,\n memory_length=0,\n num_predictions=2,\n two_stream=False,\n num_layers=1,\n include_biases=True,\n include_state=False,\n include_mask=False,\n include_segment=False):\n encoding_shape = (batch_size, seq_length * 2, hidden_size)\n\n data = dict(\n relative_position_encoding=tf.random.normal(shape=encoding_shape),\n content_stream=tf.random.normal(\n shape=(batch_size, seq_length, hidden_size)))\n\n if include_biases:\n attention_bias_shape = (num_heads, head_size)\n data.update(dict(\n content_attention_bias=tf.random.normal(shape=attention_bias_shape),\n segment_attention_bias=tf.random.normal(shape=attention_bias_shape),\n positional_attention_bias=tf.random.normal(shape=attention_bias_shape)))\n\n if two_stream:\n data.update(dict(\n query_stream=tf.random.normal(\n shape=(batch_size, num_predictions, hidden_size)),\n target_mapping=tf.random.normal(\n shape=(batch_size, num_predictions, seq_length))))\n\n if include_state:\n total_seq_length = seq_length + memory_length\n if num_layers > 1:\n state_shape = (num_layers, batch_size, memory_length, hidden_size)\n else:\n state_shape = (batch_size, memory_length, hidden_size)\n data.update(dict(\n state=tf.random.normal(shape=state_shape)))\n else:\n total_seq_length = seq_length\n\n if include_mask:\n mask_shape = (batch_size, num_heads, seq_length, total_seq_length)\n mask_data = np.random.randint(2, size=mask_shape).astype(\"float32\")\n data[\"content_attention_mask\"] = mask_data\n if two_stream:\n data[\"query_attention_mask\"] = mask_data\n\n if include_segment:\n # A transformer XL block takes an individual segment \"encoding\" from the\n # entirety of the Transformer XL segment \"embedding\".\n if num_layers > 1:\n segment_encoding_shape = (num_layers, 2, num_heads, head_size)\n segment_encoding_name = \"segment_embedding\"\n else:\n segment_encoding_shape = (2, num_heads, head_size)\n segment_encoding_name = \"segment_encoding\"\n\n segment_matrix = np.random.randint(\n 2, size=(batch_size, seq_length, total_seq_length))\n data[\"segment_matrix\"] = tf.math.equal(segment_matrix, 1)\n data[segment_encoding_name] = tf.random.normal(shape=segment_encoding_shape)\n\n return data", "def test_read(self):\n with HTTMock(spark_cloud_mock):\n for v in self.device.variables.keys():\n expected = self.cloud_device.read(v)\n self.assertEqual(self.device.read(v), expected)", "def 
_generate(device, env, qnet, ob_scale,\n number_timesteps, param_noise,\n exploration_fraction, exploration_final_eps,\n atom_num, min_value, max_value):\n noise_scale = 1e-2\n action_dim = env.action_space.n\n explore_steps = number_timesteps * exploration_fraction\n if atom_num > 1:\n vrange = torch.linspace(min_value, max_value, atom_num).to(device)\n\n o = env.reset()\n infos = dict()\n for n in range(1, number_timesteps + 1):\n epsilon = 1.0 - (1.0 - exploration_final_eps) * n / explore_steps\n epsilon = max(exploration_final_eps, epsilon)\n\n # sample action\n with torch.no_grad():\n ob = scale_ob(np.expand_dims(o, 0), device, ob_scale)\n q = qnet(ob)\n if atom_num > 1:\n q = (q.exp() * vrange).sum(2)\n if not param_noise:\n if random.random() < epsilon:\n a = int(random.random() * action_dim)\n else:\n a = q.argmax(1).cpu().numpy()[0]\n else:\n # see Appendix C of `https://arxiv.org/abs/1706.01905`\n q_dict = deepcopy(qnet.state_dict())\n for _, m in qnet.named_modules():\n if isinstance(m, nn.Linear):\n std = torch.empty_like(m.weight).fill_(noise_scale)\n m.weight.data.add_(torch.normal(0, std).to(device))\n std = torch.empty_like(m.bias).fill_(noise_scale)\n m.bias.data.add_(torch.normal(0, std).to(device))\n q_perturb = qnet(ob)\n if atom_num > 1:\n q_perturb = (q_perturb.exp() * vrange).sum(2)\n kl_perturb = ((log_softmax(q, 1) - log_softmax(q_perturb, 1)) *\n softmax(q, 1)).sum(-1).mean()\n kl_explore = -math.log(1 - epsilon + epsilon / action_dim)\n if kl_perturb < kl_explore:\n noise_scale *= 1.01\n else:\n noise_scale /= 1.01\n qnet.load_state_dict(q_dict)\n if random.random() < epsilon:\n a = int(random.random() * action_dim)\n else:\n a = q_perturb.argmax(1).cpu().numpy()[0]\n\n # take action in env\n o_, r, done, info = env.step(a)\n if info.get('episode'):\n infos = {\n 'eplenmean': info['episode']['l'],\n 'eprewmean': info['episode']['r'],\n }\n # return data and update observation\n yield (o, [a], [r], o_, [int(done)], infos)\n infos = dict()\n o = o_ if not done else env.reset()", "def test(cfg):\n # Set up environment.\n distributed.init_distributed_training(cfg)\n\n # Set random seed from configs.\n np.random.seed(cfg.RNG_SEED)\n torch.manual_seed(cfg.RNG_SEED)\n\n # Print config.\n if distributed.is_master_proc():\n print(\"Test with config:\")\n print(cfg)\n\n # Build the model and print model statistics.\n # Use cuda if available\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n # Construct the model\n model = PanopticNarrativeGroundingBaseline(cfg, device=device)\n # Determine the GPU used by the current process\n cur_device = torch.cuda.current_device()\n # Transfer the model to the current GPU device\n model = model.cuda(device=cur_device)\n if cfg.NUM_GPUS > 1:\n # Make model replica operate on the current device\n model = torch.nn.parallel.DistributedDataParallel(\n module=model, device_ids=[cur_device], output_device=cur_device,\n find_unused_parameters=True\n )\n if cfg.LOG_MODEL_INFO and distributed.is_master_proc():\n print(\"Model:\\n{}\".format(model))\n print(\"Params: {:,}\".format(np.sum([p.numel() for p in model.parameters()]).item()))\n print(\"Mem: {:,} MB\".format(torch.cuda.max_memory_allocated() / 1024 ** 3))\n print(\"nvidia-smi\")\n os.system(\"nvidia-smi\")\n\n # Load a checkpoint to test if applicable.\n checkpoint_path = osp.join(cfg.OUTPUT_DIR, 'model_final.pth')\n if cfg.TEST.CHECKPOINT_FILE_PATH != \"\":\n checkpoint_path = cfg.TEST.CHECKPOINT_FILE_PATH\n if osp.exists(checkpoint_path):\n if 
distributed.is_master_proc():\n print('Loading model from: {0}'.format(checkpoint_path))\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n elif cfg.TRAIN.CHECKPOINT_FILE_PATH != \"\":\n # If no checkpoint found in TEST.CHECKPOINT_FILE_PATH or in the current\n # checkpoint folder, try to load checkpoint from\n # TRAIN.CHECKPOINT_FILE_PATH and test it.\n checkpoint_path = cfg.TRAIN.CHECKPOINT_FILE_PATH\n checkpoint = torch.load(checkpoint_path, map_location=\"cpu\")\n if cfg.NUM_GPUS > 1:\n model.module.load_state_dict(checkpoint['model_state'])\n else:\n model.load_state_dict(checkpoint['model_state'])\n else:\n if distributed.is_master_proc():\n print(\"Testing with random initialization. Only for debugging.\")\n\n # Create testing loaders.\n test_dataset = PanopticNarrativeGroundingDataset(cfg, cfg.DATA.VAL_SPLIT, train=False)\n test_loader = DataLoader(\n test_dataset,\n batch_size=int(cfg.TRAIN.BATCH_SIZE / max(1, cfg.NUM_GPUS)),\n shuffle=False,\n sampler=(DistributedSampler(test_dataset) if cfg.NUM_GPUS > 1 else None),\n num_workers=cfg.DATA_LOADER.NUM_WORKERS,\n pin_memory=cfg.DATA_LOADER.PIN_MEMORY\n )\n \n if distributed.is_master_proc():\n print(\"Testing model for {} iterations\".format(len(test_loader)))\n\n # Perform test on the entire dataset.\n perform_test(test_loader, model, cfg)", "def get_GP_samples(Y,T,X,ind_kf,ind_kt,num_obs_times,num_obs_values,\n num_tcn_grid_times, cov_grid, input_dim,method, gp_params, lab_vitals_only, pad_before): ##,med_cov_grid \n\n n_mc_smps, M = gp_params.n_mc_smps, gp_params.M\n grid_max = tf.shape(X)[1]\n Z = tf.zeros([0,grid_max,input_dim])\n \n N = tf.shape(T)[0] #number of observations\n \n #setup tf while loop (have to use this bc loop size is variable)\n def cond(i,Z):\n return i<N\n \n def body(i,Z):\n Yi = tf.reshape(tf.slice(Y,[i,0],[1,num_obs_values[i]]),[-1]) #MM: tf.reshape(x, [-1]) flattens tensor x (e.g. 
[2,3,1] to [6]), slice cuts out all Y data of one patient\n Ti = tf.reshape(tf.slice(T,[i,0],[1,num_obs_times[i]]),[-1])\n ind_kfi = tf.reshape(tf.slice(ind_kf,[i,0],[1,num_obs_values[i]]),[-1])\n ind_kti = tf.reshape(tf.slice(ind_kt,[i,0],[1,num_obs_values[i]]),[-1])\n Xi = tf.reshape(tf.slice(X,[i,0],[1,num_tcn_grid_times[i]]),[-1])\n X_len = num_tcn_grid_times[i]\n \n GP_draws = draw_GP(Yi,Ti,Xi,ind_kfi,ind_kti,method=method, gp_params=gp_params)\n pad_len = grid_max-X_len #pad by this much\n #padding direction:\n if pad_before:\n print('Padding GP_draws before observed data..')\n padded_GP_draws = tf.concat([tf.zeros((n_mc_smps,pad_len,M)), GP_draws],1) \n else:\n padded_GP_draws = tf.concat([GP_draws,tf.zeros((n_mc_smps,pad_len,M))],1) \n\n if lab_vitals_only:\n Z = tf.concat([Z,padded_GP_draws],0) #without covs\n else: #with covs\n medcovs = tf.slice(cov_grid,[i,0,0],[1,-1,-1])\n tiled_medcovs = tf.tile(medcovs,[n_mc_smps,1,1])\n padded_GPdraws_medcovs = tf.concat([padded_GP_draws,tiled_medcovs],2)\n Z = tf.concat([Z,padded_GPdraws_medcovs],0) #with covs\n \n return i+1,Z \n \n i = tf.constant(0)\n #with tf.control_dependencies([tf.Print(tf.shape(ind_kf), [tf.shape(ind_kf), tf.shape(ind_kt), num_obs_values], 'ind_kf & ind_kt & num_obs_values')]):\n i,Z = tf.while_loop(cond,body,loop_vars=[i,Z],\n shape_invariants=[i.get_shape(),tf.TensorShape([None,None,None])])\n\n return Z", "def unit_test():\n test_net = SqueezeNetSqueezeLSTM(20, 6)\n test_net_output = test_net(\n Variable(torch.randn(5, 36, 94, 168)),\n Variable(torch.randn(5, 8, 23, 41)))\n logging.debug('Net Test Output = {}'.format(test_net_output))\n logging.debug('Network was Unit Tested')\n print(test_net.num_params())\n # for param in test_net.parameters():", "def test_write(self):\n dset = self.f.create_dataset('x2', (10, 2))\n\n x = np.zeros((10, 1))\n dset[:, 0] = x[:, 0]\n with self.assertRaises(TypeError):\n dset[:, 1] = x", "def test_input_data():\n mp = mapchete.open(\n os.path.join(SCRIPTDIR, \"testdata/cleantopo_br.mapchete\"))\n tp = BufferedTilePyramid(\"geodetic\")\n # TODO tile with existing but empty data\n tile = tp.tile(5, 5, 5)\n output_params = dict(\n type=\"geodetic\",\n format=\"GeoTIFF\",\n path=OUT_DIR,\n pixelbuffer=0,\n metatiling=1,\n bands=2,\n dtype=\"int16\"\n )\n output = gtiff.OutputData(output_params)\n with output.open(tile, mp, resampling=\"nearest\") as input_tile:\n assert input_tile.resampling == \"nearest\"\n for data in [\n input_tile.read(), input_tile.read(1), input_tile.read([1]),\n # TODO assert valid indexes are passed input_tile.read([1, 2])\n ]:\n assert isinstance(data, ma.masked_array)\n assert input_tile.is_empty()\n # open without resampling\n with output.open(tile, mp) as input_tile:\n pass", "def test():\n args = parse_args()\n\n devid = int(os.getenv('DEVICE_ID')) if os.getenv('DEVICE_ID') else 0\n context.set_context(mode=context.GRAPH_MODE, device_target='Ascend', save_graphs=True, device_id=devid)\n\n # logger\n args.outputs_dir = os.path.join(args.log_path,\n datetime.datetime.now().strftime('%Y-%m-%d_time_%H_%M_%S'))\n rank_id = int(os.environ.get('RANK_ID')) if os.environ.get('RANK_ID') else 0\n args.logger = get_logger(args.outputs_dir, rank_id)\n\n context.reset_auto_parallel_context()\n parallel_mode = ParallelMode.STAND_ALONE\n context.set_auto_parallel_context(parallel_mode=parallel_mode, gradients_mean=True, device_num=1)\n\n args.logger.info('Creating Network....')\n network = SolveOutput(YOLOV3DarkNet53(is_training=False))\n\n data_root = args.data_root\n 
ann_file = args.annFile\n\n args.logger.info(args.pretrained)\n if os.path.isfile(args.pretrained):\n param_dict = load_checkpoint(args.pretrained)\n param_dict_new = {}\n for key, values in param_dict.items():\n if key.startswith('moments.'):\n continue\n elif key.startswith('yolo_network.'):\n param_dict_new[key[13:]] = values\n else:\n param_dict_new[key] = values\n load_param_into_net(network, param_dict_new)\n args.logger.info('load_model {} success'.format(args.pretrained))\n else:\n args.logger.info('{} not exists or not a pre-trained file'.format(args.pretrained))\n assert FileNotFoundError('{} not exists or not a pre-trained file'.format(args.pretrained))\n exit(1)\n\n config = ConfigYOLOV3DarkNet53()\n if args.testing_shape:\n config.test_img_shape = conver_testing_shape(args)\n\n ds, data_size = create_yolo_dataset(data_root, ann_file, is_training=False, batch_size=1,\n max_epoch=1, device_num=1, rank=rank_id, shuffle=False,\n config=config)\n\n args.logger.info('testing shape : {}'.format(config.test_img_shape))\n args.logger.info('totol {} images to eval'.format(data_size))\n\n network.set_train(False)\n # build attacker\n attack = DeepFool(network, num_classes=80, model_type='detection', reserve_ratio=0.9, bounds=(0, 1))\n input_shape = Tensor(tuple(config.test_img_shape), ms.float32)\n\n args.logger.info('Start inference....')\n batch_num = args.samples_num\n adv_example = []\n for i, data in enumerate(ds.create_dict_iterator(num_epochs=1)):\n if i >= batch_num:\n break\n image = data[\"image\"]\n image_shape = data[\"image_shape\"]\n\n gt_boxes, gt_logits = network(image, input_shape)\n gt_boxes, gt_logits = gt_boxes.asnumpy(), gt_logits.asnumpy()\n gt_labels = np.argmax(gt_logits, axis=2)\n\n adv_img = attack.generate((image.asnumpy(), image_shape.asnumpy()), (gt_boxes, gt_labels))\n adv_example.append(adv_img)\n np.save('adv_example.npy', adv_example)", "def model_and_data(request, hyperparams, estep_conf):\n if tvo.get_run_policy() == \"mpi\":\n init_processes()\n\n precision, N, D, H, batch_size = get(hyperparams, \"precision\", \"N\", \"D\", \"H\", \"batch_size\")\n\n if request.param == \"BSC\":\n W_gt = generate_bars(H, bar_amp=10.0, precision=precision)\n sigma2_gt = to.ones((1,), dtype=precision, device=tvo.get_device())\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n\n sigma2_init = to.tensor([1.0], dtype=precision, device=tvo.get_device())\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = BSC(\n H=H, D=D, W_init=W_gt, sigma2_init=sigma2_gt, pies_init=pies_gt, precision=precision\n )\n\n fname = \"bars_test_data_bsc.h5\"\n\n write_dataset(fname, N, D, np.float32, model)\n\n model.theta[\"W\"] = W_init\n model.theta[\"sigma2\"] = sigma2_init\n model.theta[\"pies\"] = pies_init\n\n elif request.param == \"NoisyOR\":\n W_gt = generate_bars(H, bar_amp=0.8, bg_amp=0.1, precision=precision)\n pies_gt = to.full((H,), 2.0 / H, dtype=precision, device=tvo.get_device())\n\n to.manual_seed(999)\n W_init = to.rand((D, H), dtype=precision)\n W_init = W_init.to(device=tvo.get_device())\n broadcast(W_init)\n pies_init = to.full((H,), 1.0 / H, dtype=precision, device=tvo.get_device())\n\n model = NoisyOR(H=H, D=D, W_init=W_gt, pi_init=pies_gt, precision=precision)\n\n fname = \"bars_test_data_nor.h5\"\n\n write_dataset(fname, N, D, np.uint8, model)\n\n model.theta[\"W\"] = 
W_init\n model.theta[\"pies\"] = pies_init\n\n if tvo.get_run_policy() == \"mpi\":\n dist.barrier()\n\n return model, fname", "def test_deviceX_1():\n assert 0", "def test(batch_size=1, num_sample=16):\n return paddle.batch(_read_creater(num_sample=num_sample), batch_size)", "def test_forfatal_functions(self):\n logging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\n logging.getLogger(\"batchglm\").setLevel(logging.WARNING)\n logging.getLogger(\"diffxpy\").setLevel(logging.WARNING)\n\n num_observations = 10\n num_features = 2\n\n sim = Simulator(num_observations=num_observations, num_features=num_features)\n sim.generate_sample_description(num_batches=0, num_conditions=0)\n sim.generate()\n\n random_sample_description = pd.DataFrame({\n \"pseudotime\": np.random.random(size=sim.nobs),\n \"batch\": np.random.randint(2, size=sim.nobs)\n })\n\n test = de.test.continuous_1d(\n data=sim.X,\n continuous=\"pseudotime\",\n df=3,\n formula_loc=\"~ 1 + pseudotime + batch\",\n formula_scale=\"~ 1\",\n factor_loc_totest=\"pseudotime\",\n test=\"wald\",\n sample_description=random_sample_description,\n quick_scale=True,\n batch_size=None,\n training_strategy=\"DEFAULT\",\n dtype=\"float64\"\n )\n\n summary = test.summary()\n ids = test.gene_ids\n\n # 1. Test all additional functions which depend on model computation:\n # 1.1. Only continuous model:\n temp = test.log_fold_change(genes=ids, nonnumeric=False)\n temp = test.max(genes=ids, nonnumeric=False)\n temp = test.min(genes=ids, nonnumeric=False)\n temp = test.argmax(genes=ids, nonnumeric=False)\n temp = test.argmin(genes=ids, nonnumeric=False)\n temp = test.summary(nonnumeric=False)\n # 1.2. Full model:\n temp = test.log_fold_change(genes=ids, nonnumeric=True)\n temp = test.max(genes=ids, nonnumeric=True)\n temp = test.min(genes=ids, nonnumeric=True)\n temp = test.argmax(genes=ids, nonnumeric=True)\n temp = test.argmin(genes=ids, nonnumeric=True)\n temp = test.summary(nonnumeric=True)\n\n return True", "def test_wool():\n test_path = tempfile.mkdtemp()\n x_train, metadata = wool(test_path)\n try:\n assert x_train.shape == (309, 2)\n except:\n shutil.rmtree(test_path)\n raise()", "def test_numpify_and_store(self):\n Nsamples = 9\n Ntimesteps = 10\n Ncolumns = 3\n X = [[[0 for a in range(Ncolumns)] for b in range(Ntimesteps)] \\\n for c in range(Nsamples)]\n y = [[0 for a in range(Ntimesteps)] for b in range(Nsamples)]\n xname = 'xname'\n yname = 'yname'\n outdatapath = os.getcwd()\n tutorial_pamap2.numpify_and_store(X, y, xname, yname, outdatapath, \\\n shuffle=True)\n filename = os.path.join(outdatapath, xname+ '.npy')\n test = os.path.isfile(filename)\n if test == True:\n os.remove(filename)\n os.remove(os.path.join(outdatapath, yname + '.npy'))\n assert test" ]
[ "0.66837543", "0.66518664", "0.6184355", "0.6113137", "0.58957744", "0.5887892", "0.58565533", "0.5855714", "0.5791288", "0.5715855", "0.56533664", "0.5649407", "0.5645982", "0.5569726", "0.5525689", "0.5520632", "0.5512734", "0.5490553", "0.5452423", "0.54212606", "0.53811026", "0.53693086", "0.5343723", "0.53103983", "0.5287458", "0.5286752", "0.5275774", "0.52635837", "0.524238", "0.524214", "0.5221695", "0.5210801", "0.5190939", "0.51863724", "0.51765555", "0.5174389", "0.5170602", "0.5150499", "0.51463205", "0.5139448", "0.5134264", "0.5132594", "0.5131977", "0.5130938", "0.51215065", "0.5118143", "0.511803", "0.5111194", "0.5110172", "0.5110172", "0.5107281", "0.5107077", "0.5105265", "0.51017976", "0.50974506", "0.50901556", "0.50872433", "0.5083166", "0.5081412", "0.50746423", "0.50719565", "0.50713134", "0.50713134", "0.5069252", "0.50670874", "0.5053582", "0.505035", "0.50497043", "0.5049095", "0.5046092", "0.5043105", "0.50355804", "0.5035223", "0.5028129", "0.5026561", "0.502063", "0.5015126", "0.5015065", "0.5010907", "0.5010162", "0.5010162", "0.5005787", "0.50048065", "0.5004115", "0.5004115", "0.5003922", "0.50038576", "0.500312", "0.4999934", "0.49998915", "0.49954575", "0.49934638", "0.49934524", "0.4989839", "0.4987235", "0.49862424", "0.49831748", "0.49819127", "0.49784067", "0.4976468" ]
0.65643054
2
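A regularity worth noting across these rows, inferred from the dump rather than documented by it: document_rank equals the number of negative_scores entries that exceed document_score. In the record above, exactly two negatives (0.66837543 and 0.66518664) out-score the document's 0.65643054, matching the stored rank of 2, and the next record's document out-scores every negative and carries rank 0. A minimal Python sketch of that reconstruction (the helper name is my own):

def infer_document_rank(document_score, negative_scores):
    # Hypothetical helper: counts the negatives that out-score the
    # positive document. Scores are stored as strings in this dump,
    # hence the float() conversion.
    return sum(float(s) > document_score for s in negative_scores)

# For the record above this returns 2; for the following record, 0.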
Test the popxl autodiff op
def test_documentation_popxl_autodiff(self):
    filename = "autodiff.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def test_57o_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 7.9)", "def test_is_old_papernum(self):\n self.assertFalse(util.is_old_papernum(\"9106001\"))\n self.assertTrue(util.is_old_papernum(\"9107001\"))\n self.assertFalse(util.is_old_papernum(\"9200001\"))\n self.assertTrue(util.is_old_papernum(\"9201001\"))\n self.assertTrue(util.is_old_papernum(\"0703999\"))\n self.assertFalse(util.is_old_papernum(\"0704001\"))", "def test_99_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 15.6)", "def test_pressure_increasing_check_some_decreasing(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_g_asignar_rol(self):", "def test_open_fill(self):", "def test31_clear_air():\n assert not is_precip_mode(31), 'VCP 31 is not precip'", "def test_modexp(self):\n self.assertEqual(MathFunctions.modexp(2, 5, 7), 4)\n self.assertEqual(MathFunctions.modexp(2, 10, 8), 0)", "def test_AKs_correct_preflop_odds(self):\n self.assertEqual(self.hand.preFlopOdds10, 20.7)", "def test_analytical_vs_numerical():\n pass", "def test_devide_int(self):\n self.assertEqual(operations.devide(8,4), 2)", "def test_pressure_increasing_check_some_bad(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_without_orographic_enhancement(self):\n input_cube = self.precip_cube.copy()\n input_cube.rename(\"air_temperature\")\n input_cube.units = \"K\"\n plugin = CreateExtrapolationForecast(input_cube, self.vel_x, self.vel_y)\n result = plugin.extrapolate(10)\n expected_result = np.array(\n [[np.nan, np.nan, np.nan], [np.nan, 1, 2], [np.nan, 1, 1], [np.nan, 0, 2]],\n dtype=np.float32,\n )\n expected_result = np.ma.masked_invalid(expected_result)\n expected_forecast_period = np.array([600], dtype=np.int64)\n # Check we get the expected result, and the correct time coordinates.\n self.assertArrayEqual(\n 
np.ma.getmask(expected_result), np.ma.getmask(result.data)\n )\n self.assertArrayAlmostEqual(expected_result.data, result.data.data)\n self.assertArrayAlmostEqual(\n result.coord(\"forecast_period\").points, expected_forecast_period\n )\n self.assertEqual(result.coord(\"forecast_period\").units, \"seconds\")\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n input_cube.coord(\"time\").points,\n )\n self.assertEqual(\n result.coord(\"time\").points, input_cube.coord(\"time\").points + 600\n )", "def test_one_pop(data_: tuple, _is_pop: bool):\n x_bar = cls.get_mean(data_)\n s_x = cls.get_stdev(data_, is_population=_is_pop)\n n_x = cls.get_n(data_)\n return (x_bar - h0) / (s_x / sqrt(n_x))", "def test_get_meta_range(self):\n pass", "def test_with_orographic_enhancement(self):\n plugin = CreateExtrapolationForecast(\n self.precip_cube,\n self.vel_x,\n self.vel_y,\n orographic_enhancement_cube=self.oe_cube,\n )\n result = plugin.extrapolate(10)\n expected_result = np.array(\n [\n [np.nan, np.nan, np.nan],\n [np.nan, 1.03125, 1.0],\n [np.nan, 1.0, 0.03125],\n [np.nan, 0, 2.0],\n ],\n dtype=np.float32,\n )\n expected_result = np.ma.masked_invalid(expected_result)\n expected_forecast_period = np.array([600], dtype=np.int64)\n # Check we get the expected result, and the correct time coordinates.\n self.assertArrayEqual(\n np.ma.getmask(expected_result), np.ma.getmask(result.data)\n )\n self.assertArrayAlmostEqual(expected_result.data, result.data.data)\n self.assertArrayAlmostEqual(\n result.coord(\"forecast_period\").points, expected_forecast_period\n )\n self.assertEqual(result.coord(\"forecast_period\").units, \"seconds\")\n self.assertEqual(\n result.coord(\"forecast_reference_time\").points,\n self.precip_cube.coord(\"time\").points,\n )\n self.assertEqual(\n result.coord(\"time\").points, self.precip_cube.coord(\"time\").points + 600\n )", "def testspec(arr: list[int]) -> None:\n\n print(50*'-')\n print(arr)\n print_rem(arr)\n rev_dupes(arr)\n print(arr)", "def test_FlexCrop1(self):", "def clean_data(self, opz):\n# pdb.set_trace()\n mask = (opz['Opzetstuk Noord (°)']<-1) | (opz['Opzetstuk Noord (°)']>100)\n opz = opz.drop(opz.loc[mask].index)\n opz['open'] = opz[\"Opzetstuk Noord (°)\"].apply(lambda x: 1 if x < 80 else 0)\n #Deze klopt niet. We hebben het moment nodig van opengaan en het moment van dichtgaat. Moment van openen is: wanneer de verandering van de aantal graden >1 graad is. Moment van sluiten is de laatste verandering totdat het niet meer veranderd. 
Zie ook code van Pieter in C#.\n opz['diff'] = opz['open'].diff()\n beweegt=opz[opz['diff']!=0]\n return beweegt", "def c_test_population_function(self, function):\r\n return 1", "def test_get_range(self):\n pass", "def test_170329_notimp(self):\n spc = parser(get_file('PTSDY2_notimp.txt'))\n # spc.draw_outlooks()\n outlook = spc.get_outlook('CATEGORICAL', 'MRGL')\n self.assertAlmostEqual(outlook.geometry.area, 110.24, 2)", "def testMedicationsImmunosupp(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"immunosupp\")\n\n self.util.boolPropertyTest(self, attr, \"immunosupp\")", "def test_wb(self):\n df = dep.read_wb(get_path('wb.txt'))\n self.assertAlmostEquals(df['precip'].max(), 162.04, 2)", "def test21_precip():\n assert is_precip_mode(21), 'VCP 21 is precip'", "def test_for_more_flags(self):\n shorter = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.inc\", True, False)\n longer = REFRESH_COMMANDS.calculate_refresh_commands(\"Rainmeter.exe\", \"test-config\", \"file.ini\", True, True)\n\n self.assertGreater(longer, shorter)", "def test_4_4_1_1(self):\n pass", "def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '[>............] 0%')", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_is_ramped_using_int(self):\n self.feature_test.set_percentage(100)\n self.assertTrue(self.feature_test._is_ramped(5))", "def test_untar(self):", "def test_10(self, test):\r\n return test.MANUAL()", "def test_issue_reset_time(self):\n pass", "def test_ncols_gtiff_object(self):\n self.assertEqual(_test_object(landsat_gtiff)[2], 235)", "def test_right(self):\n x = np.array([-100, -2, -1, 0, 1, 1.1])\n self.assertEqual(npinterval.half_sample_mode(x), +1.05)", "def test_dewpoint_specific_humidity_old_signature():\n p = 1013.25 * units.mbar\n temperature = 20. 
* units.degC\n q = 0.012 * units.dimensionless\n with pytest.raises(ValueError, match='changed in 1.0'):\n dewpoint_from_specific_humidity(q, temperature, p)", "def test_unroll_patch_true(self):\r\n self.validate((3, 2, 7, 5), (5, 2, 2, 3), 'valid', unroll_patch=True)\r\n self.validate((3, 2, 7, 5), (5, 2, 2, 3), 'full', unroll_patch=True)\r\n self.validate((3, 2, 3, 3), (4, 2, 3, 3), 'valid',\r\n unroll_patch=True, verify_grad=False)", "def test_osd(self):\n self.assertEqual(osd(self.TestData), (9,3,3))", "def test_nrows_gtiff_object(self):\n self.assertEqual(_test_object(landsat_gtiff)[1], 224)", "def test_pressure_increasing_check_some_constants(mocker, pressure_values, expected):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == expected)", "def test_of_agreement(self): \n \n pop_t = world.pop_t \n pop_sub = world.pop_sub \n \n \n Ns = world.subtime.length\n \n dt = world.time.step\n ds = world.subtime.step\n N = round(ds/dt)\n for i in range(Ns):\n numpy.testing.assert_allclose(pop_sub[:,i],pop_t[i*N,:])", "def test_pickable_propertysheet(self):\n from Acquisition import aq_base\n import pickle\n\n self.assertGreater(len(pickle.dumps(aq_base(self.ldap))), 170)", "def test_left(self):\n x = np.array([-1.1, -1, 0, 1, 2, 100])\n self.assertEqual(npinterval.half_sample_mode(x), -1.05)", "def test_MINX_pass(self):\n self.assertTrue(self.mod.minx.isset)", "def testPerfilCasoInterseccionDescendente(self):\n if self.TESTALL:\n perfilOriginal = [1,9,5,0,3,7,12,0]\n resultadoEsperado = [1,9,5,7,12]\n perfil = Perfil.Perfil()\n resultado = perfil.calcularPerfil(perfilOriginal,0)\n self.assertEqual(resultadoEsperado, resultado)", "def _test_totalpop(self, obj, known_2000, known_2010):\n known_delta = known_2010 - known_2000\n known_pct = float(known_delta) / float(known_2000)\n\n self.assertEqual(float(obj['data']['2000'][\"P1\"]['P001001']), known_2000)\n self.assertEqual(float(obj['data']['2010'][\"P1\"]['P001001']), known_2010)\n self.assertEqual(float(obj['data']['delta'][\"P1\"]['P001001']), known_delta)\n self.assertAlmostEqual(\n float(obj['data']['pct_change'][\"P1\"]['P001001']),\n known_pct\n )", "def test_pressure_increasing_check_all_pass(mocker, pressure_values):\n profile = mocker.patch.object(argortqcpy.profile, \"Profile\")\n profile.get_property_data = mocker.Mock(return_value=ma.masked_array(pressure_values))\n\n pic = PressureIncreasingCheck(profile, None)\n output = pic.run()\n\n assert np.all(output.get_output_flags_for_property(\"PRES\").data == ArgoQcFlag.GOOD.value)", "def test_cliford(generator, paulixops, result):\n u = clifford(generator, paulixops)\n assert u.compare(result)", "def test_binops(self):", "def test_floor(doctest):", "def test_margalef(self):\n self.assertEqual(margalef(self.TestData), 8/log(22))", "def test_delete_o(self):\n number = 6.0\n number_return = self.new_calculation.delete_o(number)\n self.assertEqual(number_return, 6)\n self.assertNotEqual(type(number), int)\n self.assertEqual(type(number_return), int)\n\n number = 10.565\n number_return = self.new_calculation.delete_o(number)\n self.assertEqual(number_return, 10.565)\n self.assertEqual(type(number_return), float)\n\n number = 10\n number_return = self.new_calculation.delete_o(number)\n self.assertEqual(number_return, 10)\n 
self.assertEqual(type(number_return), int)", "def test_flceiling(doctest):", "def testCorrectForTwoAtomCellWithoutPeriodicityNEEDED(self):\n\t\texpDist = 0.01*10\n\t\tself._checkExpMatchesActual(expDist)", "def test_unroll_patch_false(self):\r\n self.validate((3, 2, 7, 5), (5, 2, 2, 3), 'valid', unroll_patch=False)\r\n self.validate((3, 2, 7, 5), (5, 2, 2, 3), 'full', unroll_patch=False)\r\n self.validate((3, 2, 3, 3), (4, 2, 3, 3), 'valid',\r\n unroll_patch=False, verify_grad=False)", "def test_iimi1():\n iimi = interactive_intrinsic_mutual_information(n_mod_m(3, 2), rvs=[[0], [1]], crvs=[2], rounds=1)\n assert iimi == pytest.approx(0.0)", "def test_fltruncate(doctest):", "def test_T4():", "def test_T4():", "def test_reset(self):\r\n self.p += 8\r\n self.p.reset()\r\n self.assertEqual(str(self.p), '0% [....................]')", "def test_rolling_before_analysis(self):\n cheese = TomoCheese.from_demo_images()\n cheese.analyze()\n original_roi_1 = copy.copy(cheese.module.rois[\"1\"].pixel_value)\n for img in cheese.dicom_stack:\n img.roll(direction=\"x\", amount=20)\n cheese.analyze()\n new_roi_1 = cheese.module.rois[\"1\"].pixel_value\n assert math.isclose(original_roi_1, new_roi_1, abs_tol=3)", "def test_y_before_x(self):", "def test_minus(self):\n print('test_minus');\n self.assertEqual(90, minus(100, 10))", "def test_screenip_unit_det(self):\n #\n # '''\n # Dose Equiv. Toxicity:\n #\n # The FI value (kg-diet) is multiplied by the reported NOAEC (mg/kg-diet) and then divided by\n # the test animal's body weight to derive the dose-equivalent chronic toxicity value (mg/kg-bw):\n #\n # Dose Equiv. Toxicity = (NOAEC * FI) / BW\n #\n # NOTE: The user enters the lowest available NOAEC for the mallard duck, for the bobwhite quail,\n # and for any other test species. The model calculates the dose equivalent toxicity values for\n # all of the modeled values (Cells F20-24 and results worksheet) and then selects the lowest dose\n # equivalent toxicity value to represent the chronic toxicity of the chemical to birds.\n # '''\n # try:\n # # result =\n # # self.assertEquals(result, )\n # pass\n # finally:\n # pass\n # return\n #\n #\n # def test_det_duck(self):\n # \"\"\"\n # unittest for function screenip.det_duck:\n # :return:\n # \"\"\"\n # try:\n # # det_duck = (self.noaec_duck * self.fi_bird(1580.)) / (1580. / 1000.)\n # screenip_empty.noaec_duck = pd.Series([1.], dtype='int')\n # screenip_empty.fi_bird = pd.Series([1.], dtype='int')\n # result = screenip_empty.det_duck()\n # npt.assert_array_almost_equal(result, 1000., 4, '', True)\n # finally:\n # pass\n # return\n #\n # def test_det_quail(self):\n # \"\"\"\n # unittest for function screenip.det_quail:\n # :return:\n # \"\"\"\n # try:\n # # det_quail = (self.noaec_quail * self.fi_bird(178.)) / (178. 
/ 1000.)\n # screenip_empty.noaec_quail = pd.Series([1.], dtype='int')\n # screenip_empty.fi_bird = pd.Series([1.], dtype='int')\n # result = screenip_empty.det_quail()\n # npt.assert_array_almost_equal(result, 1000., 4, '', True)\n # finally:\n # pass\n # return\n #\n # def test_det_other_1(self):\n # \"\"\"\n # unittest for function screenip.det_other_1:\n # :return:\n # \"\"\"\n # try:\n # #det_other_1 = (self.noaec_bird_other_1 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)\n # #det_other_2 = (self.noaec_bird_other_2 * self.fi_bird(self.bodyweight_bird_other_1)) / (self.bodyweight_bird_other_1 / 1000.)\n # screenip_empty.noaec_bird_other_1 = pd.Series([400.]) # mg/kg-diet\n # screenip_empty.bodyweight_bird_other_1 = pd.Series([100]) # grams\n # result = screenip_empty.det_other_1()\n # npt.assert_array_almost_equal(result, 4666, 4)\n # finally:\n # pass\n # return\n #\n # The following tests are configured such that:\n # 1. four values are provided for each needed input\n # 2. the four input values generate four values of out_det_* per bird type\n # 3. the inputs per bird type are set so that calculations of out_det_* will result in\n # each bird type having one minimum among the bird types;\n # thus all four calculations result in one minimum per bird type\n\n # create empty pandas dataframes to create empty object for this unittest\n screenip_empty = self.create_screenip_object()\n\n expected_results = pd.Series([4.2174, 4.96125, 7.97237, 10.664648], dtype='float')\n result = pd.Series([], dtype='float')\n\n try:\n screenip_empty.bodyweight_bobwhite_quail = 178.\n screenip_empty.bodyweight_mallard_duck = 1580.\n screenip_empty.noaec_quail = pd.Series([100., 300., 75., 150.], dtype='float')\n screenip_empty.noaec_duck = pd.Series([400., 100., 200., 350.], dtype='float')\n screenip_empty.noaec_bird_other_1 = pd.Series([50., 200., 300., 250.], dtype='float')\n screenip_empty.noaec_bird_other_2 = pd.Series([350., 400., 250., 100.], dtype='float')\n screenip_empty.noaec_bodyweight_bird_other_1 = pd.Series([345.34, 453.54, 649.29, 294.56], dtype='float')\n screenip_empty.noaec_bodyweight_bird_other_2 = pd.Series([123.84, 85.743, 127.884, 176.34], dtype='float')\n screenip_empty.no_of_runs = len(expected_results)\n result = screenip_empty.det()\n npt.assert_allclose(result, expected_results, rtol=1e-4, atol=0, err_msg='', verbose=True )\n finally:\n tab = [result, expected_results]\n print(\"\\n\")\n print(inspect.currentframe().f_code.co_name)\n print(tabulate(tab, headers='keys', tablefmt='rst'))\n return", "def test11_precip():\n assert is_precip_mode(11), 'VCP 11 is precip'", "def untruncatedMode(self, x):\n self.raiseAnError(NotImplementedError,'untruncatedMode not yet implemented for ' + self.type)", "def test_case03(self):\n version1 = versions.get_version_power(\"1.1.1\")\n version2 = versions.get_version_power(\"0.2.1\")\n self.assertGreater(version1, version2)", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_active_pen_range(get_touchmat):\n touchmat = get_touchmat\n touchmat_model = check_device_types.get_device_model(touchmat)\n\n if touchmat_model == Devices.touchmat_g1:\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_pen_range()\n assert 'Functionality not available' in str(execinfo.value)\n\n with pytest.raises(PySproutError) as execinfo:\n touchmat.active_pen_range(\"ten_mm\")\n assert 
'Functionality not available' in str(execinfo.value)\n return\n\n cur_range = touchmat.active_pen_range()\n assert isinstance(cur_range, touchmat.ActivePenRange)\n assert isinstance(cur_range.value, str)\n\n for item in TouchMat.ActivePenRange:\n pen_range = touchmat.active_pen_range(item)\n assert pen_range == item\n assert touchmat.active_pen_range() == item\n pen_range = touchmat.active_pen_range(item.value)\n assert pen_range == item\n assert touchmat.active_pen_range() == item\n\n ranges = [\"five_mm\", \"ten_mm\", \"fifteen_mm\", \"twenty_mm\"]\n for item in ranges:\n pen_range = touchmat.active_pen_range(item)\n assert pen_range.value == item\n assert touchmat.active_pen_range().value == item\n\n # Verify incorrect values return errors\n with pytest.raises(ValueError) as execinfo:\n touchmat.active_pen_range(\"thirty_mm\")\n assert 'is not a valid ActivePenRange' in str(execinfo.value)\n with pytest.raises(ValueError) as execinfo:\n touchmat.active_pen_range(3)\n assert 'is not a valid ActivePenRange' in str(execinfo.value)\n\n # Send a bad value to SoHal (bypassing the hippy enum check) and makes\n # sure SoHal throws an error...\n with pytest.raises(PySproutError) as execinfo:\n touchmat._send_msg('active_pen_range', 1) # pylint: disable=protected-access\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat._send_msg('active_pen_range', 'moo') # pylint: disable=protected-access\n assert 'Invalid parameter' in execinfo.value.message\n with pytest.raises(PySproutError) as execinfo:\n touchmat._send_msg('active_pen_range', {}) # pylint: disable=protected-access\n assert 'Invalid parameter' in execinfo.value.message", "def _test_out_of_range(self):\n self.cdbconf.setup('KKG')\n self.cdbconf.setConfiguration('CUSTOM_OPT')\n az, el, latitude = [radians(50)] * 3\n site_info = {'latitude': latitude}\n self.p.setup(site_info, self.source, self.device)\n self.p.setRewindingMode('AUTO')\n offset = 20\n max_limit = self.device.getMaxLimit() \n min_limit = self.device.getMinLimit()\n Pis = max_limit - offset/2\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setPosition(Pis)\n time.sleep(0.2) # Wait a bit for the setup\n max_rewinding_steps = (max_limit - min_limit) // self.device.getStep()\n expected = Pis - max_rewinding_steps*self.device.getStep() + offset\n self.source.setAzimuth(az)\n self.source.setElevation(el)\n self.p.startUpdating('MNG_TRACK', 'ANT_NORTH', az, el, None, None)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.p.setOffset(offset)\n time.sleep(0.2) if self.using_mock else time.sleep(3)\n self.assertEqual(self.device.getActPosition(), expected)", "def test_right_mode(self):\n self.dp.setRewindingMode('AUTO')\n self.assertEqual(self.dp.getRewindingMode(), 'AUTO')\n self.dp.setRewindingMode('MANUAL')", "def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. 
Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def test_restore_backup():", "def testsub_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tsub_fracY_fracX = fracY - fracX\r\n\t\t\tself.assertEqual ( sub_fracY_fracX.toString ().split ()[0], dictSub ['Y-X'] )", "def test_thermallyExpands(self):\n self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)", "def testRefresh(self):\n \n pass", "def test_rmg_mode(self):\n self.assertEqual(self.rmgmode, False)", "def exo2():", "def test_full(self):\n x = np.array([-5, -3, -2, -2, 100])\n self.assertEqual(\n npinterval.interval(x, 1),\n (-5, 100, 0, 5))", "def test_flonum_special(doctest):", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_thermallyExpands(self):\n self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)", "def test_post_nveto_pmts(self):\n pass", "def test_diferencia_porcentual_menor(self):\r\n valorNuevo = 10\r\n valorAnterior = 0\r\n self.assertEqual(diferenciaPorcentual(valorNuevo, valorAnterior), -999999)", "def test_REPLICATE_INTERVAL(self):\n self.assertIsInstance(constants.REPLICATE_INTERVAL, int,\n \"constants.REPLICATE_INTERVAL must be an \" +\n \"integer.\")", "def test_get_speed_limit():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n assert get_speed_limit(center, radius, speed_limit) != center\n assert get_speed_limit(center, radius, speed_limit) != radius\n assert get_speed_limit(center, radius, speed_limit) == speed_limit", "def test_get_game_diff(self):\n pass", "def test_step_to_midi_04():\n # This should produce 131, above range.\n step = 'C'\n alter = -1\n octave = 10\n with pytest.raises(Exception):\n U.step_to_midi(step, octave, alter)\n # This should produce 128, 1 above range.\n step = 'G'\n alter = 1\n octave = 9\n with pytest.raises(Exception):\n U.step_to_midi(step, 
octave, alter)", "def test_damage(self):\n\n for op in self.veh.operators:\n op.experience = 10\n self.assertEqual(self.veh.damage, 0.4)", "def test_popleft_when_equal(self, mock_datetime_obj):\n max_hour_count = None\n mock_datetime_obj.return_value = self.datetime_obj + timedelta(minutes=70)\n self.deque.append((self.datetime_obj, self.timestamp))\n self.deque.append((self.datetime_obj, self.timestamp))\n deque_length = len(self.deque)\n result = feature_5(self.deque,\n self.heap,\n self.expected_dict,\n self.top_n,\n max_hour_count,\n self.time_rollover_queue)\n self.assertEqual(result, (self.datetime_obj,\n self.timestamp,\n deque_length))", "def test_parcel_profile_drop_duplicates():\n pressure = np.array([962., 951., 937.9, 925., 908., 905.7, 894., 875.,\n 41.3, 40.8, 37., 36.8, 32., 30., 27.7, 27.7, 26.4]) * units.hPa\n\n temperature = units.Quantity(19.6, 'degC')\n\n dewpoint = units.Quantity(18.6, 'degC')\n\n truth = np.array([292.75, 291.78965331, 291.12778784, 290.61996294,\n 289.93681828, 289.84313902, 289.36183185, 288.5626898,\n 135.46280886, 134.99220142, 131.27369084, 131.07055878,\n 125.93977169, 123.63877507, 120.85291224, 120.85291224,\n 119.20448296]) * units.kelvin\n\n with pytest.warns(UserWarning, match='Duplicate pressure'):\n profile = parcel_profile(pressure, temperature, dewpoint)\n assert_almost_equal(profile, truth, 5)", "def testPerfilCasoInterseccionIgnorada(self):\n if self.TESTALL:\n perfilOriginal = [0,7,5,0,1,3,2,0]\n resultadoEsperado = [0,7,5]\n perfil = Perfil.Perfil()\n resultado = perfil.calcularPerfil(perfilOriginal,0)\n self.assertEqual(resultadoEsperado, resultado)", "def test_data_range(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n lenrange = random.randint(1, 10)\n nreps = random.randint(1, 10)\n\n ex.range = [\"i\", range(lenrange)]\n ex.nreps = nreps\n\n ex.vary[\"X\"][\"along\"] = 0\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n + (nreps - 1) * m], cmds)\n rangeidx = random.randint(0, lenrange - 1)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m,\n \"X_%d_%d\" % (rangeidx, repidx)], cmds)" ]
[ "0.49731633", "0.49007177", "0.4868027", "0.48571473", "0.48517647", "0.48516244", "0.4770387", "0.47574338", "0.474696", "0.4732291", "0.47281486", "0.47268423", "0.47249767", "0.47073162", "0.46844417", "0.46804345", "0.46610695", "0.4651014", "0.4639932", "0.46340024", "0.46131676", "0.45845267", "0.45754358", "0.45751292", "0.45408538", "0.45391086", "0.45374107", "0.4537361", "0.45317215", "0.45218405", "0.45209447", "0.45156255", "0.4502764", "0.45013598", "0.44934025", "0.44912276", "0.44861484", "0.44816777", "0.4479247", "0.4470395", "0.44607255", "0.4459262", "0.44508436", "0.44495746", "0.44465953", "0.44459653", "0.444419", "0.4442795", "0.44417977", "0.4435355", "0.44348645", "0.4433324", "0.44314036", "0.44260013", "0.44217736", "0.44216588", "0.44216043", "0.44201776", "0.44113117", "0.44113117", "0.44088137", "0.44038793", "0.44009084", "0.4396257", "0.43895966", "0.4382958", "0.4381791", "0.43668365", "0.43648317", "0.43611997", "0.43597952", "0.4359033", "0.43564856", "0.43551147", "0.43379694", "0.43284866", "0.43277636", "0.4327682", "0.4322788", "0.4322088", "0.4321747", "0.43192428", "0.43192428", "0.43192428", "0.43192428", "0.43192428", "0.43192428", "0.43192428", "0.43192428", "0.43178454", "0.43116987", "0.43106747", "0.43071315", "0.4305771", "0.43054962", "0.4299678", "0.42984834", "0.42958894", "0.4293386", "0.4292231" ]
0.6394983
0
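The objective block in each record's metadata tags the rows for triplet training over (query, document, negatives). A minimal sketch of consuming rows shaped like these, assuming they are stored one JSON record per line (the file name is hypothetical):

import json

def iter_triplets(path="triplets.jsonl"):
    # Hypothetical layout: one JSON object per line carrying the
    # query / document / negatives fields shown in these records.
    with open(path) as f:
        for line in f:
            row = json.loads(line)
            for negative in row["negatives"]:
                yield row["query"], row["document"], negative

Each anchor here is a short natural-language test description, while the positive and the negatives are Python test functions, so triplets drawn this way train a retriever to match queries against code.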
Test the popxl in sequence context manager
def test_documentation_popxl_in_sequence(self):
    filename = "in_sequence.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_pop_methods(self):\n\n batch = Batch(Mock())\n\n # mock BatchRequests\n mock_obj = Mock()\n mock_ref = Mock()\n batch._objects_batch = mock_obj\n batch._reference_batch = mock_ref\n\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_not_called()\n\n # pop object default value\n batch.pop_object()\n mock_obj.pop.assert_called_with(-1)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop object at index\n batch.pop_object(10)\n mock_obj.pop.assert_called_with(10)\n mock_ref.pop.assert_not_called()\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference default value\n batch.pop_reference()\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(-1)\n # reset mock objects\n mock_obj.reset_mock()\n mock_ref.reset_mock()\n\n # pop reference at index\n batch.pop_reference(9)\n mock_obj.pop.assert_not_called()\n mock_ref.pop.assert_called_with(9)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def test_close():\n while True:\n yield", "def test_pop(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(\"tmp\", 100, sched)\n self.assertEqual(inst_map.pop(\"tmp\", 100), sched)\n self.assertFalse(inst_map.has(\"tmp\", 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(\"tmp\"), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def testGetSequence():\r\n\t\r\n\t#a few of hand-tested genome positions\r\n\ttest_data = [\t('1',500,520,'GTCTGACCTGAGGAGAACTGT'),\r\n\t\t\t\t\t('2',500,520,'CCCGACCCCGACCCCGACCCA'),\r\n\t\t\t\t\t('3',50000,50020,'TCTTCTTTTATGAAAAAGGAT'),\r\n\t\t\t\t\t('4',50000,50020,'AGAGCCCTGCAATTTGAAGAT'),\r\n\t\t\t\t\t('5',100000,100020,'AATGTTCACCAGTATATTTTA'),\r\n\t\t\t\t\t('X',100000,100020,'TAGGTCTCATTGAGGACAGAT'),\r\n\t\t\t\t\t('Y',100000,100020,'TAGGTCTCATTGAGGACAGAT')]\r\n\t\t\t\t\t\r\n\tfor this_check in test_data:\r\n\t\tyield CheckGetSequence, this_check", "def tearDown(self):\n del self.pop", "def pop():", "def test_push_pop(values):\n test_stack = stack.Stack()\n\n for value in values:\n test_stack.push(value)\n\n for expected_value in reversed(values):\n value = test_stack.pop()\n assert value == expected_value\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.pop()", "def test_pop_on_empty_raises_error(sample_priorityq):\n with pytest.raises(IndexError):\n sample_priorityq.pop()", "def test_pop_gate(self):\n sched = Schedule()\n inst_map = InstructionScheduleMap()\n\n inst_map.add(XGate(), 100, sched)\n self.assertEqual(inst_map.pop(XGate(), 100), sched)\n self.assertFalse(inst_map.has(XGate(), 100))\n\n self.assertEqual(inst_map.qubit_instructions(100), [])\n self.assertEqual(inst_map.qubits_with_instruction(XGate()), [])\n with self.assertRaises(PulseError):\n inst_map.pop(\"not_there\", (0,))", "def test_pop_no_args(self):\r\n msg_list = messages.MessageList()\r\n # Adds 5 Message objects to the list.\r\n msg_list.push(messages.StringMessage(\"a\"))\r\n msg_list.push(messages.StringMessage(\"b\"))\r\n msg_list.push(messages.StringMessage(\"c\"))\r\n msg_list.push(messages.StringMessage(\"d\"))\r\n msg_list.push(messages.StringMessage(\"e\"))\r\n\r\n self.assertEqual(msg_list.length(), 5)\r\n popped = msg_list.pop()\r\n self.assertEqual(msg_list.length(), 4)\r\n self.assertEqual(popped.msg, \"e\")\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n msg_list.pop()\r\n 
self.assertRaises(IndexError, msg_list.pop)", "def test_open_fill(self):", "def test_peek_empty():\n test_stack = stack.Stack()\n\n with pytest.raises(stack.StackEmptyError):\n test_stack.peek()", "def test_ignore_close():\n try:\n yield\n except GeneratorExit:\n yield", "def test_pop_on_small_stack(small_stack):\n assert small_stack.pop().val == 3\n assert small_stack.pop().val == 2\n assert small_stack._size == 1", "def _pop(self, actual_call):\n try:\n expected_call, mock_result = self._queue.popleft()\n except IndexError as ex:\n error = UnexpectedCall(\n \"Queue is empty. call: {0}\"\n .format(actual_call)\n )\n self._store_pop_error(error)\n raise error\n if actual_call != expected_call:\n error = UnexpectedCall(\n \"Call does not match expectation. actual: {0}; expected: {1}\"\n .format(actual_call, expected_call)\n )\n self._store_pop_error(error)\n raise error\n # let it raise if the result is an exception or exception type.\n return mock_result()", "def testSeq(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n 'apple',\n self.mr.seq\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n None,\n self.mr.seq\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_pop_left_check_head(dq_3):\n dq_3.pop_left()\n assert dq_3._dll.head.data == 4", "def test_pop_returns_value_of_tail(dq_3):\n assert dq_3.pop() == 'ragtime'", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def testPushPopItem(self):\n test_queue = multi_process.MultiProcessingQueue()\n\n for item in self._ITEMS:\n test_queue.PushItem(item)\n\n test_queue.SignalEndOfInput()\n test_queue_consumer = test_lib.TestQueueConsumer(test_queue)\n test_queue_consumer.ConsumeItems()\n\n self.assertEqual(test_queue_consumer.number_of_items, len(self._ITEMS))", "def test_peek_single(values, expected_value):\n test_stack = stack.Stack()\n for value in values:\n test_stack.push(value)\n\n returned_value = test_stack.peek()\n\n assert returned_value == expected_value\n\n for value in reversed(values):\n assert test_stack.pop() == value", "def test_sequence(self):\n seq_name = 'test_seq'\n\n with self.dbh.sequence_recreate(seq_name):\n try:\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 1)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 2)\n self.assertEqual(self.dbh.get_seq_next_value(seq_name), 3)\n except Exception:\n self.dbh.rollback()\n raise", "def test_valueInQueue(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper.pop()\n self.assertEqual(expected, actual)", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def test_populator_aborts_early():\n o1, o2 = MediaBag(media=1), MediaBag(media=2)\n\n def multi_get(*keys):\n raise AssertionError('tried calling multi_get')\n\n results = media.build_populator('id', multi_get)([o1, o2])\n assert results == [o1, o2]", "def test_generator_cleanup():\n try:\n yield 1\n finally:\n print('cleanup')", "def pop_write(self):\n ...", "def test_pushpop2_dir(self):\n TempfileManager.push()\n os.mkdir(tempdir + 'pushpop2')\n TempfileManager.add_tempfile(tempdir + 'pushpop2')\n\n TempfileManager.push()\n os.mkdir(tempdir + 'pushpop2a')\n TempfileManager.add_tempfile(tempdir 
+ 'pushpop2a')\n TempfileManager.pop()\n if not os.path.exists(tempdir + 'pushpop2'):\n self.fail(\"pop() clean out all files\")\n if os.path.exists(tempdir + 'pushpop2a'):\n self.fail(\"pop() failed to clean out files\")\n\n TempfileManager.pop()\n if os.path.exists(tempdir + 'pushpop2'):\n self.fail(\"pop() failed to clean out files\")", "def popitem(self): # real signature unknown; restored from __doc__\n pass", "def testSeqs(self, mock_gs):\n self.mr._sequences = ['apple', 'banana']\n\n self.assertEqual(\n ['apple', 'banana'],\n self.mr.seqs\n )\n\n mock_gs.assert_called_once_with()\n mock_gs.reset_mock()\n\n self.mr._is_seq = False\n\n self.assertEqual(\n [],\n self.mr.seqs\n )\n\n # Test that we pulled from the cache\n self.assertFalse(\n mock_gs.called\n )", "def test_reading_missing(self, mock_pop, mock_shell):\n mock_result = mock.Mock()\n mock_result.communicate = mock.Mock()\n mock_result.returncode = 0\n mock_result.communicate.return_value = (self.dir, None)\n mock_pop.return_value = mock_result\n mock_shell.return_value = self.gitconf_str\n config = load_configuration(package_dir=\"womp\")\n\n self.failUnless(mock_result.communicate.called)\n mock_pop.assert_has_calls(mock.call(['git', 'rev-parse', '--show-toplevel'], stdout=-1))\n self.assertEqual(config.package_version(), '1.2.3')\n self.assertEqual(config.package_name(), 'cirrus_tests')\n self.failUnless(mock_shell.called)\n mock_shell.assert_has_calls([\n mock.call('git config --file {} -l'.format(self.gitconfig))\n ])", "def test_sequence_done(self):\n self.t(\"1,2 done\")\n code, out, err = self.t(\"_get 1.status 2.status\")\n self.assertEqual(\"completed completed\\n\", out)", "def test_tail_call(self):", "def test_priority_que_pop(priority_queue_full):\n # import pdb; pdb.set_trace()\n assert (priority_queue_full.pop(),\n priority_queue_full.pop(),\n priority_queue_full.pop(),\n priority_queue_full.pop()) == (11, 6, 12, 15)", "def test_pop_left_for_length(dq_3):\n dq_3.pop_left()\n assert dq_3.length == 2", "def test_invoke_anonymous_pipe():\n\n def processor_a(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\"})]\n items[0][\"x\"] = 42\n yield from items\n\n def processor_b(app, items):\n items = list(items)\n assert items == [holocron.Item({\"a\": \"b\", \"x\": 42})]\n items.append(holocron.Item({\"z\": 13}))\n yield from items\n\n def processor_c(app, items):\n items = list(items)\n assert items == [\n holocron.Item({\"a\": \"b\", \"x\": 42}),\n holocron.Item({\"z\": 13}),\n ]\n yield from items\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor_a\", processor_a)\n testapp.add_processor(\"processor_b\", processor_b)\n testapp.add_processor(\"processor_c\", processor_c)\n\n stream = testapp.invoke(\n [\n {\"name\": \"processor_a\"},\n {\"name\": \"processor_b\"},\n {\"name\": \"processor_c\"},\n ],\n [holocron.Item({\"a\": \"b\"})],\n )\n\n assert next(stream) == holocron.Item({\"a\": \"b\", \"x\": 42})\n assert next(stream) == holocron.Item({\"z\": 13})\n\n with pytest.raises(StopIteration):\n next(stream)", "def test_pop(self):\n self.assertRaises(EmptyHeapException, self.minheap.pop)\n self.minheap.heap = [0, 1, 4, 7, 9]\n assert self.minheap.pop() == 1\n assert self.minheap.heap == [0, 4, 9, 7]", "def test_INVOKE_repeat(self, propose):\n self.rep.proposals[1] = PROPOSAL1\n self.failIf(propose.called)", "def test_peek_on_empty_raises_error(sample_priorityq):\n with pytest.raises(IndexError):\n sample_priorityq.peek()", "def unit_test():\n queue = Queue()\n 
stack = Stack()\n\n for i in range(17):\n queue.push(i)\n stack.push(i)\n\n print(stack.pop())\n print(queue.pop())\n\n print(stack.peek())\n print(queue.peek())\n\n print(\"STACK: \", stack)\n print(\"QUEUE: \", queue)\n print(\"EMPTY?: QUEUE: \", queue.is_empty(), \"STACK: \", stack.is_empty())\n\n while not queue.is_empty():\n queue.pop()\n\n print(\"EMPTY?: QUEUE: \", queue.is_empty(), \"STACK: \", stack.is_empty())\n\n # print(\"POP EMPTY: \", queue.pop())", "def test_pop_sets_new_head(new_dll):\n new_dll.pop()\n assert new_dll.head.value == 4", "def test_pop(self):\n test_list = DoubleLinkedList()\n test_list.push(15)\n test_list.push(150)\n test_list.push(13)\n test_list.push(155)\n test_list.push(1)\n test_list.pop()\n self.assertEqual(test_list.last().get_elem(), 155)", "def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)", "def test_teardown(self):\n assert self.transaction_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)", "def pop(self):", "def pop(self):", "def test_multiple_commands_at_same_time(self):", "def test_sequence_modify(self):\n self.t(\"1,2 modify +xyz\")\n code, out, err = self.t(\"_get 1.tags 2.tags\")\n self.assertEqual(\"xyz xyz\\n\", out)", "def pop(self, *args, **kwargs): # real signature unknown\n pass", "def test_run_ended(self):", "def test_generator(self):\n args = Args()\n args.files = ['tests/xproto/test.xproto']\n args.target = 'tests/xtarget/test.xtarget'\n args.output = 'tests/out/dir/'\n args.write_to_file = \"target\"\n args.dest_file = None\n args.dest_extension = None\n\n expected_args = Args()\n expected_args.files = [os.path.abspath(os.getcwd() + '/' + args.files[0])]\n expected_args.target = os.path.abspath(os.getcwd() + '/' + args.target)\n expected_args.output = os.path.abspath(os.getcwd() + '/' + args.output)\n\n with patch(\"xosgenx.xosgen.XOSGenerator.generate\") as generator:\n XosGen.init(args)\n actual_args = generator.call_args[0][0]\n self.assertEqual(actual_args.files, expected_args.files)\n self.assertEqual(actual_args.target, expected_args.target)\n self.assertEqual(actual_args.output, expected_args.output)", "def test_pushpop1_dir(self):\n TempfileManager.push()\n os.mkdir(tempdir + 'pushpop1')\n TempfileManager.add_tempfile(tempdir + 'pushpop1')\n TempfileManager.pop()\n if os.path.exists(tempdir + 'pushpop1'):\n self.fail(\"pop() failed to clean out directories\")", "def test_pop_sets_new_head_next(new_dll):\n new_dll.pop()\n assert new_dll.head.next.value == 5", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def popitem(self):\n pass", "def mp_test(target, args=(), kwargs={}, timeout=None):\n\n if os.name == 'nt':\n return target(*args,**kwargs)\n\n r, w = mp.Pipe(duplex=False)\n def wrap(target=None, args=(), kwargs={}):\n r.close()\n res=target(*args, **kwargs)\n try:\n w.send(res)\n except:\n os.kill(os.getpid(),signal.SIGTERM)\n else:\n w.close()\n \n p = mp.Process(target=wrap, args=[target, args, kwargs])\n p.start()\n w.close()\n p.join(timeout) # Can't timeout pipe recv, so risking block on send.\n if p.is_alive():\n p.terminate()\n raise Error(\"timeout\",\"Timeout limit was \"+str(timeout)+\" seconds\")\n if p.exitcode:\n raise Error(\"signaled\",\"Exited following signal -\"+str(p.exitcode))\n output = r.recv()\n r.close()\n return output\n #return (None,output)", "def test_invoke_processor_errors():\n\n def processor(app, documents):\n raise ValueError(\"something bad happened\")\n yield\n\n testapp = 
holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\"test\")\n\n with pytest.raises(ValueError, match=r\"^something bad happened$\"):\n next(stream)\n\n with pytest.raises(StopIteration):\n next(stream)", "def test_redoer():\n tock = 1.0\n redoer = doing.ReDoer()\n assert redoer.tock == 0.0\n\n tymist = tyming.Tymist()\n redoer = doing.ReDoer(tymth=tymist.tymen(), tock=tock)\n assert redoer.tock == tock == 1.0\n redoer.tock = 0.0\n assert redoer.tock == 0.0\n\n # create generator use send and run until normal exit. emulates Doist.ready\n args = {}\n dog = redoer(tymth=redoer.tymth, tock=redoer.tock, **args)\n assert inspect.isgenerator(dog)\n\n result = dog.send(None)\n assert result == redoer.tock == 0.0\n\n tymist.tick()\n result = dog.send(tymist.tyme)\n assert result == redoer.tock == 0.0\n\n tymist.tick()\n result = dog.send(tymist.tyme)\n assert result == redoer.tock == 0.0\n\n tymist.tick()\n with pytest.raises(StopIteration):\n try:\n result = dog.send(tymist.tyme)\n except StopIteration as ex:\n assert ex.value == True\n raise\n\n tymist.tick()\n with pytest.raises(StopIteration): # send after break\n try:\n result = dog.send(tymist.tyme)\n except StopIteration as ex:\n assert ex.value == None\n raise\n\n # create generator use send and then explicit close. emulates Doist.ready\n args = {}\n dog = redoer(tymth=redoer.tymth, tock=redoer.tock, **args)\n assert inspect.isgenerator(dog)\n\n result = dog.send(None)\n assert result == redoer.tock == 0.0\n\n tymist.tick()\n result = dog.send(tymist.tyme)\n assert result == redoer.tock == 0.0\n\n result = dog.close()\n assert result == None # no yielded value on close\n\n tymist.tick()\n with pytest.raises(StopIteration): # send after close\n try:\n result = dog.send(tymist.tyme)\n except StopIteration as ex:\n assert ex.value == None\n raise\n\n\n # use next instead of send\n args = {}\n dog = redoer(tymth=redoer.tymth, tock=redoer.tock, **args)\n assert inspect.isgenerator(dog)\n\n result = next(dog)\n assert result == redoer.tock == 0.0\n\n result = next(dog)\n assert result == redoer.tock == 0.0\n\n result = dog.close()\n assert result == None # no yielded value on close\n\n tymist.tick()\n with pytest.raises(StopIteration): # send after close\n try:\n result = dog.send(tymist.tyme)\n except StopIteration as ex:\n assert ex.value == None\n raise\n\n \"\"\"End Test \"\"\"", "def test_invoke_passthrough_items():\n\n def processor(app, items):\n yield from items\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\n \"test\",\n [\n holocron.Item(name=\"yoda\", rank=\"master\"),\n holocron.Item(name=\"skywalker\"),\n ],\n )\n\n assert next(stream) == holocron.Item(name=\"yoda\", rank=\"master\")\n assert next(stream) == holocron.Item(name=\"skywalker\")\n\n with pytest.raises(StopIteration):\n next(testapp.invoke(\"test\"))", "def test_invoke_propagates_processor_args(processor_args):\n\n def processor(app, items, **args):\n assert args == processor_args\n yield from items\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\", \"args\": processor_args}])\n\n with pytest.raises(StopIteration):\n next(testapp.invoke(\"test\"))", "def teardown(self):\n pos.close()", "def teardown(self):\n pos.close()", "def test_enter_exit(self, 
mock_flush):\n\n batch = Batch(Mock())\n\n with batch as b:\n self.assertEqual(batch, b)\n mock_flush.assert_called()", "def test_unload(install_mockery, mock_fetch, mock_archive, mock_packages, working_env):\n install(\"mpileaks\")\n mpileaks_spec = spack.spec.Spec(\"mpileaks\").concretized()\n\n # Set so unload has something to do\n os.environ[\"FOOBAR\"] = \"mpileaks\"\n os.environ[uenv.spack_loaded_hashes_var] = \"%s:%s\" % (mpileaks_spec.dag_hash(), \"garbage\")\n\n sh_out = unload(\"--sh\", \"mpileaks\")\n csh_out = unload(\"--csh\", \"mpileaks\")\n\n assert \"unset FOOBAR\" in sh_out\n assert \"unsetenv FOOBAR\" in csh_out\n\n assert \"export %s=garbage\" % uenv.spack_loaded_hashes_var in sh_out\n assert \"setenv %s garbage\" % uenv.spack_loaded_hashes_var in csh_out", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def pop(self):\n return self.new_dll.shift()", "def popitem(self):\n pass", "def test_pop_empty_list():\n from dll import DbLinkedList\n dll = DbLinkedList()\n msg = \"Cannot pop from an empty list\"\n with pytest.raises(ValueError, message=msg):\n dll.pop()", "def test_peek_on_small_stack(small_stack):\n assert small_stack.peek().val == 3", "def test_invoke_passthrough_items_empty_pipe():\n\n testapp = holocron.Application()\n testapp.add_pipe(\"test\", [])\n\n stream = testapp.invoke(\n \"test\",\n [\n holocron.Item(name=\"yoda\", rank=\"master\"),\n holocron.Item(name=\"skywalker\"),\n ],\n )\n\n assert next(stream) == holocron.Item(name=\"yoda\", rank=\"master\")\n assert next(stream) == holocron.Item(name=\"skywalker\")\n\n with pytest.raises(StopIteration):\n next(testapp.invoke(\"test\"))", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def test_sequence(self):\n self.assertEqual([1, -3, 9, -27, 81, -243],\n [x for x in GeometricProgression(6, 1, -3)])\n\n self.assertEqual([1, 1, 1, 1, 1],\n [x for x in GeometricProgression(5, 1, 1)])\n\n self.assertEqual([4, 40, 400, 4000, 40000],\n [x for x in GeometricProgression(5, 4, 10)])", "def test_pop_from_empty_list(new_empty_ll):\n from linked_list import Linked_List\n with pytest.raises(IndexError):\n new_empty_ll.pop()", "def test_lose(self):\n self.choice.return_value = \"ant\" \n self.input.side_effect = list(\"bcdefg\" \"n\")\n\n gallows.main()\n\n self.xprint.assert_any_call('You have run out of guesses!')", "def tearDown(self):\n self.popen_patcher.stop()", "def tearDown(self):\n self.popen_patcher.stop()", "def pop_sm(self):\r\n while True:\r\n # wait to receive a read request\r\n req = yield self.r_in_pipe.get()\r\n # model read latency\r\n # for i in range(self.read_latency):\r\n yield self.wait_sys_clks(self.read_latency)\r\n # try to read head element\r\n if len(self.items) > 0:\r\n data = self.items[0]\r\n self.items = self.items[1:]\r\n else:\r\n print >> sys.stderr, \"ERROR: PIFO pop_sm: attempted to read from empty PIFO\"\r\n data = None\r\n # write data back\r\n self.r_out_pipe.put(data)", "def test_main_gc_2(test):\n answers = (i for i in (test, '1 1 1 1', 'q'))\n with mock.patch.object(builtins, 'input', lambda _: next(answers)):\n g_c.main()", "def test_excel(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write excel file\n excel_file = os.path.join(tmp_path,\"tmp.xlsx\")\n 
gpm.to_excel(filename=excel_file)\n assert os.path.isfile(excel_file)\n\n # Read in and make sure it worked.\n new_gpm = gpmap.read_excel(filename=excel_file,wildtype=d[\"wildtype\"])\n conftest.compare_gpmap(gpm,new_gpm)\n\n # Do not give wildtype. Should still work because the wildtype was\n # inferred.\n gpm_read = gpmap.read_excel(filename=excel_file)\n conftest.compare_gpmap(gpm,gpm_read)\n\n # Check ability to read labels back in\n site_labels = [f\"{x}\" for x in range(10,10+len(d[\"wildtype\"]),1)]\n gpm = GenotypePhenotypeMap(genotype=d[\"genotype\"],\n wildtype=d[\"wildtype\"],\n site_labels=site_labels)\n out_file = os.path.join(tmp_path,\"tmp.xlsx\")\n gpm.to_excel(out_file)\n\n gpm_read = gpmap.read_excel(out_file)\n\n for i in range(len(gpm_read.site_labels)):\n\n # Skip virtual site_labels added for invariant sites\n if len(d[\"mutations\"][i]) == 1:\n continue\n\n assert gpm_read.site_labels[i] == gpm.site_labels[i]\n\n # Read in with bad wildtype. Should throw warning and then have\n # sequential site labels.\n with pytest.warns(UserWarning):\n gpm_read = gpmap.read_excel(out_file,wildtype=d[\"mutant\"])\n\n assert np.array_equal(gpm_read.site_labels,range(len(d[\"wildtype\"])))", "def test_make_pop_exception(self, patch_log, patch_random):\n patch_random.side_effect = ValueError\n # Population is initialised with no households\n pop_params = {\"population_size\": 10, \"cell_number\": 1,\n \"microcell_number\": 1}\n ToyPopulationFactory.make_pop(pop_params)\n patch_log.assert_called_once_with(\"ValueError in ToyPopulation\"\n + \"Factory.make_pop()\")", "def test_main_gc_1(test):\n answers = (i for i in (test, 'b', 'q'))\n with mock.patch.object(builtins, 'input', lambda _: next(answers)):\n g_c.main()", "def test_generator3(self):\n xpb = XPathBuilder()\n xp1 = xp2 = None\n base_xp = xpb.base.foo[xpb.attr('abc') == 'x']\n with base_xp as b:\n xp1 = b().bar.text() == 'foo'\n xp2 = b().x.y.z[42]\n base_exp = '/base/foo[@abc = \"x\"]'\n xp1_exp = '/base/foo[@abc = \"x\"]/bar/text() = \"foo\"'\n xp2_exp = '/base/foo[@abc = \"x\"]/x/y/z[42]'\n self.assertEqual(base_xp.tostring(), base_exp)\n self.assertEqual(xp1.tostring(), xp1_exp)\n self.assertEqual(xp2.tostring(), xp2_exp)", "def test_empty_pop(self, populated_collection):\n test_list, plain_list = populated_collection\n item = test_list.pop()\n assert self.is_equal(self.to_base(item), plain_list[-1])\n plain_list.pop()\n self.check_equivalent(test_list, plain_list)\n self.final_check(test_list)", "def test_run_loop_success(self):\n found = False\n pyint = Interpreter(limit=15)\n try:\n pyint.run(code=BF_CODE_LOOP_TWICE)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def test_shift_returns_value(new_dll):\n assert new_dll.shift() == 5", "def test_reset(self):\n ran = []\n def foo():\n ran.append(None)\n\n c = task.Clock()\n lc = TestableLoopingCall(c, foo)\n lc.start(2, now=False)\n c.advance(1)\n lc.reset()\n c.advance(1)\n self.assertEqual(ran, [])\n c.advance(1)\n self.assertEqual(ran, [None])", "def test_population_movements_with_compilation(self):\n self._pystepx = PySTEPXIsland(nb_islands=4, init_script=init_script)\n print self._pystepx._rc[0]['gp_engine']\n self._pystepx._rc[0].execute('elems = gp_engine.get_evolver().select_and_remove_individuals(0.01)',\n block=True)\n print self._pystepx._rc[0]['elems']", "def test_documentation_popxl_addition(self):\n filename = \"simple_addition.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def pop(self):\n pass", "def 
pop(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def test__get_xtraback_prepare_cmd(self):\n mb = self.maria_backup\n self.assertEqual(mb._get_xtraback_prepare_cmd('/a/dir'), ['xtrabackup', '--prepare',\n '--target-dir', '/a/dir/test',\n '--use-memory', '40G',\n '--open-files-limit', '200000'])", "async def my_job():\n\n while test_plan:\n to_yield = test_plan[-1][0]\n test_plan[-1][0] = None\n yield to_yield", "def TransferMemorySequence():\r\n pass", "def exp_pop(self) -> Any:\n return self.exp_stack.popleft()" ]
[ "0.58948493", "0.5882581", "0.5770228", "0.54232585", "0.54092556", "0.54075843", "0.5373329", "0.5303253", "0.53030944", "0.5287886", "0.52534574", "0.5221736", "0.52018183", "0.5189823", "0.5175281", "0.5172834", "0.5172073", "0.51521003", "0.51421815", "0.5127545", "0.51249427", "0.51089585", "0.5103546", "0.5087489", "0.5045468", "0.5040717", "0.5031918", "0.5026089", "0.50086796", "0.4999565", "0.49845994", "0.4983298", "0.49627236", "0.4959022", "0.49560332", "0.49143422", "0.4894076", "0.48857024", "0.4881891", "0.48814523", "0.48599333", "0.48598775", "0.48365682", "0.48338538", "0.48286676", "0.4826836", "0.4826836", "0.48268154", "0.48260146", "0.48238036", "0.48233682", "0.48144883", "0.48111746", "0.48098588", "0.4806326", "0.4806326", "0.4806326", "0.48044452", "0.4801096", "0.47967568", "0.47932225", "0.47905892", "0.47885412", "0.47874752", "0.47874752", "0.47869822", "0.47862285", "0.4782183", "0.47803375", "0.47777343", "0.476481", "0.47645095", "0.47635064", "0.47543886", "0.47543886", "0.47543886", "0.47509274", "0.47483334", "0.47483286", "0.47397968", "0.47397968", "0.47386912", "0.4728878", "0.4728151", "0.47263145", "0.4724572", "0.47004166", "0.46942502", "0.46938148", "0.4691006", "0.46903977", "0.46903297", "0.4686174", "0.46684793", "0.46645814", "0.4662198", "0.46619257", "0.4660861", "0.4655052", "0.46534914" ]
0.65936136
0
Test the popxl remote variable
def test_documentation_popxl_remote_var(self):
    filename = "remote_variable.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def test_endpointPOP3(self):\n self._endpointTest(\"pop3\")", "def _platformix_get(self, context, fake_reply, prop):\r\n if hasattr(self.host, prop):\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)", "def test_check_replication_unknown_valueerror2(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def remote_status():", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0", "def test_z_remote_command(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\timport subprocess\n\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"check_nrpe\"])\n\t\t\tif (str(\"/check_nrpe\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\t\ttry:\n\t\t\t\ttheOutputtext = subprocess.check_output([\"which\", \"ssh\"])\n\t\t\t\tif (str(\"/ssh\") in str(theOutputtext)):\n\t\t\t\t\ttheResult = True\n\t\t\texcept Exception:\n\t\t\t\ttheResult = False\n\t\tassert theResult", "def remote_installed(self):\n gxp_dir = os.environ[\"GXP_DIR\"]\n flag = os.path.join(gxp_dir, \"REMOTE_INSTALLED\")\n if dbg>=2: \n ioman.LOG(\"checking remote flag %s/%s\\n\" % (gxp_dir, flag))\n if os.path.exists(flag):\n if dbg>=2: ioman.LOG(\"exists, remotely installed\\n\")\n return 1\n else:\n if dbg>=2: ioman.LOG(\"does not exit, locally installed\\n\")\n return 0", "def remote():\n pass", "def test_pop3(self):\n self._endpointServerTest(\"pop3\", protocols.POP3Factory)", "def _is_pop_command(self):\n return self._match_memory_pattern(\"pop\")", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def hasGridProxy():\n import os\n from subprocess import Popen, PIPE\n \n arguments = 'dirac-proxy-info --checkvalid'\n arguments = ['dirac-command'] + arguments.split()\n logger.verbose ( 'hasGridProxy:use Popen(%s)' % arguments)\n\n p = Popen(arguments, stdout=PIPE, stderr=PIPE)\n (cout, cerr) = p.communicate()\n #\n if 0 != p.returncode: return False\n #\n if py3 :\n cout = cout.decode ( 'utf-8' ) if cout 
else cout \n cerr = cerr.decode ( 'utf-8' ) if cerr else cerr \n # \n\n if 'expired' in cout : return False\n if 'Insane' in cout : return False\n if 'Error' in cout : return False\n #\n return 0 == p.returncode and cout and not cerr", "def test_set_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set %s/one 'bye'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"bye\\n\", self.output.getvalue())", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0", "def on_pullVar(self):\n if not os.path.exists(grapher.binPath):\n mess = \"!!! ERROR: rndBin path not found, check user pref !!!\"\n self.mainUi._defaultErrorDialog(mess, self.mainUi)\n else:\n tmpPath = os.path.join(self.grapher.userBinPath, 'tmp')\n tmpFile = os.path.join(tmpPath, 'varBuffer.py')\n varDict = pFile.readPyFile(tmpFile)\n for var in sorted(varDict.keys()):\n if var.startswith('selVar_'):\n newItem = self.on_addVar()\n self.setItem(newItem, **varDict[var])", "def SEMIHook(p):\n x = p['sy']['pop']()\n if (x == ';'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def check_remote_rpm_install(self, rpm_package_name, host):\n results = run_remote_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)\n self.assertEqual(results, rpm_package_name)", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_existing_value(self):\n var_name = \"PICCOLO_TEST_2\"\n initial_value = \"hello\"\n new_value = \"goodbye\"\n\n os.environ[var_name] = initial_value\n\n with set_env_var(var_name=var_name, temp_value=new_value):\n self.assertEqual(os.environ.get(var_name), new_value)\n\n self.assertEqual(os.environ.get(var_name), initial_value)", "def test_ipcrm():\n IPCComm.ipcrm()", "def nremote(self):", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def is_remote(self): # -> Any | bool:\n ...", "def get_xcom(**context):\n ti = context['ti']\n data = ti.xcom_pull(task_ids='xcom_from_bash', key='return_value')\n logging.info(data)", "def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n 'git@github.com:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n 'git@github.com:/example/remote'))", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def fetch():\n return True", "def PHook(p):\n x = p['sy']['pop']()\n if (x == '.'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def is_remote(self):\n if socket.gethostbyname(socket.gethostname()).startswith('10.7'):\n return False\n else:\n return True", "def sendPullResponse(remote, request, txpk,sock):\r\n remote = (remote[0], remote[1])\r\n\r\n m = GatewayMessage(version=request.version, token=request.token,\r\n identifier=PULL_RESP, gatewayEUI=b'9079494338994186168',\r\n remote=remote, ptype='txpk', txpk=txpk)\r\n print(\"Sending PULL_RESP message to %s:%d\" % remote)\r\n\r\n sock.sendto(m.encode(), remote)\r\n data, address = 
sock.recvfrom(4096)\r\n print(data)\r\n print('received %s bytes from %s' % (len(data), address))", "def test_postfix(self):\n test_sensordef = {\n \"kind\": self.test_postfix.get_kind(),\n \"name\": \"Postfix Mailqueue\",\n \"description\": \"Monitors the mailqueue of a postfix server\",\n \"help\": \"Monitors the mailqueue of a postfix server for active, deferred, hold or corrupt mail\",\n \"tag\": \"mppostfixsensor\",\n \"fields\": [],\n \"groups\": []\n }\n assert_equal(self.test_postfix.get_sensordef(), test_sensordef)", "def remote_pull(*keys):", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def do_local(self, *args):\n return self.do_scpi(':communicate:remote 0')", "def checkGetHostByName(self, result):\n self.assertEquals(result, '127.0.0.1')", "def unset():\n rino.remote.unset()", "def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")", "def test_get_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n expected = \"Hello\"\n actual = self.helper.get_value(default=\"Hello\")\n\n self.assertEqual(expected, actual)\n\n expected = \"This is a test.\"\n\n os.environ[self.test_name] = \"This is a test.\"\n\n actual = self.helper.get_value(default=\"Hello\")\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]", "def test_getSiblingExistsRemote(self):\n self.store.powerUp(self.contentStore1, ISiblingStore)\n self._retrievalTest()", "def test_script_p2_sh_address(self):\n pass", "def testIP(self):\n self.assertEqual([\"http://234.234.234.234\"], grab('http://234.234.234.234', self.needScheme))", "def SetPop(self, fname, var):\n\n\t\tself._pop_fname = fname\n\t\tself._pop_var = var", "def test_get_backup_command(self):\n mb = self.maria_backup\n mock = mock_open(read_data='')\n with patch('builtins.open', mock): # skip reading the port list section\n self.assertEqual(mb.get_backup_cmd('test_dir'),\n ['xtrabackup', '--backup',\n '--target-dir', 'test_dir/test',\n '--datadir', '/srv/sqldata',\n '--socket', '/run/mysqld/mysqld.sock'])", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_get(self, publish_mock: mock.Mock) -> None:\n\n def side_effect(*args: str, **_: str) -> Any:\n if args[0] == \"registry:search:dict\":\n return [{\"host1\": \"mac1\", \"host2\": \"mac2\"}]\n\n if args[0] == \"app_url\":\n return [\"/registry\"]\n\n if args[0] == \"jinja:render\":\n return [\"\"]\n\n return mock.DEFAULT\n\n publish_mock.side_effect = side_effect\n\n self.request(\"/\")\n\n self.assertEqual(\n helpers.template_var(publish_mock, \"registry_url\"),\n \"/registry\"\n )\n self.assertEqual(\n helpers.template_var(publish_mock, \"hosts\").get(\"host1\"),\n \"mac1\"\n )\n\n self.assertNotIn(\n \"host3\",\n helpers.template_var(publish_mock, \"hosts\")\n )\n\n self.assertFalse(\n helpers.template_var(publish_mock, \"send\")\n )", "def test_variables_get(self):\n pass", "def test_get_host(self):\n pass", "def test_is_remote(self):\n self.assertEqual(self.project.is_remote(), False)", "def pop_payload(self):\n payload = self.rdb.lpop(self.key)\n if payload:\n self.pool.spawn(self.send, json.loads(payload.decode(\"utf-8\")))\n else:\n gevent.sleep(5)", "def test_retrieving_variables(self):\n\t\turl = 
reverse('variables', args = ('b'))\n\t\tresponse = self.client.get(url, format='json')\n\t\tself.assertEqual(response.data, {'b': 567})", "def remote_kill():", "def test_preferred_zip_precedence(self):\n subscriber = Subscriber.objects.get(id=4)\n create_subscriber_in_session(self, subscriber)\n factory = RequestFactory()\n self.assemble_session(self.session) \n request = factory.get('/hudson-valley/', follow=True) \n request.session = self.session\n site = Site.objects.get(id=3)\n preferred_zip = get_preferred_zip(request, site)\n self.assertEqual(preferred_zip, '27604')", "def rpc_ping(self):\n\t\treturn True", "def test_check_status_code_returns_true():\n plugin_instance = PluginVipCustomisation()\n assert plugin_instance._check_status_code('replace me with real xml') == True", "def test_master_address(self):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n self.supervisor.supvisors.context.master_address = '10.0.0.1'\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n self.assertEqual('10.0.0.1', rpc.get_master_address())", "def check_remote_rpm_uninstall(self, rpm_package_name, host):\n with self.assertRaisesRegexp(ExecutionError, \"%s is not installed\" % rpm_package_name):\n results = run_remote_command(\"rpm -q %s --dbpath %s\" % (rpm_package_name, RPM_DATABASE), host)", "def c_test_population_function(self, function):\r\n return 1", "def test_package(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n db = connect()\n engine = db.connect() \n init_db(engine)\n update(engine)\n assert True", "def test_no_existing_value(self):\n var_name = \"PICCOLO_TEST_1\"\n\n # Make sure it definitely doesn't exist already\n if os.environ.get(var_name) is not None:\n del os.environ[var_name]\n\n new_value = \"hello world\"\n\n with set_env_var(var_name=var_name, temp_value=new_value):\n self.assertEqual(os.environ.get(var_name), new_value)\n\n self.assertEqual(os.environ.get(var_name), None)", "def testPowerOnResponse(self):\n message = (mavutil.mavlink.GOPRO_COMMAND_POWER, mavutil.mavlink.GOPRO_REQUEST_SUCCESS)\n self.mgr.set_response_callback('vehicle','name', message)\n self.mgr.processMsgQueue.assert_called_with()", "def test_rpcCall(self):\n pass", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def test_get(self):\n self.assertEqual(self.config.get('basic','greeting'),'hello')", "def test_preferred_zip_defaulted(self):\n factory = RequestFactory() \n request = factory.get('/hudson-valley/', follow=True)\n request.session = self.session\n request.META['site_id'] = 2\n preferred_zip = get_preferred_zip(request)\n self.assertEqual(preferred_zip, '12601')", "def test_del_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n prop.del_prop('info', 'sdk')\n\n testutils.undeploy()\n\n return 0", "def is_return(self, pick):\n return ('-' in pick.name and sorted(pick.name.split('-'), reverse=True)[0].startswith('ret'))", "def test_check_regon(client):\n is_assigned, request_id = client.check_regon(\n \"730371613\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def test_z_z_func_command_local(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\ttheOutputtext = 
checkPythonCommand([getPythonCommand(),\n\t\t\t\tstr(\"code/restart_service.py\"),\n\t\t\t\tstr(\"--use-local\"),\n\t\t\t\tstr(\"-E\"),\n\t\t\t\tstr(\"5\"),\n\t\t\t\tstr(\"CRITICAL\"),\n\t\t\t\tstr(\"HARD\"),\n\t\t\t\tstr(\"5\"),\n\t\t\t\tstr(\"localhost\"),\n\t\t\t\tstr(\"true\"),\n\t\t\t\tstr(\"\\\"test\\\"\")\n\t\t\t], stderr=subprocess.STDOUT)\n\t\t\tif (str(\"OK - Service test restarted on host localhost\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\t\telif (str(\"OK - Service \\\"test\\\" restarted on host localhost\") in str(theOutputtext)):\n\t\t\t\ttheResult = True\n\t\t\telse:\n\t\t\t\t print(str(theOutputtext))\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\tassert theResult", "def test_ping(self):\n status, output = commands.getstatusoutput('ping -c 5 %s' % self.known_ip)\n assert status == 0", "def test_check_replication_unknown_valueerror1(self, mock_urlopen):\n base_url = '.'\n mock_urlopen.side_effect = ValueError(Mock(return_value=''))\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def test_vcmp(self):\r\n if self.flask_app.config.get('VMCP_KEY'):\r\n self.flask_app.config.pop('VMCP_KEY')\r\n res = self.app.get('api/vmcp', follow_redirects=True)\r\n err = json.loads(res.data)\r\n assert res.status_code == 501, err\r\n assert err['status_code'] == 501, err\r\n assert err['status'] == \"failed\", err\r\n assert err['target'] == \"vmcp\", err\r\n assert err['action'] == \"GET\", err", "def hxlinfo():\n run_script(hxlinfo_main)", "def send_xdisploc(self):\n return self.shell.xdisploc", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 'list' is not found in the response\")", "def test_select_var(self):\n self.check_metadata.side_effect = lambda: self.cube\n with patch('esmvalcore.cmor._fixes.fix.Fix.get_fixes',\n return_value=[]):\n with patch('esmvalcore.cmor.fix._get_cmor_checker',\n return_value=self.checker):\n cube_returned = fix_metadata(\n cubes=[self.cube,\n self._create_mock_cube('extra')],\n short_name='short_name',\n project='CMIP6',\n dataset='model',\n mip='mip',\n )[0]\n self.checker.assert_called_once_with(self.cube)\n self.check_metadata.assert_called_once_with()\n assert cube_returned is self.cube", "def remotes():", "def test_get_cell(workbook):\n assert workbook.get_cell(3,1) == '507906000030242007'", "def _exists_remote(self, host):\n # This file gets written after cloudinit is done\n # path = '/var/lib/cloud/instance/boot-finished'\n path = '/home/ubuntu/SETUP_COMPLETE'\n t = 0\n sleep_len = 10\n while True:\n status = subprocess.call(\n ['ssh', '-oStrictHostKeyChecking=no', '-i', '/home/ubuntu/.ssh/id_rsa', 'ubuntu@'+host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n else:\n return False", "def test_documentation_popxl_call_with_info(self):\n filename = \"call_with_info.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def get_response(hf, var):\n psweep = hf.attrs['UQtype']\n return unpickle(hf['/%s/%s/response' % (psweep, var)].value)", "def fingertip_no_remote(self) -> bool:\n hcell = self._get_hcell2()\n return hcell.get(\"fingertip_no_remote\", False)", "def test_unload(install_mockery, mock_fetch, mock_archive, mock_packages, working_env):\n install(\"mpileaks\")\n mpileaks_spec = spack.spec.Spec(\"mpileaks\").concretized()\n\n # Set so unload has something to do\n os.environ[\"FOOBAR\"] = 
\"mpileaks\"\n os.environ[uenv.spack_loaded_hashes_var] = \"%s:%s\" % (mpileaks_spec.dag_hash(), \"garbage\")\n\n sh_out = unload(\"--sh\", \"mpileaks\")\n csh_out = unload(\"--csh\", \"mpileaks\")\n\n assert \"unset FOOBAR\" in sh_out\n assert \"unsetenv FOOBAR\" in csh_out\n\n assert \"export %s=garbage\" % uenv.spack_loaded_hashes_var in sh_out\n assert \"setenv %s garbage\" % uenv.spack_loaded_hashes_var in csh_out", "def test_variablepresentations_get(self):\n pass", "def test_broadcast_setting_change():\n assert win_functions.broadcast_setting_change()", "def pull(self, pull: Optional[int] = None) -> Optional[int]:\n ...", "def test(self,version=''):\n p5cmd = ['srvinfo', 'lexxvers']\n try:\n res = self.nsdchat_call(p5cmd,5)\n p5_version = singlevalue(res)\n if (p5_version >= str(version)):\n return True\n return False\n except subprocess.TimeoutExpired:\n print(\"Could not connect to the archiware p5 server.\\nPlease review\"\n \"the connection and firewall settings.\")\n raise", "def test_delete_on_background_response_descriptor_variables_library_variable_set_library_variable_set_resource(self):\n pass", "def test_set_value(self) -> None:\n\n expected = False\n actual = self.helper.set_name(self.test_name).exists()\n\n self.assertEqual(expected, actual)\n\n self.helper.set_value(\"Hello, World!\")\n\n expected = \"Hello, World!\"\n actual = self.helper.get_value()\n\n self.assertEqual(expected, actual)\n\n del os.environ[self.test_name]", "def remote_push(self, pNamespace):", "def test_get_temp(loaded_fridge):\n assert loaded_fridge.get_temp() == loaded_fridge.temp", "def do_remote(self, *args):\n return self.do_scpi(':communicate:remote 1')", "def test_constants(self):\n self.assertTrue(getattr(unshare, 'CLONE_NEWNS'))\n self.assertTrue(getattr(unshare, 'CLONE_NEWNET'))", "def test_popen(self):\n self.executor.command(['grep', 'foo']).popen()", "def test_preferred_zip_consumer(self):\n factory = RequestFactory()\n consumer = Consumer.objects.get(id=115)\n self.assertEqual(consumer.consumer_zip_postal, '12589')\n create_consumer_in_session(self, consumer)\n self.assemble_session(self.session) \n request = factory.get('/hudson-valley/', follow=True)\n request.session = self.session\n site = Site.objects.get(id=2)\n preferred_zip = get_preferred_zip(request, site)\n self.assertEqual(preferred_zip, '12589')", "def is_remote(self):\n\t\treturn bool(call_sdk_function('PrlVmDev_IsRemote', self.handle))" ]
[ "0.62325144", "0.53577113", "0.53224456", "0.5209967", "0.51961607", "0.5094487", "0.50844604", "0.5058393", "0.5020116", "0.49631917", "0.49456343", "0.49408296", "0.4912187", "0.48899758", "0.48812777", "0.48232034", "0.47519946", "0.46955076", "0.46878415", "0.46760944", "0.46738386", "0.46679908", "0.46611106", "0.4651345", "0.4638157", "0.46368185", "0.4623075", "0.4617728", "0.46174634", "0.46088743", "0.46008083", "0.45800346", "0.4579057", "0.45739767", "0.45700228", "0.45691937", "0.45599997", "0.45450374", "0.45440426", "0.45414668", "0.4534699", "0.45326477", "0.4526917", "0.45260498", "0.4511431", "0.4495912", "0.4493665", "0.44913954", "0.4489914", "0.44856313", "0.44757453", "0.4450531", "0.44503233", "0.44452298", "0.44390523", "0.4430514", "0.442883", "0.44237134", "0.44169253", "0.44092146", "0.44072923", "0.44000322", "0.43992734", "0.43976614", "0.43970418", "0.43956864", "0.43902853", "0.43872306", "0.43826932", "0.4381051", "0.43806627", "0.4370733", "0.43697423", "0.4359181", "0.43581724", "0.43568087", "0.43543255", "0.4348264", "0.43452635", "0.4343514", "0.43326026", "0.43297276", "0.43288898", "0.4323902", "0.43234098", "0.43066952", "0.43031117", "0.43030474", "0.42980474", "0.4297566", "0.42948997", "0.42943197", "0.4285817", "0.42813385", "0.42808366", "0.42795014", "0.42719218", "0.42645165", "0.42644444", "0.42641416" ]
0.70022154
0
Test the popxl remote rts variable
def test_documentation_popxl_remote_rts_var(self):
    filename = "remote_rts_var.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_documentation_popxl_rts_var(self):\n filename = \"rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_ipcrm():\n IPCComm.ipcrm()", "def TestRetract(portVXM,portArd):\n commandString = \"F\"\n portVXM.write(commandString)\n commandString = \"PM-3,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L3,R \"\n portVXM.write(commandString)\n\tresp='abs'\n\twhile( '^' not in resp ):\n\t resp = portVXM.read(1000)\n\t\n\tprint \"Moving to Standard Operation.\"\t\n commandString = \"PM-2,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L0,R \"\n portVXM.write(commandString)\n\tt0 = time.time() \n\tt = time.time() \n\tportArd.flushInput()\n\tresp='abs'\n\t\n #while( ('21,1,1' not in resp) and ((t-t0)>30.0)):\n while( not('21,0,1\\n\\r21,1,1\\n\\r21,0,0\\n\\r' in resp) ):\n\t resp = portArd.read(10000)\n\t t = time.time()\n\n\t#print \"CONDITION: \\t\" ,('21,1,1' not in resp),'\\t'\n\tportVXM.write(\"D,\")\n resp = portVXM.read(1000)\n\tXpos, Zpos = GetXZ(portVXM)\n localtime = time.asctime(time.localtime(time.time()))\n\tprint \"Source Fully Retracted at (X,Y) : \",Xpos, \"\\t\", Zpos ,\"\\t\", \"at localtime: \", localtime\n\tprint abs(t-t0) , \"\\t Seconds \\r\"\n\tWaitUntilReady(portVXM)\n\tportVXM.write(\"C,IA1M-0,IA3M-0,R \")\n\tportVXM.write(\"Q, \")\n\tportArd.flush()", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def TestResponse(port):\n\tcommandString = \"F\"\n\tport.write(commandString)\n\tcommandString = \"PM3,C,I1M500,I3M-500,I3M500,I1M-500,R\"\n\tport.write(commandString)\n\tWaitUntilReady(port)\n\tport.write(\"R\")\n\tresp=WaitUntilReady(port)\n\tcount=0\n\tprint(\"starting loop:\")\n\twhile('^' in resp):\n \tport.write(\"X\")\n\t\txpos=port.read(9)\n\t\tprint(xpos)\n\t\tport.write(\"R\")\n\t\ttime.sleep(5)\n\t\tresp=WaitUntilReady(port)\n\t\tcount = count+1\n\t\tprint(count)", "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def remote_status():", "def test_rpcCall(self):\n pass", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def test_check_replication_unknown_valueerror2(self, mock_urlopen):\n base_url = 'http://localhost:6000/recon/'\n jdata = PropertyMock(return_value=b'X')\n mock_urlopen.return_value = MagicMock(read=jdata)\n result = check_replication(base_url, [4, 10, 4, 10])\n self.assertEqual(result,\n 3*[(STATUS_UNKNOWN,\n \"Can't parse status data\")])", "def test_returnCar(self):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.connect(ADDRESS)\n self.assertTrue(Functions.returnCar(s, '38'))", "def test_emirp_check():\r\n pass", "def 
test_rsp_unknown_status(self):\n\n def handle(event):\n return 0xFFF0, event.modification_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_connect():\n PV_name = \"TEST:TEST.VAL\"\n casput(PV_name, 1)\n assert caget(PV_name) == 1", "def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0", "def testAllRead(self):\n import time,copy\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n ptnames = [ pt['name'] for pt in pts ]\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n value = filter(lambda x: x['name']==pt, pts)[0]['value']\n #assert value == reply[ptnames.index(pt)]\n received = reply[ptnames.index(pt)]\n if not value == received: \n print pt, ' was %s but should be %s'%(str(received),str(value))", "def test_send_network(self) :\n symbol = 'A' \n oProtocol = Protocol(symbol,mode=\"client\",debug=self.debug)\n command = \"N200\"\n message = oProtocol.send(command)\n #if message['status'] is False :\n #print(\"\\n*** ERROR : test_send_network : {}\".format(message['notify']))\n\n #Pour enregistrer les traces d'appels de fonctions dans le fichier log/client_calltrack_sorted.txt\n client_tracker_print()\n self.assertTrue( (message['status'] is not True) )", "def test_rsp_failure(self):\n\n def handle(event):\n return 0x0112, None\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0112\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_set_get_value_1(self):\n value = 23.0\n port = cn.Port(value)\n self.assertEqual(port.value, value)", "def test_check_query_response(self, node_id):\n\n print('\\n### Testing query node status RESPONSE ###')\n print('Remember that node_id must be the same 3 characters string that in test_query_node_id(node_id)')\n\n received_bytes = self.serport.readline()\n if received_bytes == b'E\\r\\n':\n print(\"You received Error Msg!\")\n print(f'Did not receive correct query status response from node {node_id}')\n print(f'Query again the node {node_id} if required')\n return False\n\n elif (len(received_bytes) == 13) and (received_bytes[0:8] == b'#B' + node_id.encode() + b'06V'):\n supply_voltage = received_bytes.decode()[8:13]\n 
print(f\"supply_voltage of {node_id} is {supply_voltage}\")\n print(\"response from the remote node SUCCESS\")\n return True\n else:\n print(f'Did not receive correct query status response from node {node_id}')\n print(f'Query again the node {node_id} if required')\n return False", "def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n client = ModbusTCP(self.config['vdevs']['slave']['icsifaces'][0],\n self.config['vdevs']['slave']['points']) \n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply, \"Expected:\", expectedValue\n assert reply[0] == expectedValue", "def test_rsp_unknown_status(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test\"\n ds.SOPClassUID = DisplaySystem\n ds.SOPInstanceUID = \"1.2.3.4\"\n return 0xFFF0, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_rangetocidr_command(runner):\n\n result = runner.invoke(command, ['rangetocidr', '127.0.0.0', '128.0.0.3'])\n assert result.exit_code == 0\n\n assert '127.0.0.0/8' in result.output\n assert '128.0.0.0/30' in result.output", "def test(self,version=''):\n p5cmd = ['srvinfo', 'lexxvers']\n try:\n res = self.nsdchat_call(p5cmd,5)\n p5_version = singlevalue(res)\n if (p5_version >= str(version)):\n return True\n return False\n except subprocess.TimeoutExpired:\n print(\"Could not connect to the archiware p5 server.\\nPlease review\"\n \"the connection and firewall settings.\")\n raise", "def test_set_get_value_2(self):\n value = (1,)\n port = cn.Port()\n port.value = value\n self.assertEqual(port.value, value)", "def test_ipcrm_not_isntalled(): # pragma: windows\n IPCComm.ipcrm()", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def test_plc_read(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n\n print(plc.read_tag(tag_name))\n\t#plc.read_tag(tag_name)\n plc.close()\n else:\n print(\"Unable to open\", plc_ip)", "def test_getbinarystate(\n fauxmo_server: pytest.fixture, simplehttpplugin_target: pytest.fixture\n) -> None:\n data = b'Soapaction: \"urn:Belkin:service:basicevent:1#GetBinaryState\"'\n\n resp = requests.post(\n \"http://127.0.0.1:12345/upnp/control/basicevent1\", data=data\n )\n assert resp.status_code == 200\n\n root = ET.fromstring(resp.text)\n val = root.find(\".//BinaryState\").text\n assert val in [\"0\", \"1\"]", "def read_test(self, cmd):\n w_bytes = [random.randrange(0, 128) for i in range(0, 16)]\n self._pyb.send(w_bytes)\n self._serial.reset_input_buffer()\n self._serial.write('\\r\\n'.encode('utf-8'))\n self._serial.write(cmd.encode('utf-8'))\n self._serial.write('\\r\\n'.encode('utf-8'))\n\n res = self._serial.read_until(terminator=serial.to_bytes([ord(c) for c in 'Ignored '])).decode('utf-8')\n self._pyb.deinit()\n\n r_bytes = []\n for x in re.sub('\\r', '', res).split('\\n'):\n if x.find('IGNORE') != 
-1:\n r_bytes = [int(s, 16) for s in x.split(',') if len(s) == 2]\n break\n\n if self.compare_host_dut_result(w_bytes, r_bytes) == -1:\n print(repr(res))\n return \"Fail\"\n\n return \"Pass\"", "def test_incoming_k(self):\n m_interface = Mock()\n m_interface.callback.return_value = True\n m_interface.read.return_value = ''\n upb = UPB(m_interface)\n upb.onCommand(address=(22,255), callback=m_interface.callback)\n m_interface.read.return_value = \"PU07141610FF3090\\x0DPU07151610FF308F\\x0D\"\n# time.sleep(4000)\n time.sleep(2)\n m_interface.callback.assert_called_with(address=(22,255), command='status', source=upb)\n m_interface.read.return_value = ''", "def testSingleRead(self, point = 'pressure', expectedValue = 17.0 ):\n import time\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n reply = client.readPoints(point)\n #print \"Slave pressure: \", reply\n assert reply[0] == expectedValue", "def _platformix_get(self, context, fake_reply, prop):\r\n if hasattr(self.host, prop):\r\n self._reply(context, proto_success(getattr(self.host, prop), prop), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Property {} not found on {}\".format(prop, self.host.name)), fake_reply)", "def check_relay_status():\n \n query_cmd_packet = b'\\x04\\x18\\x00\\x00\\x00\\x1b\\x0f'\n ser_relay.write(query_cmd_packet)\n time.sleep(1)\n resp_array = array('B', ser_relay.read(7))\n \n return resp_array", "def test_expect_status_property_about_registry_process(client, start_call_fx):\n\n url = reverse_lazy('calls:registry-list')\n\n response = client.post(url, start_call_fx, content_type='application/json')\n\n job_id = response.data.get('job_id')\n\n job = client.get(job_id)\n\n assert job.data.get('status') == 'DONE'", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0", "def testAllRead(self):\n import time,copy\n time.sleep(2)\n to_config = self.config['vdevs']['slave']['icsifaces'][0]\n from_config = self.config['vdevs']['master']['clientifaces'][0]\n points = self.config['vdevs']['slave']['points']\n client = ModbusRTU(to_config, points, from_config)\n\n\n pts = copy.deepcopy(self.config['vdevs']['slave']['points'])\n for i in xrange(50):\n ptnames = [ pt['name'] for pt in pts ]\n reply = client.readPoints(ptnames)\n #print \"Reply: \", reply\n for pt in ptnames:\n value = filter(lambda x: x['name']==pt, pts)[0]['value']\n #assert value == reply[ptnames.index(pt)]\n received = reply[ptnames.index(pt)]\n if not value == received: \n print pt, ' was %s but should be %s'%(str(received),str(value))", "def test_rsp_failure(self):\n\n def handle(event):\n return 0x0112, None\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0112\n assert ds is 
None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_documentation_popxl_replication(self):\n filename = \"replication.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_xmlrpc(text):\r\n return \"Here's a response! %s\" % str(text)", "def test_verify_connection_to_a_device():", "def test_host_port_expression_props(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block._execute_snmp_request = MagicMock(\n return_value=SAMPLE_SNMP_RESPONSE)\n block._handle_data = MagicMock()\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n ip = \"0.0.0.0\"\n port = 1611\n starting_signal = Signal({\n \"ip\": ip,\n \"port\": port\n })\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}],\n \"agent_host\": \"{{ $ip }}\",\n \"agent_port\": \"{{ $port }}\"\n })\n block.start()\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n args, kwargs = block._execute_snmp_request.call_args\n self.assertEqual(args[1], [myOID])\n block._handle_data.assert_called_once_with([], starting_signal)\n block.stop()", "def rpc_ping(self):\n\t\treturn True", "def test_invalid_port_expression_prop(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block.execute_request = MagicMock()\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n ip = \"0.0.0.0\"\n port = 1611\n starting_signal = Signal({\n \"ip\": ip,\n \"port\": port\n })\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}],\n \"agent_port\": \"{{ port }}\"\n })\n block.start()\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n self.assertEqual(0, block.execute_request.call_count)\n block.stop()", "def rpc_match():", "def on_pullVar(self):\n if not os.path.exists(grapher.binPath):\n mess = \"!!! 
ERROR: rndBin path not found, check user pref !!!\"\n self.mainUi._defaultErrorDialog(mess, self.mainUi)\n else:\n tmpPath = os.path.join(self.grapher.userBinPath, 'tmp')\n tmpFile = os.path.join(tmpPath, 'varBuffer.py')\n varDict = pFile.readPyFile(tmpFile)\n for var in sorted(varDict.keys()):\n if var.startswith('selVar_'):\n newItem = self.on_addVar()\n self.setItem(newItem, **varDict[var])", "def test_variables_get(self):\n pass", "def test_device_status(self):\n #071031031E3067\n self.ms.add_response({'\\x14071031031E3067\\x0D': 'PA\\x0D'})\n # Network / Device ID\n response = self.upb.status((49, 3))\n self.assertTrue(response)", "def test_package(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n db = connect()\n engine = db.connect() \n init_db(engine)\n update(engine)\n assert True", "def test_snmpset_return_structure():\n result = snmpset(ipaddress=SNMP_SRV_ADDR, community='public',\n oid='SNMPv2-MIB::sysName.0', value_type='s',\n value='Test Description', port=SNMP_SRV_PORT)\n assert 'Test Description' in result", "def test_invalid_host_expression_prop(self):\n block = SNMPBase()\n block._create_data = MagicMock()\n block.execute_request = MagicMock()\n myOID = \"1.3.6.1.2.1.31.1.1.1.6.2\"\n ip = \"0.0.0.0\"\n port = 1611\n starting_signal = Signal({\n \"ip\": ip,\n \"port\": port\n })\n self.configure_block(block, {\n \"oids\": [{\"oid\": myOID}],\n \"agent_host\": \"{{ ip }}\",\n })\n block.start()\n # Send the starting signal, make sure everything was called correctly\n block.process_signals([starting_signal])\n self.assertEqual(0, block.execute_request.call_count)\n block.stop()", "def test_ipcs():\n IPCComm.ipcs()", "def test_port_get_set_value(self):\n v1 = [1, 2, 3]\n p1 = cn.Port()\n p1.value = v1\n\n p2 = cn.Port()\n p2.value = v1\n self.assertEqual(\n (p1.value == p2.value).all(),\n True\n )", "def test_query_self_status(self):\n print('\\n### Testing query self status ###')\n\n command = b'$?'\n self.serport.write(command)\n received_bytes = self.serport.readline()\n index = received_bytes.find(b'#A')\n\n if (index != -1) and (len(received_bytes) - index == 10):\n if received_bytes[index+5:index+6] == b'V':\n # print(\"check_for_valid_ack_signal SELF_STATUS V was spot on\")\n for i in range(index+6, index+10):\n if b'0' <= received_bytes[i:i+1] <= b'9':\n pass\n else:\n print(\"query self status FAILURE 1\")\n return False\n node_id = received_bytes.decode()[index+2:index+5]\n print(\"SELF_STATUS node is :\", node_id)\n\n voltage = round(float(received_bytes[index+6:index+10]) * (15.0/65536.0), 3)\n print(\"SELF_STATUS voltage is :\", voltage)\n print(\"query self status SUCCESS\")\n return True\n else:\n print(\" query self status FAILURE 2\")\n return False", "def Read_Response(self, expected = bytes([0x01])):\r\n data = self.Port.read(1)\r\n if data == expected: return True\r\n return False", "def test_rsp_unknown_status(self):\n\n def handle(event):\n return 0xFFF0, event.attribute_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n\n handlers = [(evt.EVT_N_CREATE, handle)]\n scp = ae.start_server((\"localhost\", 11112), evt_handlers=handlers, block=False)\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_create(\n ds, ModalityPerformedProcedureStep, 
\"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0xFFF0\n assert ds is None\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_rsp_warning(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test\"\n ds.SOPClassUID = DisplaySystem\n ds.SOPInstanceUID = \"1.2.3.4\"\n return 0x0116, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0116\n assert ds is not None\n assert isinstance(ds, Dataset)\n assert ds.PatientName == \"Test\"\n assert ds.SOPClassUID == DisplaySystem\n assert ds.SOPInstanceUID == \"1.2.3.4\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def get_xcom(**context):\n ti = context['ti']\n data = ti.xcom_pull(task_ids='xcom_from_bash', key='return_value')\n logging.info(data)", "def test_script_p2_sh_address(self):\n pass", "def test_set_get(self):\n self.shell.onecmd(\"create %s/one 'hello'\" % (self.tests_path))\n self.shell.onecmd(\"set %s/one 'bye'\" % (self.tests_path))\n self.shell.onecmd(\"get %s/one\" % (self.tests_path))\n self.assertEqual(\"bye\\n\", self.output.getvalue())", "def sendPullResponse(remote, request, txpk,sock):\r\n remote = (remote[0], remote[1])\r\n\r\n m = GatewayMessage(version=request.version, token=request.token,\r\n identifier=PULL_RESP, gatewayEUI=b'9079494338994186168',\r\n remote=remote, ptype='txpk', txpk=txpk)\r\n print(\"Sending PULL_RESP message to %s:%d\" % remote)\r\n\r\n sock.sendto(m.encode(), remote)\r\n data, address = sock.recvfrom(4096)\r\n print(data)\r\n print('received %s bytes from %s' % (len(data), address))", "def test_postfix(self):\n test_sensordef = {\n \"kind\": self.test_postfix.get_kind(),\n \"name\": \"Postfix Mailqueue\",\n \"description\": \"Monitors the mailqueue of a postfix server\",\n \"help\": \"Monitors the mailqueue of a postfix server for active, deferred, hold or corrupt mail\",\n \"tag\": \"mppostfixsensor\",\n \"fields\": [],\n \"groups\": []\n }\n assert_equal(self.test_postfix.get_sensordef(), test_sensordef)", "def test_t10_message():\n send_json_message_to_t10(\"10.89.130.68\", \"cisco\", \"cisco\", request.get_json())\n return \"ok\"", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def nremote(self):", "def test_verify_state_of_a_device():", "def test_process_newly_public_domain_gj(self):\n self.page_mock.text = \"\"\"{{REDaten\n|BAND=S I\n|KEINE_SCHÖPFUNGSHÖHE=ON\n|GEBURTSJAHR=1870\n}}\nbla\n{{REAutor|Stein.}}\"\"\"\n re_page = RePage(self.page_mock)\n task = PDKSTask(None, self.logger)\n compare({\"success\": True, \"changed\": True}, task.run(re_page))\n compare(\"\", re_page[0][\"GEBURTSJAHR\"].value)", "def test_snmpget_return_structure():\n result = snmpget(community='public', ipaddress=SNMP_SRV_ADDR,\n oid=SYSDESCR_OID, port=SNMP_SRV_PORT)\n assert 'Linux' in result\n assert isinstance(result, str)", "def test_rsp_warning(self):\n\n def handle(event):\n return 0x0116, event.modification_list\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(ModalityPerformedProcedureStep)\n scp = 
ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_SET, handle)]\n )\n\n ae.add_requested_context(ModalityPerformedProcedureStep)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n ds = Dataset()\n ds.PatientName = \"Test^test\"\n status, ds = assoc.send_n_set(\n ds, ModalityPerformedProcedureStep, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0116\n assert ds.PatientName == \"Test^test\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def test_process_newly_public_domain_gj_not_yet(self):\n self.page_mock.text = \"\"\"{{REDaten\n|BAND=S I\n|KEINE_SCHÖPFUNGSHÖHE=ON\n|GEBURTSJAHR=1871\n}}\nbla\n{{REAutor|Stein.}}\"\"\"\n re_page = RePage(self.page_mock)\n task = PDKSTask(None, self.logger)\n compare({\"success\": True, \"changed\": False}, task.run(re_page))\n compare(\"1871\", re_page[0][\"GEBURTSJAHR\"].value)", "def test_system(cli_request, msg):\n from client import client\n response = client(cli_request)\n response_parts = response.split('\\r\\n')\n assert response_parts[0] == msg\n assert '' in response_parts", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_property_success(self):\r\n self.assertEqual(self.config.option1, 1337)", "def testPowerOnResponse(self):\n message = (mavutil.mavlink.GOPRO_COMMAND_POWER, mavutil.mavlink.GOPRO_REQUEST_SUCCESS)\n self.mgr.set_response_callback('vehicle','name', message)\n self.mgr.processMsgQueue.assert_called_with()", "def test_get_address():\n\n # Wait for workspace to be initialized\n time.sleep(40)\n bambi = create_test_bambi()\n address = bambi.get_address_to_workspace()\n assert address", "def test_cmd_field():\n FNAME = 'xonsh-SESSIONID.json'\n FNAME += '.cmdfield'\n hist = History(filename=FNAME, here='yup', **HIST_TEST_KWARGS)\n # in-memory\n with mock_xonsh_env({'HISTCONTROL': set()}):\n hf = hist.append({'rtn': 1})\n yield assert_is_none, hf\n yield assert_equal, 1, hist.rtns[0]\n yield assert_equal, 1, hist.rtns[-1]\n yield assert_equal, None, hist.outs[-1]\n # slice\n yield assert_equal, [1], hist.rtns[:]\n # on disk\n hf = hist.flush()\n yield assert_is_not_none, hf\n yield assert_equal, 1, hist.rtns[0]\n yield assert_equal, 1, hist.rtns[-1]\n yield assert_equal, None, hist.outs[-1]\n os.remove(FNAME)", "def test_rpcSendRecv(self):\n cli_send = self.client_msg\n srv_send = self.server_msg\n # Send message to driver\n flag = self.client_comm.send(cli_send)\n assert(flag)\n flag, msg_recv = self.server_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, srv_send)\n # Send response back to instance\n flag = self.server_comm.send(srv_send)\n assert(flag)\n # self.driver.sleep(1)\n flag, msg_recv = self.client_comm.recv(timeout=self.timeout)\n assert(flag)\n nt.assert_equal(msg_recv, cli_send)", "def test_script_p2_s_address(self):\n pass", "def test_recv(self):\n # Required to get useful test names\n super(TestCisPlyInput_local, self).test_recv()", "def ping():\n return 'pong'", "def test_rsp_success(self):\n\n def handle(event):\n ds = Dataset()\n ds.PatientName = \"Test\"\n ds.SOPClassUID = DisplaySystem\n ds.SOPInstanceUID = \"1.2.3.4\"\n return 0x0000, ds\n\n self.ae = ae = AE()\n ae.acse_timeout = 5\n ae.dimse_timeout = 5\n ae.network_timeout = 5\n ae.add_supported_context(DisplaySystem)\n scp = ae.start_server(\n (\"localhost\", 11112), block=False, evt_handlers=[(evt.EVT_N_GET, handle)]\n )\n\n 
ae.add_requested_context(DisplaySystem)\n assoc = ae.associate(\"localhost\", 11112)\n assert assoc.is_established\n\n status, ds = assoc.send_n_get(\n [(0x7FE0, 0x0010)], DisplaySystem, \"1.2.840.10008.5.1.1.40.1\"\n )\n assert status.Status == 0x0000\n assert ds is not None\n assert isinstance(ds, Dataset)\n assert ds.PatientName == \"Test\"\n assert ds.SOPClassUID == DisplaySystem\n assert ds.SOPInstanceUID == \"1.2.3.4\"\n assoc.release()\n assert assoc.is_released\n\n scp.shutdown()", "def read_and_response(self, vsr, address_h, address_l):\n # time.sleep(0.2)\n self.send_cmd([vsr, 0x41, address_h, address_l])\n # time.sleep(0.2)\n resp = self.read_response() # ie resp = [42, 144, 48, 49, 13]\n reply = resp[2:-1] # Omit start char, vsr address and end char\n reply = \"{}\".format(''.join([chr(x) for x in reply])) # Turn list of integers into ASCII string\n # print(\" RR. reply: {} (resp: {})\".format(reply, resp)) # ie reply = '01'\n return resp, reply", "def test_store_property_after_reconnecting_to_the_device():", "def test_get_requests(self):\n response = self.client.open('/api/provisioning/port',\n method='GET')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def main():\n test_exchange()\n print(\"2.1589225 Euros\")", "def test_get_address(self):\n with self.subprocess_getoutput_patch:\n ret = self.inst._get_address()\n self.assertEqual(ret, \"http://example\")", "def test_check_regon(client):\n is_assigned, request_id = client.check_regon(\n \"730371613\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def _test_con(self) -> bool:\n self.reset_buffers()\n try:\n serial_number = self.get_exact(b\"<GETSERIAL>>\", size=7)\n except Exception as e:\n # Unsure of exception types.\n logger.warning(f\"{e}\", exc_info=True)\n return False\n # timeout error if wrong\n if len(serial_number) == 7:\n # Not 100% sure... no prescribed method of confirming\n # we're connected to a GMC device in specs\n logger.debug(f\"Test connection serial: {serial_number}\")\n return True\n else:\n logger.warning(f\"Unexpected response: {serial_number}\")\n return False", "def test_getirramp():\n d = gini.get_ir_ramp()\n assert len(d) == 256", "def testFailure(self):\n request = b'hello'\n reply = self.sendAndReceive(request)\n self.assertEqual(2, reply[0])", "def test_get_robax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. 
Test will not run.'\n sys.exit()\n robax = rapid_jointtarget.get_robax_tostring(const_jtar)\n self.assertEqual(robax, 'RobAx: [Rax_1,Rax_2,Rax_3,Rax_4,Rax_5,Rax_6] = [0,0,0,10,0,0]')", "def test_fetch_otp(self):\n otp = self.api.fetch_otp()\n self.assertIn('code', otp)", "def test_repr(self):\n namespace = {}\n exec('tunneling = {0!r}'.format(self.tunneling), globals(), namespace)\n self.assertIn('tunneling', namespace)\n tunneling = namespace['tunneling']\n self.assertAlmostEqual(self.tunneling.frequency.value, tunneling.frequency.value, 2)\n self.assertEqual(self.tunneling.frequency.units, tunneling.frequency.units)", "def test_query_node_id(self,node_id):\n\n print('\\n### Testing query node status ACK ###')\n print('Remember that node_id must be a 3 characters string')\n\n command = b'$V' + node_id.encode()\n self.serport.write(command)\n received_bytes = self.serport.readline()\n index = received_bytes.find(b'$V')\n\n #ACK COMMAND\n if (index != -1) and (len(received_bytes) - index == 5 and received_bytes.decode()[1] == 'V'):\n #print(\"SET_ADDRESS V was spot on\")\n if received_bytes[1:4] == command[1:4]:\n node_id = received_bytes.decode()[2:5]\n print(\"command has well been sent to node :\"+ node_id)\n print(\"acknowledgement of the command SUCCESS\")\n return True\n else: \n print(\"acknowledgement of the command FAILURE\")\n return False", "def PHook(p):\n x = p['sy']['pop']()\n if (x == '.'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def test_wrapper_processing(self):\r\n result = mib2pysnmp('conpot/tests/data/VOGON-POEM-MIB.mib')\r\n self.assertTrue('mibBuilder.exportSymbols(\"VOGON-POEM-MIB\"' in result,\r\n 'mib2pysnmp did not generate the expected output. Output: {0}'.format(result))" ]
[ "0.6584408", "0.6459491", "0.5550423", "0.54029787", "0.53768945", "0.5342253", "0.528288", "0.52809453", "0.5237307", "0.51818335", "0.5181425", "0.50585806", "0.5056922", "0.50432426", "0.503633", "0.50314456", "0.50298536", "0.50265086", "0.5013454", "0.500096", "0.49899554", "0.49847114", "0.49791014", "0.49777964", "0.4974934", "0.49557754", "0.4948672", "0.4935801", "0.49343306", "0.4926512", "0.49236172", "0.4919985", "0.49150056", "0.49105394", "0.4904867", "0.4904291", "0.49010172", "0.4898389", "0.48974383", "0.48780486", "0.48673916", "0.4863756", "0.48636496", "0.48394468", "0.48366937", "0.48347008", "0.48331434", "0.4819641", "0.48079807", "0.48011705", "0.47969285", "0.4785952", "0.4782586", "0.47802556", "0.47782713", "0.47748664", "0.47715926", "0.4771568", "0.47712806", "0.47706273", "0.47621697", "0.4743293", "0.47427595", "0.473364", "0.47276682", "0.47233862", "0.471872", "0.471872", "0.4713776", "0.47128528", "0.47074708", "0.47063836", "0.46983287", "0.46880034", "0.4684406", "0.4682207", "0.46793804", "0.46788722", "0.4678213", "0.46779972", "0.4667929", "0.4666138", "0.4663983", "0.46614596", "0.46612293", "0.46523303", "0.46520817", "0.46464512", "0.4644334", "0.46440127", "0.46426818", "0.464051", "0.4636325", "0.46345738", "0.4630314", "0.4630307", "0.46298337", "0.46290416", "0.46263152", "0.46213162" ]
0.6953812
0
Test the popxl rts variable
def test_documentation_popxl_rts_var(self):
    filename = "rts_var.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self):\n # 0x13 is nop\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % self.target.ram)\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 4))\n self.gdb.command(\"p *((int*) 0x%x)=0x13\" % (self.target.ram + 8))\n self.gdb.p(\"$pc=0x%x\" % self.target.ram)\n self.gdb.stepi()\n assertEqual((self.target.ram + 4), self.gdb.p(\"$pc\"))\n self.gdb.stepi()\n assertEqual((self.target.ram + 8), self.gdb.p(\"$pc\"))", "def test_documentation_popxl_remote_rts_var(self):\n filename = \"remote_rts_var.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_get_robax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(const_number)\n self.assertEqual(robax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n robax = rapid_jointtarget.get_robax_tostring(10)\n self.assertIsInstance(robax, Exception)", "def test_get_extax_tostring_incorrect(self):\n got_var, const_number = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_number')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n # Checks if wrong rapid data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(const_number)\n self.assertEqual(extax, 'DataType is num and not jointtarget.')\n # Checks if wrong data is inserted.\n extax = rapid_jointtarget.get_extax_tostring(10)\n self.assertIsInstance(extax, Exception)", "def has_regvar(*args):\n return _ida_frame.has_regvar(*args)", "def PHook(p):\n x = p['sy']['pop']()\n if (x == '.'):\n p['sy']['push'](p['OK'])\n else:\n p['sy']['push'](p['NOK'])\n #endif", "def test_get_robax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. 
Test will not run.'\n sys.exit()\n robax = rapid_jointtarget.get_robax_tostring(const_jtar)\n self.assertEqual(robax, 'RobAx: [Rax_1,Rax_2,Rax_3,Rax_4,Rax_5,Rax_6] = [0,0,0,10,0,0]')", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def test_documentation_popxl_remote_var(self):\n filename = \"remote_variable.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def test_pslx_34_002(self, testf=\"pslx_34_002.pslx\"):\n BlatPslCases.test_psl_34_002(self, \"pslx_34_002.pslx\", pslx=True)", "def test_is_rock_valid():\n\n\tassert game.is_val('rock') is True # muze byt jenom 'True', vsechny True stejne ID i False", "def test_pop_returns_value(new_dll):\n assert new_dll.pop() == 3", "def _is_pop(self, words):\n if words[0] == 'pop':\n if len(words) != 3:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_POP command.\".format(self._file_line))\n if words[1] not in ['temp', 'pointer', 'static', 'local', 'argument', 'this', 'that']:\n raise SyntaxError(\"File line {}: Invalid second argument.\".format(self._file_line))\n return True\n else:\n return False", "def test_prodcode_4(self):\n self.assertEqual(prodcode.functio(), \"production value\")", "def test_03_pass(self):\n if x==1:\n pass", "def test_02_pass(self):\n if x==1:\n pass", "def check():", "def test_get_extax_tostring_correct(self):\n got_var, const_jtar = rapid_datatypes.get_rapid_data(self.controller, 'T_ROB1', 'MainModule', 'const_jtarget')\n if not got_var:\n print 'Couldn\\'t get variable. Test will not run.'\n sys.exit()\n extax = rapid_jointtarget.get_extax_tostring(const_jtar)\n self.assertEqual(extax, 'Extax: [Eax_a,Eax_b,Eax_c,Eax_d,Eax_e,Eax_f] = [9E9,9E9,9E9,9E9,9E9,9E9]')", "def test_func(self):\n rol_nu = rol_get_huidige(self.request)\n return rol_nu == Rollen.ROL_RCL", "def test_01_pass(self):\n if x==1:\n pass", "def test_01_pass(self):\n if x==1:\n pass", "def TestRetract(portVXM,portArd):\n commandString = \"F\"\n portVXM.write(commandString)\n commandString = \"PM-3,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L3,R \"\n portVXM.write(commandString)\n\tresp='abs'\n\twhile( '^' not in resp ):\n\t resp = portVXM.read(1000)\n\t\n\tprint \"Moving to Standard Operation.\"\t\n commandString = \"PM-2,C,SA1M400,SA3M100,LM0,I3M400,P5,I1M-90,P5,L0,R \"\n portVXM.write(commandString)\n\tt0 = time.time() \n\tt = time.time() \n\tportArd.flushInput()\n\tresp='abs'\n\t\n #while( ('21,1,1' not in resp) and ((t-t0)>30.0)):\n while( not('21,0,1\\n\\r21,1,1\\n\\r21,0,0\\n\\r' in resp) ):\n\t resp = portArd.read(10000)\n\t t = time.time()\n\n\t#print \"CONDITION: \\t\" ,('21,1,1' not in resp),'\\t'\n\tportVXM.write(\"D,\")\n resp = portVXM.read(1000)\n\tXpos, Zpos = GetXZ(portVXM)\n localtime = time.asctime(time.localtime(time.time()))\n\tprint \"Source Fully Retracted at (X,Y) : \",Xpos, \"\\t\", Zpos ,\"\\t\", \"at localtime: \", localtime\n\tprint abs(t-t0) , \"\\t Seconds \\r\"\n\tWaitUntilReady(portVXM)\n\tportVXM.write(\"C,IA1M-0,IA3M-0,R \")\n\tportVXM.write(\"Q, \")\n\tportArd.flush()", "def test_plc_read_val(plc_ip, tag_name):\n\n plc = ClxDriver()\n if plc.open(plc_ip):\n tagg = plc.read_tag(tag_name)\n plc.close()\n return (tagg)\n \n else:\n print(\"Unable to open\", plc_ip)", "def test_variablepresentations_get(self):\n pass", "def test_ge(self):\n # Success\n script = self.write_script(\"\"\"\n variable = 5\n if variable >= 5:\n check = 130\n else:\n check = 0\n end\n \"\"\")\n 
check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 130)\n\n # Failure\n script = self.write_script(\"\"\"\n variable = 5\n if variable >= 8:\n check = 130\n else:\n check = 0\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 0)", "def test_le(self):\n # Success\n script = self.write_script(\"\"\"\n variable = 10\n if variable <= 10:\n check = 80\n else:\n check = 10\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 80)\n\n # Failure\n script = self.write_script(\"\"\"\n variable = 10\n if variable <= 8:\n check = 80\n else:\n check = 10\n end\n \"\"\")\n check = script.get_variable_or_attribute(\"check\")\n self.assertEqual(check, 10)", "def test_getx(self):\n point = (1,2)\n x = utils.getx(point)\n self.assertEqual(1, x)", "def test_MINX_pass(self):\n self.assertTrue(self.mod.minx.isset)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_contains_true(self):\n self.assertTrue('1.SKM7.640188' in self.tester)", "def test_pickle_within_drs(free_alg):\n\n dr = free_alg\n env = dr.exec_drs(TEST_PICKLE_DRS)\n\n assert env['good_symb']\n assert env['good_indexed']\n assert env['def_'] == env['def_back']", "def test_boolean_and_selection(self):\n\n # The selection loop:\n sel = list(mol_res_spin.residue_loop(\"#Ap4Aase:4 & :Pro\"))\n\n # Test:\n self.assertEqual(len(sel), 1)\n for res in sel:\n self.assert_(res.name == \"Pro\" and res.num == 4)", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def is_return(self, pick):\n return ('-' in pick.name and sorted(pick.name.split('-'), reverse=True)[0].startswith('ret'))", "def test_reffs(self):\n self.assertEqual((\"1\" in list(map(lambda x: str(x), self.TEI.reffs))), True)\n self.assertEqual((\"1.pr\" in list(map(lambda x: str(x), self.TEI.reffs))), True)\n self.assertEqual((\"2.40.8\" in list(map(lambda x: str(x), self.TEI.reffs))), True)", "def test_dummy4(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertTrue(xp.log_not() is xp)", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def test_func(self):\n self.rol_nu = rol_get_huidige(self.request)\n return self.rol_nu in (Rollen.ROL_BB, Rollen.ROL_BKO, Rollen.ROL_RKO, Rollen.ROL_RCL, Rollen.ROL_HWL)", "def read_pin10(self):\n return self.PIN_10_SIGNAL_TEST", "def testHealthAssessPyoGangrenosum(self):\n attr = self.session.create_visit_attr()\n\n self.util.boolTypeTest(self, attr, \"pyo_gangrenosum\")\n\n self.util.boolPropertyTest(self, attr, \"pyo_gangrenosum\")", "def c_test_population_function(self, function):\r\n return 1", "def test_9_call_expression(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction foo(a:real;b:boolean):real; begin return 1; end\n\t\tprocedure main(); var x:real; begin x:=foo(1,false); {OK} end\n\t\tprocedure f(); var y:integer; y:real; begin end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,409))", "def test_get(self):\n 
obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_is_99_preHandSimple_correct(self):\n self.assertEqual(self.hand.getPreHandSimple(), '99')", "def test_get(self):\n self.assertEqual(self.tester.get('barcodesequence'), 'AGCGCTCACATC')", "def test_getint(self):\n self.assertEqual(self.config.getint('advanced','n'),12)", "def test_variables_get(self):\n pass", "def event_m10_29_80000():\r\n \"\"\"State 0,2: [Lib] [Preset] Reproduction fire _SubState\"\"\"\r\n assert event_m10_29_x9(z58=1029000, z59=1029099)\r\n \"\"\"State 1: Finish\"\"\"\r\n EndMachine()\r\n Quit()", "def event_m20_11_81000():\n \"\"\"State 0,2: [Lib] [Preset] Reproduction fire _SubState\"\"\"\n assert event_m20_11_x53(z73=2011100, z74=2011199)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def true(symbol):\n return True", "def test_emirp_check():\r\n pass", "def test_32_if(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a=0 then return 1; else return a; end\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,432))", "def test_data(clear, data):\r\n cmd = ShdlcCmdGetErrorState(clear=clear)\r\n assert type(cmd.data) is bytes\r\n assert cmd.data == data", "def test_test_property():\n\n contents = (\"[Info]\\n\"\n \"sdk = 23\")\n\n testutils.deploy_config_raw(contents)\n\n assert prop.test_prop('info', 'sdk') == 1\n\n testutils.undeploy()\n\n return 0", "def c_test_eval_inp(self, population, run_locals):\r\n return 1", "def test01getNumber(self):\n self.assertEqual( calc.getNumber(), 1234 )", "def test_get_property():\n\n sdk = '23'\n contents = (\"[Info]\\n\"\n \"sdk = %s\" % sdk)\n\n testutils.deploy_config_raw(contents)\n\n assert prop.get_prop('info', 'sdk') == sdk\n\n testutils.undeploy()\n\n return 0", "def test_get_cell(workbook):\n assert workbook.get_cell(3,1) == '507906000030242007'", "def test_pop_returns_value_of_tail(dq_3):\n assert dq_3.pop() == 'ragtime'", "def test_id():\r\n cmd = ShdlcCmdGetErrorState(clear=False)\r\n assert type(cmd.id) is int\r\n assert cmd.id == 0xD2", "def test_RUMI():\r\n\r\n start = 11051\r\n end = 11902\r\n read_poems(start, end)", "def test_stub(self):\n self.assertEqual(self._value, True)", "def test_brackets_success(self):\n found = False\n pyint = Interpreter()\n try:\n pyint.run(code=BF_CORRECT_BRACK)\n except SystemExit: \n found = True\n self.assertFalse(found)", "def test_extracting_one_value(self):\n\t\tself.assertEqual([\"b\"], au.extract_variables(bf.Var(\"b\")), \"Invalid variables extracted, expected [b].\")", "def test04(self):\n model = self.setup_model02()\n x = model.x\n x[1].fix(1)\n wts = StoreSpec.value_isfixed_isactive(only_fixed=True)\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n x[1].unfix()\n x[1].value = 2\n x[2].value = 10\n model.g.deactivate()\n from_json(model, fname=self.fname, wts=wts)\n assert x[1].fixed\n assert value(x[1]) == 1\n assert value(x[2]) == 10\n assert model.g.active", "def test_oncvpsp_pseudo(self):\n ger = Pseudo.from_file(ref_file(\"ge.oncvpsp\"))\n print(repr(ger))\n print(ger)\n print(ger.as_dict())\n ger.as_tmpfile()\n\n self.assertTrue(ger.symbol == \"Ge\")\n self.assert_equal(ger.Z, 32.0)\n self.assert_equal(ger.Z_val, 4.0)\n self.assertTrue(ger.isnc)\n self.assertFalse(ger.ispaw)\n self.assert_equal(ger.l_max, 2)\n self.assert_equal(ger.l_local, 4)\n 
self.assert_equal(ger.rcore, None)\n self.assertFalse(ger.has_dojo_report)", "def test(self):\n res = self._dll.JLINKARM_Test()\n return (res == 0)", "def test_getirramp():\n d = gini.get_ir_ramp()\n assert len(d) == 256", "def testCheck(self):\r\n from pydsl.Check import PLYChecker\r\n from pydsl.contrib.grammar import example_ply\r\n from pydsl.Grammar.Definition import PLYGrammar\r\n grammardef = PLYGrammar(example_ply)\r\n checker = PLYChecker(grammardef)\r\n self.assertTrue(checker.check(\"O\"))\r\n self.assertTrue(checker.check([\"O\"]))\r\n self.assertFalse(checker.check(\"FALSE\"))\r\n #self.assertFalse(checker.check(\"\")) #FIXME\r", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def check_register(dut, expected):\n for key, value in expected.items():\n val = dut.DUT_apple_riscv_soc.soc_cpu_core.regfile_inst.ram[key].value.integer\n assert value == val, f\"RAM1: Register {key}, Expected: {value}, Actual: {val}\"\n print(f\"RAM1: Register {key}, Expected: {value}, Actual: {val}\")", "def event_m20_11_82000():\n \"\"\"State 0,2: [Lib] [Preset] Reproduction fire _SubState\"\"\"\n assert event_m20_11_x53(z73=2011200, z74=2011299)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def test_contains_true(self):\n self.assertTrue('BarcodeSequence' in self.tester)\n self.assertTrue('barcodesequence' in self.tester)", "def test_dummy1(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertFalse(xp)\n xp = xp & xpb.foo.bar\n self.assertTrue(xp)\n exp = '/foo/bar'\n self.assertEqual(xp.tostring(), exp)", "def event_m20_11_x110(z24=211000081, z25=802, z26=211020082):\n \"\"\"State 0,1: [Reproduction] Voice frog singing voice_flag_SubState\"\"\"\n call = event_m20_11_x107(z24=z24, z26=z26, z25=z25)\n if call.Get() == 1:\n pass\n elif call.Get() == 0:\n \"\"\"State 3: [Condition] Singing voice frog_flag_SubState\"\"\"\n assert event_m20_11_x108(z24=z24, z25=z25)\n \"\"\"State 2: [Execution] Voice frog singing voice_flag_SubState\"\"\"\n assert event_m20_11_x109(z26=z26, z25=z25)\n \"\"\"State 4: Finish\"\"\"\n return 0", "def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned", "def test_57o_Properties(self):\n self.assertEqual(self.hand.premInd, 0)\n self.assertEqual(self.hand.connectedInd, 0)\n self.assertEqual(self.hand.oneSpaceConnectedInd, 1)\n self.assertEqual(self.hand.suitedInd, 0)\n self.assertEqual(self.hand.PPInd, 0)\n self.assertEqual(self.hand.PPCard, 0)", "def testProtocolReturn(self):\n self.assertEqual(\n self.protocol,\n self.mr.protocol\n )\n\n self.mr._protocol = 'burp'\n\n self.assertEqual(\n 'burp',\n self.mr.protocol\n )", "def verify_payload():\n return True", "def Xchg(self, String, infix):\r\n\r\n tmp1 = self.Check_code_operand(infix[0])\r\n tmp2 = self.Check_code_operand(infix[1])\r\n if (tmp1 == False) or (tmp2 == False):\r\n return False\r\n if (tmp1[0] == 'imm') or (tmp1[2] == 0) or ((tmp1[0] == 'imm') and (tmp2[0] == 'imm')):\r\n if (tmp1[2]==0)and(tmp2[2]!=0)and (tmp2[0] != 'imm'):\r\n tmp1[2]=tmp2[2]\r\n else:\r\n return False\r\n if ((tmp1[0] == 'add') and (tmp2[0] == 'add')) or ((tmp1[0] == 'imm') and (tmp2[0] == 'imm')) or ((tmp1[2] != tmp2[2]) and (tmp2[2] != 0)):\r\n return False\r\n\r\n a = 0\r\n if (tmp1[0] != 'add'):\r\n a = tmp1[1]\r\n else:\r\n a = self.Get_value_from_memory(tmp1[1], tmp1[2])\r\n b = 0\r\n if (tmp2[0] != 'add'):\r\n b = tmp2[1]\r\n else:\r\n b = 
self.Get_value_from_memory(tmp2[1], tmp1[2])\r\n\r\n if tmp1[0] == 'reg':\r\n if len(infix[0][0]) == 3:\r\n self.Registers[infix[0][0]] = b\r\n else:\r\n self.Save_value_in_reg_X(infix[0][0], b)\r\n else:\r\n if not self.Save_value_in_memory(tmp1[1], b, tmp1[2]):\r\n return False\r\n\r\n if tmp2[0] == 'reg':\r\n if len(infix[1][0]) == 3:\r\n self.Registers[infix[1][0]] = a\r\n else:\r\n self.Save_value_in_reg_X(infix[1][0], a)\r\n else:\r\n if not self.Save_value_in_memory(tmp2[1], a, tmp1[2]):\r\n return False\r\n return True", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def test_read_lxyr(self):\n cwd = os.path.dirname(os.path.abspath(__file__))\n # gt_file = os.path.join(cwd, 'test_files/test_gt.lxyr')\n # ground_truths = read_lxyr(gt_file)\n test_dir = os.path.join(cwd, 'test_files/')\n ground_truths = read_lxyr(test_dir, 'test_gt')\n # print ground_truths\n self.assertTrue(any(\n gt for gt in ground_truths\n if gt.x == 553 and gt.y == 132\n and gt.radius == 16.64 and gt.class_value == 3))\n self.assertTrue(any(\n gt for gt in ground_truths\n if gt.x == 119 and gt.y == 631\n and gt.radius == 15.0 and gt.class_value == 4))", "def event_m20_11_80000():\n \"\"\"State 0,2: [Lib] [Preset] Reproduction fire _SubState\"\"\"\n assert event_m20_11_x53(z73=2011000, z74=2011099)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def test04_boolean_operator(self):\n\n import _cppyy\n number = _cppyy.gbl.number\n\n n = number(20)\n assert n\n\n n = number(0)\n assert not n", "def test_dummy2(self):\n xpb = XPathBuilder()\n xp = xpb.dummy()\n self.assertFalse(xp)\n xp = xp & (xpb.attr('foo') == 'xyz')\n self.assertTrue(xp)\n exp = '@foo = \"xyz\"'\n self.assertEqual(xp.tostring(), exp)", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def test_is_57o_preHandSimple_correct(self):\n self.assertEqual(self.hand.getPreHandSimple(), '75o')", "def test_substgrpexcl00402m7_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/substGroupExclusions/substGrpExcl00402m/substGrpExcl00402m7.xsd\",\n instance=\"sunData/ElemDecl/substGroupExclusions/substGrpExcl00402m/substGrpExcl00402m7_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def event_m20_11_83000():\n \"\"\"State 0,2: [Lib] [Preset] Reproduction fire _SubState\"\"\"\n assert event_m20_11_x53(z73=2011300, z74=2011399)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()", "def CHK( UDStat ):\n if UDStat != NOERRORS:\n raise UniversalLibraryError( UDStat )", "def test_get_temp(loaded_fridge):\n assert loaded_fridge.get_temp() == loaded_fridge.temp", "def test_stub() -> None:\n test_val = 3\n assert test_val == 3", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = 
\"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def test_rf_value(self):\n\t\tdetails = self.watcher.describe()\t\t\n\t\tself.assertTrue((details.rf.to_numpy()[0:8] == 9.0).all())", "def testnatom(self):\r\n assert self.data.natom == 20", "def unitTest(self):\n\n if 'y' in self.snmp_obj.dbg:\n print(self.snmp_obj.mib_dict)\n for k in self.snmp_obj.mib_dict:\n print(k, \":\", self.snmp_obj.mib_dict[k])\n\n print(\"Testing get mib oid\")\n\n for i in self.mibs:\n oid = self.snmp_obj.get_mib_oid(i)\n print('mib: %s - oid=%s' % (i, oid))\n\n return True", "def test_id():\n assert Packet106.id == 106", "def test_getting(self):\n self.assertEqual(self.test_notes['C'], self.C)", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)" ]
[ "0.5512495", "0.54203445", "0.5234334", "0.5186235", "0.513492", "0.5132137", "0.5124492", "0.5119325", "0.5085518", "0.50540435", "0.50286376", "0.5028228", "0.50170285", "0.50151104", "0.50103855", "0.4989811", "0.49670818", "0.49645856", "0.49455714", "0.493474", "0.493474", "0.49287537", "0.49034783", "0.49002767", "0.48985684", "0.48946097", "0.4867602", "0.48639008", "0.48600543", "0.48600543", "0.48566508", "0.48364127", "0.4805699", "0.47914377", "0.47877735", "0.47874647", "0.4786578", "0.4786578", "0.4786578", "0.47830084", "0.47660437", "0.47647673", "0.4764395", "0.47620922", "0.47511968", "0.47476676", "0.47472867", "0.47345355", "0.47344595", "0.47325748", "0.47173956", "0.47136986", "0.4705302", "0.4695226", "0.46940032", "0.46931794", "0.46907532", "0.4688725", "0.4681009", "0.46766368", "0.46735662", "0.4657959", "0.4655685", "0.4653598", "0.46529362", "0.4652088", "0.46496466", "0.46438643", "0.46414793", "0.46404964", "0.46353337", "0.4634045", "0.4633901", "0.46326703", "0.46309495", "0.46300173", "0.4626585", "0.46230108", "0.46209654", "0.46205476", "0.4619603", "0.46148315", "0.46131375", "0.46074024", "0.4606707", "0.4606233", "0.46035278", "0.4603216", "0.46000305", "0.45996812", "0.45978743", "0.45956957", "0.45882395", "0.45879036", "0.45857877", "0.45850226", "0.45849124", "0.45809078", "0.45779765", "0.45745224" ]
0.6135992
0
Test the popxl basic mnist example
def test_documentation_popxl_mnist(self):
    filename = "mnist.py"
    self.run_python(filename, file_dir=working_dir, working_dir=working_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n os.system(\"rm -rf images; mkdir images\")\n\n if (len(sys.argv) > 1):\n N = int(sys.argv[1])\n else:\n N = 10\n\n x_test = np.load(\"../../../../data/mnist/mnist_test_images.npy\")\n\n for i in range(N):\n r,c = random.randint(6,12), random.randint(6,12)\n g = np.zeros(r*c)\n for j in range(r*c):\n if (random.random() < 0.15):\n g[j] = 1\n g = g.reshape((r,c))\n g[:,0] = g[0,:] = g[:,-1] = g[-1,:] = 0\n\n img = np.zeros((28*r,28*c), dtype=\"uint8\")\n for x in range(r):\n for y in range(c):\n if (g[x,y] == 1):\n n = random.randint(0, x_test.shape[0])\n im = x_test[n]\n img[28*x:(28*x+28), 28*y:(28*y+28)] = im\n \n Image.fromarray(img).save(\"images/image_%04d.png\" % i)", "def mnist_testing(shuffled = True):\n mndata = MNIST(MNIST_PATH)\n test_ims, test_labels = mndata.load_testing()\n test_X = np.array(test_ims).T\n test_y = np.array(test_labels).T\n return test_X, test_y", "def test_keras_mnist():\n data = fetch(\"mnist\")\n check(data, n_samples_train=60000, n_samples_test=10000, n_features=28 * 28)", "def test_keras_mnist_return_X_y():\n X, y = fetch(\"mnist\", return_X_y=True)\n assert X.shape == (70000, 28 * 28)\n assert y.shape == (70000,)", "def test_mnist(args):\n # type: () -> None\n\n # Build dataset and model\n dataset = MNIST(path=args.path)\n model = MEMMNIST(input_shape=dataset.shape, code_length=64, cpd_channels=100, mem_dim=100, shrink_thres=0.5/100).cuda().eval()\n\n # Set up result helper and perform test\n helper = MEMResultHelper(dataset, model, checkpoints_dir=args.checkpoints, output_file='mem_mnist.txt')\n helper.test_one_class_classification()", "def test_rand(self):\n assert len(self._mnist.random()[:5]) == 5\n pass", "def test_get_mnist_data(self):\n # TODO: Remove once get_mnist_data(...) is fixed.\n pass\n # mnist = get_mnist_data()\n # self.assertEqual(len(mnist.data), 60000)\n # self.assertEqual(len(mnist.labels), 60000)", "def mnist(path=None):\r\n url = 'http://yann.lecun.com/exdb/mnist/'\r\n files = ['train-images-idx3-ubyte.gz',\r\n 'train-labels-idx1-ubyte.gz',\r\n 't10k-images-idx3-ubyte.gz',\r\n 't10k-labels-idx1-ubyte.gz']\r\n\r\n if path is None:\r\n # Set path to /home/USER/data/mnist or C:\\Users\\USER\\data\\mnist\r\n path = os.path.join(os.path.expanduser('~'), 'data', 'mnist')\r\n\r\n # Create path if it doesn't exist\r\n os.makedirs(path, exist_ok=True)\r\n\r\n # Download any missing files\r\n for file in files:\r\n if file not in os.listdir(path):\r\n urlretrieve(url + file, os.path.join(path, file))\r\n print(\"Downloaded %s to %s\" % (file, path))\r\n\r\n def _images(path):\r\n \"\"\"Return images loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 16 bytes are magic_number, n_imgs, n_rows, n_cols\r\n pixels = np.frombuffer(f.read(), 'B', offset=16)\r\n return pixels.reshape(-1, 784).astype('float32') / 255\r\n\r\n def _labels(path):\r\n \"\"\"Return labels loaded locally.\"\"\"\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)\r\n\r\n train_images = _images(os.path.join(path, files[0]))\r\n train_labels = _labels(os.path.join(path, files[1]))\r\n test_images = 
_images(os.path.join(path, files[2]))\r\n test_labels = _labels(os.path.join(path, files[3]))\r\n\r\n return train_images, train_labels, test_images, test_labels", "def test_dataset():\n X,Y = get_MNIST_training_normalized()\n digits_test_truth = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 632, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 166, 0, 0, 0, 0, 0]\n digits_test = []\n for example in itertools.islice(X,30):\n digits_test.append(sum(example[1:100]))\n assert(example.shape == (28*28,))\n\n show_as_image(X[0,:], 28, 28)\n print digits_test\n print digits_test_truth\n assert(digits_test_truth == digits_test)\n assert(X.shape == (60000, 28*28))\n assert(Y.shape == (60000,))\n return \"Dziala :)\"", "def MNIST_data():\n\n # Pobieramy macierze numpy z cyframi\n # images[i,j,k] <=> piksel (j,k) z i-tego obrazka w zbiorze danych\n images, labels = get_MNIST_dataset(range(10), \"training\") #pierwszy argument to\n\n # a) Ilosc przykladow i rozmiary danych\n print \"Raw training data dimensions \", images.shape\n print \"Labels dimensions \",labels.shape\n\n # b) Ile jest cyfr 2?\n print \"Counting 2 in training dataset \",len(filter(lambda x: x == 2, labels))\n\n # c) Jaki jest sredni obrazek 2 ? (Usrednienie wszystkich macierzy ktore sa 2)\n\n #1. Pobierzmy wszystkie dwojki, fajny sposob indeksowania\n print labels == 2\n only_2 = images[labels == 2, :, :]\n print \"Checking number of 2s \", only_2.shape\n\n #2. TODO: Usrednienie (matrix.mean moze byc przydatne)\n\n #3. TODO: narysowanie usrednionej cyfry (zobacz pl.imshow)\n\n # d) Ostatnie - przetworzmy ostatnia cyfre do 1 wymiarowego wektora\n vectorized = np.reshape(images[-1], newshape=(images[-1].shape[0]*images[-1].shape[1]))\n print \"Vectorized last digit \", vectorized", "def test_show_examples():\n skip_if_no_matplotlib()\n skip_if_no_data()\n with open('temp.yaml', 'w') as f:\n f.write(\"\"\"\n!obj:pylearn2.datasets.mnist.MNIST {\n which_set: 'train'\n}\n\"\"\")\n show_examples('temp.yaml', 28, 28, out='garbage.png')\n os.remove('temp.yaml')", "def mnist_training():\n mndata = MNIST(MNIST_PATH)\n train_ims, train_labels = mndata.load_training()\n train_X = np.array(train_ims).T\n train_y = np.array(train_labels).T\n return train_X, train_y", "def create_mnistm(X: Any) -> Any:\n\n bst_path = \"./data/MNIST_M/BSR_bsds500.tgz\"\n\n rand = np.random.RandomState(42)\n train_files = []\n\n with tarfile.open(bst_path, \"r\") as bsr_file:\n for name in bsr_file.getnames():\n if name.startswith(\"BSR/BSDS500/data/images/train/\"):\n train_files.append(name)\n\n print(\"Loading BSR training images\")\n background_data = []\n for name in train_files:\n try:\n fp = bsr_file.extractfile(name)\n bg_img = skimage.io.imread(fp)\n background_data.append(bg_img)\n except:\n continue\n\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\n for i in range(X.shape[0]):\n if i % 1000 == 0:\n print(\"Processing example\", i)\n\n bg_img = rand.choice(background_data)\n d = mnist_to_img(X[i])\n d = compose_image(d, bg_img)\n X_[i] = d\n\n return X_", "def test_load_data(self):\n assert len(self._mnist.get()) == 10\n assert self._mnist.get()[0].label == 7\n pass", "def MNIST_experiment():\n tsetlin_machine = TsetlinMachine(number_clauses=1000,\n number_action_states=1000,\n precision=3.0,\n threshold=10)\n\n X, y, val_X, val_y = MNIST()\n\n tsetlin_machine.fit(X, y, val_X, val_y, 300)\n print('Final training accuracy:', tsetlin_machine.accuracy(X, y))\n print('Final validation accuracy:', tsetlin_machine.accuracy(val_X, val_y))", "def get_mnist():\n from keras.datasets 
import mnist\n\n # input image dimensions\n img_rows, img_cols = 28, 28\n num_classes = 10\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n if K.image_data_format() == 'channels_first':\n print (\"Using Channels first\")\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n print(\"Channels last\")\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n return (x_train, y_train), (x_test, y_test)", "def test_readme_minimal():\n # Data sampler that generates balanced batches from MNIST dataset\n sampler = TFDatasetMultiShotMemorySampler(\n dataset_name='mnist',\n classes_per_batch=10\n )\n\n # Build a Similarity model using standard Keras layers\n inputs = layers.Input(shape=(28, 28, 1))\n x = layers.experimental.preprocessing.Rescaling(1/255)(inputs)\n x = layers.Conv2D(64, 3, activation='relu')(x)\n x = layers.Flatten()(x)\n x = layers.Dense(64, activation='relu')(x)\n outputs = MetricEmbedding(64)(x)\n\n # Build a specialized Similarity model\n model = SimilarityModel(inputs, outputs)\n\n # Train Similarity model using contrastive loss\n model.compile('adam', loss=MultiSimilarityLoss())\n model.fit(sampler, epochs=5)\n\n # Index 100 embedded MNIST examples to make them searchable\n sx, sy = sampler.get_slice(0, 100)\n model.index(x=sx, y=sy, data=sx)\n\n # Find the top 5 most similar indexed MNIST examples for a given example\n qx, qy = sampler.get_slice(3713, 1)\n nns = model.single_lookup(qx[0]) # noqa\n\n # ! don't add viz its block the test in certain env.\n # Visualize the query example and its top 5 neighbors\n # viz_neigbors_imgs(qx[0], qy[0], nns)", "def get_mnist_mlp():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 64\n epochs = 4\n input_shape = (784,)\n\n # Get the data.\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.reshape(60000, 784)\n x_test = x_test.reshape(10000, 784)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def load_mnist(path='./', test_size=0.3, random_state = 123):\n \n np.random.seed(random_state)\n if 'X_train.npy' not in os.listdir(path=path) or 'y_train.npy' not in os.listdir(path=path):\n print (\"Train dataset not found. 
Downloading...\")\n os.system(\"curl -L -o train.zip {}\".format(TRAIN_DATA_LINK))\n os.system(\"unzip train.zip\")\n os.system(\"tar -xf trainingSet.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSet'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSet/{}'.format(class_name)):\n image = imread('./trainingSet/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_train = np.array(images)\n y_train = np.array(labels)\n\n permutation = np.random.permutation(X_train.shape[0])\n X_train = X_train[permutation]\n y_train = y_train[permutation]\n\n with open('X_train.npy', 'wb') as f:\n np.save(f, X_train)\n with open('y_train.npy', 'wb') as f:\n np.save(f, y_train)\n os.system(\"rm -rf trainingSet\")\n os.system(\"rm -rf train.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n else:\n X_train = np.load('X_train.npy')\n y_train = np.load('y_train.npy')\n\n if 'X_test.npy' not in os.listdir(path=path) or 'y_test.npy' not in os.listdir(path=path):\n print (\"Test dataset not found. Downloading...\")\n os.system(\"curl -L -o test.zip {}\".format(TEST_DATA_LINK))\n os.system(\"unzip test.zip\")\n os.system(\"tar -xf trainingSample.tar.gz\")\n images = []\n labels = []\n for class_name in os.listdir('./trainingSample'):\n if 'ipynb' not in class_name and '.DS' not in class_name:\n for image_name in os.listdir('./trainingSample/{}'.format(class_name)):\n image = imread('./trainingSample/{}/{}'.format(class_name, image_name))\n images.append(image)\n labels.append(int(class_name))\n X_test = np.array(images)\n y_test = np.array(labels)\n with open('X_test.npy', 'wb') as f:\n np.save(f, X_test)\n with open('y_test.npy', 'wb') as f:\n np.save(f, y_test)\n\n os.system(\"rm -rf trainingSample\")\n os.system(\"rm -rf test.zip\")\n os.system(\"rm -rf trainingSet.tar.gz\")\n\n else:\n X_test = np.load('X_test.npy')\n y_test = np.load('y_test.npy')\n\n return X_train, X_test, y_train, y_test", "def main():\n # Import or download the mnist data, from target file path.\n mnist = input_data.read_data_sets(\"Data/\", one_hot=True)\n\n # Train and test model.\n train(mnist)", "def test_neuron(self):\r\n # crear una lista 1-D (Horizontal, Entradas).\r\n Z = [1, 2, 3]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Inicializamos la neurona, y obtenemos el valor que toma dado W * Z\r\n # X(k) = W * Z\r\n result = rhonn(W, Z).predict()\r\n # Comprobamos el resultado \r\n self.assertEqual(result, 140)", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.0] * 10\n label[int(data[0])] = 1.0 \n\n # The data are images of 28x28 pixels\n image_array = np.asfarray(data[1:]).reshape((28, 28))\n # Normalize the pictures \n image_array = image_array / 255.0\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield (image_array, label)", "def mnist(path):\n with open(path, 'r') as f:\n for line in f:\n data = line.strip().split(',')\n\n # Label is a vector with one element per class\n label = [0.01] * 10\n label[int(data[0])] = 0.99\n\n # The data are images of 28x28 pixels\n #image_array = np.asfarray(data[1:]).reshape((28, 28))\n image_array = np.asfarray(data[1:])\n # Normalize all values between [0.01, 1.0]\n image_array = ((image_array) / 255.0 * 0.99) + 0.01\n\n #plt.imshow(image_array, cmap='Greys', interpolation='None')\n yield 
(image_array, label)", "def test_documentation_popxl_mnist_rts_train_test(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts --test\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def create_mnistm(X):\r\n X_ = np.zeros([X.shape[0], 28, 28, 3], np.uint8)\r\n for i in range(X.shape[0]):\r\n bg_img = rand.choice(background_data)\r\n d = mnist_to_img(X[i])\r\n d = compose_image(d, bg_img)\r\n X_[i] = d\r\n return X_", "def load_mnist(path='mnist/mnist.npz'):\n\n with np.load(path) as f:\n x_train, y_train = f['x_train'], f['y_train']\n x_test, y_test = f['x_test'], f['y_test']\n x_train = x_train.astype(np.float32) / 255.\n y_train = y_train.astype(np.int32)\n x_test = x_test.astype(np.float32) / 255.\n y_test = y_test.astype(np.int32)\n \n return (x_train, y_train), (x_test, y_test)", "def test_documentation_popxl_mnist_rts_train(self):\n filename = \"mnist_rts.py --replication-factor 2 --rts\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def import_mnist():\n\turl_mnist = \"http://deeplearning.net/data/mnist/mnist.pkl.gz\"\n\tfile_name = \"mnist.pkl.gz\"\n\twork_directory = \"mnist\"\n\tfile_path = maybe_download(url=url_mnist, file_name=file_name, work_directory=work_directory)\n\n\timport pickle\n\twith gzip.open(file_path,'rb') as ff :\n\t\tu = pickle._Unpickler( ff )\n\t\tu.encoding = 'latin1'\n\t\ttrain, val, test = u.load()\n\t\ttrainX = np.array(train[0])\n\t\ttrainY = np.reshape(train[1], [50000, 1])\n\t\tvalX = np.array(val[0])\n\t\tvalY = np.reshape(val[1], [10000, 1])\n\t\ttestX = np.array(test[0])\n\t\ttestY = np.reshape(test[1], [10000, 1])\n\t\ttrainX = np.concatenate((trainX, valX), axis = 0)\n\t\ttrainY = np.concatenate((trainY, valY), axis = 0)\n\treturn trainX, trainY, testX, testY", "def load_mnist_dataset(shape=(-1,784)):\n # We first define a download function, supporting both Python 2 and 3.\n if sys.version_info[0] == 2:\n from urllib import urlretrieve\n else:\n from urllib.request import urlretrieve\n\n def download(filename, source='http://yann.lecun.com/exdb/mnist/'):\n print(\"Downloading %s\" % filename)\n urlretrieve(source + filename, filename)\n\n # We then define functions for loading MNIST images and labels.\n # For convenience, they also download the requested files if needed.\n import gzip\n\n def load_mnist_images(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the inputs in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=16)\n # The inputs are vectors now, we reshape them to monochrome 2D images,\n # following the shape convention: (examples, channels, rows, columns)\n data = data.reshape(shape)\n # data = data.reshape(-1, 1, 28, 28) # for lasagne\n # data = data.reshape(-1, 28, 28, 1) # for tensorflow\n # data = data.reshape(-1, 784) # for tensorflow\n # The inputs come as bytes, we convert them to float32 in range [0,1].\n # (Actually to range [0, 255/256], for compatibility to the version\n # provided at http://deeplearning.net/data/mnist/mnist.pkl.gz.)\n return data / np.float32(256)\n\n def load_mnist_labels(filename):\n if not os.path.exists(filename):\n download(filename)\n # Read the labels in Yann LeCun's binary format.\n with gzip.open(filename, 'rb') as f:\n data = np.frombuffer(f.read(), np.uint8, offset=8)\n # The labels are vectors of integers now, that's exactly what we want.\n return data\n\n # We can now download and read the training and test set images and 
labels.\n ## you may want to change the path\n data_dir = '' #os.getcwd() + '/lasagne_tutorial/'\n # print('data_dir > %s' % data_dir)\n\n X_train = load_mnist_images(data_dir+'train-images-idx3-ubyte.gz')\n y_train = load_mnist_labels(data_dir+'train-labels-idx1-ubyte.gz')\n X_test = load_mnist_images(data_dir+'t10k-images-idx3-ubyte.gz')\n y_test = load_mnist_labels(data_dir+'t10k-labels-idx1-ubyte.gz')\n\n # We reserve the last 10000 training examples for validation.\n X_train, X_val = X_train[:-10000], X_train[-10000:]\n y_train, y_val = y_train[:-10000], y_train[-10000:]\n\n ## you may want to plot one example\n # print('X_train[0][0] >', X_train[0][0].shape, type(X_train[0][0])) # for lasagne\n # print('X_train[0] >', X_train[0].shape, type(X_train[0])) # for tensorflow\n # # exit()\n # # [[..],[..]] (28, 28) numpy.ndarray\n # # plt.imshow 只支持 (28, 28)格式,不支持 (1, 28, 28),所以用 [0][0]\n # fig = plt.figure()\n # #plotwindow = fig.add_subplot(111)\n # # plt.imshow(X_train[0][0], cmap='gray') # for lasagne (-1, 1, 28, 28)\n # plt.imshow(X_train[0].reshape(28,28), cmap='gray') # for tensorflow (-1, 28, 28, 1)\n # plt.title('A training image')\n # plt.show()\n\n # We just return all the arrays in order, as expected in main().\n # (It doesn't matter how we do this as long as we can read them again.)\n return X_train, y_train, X_val, y_val, X_test, y_test", "def test_machine_learning():", "def test_documentation_popxl_mnist_replication_train(self):\n filename = \"mnist_rts.py --replication-factor 2\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def get_mnist(self):\n # Set defaults.\n nb_classes = 10\n batch_size = 100\n input_shape = (784,)\n\n # Get the data.\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n x_train = x_train.reshape(60000, 784)\n x_test = x_test.reshape(10000, 784)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test)", "def readMNISTData():\n mnist = input_data.read_data_sets(\"MNIST_data\",one_hot=True) \n return mnist", "def load_mnist(path, kind='train'):\n '''ref: http://yann.lecun.com/exdb/mnist/ '''\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte'\n % kind)\n\n # check the offical doc to know how to extract the content\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000801(2049) magic number (MSB first)\n 0004 32 bit integer 60000 number of items\n 0008 unsigned byte ?? label\n 0009 unsigned byte ?? label\n ........\n xxxx unsigned byte ?? label\n The labels values are 0 to 9.\n '''\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n\n '''\n [offset] [type] [value] [description]\n 0000 32 bit integer 0x00000803(2051) magic number\n 0004 32 bit integer 60000 number of images\n 0008 32 bit integer 28 number of rows\n 0012 32 bit integer 28 number of columns\n 0016 unsigned byte ?? pixel\n 0017 unsigned byte ?? pixel\n ........\n xxxx unsigned byte ?? pixel\n Pixels are organized row-wise. Pixel values are 0 to 255. 
0 means background (white), 255 means foreground (black).\n '''\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n ''' each hand write is 28x28 = 784, a 1 dim vector'''\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n\n return images, labels", "def load_mnist(dataset=\"training\", digits=np.arange(10), path=\".\"):\n\n if dataset == \"training\":\n fname_img = os.path.join(path, 'train-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 'train-labels-idx1-ubyte')\n elif dataset == \"testing\":\n fname_img = os.path.join(path, 't10k-images-idx3-ubyte')\n fname_lbl = os.path.join(path, 't10k-labels-idx1-ubyte')\n else:\n raise ValueError(\"dataset must be 'testing' or 'training'\")\n\n flbl = open(fname_lbl, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n lbl = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(fname_img, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n img = pyarray(\"B\", fimg.read())\n fimg.close()\n\n ind = [ k for k in range(size) if lbl[k] in digits ]\n N = len(ind)\n\n images = zeros((N, rows, cols), dtype=uint8)\n labels = zeros((N, 1), dtype=int8)\n for i in range(len(ind)):\n images[i] = array(img[ ind[i]*rows*cols : (ind[i]+1)*rows*cols ]).reshape((rows, cols))\n labels[i] = lbl[ind[i]]\n\n return images, labels", "def test_train():\n set_seed(42) # Noqa\n transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n mnist_train = MNIST(\"./\", download=True, train=False, transform=transform)\n model = SimpleNet()\n\n optimizer = torch.optim.SGD(model.parameters(), lr=1e-2, momentum=0.5)\n criterion = nn.CrossEntropyLoss()\n\n train_loader = DataLoader(mnist_train, batch_size=64, shuffle=True,\n num_workers=0)\n loss, accuracy = train(model, optimizer, criterion, train_loader,\n imshape=(-1, 28*28))\n\n assert type(loss) == torch.Tensor\n assert type(accuracy) == np.float64\n assert len(loss.shape) == 0", "def test_mnist():\n skip_if_no_data()\n mode = get_default_mode()\n if hasattr(mode, 'check_py_code'):\n old_value = mode.check_py_code\n mode.check_py_code = False\n try:\n if config.mode == \"DEBUG_MODE\":\n yaml_file = 'mnist_fast'\n else:\n yaml_file = 'mnist'\n limited_epoch_train(os.path.join(yaml_file_path, '%s.yaml'\n % yaml_file))\n try:\n os.remove(os.path.join(save_path, '%s.pkl' % yaml_file))\n os.remove(os.path.join(save_path, '%s_best.pkl' % yaml_file))\n except Exception:\n pass\n finally:\n if hasattr(mode, 'check_py_code'):\n mode.check_py_code = old_value", "def main():\n\n dataset = ConvMNIST(64)\n print(dataset.get_train().x.shape)\n\n\n inputs = Value(type=tf.float32, shape=(None, 28, 28, 1), cls = None)\n targets = Value(type=tf.int64, shape=(None), cls = 10)\n learning_rate = 0.0001\n\n fc_hidden = [1024, 500]\n c_h = [\n (3, 3, 1, 32),\n (3, 3, 32, 64)\n ]\n conv_hidden = ConvHidden(conv_weights=c_h, fc_weights=fc_hidden)\n\n config = Config(inputs, targets, conv_hidden, learning_rate)\n\n network = ConvNetworkBuilder(config)\n hidden = FFConvHiddenBuilder()\n _ = network.build_network(hidden)\n\n\n train_config = TrainerConfig(\n epochs = EPOCHS, display_after = DISPLAY_STEP, \n keep_prob = KEEP_PROB,checkpoint_path=None, \n summary_path=None\n )\n\n trainer = Trainer(network, train_config)\n trainer.train(dataset)", "def test_X_test_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_test.equals(atom.mnb.X_test)\n assert 
check_scaling(atom.lr.X_test)", "def cg_optimization_mnist(n_epochs=50, mnist_pkl_gz='mnist.pkl.gz'):\r\n #############\r\n # LOAD DATA #\r\n #############\r\n datasets = load_data(mnist_pkl_gz)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n batch_size = 600 # size of the minibatch\r\n\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ishape = (28, 28) # this is the size of MNIST images\r\n n_in = 28 * 28 # number of input units\r\n n_out = 10 # number of output units\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n minibatch_offset = T.lscalar() # offset to the start of a [mini]batch\r\n x = T.matrix() # the data is presented as rasterized images\r\n y = T.ivector() # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n # construct the logistic regression class\r\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model in symbolic format\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compile a theano function that computes the mistakes that are made by\r\n # the model on a minibatch\r\n test_model = theano.function([minibatch_offset], classifier.errors(y),\r\n givens={\r\n x: test_set_x[minibatch_offset:minibatch_offset + batch_size],\r\n y: test_set_y[minibatch_offset:minibatch_offset + batch_size]},\r\n name=\"test\")\r\n\r\n validate_model = theano.function([minibatch_offset], classifier.errors(y),\r\n givens={\r\n x: valid_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: valid_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"validate\")\r\n\r\n # compile a thenao function that returns the cost of a minibatch\r\n batch_cost = theano.function([minibatch_offset], cost,\r\n givens={\r\n x: train_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: train_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"batch_cost\")\r\n\r\n # compile a theano function that returns the gradient of the minibatch\r\n # with respect to theta\r\n batch_grad = theano.function([minibatch_offset],\r\n T.grad(cost, classifier.theta),\r\n givens={\r\n x: train_set_x[minibatch_offset:\r\n minibatch_offset + batch_size],\r\n y: train_set_y[minibatch_offset:\r\n minibatch_offset + batch_size]},\r\n name=\"batch_grad\")\r\n\r\n # creates a function that computes the average cost on the training set\r\n def train_fn(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n train_losses = [batch_cost(i * batch_size)\r\n for i in xrange(n_train_batches)]\r\n return numpy.mean(train_losses)\r\n\r\n # creates a function that computes the average gradient of cost with\r\n # respect to theta\r\n def train_fn_grad(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n grad = batch_grad(0)\r\n for i in xrange(1, n_train_batches):\r\n grad += batch_grad(i * batch_size)\r\n return grad / n_train_batches\r\n\r\n validation_scores = [numpy.inf, 0]\r\n\r\n # creates the validation function\r\n def callback(theta_value):\r\n classifier.theta.set_value(theta_value, borrow=True)\r\n 
#compute the validation loss\r\n validation_losses = [validate_model(i * batch_size)\r\n for i in xrange(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n print('validation error %f %%' % (this_validation_loss * 100.,))\r\n\r\n # check if it is better then best validation score got until now\r\n if this_validation_loss < validation_scores[0]:\r\n # if so, replace the old one, and compute the score on the\r\n # testing dataset\r\n validation_scores[0] = this_validation_loss\r\n test_losses = [test_model(i * batch_size)\r\n for i in xrange(n_test_batches)]\r\n validation_scores[1] = numpy.mean(test_losses)\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n\r\n # using scipy conjugate gradient optimizer\r\n import scipy.optimize\r\n print (\"Optimizing using scipy.optimize.fmin_cg...\")\r\n start_time = time.clock()\r\n best_w_b = scipy.optimize.fmin_cg(\r\n f=train_fn,\r\n x0=numpy.zeros((n_in + 1) * n_out, dtype=x.dtype),\r\n fprime=train_fn_grad,\r\n callback=callback,\r\n disp=0,\r\n maxiter=n_epochs)\r\n end_time = time.clock()\r\n print(('Optimization complete with best validation score of %f %%, with '\r\n 'test performance %f %%') %\r\n (validation_scores[0] * 100., validation_scores[1] * 100.))\r\n\r\n print >> sys.stderr, ('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.1fs' % ((end_time - start_time)))", "def train_mnist():\r\n # type: () -> None\r\n\r\n # Build dataset and model\r\n dataset = MNIST_TRAIN(path=Config.video_folder)\r\n model = LSAMNIST(input_shape=dataset.shape, code_length=64,\r\n cpd_channels=100).to(device).train()\r\n\r\n # Set up result helper and perform test\r\n helper = OneClassResultHelper(dataset, model,\r\n checkpoints_dir=Config.model_ckpt,\r\n output_file='mnist.txt')\r\n helper.train_one_class_classification()", "def test_custom_relu_mnist():\n loss1 = mnist()\n loss2 = custom_mnist()\n assert np.allclose(loss1, loss2, equal_nan=True)", "def pick_data(ns, digits):\n f = gzip.open('data/mnist.pkl.gz', 'rb')\n train_set, valid_set, test_set = cPickle.load(f)\n f.close()\n images, labels = train_set\n\n originals = []; \n shapes = []; \n true_labels = [];\n i = 0\n for n, d in zip(ns, digits):\n # picking n elements with digit d\n x = np.where(labels==d)[0]\n idx = np.random.choice(x, n, replace=False)\n imgs = images[idx]\n originals.append(imgs)\n contours = [mnistshape.get_shape2(im.reshape((28,28)), n=30, s=5, ir=2)\n for im in imgs]\n shapes.append(contours)\n true_labels.append([i]*n)\n i += 1\n originals = np.concatenate(originals)\n true_labels = np.concatenate(true_labels)\n \n new_shapes = []\n for cluster in shapes:\n for shape in cluster:\n new_shapes.append(shape)\n new_shapes = np.array(new_shapes)\n\n # return shuffled data\n idx = range(len(originals))\n np.random.shuffle(idx)\n return originals[idx], new_shapes[idx], true_labels[idx]", "def test_Gaussian_NB_estimators():", "def test_n_iris(self):\r\n n = NeuronNetwork(1,\r\n [3],\r\n [[[0.2,0.2,0.2,0.2]]*3],\r\n [[-1.0,-1.0,-1.0]],learningRate=0.3)\r\n print(n)\r\n \r\n data = load_iris()\r\n\r\n inputs = data.data\r\n target = []\r\n for x in data.target:\r\n empty = [0,0,0]\r\n empty[x] = 1\r\n target.append(empty)\r\n \r\n n.train(inputs, target, 2000, 10*60)\r\n print(n)\r\n\r\n total = 0\r\n error = 0\r\n for i, x in enumerate(target, 0):\r\n out = n.feed_forward(inputs[i])\r\n if i < 50:\r\n error += self.mse(out, [1,0,0])\r\n if np.argmax(out) == 0:\r\n total +=1\r\n print(i, out, 1)\r\n elif i >= 50 and i < 
100:\r\n error += self.mse(out, [0,1,0])\r\n if np.argmax(out) == 1:\r\n total +=1\r\n print(i, out, 2)\r\n elif i >= 100 and i < 150:\r\n error += self.mse(out, [0,0,1])\r\n if np.argmax(out) == 2:\r\n total +=1\r\n print(i, out, 3)\r\n\r\n print(f'MSE: {error/150}, RMSE:{math.sqrt(error/150)}')\r\n print(f'Accuracy:{total/len(target)}')", "def load_data():\n f = gzip.open('mnist.pkl.gz', 'rb')\n training_data, validation_data, test_data = pickle.load(f, encoding=\"latin1\")\n f.close()\n \n X_train = [np.reshape(x, (784, 1)) for x in training_data[0]]\n Y_train = [vectorized_result(y) for y in training_data[1]]\n \n X_validation = [np.reshape(x, (784, 1)) for x in validation_data[0]]\n Y_validation = validation_data[1]\n \n X_test = [np.reshape(x, (784, 1)) for x in test_data[0]]\n Y_test = test_data[1]\n \n return (X_train, Y_train, X_validation, Y_validation, X_test, Y_test)", "def load_data():\n # Load image data from MNIST.\n (train_x, train_y),(eval_x, eval_y) = keras.datasets.mnist.load_data()\n\n # We convert the input data to (60000, 28, 28, 1), float32 and normalize our data values to the range [0, 1].\n train_x = train_x.reshape(train_x.shape[0], train_x.shape[1], train_x.shape[2], 1)\n eval_x = eval_x.reshape(eval_x.shape[0], eval_x.shape[1], eval_x.shape[2], 1)\n\n train_x = train_x.astype('float32')\n eval_x = eval_x.astype('float32')\n train_x /= 255\n eval_x /= 255\n\n # Preprocess class labels \n train_y = train_y.astype(np.int32)\n eval_y = eval_y.astype(np.int32)\n\n train_y = np_utils.to_categorical(train_y, 10)\n eval_y = np_utils.to_categorical(eval_y, 10)\n\n return train_x, train_y, eval_x, eval_y", "def main():\n # construct the argument parse and parse the arguments\n args = argparse.ArgumentParser()\n args.add_argument(\"-o\", \"--output\", required=True, help=\"path to the output loss/accuracy plot\")\n args = vars(args.parse_args())\n\n # grab the MNIST dataset (if this is your first time using this\n # dataset then the 11MB download may take a minute)\n print(\"[INFO] accessing MNIST...\")\n ((train_x, train_y), (test_x, test_y)) = mnist.load_data()\n\n # each image in the MNIST dataset is represented as a 28x28x1\n # image, but in order to apply a standard neural network we must\n # first \"flatten\" the image to be simple list of 28x28=784 pixels\n train_x = train_x.reshape((train_x.shape[0], 28 * 28 * 1))\n test_x = test_x.reshape((test_x.shape[0], 28 * 28 * 1))\n # scale data to the range of [0, 1]\n train_x = train_x.astype(\"float32\") / 255.0\n test_x = test_x.astype(\"float32\") / 255.0\n\n # convert the labels from integers to vectors\n label_binarizer = LabelBinarizer()\n train_y = label_binarizer.fit_transform(train_y)\n test_y = label_binarizer.transform(test_y)\n\n # define the 784-256-128-10 architecture using Keras\n model = Sequential()\n model.add(Dense(256, input_shape=(784,), activation=\"sigmoid\"))\n model.add(Dense(128, activation=\"sigmoid\"))\n model.add(Dense(10, activation=\"softmax\"))\n\n # train the model using SGD\n print(\"[INFO] training network...\")\n sgd = SGD(0.01)\n model.compile(loss=\"categorical_crossentropy\", optimizer=sgd, metrics=[\"accuracy\"])\n model_fit = model.fit(train_x, train_y, validation_data=(test_x, test_y), epochs=100, batch_size=128)\n\n # evaluate the network\n print(\"[INFO] evaluating network...\")\n predictions = model.predict(test_x, batch_size=128)\n print(\n classification_report(\n test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=[str(x) for x in label_binarizer.classes_]\n )\n 
)\n\n # plot the training loss and accuracy\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(np.arange(0, 100), model_fit.history[\"loss\"], label=\"train_loss\")\n plt.plot(np.arange(0, 100), model_fit.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(np.arange(0, 100), model_fit.history[\"acc\"], label=\"train_acc\")\n plt.plot(np.arange(0, 100), model_fit.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy\")\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n plt.savefig(args[\"output\"])", "def get_data(numbers):\r\n numbers = numbers\r\n n_classes = len(numbers)\r\n z = zipfile.ZipFile('lab3/mnist.pkl.zip', 'r')\r\n k = z.extract('mnist.pkl') # Extract the file from the archive\r\n with open(k, 'rb') as f:\r\n train_set, _, test_set = pickle.load(f, encoding=\"bytes\")\r\n x_train = train_set[0]\r\n x_test = test_set[0]\r\n x_train[x_train >= 0.5] = 1\r\n x_train[x_train < 0.5] = 0\r\n x_test[x_test >= 0.5] = 1\r\n x_test[x_test < 0.5] = 0\r\n y_train = train_set[1]\r\n y_test = test_set[1]\r\n idx_train = [[np.where(y_train == i)] for i in numbers]\r\n idx_test = [[np.where(y_test == i)] for i in numbers]\r\n idx_x_train = [x_train[idx_train[i][0]] for i in range(len(idx_train))]\r\n idx_x_test = [x_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n idx_y_test = [y_test[idx_test[i][0]] for i in range(len(idx_test))]\r\n x_train_new = shuffle(np.concatenate(idx_x_train))\r\n x_test_new = shuffle(np.concatenate(idx_x_test))\r\n y_test_new = shuffle(np.concatenate(idx_y_test))\r\n return x_train_new, x_test_new, y_test_new, numbers, n_classes", "def test_model_evaluation(model, mnist, idx, label):\n expected_probabilities = np.zeros((10,))\n expected_probabilities[label] = 1.0\n assert_array_almost_equal(\n model.classify(mnist.get_test_image(idx)),\n expected_probabilities\n )", "def load_data(m=5000, n=100, path='D:/file/vscode/py/data/mnist.npz'):\r\n f = np.load(path)\r\n x_train, y_train = f['x_train'], f['y_train']\r\n\r\n x_test, y_test = f['x_test'], f['y_test']\r\n\r\n f.close()\r\n return (x_train, y_train), (x_test, y_test)", "def __init__(self):\n\n TEST_RATIO = 0.05\n mnist_trainset = datasets.MNIST(root='./data', train=True, download=True, transform=None)\n idxs = np.arange(mnist_trainset.train_data.size(0))\n np.random.shuffle(idxs)\n\n #print(torch.min(mnist_trainset.train_labels), torch.max(mnist_trainset.train_labels))\n #print(mnist_trainset.train_labels.size())\n \n # reshape input data to (N, 1, 28, 28) and normalize to range [0., 1.]\n self.train_data = torch.reshape(\n mnist_trainset.train_data[idxs].float(), (-1,1,28,28))/255.\n self.data_size = self.train_data.size(0)\n self.train_len = self.train_data.size(0)\n self.train_label = torch.Tensor([1]).float() # since there is only one class - 'real' image\n\n print('Train images -- {}'.format(self.train_data.size()))", "def test_menhinick(self):\n self.assertEqual(menhinick(self.TestData), 9/sqrt(22))", "def get_mnist_cnn():\n # Set defaults.\n nb_classes = 10 #dataset dependent \n batch_size = 128\n epochs = 4\n \n # Input image dimensions\n img_rows, img_cols = 28, 28\n\n # Get the data.\n # the data, shuffled and split between train and test sets\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n \n if K.image_data_format() == 'channels_first':\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n input_shape = (1, img_rows, img_cols)\n else:\n x_train = 
x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n input_shape = (img_rows, img_cols, 1)\n\n #x_train = x_train.reshape(60000, 784)\n #x_test = x_test.reshape(10000, 784)\n \n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n #print('x_train shape:', x_train.shape)\n #print(x_train.shape[0], 'train samples')\n #print(x_test.shape[0], 'test samples')\n\n # convert class vectors to binary class matrices\n y_train = to_categorical(y_train, nb_classes)\n y_test = to_categorical(y_test, nb_classes)\n\n # convert class vectors to binary class matrices\n #y_train = keras.utils.to_categorical(y_train, nb_classes)\n #y_test = keras.utils.to_categorical(y_test, nb_classes)\n\n return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test, epochs)", "def get_mini_samples():\n if GLOBALS['project_root']=='':\n print('please initialize project_root in GLOBALS first')\n return None\n data_path = os.path.join(GLOBALS['project_root'], 'data/MNIST/')\n pickle_path = os.path.join(data_path, 'mnist_mini_samples.pickle')\n if os.path.exists(pickle_path):\n with open(pickle_path, 'rb') as f:\n mini_samples = pickle.load(f)\n else:\n mnist = get_mnist()\n mini_samples = mnist.train.next_batch(50)\n with open(pickle_path, 'wb') as f:\n pickle.dump(mini_samples, f, pickle.HIGHEST_PROTOCOL)\n\n return mini_samples", "def main():\n training_data, validation_data, test_data = mnist.load()\n\n model = nn.NeuralNetwork([784, 100, 10], learning_rate=0.01, batch_size=50)\n\n model_training = training.EarlyStoppingRegularization(model,\n training_data,\n validation_data,\n test_data,\n max_steps_without_progression=2)\n result = model_training.train()\n\n result.save('models/mnist')", "def test_X_train_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X_train.equals(atom.mnb.X_train)\n assert check_scaling(atom.lr.X_train)", "def sgd_optimization_mnist(learning_rate=0.13, n_epochs=1000,\r\n dataset='mnist.pkl.gz',\r\n batch_size=600):\r\n datasets = load_data(dataset)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print '... 
building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n # construct the logistic regression class\r\n # Each MNIST image has size 28*28\r\n classifier = LogisticRegression(input=x, n_in=28 * 28, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model in symbolic format\r\n cost = classifier.negative_log_likelihood(y)\r\n\r\n # compiling a Theano function that computes the mistakes that are made by\r\n # the model on a minibatch\r\n test_model = theano.function(inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: test_set_x[index * batch_size: (index + 1) * batch_size],\r\n y: test_set_y[index * batch_size: (index + 1) * batch_size]})\r\n\r\n validate_model = theano.function(inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\r\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]})\r\n\r\n # compute the gradient of cost with respect to theta = (W,b)\r\n g_W = T.grad(cost=cost, wrt=classifier.W)\r\n g_b = T.grad(cost=cost, wrt=classifier.b)\r\n\r\n # specify how to update the parameters of the model as a list of\r\n # (variable, update expression) pairs.\r\n updates = [(classifier.W, classifier.W - learning_rate * g_W),\r\n (classifier.b, classifier.b - learning_rate * g_b)]\r\n\r\n # compiling a Theano function `train_model` that returns the cost, but in\r\n # the same time updates the parameter of the model based on the rules\r\n # defined in `updates`\r\n train_model = theano.function(inputs=[index],\r\n outputs=cost,\r\n updates=updates,\r\n givens={\r\n x: train_set_x[index * batch_size:(index + 1) * batch_size],\r\n y: train_set_y[index * batch_size:(index + 1) * batch_size]})\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n print '... 
training the model'\r\n # early-stopping parameters\r\n patience = 5000 # look at this many examples regardless\r\n patience_increase = 2 # wait this much longer when a new best is\r\n # found\r\n improvement_threshold = 0.995 # a relative improvement of this much is\r\n # considered significant\r\n validation_frequency = min(n_train_batches, patience / 2)\r\n # go through this many\r\n # minibatches before checking the network\r\n # on the validation set; in this case we\r\n # check every epoch\r\n\r\n best_params = None\r\n best_validation_loss = numpy.inf\r\n test_score = 0.\r\n start_time = time.clock()\r\n\r\n done_looping = False\r\n epoch = 0\r\n while (epoch < n_epochs) and (not done_looping):\r\n epoch = epoch + 1\r\n for minibatch_index in xrange(n_train_batches):\r\n\r\n minibatch_avg_cost = train_model(minibatch_index)\r\n # iteration number\r\n iter = (epoch - 1) * n_train_batches + minibatch_index\r\n\r\n if (iter + 1) % validation_frequency == 0:\r\n # compute zero-one loss on validation set\r\n validation_losses = [validate_model(i)\r\n for i in xrange(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n\r\n print('epoch %i, minibatch %i/%i, validation error %f %%' % \\\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n this_validation_loss * 100.))\r\n\r\n # if we got the best validation score until now\r\n if this_validation_loss < best_validation_loss:\r\n #improve patience if loss improvement is good enough\r\n if this_validation_loss < best_validation_loss * \\\r\n improvement_threshold:\r\n patience = max(patience, iter * patience_increase)\r\n\r\n best_validation_loss = this_validation_loss\r\n # test it on the test set\r\n\r\n test_losses = [test_model(i)\r\n for i in xrange(n_test_batches)]\r\n test_score = numpy.mean(test_losses)\r\n\r\n print((' epoch %i, minibatch %i/%i, test error of best'\r\n ' model %f %%') %\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n test_score * 100.))\r\n\r\n if patience <= iter:\r\n done_looping = True\r\n break\r\n\r\n end_time = time.clock()\r\n print(('Optimization complete with best validation score of %f %%, '\r\n 'with test performance %f %%') %\r\n (best_validation_loss * 100., test_score * 100.))\r\n print 'The code ran for %d epochs, with %f epochs/sec' % (\r\n epoch, 1. 
* epoch / (end_time - start_time))\r\n print >> sys.stderr, ('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.1fs' % ((end_time - start_time)))", "def neural_network(X, Y, Xs_test, Ys_test):\n ## YOUR CODE HERE\n #################\n return 0", "def use_mnist_model(self):\n\n\t\t# load the model\n\t\tnumber_recognizer_MNIST = load_model('models/MNIST_digits_recognition.h5', compile=False)\n\n\t\t# create empty ndarray\n\t\tnumbers_mnist = np.ones(shape=(self.sudoku_size, self.sudoku_size))\n\n\t\tpics = deepcopy(self.list_of_number_pictures)\n\t\tfor i in range(self.sudoku_size):\n\t\t\tfor j in range(self.sudoku_size):\n\t\t\t\tpics[i][j] = self.preprocess_cell(pics[i][j], mnist=True, resize=True, clean_remains=True)\n\t\t\t\tif self.empty_cells[i][j] != 0:\n\t\t\t\t\tnumbers_mnist[i][j] = np.argmax(number_recognizer_MNIST.predict([[pics[i][j].reshape(28,28,1)]]))\n\n\t\treturn numbers_mnist", "def test_star():\n test_path = tempfile.mkdtemp()\n x_train, metadata = star(test_path)\n try:\n assert x_train.shape == (5748, 8)\n except:\n shutil.rmtree(test_path)\n raise", "def mnist_v1(batch_size=128, epochs=20, kernel_size=3):\n (X_train, Y_train), (X_test, Y_test) = mnist.load_data()\n\n # Data preparation\n X_train = prepare(X_train)\n X_test = prepare(X_test)\n Y_train = np_utils.to_categorical(Y_train, 10) # 0..9\n Y_test = np_utils.to_categorical(Y_test, 10) # 0..9\n\n # Fitting the data to the augmentation data generator\n datagen = augmentedData(X_train)\n\n # --------------------\n # NEURAL NETWORK MODEL\n # --------------------\n\n # Model architecture\n model = Sequential()\n\n model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu', input_shape=(1, 28, 28)))\n model.add(Conv2D(32, (kernel_size, kernel_size), activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n model.add(Dropout(0.25))\n\n model.add(Flatten())\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(10, activation='softmax'))\n\n # Model compilation\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n #Tensor board saves\n now = datetime.datetime.now()\n tensorboard = TensorBoard(log_dir=\"logs_first/kernel_size:{}\".format(kernel_size))\n\n model.fit_generator(datagen.flow(X_train, Y_train, batch_size=batch_size), epochs=epochs, verbose=1, callbacks=[tensorboard])\n\n # Model saves\n now = datetime.datetime.now()\n model.save(\"sirr_HYPERPARAMETERS_mnist_first_\" + str(now.hour) + \"h\" + str(now.minute) + \".h5\")\n\n # Model evaluation\n return model.evaluate(X_test, Y_test, verbose=1)", "def fetch_mnist():\n data_path = check_fetch_mnist()\n f = gzip.open(data_path, 'rb')\n try:\n train_set, valid_set, test_set = pickle.load(f, encoding=\"latin1\")\n except TypeError:\n train_set, valid_set, test_set = pickle.load(f)\n f.close()\n train_indices = np.arange(0, len(train_set[0]))\n valid_indices = np.arange(0, len(valid_set[0])) + train_indices[-1] + 1\n test_indices = np.arange(0, len(test_set[0])) + valid_indices[-1] + 1\n return {\"data\": np.concatenate((train_set[0], valid_set[0], test_set[0]),\n axis=0).astype(theano.config.floatX),\n \"target\": np.concatenate((train_set[1], valid_set[1], test_set[1]),\n axis=0).astype(np.int32),\n \"train_indices\": train_indices.astype(np.int32),\n \"valid_indices\": valid_indices.astype(np.int32),\n \"test_indices\": test_indices.astype(np.int32)}", "def test_predictor():", "def wild_test(img, mod):\n img = cv2.imread(img)\n img_gray = cv2.cvtColor(img, 
cv2.COLOR_RGB2GRAY)\n img_resize = cv2.resize(img_gray, (28, 28))\n img_resize = img_resize.reshape((1, 28, 28))\n print (\"Image size\", img_resize.shape)\n # it is ugly, you can make this much better\n data = np.asarray([img_resize]*100)\n test_iter = mx.io.NDArrayIter(data, None, 100)\n prob = mod.predict(test_iter)\n print (\"The prediction is :\", np.argmax(prob.asnumpy()[0]))", "def test_X_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run([\"MNB\", \"LR\"])\n assert atom.X.equals(atom.mnb.X)\n assert check_scaling(atom.lr.X)", "def testvis(layers='first'):\n\n\tfrom scipy.io import loadmat\n\tfrom setup import NeuralNetwork as nnsetup\n\n\tresult = loadmat(\"goodmatx.mat\")\n\tw1 = result['v1']\n\tw0 = result['v0']\n\tx,y = result['train_x'], result['train_y']\n\n\t# result = loadmat(\"ducky.mat\")\n\t# x = result['train_x']\n\t# y = result['train_y']\n\n\tsize = [x.shape[1], 1000, y.shape[1]]\n\n\tnn = nnsetup([size[0],size[1],size[0]],output='sigm')\n\n\tnn.W[0] = w0\n\tnn.W[1] = w1\n\t\n\tfor i in range(50):\n\t\tvisualize( nn, x, k = 3000+i*4, layers=layers, mode='save' )", "def main():\n # \"\"\"Prepare neuromorphic MNIST image datasets for use in caffe\n # Each dataset will be generated with different number of unique spikes\n # \"\"\"\n # initial_size = 1e6 #best to make this big enough avoid expensive\n # re-allocation\n # test_dir = os.path.abspath('testFull')\n # train_dir = os.path.abspath('trainFull')\n\n # for num_spikes in range(150, 260, 10):\n # #test directory\n # image_dataset = generate_nmnist_dataset(initial_size, test_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'testlmdb' + str(num_spikes)\n # database = save_to_lmdb(image_dataset, output_lmdb)\n # #database.process_all_data(show_lmdb_datum)\n\n # #train directory\n # image_dataset = generate_nmnist_dataset(initial_size, train_dir,\n # num_spikes, 0.75)\n # output_lmdb = 'trainlmdb' + str(num_spikes)\n # save_to_lmdb(image_dataset, output_lmdb)\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))\n # best to make this big enough avoid expensive re-allocation\n initial_size = 6e5\n test_dir = os.path.abspath('testFull')\n train_dir = os.path.abspath('trainFull')\n\n # test directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, test_dir)\n save_to_lmdb(image_dataset, 'testlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_test.mat')\n # database.process_all_data(show_lmdb_datum)\n\n # train directory\n image_dataset = generate_nmnist_continuous_dataset(initial_size, train_dir)\n save_to_lmdb(image_dataset, 'trainlmdb_continuous', True)\n save_to_mat(image_dataset, 'MNIST_continuous_train.mat')\n\n # TD = ev.read_dataset(os.path.abspath('trainReduced/0/00002.bin'))", "def codeepneat_mnist_example(_):\n # Set standard configuration specific to TFNE but not the neuroevolution process\n logging_level = logging.INFO\n config_file_path = './codeepneat_mnist_example_config.cfg'\n backup_dir_path = './tfne_state_backups/'\n max_generations = 20\n max_fitness = None\n\n # Read in optionally supplied flags, changing the just set standard configuration\n if flags.FLAGS.logging_level is not None:\n logging_level = flags.FLAGS.logging_level\n if flags.FLAGS.config_file is not None:\n config_file_path = flags.FLAGS.config_file\n if flags.FLAGS.backup_dir is not None:\n backup_dir_path = flags.FLAGS.backup_dir\n if flags.FLAGS.max_generations is not None:\n max_generations = flags.FLAGS.max_generations\n if flags.FLAGS.max_fitness is not 
None:\n max_fitness = flags.FLAGS.max_fitness\n\n # Set logging, parse config\n logging.set_verbosity(logging_level)\n config = tfne.parse_configuration(config_file_path)\n\n # Initialize the environment and the specific NE algorithm\n environment = tfne.environments.MNISTEnvironment(weight_training=True, config=config, verbosity=logging_level)\n ne_algorithm = tfne.algorithms.CoDeepNEAT(config)\n\n # Initialize evolution engine and supply config as well as initialized NE algorithm and evaluation environment.\n engine = tfne.EvolutionEngine(ne_algorithm=ne_algorithm,\n environment=environment,\n backup_dir_path=backup_dir_path,\n max_generations=max_generations,\n max_fitness=max_fitness)\n\n # Start training process, returning the best genome when training ends\n best_genome = engine.train()\n print(\"Best genome returned by evolution:\\n\")\n print(best_genome)\n\n # Increase epoch count in environment for a final training of the best genome. Train the genome and then replay it.\n print(\"Training best genome for 200 epochs...\\n\")\n environment.epochs = 20\n environment.eval_genome_fitness(best_genome)\n environment.replay_genome(best_genome)\n\n # Serialize and save genotype and Tensorflow model to demonstrate serialization\n best_genome.save_genotype(save_dir_path='./best_genome_genotype/')\n best_genome.save_model(file_path='./best_genome_model/')", "def mnist_noniid(dataset, num_users):\n # num_shards, num_imgs = 2*num_users, int(dataset.data.size()[0]/2/num_users) # choose two number from a set with num_shards, each client has 2*num_imgs images\n # idx_shard = [i for i in range(num_shards)]\n # dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n # idxs = np.arange(dataset.data.size()[0])\n # labels = dataset.train_labels.numpy()\n #\n # # sort labels\n # idxs_labels = np.vstack((idxs, labels))\n # idxs_labels = idxs_labels[:,idxs_labels[1,:].argsort()]\n # idxs = idxs_labels[0,:]\n #\n # # divide and assign\n # for i in range(num_users):\n # rand_set = set(np.random.choice(idx_shard, 2, replace=False))\n # idx_shard = list(set(idx_shard) - rand_set)\n # for rand in rand_set:\n # dict_users[i] = np.concatenate((dict_users[i], idxs[rand*num_imgs:(rand+1)*num_imgs]), axis=0)\n # return dict_users\n\n label_list = dataset.targets.numpy()\n minLabel = min(label_list)\n numLabels = len(dataset.classes)\n\n dict_users = {i: np.array([], dtype='int64') for i in range(num_users)}\n for i in range(0, len(label_list)):\n tmp_target_node = int((label_list[i] - minLabel) % num_users)\n if num_users > numLabels:\n tmpMinIndex = 0\n tmpMinVal = math.inf\n for n in range(0, num_users):\n if (n) % numLabels == tmp_target_node and len(dict_users[n]) < tmpMinVal:\n tmpMinVal = len(dict_users[n])\n tmpMinIndex = n\n tmp_target_node = tmpMinIndex\n dict_users[tmp_target_node] = np.concatenate((dict_users[tmp_target_node], [i]), axis=0)\n return dict_users", "def get_MNIST_testing_normalized():\n X, Y = get_dataset(\"testing\")\n X/=255.0\n X-=0.5\n return X,Y\n\n global MNIST_testing\n if MNIST_testing is None:\n if os.path.exists(\"./data/MNIST_testing.pkl\"):\n MNIST_testing = cPickle.load(open(\"./data/MNIST_testing.pkl\", \"r\"))\n else:\n MNIST_testing = get_dataset(\"testing\")\n MNIST_testing = normalize_data(MNIST_testing[0], MNIST_testing[1])\n cPickle.dump(MNIST_training, open(\"./data/MNIST_testing.pkl\",\"w\"))\n return MNIST_testing", "def test_data():\n batch_size = 10\n input_dim = 28\n test_data = np.random.rand(batch_size, input_dim)\n\n return test_data", "def 
load_mnist(train_data=True, test_data=False):\n os.chdir(ROOT_DIR)\n RESOURCES = [\n 'train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',\n 't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz'\n ]\n\n if (os.path.isdir('data') == 0):\n os.mkdir('data')\n if (os.path.isdir('data/mnist') == 0):\n os.mkdir('data/mnist')\n for name in RESOURCES:\n if (os.path.isfile('data/mnist/' + name) == 0):\n url = 'https://github.com/HIPS/hypergrad/raw/master/data/mnist/' + name\n r = requests.get(url, allow_redirects=True)\n open('data/mnist/' + name, 'wb').write(r.content)\n\n return get_images(train_data, test_data), get_labels(train_data, test_data)", "def test_snow_pumps():\n test_path = tempfile.mkdtemp()\n x_train, metadata = snow_pumps(test_path)\n try:\n assert x_train.shape == (13, 4)\n except:\n shutil.rmtree(test_path)\n raise", "def run_test():\n # Get the sets of images and labels for training, validation, and\n # test on MNIST.\n train, validation, test = datasets_mnist.read_data_sets(FLAGS.input_data_dir, FLAGS.fake_data)\n # Tell TensorFlow that the model will be built into the default Graph.\n with tf.Graph().as_default():\n # Generate placeholders for the images and labels.\n images_placeholder, labels_placeholder, phase_pl = placeholder_inputs(\n FLAGS.batch_size)\n\n # Build a Graph that computes predictions from the inference model.\n logits = mnist.inference(images_placeholder,\n FLAGS.hidden1,\n FLAGS.hidden2, \n phase_pl)\n\n eval_correct = mnist.evaluation(logits, labels_placeholder)\n # Add the variable initializer Op.\n all_variable = tf.global_variables()\n \n # Create a saver for writing training checkpoints.\n saver = tf.train.Saver()\n\n # Create a session for running Ops on the Graph.\n with tf.Session() as sess:\n\n saver.restore(sess, \"log/model.ckpt-1999\")\n for variable in all_variable:\n if \"moving\" in variable.name:\n print(variable.name, variable.eval())\n do_eval(sess,\n eval_correct,\n images_placeholder,\n labels_placeholder,\n phase_pl,\n test)", "def as_mnist(filename, imwidth):\n\n images = []\n labels = []\n \n if filename.find(\"devel\") != -1:\n print(\"we're working with the development set: \" + filename)\n\n for cls, data in enumerate(load(filename)):\n for example in data:\n labels.append(cls)\n image = numpy.zeros(shape=(imwidth, imwidth), dtype='uint8')\n for (x, y) in example:\n x_ = int(round(imwidth * x))\n y_ = int(round(1-(imwidth * y)))\n image[y_, x_] = 255\n images.append(image.flatten())\n\n return numpy.vstack(images).T.copy(), numpy.array(labels)", "def get_mnist_data(batch=128):\n \n def transformer(data, label):\n data = data.flatten().expand_dims(0).astype(np.float32)/255\n data = (data-0.13)/0.31\n label = label.astype(np.float32)\n return data, label\n\n train_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=True, transform=transformer)\n validation_dataset = gluon.data.vision.datasets.MNIST(root=M5_IMAGES, train=False, transform=transformer)\n train_dataloader = gluon.data.DataLoader(train_dataset, batch_size=batch, last_batch='keep',shuffle=True)\n validation_dataloader = gluon.data.DataLoader(validation_dataset, batch_size=batch, last_batch='keep')\n \n return train_dataloader, validation_dataloader", "def ex_3_b(x_train, y_train, x_test, y_test):\n ###########\n ## TODO:\n ## Train multi-class SVMs with a LINEAR kernel\n ## Use the sklearn.metrics.confusion_matrix to plot the confusion matrix.\n ## Find the index for which you get the highest error rate.\n ## Plot the confusion matrix with 
plot_confusion_matrix.\n ## Plot the first 10 images classified as the most misclassified digit using plot_mnist.\n ###########\n\n labels = range(1, 6)\n\n lin = svm.SVC(decision_function_shape='ovr', kernel='linear')\n lin.fit(x_train, y_train)\n\n y_test_predict =lin.predict(x_test)\n\n score_train = lin.score(x_train, y_train)\n score_test = lin.score(x_test, y_test)\n\n cm = confusion_matrix(y_test, y_test_predict)\n plot_confusion_matrix(cm, labels)\n #print(cm)\n\n diff_list = y_test_predict == y_test\n\n # indexes of all missclassiefied images\n misclassifieds = [i for i, val in enumerate(diff_list) if val == False]\n\n # remove diagonal elements from cm for later processing\n cm_no_diagonal = cm\n np.fill_diagonal(cm_no_diagonal, 0)\n #print(cm_no_diagonal)\n\n errors_per_class = np.sum(cm_no_diagonal, axis=0)\n #print(errors_per_class)\n\n sel_err = np.array(misclassifieds) # CHANGE ME! Numpy indices to select all images that are misclassified.\n i = np.argmax(errors_per_class) # CHANGE ME! Should be the label number corresponding the largest classification error.\n #print(i)\n\n # Plot with mnist plot\n plot_mnist(x_test[sel_err], y_test_predict[sel_err], labels=labels[i], k_plots=10, prefix='Predicted class')", "def get_mnist():\n if GLOBALS['project_root']=='':\n print('please initialize project_root in GLOBALS first')\n return None\n data_path = os.path.join(GLOBALS['project_root'], 'data/MNIST/')\n pickle_path = os.path.join(data_path, 'mnist.pickle')\n if os.path.exists(pickle_path):\n with open(pickle_path, 'rb') as f:\n mnist = pickle.load(f)\n else:\n from tensorflow.examples.tutorials.mnist import input_data\n mnist = input_data.read_data_sets(data_path, one_hot=True)\n with open(pickle_path, 'wb') as f:\n pickle.dump(mnist, f, pickle.HIGHEST_PROTOCOL)\n\n return mnist", "def load_mnist(dataset_name='mnist', **kwargs):\n dataset_name = dataset_name.strip().lower().replace('minist', 'mnist')\n\n if dataset_name.lower() not in ['mnist', 'fashion-mnist']:\n raise ValueError('Only mnist or fashion-mnist are valid dataset_name.')\n\n base = 'http://yann.lecun.com/exdb/mnist/'\n if dataset_name == 'fashion-mnist':\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n\n dirname = os.path.join(_trident_dir, dataset_name)\n make_dir_if_need(dirname)\n\n \"\"\"Load MNIST data from `path`\"\"\"\n trainData = None\n testData = None\n for kind in ['train', 'test']:\n labels_file = '{0}-labels-idx1-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n images_file = '{0}-images-idx3-ubyte.gz'.format( 't10k' if dataset_name in ('mnist', 'fashion-mnist') and kind == 'test' else kind)\n # if dataset_name == 'emnist' :\n # labels_file='emnist-balanced-'+labels_file\n # images_file = 'emnist-balanced-' + images_file\n\n is_data_download = download_file(base + labels_file, dirname, labels_file, dataset_name + '_labels_{0}'.format(kind))\n is_label_download = download_file(base + images_file, dirname, images_file, dataset_name + '_images_{0}'.format(kind))\n if is_data_download and is_label_download:\n labels_path = os.path.join(dirname, labels_file)\n images_path = os.path.join(dirname, images_file)\n labeldata = None\n imagedata = None\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8, offset=8)\n labels = np.squeeze(labels).astype(np.int64)\n labeldata = LabelDataset(labels.tolist(),object_type=ObjectType.classification_label)\n\n with gzip.open(images_path, 'rb') as 
imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8, offset=16)\n images = np.reshape(images, (len(labels), 784)).astype(dtype=_session.floatx)\n images = np.reshape(images, (-1, 28, 28))\n imagedata = ImageDataset(images, object_type=ObjectType.gray)\n if kind == 'train':\n trainData = Iterator(data=imagedata, label=labeldata)\n else:\n testData = Iterator(data=imagedata, label=labeldata)\n\n dataset = DataProvider(dataset_name, traindata=trainData, testdata=testData)\n dataset.binding_class_names(\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] if dataset_name == 'mnist' else ['T-shirt/top', 'Trouser', 'Pullover',\n 'Dress', 'Coat', 'Sandal', 'Shirt',\n 'Sneaker', 'Bag', 'Ankle boot'],\n 'en-US')\n\n return dataset\n return None", "def test_mlp():\r\n datasets = gen_data()\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x , test_set_y = datasets[2]\r\n\r\n\r\n\r\n batch_size = 100 # size of the minibatch\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n #print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP( rng = rng, input=x, n_in=28*28, n_hidden = 500, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model.\r\n # We take the mean of the cost over each minibatch.\r\n cost = classifier.negative_log_likelihood(y).mean()\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # Some optimizations needed are tagged with 'fast_run'\r\n # TODO: refine that and include only those\r\n mode = theano.compile.get_default_mode().including('fast_run')\r\n\r\n updates2 = OrderedDict()\r\n\r\n updates2[classifier.hiddenLayer.params[0]]=T.grad(cost,classifier.hiddenLayer.params[0])\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]},\r\n mode=mode)\r\n #print 'MODEL 1'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])\r\n\r\n # Even without FeatureShape\r\n train_model =theano.function( inputs = [index],\r\n updates = updates2,\r\n mode=mode.excluding('ShapeOpt'),\r\n givens={\r\n x:train_set_x[index*batch_size:(index+1)*batch_size],\r\n y:train_set_y[index*batch_size:(index+1)*batch_size]})\r\n #print\r\n #print 'MODEL 2'\r\n #theano.printing.debugprint(train_model, print_type=True)\r\n assert any([isinstance(i.op,T.nnet.CrossentropySoftmax1HotWithBiasDx) for i in train_model.maker.fgraph.toposort()])", "def test_generate_nb(self):\n pass", "def 
neural_net_ex4_ng():\n # ==================\n # read data\n dataset = loadmat('data/ex4data1.mat')\n print(dataset.keys())\n\n y = dataset['y'] # 5000 x 1\n print('dims y: ', y.shape)\n # print('y[0]: ', y[0])\n\n X = dataset['X'] # 5000 x 400\n print('dims X: ', X.shape)\n # print('X[0]: ', X[0])\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)\n\n num_samples_test = X_test.shape[0]\n\n # ==================\n # display data\n\n # pick 20 examples and visualize them\n fig = plt.figure(figsize=(10, 8), facecolor='white')\n fig.add_subplot(651)\n samples = np.random.choice(num_samples_test, 10)\n print('samples:', samples)\n plt.imshow(X_test[samples, :].reshape(-1, 20).T, cmap=\"Greys\")\n plt.axis('off')\n\n # ==================\n # run neural net\n hidden_layer_size = 25\n\n mlp = MLPClassifier(hidden_layer_sizes=(25,), max_iter=20, alpha=1e-4,\n solver='sgd', verbose=False, tol=1e-4, random_state=1,\n learning_rate_init=.1)\n mlp.fit(X_train, y_train.ravel())\n\n predictions = mlp.predict(X_test)\n print('Test set accuracy: {} %'.format(np.mean(predictions == y_test.ravel())*100))\n\n # print(confusion_matrix(y_test, predictions))\n # print(classification_report(y_test, predictions))\n print(\"Training set score: %f\" % mlp.score(X_train, y_train))\n print(\"Test set score: %f\" % mlp.score(X_test, y_test))\n print('coeffs shape', (mlp.coefs_[0]).shape)\n\n # ==================\n # display coefficients of hidden layer\n fig.add_subplot(652)\n plt.imshow(mlp.coefs_[0][:, 0].reshape(20, 20))\n plt.axis('off')\n\n gs = gridspec.GridSpec(6, 5)\n cur_img_idx = 5\n\n # use global min / max to ensure all weights are shown on the same scale\n vmin, vmax = mlp.coefs_[0].min(), mlp.coefs_[0].max()\n for coef, ax in zip(mlp.coefs_[0].T, range(hidden_layer_size)):\n fig.add_subplot(gs[cur_img_idx])\n plt.imshow(coef.reshape(20, 20), cmap=plt.cm.gray, vmin=.5 * vmin, vmax=.5 * vmax)\n plt.axis('off')\n cur_img_idx += 1\n\n plt.show()", "def demo():\n def load_data():\n train = open(\"csv/svd_train.csv\", \"r\")\n r = csv.reader(train)\n next(r)\n\n data = []\n target = []\n\n print \"Prepping data...\"\n for row in r:\n aux = [0 for x in xrange(10)]\n aux[int(row[0])] = 1\n target.append(aux)\n data.append([float(x) for x in row[1:]])\n\n train.close()\n\n data = np.array(data)\n\n target = np.array(target)\n\n #train = [target[:35000],data[:35000]]\n #test = [target[35000:],data[35000:]]\n\n return [target, data]\n\n NN = MLP_NeuralNetwork(101, 75, 35, 10,\n iterations = 200,\n learning_rate = 0.5,\n momentum = 0.05,\n rate_decay = 0.005)\n\n train = load_data()\n\n NN.train(train)\n #NN.test_cross(test)\n #NN.test()\n NN.test_against()", "def gen_train_val_test_images(data_dir, seed=131):\n np.random.seed(seed)\n\n # Load SVHN Dataset (single digits)\n train_data = scipy_io.loadmat(data_dir + '/train_32x32.mat')\n test_data = scipy_io.loadmat(data_dir + '/test_32x32.mat')\n extra_data = scipy_io.loadmat(data_dir + '/extra_32x32.mat')\n\n train_X, train_y = train_data['X'], train_data['y']\n test_X, test_y = test_data['X'], test_data['y']\n extra_X, extra_y = extra_data['X'], extra_data['y']\n\n train_y = train_y.squeeze()\n test_y = test_y.squeeze()\n extra_y = extra_y.squeeze()\n\n # Change labels for '0' digit from 10 to 0\n train_y[train_y == 10] = 0\n test_y[test_y == 10] = 0\n extra_y[extra_y == 10] = 0\n\n del extra_data\n\n num_classes = 10\n\n train_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = 
np.arange(len(train_y))[train_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=400)\n train_val_sample_idxs = np.concatenate((train_val_sample_idxs,\n sel_class_idxs))\n not_train_val_sample_idxs = np.setdiff1d(np.arange(len(train_y)),\n train_val_sample_idxs)\n\n val_X = train_X[:, :, :, train_val_sample_idxs]\n val_y = train_y[train_val_sample_idxs]\n\n extra_val_sample_idxs = np.array([], int)\n for i in range(num_classes):\n class_idxs = np.arange(len(extra_y))[extra_y == i]\n sel_class_idxs = np.random.choice(class_idxs, size=200)\n extra_val_sample_idxs = np.concatenate((extra_val_sample_idxs,\n sel_class_idxs))\n not_extra_val_sample_idxs = np.setdiff1d(np.arange(len(extra_y)),\n extra_val_sample_idxs)\n\n val_X = np.concatenate((val_X, extra_X[:, :, :, extra_val_sample_idxs]), axis=3)\n val_y = np.hstack((val_y, extra_y[extra_val_sample_idxs]))\n\n train_X = np.concatenate((train_X[:, :, :, not_train_val_sample_idxs],\n extra_X[:, :, :, not_extra_val_sample_idxs]), axis=3)\n train_y = np.hstack((train_y[not_train_val_sample_idxs],\n extra_y[not_extra_val_sample_idxs]))\n\n # Create directories and save images\n train_dir = data_dir + '/imgs/train'\n test_dir = data_dir + '/imgs/test'\n validation_dir = data_dir + '/imgs/validation'\n\n if not os.path.exists(train_dir):\n os.makedirs(train_dir)\n\n if not os.path.exists(validation_dir):\n os.makedirs(validation_dir)\n\n if not os.path.exists(test_dir):\n os.makedirs(test_dir)\n\n for i in range(num_classes):\n if not os.path.exists(train_dir + '/' + str(i)):\n os.makedirs(train_dir + '/' + str(i))\n\n if not os.path.exists(validation_dir + '/' + str(i)):\n os.makedirs(validation_dir + '/' + str(i))\n\n if not os.path.exists(test_dir + '/' + str(i)):\n os.makedirs(test_dir + '/' + str(i))\n\n print \"Creating train images ... \"\n for i in range(len(train_y)):\n filename = train_dir + '/' + str(train_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, train_X[:, :, :, i])\n\n print \"Creating validation images ... \"\n for i in range(len(val_y)):\n filename = validation_dir + '/' + str(val_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, val_X[:, :, :, i])\n\n print \"Creating test images ... 
\"\n for i in range(len(test_y)):\n filename = test_dir + '/' + str(test_y[i]) + '/' + str(i) + '.png'\n skimage_io.imsave(filename, test_X[:, :, :, i])", "def test_on_all(self) -> None:\n x_test, y_test = self.mnist.test.images, self.mnist.test.labels\n N = self.mnist.test.num_examples\n\n # I have replaced all -1 with self.mb_size to be sure about exact shapes of all layers.\n assert N % self.mb_size == 0,\\\n \"Sorry, mb_size must divide the number of images in test set\"\n\n results = np.array([0., 0.])\n for batch_no in range(N // self.mb_size):\n beg = batch_no * self.mb_size\n end = min(N, (batch_no + 1) * self.mb_size)\n len_batch = end - beg\n batch_results = np.array(self.test_on_batch(x_test[beg:end], y_test[beg:end]))\n results += batch_results * len_batch\n results /= N\n self.logger.info(\"(Test(final): Loss: {0[0]}, accuracy: {0[1]}\".format(results))", "def fetch_multimnist_image(label):\n dataset = MultiMNIST('./data', train=False, download=True,\n transform=transforms.ToTensor(),\n target_transform=charlist_tensor)\n images = dataset.test_data\n labels = dataset.test_labels\n n_rows = len(images)\n\n images = []\n for i in xrange(n_rows):\n image = images[i]\n text = labels[i]\n if tensor_to_string(text.squeeze(0)) == label:\n images.append(image)\n\n if len(images) == 0:\n sys.exit('No images with label (%s) found.' % label)\n\n images = torch.cat(images).cpu().numpy()\n ix = np.random.choice(np.arange(images.shape[0]))\n image = images[ix]\n image = torch.from_numpy(image).float() \n image = image.unsqueeze(0)\n return Variable(image, volatile=True)", "def _load_mnist(path, dataset=\"training\", digits=None, asbytes=False,\n selection=None, return_labels=True, return_indices=False):\n\n # The files are assumed to have these names and should be found in 'path'\n files = {\n 'training': ('train-images-idx3-ubyte', 'train-labels-idx1-ubyte'),\n 'testing': ('t10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'),\n }\n\n try:\n images_fname = os.path.join(path, files[dataset][0])\n labels_fname = os.path.join(path, files[dataset][1])\n except KeyError:\n raise ValueError(\"Data set must be 'testing' or 'training'\")\n\n # We can skip the labels file only if digits aren't specified and labels\n # aren't asked for\n if return_labels or digits is not None:\n flbl = open(labels_fname, 'rb')\n magic_nr, size = struct.unpack(\">II\", flbl.read(8))\n labels_raw = pyarray(\"b\", flbl.read())\n flbl.close()\n\n fimg = open(images_fname, 'rb')\n magic_nr, size, rows, cols = struct.unpack(\">IIII\", fimg.read(16))\n images_raw = pyarray(\"B\", fimg.read())\n fimg.close()\n\n if digits:\n indices = [k for k in range(size) if labels_raw[k] in digits]\n else:\n indices = range(size)\n\n if selection:\n indices = indices[selection]\n\n images = np.zeros((len(indices), rows, cols), dtype=np.uint8)\n\n if return_labels:\n labels = np.zeros((len(indices)), dtype=np.int8)\n for i in range(len(indices)):\n images[i] = np.array(images_raw[indices[i] * rows * cols:(indices[i] + 1) * rows * cols]).reshape((rows, cols))\n if return_labels:\n labels[i] = labels_raw[indices[i]]\n\n if not asbytes:\n images = images.astype(float)/255.0\n\n ret = (images,)\n if return_labels:\n ret += (labels,)\n if return_indices:\n ret += (indices,)\n\n if len(ret) == 1:\n return ret[0] # Don't return a tuple of one\n\n return ret", "def fetch_binarized_mnist():\n mnist = fetch_mnist()\n random_state = np.random.RandomState(1999)\n\n def get_sampled(arr):\n # make sure that a pixel can always be turned off\n return 
random_state.binomial(1, arr * 255 / 256., size=arr.shape)\n\n data = get_sampled(mnist[\"data\"]).astype(theano.config.floatX)\n return {\"data\": data,\n \"target\": mnist[\"target\"],\n \"train_indices\": mnist[\"train_indices\"],\n \"valid_indices\": mnist[\"valid_indices\"],\n \"test_indices\": mnist[\"test_indices\"]}", "def get_data():\n\t(X_train, y_train), (X_val, y_val) = mnist.load_data()\n\tn_features = X_train.shape[1]*X_train.shape[1]\n\tn_train = X_train.shape[0]\n\tn_val = X_val.shape[0]\n\tX_train = X_train.reshape(n_train, n_features)\n\tX_val = X_val.reshape(n_val, n_features)\n\tX_train = X_train.astype(\"float32\") / 255\n\tX_val = X_val.astype(\"float32\") / 255\n\n\treturn (X_train, y_train), (X_val, y_val)", "def try4():\n path = '/Users/mayankkejriwal/git-projects/bioExperiments/tsne_python/'\n mnist = path+'mnist2500_X.txt'\n X = numpy.loadtxt(mnist)\n labels = numpy.loadtxt(path+\"mnist2500_labels.txt\")\n Y = tsne.tsne(X, 2, 50, 20.0)\n pylab.scatter(Y[:,0], Y[:,1], 20, labels)\n pylab.show()", "def test_mlp(learning_rate=0.01, L1_reg=0.00, L2_reg=0.0001, n_epochs=1000,\r\n dataset='mnist.pkl.gz', batch_size=20, n_hidden=500):\r\n datasets = load_data(dataset)\r\n\r\n train_set_x, train_set_y = datasets[0]\r\n valid_set_x, valid_set_y = datasets[1]\r\n test_set_x, test_set_y = datasets[2]\r\n\r\n # compute number of minibatches for training, validation and testing\r\n n_train_batches = train_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_valid_batches = valid_set_x.get_value(borrow=True).shape[0] / batch_size\r\n n_test_batches = test_set_x.get_value(borrow=True).shape[0] / batch_size\r\n\r\n ######################\r\n # BUILD ACTUAL MODEL #\r\n ######################\r\n print '... building the model'\r\n\r\n # allocate symbolic variables for the data\r\n index = T.lscalar() # index to a [mini]batch\r\n x = T.matrix('x') # the data is presented as rasterized images\r\n y = T.ivector('y') # the labels are presented as 1D vector of\r\n # [int] labels\r\n\r\n rng = numpy.random.RandomState(1234)\r\n\r\n # construct the MLP class\r\n classifier = MLP(rng=rng, input=x, n_in=28 * 28,\r\n n_hidden=n_hidden, n_out=10)\r\n\r\n # the cost we minimize during training is the negative log likelihood of\r\n # the model plus the regularization terms (L1 and L2); cost is expressed\r\n # here symbolically\r\n cost = classifier.negative_log_likelihood(y) \\\r\n + L1_reg * classifier.L1 \\\r\n + L2_reg * classifier.L2_sqr\r\n\r\n # compiling a Theano function that computes the mistakes that are made\r\n # by the model on a minibatch\r\n test_model = theano.function(inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: test_set_x[index * batch_size:(index + 1) * batch_size],\r\n y: test_set_y[index * batch_size:(index + 1) * batch_size]})\r\n\r\n validate_model = theano.function(inputs=[index],\r\n outputs=classifier.errors(y),\r\n givens={\r\n x: valid_set_x[index * batch_size:(index + 1) * batch_size],\r\n y: valid_set_y[index * batch_size:(index + 1) * batch_size]})\r\n\r\n # compute the gradient of cost with respect to theta (stored in params)\r\n # the resulting gradients will be stored in a list gparams\r\n gparams = []\r\n for param in classifier.params:\r\n gparam = T.grad(cost, param)\r\n gparams.append(gparam)\r\n\r\n # specify how to update the parameters of the model as a list of\r\n # (variable, update expression) pairs\r\n updates = []\r\n # given two lists A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of\r\n # same length, zip 
generates a list C of same size, where each element\r\n # is a pair formed from the two lists :\r\n # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]\r\n for param, gparam in zip(classifier.params, gparams):\r\n updates.append((param, param - learning_rate * gparam))\r\n\r\n # compiling a Theano function `train_model` that returns the cost, but\r\n # in the same time updates the parameter of the model based on the rules\r\n # defined in `updates`\r\n train_model = theano.function(inputs=[index], outputs=cost,\r\n updates=updates,\r\n givens={\r\n x: train_set_x[index * batch_size:(index + 1) * batch_size],\r\n y: train_set_y[index * batch_size:(index + 1) * batch_size]})\r\n\r\n ###############\r\n # TRAIN MODEL #\r\n ###############\r\n print '... training'\r\n\r\n # early-stopping parameters\r\n patience = 10000 # look at this many examples regardless\r\n patience_increase = 2 # wait this much longer when a new best is\r\n # found\r\n improvement_threshold = 0.995 # a relative improvement of this much is\r\n # considered significant\r\n validation_frequency = min(n_train_batches, patience / 2)\r\n # go through this many\r\n # minibatches before checking the network\r\n # on the validation set; in this case we\r\n # check every epoch\r\n\r\n best_params = None\r\n best_validation_loss = numpy.inf\r\n best_iter = 0\r\n test_score = 0.\r\n start_time = time.clock()\r\n\r\n epoch = 0\r\n done_looping = False\r\n\r\n while (epoch < n_epochs) and (not done_looping):\r\n epoch = epoch + 1\r\n for minibatch_index in xrange(n_train_batches):\r\n\r\n minibatch_avg_cost = train_model(minibatch_index)\r\n # iteration number\r\n iter = (epoch - 1) * n_train_batches + minibatch_index\r\n\r\n if (iter + 1) % validation_frequency == 0:\r\n # compute zero-one loss on validation set\r\n validation_losses = [validate_model(i) for i\r\n in xrange(n_valid_batches)]\r\n this_validation_loss = numpy.mean(validation_losses)\r\n\r\n print('epoch %i, minibatch %i/%i, validation error %f %%' %\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n this_validation_loss * 100.))\r\n\r\n # if we got the best validation score until now\r\n if this_validation_loss < best_validation_loss:\r\n #improve patience if loss improvement is good enough\r\n if this_validation_loss < best_validation_loss * \\\r\n improvement_threshold:\r\n patience = max(patience, iter * patience_increase)\r\n\r\n best_validation_loss = this_validation_loss\r\n best_iter = iter\r\n\r\n # test it on the test set\r\n test_losses = [test_model(i) for i\r\n in xrange(n_test_batches)]\r\n test_score = numpy.mean(test_losses)\r\n\r\n print((' epoch %i, minibatch %i/%i, test error of '\r\n 'best model %f %%') %\r\n (epoch, minibatch_index + 1, n_train_batches,\r\n test_score * 100.))\r\n\r\n if patience <= iter:\r\n done_looping = True\r\n break\r\n\r\n end_time = time.clock()\r\n print(('Optimization complete. 
Best validation score of %f %% '\r\n 'obtained at iteration %i, with test performance %f %%') %\r\n (best_validation_loss * 100., best_iter + 1, test_score * 100.))\r\n print >> sys.stderr, ('The code for file ' +\r\n os.path.split(__file__)[1] +\r\n ' ran for %.2fm' % ((end_time - start_time) / 60.))", "def test_data():\n global _MEAN # pylint: disable=global-statement\n _np.random.seed(1)\n view = _skdc10.view.OfficialImageClassificationTask()\n permutation = _np.random.permutation(range(10000))\n if _MEAN is None:\n _MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)\n return ((view.test.x[:10000, :][permutation, :] - _MEAN).\n transpose((0, 3, 1, 2)).astype('float32'),\n view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32'))", "def load_data():\n prefix = 'mnist_data/'\n train_data = np.load(prefix + 'mnist_train_images.npy')\n train_labels = np.load(prefix + 'mnist_train_labels.npy')\n val_data = np.load(prefix + 'mnist_validation_images.npy')\n val_labels = np.load(prefix + 'mnist_validation_labels.npy')\n test_data = np.load(prefix + 'mnist_test_images.npy')\n test_labels = np.load(prefix + 'mnist_test_labels.npy')\n assert train_data.shape == (55000, 784) and train_labels.shape == (55000, 10)\n assert val_data.shape == (5000, 784) and val_labels.shape == (5000, 10)\n assert test_data.shape == (10000, 784) and test_labels.shape == (10000, 10)\n return train_data, train_labels, val_data, val_labels, test_data, test_labels", "def test_basic(self):\n result = NonLinearWeights(0.85).nonlinear_weights(3)\n self.assertIsInstance(result, np.ndarray)", "def test(ndigit, elambda, showSamples, showConfusion):\n Data, Label = getData()\n trainX, trainY, testX, testY = splitData(Data, Label, ndigit)\n trainX_mean = np.mean(trainX, axis=0)\n trainX_new = trainX - trainX_mean\n eigenvectors = getEigenVectors(trainX_new, elambda)\n trainX_eigen = trainX_new.dot(eigenvectors)\n testX_new = testX - trainX_mean\n testX_eigen = testX_new.dot(eigenvectors)\n testO = []\n if showSamples:\n correct_samples = []\n correct_samples_nearest = []\n correct_samples_eigen = []\n correct_samples_nearest_eigen = []\n correct_samples_labels = []\n correct_samples_predictions = []\n wrong_samples = []\n wrong_samples_nearest = []\n wrong_samples_eigen = []\n wrong_samples_nearest_eigen = []\n wrong_samples_labels = []\n wrong_samples_predictions = []\n if showConfusion:\n conf = np.zeros((ndigit, ndigit))\n for i in xrange(testX_eigen.shape[0]):\n t = testX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n p = int(trainY[j])\n y = int(testY[i])\n if showConfusion:\n conf[p, y] += 1\n if showSamples:\n if p == y:\n if len(correct_samples) < y + 1:\n correct_samples.append(testX[i])\n correct_samples_nearest.append(trainX[j])\n correct_samples_eigen.append(testX_eigen[i])\n correct_samples_nearest_eigen.append(trainX_eigen[j])\n correct_samples_labels.append(y)\n correct_samples_predictions.append(p)\n else:\n if len(wrong_samples) < y + 1:\n wrong_samples.append(testX[i])\n wrong_samples_nearest.append(trainX[j])\n wrong_samples_eigen.append(testX_eigen[i])\n wrong_samples_nearest_eigen.append(trainX_eigen[j])\n wrong_samples_labels.append(y)\n wrong_samples_predictions.append(p)\n testO.append(p)\n testO = np.array(testO)\n train0 = []\n for i in xrange(trainX_eigen.shape[0]):\n t = trainX_eigen[i]\n j = getNearestSampleIndex(t, trainX_eigen)\n min_class = trainY[j]\n train0.append(min_class)\n train0 = np.array(train0)\n print \"for digits = %d lambda = %.2f train = %.6f test = 
%.6f \" % (\n ndigit, elambda, (train0 == trainY).mean(), (testO == testY).mean())\n if showConfusion:\n print conf\n if showSamples:\n displaySamples(correct_samples_labels, correct_samples_predictions,\n correct_samples, correct_samples_nearest,\n correct_samples_eigen, correct_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Correct')\n displaySamples(wrong_samples_labels, wrong_samples_predictions,\n wrong_samples, wrong_samples_nearest,\n wrong_samples_eigen, wrong_samples_nearest_eigen,\n eigenvectors, trainX_mean, elambda, 'Wrong')", "def load_mnist(fashion, onehot_encode=True, flatten_x=False, crop_x=0, classes=None):\n\tif not fashion:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0\n\telse:\n\t\t(x_train, y_train),(x_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()\n\t\tx_train, x_test = x_train / 255.0, x_test / 255.0 \n \n\tdef crop(X, crop_size):\n\t\tassert crop_x < X.shape[1]/2\n\t\tassert crop_x < X.shape[2]/2\n\t\treturn X[:,crop_size:-crop_size,crop_size:-crop_size]\n\n\tif crop_x > 0:\n\t\tx_train = crop(x_train, crop_x)\n\t\tx_test = crop(x_test, crop_x)\n\n\t# Flatten to 2d arrays (each example 1d)\n\tdef flatten_image(X):\n\t return X.reshape(X.shape[0], X.shape[1]*X.shape[1])\n\tif flatten_x:\n\t\tx_train = flatten_image(x_train)\n\t\tx_test = flatten_image(x_test)\n\n\tif onehot_encode:\n\t\ty_train = onehot_encode_labels(y_train)\n\t\ty_test = onehot_encode_labels(y_test)\n\n\tif classes is not None:\n\t\tassert len(classes) == 2\n\t\tc0, c1 = classes\n\t\ttrain_idxs_to_keep = np.logical_or(y_train==c0, y_train==c1)\n\t\tx_train, y_train = x_train[train_idxs_to_keep,:], y_train[train_idxs_to_keep]\n\t\ttest_idxs_to_keep = np.logical_or(y_test==c0, y_test==c1)\n\t\tx_test, y_test = x_test[test_idxs_to_keep,:], y_test[test_idxs_to_keep]\n\n\t\ty_train = (y_train==c1).astype(int)[:,np.newaxis]\n\t\ty_test = (y_test==c1).astype(int)[:,np.newaxis]\n\n\treturn x_train, y_train, x_test, y_test", "def test_metric_learning(smote_class):\n nn_params = {'metric': 'precomputed',\n 'metric_learning_method': 'ITML'}\n X, y = smote_class(nn_params=nn_params).sample(dataset['data'],\n dataset['target'])\n\n assert np.unique(y).shape[0] == 2\n assert X.shape[0] > 0", "def load_EMNIST_data(file, verbose = False, standarized = False): \n mat = sio.loadmat(file)\n data = mat[\"dataset\"]\n \n X_train = data['train'][0,0]['images'][0,0]\n X_train = X_train.reshape((X_train.shape[0], 28, 28), order = \"F\")\n y_train = data['train'][0,0]['labels'][0,0]\n y_train = np.squeeze(y_train)\n y_train -= 1 #y_train is zero-based\n \n X_test = data['test'][0,0]['images'][0,0]\n X_test= X_test.reshape((X_test.shape[0], 28, 28), order = \"F\")\n y_test = data['test'][0,0]['labels'][0,0]\n y_test = np.squeeze(y_test)\n y_test -= 1 #y_test is zero-based\n \n if standarized: \n X_train = X_train/255\n X_test = X_test/255\n mean_image = np.mean(X_train, axis=0)\n X_train -= mean_image\n X_test -= mean_image\n \n\n if verbose == True: \n print(\"EMNIST-letter dataset ... \")\n print(\"X_train shape :\", X_train.shape)\n print(\"X_test shape :\", X_test.shape)\n print(\"y_train shape :\", y_train.shape)\n print(\"y_test shape :\", y_test.shape)\n \n return X_train, y_train, X_test, y_test", "def test_000_basic_functionality() -> None:\n df = generate_test_data()\n skim(df)" ]
[ "0.7192644", "0.7088527", "0.6992389", "0.6823488", "0.6820499", "0.65735173", "0.6531278", "0.6529289", "0.6502993", "0.63889843", "0.6371827", "0.6368664", "0.63274115", "0.6315902", "0.62861085", "0.62725544", "0.6220246", "0.6188648", "0.6144985", "0.61362416", "0.6119027", "0.6045903", "0.6035213", "0.6018793", "0.6013625", "0.6011223", "0.6001034", "0.5967618", "0.5955806", "0.5953214", "0.59335077", "0.5884085", "0.58810836", "0.5856891", "0.58554476", "0.58467555", "0.5846364", "0.58420515", "0.5834867", "0.5833051", "0.5830527", "0.58132356", "0.5802065", "0.57884526", "0.57714105", "0.5755312", "0.57360905", "0.57294244", "0.57265216", "0.57239056", "0.5703129", "0.5699589", "0.5678674", "0.5674968", "0.56662554", "0.56531256", "0.56452096", "0.5642889", "0.563169", "0.56229085", "0.56223816", "0.5610885", "0.55906284", "0.55791396", "0.5569076", "0.5564868", "0.55626106", "0.5559306", "0.555815", "0.5556102", "0.5549294", "0.55482215", "0.55392706", "0.55255824", "0.5516632", "0.55111265", "0.5501207", "0.5490707", "0.5485328", "0.5485075", "0.5480144", "0.5476179", "0.5468571", "0.5462391", "0.5462032", "0.54609066", "0.5451851", "0.54505485", "0.5434967", "0.54316974", "0.5430205", "0.5429624", "0.54293615", "0.54290414", "0.541637", "0.5412779", "0.5411554", "0.54080427", "0.5401129", "0.5398112" ]
0.6742171
5