prompt
large_stringlengths
70
991k
completion
large_stringlengths
0
1.02k
<|file_name|>db.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- from google.appengine.api import datastore, datastore_types, datastore_errors from google.appengine.datastore import datastore_query, datastore_rpc from google.appengine.api import memcache from google.appengine.api import search from server.config import conf import logging """ Tiny wrapper around *google.appengine.api.datastore*. This just ensures that operations issued directly through the database-api doesn't interfere with ViURs internal caching. If you need skeletons anyway, query the database using skel.all(); its faster and is able to serve more requests from cache. """ __cacheLockTime__ = 42 #Prevent an entity from creeping into the cache for 42 Secs if it just has been altered. __cacheTime__ = 15*60 #15 Mins __CacheKeyPrefix__ ="viur-db-cache:" #Our Memcache-Namespace. Dont use that for other purposes __MemCacheBatchSize__ = 30 __undefinedC__ = object() def PutAsync( entities, **kwargs ): """ Asynchronously store one or more entities in the data store. This function is identical to :func:`server.db.Put`, except that it returns an asynchronous object. Call ``get_result()`` on the return value to block on the call and get the results. """ if isinstance( entities, Entity ): entities._fixUnindexedProperties() elif isinstance( entities, list ): for entity in entities: assert isinstance( entity, Entity ) entity._fixUnindexedProperties()<|fim▁hole|> if entities.is_saved(): #Its an update memcache.delete( str( entities.key() ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) elif isinstance( entities, list ): for entity in entities: assert isinstance( entity, Entity ) if entity.is_saved(): #Its an update memcache.delete( str( entity.key() ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) return( datastore.PutAsync( entities, **kwargs ) ) def Put( entities, **kwargs ): """ Store one or more entities in the data store. The entities may be new or previously existing. 
For new entities, ``Put()`` will fill in the app id and key assigned by the data store. :param entities: Entity or list of entities to be stored. :type entities: :class:`server.db.Entity` | list of :class:`server.db.Entity` :param config: Optional configuration to use for this request. This must be specified\ as a keyword argument. :type config: dict :returns: If the argument ``entities`` is a single :class:`server.db.Entity`, \ a single Key is returned. If the argument is a list of :class:`server.db.Entity`, \ a list of Keys will be returned. :rtype: Key | list of keys :raises: :exc:`TransactionFailedError`, if the action could not be committed. """ if isinstance( entities, Entity ): entities._fixUnindexedProperties() elif isinstance( entities, list ): for entity in entities: assert isinstance( entity, Entity ) entity._fixUnindexedProperties() if conf["viur.db.caching" ]>0: if isinstance( entities, Entity ): #Just one: if entities.is_saved(): #Its an update memcache.delete( str( entities.key() ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) elif isinstance( entities, list ): for entity in entities: assert isinstance( entity, Entity ) if entity.is_saved(): #Its an update memcache.delete( str( entity.key() ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) return( datastore.Put( entities, **kwargs ) ) def GetAsync( keys, **kwargs ): """ Asynchronously retrieves one or more entities from the data store. This function is identical to :func:`server.db.Get`, except that it returns an asynchronous object. Call ``get_result()`` on the return value to block on the call and get the results. """ class AsyncResultWrapper: """ Wraps an result thats allready there into something looking like an RPC-Object. 
""" def __init__( self, res ): self.res = res def get_result( self ): return( self.res ) if conf["viur.db.caching" ]>0 and not datastore.IsInTransaction(): if isinstance( keys, datastore_types.Key ) or isinstance( keys, basestring ): #Just one: res = memcache.get( str(keys), namespace=__CacheKeyPrefix__ ) if res: return( AsyncResultWrapper( res ) ) #Either the result wasnt found, or we got a list of keys to fetch; # --> no caching possible return( datastore.GetAsync( keys, **kwargs ) ) def Get( keys, **kwargs ): """ Retrieve one or more entities from the data store. Retrieves the entity or entities with the given key(s) from the data store and returns them as fully populated :class:`server.db.Entity` objects. If there is an error, the function raises a subclass of :exc:`datastore_errors.Error`. If keys is a single key or str, an Entity will be returned, or :exc:`EntityNotFoundError` will be raised if no existing entity matches the key. However, if keys is a list or tuple, a list of entities will be returned that corresponds to the sequence of keys. It will include entities for keys that were found and None placeholders for keys that were not found. :param keys: Key, str or list of keys or strings to be retrieved. :type keys: Key | str | list of Key | list of str :param config: Optional configuration to use for this request. This must be specified\ as a keyword argument. :type config: dict :returns: Entity or list of Entity objects corresponding to the specified key(s). 
:rtype: :class:`server.db.Entity` | list of :class:`server.db.Entity` """ if conf["viur.db.caching" ]>0 and not datastore.IsInTransaction(): if isinstance( keys, datastore_types.Key ) or isinstance( keys, basestring ): #Just one: res = memcache.get( str(keys), namespace=__CacheKeyPrefix__ ) if not res: #Not cached - fetch and cache it :) res = Entity.FromDatastoreEntity( datastore.Get( keys, **kwargs ) ) res[ "key" ] = str( res.key() ) memcache.set( str(res.key() ), res, time=__cacheTime__, namespace=__CacheKeyPrefix__ ) return( res ) #Either the result wasnt found, or we got a list of keys to fetch; elif isinstance( keys,list ): #Check Memcache first cacheRes = {} tmpRes = [] keyList = [ str(x) for x in keys ] while keyList: #Fetch in Batches of 30 entries, as the max size for bulk_get is limited to 32MB currentBatch = keyList[:__MemCacheBatchSize__] keyList = keyList[__MemCacheBatchSize__:] cacheRes.update( memcache.get_multi( currentBatch, namespace=__CacheKeyPrefix__) ) #Fetch the rest from DB missigKeys = [ x for x in keys if not str(x) in cacheRes ] dbRes = [ Entity.FromDatastoreEntity(x) for x in datastore.Get( missigKeys ) if x is not None ] # Cache what we had fetched saveIdx = 0 while len(dbRes)>saveIdx*__MemCacheBatchSize__: cacheMap = {str(obj.key()): obj for obj in dbRes[saveIdx*__MemCacheBatchSize__:(saveIdx+1)*__MemCacheBatchSize__]} try: memcache.set_multi( cacheMap, time=__cacheTime__ , namespace=__CacheKeyPrefix__ ) except: pass saveIdx += 1 for key in [ str(x) for x in keys ]: if key in cacheRes: tmpRes.append( cacheRes[ key ] ) else: for e in dbRes: if str( e.key() ) == key: tmpRes.append ( e ) break if conf["viur.debug.traceQueries"]: logging.debug( "Fetched a result-set from Datastore: %s total, %s from cache, %s from datastore" % (len(tmpRes),len( cacheRes.keys()), len( dbRes ) ) ) return( tmpRes ) if isinstance( keys, list ): return( [ Entity.FromDatastoreEntity(x) for x in datastore.Get( keys, **kwargs ) ] ) else: return( 
Entity.FromDatastoreEntity( datastore.Get( keys, **kwargs ) ) ) def GetOrInsert( key, kindName=None, parent=None, **kwargs ): """ Either creates a new entity with the given key, or returns the existing one. Its guaranteed that there is no race-condition here; it will never overwrite an previously created entity. Extra keyword arguments passed to this function will be used to populate the entity if it has to be created; otherwise they are ignored. :param key: The key which will be fetched or created. \ If key is a string, it will be used as the name for the new entity, therefore the \ collectionName is required in this case. :type key: server.db.Key | str :param kindName: The data kind to use for that entity. Ignored if key is a db.Key. :type kindName: str :param parent: The parent entity of the entity. :type parent: db.Key or None :returns: Returns the wanted Entity. :rtype: server.db.Entity """ def txn( key, kwargs ): try: res = Entity.FromDatastoreEntity(datastore.Get( key )) except datastore_errors.EntityNotFoundError: res = Entity( kind=key.kind(), parent=key.parent(), name=key.name(), id=key.id() ) for k, v in kwargs.items(): res[ k ] = v datastore.Put( res ) return( res ) if not isinstance( key, datastore_types.Key ): try: key = datastore_types.Key( encoded=key ) except: assert kindName key = datastore_types.Key.from_path( kindName, key, parent=parent ) if datastore.IsInTransaction(): return txn(key, kwargs) return datastore.RunInTransaction( txn, key, kwargs ) def DeleteAsync(keys, **kwargs): """ Asynchronously deletes one or more entities from the data store. This function is identical to :func:`server.db.Delete`, except that it returns an asynchronous object. Call ``get_result()`` on the return value to block on the call and get the results. 
""" if conf["viur.db.caching" ]>0: if isinstance( keys, datastore_types.Key ): #Just one: memcache.delete( str( keys ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) elif isinstance( keys, list ): for key in keys: assert isinstance( key, datastore_types.Key ) or isinstance( key, basestring ) memcache.delete( str( key ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) return( datastore.DeleteAsync( keys, **kwargs ) ) def Delete(keys, **kwargs): """ Deletes one or more entities from the data store. :warning: Permanently deletes entities, use with care! Deletes the given entity or entities from the data store. You can only delete entities from your app. If there is an error, the function raises a subclass of :exc:`datastore_errors.Error`. :param keys: Key, str or list of keys or strings to be deleted. :type keys: Key | str | list of Key | list of str :param config: Optional configuration to use for this request. This must be specified\ as a keyword argument. :type config: dict :raises: :exc:`TransactionFailedError`, if the deletion could not be committed. """ if conf["viur.db.caching" ]>0: if isinstance( keys, datastore_types.Key ) or isinstance( keys, basestring ): #Just one: memcache.delete( str( keys ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) elif isinstance( keys, list ): for key in keys: assert isinstance( key, datastore_types.Key ) or isinstance( key, basestring ) memcache.delete( str( key ), namespace=__CacheKeyPrefix__, seconds=__cacheLockTime__ ) return( datastore.Delete( keys, **kwargs ) ) class Query( object ): """ Thin wrapper around datastore.Query to provide a consistent (camelCase) API. 
""" def __init__(self, kind, srcSkelClass=None, *args, **kwargs ): super( Query, self ).__init__( ) self.datastoreQuery = datastore.Query( kind, *args, **kwargs ) self.srcSkel = srcSkelClass self.amount = 30 self._filterHook = None self._orderHook = None self._origCursor = None self._customMultiQueryMerge = None # Sometimes, the default merge functionality from MultiQuery is not sufficient self._calculateInternalMultiQueryAmount = None # Some (Multi-)Queries need a different amount of results per subQuery than actually returned self.customQueryInfo = {} # Allow carrying custom data along with the query. Currently only used by spartialBone to record the guranteed correctnes self.origKind = kind def setFilterHook(self, hook): """ Installs *hook* as a callback function for new filters. *hook* will be called each time a new filter constrain is added to the query. This allows e. g. the relationalBone to rewrite constrains added after the initial processing of the query has been done (e. g. by ``listFilter()`` methods). :param hook: The function to register as callback. \ A value of None removes the currently active hook. :type hook: callable :returns: The previously registered hook (if any), or None. """ old = self._filterHook self._filterHook = hook return( old ) def setOrderHook(self, hook): """ Installs *hook* as a callback function for new orderings. *hook* will be called each time a :func:`db.Query.order` is called on this query. :param hook: The function to register as callback. \ A value of None removes the currently active hook. :type hook: callable :returns: The previously registered hook (if any), or None. """ old = self._orderHook self._orderHook = hook return( old ) def mergeExternalFilter(self, filters ): """ Safely merges filters according to the data model. Its only valid to call this function if the query has been created using :func:`server.skeleton.Skeleton.all`. 
Its safe to pass filters received from an external source (a user); unknown/invalid filters will be ignored, so the query-object is kept in a valid state even when processing malformed data. If complex queries are needed (e.g. filter by relations), this function shall also be used. See also :func:`server.db.Query.filter` for simple filters. :param filters: A dictionary of attributes and filter pairs. :type filters: dict :returns: Returns the query itself for chaining. :rtype: server.db.Query """ from server.bones import baseBone, relationalBone if "id" in filters: self.datastoreQuery = None logging.error("Filtering by id is no longer supported. Use key instead.") return self if self.srcSkel is None: raise NotImplementedError("This query has not been created using skel.all()") if self.datastoreQuery is None: #This query is allready unsatifiable and adding more constrains to this wont change this return( self ) skel = self.srcSkel if skel.searchIndex and "search" in filters: #We perform a Search via Google API - all other parameters are ignored try: searchRes = search.Index( name=skel.searchIndex ).search( query=search.Query( query_string=filters["search"], options=search.QueryOptions( limit=25 ) ) ) except search.QueryError: #We cant parse the query, treat it as verbatim qstr = u"\"%s\"" % filters["search"].replace(u"\"",u"") try: searchRes = search.Index(name=skel.searchIndex).search(query=search.Query(query_string=qstr, options=search.QueryOptions(limit=25))) except search.QueryError: # Still cant parse it searchRes = [] tmpRes = [ datastore_types.Key( encoded=x.doc_id[ 2: ] ) for x in searchRes ] if tmpRes: filters = [] for x in tmpRes: filters.append( datastore.Query( self.getKind(), { "%s =" % datastore_types.KEY_SPECIAL_PROPERTY: x } ) ) self.datastoreQuery = datastore.MultiQuery( filters, () ) else: self.datastoreQuery = None return( self ) #bones = [ (getattr( skel, key ), key) for key in dir( skel ) if not "__" in key and isinstance( getattr( skel, key ) , 
baseBone ) ] bones = [ (y,x) for x,y in skel.items() ] try: #First, filter non-relational bones for bone, key in [ x for x in bones if not isinstance( x[0], relationalBone ) ]: bone.buildDBFilter( key, skel, self, filters ) #Second, process orderings of non-relational bones for bone, key in [ x for x in bones if not isinstance( x[0], relationalBone ) ]: bone.buildDBSort( key, skel, self, filters ) #Now filter relational bones for bone, key in [ x for x in bones if isinstance( x[0], relationalBone ) ]: bone.buildDBFilter( key, skel, self, filters ) #finally process orderings of relational bones for bone, key in [ x for x in bones if isinstance( x[0], relationalBone ) ]: bone.buildDBSort( key, skel, self, filters ) except RuntimeError as e: logging.exception(e) self.datastoreQuery = None return( self ) if "search" in filters and filters["search"]: if isinstance( filters["search"], list ): taglist = [ "".join([y for y in unicode(x).lower() if y in conf["viur.searchValidChars"] ] ) for x in filters["search"] ] else: taglist = [ "".join([y for y in unicode(x).lower() if y in conf["viur.searchValidChars"] ]) for x in unicode(filters["search"]).split(" ")] assert not isinstance( self.datastoreQuery, datastore.MultiQuery ), "Searching using viur-tags is not possible on a query that already uses an IN-filter!" 
origFilter = self.datastoreQuery queries = [] for tag in taglist[:30]: #Limit to max 30 keywords q = datastore.Query( kind=origFilter.__kind ) q[ "viur_tags" ] = tag queries.append( q ) self.datastoreQuery = datastore.MultiQuery( queries, origFilter.__orderings ) for k, v in origFilter.items(): self.datastoreQuery[ k ] = v if "cursor" in filters and filters["cursor"] and filters["cursor"].lower()!="none": self.cursor( filters["cursor"] ) if "amount" in filters and str(filters["amount"]).isdigit() and int( filters["amount"] ) >0 and int( filters["amount"] ) <= 100: self.limit( int(filters["amount"]) ) if "postProcessSearchFilter" in dir( skel ): skel.postProcessSearchFilter( self, filters ) return( self ) def filter(self, filter, value=__undefinedC__ ): """ Adds a filter to this query. #fixme: Better description required here... The following examples are equivalent: ``filter( "name", "John" )`` and ``filter( {"name": "John"} )``. See also :func:`server.db.Query.mergeExternalFilter` for a safer filter implementation. :param filter: A dictionary to read the filters from, or a string (name of that filter) :type filter: dict | str :param value: The value of that filter. Only valid, if *key* is a string. :type: value: int | long | float | bytes | string | list | datetime :returns: Returns the query itself for chaining. 
:rtype: server.db.Query """ if self.datastoreQuery is None: #This query is already unsatisfiable and adding more constrains to this won't change this return( self ) if isinstance( filter, dict ): for k, v in filter.items(): self.filter( k, v ) return( self ) if self._filterHook is not None: try: r = self._filterHook( self, filter, value ) except RuntimeError: self.datastoreQuery = None return( self ) if r is None: # The Hook did something special directly on 'self' to apply that filter, # no need for us to do anything return( self ) filter, value = r # Cast keys into string if filter != datastore_types.KEY_SPECIAL_PROPERTY and isinstance(value, datastore_types.Key): value = str(value) if value!=None and (filter.endswith(" !=") or filter.lower().endswith(" in")): if isinstance( self.datastoreQuery, datastore.MultiQuery ): raise NotImplementedError("You cannot use multiple IN or != filter") origQuery = self.datastoreQuery queries = [] if filter.endswith("!="): q = datastore.Query( kind=self.getKind() ) q[ "%s <" % filter.split(" ")[0] ] = value queries.append( q ) q = datastore.Query( kind=self.getKind() ) q[ "%s >" % filter.split(" ")[0] ] = value queries.append( q ) else: #IN filter if not (isinstance( value, list ) or isinstance( value, tuple ) ): raise NotImplementedError("Value must be list or tuple if using IN filter!") for val in value: q = datastore.Query( kind=self.getKind() ) q[ "%s =" % filter.split(" ")[0] ] = val q.Order( *origQuery.__orderings ) queries.append( q ) self.datastoreQuery = MultiQuery( queries, origQuery.__orderings ) for k,v in origQuery.items(): self.datastoreQuery[ k ] = v elif filter and value is not __undefinedC__: self.datastoreQuery[ filter ] = value else: raise NotImplementedError("Incorrect call to query.filter()!") return( self ) def order(self, *orderings): """ Specify a query sorting. Resulting entities will be sorted by the first property argument, then by the second, and so on. The following example .. 
code-block:: python query = Query( "Person" ) query.order( "bday", ( "age", Query.DESCENDING ) ) sorts every Person in order of their birthday, starting with January 1. People with the same birthday are sorted by age, oldest to youngest. The direction for each sort property may be provided; if omitted, it defaults to ascending. ``order()`` may be called multiple times. Each call resets the sort order from scratch. If an inequality filter exists in this Query it must be the first property passed to ``order()``. Any number of sort orders may be used after the inequality filter property. Without inequality filters, any number of filters with different orders may be specified. Entities with multiple values for an order property are sorted by their lowest value. Note that a sort order implies an existence filter! In other words, Entities without the sort order property are filtered out, and *not* included in the query results. If the sort order property has different types in different entities - e.g. if bob['id'] is an int and fred['id'] is a string - the entities will be grouped first by the property type, then sorted within type. No attempt is made to compare property values across types. Raises BadArgumentError if any argument is of the wrong format. :param orderings: The properties to sort by, in sort order.\ Each argument may be either a string or (string, direction) 2-tuple. :param orderings: str | tuple :returns: Returns the query itself for chaining. 
:rtype: server.db.Query """ for reqOrder in orderings: if isinstance(reqOrder, str): fieldName = reqOrder elif isinstance(reqOrder, tuple): fieldName = reqOrder[0] else: raise BadArgumentError("Dont know what to do with %s" % type(fieldName),) if self._orderHook is not None: try: orderings = self._orderHook( self, orderings ) except RuntimeError: self.datastoreQuery = None return( self ) if orderings is None: return( self ) if self.datastoreQuery is None: return self.datastoreQuery.Order( *orderings ) return( self ) def ancestor(self, ancestor): """ Sets an ancestor for this query. This restricts the query to only return result entities that are descended from a given entity. In other words, all of the results will have the ancestor as their parent, or parent's parent, and so on. Raises BadArgumentError or BadKeyError if parent is not an existing Entity or Key in the data store. :param ancestor: Entity or Key. The key must be complete. :type ancestor: server.db.Entity | Key :returns: Returns the query itself for chaining. :rtype: server.db.Query """ self.datastoreQuery.Ancestor( ancestor ) return( self ) def cursor( self, cursor, endCursor=None ): """ Sets the start cursor for this query. The result set will only include results behind that cursor. The cursor is generated by an earlier query with exactly the same configuration. Its safe to use client-supplied cursors, a cursor can't be abused to access entities which don't match the current filters. :param cursor: The cursor key to set to the Query. :type cursor: str | datastore_query.Cursor :returns: Returns the query itself for chaining. 
:rtype: server.db.Query """ if isinstance( cursor, basestring ): cursor = datastore_query.Cursor( urlsafe=cursor ) elif isinstance( cursor, datastore_query.Cursor ) or cursor==None: pass else: raise ValueError("Cursor must be String, datastore_query.Cursor or None") if endCursor is not None: if isinstance( endCursor, basestring ): endCursor = datastore_query.Cursor( urlsafe=endCursor ) elif isinstance( cursor, datastore_query.Cursor ) or endCursor==None: pass else: raise ValueError("endCursor must be String, datastore_query.Cursor or None") qo = self.datastoreQuery.__query_options self.datastoreQuery.__query_options = datastore_query.QueryOptions( keys_only=qo.keys_only, produce_cursors=qo.produce_cursors, start_cursor=cursor, end_cursor=endCursor or qo.end_cursor, projection=qo.projection ) self._origCursor = cursor return( self ) def limit( self, amount ): """ Sets the query limit to *amount* entities in the result. Specifying an amount of 0 disables the limit (use with care!). :param amount: The maximum number of entities. :type amount: int :returns: Returns the query itself for chaining. :rtype: server.db.Query """ self.amount = amount return self def isKeysOnly(self): """ Returns True if this query is configured as *keys only*, False otherwise. :rtype: bool """ return( self.datastoreQuery.IsKeysOnly() ) def getQueryOptions(self): """ Returns a datastore_query.QueryOptions for the current instance. :rtype: datastore_query.QueryOptions """ return( self.datastoreQuery.GetQueryOptions() ) def getQuery(self): """ Returns a datastore_query.Query for the current instance. :rtype: datastore_query.Query """ return( self.datastoreQuery.GetQuery() ) def getOrder(self): """ Gets a datastore_query.Order for the current instance. :returns: The sort orders set on the current query, or None. 
:rtype: datastore_query.Order or None """ if self.datastoreQuery is None: return( None ) return( self.datastoreQuery.GetOrder() ) def getFilter(self): """ Returns the filters applied to the current query as dictionary. :returns: Filter as dictionary. :rtype: dict """ if self.datastoreQuery is None: return( None ) elif isinstance(self.datastoreQuery, MultiQuery): res = [] for qry in getattr(self.datastoreQuery,"_MultiQuery__bound_queries"): res.append( { k:v for (k, v) in qry.items() } ) return res return( { k:v for (k, v) in self.datastoreQuery.items() } ) def getOrders(self): """ Returns a list of orders applied to this query. Every element in the list returned (if any), is a tuple of (property,direction). Property is the name of the property used to sort, direction a bool (false => ascending, True => descending). :returns: list of orderings, in tuples (property,direction). :rtype: list """ try: order = self.datastoreQuery.__orderings return( [ (prop, dir) for (prop, dir) in order ] ) except: return( [] ) def getCursor(self): """ Get a valid cursor from the last run of this query. The source of this cursor varies depending on what the last call was: - :func:`server.db.Query.run`: A cursor that points immediatelly behind the\ last result pulled off the returned iterator. - :func:`server.db.Query.get`:: A cursor that points immediatelly behind the\ last result in the returned list. - :func:`server.db.Query.count`: A cursor that points immediatelly behind the\ last result counted. :returns: A cursor that can be used in subsequent query requests. :rtype: datastore_query.Cursor :raises: :exc:`AssertionError` if the query has not yet been run or cannot be compiled. """ if self.datastoreQuery is None: return( None ) return( self.datastoreQuery.GetCursor() ) def getKind(self): """ Returns the kind of this query. :rtype: str """ if self.datastoreQuery is None: return( None ) return( self.datastoreQuery.__kind ) def setKind( self, newKind ): """ Sets the kind of this query. 
:param newKind: New query kind. :type newKind: str """ if self.datastoreQuery is None: return self.datastoreQuery.__kind = newKind def getAncestor(self): """ Returns the ancestor of this query (if any). :rtype: str | None """ return( self.datastoreQuery.ancestor ) def run(self, limit=-1, keysOnly=False, **kwargs): """ Run this query. It is more efficient to use *limit* if the number of results is known. If queried data is wanted as instances of Skeletons, :func:`server.db.Query.fetch` should be used. :param limit: Limits the query to the defined maximum entities. :type limit: int :param keysOnly: If the query should be used to retrieve entity keys only. :type keysOnly: bool :param kwargs: Any keyword arguments accepted by datastore_query.QueryOptions(). :returns: An iterator that provides access to the query results iterator :rtype: list :raises: :exc:`BadFilterError` if a filter string is invalid :raises: :exc:`BadValueError` if a filter value is invalid. :raises: :exc:`BadQueryError` if an IN filter in combination with a sort order on\ another property is provided """ if self.datastoreQuery is None: return( None ) origLimit = limit if limit!=-1 else self.amount kwargs["limit"] = origLimit if not isinstance( self.datastoreQuery, datastore.MultiQuery ): internalKeysOnly = True else: internalKeysOnly = False if conf["viur.db.caching" ]<2: # Query-Caching is disabled, make this query keys-only if (and only if) explicitly requested for this query internalKeysOnly = keysOnly if self._customMultiQueryMerge: # We do a really dirty trick here: Running the queries in our MultiQuery by hand, as # we don't want the default sort&merge functionality from :class:`google.appengine.api.datastore.MultiQuery` assert isinstance( self.datastoreQuery, MultiQuery), "Got a customMultiQueryMerge - but no multiQuery" res = [] if self._calculateInternalMultiQueryAmount: kwargs["limit"] = self._calculateInternalMultiQueryAmount(kwargs["limit"]) for qry in 
getattr(self.datastoreQuery,"_MultiQuery__bound_queries"): res.append( qry.Run( keys_only=internalKeysOnly, **kwargs ) ) # As the results are now available, perform the actual merge res = self._customMultiQueryMerge(self, res, origLimit) else: res = list( self.datastoreQuery.Run( keys_only=internalKeysOnly, **kwargs ) ) if conf["viur.debug.traceQueries"]: kindName = self.getKind() orders = self.getOrders() filters = self.getFilter() logging.debug("Queried %s with filter %s and orders %s. Returned %s results" % (kindName, filters, orders, len(res))) if keysOnly and not internalKeysOnly: #Wanted key-only, but this wasn't directly possible if len(res)>0 and res[0].key().kind()!=self.origKind and res[0].key().parent().kind()==self.origKind: #Fixing the kind - it has been changed (probably by quering an relation) res = [ x.key().parent() for x in res ] if res and isinstance(res[0], datastore_types.Key): return res else: return( [x.key() for x in res] ) elif keysOnly and internalKeysOnly: #Keys-only requested and we did it if len(res)>0 and res[0].kind()!=self.origKind and res[0].parent().kind()==self.origKind: #Fixing the kind - it has been changed (probably by quering an relation) res = [ x.parent() for x in res ] return( res ) elif not keysOnly and not internalKeysOnly: #Full query requested and we did it if len(res)>0 and res[0].key().kind()!=self.origKind and res[0].key().parent().kind()==self.origKind: #Fixing the kind - it has been changed (probably by quering an relation) res = Get( [ x.key().parent() for x in res ] ) return( res ) else: #Well.. Full results requested, but we did keys-only if len(res)>0 and res[0].kind()!=self.origKind and res[0].parent().kind()==self.origKind: #Fixing the kind - it has been changed (probably by quering an relation) res = [ x.parent() for x in res ] return( Get( res ) ) def fetch(self, limit=-1, **kwargs ): """ Run this query and fetch results as :class:`server.skeleton.SkelList`. 
This function is similar to :func:`server.db.Query.run`, but returns a :class:`server.skeleton.SkelList` instance instead of Entities. :warning: The query must be limited! If queried data is wanted as instances of Entity, :func:`server.db.Query.run` should be used. :param limit: Limits the query to the defined maximum entities. \ A maxiumum value of 99 entries can be fetched at once. :type limit: int :raises: :exc:`BadFilterError` if a filter string is invalid :raises: :exc:`BadValueError` if a filter value is invalid. :raises: :exc:`BadQueryError` if an IN filter in combination with a sort order on\ another property is provided """ if self.srcSkel is None: raise NotImplementedError("This query has not been created using skel.all()") amount = limit if limit!=-1 else self.amount if amount < 1 or amount > 100: raise NotImplementedError("This query is not limited! You must specify an upper bound using limit() between 1 and 100") from server.skeleton import SkelList res = SkelList( self.srcSkel ) dbRes = self.run( amount ) res.customQueryInfo = self.customQueryInfo if dbRes is None: return( res ) for e in dbRes: #s = self.srcSkel.clone() valueCache = {} self.srcSkel.setValuesCache(valueCache) self.srcSkel.setValues(e) res.append( self.srcSkel.getValuesCache() ) try: c = self.datastoreQuery.GetCursor() if c: res.cursor = c.urlsafe() else: res.cursor = None except AssertionError: #No Cursors available on MultiQueries ( in or != ) res.cursor = None return( res ) def iter(self, keysOnly=False): """ Run this query and return an iterator for the results. The advantage of this function is, that it allows for iterating over a large result-set, as it hasn't have to be pulled in advance from the data store. The disadvantage is, that is supports no caching yet. This function intentionally ignores a limit set by :func:`server.db.Query.limit`. :warning: If iterating over a large result set, make sure the query supports cursors. 
\ Otherwise, it might not return all results as the AppEngine doesn't maintain the view \ for a query for more than ~30 seconds. :param keysOnly: If the query should be used to retrieve entity keys only. :type keysOnly: bool """ if self.datastoreQuery is None: #Noting to pull here raise StopIteration() if isinstance( self.datastoreQuery, datastore.MultiQuery ) and keysOnly: # Wanted KeysOnly, but MultiQuery is unable to give us that. for res in self.datastoreQuery.Run(): yield res.key() else: #The standard-case stopYield = False lastCursor = None while not stopYield: try: for res in self.datastoreQuery.Run( keys_only=keysOnly ): yield res try: lastCursor = self.datastoreQuery.GetCursor() except Exception as e: pass stopYield = True # No more results to yield except: if lastCursor is None: stopYield = True logging.warning("Cannot this continue this query - it has no cursors") logging.warning("Not all results have been yielded!") else: logging.debug("Continuing iter() on fresh a query") q = self.clone() q.cursor( lastCursor ) self.datastoreQuery = q.datastoreQuery lastCursor = None def get( self ): """ Returns only the first entity of the current query. :returns: dict on success, or None if the result-set is empty. :rtype: dict """ try: res = list( self.run( limit=1 ) )[0] return( res ) except IndexError: #Empty result-set return( None ) except TypeError: #Also Empty result-set return( None ) def getSkel( self ): """ Returns a matching :class:`server.db.skeleton.Skeleton` instance for the current query. Its only possible to use this function if this query has been created using :func:`server.skeleton.Skeleton.all`. :returns: The Skeleton or None if the result-set is empty. 
:rtype: :class:`server.skeleton.Skeleton` """ if self.srcSkel is None: raise NotImplementedError("This query has not been created using skel.all()") res = self.get() if res is None: return( None ) #s = self.srcSkel.clone() self.srcSkel.setValues(res) return self.srcSkel def count( self, limit=1000, **kwargs ): """ Returns the number of entities that this query matches. :param limit: Limits the query to the defined maximum entities count.\ If there are more results than this limit, stop short and just return this number.\ Providing this argument makes the count operation more efficient. :type limit: int :param config: Optional configuration to use for this request. This must be specified\ as a keyword argument. :type config: dict :returns: The number of results. :rtype: int """ return( self.datastoreQuery.Count( limit, **kwargs ) ) def clone(self, keysOnly=None): """ Returns a deep copy of the current query. :param keysOnly: If the query should be used to retrieve entity keys only\ in the new query. :type keysOnly: bool :returns: The cloned query. :rtype: server.db.Query """ if keysOnly is None: keysOnly = self.isKeysOnly() res = Query( self.getKind(), self.srcSkel, keys_only=keysOnly ) res.limit( self.amount ) for k, v in self.getFilter().items(): res.filter( k, v ) orders = self.getOrders() if len( orders )==1: res.order( orders[0] ) elif len( orders ) > 1: res.order( tuple( orders ) ) return( res ) class Entity( datastore.Entity ): """ Wraps ``datastore.Entity`` to prevent trying to add a string with more than 500 chars to an index and providing a camelCase-API. """ def _fixUnindexedProperties( self ): """ Ensures that no property with strlen > 500 makes it into the index. """ unindexed = list( self.getUnindexedProperties() ) for k,v in self.items(): if isinstance( v, basestring ) and len( v )>=500 and not k in unindexed: logging.warning("Your property %s cant be indexed!" 
% k) unindexed.append( k ) elif isinstance( v, list ) or isinstance( v, tuple() ): if any( [ isinstance(x,basestring) and len(x)>=500 for x in v] ) and not k in unindexed: logging.warning("Your property %s cant be indexed!" % k) unindexed.append( k ) self.set_unindexed_properties( unindexed ) def isSaved(self): """ Returns True if this entity has been saved to the data store. :rtype: bool """ return( self.is_saved() ) def entityGroup(self): """ Returns this entity's entity group as a Key. Note that the returned Key will be incomplete if this is a a root entity and its key is incomplete. """ return( self.entity_group() ) def getUnindexedProperties(self): """ Returns this entity's unindexed properties, as a frozen set of strings. """ return( self.unindexed_properties() ) def setUnindexedProperties(self, unindexed_properties): """ Sets the list of unindexed properties. Properties listed here are *not* saved in an index; its impossible to use them in a query filter / sort. But it saves one db-write op per property listed here. """ self.set_unindexed_properties( unindexed_properties ) def __setitem__(self, name, value): """ Implements the [] operator. Used to set property value(s). :param name: Name of the property to set. :type name: str :param value: Any value to set tot the property. :raises: :exc:`BadPropertyError` if the property name is the \ empty string or not a string. :raises: :exc:`BadValueError` if the value is not a supported type. """ if isinstance(value,list) or isinstance(value,tuple): # We cant store an empty list, so we catch any attempts # and store None. As "does not exists" queries aren't # possible anyway, this makes no difference if len( value ) == 0: value = None super( Entity, self ).__setitem__( name, value ) def set(self, key, value, indexed=True): """ Sets a property. :param key: key of the property to set. :type key: str :param value: Any value to set tot the property. :param indexed: Defines if the value is indexed. 
:type indexed: bool :raises: :exc:`BadPropertyError` if the property name is the \ empty string or not a string. :raises: :exc:`BadValueError` if the value is not a supported type. """ unindexed = list(self.getUnindexedProperties()) if not indexed and not key in unindexed: unindexed.append(key) self.setUnindexedProperties(unindexed) elif indexed and key in unindexed: unindexed.remove(key) self.setUnindexedProperties(unindexed) self[key] = value @staticmethod def FromDatastoreEntity( entity ): """ Converts a datastore.Entity into a :class:`db.server.Entity`. Required, as ``datastore.Get()`` always returns a datastore.Entity (and it seems that currently there is no valid way to change that). """ res = Entity( entity.kind(), parent=entity.key().parent(), _app=entity.key().app(), name=entity.key().name(), id=entity.key().id(), unindexed_properties=entity.unindexed_properties(), namespace=entity.namespace() ) res.update( entity ) return( res ) AllocateIdsAsync = datastore.AllocateIdsAsync AllocateIds = datastore.AllocateIds RunInTransaction = datastore.RunInTransaction RunInTransactionCustomRetries = datastore.RunInTransactionCustomRetries RunInTransactionOptions = datastore.RunInTransactionOptions TransactionOptions = datastore_rpc.TransactionOptions Key = datastore_types.Key ## Errors ## Error = datastore_errors.Error BadValueError = datastore_errors.BadValueError BadPropertyError = datastore_errors.BadPropertyError BadRequestError = datastore_errors.BadRequestError EntityNotFoundError = datastore_errors.EntityNotFoundError BadArgumentError = datastore_errors.BadArgumentError QueryNotFoundError = datastore_errors.QueryNotFoundError TransactionNotFoundError = datastore_errors.TransactionNotFoundError Rollback = datastore_errors.Rollback TransactionFailedError = datastore_errors.TransactionFailedError BadFilterError = datastore_errors.BadFilterError BadQueryError = datastore_errors.BadQueryError BadKeyError = datastore_errors.BadKeyError InternalError = 
datastore_errors.InternalError NeedIndexError = datastore_errors.NeedIndexError ReferencePropertyResolveError = datastore_errors.ReferencePropertyResolveError Timeout = datastore_errors.Timeout CommittedButStillApplying = datastore_errors.CommittedButStillApplying DatastoreQuery = datastore.Query MultiQuery = datastore.MultiQuery Cursor = datastore_query.Cursor IsInTransaction = datastore.IsInTransaction #Consts KEY_SPECIAL_PROPERTY = datastore_types.KEY_SPECIAL_PROPERTY ASCENDING = datastore_query.PropertyOrder.ASCENDING DESCENDING = datastore_query.PropertyOrder.DESCENDING __all__ = [ PutAsync, Put, GetAsync, Get, DeleteAsync, Delete, AllocateIdsAsync, AllocateIds, RunInTransaction, RunInTransactionCustomRetries, RunInTransactionOptions, TransactionOptions, Error, BadValueError, BadPropertyError, BadRequestError, EntityNotFoundError, BadArgumentError, QueryNotFoundError, TransactionNotFoundError, Rollback, TransactionFailedError, BadFilterError, BadQueryError, BadKeyError, BadKeyError, InternalError, NeedIndexError, ReferencePropertyResolveError, Timeout, CommittedButStillApplying, Entity, Query, DatastoreQuery, MultiQuery, Cursor, KEY_SPECIAL_PROPERTY, ASCENDING, DESCENDING, IsInTransaction ]<|fim▁end|>
if conf["viur.db.caching" ]>0: if isinstance( entities, Entity ): #Just one:
<|file_name|>webhdfs.js<|end_file_name|><|fim▁begin|>'use strict'; var fs = require('fs'); var demand = require('must'); var sinon = require('sinon'); var WebHDFS = require('../lib/webhdfs'); var WebHDFSProxy = require('webhdfs-proxy'); var WebHDFSProxyMemoryStorage = require('webhdfs-proxy-memory'); describe('WebHDFS', function () { var path = '/files/' + Math.random(); var hdfs = WebHDFS.createClient({ user: process.env.USER, port: 45000 }); this.timeout(10000); before(function (done) { var opts = { path: '/webhdfs/v1', http: { port: 45000 } }; WebHDFSProxy.createServer(opts, WebHDFSProxyMemoryStorage, done); }); it('should make a directory', function (done) { hdfs.mkdir(path, function (err) { demand(err).be.null(); done(); }); }); it('should create and write data to a file', function (done) { hdfs.writeFile(path + '/file-1', 'random data', function (err) { demand(err).be.null(); done(); }); }); it('should append content to an existing file', function (done) { hdfs.appendFile(path + '/file-1', 'more random data', function (err) { demand(err).be.null(); done(); }); }); it('should create and stream data to a file', function (done) { var localFileStream = fs.createReadStream(__filename); var remoteFileStream = hdfs.createWriteStream(path + '/file-2'); var spy = sinon.spy(); localFileStream.pipe(remoteFileStream); remoteFileStream.on('error', spy); remoteFileStream.on('finish', function () { demand(spy.called).be.falsy(); done(); }); }); it('should append stream content to an existing file', function (done) { var localFileStream = fs.createReadStream(__filename); var remoteFileStream = hdfs.createWriteStream(path + '/file-2', true); var spy = sinon.spy(); localFileStream.pipe(remoteFileStream); remoteFileStream.on('error', spy); remoteFileStream.on('finish', function () { demand(spy.called).be.falsy(); done(); }); }); it('should open and read a file stream', function (done) { var remoteFileStream = hdfs.createReadStream(path + '/file-1'); var spy = sinon.spy(); var 
data = []; remoteFileStream.on('error', spy); remoteFileStream.on('data', function onData (chunk) { data.push(chunk); }); remoteFileStream.on('finish', function () { demand(spy.called).be.falsy(); demand(Buffer.concat(data).toString()).be.equal('random datamore random data'); done(); }); }); it('should open and read a file', function (done) { hdfs.readFile(path + '/file-1', function (err, data) { demand(err).be.null(); demand(data.toString()).be.equal('random datamore random data'); done(); }); }); it('should list directory status', function (done) { hdfs.readdir(path, function (err, files) { demand(err).be.null(); demand(files).have.length(2); demand(files[0].pathSuffix).to.eql('file-1'); demand(files[1].pathSuffix).to.eql('file-2'); demand(files[0].type).to.eql('FILE'); demand(files[1].type).to.eql('FILE'); done(); }); }); it('should change file permissions', function (done) { hdfs.chmod(path, '0777', function (err) { demand(err).be.null(); done(); }); }); it('should change file owner', function (done) { hdfs.chown(path, process.env.USER, 'supergroup', function (err) { demand(err).be.null(); done(); }); }); it('should rename file', function (done) { hdfs.rename(path+ '/file-2', path + '/bigfile', function (err) { demand(err).be.null(); done(); }); }); it('should check file existence', function (done) { hdfs.exists(path + '/bigfile', function (exists) { demand(exists).be.true(); done(); }); }); it('should stat file', function (done) { hdfs.stat(path + '/bigfile', function (err, stats) { demand(err).be.null(); demand(stats).be.object(); demand(stats.type).to.eql('FILE'); demand(stats.owner).to.eql(process.env.USER); done(); }); }); it('should create symbolic link', function (done) { hdfs.symlink(path+ '/bigfile', path + '/biggerfile', function (err) { // Pass if server doesn't support symlinks if (err && err.message.indexOf('Symlinks not supported') !== -1) { done(); } else { demand(err).be.null(); done(); } }); }); it('should delete file', function (done) { 
hdfs.rmdir(path+ '/file-1', function (err) { demand(err).be.null(); done(); }); }); it('should delete directory recursively', function (done) { hdfs.rmdir(path, true, function (err) {<|fim▁hole|> done(); }); }); it('should support optional opts', function (done) { var myOpts = { "user.name": "testuser" } hdfs.writeFile(path + '/file-1', 'random data', myOpts, function (err) { demand(err).be.null(); done(); }); }); }); describe('WebHDFS with requestParams', function() { var path = '/files/' + Math.random(); var hdfs = WebHDFS.createClient({ user: process.env.USER, port: 45001 }, { headers: { 'X-My-Custom-Header': 'Kerberos' } }); this.timeout(10000); before(function (done) { var opts = { path: '/webhdfs/v1', http: { port: 45001 } }; WebHDFSProxy.createServer(opts, WebHDFSProxyMemoryStorage, done); }); it('should override request() options', function (done) { var localFileStream = fs.createReadStream(__filename); var remoteFileStream = hdfs.createWriteStream(path + '/file-2'); var spy = sinon.spy(); localFileStream.pipe(remoteFileStream); remoteFileStream.on('error', spy); remoteFileStream.on('response', function(response) { var customHeader = response.req.getHeader('X-My-Custom-Header'); demand(customHeader).equal('Kerberos'); demand(spy.called).be.falsy(); done(); }) }); it('should pass requestParams to _sendRequest', function (done) { var req = hdfs.readdir('/'); req.on('response', function(response) { var customHeader = response.req.getHeader('X-My-Custom-Header'); demand(customHeader).equal('Kerberos'); done(); }); }); it('should not override explicit opts with _sendRequest', function (done) { var mostSpecificParams = { headers: { 'X-My-Custom-Header': 'Bear' } } var endpoint = hdfs._getOperationEndpoint('liststatus', '/file-2'); hdfs._sendRequest('GET', endpoint, mostSpecificParams, function(err, response, body) { var customHeader = response.req.getHeader('X-My-Custom-Header'); demand(customHeader).equal('Bear'); done(err) }); }); });<|fim▁end|>
demand(err).be.null();
<|file_name|>basetest_pgt.py<|end_file_name|><|fim▁begin|>from __future__ import print_function from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from builtins import * # NOQA from future import standard_library standard_library.install_aliases() # NOQA import chainer from chainer import functions as F from chainer import links as L from chainer import optimizers import numpy as np from chainerrl.envs.abc import ABC from chainerrl.explorers.epsilon_greedy import LinearDecayEpsilonGreedy from chainerrl.links import Sequence from chainerrl import policies from chainerrl import q_function from chainerrl import replay_buffer from basetest_training import _TestTraining class _TestPGTOnABC(_TestTraining): <|fim▁hole|> policy = model['policy'] q_func = model['q_function'] actor_opt = optimizers.Adam(alpha=1e-4) actor_opt.setup(policy) critic_opt = optimizers.Adam(alpha=1e-3) critic_opt.setup(q_func) explorer = self.make_explorer(env) rbuf = self.make_replay_buffer(env) return self.make_pgt_agent(env=env, model=model, actor_opt=actor_opt, critic_opt=critic_opt, explorer=explorer, rbuf=rbuf, gpu=gpu) def make_pgt_agent(self, env, model, actor_opt, critic_opt, explorer, rbuf, gpu): raise NotImplementedError() def make_explorer(self, env): def random_action_func(): a = env.action_space.sample() if isinstance(a, np.ndarray): return a.astype(np.float32) else: return a return LinearDecayEpsilonGreedy(1.0, 0.2, 1000, random_action_func) def make_replay_buffer(self, env): return replay_buffer.ReplayBuffer(10 ** 5) class _TestPGTOnContinuousPOABC(_TestPGTOnABC): def make_model(self, env): n_dim_obs = env.observation_space.low.size n_dim_action = env.action_space.low.size n_hidden_channels = 50 policy = Sequence( L.Linear(n_dim_obs, n_hidden_channels), F.relu, L.Linear(n_hidden_channels, n_hidden_channels), F.relu, L.LSTM(n_hidden_channels, n_hidden_channels), policies.FCGaussianPolicy( n_input_channels=n_hidden_channels, 
action_size=n_dim_action, min_action=env.action_space.low, max_action=env.action_space.high) ) q_func = q_function.FCLSTMSAQFunction( n_dim_obs=n_dim_obs, n_dim_action=n_dim_action, n_hidden_layers=2, n_hidden_channels=n_hidden_channels) return chainer.Chain(policy=policy, q_function=q_func) def make_env_and_successful_return(self, test): return ABC(discrete=False, partially_observable=True, deterministic=test), 1 def make_replay_buffer(self, env): return replay_buffer.EpisodicReplayBuffer(10 ** 5) class _TestPGTOnContinuousABC(_TestPGTOnABC): def make_model(self, env): n_dim_obs = env.observation_space.low.size n_dim_action = env.action_space.low.size n_hidden_channels = 50 policy = policies.FCGaussianPolicy( n_input_channels=n_dim_obs, n_hidden_layers=2, n_hidden_channels=n_hidden_channels, action_size=n_dim_action, min_action=env.action_space.low, max_action=env.action_space.high) q_func = q_function.FCSAQFunction( n_dim_obs=n_dim_obs, n_dim_action=n_dim_action, n_hidden_layers=2, n_hidden_channels=n_hidden_channels) return chainer.Chain(policy=policy, q_function=q_func) def make_env_and_successful_return(self, test): return ABC(discrete=False, deterministic=test), 1<|fim▁end|>
def make_agent(self, env, gpu): model = self.make_model(env)
<|file_name|>0007_contact_recruiter.py<|end_file_name|><|fim▁begin|># Generated by Django 2.0.4 on 2018-04-24 21:10 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('contacts', '0006_auto_20180423_1629'), ] operations = [ migrations.AddField(<|fim▁hole|> field=models.BooleanField(default=False), ), ]<|fim▁end|>
model_name='contact', name='recruiter',
<|file_name|>Reconciler.java<|end_file_name|><|fim▁begin|>// The MIT License (MIT) // // Copyright (c) 2016 Tim Jones // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
package io.github.jonestimd.finance.file; import java.util.Collection; import java.util.Comparator; import java.util.List; import java.util.Objects; import java.util.function.Predicate; import java.util.stream.Collectors; import io.github.jonestimd.finance.domain.transaction.Transaction; import io.github.jonestimd.finance.swing.transaction.TransactionTableModel; import static io.github.jonestimd.util.JavaPredicates.*; public class Reconciler { private final TransactionTableModel tableModel; private final List<Transaction> uncleared; public Reconciler(TransactionTableModel tableModel) { this.tableModel = tableModel; this.uncleared = tableModel.getBeans().stream() .filter(not(Transaction::isCleared)).filter(not(Transaction::isNew)) .collect(Collectors.toList()); } public void reconcile(Collection<Transaction> transactions) { transactions.forEach(this::reconcile); } private void reconcile(Transaction transaction) { Transaction toClear = select(transaction); if (toClear.isNew()) {<|fim▁hole|> else { tableModel.setValueAt(true, tableModel.rowIndexOf(toClear), tableModel.getClearedColumn()); } } private Transaction select(Transaction transaction) { return uncleared.stream().filter(sameProperties(transaction)).min(nearestDate(transaction)).orElse(transaction); } private Predicate<Transaction> sameProperties(Transaction transaction) { return t2 -> transaction.getAmount().compareTo(t2.getAmount()) == 0 && transaction.getAssetQuantity().compareTo(t2.getAssetQuantity()) == 0 && Objects.equals(transaction.getPayee(), t2.getPayee()) && Objects.equals(transaction.getSecurity(), t2.getSecurity()); } private Comparator<Transaction> nearestDate(Transaction transaction) { return Comparator.comparingInt(t -> transaction.getDate().compareTo(t.getDate())); } }<|fim▁end|>
toClear.setCleared(true); tableModel.queueAdd(tableModel.getBeanCount()-1, toClear); }
<|file_name|>apprun-dev-tools.js<|end_file_name|><|fim▁begin|>var _a; import app from './app'; import toHTML from './vdom-to-html'; import { _createEventTests, _createStateTests } from './apprun-dev-tools-tests'; app['debug'] = true; window['_apprun-help'] = ['', () => { Object.keys(window).forEach(cmd => { if (cmd.startsWith('_apprun-')) { cmd === '_apprun-help' ? console.log('AppRun Commands:') : console.log(`* ${cmd.substring(8)}: ${window[cmd][0]}`); } }); }]; function newWin(html) { const win = window.open('', '_apprun_debug', 'toolbar=0'); win.document.write(`<html> <title>AppRun Analyzer | ${document.location.href}</title> <style> body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI" } li { margin-left: 80px; } </style> <body> <div id="main">${html}</div> </script> </body> </html>`); win.document.close(); } const get_components = () => { const o = { components: {} }; app.run('get-components', o); const { components } = o; return components; }; const viewElement = element => app.h("div", null, element.tagName.toLowerCase(), element.id ? '#' + element.id : '', ' ', element.className && element.className.split(' ').map(c => '.' 
+ c).join()); const viewComponents = state => { const Events = ({ events }) => app.h("ul", null, events && events.filter(event => event.name !== '.').map(event => app.h("li", null, event.name))); const Components = ({ components }) => app.h("ul", null, components.map(component => app.h("li", null, app.h("div", null, component.constructor.name), app.h(Events, { events: component['_actions'] })))); return app.h("ul", null, state.map(({ element, comps }) => app.h("li", null, app.h("div", null, viewElement(element)), app.h(Components, { components: comps })))); }; const viewEvents = state => { const Components = ({ components }) => app.h("ul", null, components.map(component => app.h("li", null, app.h("div", null, component.constructor.name)))); const Events = ({ events, global }) => app.h("ul", null, events && events .filter(event => event.global === global && event.event !== '.') .map(({ event, components }) => app.h("li", null, app.h("div", null, event), app.h(Components, { components: components })))); return app.h("div", null, app.h("div", null, "GLOBAL EVENTS"), app.h(Events, { events: state, global: true }), app.h("div", null, "LOCAL EVENTS"), app.h(Events, { events: state, global: false })); }; const _events = (print) => { const global_events = app['_events']; const events = {}; const cache = get_components(); const add_component = component => component['_actions'].forEach(event => { events[event.name] = events[event.name] || []; events[event.name].push(component); }); if (cache instanceof Map) { for (let [key, comps] of cache) { comps.forEach(add_component); } } else { Object.keys(cache).forEach(el => cache[el].forEach(add_component)); } const data = []; Object.keys(events).forEach(event => { data.push({ event, components: events[event], global: global_events[event] ? true : false }); }); data.sort(((a, b) => a.event > b.event ? 
1 : -1)).map(e => e.event); if (print) { const vdom = viewEvents(data); newWin(toHTML(vdom)); } else { console.log('=== GLOBAL EVENTS ==='); data.filter(event => event.global && event.event !== '.') .forEach(({ event, components }) => console.log({ event }, components)); console.log('=== LOCAL EVENTS ==='); data.filter(event => !event.global && event.event !== '.') .forEach(({ event, components }) => console.log({ event }, components)); } }; const _components = (print) => { const components = get_components(); const data = []; if (components instanceof Map) { for (let [key, comps] of components) { const element = typeof key === 'string' ? document.getElementById(key) : key; data.push({ element, comps }); } } else { Object.keys(components).forEach(el => { const element = typeof el === 'string' ? document.getElementById(el) : el; data.push({ element, comps: components[el] }); }); } if (print) { const vdom = viewComponents(data); newWin(toHTML(vdom)); } else { data.forEach(({ element, comps }) => console.log(element, comps)); } }; let debugging = Number((_a = window === null || window === void 0 ? void 0 : window.localStorage) === null || _a === void 0 ? void 0 : _a.getItem('__apprun_debugging__')) || 0; app.on('debug', p => { if (debugging & 1 && p.event) console.log(p); if (debugging & 2 && p.vdom) console.log(p); }); window['_apprun-components'] = ['components [print]', (p) => { _components(p === 'print'); }]; window['_apprun-events'] = ['events [print]', (p) => { _events(p === 'print'); }]; window['_apprun-log'] = ['log [event|view] on|off', (a1, a2) => { var _a; if (a1 === 'on') { debugging = 3; } else if (a1 === 'off') { debugging = 0; } else if (a1 === 'event') { if (a2 === 'on') { debugging |= 1; } else if (a2 === 'off') { debugging &= ~1; } } else if (a1 === 'view') { if (a2 === 'on') { debugging |= 2; } else if (a2 === 'off') { debugging &= ~2; } } console.log(`* log ${a1} ${a2 || ''}`); (_a = window === null || window === void 0 ? 
void 0 : window.localStorage) === null || _a === void 0 ? void 0 : _a.setItem('__apprun_debugging__', `${debugging}`); }]; window['_apprun-create-event-tests'] = ['create-event-tests', () => _createEventTests() ]; window['_apprun-create-state-tests'] = ['create-state-tests <start|stop>', (p) => _createStateTests(p) ]; window['_apprun'] = (strings) => { const [cmd, ...p] = strings[0].split(' ').filter(c => !!c); const command = window[`_apprun-${cmd}`]; if (command) command[1](...p); else window['_apprun-help'][1](); }; console.info('AppRun DevTools 2.27: type "_apprun `help`" to list all available commands.'); const reduxExt = window['__REDUX_DEVTOOLS_EXTENSION__']; if (reduxExt) { let devTools_running = false; const devTools = window['__REDUX_DEVTOOLS_EXTENSION__'].connect(); if (devTools) { const hash = location.hash || '#'; devTools.send(hash, ''); const buf = [{ component: null, state: '' }]; console.info('Connected to the Redux DevTools'); devTools.subscribe((message) => { if (message.type === 'START') devTools_running = true; else if (message.type === 'STOP') devTools_running = false; else if (message.type === 'DISPATCH') { // console.log('From Redux DevTools: ', message); const idx = message.payload.index; if (idx === 0) { app.run(hash); } else { const { component, state } = buf[idx]; component === null || component === void 0 ? void 0 : component.setState(state);<|fim▁hole|> const send = (component, action, state) => { if (state == null) return; buf.push({ component, state }); devTools.send(action, state); }; app.on('debug', p => { if (devTools_running && p.event) { const state = p.newState; const type = p.event; const payload = p.p; const action = { type, payload }; const component = p.component; if (state instanceof Promise) { state.then(s => send(component, action, s)); } else { send(component, action, state); } } }); } } //# sourceMappingURL=apprun-dev-tools.js.map<|fim▁end|>
} } });
<|file_name|>ftp.py<|end_file_name|><|fim▁begin|><|fim▁hole|>import os from ftplib import FTP def place_file(ftp, filename): ftp.storbinary('STOR ' + filename,open(filename, 'rb')) if __name__ == '__main__': url = 'ftp.k-bits.com' ftp = FTP(url) user = 'usuario1@k-bits.com' passw = 'happy1234' ftp.login(user, passw) remoto = [] ftp.dir(remoto.append) for r in remoto: print(r) directorio_local = os.listdir('.') place_file(ftp, 'Banamex.csv') ftp.quit()<|fim▁end|>
from __future__ import print_function
<|file_name|>Rain_Water_Trapping.py<|end_file_name|><|fim▁begin|># Rain_Water_Trapping def trappedWater(a, size) : # left[i] stores height of tallest bar to the to left of it including itself left = [0] * size # Right [i] stores height of tallest bar to the to right of it including itself right = [0] * size # Initialize result waterVolume = 0 <|fim▁hole|> # filling right (list/array) right[size - 1] = a[size - 1] for i in range(size - 2, - 1, - 1): right[i] = max(right[i + 1], a[i]); # Calculating volume of the accumulated water element by element for i in range(0, size): waterVolume += min(left[i],right[i]) - a[i] return waterVolume # main program arr =[] n = int(input()) #input the number of towers for i in range(n): arr.append(int(input())) #storing length of each tower in array print("Maximum water that can be accumulated is ", trappedWater(arr, len(arr))) #Input: #12 #0 #1 #0 #2 #1 #0 #1 #3 #2 #1 #2 #1 #Output: #The maximum water trapped is 6<|fim▁end|>
# filling left (list/array) left[0] = a[0] for i in range( 1, size): left[i] = max(left[i-1], a[i])
<|file_name|>TimespanSpecifier.py<|end_file_name|><|fim▁begin|>import abjad from abjad.tools import abctools class TimespanSpecifier(abctools.AbjadValueObject): ### CLASS VARIABLES ### __slots__ = ( '_forbid_fusing', '_forbid_splitting', '_minimum_duration', ) ### INITIALIZER ### def __init__( self, forbid_fusing=None, forbid_splitting=None, minimum_duration=None, ): if forbid_fusing is not None: forbid_fusing = bool(forbid_fusing) self._forbid_fusing = forbid_fusing if forbid_splitting is not None: forbid_splitting = bool(forbid_splitting) self._forbid_splitting = forbid_splitting if minimum_duration is not None: minimum_duration = abjad.Duration(minimum_duration) self._minimum_duration = minimum_duration ### PUBLIC PROPERTIES ### @property def forbid_fusing(self): return self._forbid_fusing @property def forbid_splitting(self): return self._forbid_splitting @property def minimum_duration(self):<|fim▁hole|><|fim▁end|>
return self._minimum_duration
<|file_name|>fsu-moves-and-copies.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // Issue 4691: Ensure that functional-struct-updates operates // correctly and moves rather than copy when appropriate. #![allow(unknown_features)] #![feature(box_syntax)] use std::marker::NoCopy as NP; struct ncint { np: NP, v: int } fn ncint(v: int) -> ncint { ncint { np: NP, v: v } } struct NoFoo { copied: int, nocopy: ncint, } impl NoFoo { fn new(x:int,y:int) -> NoFoo { NoFoo { copied: x, nocopy: ncint(y) } } }<|fim▁hole|>} struct DropNoFoo { inner: NoFoo } impl DropNoFoo { fn new(x:int,y:int) -> DropNoFoo { DropNoFoo { inner: NoFoo::new(x,y) } } } impl Drop for DropNoFoo { fn drop(&mut self) { } } struct DropMoveFoo { inner: MoveFoo } impl DropMoveFoo { fn new(x:int,y:int) -> DropMoveFoo { DropMoveFoo { inner: MoveFoo::new(x,y) } } } impl Drop for DropMoveFoo { fn drop(&mut self) { } } fn test0() { // just copy implicitly copyable fields from `f`, no moves // (and thus it is okay that these are Drop; compare against // compile-fail test: borrowck-struct-update-with-dtor.rs). 
// Case 1: Nocopyable let f = DropNoFoo::new(1, 2); let b = DropNoFoo { inner: NoFoo { nocopy: ncint(3), ..f.inner }}; let c = DropNoFoo { inner: NoFoo { nocopy: ncint(4), ..f.inner }}; assert_eq!(f.inner.copied, 1); assert_eq!(f.inner.nocopy.v, 2); assert_eq!(b.inner.copied, 1); assert_eq!(b.inner.nocopy.v, 3); assert_eq!(c.inner.copied, 1); assert_eq!(c.inner.nocopy.v, 4); // Case 2: Owned let f = DropMoveFoo::new(5, 6); let b = DropMoveFoo { inner: MoveFoo { moved: box 7, ..f.inner }}; let c = DropMoveFoo { inner: MoveFoo { moved: box 8, ..f.inner }}; assert_eq!(f.inner.copied, 5); assert_eq!(*f.inner.moved, 6); assert_eq!(b.inner.copied, 5); assert_eq!(*b.inner.moved, 7); assert_eq!(c.inner.copied, 5); assert_eq!(*c.inner.moved, 8); } fn test1() { // copying move-by-default fields from `f`, so it moves: let f = MoveFoo::new(11, 12); let b = MoveFoo {moved: box 13, ..f}; let c = MoveFoo {copied: 14, ..f}; assert_eq!(b.copied, 11); assert_eq!(*b.moved, 13); assert_eq!(c.copied, 14); assert_eq!(*c.moved, 12); } fn test2() { // move non-copyable field let f = NoFoo::new(21, 22); let b = NoFoo {nocopy: ncint(23), ..f}; let c = NoFoo {copied: 24, ..f}; assert_eq!(b.copied, 21); assert_eq!(b.nocopy.v, 23); assert_eq!(c.copied, 24); assert_eq!(c.nocopy.v, 22); } pub fn main() { test0(); test1(); test2(); }<|fim▁end|>
struct MoveFoo { copied: int, moved: Box<int>, } impl MoveFoo { fn new(x:int,y:int) -> MoveFoo { MoveFoo { copied: x, moved: box y } }
<|file_name|>Generator.py<|end_file_name|><|fim▁begin|>import collections from sets import Set from Drawer import Drawer class Cultivar(): def __init__(self, id, name, year): self.id = id self.name = name self.year = year self.parent1 = None self.parent2 = None # row index starts from 0 self.row = 0 # column index starts from 0 self.col = None self.rowAmount = None class Grid(): middleColumn = 5 def initGrid(self, grid, rowNum, colNum, default): for i in range(rowNum): row = [] for j in range(colNum): row.append(default) grid.append(row) def __init__(self, row): self.rowTotalNumber = row self.colTotalNumber = Grid.middleColumn * 2 + 1 self.grid = [] self.initGrid(self.grid, self.rowTotalNumber, self.colTotalNumber, 0) # only double columns def doubleGrid(self): colTotalNumber = self.colTotalNumber middleColumn = Grid.middleColumn Grid.middleColumn += Grid.middleColumn self.colTotalNumber = Grid.middleColumn * 2 + 1 grid = [] self.initGrid(grid, self.rowTotalNumber, self.colTotalNumber, 0) for rowIndex in range(self.rowTotalNumber): for colIndex in range(colTotalNumber): newColIndex = middleColumn + colIndex grid[rowIndex][newColIndex] = self.grid[rowIndex][colIndex] self.grid = grid def isAvailable(self, row, rowAmount, col): for i in range(rowAmount): if self.grid[row + i][col] != 0: return False return True def occupyGrid(self, row, rowAmount, col, id): for i in range(rowAmount): self.grid[row + i][col] = id def autoOccupy(self, row, rowAmount, col, id, offset=0): targetColumn = col + offset if targetColumn < 0 or targetColumn >= self.colTotalNumber: self.doubleGrid() col += (Grid.middleColumn / 2) return self.autoOccupy(row, rowAmount, col, id, offset) else: if self.isAvailable(row, rowAmount, targetColumn): self.occupyGrid(row, rowAmount, targetColumn, id) return targetColumn else: if offset < 0: return self.autoOccupy(row, rowAmount, col, id, -offset) elif offset > 0: return self.autoOccupy(row, rowAmount, col, id, -offset - 1) elif offset == 0: return 
self.autoOccupy(row, rowAmount, col, id, -1) def occupy(self, cultivar, col): return self.autoOccupy(cultivar.row, cultivar.rowAmount, col, cultivar.id) class Generator(): def formatNone(self, value): if value == None: return None if value == '' or value == 'None' or value == 'none': return None try: return int(value) except: print "Year value or cultivar ID cannot be converted to integer" exit(1) # data is rows # each row is [cultivar.ID, cultivar.Name, cultivar.Year, cultivar.Parent1, cultivar.Parent2] def __init__(self, data, outputFolder): self.outputFolder = outputFolder self.data = data self.outputFolder = outputFolder self.nameIDMap = {} self.IDCultivarMap = {} self.idRowMap = {} self.treeSet = None for row in data: self.idRowMap[row[0]] = row for row in data: id = row[0] name = row[1] year = self.formatNone(row[2]) parent1 = self.formatNone(row[3]) parent2 = self.formatNone(row[4]) self.nameIDMap[name] = id cultivar = self.IDCultivarMap.get(id) if cultivar == None: cultivar = Cultivar(id, name, year) self.IDCultivarMap[id] = cultivar if parent1 != None: parent1_Obj = self.IDCultivarMap.get(parent1) if parent1_Obj == None: row = self.idRowMap[parent1] parent1_Obj = Cultivar(row[0], row[1], row[2]) self.IDCultivarMap[parent1] = parent1_Obj cultivar.parent1 = parent1_Obj if parent2 != None: parent2_Obj = self.IDCultivarMap.get(parent2) if parent2_Obj == None: row = self.idRowMap[parent2] parent2_Obj = Cultivar(row[0], row[1], row[2]) self.IDCultivarMap[parent2] = parent2_Obj cultivar.parent2 = parent2_Obj def calcRow(self, cultivar): self.treeSet = Set() queue = collections.deque() queue.append(cultivar) cultivar.row = 0 while len(queue) != 0: c = queue.popleft() self.treeSet.add(c) if c.parent1 != None: queue.append(c.parent1) c.parent1.row = c.row + 1 if c.parent2 != None and c.parent2 != c.parent1: queue.append(c.parent2) c.parent2.row = c.row + 1 def printTree(self, cultivar): queue = collections.deque() queue.append(cultivar) while len(queue) != 0: c = 
queue.popleft() if c.parent1 != None: queue.append(c.parent1) if c.parent2 != None: queue.append(c.parent2) print c.name, c.rowAmount<|fim▁hole|> def printGrid(self, grid): digit = 3 for row in grid: newRow = [] for col in row: col = str(col) for i in range(digit - len(col)): col = '0' + col newRow.append(col) print newRow # calcRow should be called before this function def calcRowAmount(self, cultivar): queue = collections.deque() queue.append(cultivar) self.totalRow = 0 while len(queue) != 0: c = queue.popleft() biggest = 1 if c.parent1 != None: queue.append(c.parent1) biggest = c.parent1.row - c.row if c.parent2 != None and c.parent2 != c.parent1: queue.append(c.parent2) if c.parent2.row > c.parent1.row: biggest = c.parent2.row - c.row c.rowAmount = biggest tmp = c.rowAmount + c.row if tmp > self.totalRow: self.totalRow = tmp def calcMaxRow(self, cultivar): queue = collections.deque() queue.append(cultivar) biggest = 0 while len(queue) != 0: c = queue.popleft() tmp = c.row + c.rowAmount if tmp > biggest: biggest = tmp biggest = c.row + c.rowAmount if c.parent1 != None: queue.append(c.parent1) if c.parent2 != None: queue.append(c.parent2) return biggest def calcBestCol(self, cultivar): return Grid.middleColumn def calcCol(self, cultivar): self.grid = Grid(self.totalRow) for node in self.treeSet: node.col = self.grid.occupy(node, self.calcBestCol(node)) def generateByName(self, name): self.generateByID(self.nameIDMap[name.strip()]) def generateByID(self, id): cultivar = self.IDCultivarMap[id] self.calcRow(cultivar) self.calcRowAmount(cultivar) self.calcCol(cultivar) self.printGrid(self.grid.grid) Drawer(cultivar).draw(self.outputFolder)<|fim▁end|>
<|file_name|>util.py<|end_file_name|><|fim▁begin|># encoding: utf-8 # #<|fim▁hole|># Author: Kyle Lahnakoski (kyle@lahnakoski.com) # from __future__ import unicode_literals from __future__ import division from __future__ import absolute_import from pyLibrary.dot import wrap def es_query_template(): output = wrap({ "query": {"match_all": {}}, "from": 0, "size": 0, "sort": [] }) return output def qb_sort_to_es_sort(sort): output = [] for s in sort: if s.sort == 1: output.append(s.field) elif s.sort == -1: output.append({s.field: "desc"}) else: pass return output # FOR ELASTICSEARCH aggs aggregates1_4 = { "none": "none", "one": "count", "sum": "sum", "add": "sum", "count": "value_count", "maximum": "max", "minimum": "min", "max": "max", "min": "min", "mean": "avg", "average": "avg", "avg": "avg", "N": "count", "X0": "count", "X1": "sum", "X2": "sum_of_squares", "std": "std_deviation", "stddev": "std_deviation", "var": "variance", "variance": "variance" }<|fim▁end|>
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http:# mozilla.org/MPL/2.0/. #
<|file_name|>lib.go<|end_file_name|><|fim▁begin|><|fim▁hole|> func M() int { return 42 }<|fim▁end|>
package lib
<|file_name|>DigestIntro.tsx<|end_file_name|><|fim▁begin|>import React from 'react'; import styled from 'styled-components'; import Color from 'color'; import { Community } from 'types'; import { communityUrl } from 'utils/canonicalUrls'; import { formatDate } from 'utils/dates'; import { Icon } from 'client/components'; import { Section, Button } from '..'; import { BaseTableStyle } from '../shared'; type Props = { accentColorDark: string; community: Community; date: Date; }; type StyleProps = { accentColorDark: string; }; const TableStyle = styled(BaseTableStyle)<StyleProps>` border-top: 1px solid ${(props) => props.accentColorDark}; `; const ButtonCellStyle = styled.td` vertical-align: middle; width: 39%; `; const TextCellStyle = styled.td` padding-right: 30px; font-size: 12px; line-height: 18px; font-weight: 400; text-align: justify; width: 61%; `; const SpanStyle = styled.span` fill: #333333; padding-right: 9px; `; export const DigestIntro = (props: Props) => { const { date } = props; const fadedBackgroundColor = Color(props.accentColorDark).fade(0.95).rgb().string(); return ( <TableStyle accentColorDark={props.accentColorDark}> <tr> <td> <Section backgroundColor={fadedBackgroundColor}> <BaseTableStyle> <tr> <TextCellStyle> This digest is a compilation of activity in the&nbsp; <a href={communityUrl(props.community)}> {props.community.title} </a> &nbsp;community for the day of&nbsp; {formatDate(date)}. </TextCellStyle> <ButtonCellStyle> <Button linkUrl={`${communityUrl(props.community)}/dash/activity`} width="100%" ><|fim▁hole|> </Button> </ButtonCellStyle> </tr> </BaseTableStyle> </Section> </td> </tr> </TableStyle> ); };<|fim▁end|>
<SpanStyle> <Icon icon="pulse" /> </SpanStyle> <span>View latest activity</span>
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|><|fim▁hole|><|fim▁end|>
from .gaussian_process import RandomFeatureGaussianProcess, mean_field_logits from .spectral_normalization import SpectralNormalization
<|file_name|>list-log-streams.rs<|end_file_name|><|fim▁begin|>/* * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ use aws_config::meta::region::RegionProviderChain; use aws_sdk_cloudwatchlogs::{Client, Error, Region, PKG_VERSION}; use structopt::StructOpt; #[derive(Debug, StructOpt)] struct Opt { /// The AWS Region. #[structopt(short, long)] region: Option<String>, /// The log group prefix. #[structopt(short, long)] prefix: String, /// The log group name. #[structopt(short, long)] group: String, /// Whether to display additional information. #[structopt(short, long)] verbose: bool, } // Lists the streams for a log group. // snippet-start:[cloudwatchlogs.rust.list-log-streams] async fn show_log_streams( client: &aws_sdk_cloudwatchlogs::Client, name: &str, ) -> Result<(), aws_sdk_cloudwatchlogs::Error> { let resp = client .describe_log_streams() .log_group_name(name) .send() .await?; let streams = resp.log_streams().unwrap_or_default(); println!("Found {} streams:", streams.len()); for stream in streams { println!(" {}", stream.log_stream_name().unwrap_or_default()); } Ok(()) } // snippet-end:[cloudwatchlogs.rust.list-log-streams] /// Lists the log streams for a log group in the Region. /// # Arguments ///<|fim▁hole|>/// * `[-v]` - Whether to display additional information. 
#[tokio::main] async fn main() -> Result<(), Error> { let Opt { region, group, prefix, verbose, } = Opt::from_args(); if verbose { tracing_subscriber::fmt::init(); } let region_provider = RegionProviderChain::first_try(region.map(Region::new)) .or_default_provider() .or_else(Region::new("us-west-2")); // Construct log group name let mut name: String = "/aws/".to_owned(); name.push_str(&prefix); name.push('/'); name.push_str(&group); if verbose { println!(); println!("CloudWatchLogs client version: {}", PKG_VERSION); println!( "Region: {}", region_provider.region().await.unwrap().as_ref() ); println!("Log group name: {}", &name); println!(); } let shared_config = aws_config::from_env().region(region_provider).load().await; let client = Client::new(&shared_config); show_log_streams(&client, &name).await }<|fim▁end|>
/// * `-g LOG-GROUP` - The name of the log group. /// * `[-r REGION]` - The Region in which the client is created. /// If not supplied, uses the value of the **AWS_REGION** environment variable. /// If the environment variable is not set, defaults to **us-west-2**.
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3.4<|fim▁hole|><|fim▁end|>
# -*- coding: utf-8 -*-
<|file_name|>IndividualQcUtilsTest.java<|end_file_name|><|fim▁begin|>/* * Copyright 2015-2020 OpenCB * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.<|fim▁hole|> */ package org.opencb.opencga.analysis.individual.qc; import org.junit.Test; import org.opencb.biodata.models.clinical.qc.MendelianErrorReport; import org.opencb.biodata.models.clinical.qc.RelatednessReport; import org.opencb.biodata.models.variant.Variant; import org.opencb.biodata.models.variant.avro.IssueEntry; import org.opencb.biodata.models.variant.avro.IssueType; import org.opencb.opencga.analysis.family.qc.IBDComputation; import org.opencb.opencga.core.common.JacksonUtils; import org.opencb.opencga.core.exceptions.ToolException; import java.io.File; import java.io.IOException; import java.net.URI; import java.nio.file.Paths; import java.util.*; import static org.opencb.opencga.storage.core.variant.VariantStorageBaseTest.getResourceUri; public class IndividualQcUtilsTest { @Test public void buildRelatednessReport() throws ToolException, IOException { URI resourceUri = getResourceUri("ibd.genome"); File file = Paths.get(resourceUri.getPath()).toFile(); List<RelatednessReport.RelatednessScore> relatednessReport = IBDComputation.parseRelatednessScores(file); System.out.println(JacksonUtils.getDefaultNonNullObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(relatednessReport)); } @Test public void parseMendelianError() throws IOException { URI resourceUri = getResourceUri("mendelian.error.variants.json"); File file = Paths.get(resourceUri.getPath()).toFile(); List<Variant> variants = 
Arrays.asList(JacksonUtils.getDefaultNonNullObjectMapper().readValue(file, Variant[].class)); System.out.println(variants.size()); MendelianErrorReport mendelianErrorReport = buildMendelianErrorReport(variants.iterator(), variants.size()); System.out.println(JacksonUtils.getDefaultNonNullObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(mendelianErrorReport)); // List<Variant> variants = JacksonUtils.getDefaultNonNullObjectMapper().readerFor(Variant.class).readValue(path.toFile()); // System.out.println(variants.size()); } @Test public void parseKaryotypicSexThresholds() throws IOException { URI resourceUri = getResourceUri("karyotypic_sex_thresholds.json"); File file = Paths.get(resourceUri.getPath()).toFile(); Map<String, Double> thresholds = JacksonUtils.getDefaultNonNullObjectMapper().readerFor(Map.class).readValue(file); System.out.println(JacksonUtils.getDefaultNonNullObjectMapper().writerWithDefaultPrettyPrinter().writeValueAsString(thresholds)); } private MendelianErrorReport buildMendelianErrorReport(Iterator iterator, long numVariants) { // Create auxiliary map // sample chrom error count Map<String, Map<String, Map<String, Integer>>> counter = new HashMap<>(); int numErrors = 0; while (iterator.hasNext()) { Variant variant = (Variant) iterator.next(); // Get sampleId and error code from variant issues boolean foundError = false; for (IssueEntry issue : variant.getStudies().get(0).getIssues()) { if (IssueType.MENDELIAN_ERROR == issue.getType() || IssueType.DE_NOVO == issue.getType()) { foundError = true; String sampleId = issue.getSample().getSampleId(); String errorCode = issue.getSample().getData().get(0); if (!counter.containsKey(sampleId)) { counter.put(sampleId, new HashMap<>()); } if (!counter.get(sampleId).containsKey(variant.getChromosome())) { counter.get(sampleId).put(variant.getChromosome(), new HashMap<>()); } int val = 0; if (counter.get(sampleId).get(variant.getChromosome()).containsKey(errorCode)) { val = 
counter.get(sampleId).get(variant.getChromosome()).get(errorCode); } counter.get(sampleId).get(variant.getChromosome()).put(errorCode, val + 1); } } if (foundError) { numErrors++; } } // Create mendelian error report from auxiliary map MendelianErrorReport meReport = new MendelianErrorReport(); meReport.setNumErrors(numErrors); for (String sampleId : counter.keySet()) { MendelianErrorReport.SampleAggregation sampleAgg = new MendelianErrorReport.SampleAggregation(); int numSampleErrors = 0; for (String chrom : counter.get(sampleId).keySet()) { int numChromErrors = counter.get(sampleId).get(chrom).values().stream().mapToInt(Integer::intValue).sum(); MendelianErrorReport.SampleAggregation.ChromosomeAggregation chromAgg = new MendelianErrorReport.SampleAggregation.ChromosomeAggregation(); chromAgg.setChromosome(chrom); chromAgg.setNumErrors(numChromErrors); chromAgg.setErrorCodeAggregation(counter.get(sampleId).get(chrom)); // Update sample aggregation sampleAgg.getChromAggregation().add(chromAgg); numSampleErrors += numChromErrors; } sampleAgg.setSample(sampleId); sampleAgg.setNumErrors(numSampleErrors); sampleAgg.setRatio(1.0d * numSampleErrors / numVariants); meReport.getSampleAggregation().add(sampleAgg); } return meReport; } }<|fim▁end|>
* See the License for the specific language governing permissions and * limitations under the License.
<|file_name|>locks.py<|end_file_name|><|fim▁begin|># This file is part of Buildbot. Buildbot is free software: you can # redistribute it and/or modify it under the terms of the GNU General Public # License as published by the Free Software Foundation, version 2. # # This program is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS # FOR A PARTICULAR PURPOSE. See the GNU General Public License for more # details. # # You should have received a copy of the GNU General Public License along with # this program; if not, write to the Free Software Foundation, Inc., 51 # Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright Buildbot Team Members from twisted.python import log from twisted.internet import reactor, defer from buildbot import util if False: # for debugging debuglog = log.msg else: debuglog = lambda m: None class BaseLock: """ Class handling claiming and releasing of L{self}, and keeping track of current and waiting owners. @note: Ideally, we'd like to maintain FIFO order. The place to do that would be the L{isAvailable()} function. However, this function is called by builds/steps both for the first time, and after waking them up by L{self} from the L{self.waiting} queue. There is currently no way of distinguishing between them. """ description = "<BaseLock>" def __init__(self, name, maxCount=1): self.name = name # Name of the lock self.waiting = [] # Current queue, tuples (LockAccess, deferred) self.owners = [] # Current owners, tuples (owner, LockAccess) self.maxCount = maxCount # maximal number of counting owners def __repr__(self): return self.description def _getOwnersCount(self): """ Return the number of current exclusive and counting owners. 
@return: Tuple (number exclusive owners, number counting owners) """ num_excl, num_counting = 0, 0 for owner in self.owners: if owner[1].mode == 'exclusive': num_excl = num_excl + 1 else: # mode == 'counting' num_counting = num_counting + 1 assert (num_excl == 1 and num_counting == 0) \ or (num_excl == 0 and num_counting <= self.maxCount) return num_excl, num_counting def isAvailable(self, access): """ Return a boolean whether the lock is available for claiming """ debuglog("%s isAvailable(%s): self.owners=%r"<|fim▁hole|> num_excl, num_counting = self._getOwnersCount() if access.mode == 'counting': # Wants counting access return num_excl == 0 and num_counting < self.maxCount else: # Wants exclusive access return num_excl == 0 and num_counting == 0 def claim(self, owner, access): """ Claim the lock (lock must be available) """ debuglog("%s claim(%s, %s)" % (self, owner, access.mode)) assert owner is not None assert self.isAvailable(access), "ask for isAvailable() first" assert isinstance(access, LockAccess) assert access.mode in ['counting', 'exclusive'] self.owners.append((owner, access)) debuglog(" %s is claimed '%s'" % (self, access.mode)) def release(self, owner, access): """ Release the lock """ assert isinstance(access, LockAccess) debuglog("%s release(%s, %s)" % (self, owner, access.mode)) entry = (owner, access) assert entry in self.owners self.owners.remove(entry) # who can we wake up? # After an exclusive access, we may need to wake up several waiting. # Break out of the loop when the first waiting client should not be awakened. 
num_excl, num_counting = self._getOwnersCount() while len(self.waiting) > 0: access, d = self.waiting[0] if access.mode == 'counting': if num_excl > 0 or num_counting == self.maxCount: break else: num_counting = num_counting + 1 else: # access.mode == 'exclusive' if num_excl > 0 or num_counting > 0: break else: num_excl = num_excl + 1 del self.waiting[0] reactor.callLater(0, d.callback, self) def waitUntilMaybeAvailable(self, owner, access): """Fire when the lock *might* be available. The caller will need to check with isAvailable() when the deferred fires. This loose form is used to avoid deadlocks. If we were interested in a stronger form, this would be named 'waitUntilAvailable', and the deferred would fire after the lock had been claimed. """ debuglog("%s waitUntilAvailable(%s)" % (self, owner)) assert isinstance(access, LockAccess) if self.isAvailable(access): return defer.succeed(self) d = defer.Deferred() self.waiting.append((access, d)) return d def stopWaitingUntilAvailable(self, owner, access, d): debuglog("%s stopWaitingUntilAvailable(%s)" % (self, owner)) assert isinstance(access, LockAccess) assert (access, d) in self.waiting self.waiting.remove( (access, d) ) def isOwner(self, owner, access): return (owner, access) in self.owners class RealMasterLock(BaseLock): def __init__(self, lockid): BaseLock.__init__(self, lockid.name, lockid.maxCount) self.description = "<MasterLock(%s, %s)>" % (self.name, self.maxCount) def getLock(self, slave): return self class RealSlaveLock: def __init__(self, lockid): self.name = lockid.name self.maxCount = lockid.maxCount self.maxCountForSlave = lockid.maxCountForSlave self.description = "<SlaveLock(%s, %s, %s)>" % (self.name, self.maxCount, self.maxCountForSlave) self.locks = {} def __repr__(self): return self.description def getLock(self, slavebuilder): slavename = slavebuilder.slave.slavename if not self.locks.has_key(slavename): maxCount = self.maxCountForSlave.get(slavename, self.maxCount) lock = 
self.locks[slavename] = BaseLock(self.name, maxCount) desc = "<SlaveLock(%s, %s)[%s] %d>" % (self.name, maxCount, slavename, id(lock)) lock.description = desc self.locks[slavename] = lock return self.locks[slavename] class LockAccess(util.ComparableMixin): """ I am an object representing a way to access a lock. @param lockid: LockId instance that should be accessed. @type lockid: A MasterLock or SlaveLock instance. @param mode: Mode of accessing the lock. @type mode: A string, either 'counting' or 'exclusive'. """ compare_attrs = ['lockid', 'mode'] def __init__(self, lockid, mode): self.lockid = lockid self.mode = mode assert isinstance(lockid, (MasterLock, SlaveLock)) assert mode in ['counting', 'exclusive'] class BaseLockId(util.ComparableMixin): """ Abstract base class for LockId classes. Sets up the 'access()' function for the LockId's available to the user (MasterLock and SlaveLock classes). Derived classes should add - Comparison with the L{util.ComparableMixin} via the L{compare_attrs} class variable. - Link to the actual lock class should be added with the L{lockClass} class variable. """ def access(self, mode): """ Express how the lock should be accessed """ assert mode in ['counting', 'exclusive'] return LockAccess(self, mode) def defaultAccess(self): """ For buildbot 0.7.7 compability: When user doesn't specify an access mode, this one is chosen. """ return self.access('counting') # master.cfg should only reference the following MasterLock and SlaveLock # classes. They are identifiers that will be turned into real Locks later, # via the BotMaster.getLockByID method. class MasterLock(BaseLockId): """I am a semaphore that limits the number of simultaneous actions. Builds and BuildSteps can declare that they wish to claim me as they run. Only a limited number of such builds or steps will be able to run simultaneously. By default this number is one, but my maxCount parameter can be raised to allow two or three or more operations to happen at the same time. 
Use this to protect a resource that is shared among all builders and all slaves, for example to limit the load on a common SVN repository. """ compare_attrs = ['name', 'maxCount'] lockClass = RealMasterLock def __init__(self, name, maxCount=1): self.name = name self.maxCount = maxCount class SlaveLock(BaseLockId): """I am a semaphore that limits simultaneous actions on each buildslave. Builds and BuildSteps can declare that they wish to claim me as they run. Only a limited number of such builds or steps will be able to run simultaneously on any given buildslave. By default this number is one, but my maxCount parameter can be raised to allow two or three or more operations to happen on a single buildslave at the same time. Use this to protect a resource that is shared among all the builds taking place on each slave, for example to limit CPU or memory load on an underpowered machine. Each buildslave will get an independent copy of this semaphore. By default each copy will use the same owner count (set with maxCount), but you can provide maxCountForSlave with a dictionary that maps slavename to owner count, to allow some slaves more parallelism than others. """ compare_attrs = ['name', 'maxCount', '_maxCountForSlaveList'] lockClass = RealSlaveLock def __init__(self, name, maxCount=1, maxCountForSlave={}): self.name = name self.maxCount = maxCount self.maxCountForSlave = maxCountForSlave # for comparison purposes, turn this dictionary into a stably-sorted # list of tuples self._maxCountForSlaveList = self.maxCountForSlave.items() self._maxCountForSlaveList.sort() self._maxCountForSlaveList = tuple(self._maxCountForSlaveList)<|fim▁end|>
% (self, access, self.owners))
<|file_name|>gruntfile.js<|end_file_name|><|fim▁begin|>'use strict'; var _ = require('lodash'); var webpack = require('webpack'); var mergeWebpackConfig = function (config) { // Load webpackConfig only when using `grunt:webpack` // load of grunt tasks is faster var webpackConfig = require('./webpack.config'); return _.merge({}, webpackConfig, config, function (a, b) { if (_.isArray(a)) { return a.concat(b); } }); }; module.exports = function(grunt) { grunt.initConfig({ pkg: grunt.file.readJSON('package.json'), sass: { min: { files: { 'dist/react-datepicker.css': 'src/stylesheets/datepicker.scss' }, options: { sourcemap: 'none', style: 'expanded' } }, unmin: { files: { 'dist/react-datepicker.min.css': 'src/stylesheets/datepicker.scss' }, options: { sourcemap: 'none', style: 'compressed' } } }, watch: { jshint: { files: ['src/**/*.js', 'src/**/*.jsx'], tasks: ['jshint'] }, jest: { files: ['src/**/*.jsx', 'src/**/*.js', 'test/**/*.js'], tasks: ['jest'] }, css: { files: '**/*.scss', tasks: ['sass'] }, webpack: { files: ['src/**/*.js', 'src/**/*.jsx'], tasks: ['webpack'] } }, scsslint: { files: 'src/stylesheets/*.scss', options: { config: '.scss-lint.yml', colorizeOutput: true } }, jshint: { all: ['src/**/*.jsx', 'src/**/*.js'], options: { eqnull: true } }, webpack: { example: { entry: './example/boot', output: { filename: 'example.js', library: 'ExampleApp', path: './example/' }, resolve: { extensions: ['', '.js', '.jsx'] }, module: { loaders: [ {test: /\.js/, loaders: ['babel-loader'], exclude: /node_modules/} ] }, node: {Buffer: false}, plugins: [ new webpack.optimize.DedupePlugin(), new webpack.DefinePlugin({ 'process.env.NODE_ENV': JSON.stringify(process.env.NODE_ENV) }) ] }, unmin: mergeWebpackConfig({ output: { filename: 'react-datepicker.js' } }), min: mergeWebpackConfig({ output: { filename: 'react-datepicker.min.js' }, plugins: [ new webpack.optimize.UglifyJsPlugin({ compressor: { warnings: false } }) ] }) } }); grunt.loadNpmTasks('grunt-contrib-sass'); 
grunt.loadNpmTasks('grunt-scss-lint'); grunt.loadNpmTasks('grunt-contrib-watch'); grunt.loadNpmTasks('grunt-jsxhint'); grunt.loadNpmTasks('grunt-webpack');<|fim▁hole|> grunt.registerTask('default', ['watch', 'scsslint']); grunt.registerTask('travis', ['jshint', 'jest', 'scsslint']); grunt.registerTask('build', ['jshint', 'scsslint', 'webpack', 'sass']); grunt.registerTask('jest', require('./grunt/tasks/jest')); };<|fim▁end|>
<|file_name|>Vec2Type.java<|end_file_name|><|fim▁begin|>// // Ce fichier a été généré par l'implémentation de référence JavaTM Architecture for XML Binding (JAXB), v2.2.8-b130911.1802 // Voir <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a> // Toute modification apportée à ce fichier sera perdue lors de la recompilation du schéma source. // Généré le : 2015.10.08 à 11:14:04 PM CEST // package org.klipdev.spidergps3p.kml; import javax.xml.bind.annotation.XmlAccessType; import javax.xml.bind.annotation.XmlAccessorType; import javax.xml.bind.annotation.XmlAttribute; import javax.xml.bind.annotation.XmlType; /** * <p>Classe Java pour vec2Type complex type. * * <p>Le fragment de schéma suivant indique le contenu attendu figurant dans cette classe. * * <pre> * &lt;complexType name="vec2Type"> * &lt;complexContent> * &lt;restriction base="{http://www.w3.org/2001/XMLSchema}anyType"> * &lt;attribute name="x" type="{http://www.w3.org/2001/XMLSchema}double" default="1.0" /> * &lt;attribute name="y" type="{http://www.w3.org/2001/XMLSchema}double" default="1.0" /> * &lt;attribute name="xunits" type="{http://www.opengis.net/kml/2.2}unitsEnumType" default="fraction" /> * &lt;attribute name="yunits" type="{http://www.opengis.net/kml/2.2}unitsEnumType" default="fraction" /><|fim▁hole|> * </pre> * * */ @XmlAccessorType(XmlAccessType.FIELD) @XmlType(name = "vec2Type") public class Vec2Type { @XmlAttribute(name = "x") protected Double x; @XmlAttribute(name = "y") protected Double y; @XmlAttribute(name = "xunits") protected UnitsEnumType xunits; @XmlAttribute(name = "yunits") protected UnitsEnumType yunits; /** * Obtient la valeur de la propriété x. * * @return * possible object is * {@link Double } * */ public double getX() { if (x == null) { return 1.0D; } else { return x; } } /** * Définit la valeur de la propriété x. 
* * @param value * allowed object is * {@link Double } * */ public void setX(Double value) { this.x = value; } /** * Obtient la valeur de la propriété y. * * @return * possible object is * {@link Double } * */ public double getY() { if (y == null) { return 1.0D; } else { return y; } } /** * Définit la valeur de la propriété y. * * @param value * allowed object is * {@link Double } * */ public void setY(Double value) { this.y = value; } /** * Obtient la valeur de la propriété xunits. * * @return * possible object is * {@link UnitsEnumType } * */ public UnitsEnumType getXunits() { if (xunits == null) { return UnitsEnumType.FRACTION; } else { return xunits; } } /** * Définit la valeur de la propriété xunits. * * @param value * allowed object is * {@link UnitsEnumType } * */ public void setXunits(UnitsEnumType value) { this.xunits = value; } /** * Obtient la valeur de la propriété yunits. * * @return * possible object is * {@link UnitsEnumType } * */ public UnitsEnumType getYunits() { if (yunits == null) { return UnitsEnumType.FRACTION; } else { return yunits; } } /** * Définit la valeur de la propriété yunits. * * @param value * allowed object is * {@link UnitsEnumType } * */ public void setYunits(UnitsEnumType value) { this.yunits = value; } }<|fim▁end|>
* &lt;/restriction> * &lt;/complexContent> * &lt;/complexType>
<|file_name|>exception.py<|end_file_name|><|fim▁begin|>class ShirtsioError(Exception):<|fim▁hole|> self.http_body = http_body and http_body.decode('utf-8') self.http_status = http_status self.json_body = json_body class APIError(ShirtsioError): pass class APIConnectionError(ShirtsioError): pass class InvalidRequestError(ShirtsioError): def __init__(self, message, http_body=None, http_status=None, json_body=None): super(InvalidRequestError, self).__init__(message, http_body, http_status, json_body) class AuthenticationError(ShirtsioError): pass<|fim▁end|>
def __init__(self, message=None, http_body=None, http_status=None, json_body=None): super(ShirtsioError, self).__init__(message)
<|file_name|>decoder.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # -*- coding: utf-8 -*- import sys from ErrorsHandler import * from string import strip, find, split, lower, rjust, atoi, atof, replace, digits, zfill, join from tools import cleanlist, which, filecopy, mkdirtree, touch, listmatch, rm, escapedfilename from tools import get_username, get_tempdir, cmdexec import os, os.path import mp3info, ogg.vorbis logfile = 'decoder.log' logger.InitAll(logfile, '') sys.stdout.save = False class Decoder: def __init__(self): """ Converts mp3/ogg-files to wav-files. """ #logger.debug2("Decoder class instantiated") pass def convert2wav(self, files, targetPath): """walk files list and apply decode() to each""" i = 0 for source in files: target = targetPath + "/" + str(i) + ".wav" #logger.info("Decoding %s to %s ..." % (source, target)) logger.info("Decoding %s ..." % os.path.basename(source)) self.decode(source, target) i += 1 logger.info("Decoding finished") def decode(self, filename, target): """decode a file to wav""" if not os.path.isfile(filename): logger.error("Decoding failed: %s not found" % filename) return False mp3count = 0 oggcount = 0 if (lower(filename[-4:]) == ".mp3"): mp3count = mp3count + 1 if (lower(filename[-4:]) == ".ogg"): oggcount = oggcount + 1 # Check whether mpg123 and oggdec exists mpg123_command = which("mpg123") oggdec_command = which("oggdec") if ((mp3count > 0) and (mpg123_command == "")): logger.warn( "mpg123 not found for converting mp3 files" ) if ((oggcount > 0) and (oggdec_command == "")): logger.warn( "oggdec not found for converting ogg files" ) #logger.info( "Converting %d file(s) now" % (mp3count + oggcount) ) if ((mp3count > 0) or (oggcount > 0)): #if (lower(filename[-4:]) == ".mp3") or (lower(filename[-4:]) == ".ogg"): #wavfilename = "%s/%s.wav" % (targetPath, os.path.basename(filename)[:-4]) #logger.info( "target = " + target ) if (lower(filename[-4:]) == ".mp3"): # Make sure that conversion is done with the correct sample 
rate file = open(filename, "rb") mpeg3info = mp3info.MP3Info(file)<|fim▁hole|> # get OGG samplerate vf = ogg.vorbis.VorbisFile(filename) vi = vf.info() samplerate = vi.rate channels = vi.channels #logger.info( 'OGG info: samplerate = %s , channels = %s' % (samplerate, channels) ) if ( samplerate != 44100) or (channels != 2): #logger.warn( 'samplerate not 44100, using sox to resample' ) command = "(sox \"%s\" -r 44100 -c 2 -t wav \"%s\") 2>&1" % (escapedfilename(filename), escapedfilename(target)) else: command = "%s -Q -o \"%s\" \"%s\" 2>&1" % (oggdec_command, escapedfilename(target), escapedfilename(filename)) #logger.info( "Executing: %s" % command ) (result, (stdout_output, stderr_output)) = cmdexec(command) if (result != 0): if (lower(filename[-4:]) == ".mp3"): result = listmatch(output, "Playing") output = output[result[0]:] return False else: return True<|fim▁end|>
file.close() samplerate = mpeg3info.mpeg.samplerate command = "(%s --stereo -s \"%s\" | sox -t raw -r %d -w -s -c 2 - -r 44100 -t wav \"%s\") 2>&1" % (mpg123_command, escapedfilename(filename), samplerate, escapedfilename(target)) elif (lower(filename[-4:]) == ".ogg"):
<|file_name|>CacheAbstractJdbcStore.java<|end_file_name|><|fim▁begin|>/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.ignite.cache.store.jdbc; import java.nio.ByteBuffer; import java.sql.BatchUpdateException; import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import javax.cache.Cache; import javax.cache.CacheException; import javax.cache.integration.CacheLoaderException; import javax.cache.integration.CacheWriterException; import javax.sql.DataSource; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.CacheTypeFieldMetadata; import 
org.apache.ignite.cache.CacheTypeMetadata; import org.apache.ignite.cache.store.CacheStore; import org.apache.ignite.cache.store.CacheStoreSession; import org.apache.ignite.cache.store.jdbc.dialect.BasicJdbcDialect; import org.apache.ignite.cache.store.jdbc.dialect.DB2Dialect; import org.apache.ignite.cache.store.jdbc.dialect.H2Dialect; import org.apache.ignite.cache.store.jdbc.dialect.JdbcDialect; import org.apache.ignite.cache.store.jdbc.dialect.MySQLDialect; import org.apache.ignite.cache.store.jdbc.dialect.OracleDialect; import org.apache.ignite.cache.store.jdbc.dialect.SQLServerDialect; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lifecycle.LifecycleAware; import org.apache.ignite.resources.CacheStoreSessionResource; import org.apache.ignite.resources.IgniteInstanceResource; import org.apache.ignite.resources.LoggerResource; import org.apache.ignite.transactions.Transaction; import org.jetbrains.annotations.Nullable; import static java.sql.Statement.EXECUTE_FAILED; import static java.sql.Statement.SUCCESS_NO_INFO; /** * Implementation of {@link CacheStore} backed by JDBC. * <p> * Store works with database via SQL dialect. 
Ignite ships with dialects for most popular databases: * <ul> * <li>{@link DB2Dialect} - dialect for IBM DB2 database.</li> * <li>{@link OracleDialect} - dialect for Oracle database.</li> * <li>{@link SQLServerDialect} - dialect for Microsoft SQL Server database.</li> * <li>{@link MySQLDialect} - dialect for Oracle MySQL database.</li> * <li>{@link H2Dialect} - dialect for H2 database.</li> * <li>{@link BasicJdbcDialect} - dialect for any database via plain JDBC.</li> * </ul> * <p> * <h2 class="header">Configuration</h2> * <ul> * <li>Data source (see {@link #setDataSource(DataSource)}</li> * <li>Dialect (see {@link #setDialect(JdbcDialect)}</li> * <li>Maximum batch size for writeAll and deleteAll operations. (see {@link #setBatchSize(int)})</li> * <li>Max workers thread count. These threads are responsible for load cache. (see {@link #setMaximumPoolSize(int)})</li> * <li>Parallel load cache minimum threshold. (see {@link #setParallelLoadCacheMinimumThreshold(int)})</li> * </ul> * <h2 class="header">Java Example</h2> * <pre name="code" class="java"> * ... * CacheConfiguration ccfg = new CacheConfiguration&lt;&gt;(); * * // Configure cache store. * ccfg.setCacheStoreFactory(new FactoryBuilder.SingletonFactory(ConfigurationSnippet.store())); * ccfg.setReadThrough(true); * ccfg.setWriteThrough(true); * * // Configure cache types metadata. * ccfg.setTypeMetadata(ConfigurationSnippet.typeMetadata()); * * cfg.setCacheConfiguration(ccfg); * ... * </pre> */ public abstract class CacheAbstractJdbcStore<K, V> implements CacheStore<K, V>, LifecycleAware { /** Max attempt write count. */ protected static final int MAX_ATTEMPT_WRITE_COUNT = 2; /** Default batch size for put and remove operations. */ protected static final int DFLT_BATCH_SIZE = 512; /** Default batch size for put and remove operations. */ protected static final int DFLT_PARALLEL_LOAD_CACHE_MINIMUM_THRESHOLD = 512; /** Connection attribute property name. 
*/ protected static final String ATTR_CONN_PROP = "JDBC_STORE_CONNECTION"; /** Empty column value. */ protected static final Object[] EMPTY_COLUMN_VALUE = new Object[] { null }; /** Auto-injected store session. */ @CacheStoreSessionResource private CacheStoreSession ses; /** Auto injected ignite instance. */ @IgniteInstanceResource private Ignite ignite; /** Auto-injected logger instance. */ @LoggerResource protected IgniteLogger log; /** Lock for metadata cache. */ @GridToStringExclude private final Lock cacheMappingsLock = new ReentrantLock(); /** Data source. */ protected DataSource dataSrc; /** Cache with entry mapping description. (cache name, (key id, mapping description)). */ protected volatile Map<String, Map<Object, EntryMapping>> cacheMappings = Collections.emptyMap(); /** Database dialect. */ protected JdbcDialect dialect; /** Max workers thread count. These threads are responsible for load cache. */ private int maxPoolSz = Runtime.getRuntime().availableProcessors(); /** Maximum batch size for writeAll and deleteAll operations. */ private int batchSz = DFLT_BATCH_SIZE; /** Parallel load cache minimum threshold. If {@code 0} then load sequentially. */ private int parallelLoadCacheMinThreshold = DFLT_PARALLEL_LOAD_CACHE_MINIMUM_THRESHOLD; /** * Get field value from object for use as query parameter. * * @param cacheName Cache name. * @param typeName Type name. * @param fieldName Field name. * @param obj Cache object. * @return Field value from object. * @throws CacheException in case of error. */ @Nullable protected abstract Object extractParameter(@Nullable String cacheName, String typeName, String fieldName, Object obj) throws CacheException; /** * Construct object from query result. * * @param <R> Type of result object. * @param cacheName Cache name. * @param typeName Type name. * @param fields Fields descriptors. * @param loadColIdxs Select query columns index. * @param rs ResultSet. * @return Constructed object. 
* @throws CacheLoaderException If failed to construct cache object. */ protected abstract <R> R buildObject(@Nullable String cacheName, String typeName, Collection<CacheTypeFieldMetadata> fields, Map<String, Integer> loadColIdxs, ResultSet rs) throws CacheLoaderException; /** * Extract key type id from key object. * * @param key Key object. * @return Key type id. * @throws CacheException If failed to get type key id from object. */ protected abstract Object keyTypeId(Object key) throws CacheException; /** * Extract key type id from key class name. * * @param type String description of key type. * @return Key type id. * @throws CacheException If failed to get type key id from object. */ protected abstract Object keyTypeId(String type) throws CacheException; /** * Prepare internal store specific builders for provided types metadata. * * @param cacheName Cache name to prepare builders for. * @param types Collection of types. * @throws CacheException If failed to prepare internal builders for types. */ protected abstract void prepareBuilders(@Nullable String cacheName, Collection<CacheTypeMetadata> types) throws CacheException; /** * Perform dialect resolution. * * @return The resolved dialect. * @throws CacheException Indicates problems accessing the metadata. 
*/ protected JdbcDialect resolveDialect() throws CacheException { Connection conn = null; String dbProductName = null; try { conn = openConnection(false); dbProductName = conn.getMetaData().getDatabaseProductName(); } catch (SQLException e) { throw new CacheException("Failed access to metadata for detect database dialect.", e); } finally { U.closeQuiet(conn); } if ("H2".equals(dbProductName)) return new H2Dialect(); if ("MySQL".equals(dbProductName)) return new MySQLDialect(); if (dbProductName.startsWith("Microsoft SQL Server")) return new SQLServerDialect(); if ("Oracle".equals(dbProductName)) return new OracleDialect(); if (dbProductName.startsWith("DB2/")) return new DB2Dialect(); U.warn(log, "Failed to resolve dialect (BasicJdbcDialect will be used): " + dbProductName); return new BasicJdbcDialect(); } /** {@inheritDoc} */ @Override public void start() throws IgniteException { if (dataSrc == null) throw new IgniteException("Failed to initialize cache store (data source is not provided)."); if (dialect == null) { dialect = resolveDialect(); if (log.isDebugEnabled() && dialect.getClass() != BasicJdbcDialect.class) log.debug("Resolved database dialect: " + U.getSimpleName(dialect.getClass())); } } /** {@inheritDoc} */ @Override public void stop() throws IgniteException { // No-op. } /** * Gets connection from a pool. * * @param autocommit {@code true} If connection should use autocommit mode. * @return Pooled connection. * @throws SQLException In case of error. */ protected Connection openConnection(boolean autocommit) throws SQLException { Connection conn = dataSrc.getConnection(); conn.setAutoCommit(autocommit); return conn; } /** * @return Connection. * @throws SQLException In case of error. 
*/ protected Connection connection() throws SQLException { CacheStoreSession ses = session(); if (ses.transaction() != null) { Map<String, Connection> prop = ses.properties(); Connection conn = prop.get(ATTR_CONN_PROP); if (conn == null) { conn = openConnection(false); // Store connection in session to used it for other operations in the same session. prop.put(ATTR_CONN_PROP, conn); } return conn; } // Transaction can be null in case of simple load operation. else return openConnection(true); } /** * Closes connection. * * @param conn Connection to close. */ protected void closeConnection(@Nullable Connection conn) { CacheStoreSession ses = session(); // Close connection right away if there is no transaction. if (ses.transaction() == null) U.closeQuiet(conn); } /** * Closes allocated resources depending on transaction status. * * @param conn Allocated connection. * @param st Created statement, */ protected void end(@Nullable Connection conn, @Nullable Statement st) { U.closeQuiet(st); closeConnection(conn); } /** {@inheritDoc} */ @Override public void sessionEnd(boolean commit) throws CacheWriterException { CacheStoreSession ses = session(); Transaction tx = ses.transaction(); if (tx != null) { Map<String, Connection> sesProps = ses.properties(); Connection conn = sesProps.get(ATTR_CONN_PROP); if (conn != null) { sesProps.remove(ATTR_CONN_PROP); try { if (commit) conn.commit(); else conn.rollback(); } catch (SQLException e) { throw new CacheWriterException( "Failed to end transaction [xid=" + tx.xid() + ", commit=" + commit + ']', e); } finally { U.closeQuiet(conn); } } if (log.isDebugEnabled()) log.debug("Transaction ended [xid=" + tx.xid() + ", commit=" + commit + ']'); } } /** * Retrieves the value of the designated column in the current row of this <code>ResultSet</code> object and * will convert to the requested Java data type. * * @param rs Result set. * @param colIdx Column index in result set. 
* @param type Class representing the Java data type to convert the designated column to. * @return Value in column. * @throws SQLException If a database access error occurs or this method is called. */ protected Object getColumnValue(ResultSet rs, int colIdx, Class<?> type) throws SQLException { Object val = rs.getObject(colIdx); if (val == null) return null; if (type == int.class) return rs.getInt(colIdx); if (type == long.class) return rs.getLong(colIdx); if (type == double.class) return rs.getDouble(colIdx); if (type == boolean.class || type == Boolean.class) return rs.getBoolean(colIdx); if (type == byte.class) return rs.getByte(colIdx); if (type == short.class) return rs.getShort(colIdx); if (type == float.class) return rs.getFloat(colIdx); if (type == Integer.class || type == Long.class || type == Double.class || type == Byte.class || type == Short.class || type == Float.class) { Number num = (Number)val; if (type == Integer.class) return num.intValue(); else if (type == Long.class) return num.longValue(); else if (type == Double.class) return num.doubleValue(); else if (type == Byte.class) return num.byteValue(); else if (type == Short.class) return num.shortValue(); else if (type == Float.class) return num.floatValue(); } if (type == UUID.class) { if (val instanceof UUID) return val; if (val instanceof byte[]) { ByteBuffer bb = ByteBuffer.wrap((byte[])val); long most = bb.getLong(); long least = bb.getLong(); return new UUID(most, least); } if (val instanceof String) return UUID.fromString((String)val); } return val; } /** * Construct load cache from range. * * @param em Type mapping description. * @param clo Closure that will be applied to loaded values. * @param lowerBound Lower bound for range. * @param upperBound Upper bound for range. * @return Callable for pool submit. 
*/ private Callable<Void> loadCacheRange(final EntryMapping em, final IgniteBiInClosure<K, V> clo, @Nullable final Object[] lowerBound, @Nullable final Object[] upperBound) { return new Callable<Void>() { @Override public Void call() throws Exception { Connection conn = null; PreparedStatement stmt = null; try { conn = openConnection(true); stmt = conn.prepareStatement(lowerBound == null && upperBound == null ? em.loadCacheQry : em.loadCacheRangeQuery(lowerBound != null, upperBound != null)); int ix = 1; if (lowerBound != null) for (int i = lowerBound.length; i > 0; i--) for (int j = 0; j < i; j++) stmt.setObject(ix++, lowerBound[j]); if (upperBound != null) for (int i = upperBound.length; i > 0; i--) for (int j = 0; j < i; j++) stmt.setObject(ix++, upperBound[j]); ResultSet rs = stmt.executeQuery(); while (rs.next()) { K key = buildObject(em.cacheName, em.keyType(), em.keyColumns(), em.loadColIdxs, rs); V val = buildObject(em.cacheName, em.valueType(), em.valueColumns(), em.loadColIdxs, rs); clo.apply(key, val); } } catch (SQLException e) { throw new IgniteCheckedException("Failed to load cache", e); } finally { U.closeQuiet(stmt); U.closeQuiet(conn); } return null; } }; } /** * Construct load cache in one select. * * @param m Type mapping description. * @param clo Closure for loaded values. * @return Callable for pool submit. */ private Callable<Void> loadCacheFull(EntryMapping m, IgniteBiInClosure<K, V> clo) { return loadCacheRange(m, clo, null, null); } /** * Object is a simple type. * * @param cls Class. * @return {@code True} if object is a simple type. */ protected static boolean simpleType(Class<?> cls) { return (Number.class.isAssignableFrom(cls) || String.class.isAssignableFrom(cls) || java.util.Date.class.isAssignableFrom(cls) || Boolean.class.isAssignableFrom(cls) || UUID.class.isAssignableFrom(cls)); } /** * @param cacheName Cache name to check mapping for. * @param clsName Class name. * @param fields Fields descriptors. 
* @throws CacheException If failed to check type metadata. */ private static void checkMapping(@Nullable String cacheName, String clsName, Collection<CacheTypeFieldMetadata> fields) throws CacheException { try { Class<?> cls = Class.forName(clsName); if (simpleType(cls)) { if (fields.size() != 1) throw new CacheException("More than one field for simple type [cache name=" + cacheName + ", type=" + clsName + " ]"); CacheTypeFieldMetadata field = F.first(fields); if (field.getDatabaseName() == null) throw new CacheException("Missing database name in mapping description [cache name=" + cacheName + ", type=" + clsName + " ]"); field.setJavaType(cls); } else for (CacheTypeFieldMetadata field : fields) { if (field.getDatabaseName() == null) throw new CacheException("Missing database name in mapping description [cache name=" + cacheName + ", type=" + clsName + " ]"); if (field.getJavaName() == null) throw new CacheException("Missing field name in mapping description [cache name=" + cacheName + ", type=" + clsName + " ]"); if (field.getJavaType() == null) throw new CacheException("Missing field type in mapping description [cache name=" + cacheName + ", type=" + clsName + " ]"); } } catch (ClassNotFoundException e) { throw new CacheException("Failed to find class: " + clsName, e); } } /** * @param cacheName Cache name to check mappings for. * @return Type mappings for specified cache name. * @throws CacheException If failed to initialize cache mappings. 
*/ private Map<Object, EntryMapping> cacheMappings(@Nullable String cacheName) throws CacheException { Map<Object, EntryMapping> entryMappings = cacheMappings.get(cacheName); if (entryMappings != null) return entryMappings; cacheMappingsLock.lock(); try { entryMappings = cacheMappings.get(cacheName); if (entryMappings != null) return entryMappings; CacheConfiguration ccfg = ignite().cache(cacheName).getConfiguration(CacheConfiguration.class); Collection<CacheTypeMetadata> types = ccfg.getTypeMetadata(); entryMappings = U.newHashMap(types.size()); for (CacheTypeMetadata type : types) { Object keyTypeId = keyTypeId(type.getKeyType()); if (entryMappings.containsKey(keyTypeId)) throw new CacheException("Key type must be unique in type metadata [cache name=" + cacheName + ", key type=" + type.getKeyType() + "]"); checkMapping(cacheName, type.getKeyType(), type.getKeyFields()); checkMapping(cacheName, type.getValueType(), type.getValueFields()); entryMappings.put(keyTypeId(type.getKeyType()), new EntryMapping(cacheName, dialect, type)); } Map<String, Map<Object, EntryMapping>> mappings = new HashMap<>(cacheMappings); mappings.put(cacheName, entryMappings); prepareBuilders(cacheName, types); cacheMappings = mappings; return entryMappings; } finally { cacheMappingsLock.unlock(); } } /** * @param cacheName Cache name. * @param keyTypeId Key type id. * @param key Key object. * @return Entry mapping. * @throws CacheException If mapping for key was not found. */ private EntryMapping entryMapping(String cacheName, Object keyTypeId, Object key) throws CacheException { EntryMapping em = cacheMappings(cacheName).get(keyTypeId); if (em == null) { String maskedCacheName = U.maskName(cacheName); throw new CacheException("Failed to find mapping description [key=" + key + ", cache=" + maskedCacheName + "]. 
Please configure CacheTypeMetadata to associate '" + maskedCacheName + "' with JdbcPojoStore."); } return em; } /** {@inheritDoc} */ @Override public void loadCache(final IgniteBiInClosure<K, V> clo, @Nullable Object... args) throws CacheLoaderException { ExecutorService pool = null; String cacheName = session().cacheName(); try { pool = Executors.newFixedThreadPool(maxPoolSz); Collection<Future<?>> futs = new ArrayList<>(); if (args != null && args.length > 0) { if (args.length % 2 != 0) throw new CacheLoaderException("Expected even number of arguments, but found: " + args.length); if (log.isDebugEnabled()) log.debug("Start loading entries from db using user queries from arguments"); for (int i = 0; i < args.length; i += 2) { String keyType = args[i].toString(); String selQry = args[i + 1].toString(); EntryMapping em = entryMapping(cacheName, keyTypeId(keyType), keyType); futs.add(pool.submit(new LoadCacheCustomQueryWorker<>(em, selQry, clo))); } } else { Collection<EntryMapping> entryMappings = cacheMappings(session().cacheName()).values(); for (EntryMapping em : entryMappings) { if (parallelLoadCacheMinThreshold > 0) { log.debug("Multithread loading entries from db [cache name=" + cacheName + ", key type=" + em.keyType() + " ]"); Connection conn = null; try { conn = connection(); PreparedStatement stmt = conn.prepareStatement(em.loadCacheSelRangeQry); stmt.setInt(1, parallelLoadCacheMinThreshold); ResultSet rs = stmt.executeQuery(); if (rs.next()) { int keyCnt = em.keyCols.size(); Object[] upperBound = new Object[keyCnt]; for (int i = 0; i < keyCnt; i++) upperBound[i] = rs.getObject(i + 1); futs.add(pool.submit(loadCacheRange(em, clo, null, upperBound))); while (rs.next()) { Object[] lowerBound = upperBound; upperBound = new Object[keyCnt]; for (int i = 0; i < keyCnt; i++) upperBound[i] = rs.getObject(i + 1); futs.add(pool.submit(loadCacheRange(em, clo, lowerBound, upperBound))); } futs.add(pool.submit(loadCacheRange(em, clo, upperBound, null))); } else 
futs.add(pool.submit(loadCacheFull(em, clo))); } catch (SQLException ignored) { futs.add(pool.submit(loadCacheFull(em, clo))); } finally { U.closeQuiet(conn); } } else { if (log.isDebugEnabled()) log.debug("Single thread loading entries from db [cache name=" + cacheName + ", key type=" + em.keyType() + " ]"); futs.add(pool.submit(loadCacheFull(em, clo))); } } } for (Future<?> fut : futs) U.get(fut); if (log.isDebugEnabled()) log.debug("Cache loaded from db: " + cacheName); } catch (IgniteCheckedException e) { throw new CacheLoaderException("Failed to load cache: " + cacheName, e.getCause()); } finally { U.shutdownNow(getClass(), pool, log); } } /** {@inheritDoc} */ @Nullable @Override public V load(K key) throws CacheLoaderException { assert key != null; EntryMapping em = entryMapping(session().cacheName(), keyTypeId(key), key); if (log.isDebugEnabled()) log.debug("Load value from db [table= " + em.fullTableName() + ", key=" + key + "]"); Connection conn = null; PreparedStatement stmt = null; try { conn = connection(); stmt = conn.prepareStatement(em.loadQrySingle); fillKeyParameters(stmt, em, key); ResultSet rs = stmt.executeQuery(); if (rs.next()) return buildObject(em.cacheName, em.valueType(), em.valueColumns(), em.loadColIdxs, rs); } catch (SQLException e) { throw new CacheLoaderException("Failed to load object [table=" + em.fullTableName() + ", key=" + key + "]", e); } finally { end(conn, stmt); } return null; } /** {@inheritDoc} */ @Override public Map<K, V> loadAll(Iterable<? 
extends K> keys) throws CacheLoaderException { assert keys != null; Connection conn = null; try { conn = connection(); String cacheName = session().cacheName(); Map<Object, LoadWorker<K, V>> workers = U.newHashMap(cacheMappings(cacheName).size()); Map<K, V> res = new HashMap<>(); for (K key : keys) { Object keyTypeId = keyTypeId(key); EntryMapping em = entryMapping(cacheName, keyTypeId, key); LoadWorker<K, V> worker = workers.get(keyTypeId); if (worker == null) workers.put(keyTypeId, worker = new LoadWorker<>(conn, em)); worker.keys.add(key); if (worker.keys.size() == em.maxKeysPerStmt) res.putAll(workers.remove(keyTypeId).call()); } for (LoadWorker<K, V> worker : workers.values()) res.putAll(worker.call()); return res; } catch (Exception e) { throw new CacheWriterException("Failed to load entries from database", e); } finally { closeConnection(conn); } } /** * @param insStmt Insert statement. * @param updStmt Update statement. * @param em Entry mapping. * @param entry Cache entry. * @throws CacheWriterException If failed to update record in database. */ private void writeUpsert(PreparedStatement insStmt, PreparedStatement updStmt, EntryMapping em, Cache.Entry<? extends K, ? 
extends V> entry) throws CacheWriterException { try { CacheWriterException we = null; for (int attempt = 0; attempt < MAX_ATTEMPT_WRITE_COUNT; attempt++) { int paramIdx = fillValueParameters(updStmt, 1, em, entry.getValue()); fillKeyParameters(updStmt, paramIdx, em, entry.getKey()); if (updStmt.executeUpdate() == 0) { paramIdx = fillKeyParameters(insStmt, em, entry.getKey()); fillValueParameters(insStmt, paramIdx, em, entry.getValue()); try { insStmt.executeUpdate(); if (attempt > 0) U.warn(log, "Entry was inserted in database on second try [table=" + em.fullTableName() + ", entry=" + entry + "]"); } catch (SQLException e) { String sqlState = e.getSQLState(); SQLException nested = e.getNextException(); while (sqlState == null && nested != null) { sqlState = nested.getSQLState(); nested = nested.getNextException(); } // The error with code 23505 or 23000 is thrown when trying to insert a row that // would violate a unique index or primary key. if ("23505".equals(sqlState) || "23000".equals(sqlState)) { if (we == null) we = new CacheWriterException("Failed insert entry in database, violate a unique" + " index or primary key [table=" + em.fullTableName() + ", entry=" + entry + "]"); we.addSuppressed(e); U.warn(log, "Failed insert entry in database, violate a unique index or primary key" + " [table=" + em.fullTableName() + ", entry=" + entry + "]"); continue; } throw new CacheWriterException("Failed insert entry in database [table=" + em.fullTableName() + ", entry=" + entry, e); } } if (attempt > 0) U.warn(log, "Entry was updated in database on second try [table=" + em.fullTableName() + ", entry=" + entry + "]"); return; } throw we; } catch (SQLException e) { throw new CacheWriterException("Failed update entry in database [table=" + em.fullTableName() + ", entry=" + entry + "]", e); } } /** {@inheritDoc} */ @Override public void write(Cache.Entry<? extends K, ? 
extends V> entry) throws CacheWriterException { assert entry != null; K key = entry.getKey(); EntryMapping em = entryMapping(session().cacheName(), keyTypeId(key), key); if (log.isDebugEnabled()) log.debug("Start write entry to database [table=" + em.fullTableName() + ", entry=" + entry + "]"); Connection conn = null; try { conn = connection(); if (dialect.hasMerge()) { PreparedStatement stmt = null; try { stmt = conn.prepareStatement(em.mergeQry); int i = fillKeyParameters(stmt, em, key); fillValueParameters(stmt, i, em, entry.getValue()); int updCnt = stmt.executeUpdate(); if (updCnt != 1) U.warn(log, "Unexpected number of updated entries [table=" + em.fullTableName() + ", entry=" + entry + "expected=1, actual=" + updCnt + "]"); } finally { U.closeQuiet(stmt); } } else { PreparedStatement insStmt = null; PreparedStatement updStmt = null; try { insStmt = conn.prepareStatement(em.insQry); updStmt = conn.prepareStatement(em.updQry); writeUpsert(insStmt, updStmt, em, entry); } finally { U.closeQuiet(insStmt); U.closeQuiet(updStmt); } } } catch (SQLException e) { throw new CacheWriterException("Failed to write entry to database [table=" + em.fullTableName() + ", entry=" + entry + "]", e); } finally { closeConnection(conn); } } /** {@inheritDoc} */ @Override public void writeAll(final Collection<Cache.Entry<? extends K, ? extends V>> entries) throws CacheWriterException { assert entries != null; Connection conn = null; try { conn = connection(); String cacheName = session().cacheName(); Object currKeyTypeId = null; if (dialect.hasMerge()) { PreparedStatement mergeStmt = null; try { EntryMapping em = null; LazyValue<Object[]> lazyEntries = new LazyValue<Object[]>() { @Override public Object[] create() { return entries.toArray(); } }; int fromIdx = 0, prepared = 0; for (Cache.Entry<? extends K, ? 
extends V> entry : entries) { K key = entry.getKey(); Object keyTypeId = keyTypeId(key); em = entryMapping(cacheName, keyTypeId, key); if (currKeyTypeId == null || !currKeyTypeId.equals(keyTypeId)) { if (mergeStmt != null) { if (log.isDebugEnabled()) log.debug("Write entries to db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries); U.closeQuiet(mergeStmt); } mergeStmt = conn.prepareStatement(em.mergeQry); currKeyTypeId = keyTypeId; fromIdx += prepared; prepared = 0; } int i = fillKeyParameters(mergeStmt, em, key); fillValueParameters(mergeStmt, i, em, entry.getValue()); mergeStmt.addBatch(); if (++prepared % batchSz == 0) { if (log.isDebugEnabled()) log.debug("Write entries to db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries); fromIdx += prepared; prepared = 0; } } if (mergeStmt != null && prepared % batchSz != 0) { if (log.isDebugEnabled()) log.debug("Write entries to db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, mergeStmt, "writeAll", fromIdx, prepared, lazyEntries); } } finally { U.closeQuiet(mergeStmt); } } else { log.debug("Write entries to db one by one using update and insert statements [cache name=" + cacheName + ", count=" + entries.size() + "]"); PreparedStatement insStmt = null; PreparedStatement updStmt = null; try { for (Cache.Entry<? extends K, ? 
extends V> entry : entries) { K key = entry.getKey(); Object keyTypeId = keyTypeId(key); <|fim▁hole|> EntryMapping em = entryMapping(cacheName, keyTypeId, key); if (currKeyTypeId == null || !currKeyTypeId.equals(keyTypeId)) { U.closeQuiet(insStmt); insStmt = conn.prepareStatement(em.insQry); U.closeQuiet(updStmt); updStmt = conn.prepareStatement(em.updQry); currKeyTypeId = keyTypeId; } writeUpsert(insStmt, updStmt, em, entry); } } finally { U.closeQuiet(insStmt); U.closeQuiet(updStmt); } } } catch (SQLException e) { throw new CacheWriterException("Failed to write entries in database", e); } finally { closeConnection(conn); } } /** {@inheritDoc} */ @Override public void delete(Object key) throws CacheWriterException { assert key != null; EntryMapping em = entryMapping(session().cacheName(), keyTypeId(key), key); if (log.isDebugEnabled()) log.debug("Remove value from db [table=" + em.fullTableName() + ", key=" + key + "]"); Connection conn = null; PreparedStatement stmt = null; try { conn = connection(); stmt = conn.prepareStatement(em.remQry); fillKeyParameters(stmt, em, key); int delCnt = stmt.executeUpdate(); if (delCnt != 1) U.warn(log, "Unexpected number of deleted entries [table=" + em.fullTableName() + ", key=" + key + ", expected=1, actual=" + delCnt + "]"); } catch (SQLException e) { throw new CacheWriterException("Failed to remove value from database [table=" + em.fullTableName() + ", key=" + key + "]", e); } finally { end(conn, stmt); } } /** * @param em Entry mapping. * @param stmt Statement. * @param desc Statement description for error message. * @param fromIdx Objects in batch start from index. * @param prepared Expected objects in batch. * @param lazyObjs All objects used in batch statement as array. * @throws SQLException If failed to execute batch statement. 
*/ private void executeBatch(EntryMapping em, Statement stmt, String desc, int fromIdx, int prepared, LazyValue<Object[]> lazyObjs) throws SQLException { try { int[] rowCounts = stmt.executeBatch(); int numOfRowCnt = rowCounts.length; if (numOfRowCnt != prepared) U.warn(log, "Unexpected number of updated rows [table=" + em.fullTableName() + ", expected=" + prepared + ", actual=" + numOfRowCnt + "]"); for (int i = 0; i < numOfRowCnt; i++) { int cnt = rowCounts[i]; if (cnt != 1 && cnt != SUCCESS_NO_INFO) { Object[] objs = lazyObjs.value(); U.warn(log, "Batch " + desc + " returned unexpected updated row count [table=" + em.fullTableName() + ", entry=" + objs[fromIdx + i] + ", expected=1, actual=" + cnt + "]"); } } } catch (BatchUpdateException be) { int[] rowCounts = be.getUpdateCounts(); for (int i = 0; i < rowCounts.length; i++) { if (rowCounts[i] == EXECUTE_FAILED) { Object[] objs = lazyObjs.value(); U.warn(log, "Batch " + desc + " failed on execution [table=" + em.fullTableName() + ", entry=" + objs[fromIdx + i] + "]"); } } throw be; } } /** {@inheritDoc} */ @Override public void deleteAll(final Collection<?> keys) throws CacheWriterException { assert keys != null; Connection conn = null; try { conn = connection(); LazyValue<Object[]> lazyKeys = new LazyValue<Object[]>() { @Override public Object[] create() { return keys.toArray(); } }; String cacheName = session().cacheName(); Object currKeyTypeId = null; EntryMapping em = null; PreparedStatement delStmt = null; int fromIdx = 0, prepared = 0; for (Object key : keys) { Object keyTypeId = keyTypeId(key); em = entryMapping(cacheName, keyTypeId, key); if (delStmt == null) { delStmt = conn.prepareStatement(em.remQry); currKeyTypeId = keyTypeId; } if (!currKeyTypeId.equals(keyTypeId)) { if (log.isDebugEnabled()) log.debug("Delete entries from db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, delStmt, "deleteAll", fromIdx, prepared, lazyKeys); fromIdx += 
prepared; prepared = 0; currKeyTypeId = keyTypeId; } fillKeyParameters(delStmt, em, key); delStmt.addBatch(); if (++prepared % batchSz == 0) { if (log.isDebugEnabled()) log.debug("Delete entries from db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, delStmt, "deleteAll", fromIdx, prepared, lazyKeys); fromIdx += prepared; prepared = 0; } } if (delStmt != null && prepared % batchSz != 0) { if (log.isDebugEnabled()) log.debug("Delete entries from db [cache name=" + cacheName + ", key type=" + em.keyType() + ", count=" + prepared + "]"); executeBatch(em, delStmt, "deleteAll", fromIdx, prepared, lazyKeys); } } catch (SQLException e) { throw new CacheWriterException("Failed to remove values from database", e); } finally { closeConnection(conn); } } /** * Sets the value of the designated parameter using the given object. * * @param stmt Prepare statement. * @param i Index for parameters. * @param field Field descriptor. * @param fieldVal Field value. * @throws CacheException If failed to set statement parameter. */ protected void fillParameter(PreparedStatement stmt, int i, CacheTypeFieldMetadata field, @Nullable Object fieldVal) throws CacheException { try { if (fieldVal != null) { if (field.getJavaType() == UUID.class) { switch (field.getDatabaseType()) { case Types.BINARY: fieldVal = U.uuidToBytes((UUID)fieldVal); break; case Types.CHAR: case Types.VARCHAR: fieldVal = fieldVal.toString(); break; } } stmt.setObject(i, fieldVal); } else stmt.setNull(i, field.getDatabaseType()); } catch (SQLException e) { throw new CacheException("Failed to set statement parameter name: " + field.getDatabaseName(), e); } } /** * @param stmt Prepare statement. * @param idx Start index for parameters. * @param em Entry mapping. * @param key Key object. * @return Next index for parameters. * @throws CacheException If failed to set statement parameters. 
*/ protected int fillKeyParameters(PreparedStatement stmt, int idx, EntryMapping em, Object key) throws CacheException { for (CacheTypeFieldMetadata field : em.keyColumns()) { Object fieldVal = extractParameter(em.cacheName, em.keyType(), field.getJavaName(), key); fillParameter(stmt, idx++, field, fieldVal); } return idx; } /** * @param stmt Prepare statement. * @param m Type mapping description. * @param key Key object. * @return Next index for parameters. * @throws CacheException If failed to set statement parameters. */ protected int fillKeyParameters(PreparedStatement stmt, EntryMapping m, Object key) throws CacheException { return fillKeyParameters(stmt, 1, m, key); } /** * @param stmt Prepare statement. * @param idx Start index for parameters. * @param em Type mapping description. * @param val Value object. * @return Next index for parameters. * @throws CacheException If failed to set statement parameters. */ protected int fillValueParameters(PreparedStatement stmt, int idx, EntryMapping em, Object val) throws CacheWriterException { for (CacheTypeFieldMetadata field : em.uniqValFields) { Object fieldVal = extractParameter(em.cacheName, em.valueType(), field.getJavaName(), val); fillParameter(stmt, idx++, field, fieldVal); } return idx; } /** * @return Data source. */ public DataSource getDataSource() { return dataSrc; } /** * @param dataSrc Data source. */ public void setDataSource(DataSource dataSrc) { this.dataSrc = dataSrc; } /** * Get database dialect. * * @return Database dialect. */ public JdbcDialect getDialect() { return dialect; } /** * Set database dialect. * * @param dialect Database dialect. */ public void setDialect(JdbcDialect dialect) { this.dialect = dialect; } /** * Get Max workers thread count. These threads are responsible for execute query. * * @return Max workers thread count. */ public int getMaximumPoolSize() { return maxPoolSz; } /** * Set Max workers thread count. These threads are responsible for execute query. 
* * @param maxPoolSz Max workers thread count. */ public void setMaximumPoolSize(int maxPoolSz) { this.maxPoolSz = maxPoolSz; } /** * Get maximum batch size for delete and delete operations. * * @return Maximum batch size. */ public int getBatchSize() { return batchSz; } /** * Set maximum batch size for write and delete operations. * * @param batchSz Maximum batch size. */ public void setBatchSize(int batchSz) { this.batchSz = batchSz; } /** * Parallel load cache minimum row count threshold. * * @return If {@code 0} then load sequentially. */ public int getParallelLoadCacheMinimumThreshold() { return parallelLoadCacheMinThreshold; } /** * Parallel load cache minimum row count threshold. * * @param parallelLoadCacheMinThreshold Minimum row count threshold. If {@code 0} then load sequentially. */ public void setParallelLoadCacheMinimumThreshold(int parallelLoadCacheMinThreshold) { this.parallelLoadCacheMinThreshold = parallelLoadCacheMinThreshold; } /** * @return Ignite instance. */ protected Ignite ignite() { return ignite; } /** * @return Store session. */ protected CacheStoreSession session() { return ses; } /** * Entry mapping description. */ protected static class EntryMapping { /** Cache name. */ private final String cacheName; /** Database dialect. */ private final JdbcDialect dialect; /** Select border for range queries. */ private final String loadCacheSelRangeQry; /** Select all items query. */ private final String loadCacheQry; /** Select item query. */ private final String loadQrySingle; /** Select items query. */ private final String loadQry; /** Merge item(s) query. */ private final String mergeQry; /** Update item query. */ private final String insQry; /** Update item query. */ private final String updQry; /** Remove item(s) query. */ private final String remQry; /** Max key count for load query per statement. */ private final int maxKeysPerStmt; /** Database key columns. */ private final Collection<String> keyCols; /** Database unique value columns. 
*/ private final Collection<String> cols; /** Select query columns index. */ private final Map<String, Integer> loadColIdxs; /** Unique value fields. */ private final Collection<CacheTypeFieldMetadata> uniqValFields; /** Type metadata. */ private final CacheTypeMetadata typeMeta; /** Full table name. */ private final String fullTblName; /** * @param cacheName Cache name. * @param dialect JDBC dialect. * @param typeMeta Type metadata. */ public EntryMapping(@Nullable String cacheName, JdbcDialect dialect, CacheTypeMetadata typeMeta) { this.cacheName = cacheName; this.dialect = dialect; this.typeMeta = typeMeta; Collection<CacheTypeFieldMetadata> keyFields = typeMeta.getKeyFields(); Collection<CacheTypeFieldMetadata> valFields = typeMeta.getValueFields(); keyCols = databaseColumns(keyFields); uniqValFields = F.view(valFields, new IgnitePredicate<CacheTypeFieldMetadata>() { @Override public boolean apply(CacheTypeFieldMetadata col) { return !keyCols.contains(col.getDatabaseName()); } }); String schema = typeMeta.getDatabaseSchema(); String tblName = typeMeta.getDatabaseTable(); fullTblName = F.isEmpty(schema) ? tblName : schema + "." 
+ tblName; Collection<String> uniqValCols = databaseColumns(uniqValFields); cols = F.concat(false, keyCols, uniqValCols); loadColIdxs = U.newHashMap(cols.size()); int idx = 1; for (String col : cols) loadColIdxs.put(col, idx++); loadCacheQry = dialect.loadCacheQuery(fullTblName, cols); loadCacheSelRangeQry = dialect.loadCacheSelectRangeQuery(fullTblName, keyCols); loadQrySingle = dialect.loadQuery(fullTblName, keyCols, cols, 1); maxKeysPerStmt = dialect.getMaxParameterCount() / keyCols.size(); loadQry = dialect.loadQuery(fullTblName, keyCols, cols, maxKeysPerStmt); insQry = dialect.insertQuery(fullTblName, keyCols, uniqValCols); updQry = dialect.updateQuery(fullTblName, keyCols, uniqValCols); mergeQry = dialect.mergeQuery(fullTblName, keyCols, uniqValCols); remQry = dialect.removeQuery(fullTblName, keyCols); } /** * Extract database column names from {@link CacheTypeFieldMetadata}. * * @param dsc collection of {@link CacheTypeFieldMetadata}. * @return Collection with database column names. */ private static Collection<String> databaseColumns(Collection<CacheTypeFieldMetadata> dsc) { return F.transform(dsc, new C1<CacheTypeFieldMetadata, String>() { /** {@inheritDoc} */ @Override public String apply(CacheTypeFieldMetadata col) { return col.getDatabaseName(); } }); } /** * Construct query for select values with key count less or equal {@code maxKeysPerStmt} * * @param keyCnt Key count. * @return Load query statement text. */ protected String loadQuery(int keyCnt) { assert keyCnt <= maxKeysPerStmt; if (keyCnt == maxKeysPerStmt) return loadQry; if (keyCnt == 1) return loadQrySingle; return dialect.loadQuery(fullTblName, keyCols, cols, keyCnt); } /** * Construct query for select values in range. * * @param appendLowerBound Need add lower bound for range. * @param appendUpperBound Need add upper bound for range. * @return Query with range. 
*/ protected String loadCacheRangeQuery(boolean appendLowerBound, boolean appendUpperBound) { return dialect.loadCacheRangeQuery(fullTblName, keyCols, cols, appendLowerBound, appendUpperBound); } /** * @return Key type. */ protected String keyType() { return typeMeta.getKeyType(); } /** * @return Value type. */ protected String valueType() { return typeMeta.getValueType(); } /** * Gets key columns. * * @return Key columns. */ protected Collection<CacheTypeFieldMetadata> keyColumns() { return typeMeta.getKeyFields(); } /** * Gets value columns. * * @return Value columns. */ protected Collection<CacheTypeFieldMetadata> valueColumns() { return typeMeta.getValueFields(); } /** * Get full table name. * * @return &lt;schema&gt;.&lt;table name&gt */ protected String fullTableName() { return fullTblName; } } /** * Worker for load cache using custom user query. * * @param <K1> Key type. * @param <V1> Value type. */ private class LoadCacheCustomQueryWorker<K1, V1> implements Callable<Void> { /** Entry mapping description. */ private final EntryMapping em; /** User query. */ private final String qry; /** Closure for loaded values. */ private final IgniteBiInClosure<K1, V1> clo; /** * @param em Entry mapping description. * @param qry User query. * @param clo Closure for loaded values. 
*/ private LoadCacheCustomQueryWorker(EntryMapping em, String qry, IgniteBiInClosure<K1, V1> clo) { this.em = em; this.qry = qry; this.clo = clo; } /** {@inheritDoc} */ @Override public Void call() throws Exception { if (log.isDebugEnabled()) log.debug("Load cache using custom query [cache name= " + em.cacheName + ", key type=" + em.keyType() + ", query=" + qry + "]"); Connection conn = null; PreparedStatement stmt = null; try { conn = openConnection(true); stmt = conn.prepareStatement(qry); ResultSet rs = stmt.executeQuery(); ResultSetMetaData meta = rs.getMetaData(); Map<String, Integer> colIdxs = U.newHashMap(meta.getColumnCount()); for (int i = 1; i <= meta.getColumnCount(); i++) colIdxs.put(meta.getColumnLabel(i), i); while (rs.next()) { K1 key = buildObject(em.cacheName, em.keyType(), em.keyColumns(), colIdxs, rs); V1 val = buildObject(em.cacheName, em.valueType(), em.valueColumns(), colIdxs, rs); clo.apply(key, val); } return null; } catch (SQLException e) { throw new CacheLoaderException("Failed to execute custom query for load cache", e); } finally { U.closeQuiet(stmt); U.closeQuiet(conn); } } } /** * Lazy initialization of value. * * @param <T> Cached object type */ private abstract static class LazyValue<T> { /** Cached value. */ private T val; /** * @return Construct value. */ protected abstract T create(); /** * @return Value. */ public T value() { if (val == null) val = create(); return val; } } /** * Worker for load by keys. * * @param <K1> Key type. * @param <V1> Value type. */ private class LoadWorker<K1, V1> implements Callable<Map<K1, V1>> { /** Connection. */ private final Connection conn; /** Keys for load. */ private final Collection<K1> keys; /** Entry mapping description. */ private final EntryMapping em; /** * @param conn Connection. * @param em Entry mapping description. 
*/ private LoadWorker(Connection conn, EntryMapping em) { this.conn = conn; this.em = em; keys = new ArrayList<>(em.maxKeysPerStmt); } /** {@inheritDoc} */ @Override public Map<K1, V1> call() throws Exception { if (log.isDebugEnabled()) log.debug("Load values from db [table= " + em.fullTableName() + ", key count=" + keys.size() + "]"); PreparedStatement stmt = null; try { stmt = conn.prepareStatement(em.loadQuery(keys.size())); int idx = 1; for (Object key : keys) for (CacheTypeFieldMetadata field : em.keyColumns()) { Object fieldVal = extractParameter(em.cacheName, em.keyType(), field.getJavaName(), key); fillParameter(stmt, idx++, field, fieldVal); } ResultSet rs = stmt.executeQuery(); Map<K1, V1> entries = U.newHashMap(keys.size()); while (rs.next()) { K1 key = buildObject(em.cacheName, em.keyType(), em.keyColumns(), em.loadColIdxs, rs); V1 val = buildObject(em.cacheName, em.valueType(), em.valueColumns(), em.loadColIdxs, rs); entries.put(key, val); } return entries; } finally { U.closeQuiet(stmt); } } } }<|fim▁end|>
<|file_name|>queueoftwostacks.py<|end_file_name|><|fim▁begin|># -*- coding:utf-8; mode:python -*- """ Implements a queue efficiently using only two stacks. """ from helpers import SingleNode from stack import Stack class QueueOf2Stacks: def __init__(self): self.stack_1 = Stack() self.stack_2 = Stack() def enqueue(self, value): self.stack_1.push(value) def dequeue(self): self.transfer_if_necessary() if self.stack_2: return self.stack_2.pop() def peek(self): self.transfer_if_necessary() if self.stack_2: return self.stack_2.peek() def transfer_if_necessary(self): if not self.stack_2:<|fim▁hole|> def __len__(self): return len(self.stack_1) + len(self.stack_2) def main(): queue = QueueOf2Stacks() print() for i in range(10): queue.enqueue(i) print(i) print("---") for i in range(len(queue)): print(queue.dequeue()) if __name__ == "__main__": main() main()<|fim▁end|>
while self.stack_1: self.stack_2.push(self.stack_1.pop())
<|file_name|>threatcrowd.py<|end_file_name|><|fim▁begin|>import datetime import json import logging import requests from core.analytics import OneShotAnalytics from core.errors import ObservableValidationError from core.observables import Hostname, Email, Ip, Hash class ThreatCrowdAPI(object): """Base class for querying the ThreatCrowd API.""" @staticmethod def fetch(observable): base_url_api = "https://www.threatcrowd.org/searchApi/v2" if isinstance(observable, Hostname): url = base_url_api + "/domain/report/" params = {"domain": observable.value} try: res = requests.get(url, params) if res.ok: return res.json() except Exception as e: print("Exception while getting domain report {}".format(e.message)) return None elif isinstance(observable, Email): url = base_url_api + "/email/report/" params = {"email": observable.value} try: res = requests.get(url, params) if res.ok: return res.json() except Exception as e: print("Exception while getting email report {}".format(e.message)) return None elif isinstance(observable, Ip): url = base_url_api + "/ip/report/" print(url) params = {"ip": observable.value} print(params) try: res = requests.get(url, params) if res.ok: return res.json() except Exception as e: print("Exception while getting email report {}".format(e.message)) return None elif isinstance(observable, Hash): url = base_url_api + "/file/report/" params = {"resource": observable.value} try: res = requests.get(url, params) if res.ok: return res.json() except Exception as e: print("Exception while getting email report {}".format(e.message)) return None class ThreatCrowdQuery(ThreatCrowdAPI, OneShotAnalytics): default_values = { "name": "ThreatCrowd", "description": "Perform a ThreatCrowd query.", } ACTS_ON = ["Ip", "Hostname", "Hash", "Email"] @staticmethod def analyze(observable, results): links = set() json_result = ThreatCrowdAPI.fetch(observable) json_string = json.dumps( json_result, sort_keys=True, indent=4, separators=(",", ": ") ) 
results.update(raw=json_string) result = {} if isinstance(observable, Hostname): if "resolutions" in json_result: result["ip on this domains"] = 0 for ip in json_result["resolutions"]: if ip["ip_address"].strip() != observable.value: if ip["last_resolved"] != "0000-00-00": last_resolved = datetime.datetime.strptime( ip["last_resolved"], "%Y-%m-%d" ) try: new_ip = Ip.get_or_create( value=ip["ip_address"].strip() ) links.update( new_ip.active_link_to( observable, "IP", "ThreatCrowd", last_resolved ) ) result["ip on this domains"] += 1 except ObservableValidationError: logging.error( "An error occurred when trying to add subdomain {} to the database".format( ip["ip_address"] ) )<|fim▁hole|> if "emails" in json_result: result["nb emails"] = 0 for email in json_result["emails"]: try: new_email = Email.get_or_create(value=email) links.update( new_email.active_link_to( observable, "Used by", "ThreatCrowd" ) ) result["nb emails"] += 1 except ObservableValidationError: logging.error( "An error occurred when trying to add email {} to the database".format( email ) ) if "subdomains" in json_result: result["nb subdomains"] = 0 for subdomain in json_result["subdomains"]: try: new_domain = Hostname.get_or_create(value=subdomain) links.update( observable.active_link_to( new_domain, "subdomain", "ThreatCrowd" ) ) result["nb subdomains"] += 1 except ObservableValidationError: logging.error( "An error occurred when trying to add subdomain {} to the database".format( subdomain ) ) if isinstance(observable, Ip): if "resolutions" in json_result: result["domains resolved"] = 0 for domain in json_result["resolutions"]: if domain["domain"].strip() != observable.value: try: last_resolved = datetime.datetime.strptime( domain["last_resolved"], "%Y-%m-%d" ) new_domain = Hostname.get_or_create( value=domain["domain"].strip() ) links.update( new_domain.active_link_to( observable, "A Record", "ThreatCrowd", last_resolved ) ) result["domains resolved"] += 1 except ObservableValidationError: 
logging.error( "An error occurred when trying to add domain {} to the database".format( domain["domain"] ) ) if "hashes" in json_result and len(json_result["hashes"]) > 0: result["malwares"] = 0 for h in json_result["hashes"]: new_hash = Hash.get_or_create(value=h) links.update( new_hash.active_link_to(observable, "hash", "ThreatCrowd") ) result["malwares"] += 1 if isinstance(observable, Email): if "domains" in json_result and len(json_result) > 0: result["domains recorded by email"] = 0 for domain in json_result["domains"]: new_domain = Hostname.get_or_create(value=domain) links.update( new_domain.active_link_to( observable, "recorded by", "ThreatCrowd" ) ) result["domains recorded by email"] += 1 if isinstance(observable, Hash): result["nb c2"] = 0 if "md5" in json_result: new_hash = Hash.get_or_create(value=json_result["md5"]) links.update(new_hash.active_link_to(observable, "md5", "ThreadCrowd")) if "sha1" in json_result: new_hash = Hash.get_or_create(value=json_result["sha1"]) links.update(new_hash.active_link_to(observable, "sha1", "ThreadCrowd")) if "sha256" in json_result: new_hash = Hash.get_or_create(value=json_result["sha256"]) links.update( new_hash.active_link_to(observable, "sha256", "ThreadCrowd") ) if "domains" in json_result and len(json_result["domains"]): for domain in json_result["domains"]: new_domain = Hostname.get_or_create(value=domain) links.update( observable.active_link_to(new_domain, "c2", "ThreatCrowd") ) result["nb c2"] += 1 if "ips" in json_result and len(json_result["ips"]): for ip in json_result["ips"]: new_ip = Ip.get_or_create(value=ip.strip()) links.update(observable.active_link_to(new_ip, "c2", "ThreatCrowd")) result["nb c2"] += 1 if "permalink" in json_result: result["permalink"] = json_result["permalink"] result["source"] = "threatcrowd_query" result["raw"] = json_string observable.add_context(result) return list(links)<|fim▁end|>
<|file_name|>urls.py<|end_file_name|><|fim▁begin|>from django.conf.urls import patterns, include, url from django.conf.urls.static import static from django.conf import settings from django.contrib import admin from wagtail.wagtailadmin import urls as wagtailadmin_urls from wagtail.wagtailsearch import urls as wagtailsearch_urls from wagtail.wagtaildocs import urls as wagtaildocs_urls from wagtail.wagtailcore import urls as wagtail_urls from feeds.feeds import BlogFeed admin.autodiscover() urlpatterns = patterns('', url(r'^django-admin/', include(admin.site.urls)), url(r'^admin/', include(wagtailadmin_urls)), url(r'^search/', include(wagtailsearch_urls)), url(r'^documents/', include(wagtaildocs_urls)), url(r'^blog/feed/$', BlogFeed(), name='blog_feed'), <|fim▁hole|> # Wagtail's serving mechanism url(r'', include(wagtail_urls)), ) if settings.DEBUG: from django.contrib.staticfiles.urls import staticfiles_urlpatterns from django.views.generic.base import RedirectView urlpatterns += staticfiles_urlpatterns() urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) urlpatterns += [ url(r'^favicon\.ico$', RedirectView.as_view(url=settings.STATIC_URL + 'favicon.ico', permanent=True)) ]<|fim▁end|>
# For anything not caught by a more specific rule above, hand over to
<|file_name|>sample_test.go<|end_file_name|><|fim▁begin|>package tddbc<|fim▁hole|> "github.com/stretchr/testify/assert" "testing" ) func TestSay(t *testing.T) { actual := Say("Wow!") expected := "Wow! TDD BootCamp!!" if actual != expected { t.Errorf("actual=%s, expect=%s", actual, expected) } } func TestSay_testify(t *testing.T) { actual := Say("Hello!") assert.Equal(t, "Hello! TDD BootCamp!!", actual, "they should be equal") }<|fim▁end|>
import (
<|file_name|>funcReacLotkaVolterra.py<|end_file_name|><|fim▁begin|>######################################################################### ## This program is part of 'MOOSE', the ## Messaging Object Oriented Simulation Environment. ## Copyright (C) 2013 Upinder S. Bhalla. and NCBS ## It is made available under the terms of the ## GNU Lesser General Public License version 2.1 ## See the file COPYING.LIB for the full notice. ######################################################################### import math import pylab import numpy import moose runtime = 50.0 def makeModel(): # create container for model model = moose.Neutral( 'model' ) harmonic = moose.CubeMesh( '/model/harmonic' ) harmonic.volume = 1e-15 lotka = moose.CubeMesh( '/model/lotka' ) lotka.volume = 1e-15 # create molecules and reactions x = moose.Pool( '/model/lotka/x' ) y = moose.Pool( '/model/lotka/y' ) z = moose.BufPool( '/model/lotka/z' ) # Dummy molecule. xreac = moose.Reac( '/model/lotka/xreac' ) yreac = moose.Reac( '/model/lotka/yreac' ) xrate = moose.Function( '/model/lotka/xreac/func' ) yrate = moose.Function( '/model/lotka/yreac/func' ) # Parameters alpha = 1.0 beta = 1.0 gamma = 1.0 delta = 1.0 k = 1.0 x.nInit = 2.0 y.nInit = 1.0 z.nInit = 0.0 xrate.x.num = 1 yrate.x.num = 1 xrate.expr = "x0 * " + str( beta ) + " - " + str( alpha ) yrate.expr = str( gamma ) + " - x0 * " + str( delta ) xreac.Kf = k yreac.Kf = k xreac.Kb = 0 yreac.Kb = 0 # connect them up for reactions moose.connect( y, 'nOut', xrate.x[0], 'input' ) moose.connect( x, 'nOut', yrate.x[0], 'input' ) moose.connect( xrate, 'valueOut', xreac, 'setNumKf' ) moose.connect( yrate, 'valueOut', yreac, 'setNumKf' ) moose.connect( xreac, 'sub', x, 'reac' ) moose.connect( xreac, 'prd', z, 'reac' ) moose.connect( yreac, 'sub', y, 'reac' ) moose.connect( yreac, 'prd', z, 'reac' ) # Create the output tables graphs = moose.Neutral( '/model/graphs' ) xplot = moose.Table2 ( '/model/graphs/x' ) yplot = moose.Table2 ( '/model/graphs/y' ) # 
connect up the tables moose.connect( xplot, 'requestOut', x, 'getN' ); moose.connect( yplot, 'requestOut', y, 'getN' ); def main(): """ The funcReacLotkaVolterra example shows how to use function objects as part of differential equation systems in the framework of the MOOSE kinetic solvers. Here the system is set up explicitly using the scripting, in normal use one would expect to use SBML. In this example we set up a Lotka-Volterra system. The equations are readily expressed as a pair of reactions each of whose rate is governed by a function:: x' = x( alpha - beta.y ) y' = -y( gamma - delta.x ) This translates into two reactions:: x ---> z Kf = beta.y - alpha y ---> z Kf = gamma - delta.x <|fim▁hole|> Here z is a dummy molecule whose concentration is buffered to zero. The model first runs using default Exponential Euler integration. This is not particularly accurate even with a small timestep. The model is then converted to use the deterministic Kinetic solver Ksolve. This is accurate and faster. Note that we cannot use the stochastic GSSA solver for this system, it cannot handle a reaction term whose rate keeps changing. """ makeModel() for i in range( 11, 18 ): moose.setClock( i, 0.001 ) moose.setClock( 18, 0.1 ) moose.reinit() moose.start( runtime ) # Run the model # Iterate through all plots, dump their contents to data.plot. for x in moose.wildcardFind( '/model/graphs/#' ): #x.xplot( 'scriptKineticModel.plot', x.name ) t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec pylab.plot( t, x.vector, label=x.name ) pylab.ylim( 0, 2.5 ) pylab.title( "Exponential Euler solution. 
Note slight error buildup" ) pylab.legend() pylab.figure() compt = moose.element( '/model/lotka' ) ksolve = moose.Ksolve( '/model/lotka/ksolve' ) stoich = moose.Stoich( '/model/lotka/stoich' ) stoich.compartment = compt stoich.ksolve = ksolve stoich.path = '/model/lotka/##' moose.reinit() moose.start( runtime ) # Run the model for i in range( 11, 18 ): moose.setClock( i, 0.1 ) for x in moose.wildcardFind( '/model/graphs/#' ): t = numpy.arange( 0, x.vector.size, 1 ) * x.dt # sec pylab.plot( t, x.vector, label=x.name ) pylab.ylim( 0, 2.5 ) pylab.title( "Runge-Kutta solution." ) pylab.legend() pylab.show() quit() # Run the 'main' if this script is executed standalone. if __name__ == '__main__': main()<|fim▁end|>
<|file_name|>VideoPlayer.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2005-2015 Team Kodi * http://kodi.tv * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with Kodi; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "system.h" #include "VideoPlayerRadioRDS.h" #include "VideoPlayer.h" #include "DVDInputStreams/DVDInputStream.h" #include "DVDInputStreams/DVDFactoryInputStream.h" #include "DVDInputStreams/DVDInputStreamNavigator.h" #include "DVDInputStreams/DVDInputStreamPVRManager.h" #include "DVDDemuxers/DVDDemux.h" #include "DVDDemuxers/DVDDemuxUtils.h" #include "DVDDemuxers/DVDDemuxVobsub.h" #include "DVDDemuxers/DVDFactoryDemuxer.h" #include "DVDDemuxers/DVDDemuxFFmpeg.h" #include "DVDFileInfo.h" #include "utils/LangCodeExpander.h" #include "input/Key.h" #include "guilib/LocalizeStrings.h" #include "utils/URIUtils.h" #include "GUIInfoManager.h" #include "cores/DataCacheCore.h" #include "guilib/GUIWindowManager.h" #include "guilib/StereoscopicsManager.h" #include "Application.h" #include "messaging/ApplicationMessenger.h" #include "DVDDemuxers/DVDDemuxCC.h" #include "cores/VideoPlayer/VideoRenderers/RenderManager.h" #include "cores/VideoPlayer/VideoRenderers/RenderFlags.h" #ifdef HAS_PERFORMANCE_SAMPLE #include "xbmc/utils/PerformanceSample.h" #else #define MEASURE_FUNCTION #endif #include "settings/AdvancedSettings.h" #include "FileItem.h" #include "GUIUserMessages.h" #include "settings/Settings.h" #include 
"settings/MediaSettings.h" #include "utils/log.h" #include "utils/StreamDetails.h" #include "pvr/PVRManager.h" #include "utils/StreamUtils.h" #include "utils/Variant.h" #include "storage/MediaManager.h" #include "dialogs/GUIDialogBusy.h" #include "dialogs/GUIDialogKaiToast.h" #include "utils/StringUtils.h" #include "Util.h" #include "LangInfo.h" #include "URL.h" #include "video/VideoReferenceClock.h" #ifdef HAS_OMXPLAYER #include "cores/omxplayer/OMXPlayerAudio.h" #include "cores/omxplayer/OMXPlayerVideo.h" #include "cores/omxplayer/OMXHelper.h" #endif #include "VideoPlayerAudio.h" #include "windowing/WindowingFactory.h" #include "DVDCodecs/DVDCodecUtils.h" using namespace PVR; using namespace KODI::MESSAGING; void CSelectionStreams::Clear(StreamType type, StreamSource source) { CSingleLock lock(m_section); for(int i=m_Streams.size()-1;i>=0;i--) { if(type && m_Streams[i].type != type) continue; if(source && m_Streams[i].source != source) continue; m_Streams.erase(m_Streams.begin() + i); } } SelectionStream& CSelectionStreams::Get(StreamType type, int index) { CSingleLock lock(m_section); int count = -1; for(size_t i=0;i<m_Streams.size();i++) { if(m_Streams[i].type != type) continue; count++; if(count == index) return m_Streams[i]; } return m_invalid; } std::vector<SelectionStream> CSelectionStreams::Get(StreamType type) { std::vector<SelectionStream> streams; int count = Count(type); for(int index = 0; index < count; ++index){ streams.push_back(Get(type, index)); } return streams; } #define PREDICATE_RETURN(lh, rh) \ do { \ if((lh) != (rh)) \ return (lh) > (rh); \ } while(0) class PredicateSubtitleFilter { private: std::string audiolang; bool original; bool nosub; bool onlyforced; public: /** \brief The class' operator() decides if the given (subtitle) SelectionStream is relevant wrt. * preferred subtitle language and audio language. If the subtitle is relevant <B>false</B> false is returned. 
* * A subtitle is relevant if * - it was previously selected, or * - it's an external sub, or * - it's a forced sub and "original stream's language" was selected, or * - it's a forced sub and its language matches the audio's language, or * - it's a default sub, or * - its language matches the preferred subtitle's language (unequal to "original stream's language") */ PredicateSubtitleFilter(std::string& lang) : audiolang(lang), original(StringUtils::EqualsNoCase(CSettings::GetInstance().GetString(CSettings::SETTING_LOCALE_SUBTITLELANGUAGE), "original")), nosub(StringUtils::EqualsNoCase(CSettings::GetInstance().GetString(CSettings::SETTING_LOCALE_SUBTITLELANGUAGE), "none")), onlyforced(StringUtils::EqualsNoCase(CSettings::GetInstance().GetString(CSettings::SETTING_LOCALE_SUBTITLELANGUAGE), "forced_only")) { }; bool operator()(const SelectionStream& ss) const { if (ss.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleStream) return false; if (nosub) return true; if (onlyforced) { if ((ss.flags & CDemuxStream::FLAG_FORCED) && g_LangCodeExpander.CompareISO639Codes(ss.language, audiolang)) return false; else return true; } if(STREAM_SOURCE_MASK(ss.source) == STREAM_SOURCE_DEMUX_SUB || STREAM_SOURCE_MASK(ss.source) == STREAM_SOURCE_TEXT) return false; if ((ss.flags & CDemuxStream::FLAG_FORCED) && (original || g_LangCodeExpander.CompareISO639Codes(ss.language, audiolang))) return false; if ((ss.flags & CDemuxStream::FLAG_DEFAULT)) return false; if(!original) { std::string subtitle_language = g_langInfo.GetSubtitleLanguage(); if (g_LangCodeExpander.CompareISO639Codes(subtitle_language, ss.language)) return false; } return true; } }; static bool PredicateAudioPriority(const SelectionStream& lh, const SelectionStream& rh) { PREDICATE_RETURN(lh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_AudioStream , rh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_AudioStream); 
if(!StringUtils::EqualsNoCase(CSettings::GetInstance().GetString(CSettings::SETTING_LOCALE_AUDIOLANGUAGE), "original")) { std::string audio_language = g_langInfo.GetAudioLanguage(); PREDICATE_RETURN(g_LangCodeExpander.CompareISO639Codes(audio_language, lh.language) , g_LangCodeExpander.CompareISO639Codes(audio_language, rh.language)); bool hearingimp = CSettings::GetInstance().GetBool(CSettings::SETTING_ACCESSIBILITY_AUDIOHEARING); PREDICATE_RETURN(!hearingimp ? !(lh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED) : lh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED , !hearingimp ? !(rh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED) : rh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED); bool visualimp = CSettings::GetInstance().GetBool(CSettings::SETTING_ACCESSIBILITY_AUDIOVISUAL); PREDICATE_RETURN(!visualimp ? !(lh.flags & CDemuxStream::FLAG_VISUAL_IMPAIRED) : lh.flags & CDemuxStream::FLAG_VISUAL_IMPAIRED , !visualimp ? !(rh.flags & CDemuxStream::FLAG_VISUAL_IMPAIRED) : rh.flags & CDemuxStream::FLAG_VISUAL_IMPAIRED); } if (CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_PREFERDEFAULTFLAG)) { PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_DEFAULT , rh.flags & CDemuxStream::FLAG_DEFAULT); } PREDICATE_RETURN(lh.channels , rh.channels); PREDICATE_RETURN(StreamUtils::GetCodecPriority(lh.codec) , StreamUtils::GetCodecPriority(rh.codec)); PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_DEFAULT , rh.flags & CDemuxStream::FLAG_DEFAULT); return false; } /** \brief The class' operator() decides if the given (subtitle) SelectionStream lh is 'better than' the given (subtitle) SelectionStream rh. * If lh is 'better than' rh the return value is true, false otherwise. 
* * A subtitle lh is 'better than' a subtitle rh (in evaluation order) if * - lh was previously selected, or * - lh is an external sub and rh not, or * - lh is a forced sub and ("original stream's language" was selected or subtitles are off) and rh not, or * - lh is an external sub and its language matches the preferred subtitle's language (unequal to "original stream's language") and rh not, or * - lh is language matches the preferred subtitle's language (unequal to "original stream's language") and rh not, or * - lh is a default sub and rh not */ class PredicateSubtitlePriority { private: std::string audiolang; bool original; bool subson; PredicateSubtitleFilter filter; public: PredicateSubtitlePriority(std::string& lang) : audiolang(lang), original(StringUtils::EqualsNoCase(CSettings::GetInstance().GetString(CSettings::SETTING_LOCALE_SUBTITLELANGUAGE), "original")), subson(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleOn), filter(lang) { }; bool relevant(const SelectionStream& ss) const { return !filter(ss); } bool operator()(const SelectionStream& lh, const SelectionStream& rh) const { PREDICATE_RETURN(relevant(lh) , relevant(rh)); PREDICATE_RETURN(lh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleStream , rh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleStream); // prefer external subs PREDICATE_RETURN(STREAM_SOURCE_MASK(lh.source) == STREAM_SOURCE_DEMUX_SUB || STREAM_SOURCE_MASK(lh.source) == STREAM_SOURCE_TEXT , STREAM_SOURCE_MASK(rh.source) == STREAM_SOURCE_DEMUX_SUB || STREAM_SOURCE_MASK(rh.source) == STREAM_SOURCE_TEXT); if(!subson || original) { PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_FORCED && g_LangCodeExpander.CompareISO639Codes(lh.language, audiolang) , rh.flags & CDemuxStream::FLAG_FORCED && g_LangCodeExpander.CompareISO639Codes(rh.language, audiolang)); PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_FORCED , rh.flags & CDemuxStream::FLAG_FORCED); } 
std::string subtitle_language = g_langInfo.GetSubtitleLanguage(); if(!original) { PREDICATE_RETURN((STREAM_SOURCE_MASK(lh.source) == STREAM_SOURCE_DEMUX_SUB || STREAM_SOURCE_MASK(lh.source) == STREAM_SOURCE_TEXT) && g_LangCodeExpander.CompareISO639Codes(subtitle_language, lh.language) , (STREAM_SOURCE_MASK(rh.source) == STREAM_SOURCE_DEMUX_SUB || STREAM_SOURCE_MASK(rh.source) == STREAM_SOURCE_TEXT) && g_LangCodeExpander.CompareISO639Codes(subtitle_language, rh.language)); } if(!original) { PREDICATE_RETURN(g_LangCodeExpander.CompareISO639Codes(subtitle_language, lh.language) , g_LangCodeExpander.CompareISO639Codes(subtitle_language, rh.language)); bool hearingimp = CSettings::GetInstance().GetBool(CSettings::SETTING_ACCESSIBILITY_SUBHEARING); PREDICATE_RETURN(!hearingimp ? !(lh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED) : lh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED , !hearingimp ? !(rh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED) : rh.flags & CDemuxStream::FLAG_HEARING_IMPAIRED); } PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_DEFAULT , rh.flags & CDemuxStream::FLAG_DEFAULT); return false; } }; static bool PredicateVideoPriority(const SelectionStream& lh, const SelectionStream& rh) { PREDICATE_RETURN(lh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_VideoStream , rh.type_index == CMediaSettings::GetInstance().GetCurrentVideoSettings().m_VideoStream); PREDICATE_RETURN(lh.flags & CDemuxStream::FLAG_DEFAULT , rh.flags & CDemuxStream::FLAG_DEFAULT); return false; } bool CSelectionStreams::Get(StreamType type, CDemuxStream::EFlags flag, SelectionStream& out) { CSingleLock lock(m_section); for(size_t i=0;i<m_Streams.size();i++) { if(m_Streams[i].type != type) continue; if((m_Streams[i].flags & flag) != flag) continue; out = m_Streams[i]; return true; } return false; } int CSelectionStreams::IndexOf(StreamType type, int source, int id) const { CSingleLock lock(m_section); int count = -1; for(size_t i=0;i<m_Streams.size();i++) { if(type && 
m_Streams[i].type != type) continue; count++; if(source && m_Streams[i].source != source) continue; if(id < 0) continue; if(m_Streams[i].id == id) return count; } if(id < 0) return count; else return -1; } int CSelectionStreams::IndexOf(StreamType type, const CVideoPlayer& p) const { if (p.m_pInputStream && p.m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)) { int id = -1; if(type == STREAM_AUDIO) id = ((CDVDInputStreamNavigator*)p.m_pInputStream)->GetActiveAudioStream(); else if (type == STREAM_VIDEO) id = ((CDVDInputStreamNavigator*)p.m_pInputStream)->GetActiveAngle(); else if(type == STREAM_SUBTITLE) id = ((CDVDInputStreamNavigator*)p.m_pInputStream)->GetActiveSubtitleStream(); return IndexOf(type, STREAM_SOURCE_NAV, id); } if(type == STREAM_AUDIO) return IndexOf(type, p.m_CurrentAudio.source, p.m_CurrentAudio.id); else if(type == STREAM_VIDEO) return IndexOf(type, p.m_CurrentVideo.source, p.m_CurrentVideo.id); else if(type == STREAM_SUBTITLE) return IndexOf(type, p.m_CurrentSubtitle.source, p.m_CurrentSubtitle.id); else if(type == STREAM_TELETEXT) return IndexOf(type, p.m_CurrentTeletext.source, p.m_CurrentTeletext.id); else if(type == STREAM_RADIO_RDS) return IndexOf(type, p.m_CurrentRadioRDS.source, p.m_CurrentRadioRDS.id); return -1; } int CSelectionStreams::Source(StreamSource source, std::string filename) { CSingleLock lock(m_section); int index = source - 1; for(size_t i=0;i<m_Streams.size();i++) { SelectionStream &s = m_Streams[i]; if(STREAM_SOURCE_MASK(s.source) != source) continue; // if it already exists, return same if(s.filename == filename) return s.source; if(index < s.source) index = s.source; } // return next index return index + 1; } void CSelectionStreams::Update(SelectionStream& s) { CSingleLock lock(m_section); int index = IndexOf(s.type, s.source, s.id); if(index >= 0) { SelectionStream& o = Get(s.type, index); s.type_index = o.type_index; o = s; } else { s.type_index = Count(s.type); m_Streams.push_back(s); } } void 
CSelectionStreams::Update(CDVDInputStream* input, CDVDDemux* demuxer, std::string filename2) { if(input && input->IsStreamType(DVDSTREAM_TYPE_DVD)) { CDVDInputStreamNavigator* nav = (CDVDInputStreamNavigator*)input; std::string filename = nav->GetFileName(); int source = Source(STREAM_SOURCE_NAV, filename); int count; count = nav->GetAudioStreamCount(); for(int i=0;i<count;i++) { SelectionStream s; s.source = source; s.type = STREAM_AUDIO; s.id = i; s.flags = CDemuxStream::FLAG_NONE; s.filename = filename; DVDNavAudioStreamInfo info = nav->GetAudioStreamInfo(i); s.name = info.name; s.language = g_LangCodeExpander.ConvertToISO6392T(info.language); s.channels = info.channels; Update(s); } count = nav->GetSubTitleStreamCount(); for(int i=0;i<count;i++) { SelectionStream s; s.source = source; s.type = STREAM_SUBTITLE; s.id = i; s.filename = filename; s.channels = 0; DVDNavSubtitleStreamInfo info = nav->GetSubtitleStreamInfo(i); s.name = info.name; s.flags = info.flags; s.language = g_LangCodeExpander.ConvertToISO6392T(info.language); Update(s); } DVDNavVideoStreamInfo info = nav->GetVideoStreamInfo(); for (int i = 1; i <= info.angles; i++) { SelectionStream s; s.source = source; s.type = STREAM_VIDEO; s.id = i; s.flags = CDemuxStream::FLAG_NONE; s.filename = filename; s.channels = 0; s.aspect_ratio = info.aspectRatio; s.width = (int)info.width; s.height = (int)info.height; s.codec = info.codec; s.name = StringUtils::Format("%s %i", g_localizeStrings.Get(38032).c_str(), i); Update(s); } } else if(demuxer) { std::string filename = demuxer->GetFileName(); int count = demuxer->GetNrOfStreams(); int source; if(input) /* hack to know this is sub decoder */ source = Source(STREAM_SOURCE_DEMUX, filename); else if (!filename2.empty()) source = Source(STREAM_SOURCE_DEMUX_SUB, filename); else source = Source(STREAM_SOURCE_VIDEOMUX, filename); for(int i=0;i<count;i++) { CDemuxStream* stream = demuxer->GetStream(i); /* skip streams with no type */ if (stream->type == STREAM_NONE) 
continue; /* make sure stream is marked with right source */ stream->source = source; SelectionStream s; s.source = source; s.type = stream->type; s.id = stream->iId; s.language = g_LangCodeExpander.ConvertToISO6392T(stream->language); s.flags = stream->flags; s.filename = demuxer->GetFileName(); s.filename2 = filename2; s.name = stream->GetStreamName(); s.codec = demuxer->GetStreamCodecName(stream->iId); s.channels = 0; // Default to 0. Overwrite if STREAM_AUDIO below. if(stream->type == STREAM_VIDEO) { s.width = ((CDemuxStreamVideo*)stream)->iWidth; s.height = ((CDemuxStreamVideo*)stream)->iHeight; } if(stream->type == STREAM_AUDIO) { std::string type; type = ((CDemuxStreamAudio*)stream)->GetStreamType(); if(type.length() > 0) { if(s.name.length() > 0) s.name += " - "; s.name += type; } s.channels = ((CDemuxStreamAudio*)stream)->iChannels; } Update(s); } } g_dataCacheCore.SignalAudioInfoChange(); g_dataCacheCore.SignalVideoInfoChange(); } int CSelectionStreams::CountSource(StreamType type, StreamSource source) const { CSingleLock lock(m_section); int count = 0; for(size_t i=0;i<m_Streams.size();i++) { if(type && m_Streams[i].type != type) continue; if (source && m_Streams[i].source != source) continue; count++; continue; } return count; } void CVideoPlayer::CreatePlayers() { #ifdef HAS_OMXPLAYER bool omx_suitable = !OMXPlayerUnsuitable(m_HasVideo, m_HasAudio, m_pDemuxer, m_pInputStream, m_SelectionStreams); if (m_omxplayer_mode != omx_suitable) { DestroyPlayers(); m_omxplayer_mode = omx_suitable; } #endif if (m_players_created) return; if (m_omxplayer_mode) { #ifdef HAS_OMXPLAYER m_VideoPlayerVideo = new OMXPlayerVideo(&m_OmxPlayerState.av_clock, &m_overlayContainer, m_messenger, m_renderManager); m_VideoPlayerAudio = new OMXPlayerAudio(&m_OmxPlayerState.av_clock, m_messenger); #endif } else { m_VideoPlayerVideo = new CVideoPlayerVideo(&m_clock, &m_overlayContainer, m_messenger, m_renderManager); m_VideoPlayerAudio = new CVideoPlayerAudio(&m_clock, m_messenger); 
} m_VideoPlayerSubtitle = new CVideoPlayerSubtitle(&m_overlayContainer); m_VideoPlayerTeletext = new CDVDTeletextData(); m_VideoPlayerRadioRDS = new CDVDRadioRDSData(); m_players_created = true; } void CVideoPlayer::DestroyPlayers() { if (!m_players_created) return; delete m_VideoPlayerVideo; delete m_VideoPlayerAudio; delete m_VideoPlayerSubtitle; delete m_VideoPlayerTeletext; delete m_VideoPlayerRadioRDS; m_players_created = false; } CVideoPlayer::CVideoPlayer(IPlayerCallback& callback) : IPlayer(callback), CThread("VideoPlayer"), m_CurrentAudio(STREAM_AUDIO, VideoPlayer_AUDIO), m_CurrentVideo(STREAM_VIDEO, VideoPlayer_VIDEO), m_CurrentSubtitle(STREAM_SUBTITLE, VideoPlayer_SUBTITLE), m_CurrentTeletext(STREAM_TELETEXT, VideoPlayer_TELETEXT), m_CurrentRadioRDS(STREAM_RADIO_RDS, VideoPlayer_RDS), m_messenger("player"), m_renderManager(m_clock, this), m_ready(true) { m_players_created = false; m_pDemuxer = NULL; m_pSubtitleDemuxer = NULL; m_pCCDemuxer = NULL; m_pInputStream = NULL; m_dvd.Clear(); m_State.Clear(); m_EdlAutoSkipMarkers.Clear(); m_UpdateApplication = 0; m_bAbortRequest = false; m_errorCount = 0; m_offset_pts = 0.0; m_playSpeed = DVD_PLAYSPEED_NORMAL; m_streamPlayerSpeed = DVD_PLAYSPEED_NORMAL; m_caching = CACHESTATE_DONE; m_HasVideo = false; m_HasAudio = false; memset(&m_SpeedState, 0, sizeof(m_SpeedState)); // omxplayer variables m_OmxPlayerState.last_check_time = 0.0; m_OmxPlayerState.stamp = 0.0; m_OmxPlayerState.bOmxWaitVideo = false; m_OmxPlayerState.bOmxWaitAudio = false; m_OmxPlayerState.bOmxSentEOFs = false; m_OmxPlayerState.threshold = 0.2f; m_OmxPlayerState.current_deinterlace = CMediaSettings::GetInstance().GetCurrentVideoSettings().m_DeinterlaceMode; m_OmxPlayerState.interlace_method = VS_INTERLACEMETHOD_MAX; #ifdef HAS_OMXPLAYER m_omxplayer_mode = CSettings::GetInstance().GetBool(CSettings::SETTING_VIDEOPLAYER_USEOMXPLAYER); #else m_omxplayer_mode = false; #endif CreatePlayers(); m_displayLost = false; g_Windowing.Register(this); } 
// Destructor: unregister from windowing, stop playback and tear down players.
CVideoPlayer::~CVideoPlayer()
{
  g_Windowing.Unregister(this);

  CloseFile();
  DestroyPlayers();
}

// Start playback of the given item. Closes any running playback, resets the
// per-file state, spawns the player thread and blocks (with a busy dialog)
// until the thread signals readiness. Returns false when playback was
// stopped/aborted during startup.
bool CVideoPlayer::OpenFile(const CFileItem& file, const CPlayerOptions &options)
{
  CLog::Log(LOGNOTICE, "VideoPlayer: Opening: %s", CURL::GetRedacted(file.GetPath()).c_str());

  // if playing a file close it first
  // this has to be changed so we won't have to close it.
  if(IsRunning())
    CloseFile();

  m_bAbortRequest = false;
  SetPlaySpeed(DVD_PLAYSPEED_NORMAL);

  m_State.Clear();
  memset(&m_SpeedState, 0, sizeof(m_SpeedState));
  m_UpdateApplication = 0;
  m_offset_pts = 0;
  m_CurrentAudio.lastdts = DVD_NOPTS_VALUE;
  m_CurrentVideo.lastdts = DVD_NOPTS_VALUE;

  m_PlayerOptions = options;
  m_item = file;

  m_ready.Reset();

  m_renderManager.PreInit();

  Create();

  // wait for the ready event
  CGUIDialogBusy::WaitOnEvent(m_ready, g_advancedSettings.m_videoBusyDialogDelay_ms, false);

  // Playback might have been stopped due to some error
  if (m_bStop || m_bAbortRequest)
    return false;

  return true;
}

// Stop playback and join the player thread. 'reopen' is currently unused in
// this body — presumably honoured by callers/overrides; confirm in header.
bool CVideoPlayer::CloseFile(bool reopen)
{
  CLog::Log(LOGNOTICE, "CVideoPlayer::CloseFile()");

  // set the abort request so that other threads can finish up
  m_bAbortRequest = true;

  // tell demuxer to abort
  if(m_pDemuxer)
    m_pDemuxer->Abort();

  if(m_pSubtitleDemuxer)
    m_pSubtitleDemuxer->Abort();

  if(m_pInputStream)
    m_pInputStream->Abort();

  CLog::Log(LOGNOTICE, "VideoPlayer: waiting for threads to exit");

  // wait for the main thread to finish up
  // since this main thread cleans up all other resources and threads
  // we are done after the StopThread call
  StopThread();

  m_Edl.Clear();
  m_EdlAutoSkipMarkers.Clear();

  m_HasVideo = false;
  m_HasAudio = false;

  CLog::Log(LOGNOTICE, "VideoPlayer: finished waiting");
  m_renderManager.UnInit();
  return true;
}

// Playing as long as the player thread has not been asked to stop.
bool CVideoPlayer::IsPlaying() const
{
  return !m_bStop;
}

// Thread startup hook: reset current-stream bookkeeping and init messenger.
void CVideoPlayer::OnStartup()
{
  m_CurrentVideo.Clear();
  m_CurrentAudio.Clear();
  m_CurrentSubtitle.Clear();
  m_CurrentTeletext.Clear();
  m_CurrentRadioRDS.Clear();

  m_messenger.Init();

  CUtil::ClearTempFonts();
}

// Create and open the input stream for the current item, collect external
// subtitle files (for non-DVD/PVR/TV sources) and apply stored A/V and
// subtitle delays. Returns false when the stream cannot be created/opened.
bool CVideoPlayer::OpenInputStream()
{
  if(m_pInputStream)
    SAFE_DELETE(m_pInputStream);

  CLog::Log(LOGNOTICE, "Creating InputStream");

  // correct the filename if needed
  std::string filename(m_item.GetPath());
  if (URIUtils::IsProtocol(filename, "dvd")
  ||  StringUtils::EqualsNoCase(filename, "iso9660://video_ts/video_ts.ifo"))
  {
    m_item.SetPath(g_mediaManager.TranslateDevicePath(""));
  }

  m_pInputStream = CDVDFactoryInputStream::CreateInputStream(this, m_item);
  if(m_pInputStream == NULL)
  {
    CLog::Log(LOGERROR, "CVideoPlayer::OpenInputStream - unable to create input stream for [%s]", m_item.GetPath().c_str());
    return false;
  }

  if (!m_pInputStream->Open())
  {
    CLog::Log(LOGERROR, "CVideoPlayer::OpenInputStream - error opening [%s]", m_item.GetPath().c_str());
    return false;
  }

  // find any available external subtitles for non dvd files
  if (!m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)
  &&  !m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER)
  &&  !m_pInputStream->IsStreamType(DVDSTREAM_TYPE_TV))
  {
    // find any available external subtitles
    std::vector<std::string> filenames;
    CUtil::ScanForExternalSubtitles(m_item.GetPath(), filenames);

    // load any subtitles from file item
    std::string key("subtitle:1");
    for(unsigned s = 1; m_item.HasProperty(key); key = StringUtils::Format("subtitle:%u", ++s))
      filenames.push_back(m_item.GetProperty(key).asString());

    for(unsigned int i=0;i<filenames.size();i++)
    {
      // if vobsub subtitle:
      if (URIUtils::HasExtension(filenames[i], ".idx"))
      {
        std::string strSubFile;
        if ( CUtil::FindVobSubPair( filenames, filenames[i], strSubFile ) )
          AddSubtitleFile(filenames[i], strSubFile);
      }
      else
      {
        if ( !CUtil::IsVobSub(filenames, filenames[i] ) )
        {
          AddSubtitleFile(filenames[i]);
        }
      }
    } // end loop over all subtitle files

    CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleCached = true;
  }

  SetAVDelay(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_AudioDelay);
  SetSubTitleDelay(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleDelay);
  m_clock.Reset();
  m_dvd.Clear();
  m_errorCount = 0;
  m_ChannelEntryTimeOut.SetInfinite();

  return true;
}

// Create a demuxer for the current input stream (retrying up to 10 times for
// PVR channel switches / newly available streams), refresh the selection
// list and derive a read rate from length/duration. Returns false on failure.
bool CVideoPlayer::OpenDemuxStream()
{
  if(m_pDemuxer)
    SAFE_DELETE(m_pDemuxer);

  CLog::Log(LOGNOTICE, "Creating Demuxer");

  int attempts = 10;
  while(!m_bStop && attempts-- > 0)
  {
    m_pDemuxer = CDVDFactoryDemuxer::CreateDemuxer(m_pInputStream);
    if(!m_pDemuxer && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER))
    {
      continue;
    }
    else if(!m_pDemuxer && m_pInputStream->NextStream() != CDVDInputStream::NEXTSTREAM_NONE)
    {
      CLog::Log(LOGDEBUG, "%s - New stream available from input, retry open", __FUNCTION__);
      continue;
    }
    break;
  }

  if(!m_pDemuxer)
  {
    CLog::Log(LOGERROR, "%s - Error creating demuxer", __FUNCTION__);
    return false;
  }

  m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_DEMUX);
  m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_NAV);
  m_SelectionStreams.Update(m_pInputStream, m_pDemuxer);

  // derive an input read rate (bytes/s) from byte length and duration (ms)
  int64_t len = m_pInputStream->GetLength();
  int64_t tim = m_pDemuxer->GetStreamLength();
  if(len > 0 && tim > 0)
    m_pInputStream->SetReadRate((unsigned int) (len * 1000 / tim));

  m_offset_pts = 0;

  return true;
}

// Open the default video/audio/subtitle/teletext/RDS streams according to the
// selection predicates. 'reset' is forwarded to OpenStream for A/V streams.
void CVideoPlayer::OpenDefaultStreams(bool reset)
{
  // if input stream dictate, we will open later
  if(m_dvd.iSelectedAudioStream >= 0
  || m_dvd.iSelectedSPUStream >= 0)
    return;

  SelectionStreams streams;
  bool valid;

  // open video stream
  streams = m_SelectionStreams.Get(STREAM_VIDEO, PredicateVideoPriority);
  valid = false;
  for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it)
  {
    if(OpenStream(m_CurrentVideo, it->id, it->source, reset))
      valid = true;
  }
  if(!valid)
    CloseStream(m_CurrentVideo, true);

  // open audio stream
  if(m_PlayerOptions.video_only)
    streams.clear();
  else
    streams = m_SelectionStreams.Get(STREAM_AUDIO, PredicateAudioPriority);
  valid = false;

  for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it)
  {
    if(OpenStream(m_CurrentAudio, it->id, it->source, reset))
      valid = true;
  }
  if(!valid)
    CloseStream(m_CurrentAudio, true);

  // enable or disable subtitles
  bool visible = CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleOn;

  // open subtitle stream
  SelectionStream as = m_SelectionStreams.Get(STREAM_AUDIO, GetAudioStream());
  PredicateSubtitlePriority psp(as.language);
  streams = m_SelectionStreams.Get(STREAM_SUBTITLE, psp);
  valid = false;
  CloseStream(m_CurrentSubtitle, false);
  for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it)
  {
    if(OpenStream(m_CurrentSubtitle, it->id, it->source))
    {
      valid = true;
      if(!psp.relevant(*it))
        visible = false;
      else if(it->flags & CDemuxStream::FLAG_FORCED)
        visible = true;
    }
  }
  if(!valid)
    CloseStream(m_CurrentSubtitle, false);

  if (!dynamic_cast<CDVDInputStreamNavigator*>(m_pInputStream) || m_PlayerOptions.state.empty())
    SetSubtitleVisibleInternal(visible); // only set subtitle visibility if state not stored by dvd navigator, because navigator will restore it (if visible)

  // open teletext stream
  streams = m_SelectionStreams.Get(STREAM_TELETEXT);
  valid = false;
  for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it)
  {
    if(OpenStream(m_CurrentTeletext, it->id, it->source))
      valid = true;
  }
  if(!valid)
    CloseStream(m_CurrentTeletext, false);

  // open RDS stream
  streams = m_SelectionStreams.Get(STREAM_RADIO_RDS);
  valid = false;
  for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it)
  {
    if(OpenStream(m_CurrentRadioRDS, it->id, it->source))
      valid = true;
  }
  if(!valid)
    CloseStream(m_CurrentRadioRDS, false);
}

// Read the next demux packet, preferring the external subtitle demuxer when
// its consumer accepts data. Handles DMX_SPECIALID_STREAMCHANGE by refreshing
// the selection list and reopening default streams. Returns true when a
// packet (possibly with a negative/special stream id) was delivered.
bool CVideoPlayer::ReadPacket(DemuxPacket*& packet, CDemuxStream*& stream)
{
  // check if we should read from subtitle demuxer
  if( m_pSubtitleDemuxer && m_VideoPlayerSubtitle->AcceptsData() )
  {
    packet = m_pSubtitleDemuxer->Read();

    if(packet)
    {
      UpdateCorrection(packet, m_offset_pts);
      if(packet->iStreamId < 0)
        return true;

      stream = m_pSubtitleDemuxer->GetStream(packet->iStreamId);
      if (!stream)
      {
        CLog::Log(LOGERROR, "%s - Error demux packet doesn't belong to a valid stream", __FUNCTION__);
        return false;
      }
      if(stream->source == STREAM_SOURCE_NONE)
      {
        m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_DEMUX_SUB);
        m_SelectionStreams.Update(NULL, m_pSubtitleDemuxer);
      }
      return true;
    }
  }

  if (m_omxplayer_mode)
  {
    // reset eos state when we get a packet (e.g. for case of seek after eos)
    if (packet && stream)
    {
      m_OmxPlayerState.bOmxWaitVideo = false;
      m_OmxPlayerState.bOmxWaitAudio = false;
      m_OmxPlayerState.bOmxSentEOFs = false;
    }
  }
  // read a data frame from stream.
  if(m_pDemuxer)
    packet = m_pDemuxer->Read();

  if(packet)
  {
    // stream changed, update and open defaults
    if(packet->iStreamId == DMX_SPECIALID_STREAMCHANGE)
    {
      m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_DEMUX);
      m_SelectionStreams.Update(m_pInputStream, m_pDemuxer);
      OpenDefaultStreams(false);

      // reevaluate HasVideo/Audio, we may have switched from/to a radio channel
      if(m_CurrentVideo.id < 0)
        m_HasVideo = false;
      if(m_CurrentAudio.id < 0)
        m_HasAudio = false;

      return true;
    }

    UpdateCorrection(packet, m_offset_pts);

    if(packet->iStreamId < 0)
      return true;

    if(m_pDemuxer)
    {
      stream = m_pDemuxer->GetStream(packet->iStreamId);
      if (!stream)
      {
        CLog::Log(LOGERROR, "%s - Error demux packet doesn't belong to a valid stream", __FUNCTION__);
        return false;
      }
      if(stream->source == STREAM_SOURCE_NONE)
      {
        m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_DEMUX);
        m_SelectionStreams.Update(m_pInputStream, m_pDemuxer);
      }
    }
    return true;
  }
  return false;
}

// Check whether the currently opened stream still exists (and is enabled) in
// its backing demuxer; for DVD playback the physical id must also match the
// navigator's selected audio/SPU stream.
bool CVideoPlayer::IsValidStream(CCurrentStream& stream)
{
  if(stream.id<0)
    return true; // we consider non selected as valid

  int source = STREAM_SOURCE_MASK(stream.source);
  if(source == STREAM_SOURCE_TEXT)
    return true;
  if(source == STREAM_SOURCE_DEMUX_SUB)
  {
    CDemuxStream* st = m_pSubtitleDemuxer->GetStream(stream.id);
    if(st == NULL || st->disabled)
      return false;
    if(st->type != stream.type)
      return false;
    return true;
  }
  if(source == STREAM_SOURCE_DEMUX)
  {
    CDemuxStream* st = m_pDemuxer->GetStream(stream.id);
    if(st == NULL || st->disabled)
      return false;
    if(st->type != stream.type)
      return false;

    if (m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
    {
      if(stream.type == STREAM_AUDIO && st->iPhysicalId != m_dvd.iSelectedAudioStream)
        return false;
      if(stream.type == STREAM_SUBTITLE && st->iPhysicalId != m_dvd.iSelectedSPUStream)
        return false;
    }

    return true;
  }
  if (source == STREAM_SOURCE_VIDEOMUX)
  {
    CDemuxStream* st = m_pCCDemuxer->GetStream(stream.id);
    if (st == NULL || st->disabled)
      return false;
    if (st->type != stream.type)
      return false;
    return true;
  }

  return false;
}

// Decide whether a newly seen demux stream should replace the currently open
// one. DVD/Bluray inputs switch when the physical id matches the navigator's
// selection; otherwise only an unopened slot (id < 0) is taken over, and
// subtitles are never auto-switched here.
bool CVideoPlayer::IsBetterStream(CCurrentStream& current, CDemuxStream* stream)
{
  // Do not reopen non-video streams if we're in video-only mode
  if(m_PlayerOptions.video_only && current.type != STREAM_VIDEO)
    return false;

  if(stream->disabled)
    return false;

  if (m_pInputStream && ( m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)
                       || m_pInputStream->IsStreamType(DVDSTREAM_TYPE_BLURAY) ) )
  {
    int source_type;

    source_type = STREAM_SOURCE_MASK(current.source);
    if(source_type != STREAM_SOURCE_DEMUX && source_type != STREAM_SOURCE_NONE)
      return false;

    source_type = STREAM_SOURCE_MASK(stream->source);
    if(source_type != STREAM_SOURCE_DEMUX || stream->type != current.type || stream->iId == current.id)
      return false;

    if(current.type == STREAM_AUDIO && stream->iPhysicalId == m_dvd.iSelectedAudioStream)
      return true;
    if(current.type == STREAM_SUBTITLE && stream->iPhysicalId == m_dvd.iSelectedSPUStream)
      return true;
    if(current.type == STREAM_VIDEO && current.id < 0)
      return true;
  }
  else
  {
    if(stream->source == current.source && stream->iId == current.id)
      return false;

    if(stream->type != current.type)
      return false;

    if(current.type == STREAM_SUBTITLE)
      return false;

    if(current.id < 0)
      return true;
  }
  return false;
}

// Close the current stream if it became invalid (and its player is stalled),
// then switch to 'stream' when it is judged better.
void CVideoPlayer::CheckBetterStream(CCurrentStream& current, CDemuxStream* stream)
{
  IDVDStreamPlayer* player = GetStreamPlayer(current.player);
  if (!IsValidStream(current) && (player == NULL ||
player->IsStalled())) CloseStream(current, true); if (IsBetterStream(current, stream)) OpenStream(current, stream->iId, stream->source); } void CVideoPlayer::Process() { if (!OpenInputStream()) { m_bAbortRequest = true; return; } if (CDVDInputStream::IMenus* ptr = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream)) { CLog::Log(LOGNOTICE, "VideoPlayer: playing a file with menu's"); if(dynamic_cast<CDVDInputStreamNavigator*>(m_pInputStream)) m_PlayerOptions.starttime = 0; if(!m_PlayerOptions.state.empty()) ptr->SetState(m_PlayerOptions.state); else if(CDVDInputStreamNavigator* nav = dynamic_cast<CDVDInputStreamNavigator*>(m_pInputStream)) nav->EnableSubtitleStream(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleOn); CMediaSettings::GetInstance().GetCurrentVideoSettings().m_SubtitleCached = true; } if(!OpenDemuxStream()) { m_bAbortRequest = true; return; } // give players a chance to reconsider now codecs are known CreatePlayers(); // allow renderer to switch to fullscreen if requested m_VideoPlayerVideo->EnableFullscreen(m_PlayerOptions.fullscreen); if (m_omxplayer_mode) { if (!m_OmxPlayerState.av_clock.OMXInitialize(&m_clock)) m_bAbortRequest = true; if (CSettings::GetInstance().GetInt(CSettings::SETTING_VIDEOPLAYER_ADJUSTREFRESHRATE) != ADJUST_REFRESHRATE_OFF) m_OmxPlayerState.av_clock.HDMIClockSync(); m_OmxPlayerState.av_clock.OMXStateIdle(); m_OmxPlayerState.av_clock.OMXStateExecute(); m_OmxPlayerState.av_clock.OMXStop(); m_OmxPlayerState.av_clock.OMXPause(); } OpenDefaultStreams(); // look for any EDL files m_Edl.Clear(); m_EdlAutoSkipMarkers.Clear(); if (m_CurrentVideo.id >= 0 && m_CurrentVideo.hint.fpsrate > 0 && m_CurrentVideo.hint.fpsscale > 0) { float fFramesPerSecond = (float)m_CurrentVideo.hint.fpsrate / (float)m_CurrentVideo.hint.fpsscale; m_Edl.ReadEditDecisionLists(m_item.GetPath(), fFramesPerSecond, m_CurrentVideo.hint.height); } /* * Check to see if the demuxer should start at something other than time 0. 
This will be the case * if there was a start time specified as part of the "Start from where last stopped" (aka * auto-resume) feature or if there is an EDL cut or commercial break that starts at time 0. */ CEdl::Cut cut; int starttime = 0; if(m_PlayerOptions.starttime > 0 || m_PlayerOptions.startpercent > 0) { if (m_PlayerOptions.startpercent > 0 && m_pDemuxer) { int playerStartTime = (int)( ( (float) m_pDemuxer->GetStreamLength() ) * ( m_PlayerOptions.startpercent/(float)100 ) ); starttime = m_Edl.RestoreCutTime(playerStartTime); } else { starttime = m_Edl.RestoreCutTime(m_PlayerOptions.starttime * 1000); // s to ms } CLog::Log(LOGDEBUG, "%s - Start position set to last stopped position: %d", __FUNCTION__, starttime); } else if(m_Edl.InCut(0, &cut) && (cut.action == CEdl::CUT || cut.action == CEdl::COMM_BREAK)) { starttime = cut.end; CLog::Log(LOGDEBUG, "%s - Start position set to end of first cut or commercial break: %d", __FUNCTION__, starttime); if(cut.action == CEdl::COMM_BREAK) { /* * Setup auto skip markers as if the commercial break had been skipped using standard * detection. 
*/ m_EdlAutoSkipMarkers.commbreak_start = cut.start; m_EdlAutoSkipMarkers.commbreak_end = cut.end; m_EdlAutoSkipMarkers.seek_to_start = true; } } if(starttime > 0) { double startpts = DVD_NOPTS_VALUE; if(m_pDemuxer) { if (m_pDemuxer->SeekTime(starttime, false, &startpts)) CLog::Log(LOGDEBUG, "%s - starting demuxer from: %d", __FUNCTION__, starttime); else CLog::Log(LOGDEBUG, "%s - failed to start demuxing from: %d", __FUNCTION__, starttime); } if(m_pSubtitleDemuxer) { if(m_pSubtitleDemuxer->SeekTime(starttime, false, &startpts)) CLog::Log(LOGDEBUG, "%s - starting subtitle demuxer from: %d", __FUNCTION__, starttime); else CLog::Log(LOGDEBUG, "%s - failed to start subtitle demuxing from: %d", __FUNCTION__, starttime); } } // make sure application know our info UpdateApplication(0); UpdatePlayState(0); if(m_PlayerOptions.identify == false) m_callback.OnPlayBackStarted(); // we are done initializing now, set the readyevent m_ready.Set(); SetCaching(CACHESTATE_FLUSH); while (!m_bAbortRequest) { #ifdef HAS_OMXPLAYER if (m_omxplayer_mode && OMXDoProcessing(m_OmxPlayerState, m_playSpeed, m_VideoPlayerVideo, m_VideoPlayerAudio, m_CurrentAudio, m_CurrentVideo, m_HasVideo, m_HasAudio, m_renderManager)) { CloseStream(m_CurrentVideo, false); OpenStream(m_CurrentVideo, m_CurrentVideo.id, m_CurrentVideo.source); if (m_State.canseek) m_messenger.Put(new CDVDMsgPlayerSeek(GetTime(), true, true, true, true, true)); } #endif // check display lost if (m_displayLost) { Sleep(50); continue; } // handle messages send to this thread, like seek or demuxer reset requests HandleMessages(); if(m_bAbortRequest) break; // should we open a new input stream? if(!m_pInputStream) { if (OpenInputStream() == false) { m_bAbortRequest = true; break; } } // should we open a new demuxer? 
if(!m_pDemuxer) { if (m_pInputStream->NextStream() == CDVDInputStream::NEXTSTREAM_NONE) break; if (m_pInputStream->IsEOF()) break; if (OpenDemuxStream() == false) { m_bAbortRequest = true; break; } // on channel switch we don't want to close stream players at this // time. we'll get the stream change event later if (!m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER) || !m_SelectionStreams.m_Streams.empty()) OpenDefaultStreams(); // never allow first frames after open to be skipped if( m_VideoPlayerVideo->IsInited() ) m_VideoPlayerVideo->SendMessage(new CDVDMsg(CDVDMsg::VIDEO_NOSKIP)); UpdateApplication(0); UpdatePlayState(0); } // handle eventual seeks due to playspeed HandlePlaySpeed(); // update player state UpdatePlayState(200); // update application with our state UpdateApplication(1000); // make sure we run subtitle process here m_VideoPlayerSubtitle->Process(m_clock.GetClock() + m_State.time_offset - m_VideoPlayerVideo->GetSubtitleDelay(), m_State.time_offset); if (CheckDelayedChannelEntry()) continue; // if the queues are full, no need to read more if ((!m_VideoPlayerAudio->AcceptsData() && m_CurrentAudio.id >= 0) || (!m_VideoPlayerVideo->AcceptsData() && m_CurrentVideo.id >= 0)) { if (m_pDemuxer && m_playSpeed == DVD_PLAYSPEED_PAUSE) { m_pDemuxer->SetSpeed(DVD_PLAYSPEED_PAUSE); } Sleep(10); continue; } else if (m_pDemuxer) { m_pDemuxer->SetSpeed(m_playSpeed); } // always yield to players if they have data levels > 50 percent if((m_VideoPlayerAudio->GetLevel() > 50 || m_CurrentAudio.id < 0) && (m_VideoPlayerVideo->GetLevel() > 50 || m_CurrentVideo.id < 0)) Sleep(0); DemuxPacket* pPacket = NULL; CDemuxStream *pStream = NULL; ReadPacket(pPacket, pStream); if (pPacket && !pStream) { /* probably a empty packet, just free it and move on */ CDVDDemuxUtils::FreeDemuxPacket(pPacket); continue; } if (!pPacket) { // when paused, demuxer could be be returning empty if (m_playSpeed == DVD_PLAYSPEED_PAUSE) continue; // check for a still frame state if 
(CDVDInputStream::IMenus* pStream = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream)) { // stills will be skipped if(m_dvd.state == DVDSTATE_STILL) { if (m_dvd.iDVDStillTime > 0) { if ((XbmcThreads::SystemClockMillis() - m_dvd.iDVDStillStartTime) >= m_dvd.iDVDStillTime) { m_dvd.iDVDStillTime = 0; m_dvd.iDVDStillStartTime = 0; m_dvd.state = DVDSTATE_NORMAL; pStream->SkipStill(); continue; } } } } // if there is another stream available, reopen demuxer CDVDInputStream::ENextStream next = m_pInputStream->NextStream(); if(next == CDVDInputStream::NEXTSTREAM_OPEN) { SAFE_DELETE(m_pDemuxer); SetCaching(CACHESTATE_DONE); CLog::Log(LOGNOTICE, "VideoPlayer: next stream, wait for old streams to be finished"); CloseStream(m_CurrentAudio, true); CloseStream(m_CurrentVideo, true); m_CurrentAudio.Clear(); m_CurrentVideo.Clear(); m_CurrentSubtitle.Clear(); continue; } // input stream asked us to just retry if(next == CDVDInputStream::NEXTSTREAM_RETRY) { Sleep(100); continue; } // make sure we tell all players to finish it's data if (m_omxplayer_mode && !m_OmxPlayerState.bOmxSentEOFs) { if(m_CurrentAudio.inited) m_OmxPlayerState.bOmxWaitAudio = true; if(m_CurrentVideo.inited) m_OmxPlayerState.bOmxWaitVideo = true; m_OmxPlayerState.bOmxSentEOFs = true; } if(m_CurrentAudio.inited) m_VideoPlayerAudio->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_EOF)); if(m_CurrentVideo.inited) m_VideoPlayerVideo->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_EOF)); if(m_CurrentSubtitle.inited) m_VideoPlayerSubtitle->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_EOF)); if(m_CurrentTeletext.inited) m_VideoPlayerTeletext->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_EOF)); if(m_CurrentRadioRDS.inited) m_VideoPlayerRadioRDS->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_EOF)); m_CurrentAudio.inited = false; m_CurrentVideo.inited = false; m_CurrentSubtitle.inited = false; m_CurrentTeletext.inited = false; m_CurrentRadioRDS.inited = false; // if we are caching, start playing it again SetCaching(CACHESTATE_DONE); // while 
players are still playing, keep going to allow seekbacks if(m_VideoPlayerAudio->HasData() || m_VideoPlayerVideo->HasData()) { Sleep(100); continue; } #ifdef HAS_OMXPLAYER if (m_omxplayer_mode && OMXStillPlaying(m_OmxPlayerState.bOmxWaitVideo, m_OmxPlayerState.bOmxWaitAudio, m_VideoPlayerVideo->IsEOS(), m_VideoPlayerAudio->IsEOS())) { Sleep(100); continue; } #endif if (!m_pInputStream->IsEOF()) CLog::Log(LOGINFO, "%s - eof reading from demuxer", __FUNCTION__); break; } // it's a valid data packet, reset error counter m_errorCount = 0; // see if we can find something better to play CheckBetterStream(m_CurrentAudio, pStream); CheckBetterStream(m_CurrentVideo, pStream); CheckBetterStream(m_CurrentSubtitle, pStream); CheckBetterStream(m_CurrentTeletext, pStream); CheckBetterStream(m_CurrentRadioRDS, pStream); // demux video stream if (CSettings::GetInstance().GetBool(CSettings::SETTING_SUBTITLES_PARSECAPTIONS) && CheckIsCurrent(m_CurrentVideo, pStream, pPacket)) { if (m_pCCDemuxer) { bool first = true; while(!m_bAbortRequest) { DemuxPacket *pkt = m_pCCDemuxer->Read(first ? 
pPacket : NULL); if (!pkt) break; first = false; if (m_pCCDemuxer->GetNrOfStreams() != m_SelectionStreams.CountSource(STREAM_SUBTITLE, STREAM_SOURCE_VIDEOMUX)) { m_SelectionStreams.Clear(STREAM_SUBTITLE, STREAM_SOURCE_VIDEOMUX); m_SelectionStreams.Update(NULL, m_pCCDemuxer, ""); OpenDefaultStreams(false); } CDemuxStream *pSubStream = m_pCCDemuxer->GetStream(pkt->iStreamId); if (pSubStream && m_CurrentSubtitle.id == pkt->iStreamId && m_CurrentSubtitle.source == STREAM_SOURCE_VIDEOMUX) ProcessSubData(pSubStream, pkt); else CDVDDemuxUtils::FreeDemuxPacket(pkt); } } } if (IsInMenuInternal()) { if (CDVDInputStream::IMenus* menu = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream)) { double correction = menu->GetTimeStampCorrection(); if (pPacket->dts > correction) pPacket->dts -= correction; if (pPacket->pts > correction) pPacket->pts -= correction; } if (m_dvd.syncClock) { m_clock.Discontinuity(pPacket->dts); m_dvd.syncClock = false; } } // process the packet ProcessPacket(pStream, pPacket); // check if in a cut or commercial break that should be automatically skipped CheckAutoSceneSkip(); // update the player info for streams if (m_player_status_timer.IsTimePast()) { m_player_status_timer.Set(500); UpdateStreamInfos(); } } } bool CVideoPlayer::CheckDelayedChannelEntry(void) { bool bReturn(false); if (m_ChannelEntryTimeOut.IsTimePast()) { CFileItem currentFile(g_application.CurrentFileItem()); CPVRChannelPtr currentChannel(currentFile.GetPVRChannelInfoTag()); if (currentChannel) { SwitchChannel(currentChannel); bReturn = true; } m_ChannelEntryTimeOut.SetInfinite(); } return bReturn; } bool CVideoPlayer::CheckIsCurrent(CCurrentStream& current, CDemuxStream* stream, DemuxPacket* pkg) { if(current.id == pkg->iStreamId && current.source == stream->source && current.type == stream->type) return true; else return false; } void CVideoPlayer::ProcessPacket(CDemuxStream* pStream, DemuxPacket* pPacket) { // process packet if it belongs to selected stream. 
// for dvd's don't allow automatic opening of streams*/ if (CheckIsCurrent(m_CurrentAudio, pStream, pPacket)) ProcessAudioData(pStream, pPacket); else if (CheckIsCurrent(m_CurrentVideo, pStream, pPacket)) ProcessVideoData(pStream, pPacket); else if (CheckIsCurrent(m_CurrentSubtitle, pStream, pPacket)) ProcessSubData(pStream, pPacket); else if (CheckIsCurrent(m_CurrentTeletext, pStream, pPacket)) ProcessTeletextData(pStream, pPacket); else if (CheckIsCurrent(m_CurrentRadioRDS, pStream, pPacket)) ProcessRadioRDSData(pStream, pPacket); else { pStream->SetDiscard(AVDISCARD_ALL); CDVDDemuxUtils::FreeDemuxPacket(pPacket); // free it since we won't do anything with it } } void CVideoPlayer::CheckStreamChanges(CCurrentStream& current, CDemuxStream* stream) { if (current.stream != (void*)stream || current.changes != stream->changes) { /* check so that dmuxer hints or extra data hasn't changed */ /* if they have, reopen stream */ if (current.hint != CDVDStreamInfo(*stream, true)) OpenStream(current, stream->iId, stream->source ); current.stream = (void*)stream; current.changes = stream->changes; } } void CVideoPlayer::ProcessAudioData(CDemuxStream* pStream, DemuxPacket* pPacket) { CheckStreamChanges(m_CurrentAudio, pStream); bool checkcont = CheckContinuity(m_CurrentAudio, pPacket); UpdateTimestamps(m_CurrentAudio, pPacket); if (checkcont && (m_CurrentAudio.avsync == CCurrentStream::AV_SYNC_CHECK)) m_CurrentAudio.avsync = CCurrentStream::AV_SYNC_NONE; bool drop = false; if (CheckPlayerInit(m_CurrentAudio)) drop = true; /* * If CheckSceneSkip() returns true then demux point is inside an EDL cut and the packets are dropped. * If not inside a hard cut, but the demux point has reached an EDL mute section then trigger the * AUDIO_SILENCE state. The AUDIO_SILENCE state is reverted as soon as the demux point is outside * of any EDL section while EDL mute is still active. 
*/ CEdl::Cut cut; if (CheckSceneSkip(m_CurrentAudio)) drop = true; else if (m_Edl.InCut(DVD_TIME_TO_MSEC(m_CurrentAudio.dts + m_offset_pts), &cut) && cut.action == CEdl::MUTE // Inside EDL mute && !m_EdlAutoSkipMarkers.mute) // Mute not already triggered { m_VideoPlayerAudio->SendMessage(new CDVDMsgBool(CDVDMsg::AUDIO_SILENCE, true)); m_EdlAutoSkipMarkers.mute = true; } else if (!m_Edl.InCut(DVD_TIME_TO_MSEC(m_CurrentAudio.dts + m_offset_pts), &cut) // Outside of any EDL && m_EdlAutoSkipMarkers.mute) // But the mute hasn't been removed yet { m_VideoPlayerAudio->SendMessage(new CDVDMsgBool(CDVDMsg::AUDIO_SILENCE, false)); m_EdlAutoSkipMarkers.mute = false; } m_VideoPlayerAudio->SendMessage(new CDVDMsgDemuxerPacket(pPacket, drop)); m_CurrentAudio.packets++; } void CVideoPlayer::ProcessVideoData(CDemuxStream* pStream, DemuxPacket* pPacket) { CheckStreamChanges(m_CurrentVideo, pStream); bool checkcont = false; if( pPacket->iSize != 4) //don't check the EOF_SEQUENCE of stillframes { checkcont = CheckContinuity(m_CurrentVideo, pPacket); UpdateTimestamps(m_CurrentVideo, pPacket); } if (checkcont && (m_CurrentVideo.avsync == CCurrentStream::AV_SYNC_CHECK)) m_CurrentVideo.avsync = CCurrentStream::AV_SYNC_NONE; bool drop = false; if (CheckPlayerInit(m_CurrentVideo)) drop = true; if (CheckSceneSkip(m_CurrentVideo)) drop = true; m_VideoPlayerVideo->SendMessage(new CDVDMsgDemuxerPacket(pPacket, drop)); m_CurrentVideo.packets++; } void CVideoPlayer::ProcessSubData(CDemuxStream* pStream, DemuxPacket* pPacket) { CheckStreamChanges(m_CurrentSubtitle, pStream); UpdateTimestamps(m_CurrentSubtitle, pPacket); bool drop = false; if (CheckPlayerInit(m_CurrentSubtitle)) drop = true; if (CheckSceneSkip(m_CurrentSubtitle)) drop = true; m_VideoPlayerSubtitle->SendMessage(new CDVDMsgDemuxerPacket(pPacket, drop)); if(m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)) m_VideoPlayerSubtitle->UpdateOverlayInfo((CDVDInputStreamNavigator*)m_pInputStream, LIBDVDNAV_BUTTON_NORMAL); } 
void CVideoPlayer::ProcessTeletextData(CDemuxStream* pStream, DemuxPacket* pPacket) { CheckStreamChanges(m_CurrentTeletext, pStream); UpdateTimestamps(m_CurrentTeletext, pPacket); bool drop = false; if (CheckPlayerInit(m_CurrentTeletext)) drop = true; if (CheckSceneSkip(m_CurrentTeletext)) drop = true; m_VideoPlayerTeletext->SendMessage(new CDVDMsgDemuxerPacket(pPacket, drop)); } void CVideoPlayer::ProcessRadioRDSData(CDemuxStream* pStream, DemuxPacket* pPacket) { CheckStreamChanges(m_CurrentRadioRDS, pStream); UpdateTimestamps(m_CurrentRadioRDS, pPacket); bool drop = false; if (CheckPlayerInit(m_CurrentRadioRDS)) drop = true; if (CheckSceneSkip(m_CurrentRadioRDS)) drop = true; m_VideoPlayerRadioRDS->SendMessage(new CDVDMsgDemuxerPacket(pPacket, drop)); } bool CVideoPlayer::GetCachingTimes(double& level, double& delay, double& offset) { if(!m_pInputStream || !m_pDemuxer) return false; XFILE::SCacheStatus status; if (!m_pInputStream->GetCacheStatus(&status)) return false; int64_t cached = status.forward; unsigned currate = status.currate; unsigned maxrate = status.maxrate; bool full = status.full; int64_t length = m_pInputStream->GetLength(); int64_t remain = length - m_pInputStream->Seek(0, SEEK_CUR); if(cached < 0 || length <= 0 || remain < 0) return false; double play_sbp = DVD_MSEC_TO_TIME(m_pDemuxer->GetStreamLength()) / length; double queued = 1000.0 * GetQueueTime() / play_sbp; delay = 0.0; level = 0.0; offset = (double)(cached + queued) / length; if (currate == 0) return true; double cache_sbp = 1.1 * (double)DVD_TIME_BASE / currate; /* underestimate by 10 % */ double play_left = play_sbp * (remain + queued); /* time to play out all remaining bytes */ double cache_left = cache_sbp * (remain - cached); /* time to cache the remaining bytes */ double cache_need = std::max(0.0, remain - play_left / cache_sbp); /* bytes needed until play_left == cache_left */ delay = cache_left - play_left; if (full && (currate < maxrate) ) { CLog::Log(LOGDEBUG, "Readrate %u is 
too low with %u required", currate, maxrate); level = -1.0; /* buffer is full & our read rate is too low */ } else level = (cached + queued) / (cache_need + queued); return true; } void CVideoPlayer::HandlePlaySpeed() { bool isInMenu = IsInMenuInternal(); if (isInMenu && m_caching != CACHESTATE_DONE) SetCaching(CACHESTATE_DONE); if (m_caching == CACHESTATE_FULL) { double level, delay, offset; if (GetCachingTimes(level, delay, offset)) { if (level < 0.0) { CGUIDialogKaiToast::QueueNotification(g_localizeStrings.Get(21454), g_localizeStrings.Get(21455)); SetCaching(CACHESTATE_INIT); } if (level >= 1.0) SetCaching(CACHESTATE_INIT); } else { if ((!m_VideoPlayerAudio->AcceptsData() && m_CurrentAudio.id >= 0) || (!m_VideoPlayerVideo->AcceptsData() && m_CurrentVideo.id >= 0)) SetCaching(CACHESTATE_INIT); } } if (m_caching == CACHESTATE_INIT) { // if all enabled streams have been inited we are done if ((m_CurrentVideo.id >= 0 || m_CurrentAudio.id >= 0) && (m_CurrentVideo.id < 0 || m_CurrentVideo.syncState != IDVDStreamPlayer::SYNC_STARTING) && (m_CurrentAudio.id < 0 || m_CurrentAudio.syncState != IDVDStreamPlayer::SYNC_STARTING)) SetCaching(CACHESTATE_PLAY); // handle exceptions if (m_CurrentAudio.id >= 0 && m_CurrentVideo.id >= 0) { if ((!m_VideoPlayerAudio->AcceptsData() || !m_VideoPlayerVideo->AcceptsData()) && m_cachingTimer.IsTimePast()) { SetCaching(CACHESTATE_DONE); } } } if (m_caching == CACHESTATE_PLAY) { // if all enabled streams have started playing we are done if ((m_CurrentVideo.id < 0 || !m_VideoPlayerVideo->IsStalled()) && (m_CurrentAudio.id < 0 || !m_VideoPlayerAudio->IsStalled())) SetCaching(CACHESTATE_DONE); } if (m_caching == CACHESTATE_DONE) { if (m_playSpeed == DVD_PLAYSPEED_NORMAL && !isInMenu) { // take action is audio or video stream is stalled if (((m_VideoPlayerAudio->IsStalled() && m_CurrentAudio.inited) || (m_VideoPlayerVideo->IsStalled() && m_CurrentVideo.inited)) && m_syncTimer.IsTimePast()) { if (m_pInputStream->IsRealtime()) { if 
((m_CurrentAudio.id >= 0 && m_CurrentAudio.syncState == IDVDStreamPlayer::SYNC_INSYNC && m_VideoPlayerAudio->IsStalled()) || (m_CurrentVideo.id >= 0 && m_CurrentVideo.syncState == IDVDStreamPlayer::SYNC_INSYNC && m_VideoPlayerVideo->GetLevel() == 0)) { CLog::Log(LOGDEBUG, "Stream stalled, start buffering. Audio: %d - Video: %d", m_VideoPlayerAudio->GetLevel(),m_VideoPlayerVideo->GetLevel()); FlushBuffers(false); } } else { // start caching if audio and video have run dry if (m_VideoPlayerAudio->GetLevel() <= 50 && m_VideoPlayerVideo->GetLevel() <= 50) { SetCaching(CACHESTATE_FULL); } else if (m_CurrentAudio.id >= 0 && m_CurrentAudio.inited && m_CurrentAudio.syncState == IDVDStreamPlayer::SYNC_INSYNC && m_VideoPlayerAudio->GetLevel() == 0) { CLog::Log(LOGDEBUG,"CVideoPlayer::HandlePlaySpeed - audio stream stalled, triggering re-sync"); FlushBuffers(false); m_messenger.Put(new CDVDMsgPlayerSeek((int) GetTime(), false, true, true, true, true)); } } } // care for live streams else if (m_pInputStream->IsRealtime()) { if (m_CurrentAudio.id >= 0) { double adjust = -1.0; // a unique value if (m_clock.GetSpeedAdjust() >= 0 && m_VideoPlayerAudio->GetLevel() < 5) adjust = -0.01; if (m_clock.GetSpeedAdjust() < 0 && m_VideoPlayerAudio->GetLevel() > 10) adjust = 0.0; if (adjust != -1.0) { m_clock.SetSpeedAdjust(adjust); if (m_omxplayer_mode) m_OmxPlayerState.av_clock.OMXSetSpeedAdjust(adjust); CLog::Log(LOGDEBUG, "CVideoPlayer::HandlePlaySpeed set clock adjust: %f", adjust); } } } } } // sync streams to clock if ((m_CurrentVideo.syncState == IDVDStreamPlayer::SYNC_WAITSYNC) || (m_CurrentAudio.syncState == IDVDStreamPlayer::SYNC_WAITSYNC)) { unsigned int threshold = 20; if (m_pInputStream->IsRealtime()) threshold = 40; bool video = m_CurrentVideo.id < 0 || (m_CurrentVideo.syncState == IDVDStreamPlayer::SYNC_WAITSYNC) || (m_CurrentVideo.packets == 0 && m_CurrentAudio.packets > threshold); bool audio = m_CurrentAudio.id < 0 || (m_CurrentAudio.syncState == 
IDVDStreamPlayer::SYNC_WAITSYNC) || (m_CurrentAudio.packets == 0 && m_CurrentVideo.packets > threshold); if (m_CurrentAudio.syncState == IDVDStreamPlayer::SYNC_WAITSYNC && m_CurrentAudio.avsync == CCurrentStream::AV_SYNC_CONT) { m_CurrentAudio.syncState = IDVDStreamPlayer::SYNC_INSYNC; m_CurrentAudio.avsync = CCurrentStream::AV_SYNC_NONE; m_VideoPlayerAudio->SendMessage(new CDVDMsgDouble(CDVDMsg::GENERAL_RESYNC, m_clock.GetClock()), 1); } else if (m_CurrentVideo.syncState == IDVDStreamPlayer::SYNC_WAITSYNC && m_CurrentVideo.avsync == CCurrentStream::AV_SYNC_CONT) { m_CurrentVideo.syncState = IDVDStreamPlayer::SYNC_INSYNC; m_CurrentVideo.avsync = CCurrentStream::AV_SYNC_NONE; m_VideoPlayerVideo->SendMessage(new CDVDMsgDouble(CDVDMsg::GENERAL_RESYNC, m_clock.GetClock()), 1); } else if (video && audio) { double clock = 0; if (m_CurrentAudio.syncState == IDVDStreamPlayer::SYNC_WAITSYNC) CLog::Log(LOGDEBUG, "VideoPlayer::Sync - Audio - pts: %f, cache: %f, totalcache: %f", m_CurrentAudio.starttime, m_CurrentAudio.cachetime, m_CurrentAudio.cachetotal); if (m_CurrentVideo.syncState == IDVDStreamPlayer::SYNC_WAITSYNC) CLog::Log(LOGDEBUG, "VideoPlayer::Sync - Video - pts: %f, cache: %f, totalcache: %f", m_CurrentVideo.starttime, m_CurrentVideo.cachetime, m_CurrentVideo.cachetotal); if (m_CurrentAudio.starttime != DVD_NOPTS_VALUE && m_CurrentAudio.packets > 0) { if (m_pInputStream->IsRealtime()) clock = m_CurrentAudio.starttime - m_CurrentAudio.cachetotal - DVD_MSEC_TO_TIME(400); else clock = m_CurrentAudio.starttime - m_CurrentAudio.cachetime; if (m_CurrentVideo.starttime != DVD_NOPTS_VALUE && (m_CurrentVideo.packets > 0) && m_CurrentVideo.starttime - m_CurrentVideo.cachetotal < clock) { clock = m_CurrentVideo.starttime - m_CurrentVideo.cachetotal; } } else if (m_CurrentVideo.starttime != DVD_NOPTS_VALUE && m_CurrentVideo.packets > 0) { clock = m_CurrentVideo.starttime - m_CurrentVideo.cachetotal; } if (m_omxplayer_mode) { CLog::Log(LOGDEBUG, "%s::%s player started RESET", 
"CVideoPlayer", __FUNCTION__); m_OmxPlayerState.av_clock.OMXReset(m_CurrentVideo.id >= 0, m_playSpeed != DVD_PLAYSPEED_NORMAL && m_playSpeed != DVD_PLAYSPEED_PAUSE ? false: (m_CurrentAudio.id >= 0)); } m_clock.Discontinuity(clock); m_CurrentAudio.syncState = IDVDStreamPlayer::SYNC_INSYNC; m_CurrentAudio.avsync = CCurrentStream::AV_SYNC_NONE; m_CurrentVideo.syncState = IDVDStreamPlayer::SYNC_INSYNC; m_CurrentVideo.avsync = CCurrentStream::AV_SYNC_NONE; m_VideoPlayerAudio->SendMessage(new CDVDMsgDouble(CDVDMsg::GENERAL_RESYNC, clock), 1); m_VideoPlayerVideo->SendMessage(new CDVDMsgDouble(CDVDMsg::GENERAL_RESYNC, clock), 1); SetCaching(CACHESTATE_DONE); m_syncTimer.Set(3000); } } // handle ff/rw if(m_playSpeed != DVD_PLAYSPEED_NORMAL && m_playSpeed != DVD_PLAYSPEED_PAUSE) { if (isInMenu) { // this can't be done in menu SetPlaySpeed(DVD_PLAYSPEED_NORMAL); } else { bool check = true; // only check if we have video if (m_CurrentVideo.id < 0 || m_CurrentVideo.syncState != IDVDStreamPlayer::SYNC_INSYNC) check = false; // video message queue either initiated or already seen eof else if (m_CurrentVideo.inited == false && m_playSpeed >= 0) check = false; // don't check if time has not advanced since last check else if (m_SpeedState.lasttime == GetTime()) check = false; // skip if frame at screen has no valid timestamp else if (m_VideoPlayerVideo->GetCurrentPts() == DVD_NOPTS_VALUE) check = false; // skip if frame on screen has not changed else if (m_SpeedState.lastpts == m_VideoPlayerVideo->GetCurrentPts() && (m_SpeedState.lastpts > m_State.dts || m_playSpeed > 0)) check = false; if (check) { m_SpeedState.lastpts = m_VideoPlayerVideo->GetCurrentPts(); m_SpeedState.lasttime = GetTime(); m_SpeedState.lastabstime = CDVDClock::GetAbsoluteClock(); // check how much off clock video is when ff/rw:ing // a problem here is that seeking isn't very accurate // and since the clock will be resynced after seek // we might actually not really be playing at the wanted // speed. 
we'd need to have some way to not resync the clock // after a seek to remember timing. still need to handle // discontinuities somehow double error; error = m_clock.GetClock() - m_SpeedState.lastpts; error *= m_playSpeed / abs(m_playSpeed); // allow a bigger error when going ff, the faster we go // the the bigger is the error we allow if (m_playSpeed > DVD_PLAYSPEED_NORMAL) { int errorwin = m_playSpeed / DVD_PLAYSPEED_NORMAL; if (errorwin > 8) errorwin = 8; error /= errorwin; } if(error > DVD_MSEC_TO_TIME(1000)) { error = (int)DVD_TIME_TO_MSEC(m_clock.GetClock()) - m_SpeedState.lastseekpts; if(std::abs(error) > 1000) { CLog::Log(LOGDEBUG, "CVideoPlayer::Process - Seeking to catch up"); m_SpeedState.lastseekpts = (int)DVD_TIME_TO_MSEC(m_clock.GetClock()); int direction = (m_playSpeed > 0) ? 1 : -1; int iTime = DVD_TIME_TO_MSEC(m_clock.GetClock() + m_State.time_offset + 1000000.0 * direction); m_messenger.Put(new CDVDMsgPlayerSeek(iTime, (GetPlaySpeed() < 0), true, false, false, true, false)); } } } } } } bool CVideoPlayer::CheckPlayerInit(CCurrentStream& current) { if (current.inited) return false; if (current.startpts != DVD_NOPTS_VALUE) { if(current.dts == DVD_NOPTS_VALUE) { CLog::Log(LOGDEBUG, "%s - dropping packet type:%d dts:%f to get to start point at %f", __FUNCTION__, current.player, current.dts, current.startpts); return true; } if ((current.startpts - current.dts) > DVD_SEC_TO_TIME(20)) { CLog::Log(LOGDEBUG, "%s - too far to decode before finishing seek", __FUNCTION__); if(m_CurrentAudio.startpts != DVD_NOPTS_VALUE) m_CurrentAudio.startpts = current.dts; if(m_CurrentVideo.startpts != DVD_NOPTS_VALUE) m_CurrentVideo.startpts = current.dts; if(m_CurrentSubtitle.startpts != DVD_NOPTS_VALUE) m_CurrentSubtitle.startpts = current.dts; if(m_CurrentTeletext.startpts != DVD_NOPTS_VALUE) m_CurrentTeletext.startpts = current.dts; if(m_CurrentRadioRDS.startpts != DVD_NOPTS_VALUE) m_CurrentRadioRDS.startpts = current.dts; } if(current.dts < current.startpts) { 
CLog::Log(LOGDEBUG, "%s - dropping packet type:%d dts:%f to get to start point at %f", __FUNCTION__, current.player, current.dts, current.startpts); return true; } } if (current.dts != DVD_NOPTS_VALUE) { current.inited = true; current.startpts = current.dts; } return false; } void CVideoPlayer::UpdateCorrection(DemuxPacket* pkt, double correction) { if(pkt->dts != DVD_NOPTS_VALUE) pkt->dts -= correction; if(pkt->pts != DVD_NOPTS_VALUE) pkt->pts -= correction; } void CVideoPlayer::UpdateTimestamps(CCurrentStream& current, DemuxPacket* pPacket) { double dts = current.dts; /* update stored values */ if(pPacket->dts != DVD_NOPTS_VALUE) dts = pPacket->dts; else if(pPacket->pts != DVD_NOPTS_VALUE) dts = pPacket->pts; /* calculate some average duration */ if(pPacket->duration != DVD_NOPTS_VALUE) current.dur = pPacket->duration; else if(dts != DVD_NOPTS_VALUE && current.dts != DVD_NOPTS_VALUE) current.dur = 0.1 * (current.dur * 9 + (dts - current.dts)); current.dts = dts; } static void UpdateLimits(double& minimum, double& maximum, double dts) { if(dts == DVD_NOPTS_VALUE) return; if(minimum == DVD_NOPTS_VALUE || minimum > dts) minimum = dts; if(maximum == DVD_NOPTS_VALUE || maximum < dts) maximum = dts; } bool CVideoPlayer::CheckContinuity(CCurrentStream& current, DemuxPacket* pPacket) { if (m_playSpeed < DVD_PLAYSPEED_PAUSE) return false; if( pPacket->dts == DVD_NOPTS_VALUE || current.dts == DVD_NOPTS_VALUE) return false; double mindts = DVD_NOPTS_VALUE, maxdts = DVD_NOPTS_VALUE; UpdateLimits(mindts, maxdts, m_CurrentAudio.dts); UpdateLimits(mindts, maxdts, m_CurrentVideo.dts); UpdateLimits(mindts, maxdts, m_CurrentAudio.dts_end()); UpdateLimits(mindts, maxdts, m_CurrentVideo.dts_end()); /* if we don't have max and min, we can't do anything more */ if( mindts == DVD_NOPTS_VALUE || maxdts == DVD_NOPTS_VALUE ) return false; double correction = 0.0; if( pPacket->dts > maxdts + DVD_MSEC_TO_TIME(1000)) { CLog::Log(LOGDEBUG, "CVideoPlayer::CheckContinuity - resync forward :%d, 
prev:%f, curr:%f, diff:%f" , current.type, current.dts, pPacket->dts, pPacket->dts - maxdts); correction = pPacket->dts - maxdts; } /* if it's large scale jump, correct for it after having confirmed the jump */ if(pPacket->dts + DVD_MSEC_TO_TIME(500) < current.dts_end()) { CLog::Log(LOGDEBUG, "CVideoPlayer::CheckContinuity - resync backward :%d, prev:%f, curr:%f, diff:%f" , current.type, current.dts, pPacket->dts, pPacket->dts - current.dts); correction = pPacket->dts - current.dts_end(); } else if(pPacket->dts < current.dts) { CLog::Log(LOGDEBUG, "CVideoPlayer::CheckContinuity - wrapback :%d, prev:%f, curr:%f, diff:%f" , current.type, current.dts, pPacket->dts, pPacket->dts - current.dts); } double lastdts = pPacket->dts; if(correction != 0.0) { // we want the dts values of two streams to close, or for one to be invalid (e.g. from a missing audio stream) double this_dts = pPacket->dts; double that_dts = current.type == STREAM_AUDIO ? m_CurrentVideo.lastdts : m_CurrentAudio.lastdts; if (m_CurrentAudio.id == -1 || m_CurrentVideo.id == -1 || current.lastdts == DVD_NOPTS_VALUE || fabs(this_dts - that_dts) < DVD_MSEC_TO_TIME(1000)) { m_offset_pts += correction; UpdateCorrection(pPacket, correction); lastdts = pPacket->dts; CLog::Log(LOGDEBUG, "CVideoPlayer::CheckContinuity - update correction: %f", correction); } else { // not sure yet - flags the packets as unknown until we get confirmation on another audio/video packet pPacket->dts = DVD_NOPTS_VALUE; pPacket->pts = DVD_NOPTS_VALUE; } } else { if (current.avsync == CCurrentStream::AV_SYNC_CHECK) current.avsync = CCurrentStream::AV_SYNC_CONT; } current.lastdts = lastdts; return true; } bool CVideoPlayer::CheckSceneSkip(CCurrentStream& current) { if(!m_Edl.HasCut()) return false; if(current.dts == DVD_NOPTS_VALUE) return false; if(current.inited == false) return false; CEdl::Cut cut; return m_Edl.InCut(DVD_TIME_TO_MSEC(current.dts + m_offset_pts), &cut) && cut.action == CEdl::CUT; } void 
CVideoPlayer::CheckAutoSceneSkip() { if(!m_Edl.HasCut()) return; /* * Check that there is an audio and video stream. */ if(m_CurrentAudio.id < 0 || m_CurrentVideo.id < 0) return; /* * If there is a startpts defined for either the audio or video stream then VideoPlayer is still * still decoding frames to get to the previously requested seek point. */ if(m_CurrentAudio.inited == false || m_CurrentVideo.inited == false) return; if(m_CurrentAudio.dts == DVD_NOPTS_VALUE || m_CurrentVideo.dts == DVD_NOPTS_VALUE) return; const int64_t clock = m_omxplayer_mode ? GetTime() : DVD_TIME_TO_MSEC(std::min(m_CurrentAudio.dts, m_CurrentVideo.dts) + m_offset_pts); CEdl::Cut cut; if(!m_Edl.InCut(clock, &cut)) return; if(cut.action == CEdl::CUT && !(cut.end == m_EdlAutoSkipMarkers.cut || cut.start == m_EdlAutoSkipMarkers.cut)) // To prevent looping if same cut again { CLog::Log(LOGDEBUG, "%s - Clock in EDL cut [%s - %s]: %s. Automatically skipping over.", __FUNCTION__, CEdl::MillisecondsToTimeString(cut.start).c_str(), CEdl::MillisecondsToTimeString(cut.end).c_str(), CEdl::MillisecondsToTimeString(clock).c_str()); /* * Seeking either goes to the start or the end of the cut depending on the play direction. */ int seek = GetPlaySpeed() >= 0 ? cut.end : cut.start; /* * Seeking is NOT flushed so any content up to the demux point is retained when playing forwards. */ m_messenger.Put(new CDVDMsgPlayerSeek(seek, true, false, m_omxplayer_mode, true, false, true)); /* * Seek doesn't always work reliably. Last physical seek time is recorded to prevent looping * if there was an error with seeking and it landed somewhere unexpected, perhaps back in the * cut. The cut automatic skip marker is reset every 500ms allowing another attempt at the seek. */ m_EdlAutoSkipMarkers.cut = GetPlaySpeed() >= 0 ? 
cut.end : cut.start; } else if(cut.action == CEdl::COMM_BREAK && GetPlaySpeed() >= 0 && cut.start > m_EdlAutoSkipMarkers.commbreak_end) { CLog::Log(LOGDEBUG, "%s - Clock in commercial break [%s - %s]: %s. Automatically skipping to end of commercial break (only done once per break)", __FUNCTION__, CEdl::MillisecondsToTimeString(cut.start).c_str(), CEdl::MillisecondsToTimeString(cut.end).c_str(), CEdl::MillisecondsToTimeString(clock).c_str()); /* * Seeking is NOT flushed so any content up to the demux point is retained when playing forwards. */ m_messenger.Put(new CDVDMsgPlayerSeek(cut.end + 1, true, false, m_omxplayer_mode, true, false, true)); /* * Each commercial break is only skipped once so poorly detected commercial breaks can be * manually re-entered. Start and end are recorded to prevent looping and to allow seeking back * to the start of the commercial break if incorrectly flagged. */ m_EdlAutoSkipMarkers.commbreak_start = cut.start; m_EdlAutoSkipMarkers.commbreak_end = cut.end; m_EdlAutoSkipMarkers.seek_to_start = true; // Allow backwards Seek() to go directly to the start } } void CVideoPlayer::SynchronizeDemuxer(unsigned int timeout) { if(IsCurrentThread()) return; if(!m_messenger.IsInited()) return; CDVDMsgGeneralSynchronize* message = new CDVDMsgGeneralSynchronize(timeout, 0); m_messenger.Put(message->Acquire()); message->Wait(&m_bStop, 0); message->Release(); } void CVideoPlayer::SynchronizePlayers(unsigned int sources) { /* we need a big timeout as audio queue is about 8seconds for 2ch ac3 */ const int timeout = 10*1000; // in milliseconds CDVDMsgGeneralSynchronize* message = new CDVDMsgGeneralSynchronize(timeout, sources); if (m_CurrentAudio.id >= 0) m_VideoPlayerAudio->SendMessage(message->Acquire()); if (m_CurrentVideo.id >= 0) m_VideoPlayerVideo->SendMessage(message->Acquire()); /* TODO - we have to rewrite the sync class, to not require all other players waiting for subtitle, should only be the oposite way if (m_CurrentSubtitle.id >= 0) 
// Map a stream-player target id to the corresponding player instance,
// or NULL when the id is not one of the known targets.
IDVDStreamPlayer* CVideoPlayer::GetStreamPlayer(unsigned int target)
{
  switch (target)
  {
    case VideoPlayer_AUDIO:
      return m_VideoPlayerAudio;
    case VideoPlayer_VIDEO:
      return m_VideoPlayerVideo;
    case VideoPlayer_SUBTITLE:
      return m_VideoPlayerSubtitle;
    case VideoPlayer_TELETEXT:
      return m_VideoPlayerTeletext;
    case VideoPlayer_RDS:
      return m_VideoPlayerRadioRDS;
    default:
      return NULL;
  }
}
CloseStream(m_CurrentSubtitle, false);  // clear overlay container
  CloseStream(m_CurrentTeletext, !m_bAbortRequest);
  CloseStream(m_CurrentRadioRDS, !m_bAbortRequest);

  // destroy objects
  SAFE_DELETE(m_pDemuxer);
  SAFE_DELETE(m_pSubtitleDemuxer);
  SAFE_DELETE(m_pCCDemuxer);
  SAFE_DELETE(m_pInputStream);

  // clean up all selection streams
  m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_NONE);

  m_messenger.End();

  if (m_omxplayer_mode)
  {
    // OMX (Raspberry Pi) clock must be stopped and deinitialised explicitly.
    m_OmxPlayerState.av_clock.OMXStop();
    m_OmxPlayerState.av_clock.OMXStateIdle();
    m_OmxPlayerState.av_clock.OMXDeinitialize();
  }

  m_bStop = true;
  // if we didn't stop playing, advance to the next item in xbmc's playlist
  if(m_PlayerOptions.identify == false)
  {
    if (m_bAbortRequest)
      m_callback.OnPlayBackStopped();
    else
      m_callback.OnPlayBackEnded();
  }

  // set event to inform openfile something went wrong in case openfile is still waiting for this event
  m_ready.Set();
}

// Player-thread message pump: drains the messenger queue and dispatches each
// message type (seeks, stream selection, speed changes, PVR channel switching,
// state sync, ...). Every received message is Release()'d at the end of the loop.
void CVideoPlayer::HandleMessages()
{
  CDVDMsg* pMsg;

  while (m_messenger.Get(&pMsg, 0) == MSGQ_OK)
  {
    // Only act on a seek when no further seek/seek-chapter messages are
    // queued behind it — later requests supersede this one.
    if (pMsg->IsType(CDVDMsg::PLAYER_SEEK) &&
        m_messenger.GetPacketCount(CDVDMsg::PLAYER_SEEK) == 0 &&
        m_messenger.GetPacketCount(CDVDMsg::PLAYER_SEEK_CHAPTER) == 0)
    {
      CDVDMsgPlayerSeek &msg(*((CDVDMsgPlayerSeek*)pMsg));

      if (!m_State.canseek)
      {
        pMsg->Release();
        continue;
      }

      if(!msg.GetTrickPlay())
      {
        g_infoManager.SetDisplayAfterSeek(100000);
        if(msg.GetFlush())
          SetCaching(CACHESTATE_FLUSH);
      }

      double start = DVD_NOPTS_VALUE;

      // Optionally remap the target through the EDL cut map so cut regions are accounted for.
      int time = msg.GetRestore() ? m_Edl.RestoreCutTime(msg.GetTime()) : msg.GetTime();

      // if input stream doesn't support ISeekTime, convert back to pts
      // TODO:
      // After demuxer we add an offset to input pts so that displayed time and clock are
      // increasing steadily. For seeking we need to determine the boundaries and offset
      // of the desired segment.
// With the current approach calculated time may point
      // to nirvana
      if (m_pInputStream->GetIPosTime() == nullptr)
        time += DVD_TIME_TO_MSEC(m_offset_pts - m_State.time_offset);

      CLog::Log(LOGDEBUG, "demuxer seek to: %d", time);
      if (m_pDemuxer && m_pDemuxer->SeekTime(time, msg.GetBackward(), &start))
      {
        CLog::Log(LOGDEBUG, "demuxer seek to: %d, success", time);
        if(m_pSubtitleDemuxer)
        {
          if(!m_pSubtitleDemuxer->SeekTime(time, msg.GetBackward()))
            CLog::Log(LOGDEBUG, "failed to seek subtitle demuxer: %d, success", time);
        }
        // dts after successful seek
        if (start == DVD_NOPTS_VALUE)
          m_State.dts = DVD_MSEC_TO_TIME(time) - m_State.time_offset;
        else
        {
          start -= m_offset_pts;
          m_State.dts = start;
        }

        FlushBuffers(!msg.GetFlush(), start, msg.GetAccurate(), msg.GetSync());
      }
      else
        CLog::Log(LOGWARNING, "error while seeking");

      // set flag to indicate we have finished a seeking request
      if(!msg.GetTrickPlay())
        g_infoManager.SetDisplayAfterSeek();

      // dvd's will issue a HOP_CHANNEL that we need to skip
      if(m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
        m_dvd.state = DVDSTATE_SEEK;
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SEEK_CHAPTER) &&
             m_messenger.GetPacketCount(CDVDMsg::PLAYER_SEEK) == 0 &&
             m_messenger.GetPacketCount(CDVDMsg::PLAYER_SEEK_CHAPTER) == 0)
    {
      g_infoManager.SetDisplayAfterSeek(100000);
      SetCaching(CACHESTATE_FLUSH);

      CDVDMsgPlayerSeekChapter &msg(*((CDVDMsgPlayerSeekChapter*)pMsg));
      double start = DVD_NOPTS_VALUE;
      double offset = 0;
      int64_t beforeSeek = GetTime();

      // This should always be the case.
      if(m_pDemuxer && m_pDemuxer->SeekChapter(msg.GetChapter(), &start))
      {
        if (start != DVD_NOPTS_VALUE)
          start -= m_offset_pts;
        FlushBuffers(false, start, true);
        offset = DVD_TIME_TO_MSEC(start) - beforeSeek;
        m_callback.OnPlayBackSeekChapter(msg.GetChapter());
      }

      g_infoManager.SetDisplayAfterSeek(2500, offset);
    }
    else if (pMsg->IsType(CDVDMsg::DEMUXER_RESET))
    {
      // Drop cached stream pointers before resetting — the demuxer owns them.
      m_CurrentAudio.stream = NULL;
      m_CurrentVideo.stream = NULL;
      m_CurrentSubtitle.stream = NULL;

      // we need to reset the demuxer, probably because the streams have changed
      if(m_pDemuxer)
        m_pDemuxer->Reset();
      if(m_pSubtitleDemuxer)
        m_pSubtitleDemuxer->Reset();
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_AUDIOSTREAM))
    {
      CDVDMsgPlayerSetAudioStream* pMsg2 = (CDVDMsgPlayerSetAudioStream*)pMsg;

      SelectionStream& st = m_SelectionStreams.Get(STREAM_AUDIO, pMsg2->GetStreamId());
      if(st.source != STREAM_SOURCE_NONE)
      {
        // On DVD navigation streams the navigator performs the switch;
        // otherwise close and reopen the audio stream ourselves.
        if(st.source == STREAM_SOURCE_NAV && m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
        {
          CDVDInputStreamNavigator* pStream = (CDVDInputStreamNavigator*)m_pInputStream;
          if(pStream->SetActiveAudioStream(st.id))
          {
            m_dvd.iSelectedAudioStream = -1;
            CloseStream(m_CurrentAudio, false);
            // Seek to the current time so playback resumes cleanly on the new stream.
            m_messenger.Put(new CDVDMsgPlayerSeek((int) GetTime(), true, true, true, true, true));
          }
        }
        else
        {
          CloseStream(m_CurrentAudio, false);
          OpenStream(m_CurrentAudio, st.id, st.source);
          AdaptForcedSubtitles();
          m_messenger.Put(new CDVDMsgPlayerSeek((int) GetTime(), true, true, true, true, true));
        }
      }
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_VIDEOSTREAM))
    {
      CDVDMsgPlayerSetVideoStream* pMsg2 = (CDVDMsgPlayerSetVideoStream*)pMsg;

      SelectionStream& st = m_SelectionStreams.Get(STREAM_VIDEO, pMsg2->GetStreamId());
      if (st.source != STREAM_SOURCE_NONE)
      {
        if (st.source == STREAM_SOURCE_NAV && m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
        {
          // DVD "video streams" map to angles on the navigator.
          CDVDInputStreamNavigator* pStream = (CDVDInputStreamNavigator*)m_pInputStream;
          if (pStream->SetAngle(st.id))
          {
            m_dvd.iSelectedVideoStream = st.id;
            m_messenger.Put(new CDVDMsgPlayerSeek((int)GetTime(), true, true, true, true, true));
          }
        }
        else
        {
          CloseStream(m_CurrentVideo, false);
          OpenStream(m_CurrentVideo, st.id, st.source);
          m_messenger.Put(new CDVDMsgPlayerSeek((int)GetTime(), true, true, true, true, true));
        }
      }
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_SUBTITLESTREAM))
    {
      CDVDMsgPlayerSetSubtitleStream* pMsg2 = (CDVDMsgPlayerSetSubtitleStream*)pMsg;

      SelectionStream& st = m_SelectionStreams.Get(STREAM_SUBTITLE, pMsg2->GetStreamId());
      if(st.source != STREAM_SOURCE_NONE)
      {
        if(st.source == STREAM_SOURCE_NAV && m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
        {
          CDVDInputStreamNavigator* pStream = (CDVDInputStreamNavigator*)m_pInputStream;
          if(pStream->SetActiveSubtitleStream(st.id))
          {
            m_dvd.iSelectedSPUStream = -1;
            CloseStream(m_CurrentSubtitle, false);
          }
        }
        else
        {
          CloseStream(m_CurrentSubtitle, false);
          OpenStream(m_CurrentSubtitle, st.id, st.source);
        }
      }
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_SUBTITLESTREAM_VISIBLE))
    {
      CDVDMsgBool* pValue = (CDVDMsgBool*)pMsg;
      SetSubtitleVisibleInternal(pValue->m_value);
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_STATE))
    {
      // Restore a saved menu/navigation state (e.g. DVD resume).
      g_infoManager.SetDisplayAfterSeek(100000);
      SetCaching(CACHESTATE_FLUSH);

      CDVDMsgPlayerSetState* pMsgPlayerSetState = (CDVDMsgPlayerSetState*)pMsg;

      if (CDVDInputStream::IMenus* ptr = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream))
      {
        if(ptr->SetState(pMsgPlayerSetState->GetState()))
        {
          m_dvd.state = DVDSTATE_NORMAL;
          m_dvd.iDVDStillStartTime = 0;
          m_dvd.iDVDStillTime = 0;
        }
      }

      g_infoManager.SetDisplayAfterSeek();
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SET_RECORD))
    {
      CDVDInputStreamPVRManager* input = dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream);
      if(input)
        input->Record(*(CDVDMsgBool*)pMsg);
    }
    else if (pMsg->IsType(CDVDMsg::GENERAL_FLUSH))
    {
      FlushBuffers(false);
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_SETSPEED))
    {
      int speed = static_cast<CDVDMsgInt*>(pMsg)->m_value;

      // correct our current clock, as it would start going wrong otherwise
      if(m_State.timestamp > 0)
      {
        double offset;
        offset = CDVDClock::GetAbsoluteClock() - m_State.timestamp;
        offset *= m_playSpeed / DVD_PLAYSPEED_NORMAL;
        offset = DVD_TIME_TO_MSEC(offset);
        // Clamp the extrapolated correction to +/- 1 second.
        if(offset > 1000)
          offset = 1000;
        if(offset < -1000)
          offset = -1000;
        m_State.time += offset;
        m_State.timestamp = CDVDClock::GetAbsoluteClock();
      }

      // Notify the callback only for transitions between non-pause speeds.
      if (speed != DVD_PLAYSPEED_PAUSE && m_playSpeed != DVD_PLAYSPEED_PAUSE && speed != m_playSpeed)
        m_callback.OnPlayBackSpeedChanged(speed / DVD_PLAYSPEED_NORMAL);

      if (m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER) && speed != m_playSpeed)
      {
        CDVDInputStreamPVRManager* pvrinputstream = static_cast<CDVDInputStreamPVRManager*>(m_pInputStream);
        pvrinputstream->Pause( speed == 0 );
      }

      // do a seek after rewind, clock is not in sync with current pts
      if (m_omxplayer_mode)
      {
        // when switching from trickplay to normal, we may not have a full set of reference frames
        // in decoder and we may get corrupt frames out. Seeking to current time will avoid this.
        if ( (speed != DVD_PLAYSPEED_PAUSE && speed != DVD_PLAYSPEED_NORMAL) ||
             (m_playSpeed != DVD_PLAYSPEED_PAUSE && m_playSpeed != DVD_PLAYSPEED_NORMAL) )
        {
          m_messenger.Put(new CDVDMsgPlayerSeek(GetTime(), (speed < 0), true, true, false, true));
        }
        else
        {
          m_OmxPlayerState.av_clock.OMXPause();
        }

        m_OmxPlayerState.av_clock.OMXSetSpeed(speed);
        CLog::Log(LOGDEBUG, "%s::%s CDVDMsg::PLAYER_SETSPEED speed : %d (%d)", "CVideoPlayer", __FUNCTION__, speed, m_playSpeed);
      }
      else if ((speed == DVD_PLAYSPEED_NORMAL) &&
               (m_playSpeed != DVD_PLAYSPEED_NORMAL) &&
               (m_playSpeed != DVD_PLAYSPEED_PAUSE))
      {
        // Returning to normal speed from trickplay: seek to the current
        // position so decoder and clock re-align.
        int64_t iTime = (int64_t)DVD_TIME_TO_MSEC(m_clock.GetClock() + m_State.time_offset);
        if (m_State.time != DVD_NOPTS_VALUE)
          iTime = m_State.time;
        m_messenger.Put(new CDVDMsgPlayerSeek(iTime, m_playSpeed < 0, true, false, false, true));
      }

      // if playspeed is different then DVD_PLAYSPEED_NORMAL or DVD_PLAYSPEED_PAUSE
      // audioplayer, stops outputing audio to audiorendere, but still tries to
      // sleep an correct amount for each packet
      // videoplayer just plays faster after the clock speed has been increased
      // 1. disable audio
      // 2. skip frames and adjust their pts or the clock
      m_playSpeed = speed;
      m_caching = CACHESTATE_DONE;
      m_clock.SetSpeed(speed);
      m_VideoPlayerAudio->SetSpeed(speed);
      m_VideoPlayerVideo->SetSpeed(speed);
      m_streamPlayerSpeed = speed;
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_SELECT_NUMBER) && m_messenger.GetPacketCount(CDVDMsg::PLAYER_CHANNEL_SELECT_NUMBER) == 0)
    {
      FlushBuffers(false);
      CDVDInputStreamPVRManager* input = dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream);
      // TODO find a better solution for the "otherStreaHack"
      // a stream is not sopposed to be terminated before demuxer
      if (input && input->IsOtherStreamHack())
      {
        SAFE_DELETE(m_pDemuxer);
      }
      if(input && input->SelectChannelByNumber(static_cast<CDVDMsgInt*>(pMsg)->m_value))
      {
        SAFE_DELETE(m_pDemuxer);
        m_playSpeed = DVD_PLAYSPEED_NORMAL;

        // when using fast channel switching some shortcuts are taken which
        // means we'll have to update the view mode manually
        m_renderManager.SetViewMode(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_ViewMode);
      }
      else
      {
        CLog::Log(LOGWARNING, "%s - failed to switch channel. playback stopped", __FUNCTION__);
        CApplicationMessenger::GetInstance().PostMsg(TMSG_MEDIA_STOP);
      }
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_SELECT) && m_messenger.GetPacketCount(CDVDMsg::PLAYER_CHANNEL_SELECT) == 0)
    {
      FlushBuffers(false);
      CDVDInputStreamPVRManager* input = dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream);
      if (input && input->IsOtherStreamHack())
      {
        SAFE_DELETE(m_pDemuxer);
      }
      if(input && input->SelectChannel(static_cast<CDVDMsgType <CPVRChannelPtr> *>(pMsg)->m_value))
      {
        SAFE_DELETE(m_pDemuxer);
        m_playSpeed = DVD_PLAYSPEED_NORMAL;
      }
      else
      {
        CLog::Log(LOGWARNING, "%s - failed to switch channel. playback stopped", __FUNCTION__);
        CApplicationMessenger::GetInstance().PostMsg(TMSG_MEDIA_STOP);
      }
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_NEXT) || pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREV) ||
             pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_NEXT) || pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_PREV))
    {
      CDVDInputStreamPVRManager* input = dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream);
      if (input)
      {
        bool bSwitchSuccessful(false);
        // Preview mode: show the new channel without committing the switch yet.
        bool bShowPreview(pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_NEXT) ||
                          pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_PREV) ||
                          CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_CHANNELENTRYTIMEOUT) > 0);

        if (!bShowPreview)
        {
          g_infoManager.SetDisplayAfterSeek(100000);
          FlushBuffers(false);
          if (input->IsOtherStreamHack())
          {
            SAFE_DELETE(m_pDemuxer);
          }
        }

        if (pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_NEXT) || pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_NEXT))
          bSwitchSuccessful = input->NextChannel(bShowPreview);
        else
          bSwitchSuccessful = input->PrevChannel(bShowPreview);

        if (bSwitchSuccessful)
        {
          if (bShowPreview)
          {
            UpdateApplication(0);

            if (pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_NEXT) || pMsg->IsType(CDVDMsg::PLAYER_CHANNEL_PREVIEW_PREV))
              m_ChannelEntryTimeOut.SetInfinite();
            else
              m_ChannelEntryTimeOut.Set(CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_CHANNELENTRYTIMEOUT));
          }
          else
          {
            m_ChannelEntryTimeOut.SetInfinite();
            SAFE_DELETE(m_pDemuxer);
            m_playSpeed = DVD_PLAYSPEED_NORMAL;

            g_infoManager.SetDisplayAfterSeek();

            // when using fast channel switching some shortcuts are taken which
            // means we'll have to update the view mode manually
            m_renderManager.SetViewMode(CMediaSettings::GetInstance().GetCurrentVideoSettings().m_ViewMode);
          }
        }
        else
        {
          CLog::Log(LOGWARNING, "%s - failed to switch channel. playback stopped", __FUNCTION__);
          CApplicationMessenger::GetInstance().PostMsg(TMSG_MEDIA_STOP);
        }
      }
    }
    else if (pMsg->IsType(CDVDMsg::GENERAL_GUI_ACTION))
      OnAction(((CDVDMsgType<CAction>*)pMsg)->m_value);
    else if (pMsg->IsType(CDVDMsg::PLAYER_STARTED))
    {
      // A stream player reports it produced its first data; record cache
      // stats and move that stream into the wait-for-sync state.
      SStartMsg& msg = ((CDVDMsgType<SStartMsg>*)pMsg)->m_value;
      if (msg.player == VideoPlayer_AUDIO)
      {
        m_CurrentAudio.syncState = IDVDStreamPlayer::SYNC_WAITSYNC;
        m_CurrentAudio.cachetime = msg.cachetime;
        m_CurrentAudio.cachetotal = msg.cachetotal;
        m_CurrentAudio.starttime = msg.timestamp;
      }
      if (msg.player == VideoPlayer_VIDEO)
      {
        m_CurrentVideo.syncState = IDVDStreamPlayer::SYNC_WAITSYNC;
        m_CurrentVideo.cachetime = msg.cachetime;
        m_CurrentVideo.cachetotal = msg.cachetotal;
        m_CurrentVideo.starttime = msg.timestamp;
      }
      CLog::Log(LOGDEBUG, "CVideoPlayer::HandleMessages - player started %d", msg.player);
    }
    else if (pMsg->IsType(CDVDMsg::SUBTITLE_ADDFILE))
    {
      int id = AddSubtitleFile(((CDVDMsgType<std::string>*) pMsg)->m_value);
      if (id >= 0)
      {
        SetSubtitle(id);
        SetSubtitleVisibleInternal(true);
      }
    }
    else if (pMsg->IsType(CDVDMsg::GENERAL_SYNCHRONIZE))
    {
      if (((CDVDMsgGeneralSynchronize*)pMsg)->Wait(100, SYNCSOURCE_OWNER))
        CLog::Log(LOGDEBUG, "CVideoPlayer - CDVDMsg::GENERAL_SYNCHRONIZE");
    }
    else if (pMsg->IsType(CDVDMsg::PLAYER_AVCHANGE))
    {
      UpdateStreamInfos();
      g_dataCacheCore.SignalAudioInfoChange();
      g_dataCacheCore.SignalVideoInfoChange();
    }

    pMsg->Release();
  }
}

// Transition the caching state machine. CACHESTATE_FLUSH is resolved to FULL
// or INIT depending on whether caching times are available. Entering
// FULL/INIT pauses the clock and stream players; PLAY/DONE restores them to
// the current play speed.
void CVideoPlayer::SetCaching(ECacheState state)
{
  if(state == CACHESTATE_FLUSH)
  {
    double level, delay, offset;
    if(GetCachingTimes(level, delay, offset))
      state = CACHESTATE_FULL;
    else
      state = CACHESTATE_INIT;
  }

  if(m_caching == state)
    return;

  CLog::Log(LOGDEBUG, "CVideoPlayer::SetCaching - caching state %d", state);
  if (state == CACHESTATE_FULL || state == CACHESTATE_INIT)
  {
    m_clock.SetSpeed(DVD_PLAYSPEED_PAUSE);

    if (m_omxplayer_mode)
      m_OmxPlayerState.av_clock.OMXPause();

    m_VideoPlayerAudio->SetSpeed(DVD_PLAYSPEED_PAUSE);
m_VideoPlayerVideo->SetSpeed(DVD_PLAYSPEED_PAUSE);
    m_streamPlayerSpeed = DVD_PLAYSPEED_PAUSE;

    m_pInputStream->ResetScanTimeout((unsigned int) CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_SCANTIME) * 1000);
    m_cachingTimer.Set(5000);
  }

  if (state == CACHESTATE_PLAY ||
      (state == CACHESTATE_DONE && m_caching != CACHESTATE_PLAY))
  {
    // Resume at the previously requested play speed.
    m_clock.SetSpeed(m_playSpeed);
    m_VideoPlayerAudio->SetSpeed(m_playSpeed);
    m_VideoPlayerVideo->SetSpeed(m_playSpeed);
    m_streamPlayerSpeed = m_playSpeed;
    m_pInputStream->ResetScanTimeout(0);
  }
  m_caching = state;

  m_clock.SetSpeedAdjust(0);
  if (m_omxplayer_mode)
    m_OmxPlayerState.av_clock.OMXSetSpeedAdjust(0);
}

// Request a play-speed change. While playing, the change is routed through
// the player thread's message queue; otherwise the fields are set directly.
void CVideoPlayer::SetPlaySpeed(int speed)
{
  if (IsPlaying())
    m_messenger.Put(new CDVDMsgInt(CDVDMsg::PLAYER_SETSPEED, speed));
  else
  {
    m_playSpeed = speed;
    m_streamPlayerSpeed = speed;
  }
}

// True when the current source allows pausing (protected by the state lock).
bool CVideoPlayer::CanPause()
{
  CSingleLock lock(m_StateSection);
  return m_State.canpause;
}

// Toggle pause. If we are caching at non-pause speed, finish caching instead
// of toggling; otherwise flip between pause and normal speed and notify the
// callback accordingly.
void CVideoPlayer::Pause()
{
  CSingleLock lock(m_StateSection);
  if (!m_State.canpause)
    return;
  lock.Leave();

  if(m_playSpeed != DVD_PLAYSPEED_PAUSE && IsCaching())
  {
    SetCaching(CACHESTATE_DONE);
    return;
  }

  // return to normal speed if it was paused before, pause otherwise
  if (m_playSpeed == DVD_PLAYSPEED_PAUSE)
  {
    SetPlaySpeed(DVD_PLAYSPEED_NORMAL);
    m_callback.OnPlayBackResumed();
  }
  else
  {
    SetPlaySpeed(DVD_PLAYSPEED_PAUSE);
    m_callback.OnPlayBackPaused();
  }
}

// Paused also covers "buffering": caching counts as paused for the GUI.
bool CVideoPlayer::IsPaused() const
{
  return m_playSpeed == DVD_PLAYSPEED_PAUSE || IsCaching();
}

bool CVideoPlayer::HasVideo() const
{
  return m_HasVideo;
}

bool CVideoPlayer::HasAudio() const
{
  return m_HasAudio;
}

bool CVideoPlayer::HasRDS() const
{
  return m_CurrentRadioRDS.id >= 0;
}

bool CVideoPlayer::IsPassthrough() const
{
  return m_VideoPlayerAudio->IsPassthrough();
}

bool CVideoPlayer::CanSeek()
{
  CSingleLock lock(m_StateSection);
  return m_State.canseek;
}

// GUI step seek: small/large step forwards or backwards, with optional
// chapter override for large steps, EDL-aware adjustments and stack support.
void CVideoPlayer::Seek(bool bPlus, bool bLargeStep, bool bChapterOverride)
{
  // While paused, a small forward step tries single-frame stepping first.
  if( m_playSpeed == DVD_PLAYSPEED_PAUSE && bPlus &&
!bLargeStep)
  {
    if (m_VideoPlayerVideo->StepFrame())
      return;
  }
  if (!m_State.canseek)
    return;

  // Large steps may be redirected to chapter navigation when chapters exist.
  if (bLargeStep && bChapterOverride && GetChapter() > 0)
  {
    if (!bPlus)
    {
      SeekChapter(GetChapter() - 1);
      return;
    }
    else if (GetChapter() < GetChapterCount())
    {
      SeekChapter(GetChapter() + 1);
      return;
    }
  }

  // Compute the absolute target (msec) either from configured time steps or
  // from configured percentage steps of the total duration.
  int64_t seek;
  if (g_advancedSettings.m_videoUseTimeSeeking && GetTotalTime() > 2000*g_advancedSettings.m_videoTimeSeekForwardBig)
  {
    if (bLargeStep)
      seek = bPlus ? g_advancedSettings.m_videoTimeSeekForwardBig : g_advancedSettings.m_videoTimeSeekBackwardBig;
    else
      seek = bPlus ? g_advancedSettings.m_videoTimeSeekForward : g_advancedSettings.m_videoTimeSeekBackward;
    seek *= 1000;
    seek += GetTime();
  }
  else
  {
    int percent;
    if (bLargeStep)
      percent = bPlus ? g_advancedSettings.m_videoPercentSeekForwardBig : g_advancedSettings.m_videoPercentSeekBackwardBig;
    else
      percent = bPlus ? g_advancedSettings.m_videoPercentSeekForward : g_advancedSettings.m_videoPercentSeekBackward;
    seek = (int64_t)(GetTotalTimeInMsec()*(GetPercentage()+percent)/100);
  }

  bool restore = true;
  if (m_Edl.HasCut())
  {
    /*
     * Alter the standard seek position based on whether any commercial breaks have been
     * automatically skipped.
     */
    const int clock = DVD_TIME_TO_MSEC(m_clock.GetClock());
    /*
     * If a large backwards seek occurs within 10 seconds of the end of the last automated
     * commercial skip, then seek back to the start of the commercial break under the assumption
     * it was flagged incorrectly. 10 seconds grace period is allowed in case the watcher has to
     * fumble around finding the remote. Only happens once per commercial break.
     *
     * Small skip does not trigger this in case the start of the commercial break was in fact fine
     * but it skipped too far into the program. In that case small skip backwards behaves as normal.
     */
    if (!bPlus && bLargeStep && m_EdlAutoSkipMarkers.seek_to_start &&
        clock >= m_EdlAutoSkipMarkers.commbreak_end &&
        clock <= m_EdlAutoSkipMarkers.commbreak_end + 10*1000) // Only if within 10 seconds of the end (in msec)
    {
      CLog::Log(LOGDEBUG, "%s - Seeking back to start of commercial break [%s - %s] as large backwards skip activated within 10 seconds of the automatic commercial skip (only done once per break).",
                __FUNCTION__, CEdl::MillisecondsToTimeString(m_EdlAutoSkipMarkers.commbreak_start).c_str(),
                CEdl::MillisecondsToTimeString(m_EdlAutoSkipMarkers.commbreak_end).c_str());
      seek = m_EdlAutoSkipMarkers.commbreak_start;
      restore = false;
      m_EdlAutoSkipMarkers.seek_to_start = false; // So this will only happen within the 10 second grace period once.
    }
    /*
     * If big skip forward within the last "reverted" commercial break, seek to the end of the
     * commercial break under the assumption that the break was incorrectly flagged and playback has
     * now reached the actual start of the commercial break. Assume that the end is flagged more
     * correctly than the landing point for a standard big skip (ends seem to be flagged more
     * accurately than the start).
     */
    else if (bPlus && bLargeStep &&
             clock >= m_EdlAutoSkipMarkers.commbreak_start &&
             clock <= m_EdlAutoSkipMarkers.commbreak_end)
    {
      CLog::Log(LOGDEBUG, "%s - Seeking to end of previously skipped commercial break [%s - %s] as big forwards skip activated within the break.",
                __FUNCTION__, CEdl::MillisecondsToTimeString(m_EdlAutoSkipMarkers.commbreak_start).c_str(),
                CEdl::MillisecondsToTimeString(m_EdlAutoSkipMarkers.commbreak_end).c_str());
      seek = m_EdlAutoSkipMarkers.commbreak_end;
      restore = false;
    }
  }

  int64_t time = GetTime();
  // A seek outside this part of a stacked file is delegated to the application.
  if(g_application.CurrentFileItem().IsStack() &&
     (seek > GetTotalTimeInMsec() || seek < 0))
  {
    g_application.SeekTime((seek - time) * 0.001 + g_application.GetTime());
    // warning, don't access any VideoPlayer variables here as
    // the VideoPlayer object may have been destroyed
    return;
  }

  m_messenger.Put(new CDVDMsgPlayerSeek((int)seek, !bPlus, true, false, restore));
  SynchronizeDemuxer(100);
  if (seek < 0)
    seek = 0;
  m_callback.OnPlayBackSeek((int)seek, (int)(seek - time));
}

// Jump to the next/previous EDL scene marker; returns false when no marker
// exists in the requested direction.
bool CVideoPlayer::SeekScene(bool bPlus)
{
  if (!m_Edl.HasSceneMarker())
    return false;

  /*
   * There is a 5 second grace period applied when seeking for scenes backwards. If there is no
   * grace period applied it is impossible to go backwards past a scene marker.
   */
int64_t clock = GetTime();
  if (!bPlus && clock > 5 * 1000) // 5 seconds
    clock -= 5 * 1000;

  int iScenemarker;
  if (m_Edl.GetNextSceneMarker(bPlus, clock, &iScenemarker))
  {
    /*
     * Seeking is flushed and inaccurate, just like Seek()
     */
    m_messenger.Put(new CDVDMsgPlayerSeek(iScenemarker, !bPlus, true, false, false));
    SynchronizeDemuxer(100);
    return true;
  }
  return false;
}

// Compose the audio debug-info line: demuxer info (under the state lock)
// plus the audio player's own info string.
void CVideoPlayer::GetAudioInfo(std::string& strAudioInfo)
{
  {
    CSingleLock lock(m_StateSection);
    strAudioInfo = StringUtils::Format("D(%s)", m_State.demux_audio.c_str());
  }
  strAudioInfo += StringUtils::Format("\nP(%s)", m_VideoPlayerAudio->GetPlayerInfo().c_str());
}

// Compose the video debug-info line, same structure as GetAudioInfo().
void CVideoPlayer::GetVideoInfo(std::string& strVideoInfo)
{
  {
    CSingleLock lock(m_StateSection);
    strVideoInfo = StringUtils::Format("D(%s)", m_State.demux_video.c_str());
  }
  strVideoInfo += StringUtils::Format("\nP(%s)", m_VideoPlayerVideo->GetPlayerInfo().c_str());
}

// Compose the general debug overlay string (A/V drift, EDL info, CPU usage,
// cache state). Two variants: OMX mode omits the video delay field.
void CVideoPlayer::GetGeneralInfo(std::string& strGeneralInfo)
{
  if (!m_bStop)
  {
    if (m_omxplayer_mode)
    {
      double apts = m_VideoPlayerAudio->GetCurrentPts();
      double vpts = m_VideoPlayerVideo->GetCurrentPts();
      double dDiff = 0;

      if( apts != DVD_NOPTS_VALUE && vpts != DVD_NOPTS_VALUE )
        dDiff = (apts - vpts) / DVD_TIME_BASE;

      std::string strEDL;
      strEDL += StringUtils::Format(", edl:%s", m_Edl.GetInfo().c_str());

      std::string strBuf;
      CSingleLock lock(m_StateSection);
      if(m_State.cache_bytes >= 0)
      {
        strBuf += StringUtils::Format(" forward:%s %2.0f%%"
                                      , StringUtils::SizeToString(m_State.cache_bytes).c_str()
                                      , m_State.cache_level * 100);
        if(m_playSpeed == 0 || m_caching == CACHESTATE_FULL)
          strBuf += StringUtils::Format(" %d sec", DVD_TIME_TO_SEC(m_State.cache_delay));
      }

      strGeneralInfo = StringUtils::Format("C( a/v:% 6.3f%s, dcpu:%2i%% acpu:%2i%% vcpu:%2i%%%s amp:% 5.2f )"
                                           , dDiff
                                           , strEDL.c_str()
                                           , (int)(CThread::GetRelativeUsage()*100)
                                           , (int)(m_VideoPlayerAudio->GetRelativeUsage()*100)
                                           , (int)(m_VideoPlayerVideo->GetRelativeUsage()*100)
                                           , strBuf.c_str()
                                           ,
m_VideoPlayerAudio->GetDynamicRangeAmplification());
    }
    else
    {
      double dDelay = m_VideoPlayerVideo->GetDelay() / DVD_TIME_BASE - m_renderManager.GetDisplayLatency();

      double apts = m_VideoPlayerAudio->GetCurrentPts();
      double vpts = m_VideoPlayerVideo->GetCurrentPts();
      double dDiff = 0;

      if( apts != DVD_NOPTS_VALUE && vpts != DVD_NOPTS_VALUE )
        dDiff = (apts - vpts) / DVD_TIME_BASE;

      std::string strEDL = StringUtils::Format(", edl:%s", m_Edl.GetInfo().c_str());

      std::string strBuf;
      CSingleLock lock(m_StateSection);
      if(m_State.cache_bytes >= 0)
      {
        strBuf += StringUtils::Format(" forward:%s %2.0f%%"
                                      , StringUtils::SizeToString(m_State.cache_bytes).c_str()
                                      , m_State.cache_level * 100);
        if(m_playSpeed == 0 || m_caching == CACHESTATE_FULL)
          strBuf += StringUtils::Format(" %d sec", DVD_TIME_TO_SEC(m_State.cache_delay));
      }

      strGeneralInfo = StringUtils::Format("C( ad:% 6.3f, a/v:% 6.3f%s, dcpu:%2i%% acpu:%2i%% vcpu:%2i%%%s )"
                                           , dDelay
                                           , dDiff
                                           , strEDL.c_str()
                                           , (int)(CThread::GetRelativeUsage()*100)
                                           , (int)(m_VideoPlayerAudio->GetRelativeUsage()*100)
                                           , (int)(m_VideoPlayerVideo->GetRelativeUsage()*100)
                                           , strBuf.c_str());
    }
  }
}

// Seek to a percentage of the total duration; no-op when duration is unknown.
void CVideoPlayer::SeekPercentage(float iPercent)
{
  int64_t iTotalTime = GetTotalTimeInMsec();

  if (!iTotalTime)
    return;

  SeekTime((int64_t)(iTotalTime * iPercent / 100));
}

// Current position as a percentage of the total duration (0 when unknown).
float CVideoPlayer::GetPercentage()
{
  int64_t iTotalTime = GetTotalTimeInMsec();

  if (!iTotalTime)
    return 0.0f;

  return GetTime() * 100 / (float)iTotalTime;
}

float CVideoPlayer::GetCachePercentage()
{
  CSingleLock lock(m_StateSection);
  return (float) (m_State.cache_offset * 100); // NOTE: Percentage returned is relative
}

// A/V delay is stored in DVD_TIME_BASE units; the public API uses seconds.
void CVideoPlayer::SetAVDelay(float fValue)
{
  m_VideoPlayerVideo->SetDelay( (fValue * DVD_TIME_BASE) ) ;
}

float CVideoPlayer::GetAVDelay()
{
  return (float) m_VideoPlayerVideo->GetDelay() / (float)DVD_TIME_BASE;
}

// Subtitle delay uses the opposite sign internally, hence the negations.
void CVideoPlayer::SetSubTitleDelay(float fValue)
{
  m_VideoPlayerVideo->SetSubtitleDelay(-fValue * DVD_TIME_BASE);
}

float CVideoPlayer::GetSubTitleDelay()
{
  return (float) -m_VideoPlayerVideo->GetSubtitleDelay() / DVD_TIME_BASE;
}

// priority: 1: libdvdnav, 2: external subtitles, 3: muxed subtitles
int CVideoPlayer::GetSubtitleCount()
{
  return m_SelectionStreams.Count(STREAM_SUBTITLE);
}

int CVideoPlayer::GetSubtitle()
{
  return m_SelectionStreams.IndexOf(STREAM_SUBTITLE, *this);
}

// Refresh the cached selection-stream metadata (bitrate, dimensions, codec,
// aspect/stereo mode) for the currently active video and audio streams.
void CVideoPlayer::UpdateStreamInfos()
{
  if (!m_pDemuxer)
    return;

  CSingleLock lock(m_SelectionStreams.m_section);
  int streamId;
  std::string retVal;

  // video
  streamId = GetVideoStream();

  if (streamId >= 0 && streamId < GetVideoStreamCount())
  {
    SelectionStream& s = m_SelectionStreams.Get(STREAM_VIDEO, streamId);
    s.bitrate = m_VideoPlayerVideo->GetVideoBitrate();
    s.aspect_ratio = m_renderManager.GetAspectRatio();
    CRect viewRect;
    m_renderManager.GetVideoRect(s.SrcRect, s.DestRect, viewRect);
    s.stereo_mode = m_VideoPlayerVideo->GetStereoMode();
    if (s.stereo_mode == "mono")
      s.stereo_mode = "";

    CDemuxStream* stream = m_pDemuxer->GetStream(m_CurrentVideo.id);
    if (stream && stream->type == STREAM_VIDEO)
    {
      s.width = ((CDemuxStreamVideo*)stream)->iWidth;
      s.height = ((CDemuxStreamVideo*)stream)->iHeight;
    }
  }

  // audio
  streamId = GetAudioStream();

  if (streamId >= 0 && streamId < GetAudioStreamCount())
  {
    SelectionStream& s = m_SelectionStreams.Get(STREAM_AUDIO, streamId);
    s.bitrate = m_VideoPlayerAudio->GetAudioBitrate();
    s.channels = m_VideoPlayerAudio->GetAudioChannels();

    CDemuxStream* stream = m_pDemuxer->GetStream(m_CurrentAudio.id);
    if (stream && stream->type == STREAM_AUDIO)
    {
      s.codec = m_pDemuxer->GetStreamCodecName(stream->iId);
    }
  }
}

// Fill 'info' for the subtitle stream at 'index'; out-of-range indices are ignored.
void CVideoPlayer::GetSubtitleStreamInfo(int index, SPlayerSubtitleStreamInfo &info)
{
  CSingleLock lock(m_SelectionStreams.m_section);
  if (index < 0 || index > (int) GetSubtitleCount() - 1)
    return;

  SelectionStream& s = m_SelectionStreams.Get(STREAM_SUBTITLE, index);
  if(s.name.length() > 0)
    info.name = s.name;

  if(s.type == STREAM_NONE)
    info.name += "(Invalid)";

  info.language = s.language;
}

// Request a subtitle stream switch via the player-thread message queue.
void CVideoPlayer::SetSubtitle(int
m_messenger.Put(new CDVDMsgPlayerSeek((int)iTime, true, true, true));
  SynchronizeDemuxer(100);
  m_callback.OnPlayBackSeek((int)iTime, seekOffset);
}

// Seek relative to the current position (msec); direction flag follows the sign.
bool CVideoPlayer::SeekTimeRelative(int64_t iTime)
{
  int64_t abstime = GetTime() + iTime;
  m_messenger.Put(new CDVDMsgPlayerSeek((int)abstime, (iTime < 0) ? true : false, true, false));
  SynchronizeDemuxer(100);
  m_callback.OnPlayBackSeek((int)abstime, iTime);
  return true;
}

// return the time in milliseconds
// Extrapolates from the last published state timestamp using the absolute
// clock and play speed, clamped to +/-500ms to bound drift.
int64_t CVideoPlayer::GetTime()
{
  CSingleLock lock(m_StateSection);
  double offset = 0;
  const double limit = DVD_MSEC_TO_TIME(500);
  if (m_State.timestamp > 0)
  {
    offset = CDVDClock::GetAbsoluteClock() - m_State.timestamp;
    offset *= m_playSpeed / DVD_PLAYSPEED_NORMAL;
    if (offset > limit)
      offset = limit;
    if (offset < -limit)
      offset = -limit;
  }
  return llrint(m_State.time + DVD_TIME_TO_MSEC(offset));
}

// return length in msec
int64_t CVideoPlayer::GetTotalTimeInMsec()
{
  CSingleLock lock(m_StateSection);
  return llrint(m_State.time_total);
}

// return length in seconds..
// this should be changed to return in milleseconds throughout xbmc
int64_t CVideoPlayer::GetTotalTime()
{
  return GetTotalTimeInMsec();
}

// Fast-forward/rewind entry point; iSpeed is a multiplier of normal speed.
void CVideoPlayer::ToFFRW(int iSpeed)
{
  // can't rewind in menu as seeking isn't possible
  // forward is fine
  if (iSpeed < 0 && IsInMenu())
    return;
  SetPlaySpeed(iSpeed * DVD_PLAYSPEED_NORMAL);
}

// Open one elementary stream (audio/video/subtitle/teletext/RDS) from the
// given source (demuxer, external subtitle demuxer, text file, video mux)
// and hand it to the matching stream player. On success the CCurrentStream
// bookkeeping is updated; on failure the demux stream is disabled so it is
// not retried. Returns whether the stream was opened.
bool CVideoPlayer::OpenStream(CCurrentStream& current, int iStream, int source, bool reset)
{
  CDemuxStream* stream = NULL;
  CDVDStreamInfo hint;

  CLog::Log(LOGNOTICE, "Opening stream: %i source: %i", iStream, source);

  if(STREAM_SOURCE_MASK(source) == STREAM_SOURCE_DEMUX_SUB)
  {
    // External vobsub subtitles: (re)open the dedicated subtitle demuxer
    // and seek it close to the current video position.
    int index = m_SelectionStreams.IndexOf(current.type, source, iStream);
    if(index < 0)
      return false;
    SelectionStream st = m_SelectionStreams.Get(current.type, index);

    if(!m_pSubtitleDemuxer || m_pSubtitleDemuxer->GetFileName() != st.filename)
    {
      CLog::Log(LOGNOTICE, "Opening Subtitle file: %s", st.filename.c_str());
      std::unique_ptr<CDVDDemuxVobsub> demux(new CDVDDemuxVobsub());
      if(!demux->Open(st.filename, source, st.filename2))
        return false;
      m_pSubtitleDemuxer = demux.release();
    }

    double pts = m_VideoPlayerVideo->GetCurrentPts();
    if(pts == DVD_NOPTS_VALUE)
      pts = m_CurrentVideo.dts;
    if(pts == DVD_NOPTS_VALUE)
      pts = 0;
    pts += m_offset_pts;
    if (!m_pSubtitleDemuxer->SeekTime((int)(1000.0 * pts / (double)DVD_TIME_BASE)))
      CLog::Log(LOGDEBUG, "%s - failed to start subtitle demuxing from: %f", __FUNCTION__, pts);
    stream = m_pSubtitleDemuxer->GetStream(iStream);
    if(!stream || stream->disabled)
      return false;
    stream->SetDiscard(AVDISCARD_NONE);

    hint.Assign(*stream, true);
  }
  else if(STREAM_SOURCE_MASK(source) == STREAM_SOURCE_TEXT)
  {
    // Plain text subtitle file: only a filename hint, frame rate borrowed
    // from the current video stream.
    int index = m_SelectionStreams.IndexOf(current.type, source, iStream);
    if(index < 0)
      return false;

    hint.Clear();
    hint.filename = m_SelectionStreams.Get(current.type, index).filename;
    hint.fpsscale = m_CurrentVideo.hint.fpsscale;
    hint.fpsrate = m_CurrentVideo.hint.fpsrate;
  }
  else if(STREAM_SOURCE_MASK(source) == STREAM_SOURCE_DEMUX)
  {
    if(!m_pDemuxer)
      return false;

    stream = m_pDemuxer->GetStream(iStream);
    if(!stream || stream->disabled)
      return false;
    stream->SetDiscard(AVDISCARD_NONE);

    hint.Assign(*stream, true);

    if(m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
      hint.filename = "dvd";
  }
  else if(STREAM_SOURCE_MASK(source) == STREAM_SOURCE_VIDEOMUX)
  {
    // Closed captions muxed inside the video stream.
    if(!m_pCCDemuxer)
      return false;

    stream = m_pCCDemuxer->GetStream(iStream);
    if(!stream || stream->disabled)
      return false;

    hint.Assign(*stream, false);
  }

  bool res;
  switch(current.type)
  {
    case STREAM_AUDIO:
      res = OpenAudioStream(hint, reset);
      break;
    case STREAM_VIDEO:
      res = OpenVideoStream(hint, reset);
      break;
    case STREAM_SUBTITLE:
      res = OpenSubtitleStream(hint);
      break;
    case STREAM_TELETEXT:
      res = OpenTeletextStream(hint);
      break;
    case STREAM_RADIO_RDS:
      res = OpenRadioRDSStream(hint);
      break;
    default:
      res = false;
      break;
  }

  if (res)
  {
    current.id = iStream;
    current.source = source;
    current.hint = hint;
    current.stream = (void*)stream;
    current.lastdts = DVD_NOPTS_VALUE;
    if (current.avsync != CCurrentStream::AV_SYNC_FORCE)
      current.avsync = CCurrentStream::AV_SYNC_CHECK;
    if(stream)
      current.changes = stream->changes;
  }
  else
  {
    if(stream)
    {
      /* mark stream as disabled, to disallaw further attempts*/
      CLog::Log(LOGWARNING, "%s - Unsupported stream %d. Stream disabled.", __FUNCTION__, stream->iId);
      stream->disabled = true;
      stream->SetDiscard(AVDISCARD_ALL);
    }
  }

  g_dataCacheCore.SignalAudioInfoChange();
  g_dataCacheCore.SignalVideoInfoChange();

  return res;
}

// (Re)open the audio stream player with new stream info. Skips the reopen
// when the same hint is already playing; 'reset' then just flushes state.
bool CVideoPlayer::OpenAudioStream(CDVDStreamInfo& hint, bool reset)
{
  IDVDStreamPlayer* player = GetStreamPlayer(m_CurrentAudio.player);
  if(player == nullptr)
    return false;

  if(m_CurrentAudio.id < 0 ||
     m_CurrentAudio.hint != hint)
  {
    if (!player->OpenStream(hint))
      return false;

    static_cast<IDVDStreamPlayerAudio*>(player)->SetSpeed(m_streamPlayerSpeed);
    m_CurrentAudio.syncState = IDVDStreamPlayer::SYNC_STARTING;
    m_CurrentAudio.packets = 0;
  }
  else if (reset)
    player->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET), 0);

  m_HasAudio = true;

  return true;
}

// (Re)open the video stream player with new stream info, fixing up hints
// for DVD (navigator aspect, stills) and PVR (fallback frame rates) sources.
bool CVideoPlayer::OpenVideoStream(CDVDStreamInfo& hint, bool reset)
{
  if (m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD))
  {
    /* set aspect ratio as requested by navigator for dvd's */
    float aspect = static_cast<CDVDInputStreamNavigator*>(m_pInputStream)->GetVideoAspectRatio();
    if (aspect != 0.0)
    {
      hint.aspect = aspect;
      hint.forced_aspect = true;
    }
    hint.dvd = true;
  }
  else if (m_pInputStream && m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER))
  {
    // set framerate if not set by demuxer
    if (hint.fpsrate == 0 || hint.fpsscale == 0)
    {
      int fpsidx = CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_FPS);
      if (fpsidx == 1)
      {
        hint.fpsscale = 1000;
        hint.fpsrate = 50000;
      }
      else if (fpsidx == 2)
      {
        hint.fpsscale = 1001;
        hint.fpsrate = 60000;
      }
    }
  }

  CDVDInputStream::IMenus* pMenus = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream);
  if(pMenus && pMenus->IsInMenu())
    hint.stills = true;

  if (hint.stereo_mode.empty())
    hint.stereo_mode = CStereoscopicsManager::GetInstance().DetectStereoModeByString(m_item.GetPath());

  SelectionStream& s = m_SelectionStreams.Get(STREAM_VIDEO, 0);

  if(hint.flags & AV_DISPOSITION_ATTACHED_PIC)
    return false;

  // set desired refresh rate
  if (m_PlayerOptions.fullscreen &&
g_graphicsContext.IsFullScreenRoot() && hint.fpsrate != 0 && hint.fpsscale != 0) { if (CSettings::GetInstance().GetInt(CSettings::SETTING_VIDEOPLAYER_ADJUSTREFRESHRATE) != ADJUST_REFRESHRATE_OFF) { float framerate = DVD_TIME_BASE / CDVDCodecUtils::NormalizeFrameduration((double)DVD_TIME_BASE * hint.fpsscale / hint.fpsrate); m_renderManager.TriggerUpdateResolution(framerate, hint.width, RenderManager::GetStereoModeFlags(hint.stereo_mode)); } } IDVDStreamPlayer* player = GetStreamPlayer(m_CurrentVideo.player); if(player == nullptr) return false; if(m_CurrentVideo.id < 0 || m_CurrentVideo.hint != hint) { if (hint.codec == AV_CODEC_ID_MPEG2VIDEO || hint.codec == AV_CODEC_ID_H264) SAFE_DELETE(m_pCCDemuxer); if (!player->OpenStream(hint)) return false; s.stereo_mode = static_cast<IDVDStreamPlayerVideo*>(player)->GetStereoMode(); if (s.stereo_mode == "mono") s.stereo_mode = ""; static_cast<IDVDStreamPlayerVideo*>(player)->SetSpeed(m_streamPlayerSpeed); m_CurrentVideo.syncState = IDVDStreamPlayer::SYNC_STARTING; m_CurrentVideo.packets = 0; } else if (reset) player->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET), 0); m_HasVideo = true; // open CC demuxer if video is mpeg2 if ((hint.codec == AV_CODEC_ID_MPEG2VIDEO || hint.codec == AV_CODEC_ID_H264) && !m_pCCDemuxer) { m_pCCDemuxer = new CDVDDemuxCC(hint.codec); m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_VIDEOMUX); } return true; } bool CVideoPlayer::OpenSubtitleStream(CDVDStreamInfo& hint) { IDVDStreamPlayer* player = GetStreamPlayer(m_CurrentSubtitle.player); if(player == nullptr) return false; if(m_CurrentSubtitle.id < 0 || m_CurrentSubtitle.hint != hint) { if (!player->OpenStream(hint)) return false; } return true; } bool CVideoPlayer::AdaptForcedSubtitles() { bool valid = false; SelectionStream ss = m_SelectionStreams.Get(STREAM_SUBTITLE, GetSubtitle()); if (ss.flags & CDemuxStream::FLAG_FORCED || !GetSubtitleVisible()) { SelectionStream as = m_SelectionStreams.Get(STREAM_AUDIO, GetAudioStream()); 
SelectionStreams streams = m_SelectionStreams.Get(STREAM_SUBTITLE); for(SelectionStreams::iterator it = streams.begin(); it != streams.end() && !valid; ++it) { if (it->flags & CDemuxStream::FLAG_FORCED && g_LangCodeExpander.CompareISO639Codes(it->language, as.language)) { if(OpenStream(m_CurrentSubtitle, it->id, it->source)) { valid = true; SetSubtitleVisibleInternal(true); } } } if(!valid) { CloseStream(m_CurrentSubtitle, true); SetSubtitleVisibleInternal(false); } } return valid; } bool CVideoPlayer::OpenTeletextStream(CDVDStreamInfo& hint) { if (!m_VideoPlayerTeletext->CheckStream(hint)) return false; IDVDStreamPlayer* player = GetStreamPlayer(m_CurrentTeletext.player); if(player == nullptr) return false; if(m_CurrentTeletext.id < 0 || m_CurrentTeletext.hint != hint) { if (!player->OpenStream(hint)) return false; } return true; }<|fim▁hole|> return false; IDVDStreamPlayer* player = GetStreamPlayer(m_CurrentRadioRDS.player); if(player == nullptr) return false; if(m_CurrentRadioRDS.id < 0 || m_CurrentRadioRDS.hint != hint) { if (!player->OpenStream(hint)) return false; } return true; } bool CVideoPlayer::CloseStream(CCurrentStream& current, bool bWaitForBuffers) { if (current.id < 0) return false; CLog::Log(LOGNOTICE, "Closing stream player %d", current.player); if(bWaitForBuffers) SetCaching(CACHESTATE_DONE); IDVDStreamPlayer* player = GetStreamPlayer(current.player); if(player) { if ((current.type == STREAM_AUDIO && current.syncState != IDVDStreamPlayer::SYNC_INSYNC) || (current.type == STREAM_VIDEO && current.syncState != IDVDStreamPlayer::SYNC_INSYNC)) bWaitForBuffers = false; player->CloseStream(bWaitForBuffers); } current.Clear(); return true; } void CVideoPlayer::FlushBuffers(bool queued, double pts, bool accurate, bool sync) { CLog::Log(LOGDEBUG, "CVideoPlayer::FlushBuffers - flushing buffers"); double startpts; if (accurate && !m_omxplayer_mode) startpts = pts; else startpts = DVD_NOPTS_VALUE; if (sync) { m_CurrentAudio.inited = false; 
m_CurrentAudio.avsync = CCurrentStream::AV_SYNC_FORCE; m_CurrentVideo.inited = false; m_CurrentVideo.avsync = CCurrentStream::AV_SYNC_FORCE; m_CurrentSubtitle.inited = false; m_CurrentTeletext.inited = false; m_CurrentRadioRDS.inited = false; } m_CurrentAudio.dts = DVD_NOPTS_VALUE; m_CurrentAudio.startpts = startpts; m_CurrentAudio.packets = 0; m_CurrentVideo.dts = DVD_NOPTS_VALUE; m_CurrentVideo.startpts = startpts; m_CurrentVideo.packets = 0; m_CurrentSubtitle.dts = DVD_NOPTS_VALUE; m_CurrentSubtitle.startpts = startpts; m_CurrentSubtitle.packets = 0; m_CurrentTeletext.dts = DVD_NOPTS_VALUE; m_CurrentTeletext.startpts = startpts; m_CurrentTeletext.packets = 0; m_CurrentRadioRDS.dts = DVD_NOPTS_VALUE; m_CurrentRadioRDS.startpts = startpts; m_CurrentRadioRDS.packets = 0; if (queued) { m_VideoPlayerAudio->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET)); m_VideoPlayerVideo->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET)); m_VideoPlayerVideo->SendMessage(new CDVDMsg(CDVDMsg::VIDEO_NOSKIP)); m_VideoPlayerSubtitle->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET)); m_VideoPlayerTeletext->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET)); m_VideoPlayerRadioRDS->SendMessage(new CDVDMsg(CDVDMsg::GENERAL_RESET)); SynchronizePlayers(SYNCSOURCE_ALL); } else { m_VideoPlayerAudio->Flush(sync); m_VideoPlayerVideo->Flush(sync); m_VideoPlayerSubtitle->Flush(); m_VideoPlayerTeletext->Flush(); m_VideoPlayerRadioRDS->Flush(); // clear subtitle and menu overlays m_overlayContainer.Clear(); if(m_playSpeed == DVD_PLAYSPEED_NORMAL || m_playSpeed == DVD_PLAYSPEED_PAUSE) { // make sure players are properly flushed, should put them in stalled state CDVDMsgGeneralSynchronize* msg = new CDVDMsgGeneralSynchronize(1000, 0); m_VideoPlayerAudio->SendMessage(msg->Acquire(), 1); m_VideoPlayerVideo->SendMessage(msg->Acquire(), 1); msg->Wait(&m_bStop, 0); msg->Release(); // purge any pending PLAYER_STARTED messages m_messenger.Flush(CDVDMsg::PLAYER_STARTED); // we should now wait for init cache 
SetCaching(CACHESTATE_FLUSH);
        if (sync)
        {
          // Players must re-synchronise after the flush.
          m_CurrentAudio.syncState = IDVDStreamPlayer::SYNC_STARTING;
          m_CurrentVideo.syncState = IDVDStreamPlayer::SYNC_STARTING;
        }
      }

      // Re-anchor the master clock at the flush position.
      if(pts != DVD_NOPTS_VALUE && sync)
        m_clock.Discontinuity(pts);
      UpdatePlayState(0);

      // update state, buffers are flushed and it may take some time until
      // we get an update from players
      CSingleLock lock(m_StateSection);
      // NOTE(review): self-assignment is a no-op; presumably this was meant
      // to copy from a staging state (e.g. an input/working copy) into
      // m_State under the lock -- confirm against upstream history.
      m_State = m_State;
    }

    if (m_omxplayer_mode)
    {
      // OMX path keeps its own hardware clock; flush and re-arm it to 0.
      m_OmxPlayerState.av_clock.OMXFlush();
      if (!queued)
        m_OmxPlayerState.av_clock.OMXStop();
      m_OmxPlayerState.av_clock.OMXPause();
      m_OmxPlayerState.av_clock.OMXMediaTime(0.0);
    }
  }

// since we call ffmpeg functions to decode, this is being called in the same thread as ::Process() is
// Handle a navigation event from the dvdnav/bluray input stream. For bluray
// input, iMessage is a small integer protocol (overlay add, flush, stream
// selects, subtitle enable, still frame, error); for DVD input the DVDNAV_*
// event codes are handled in the switch further below. Returns a NAVRESULT_*
// code (or 0 for the bluray path).
int CVideoPlayer::OnDVDNavResult(void* pData, int iMessage)
{
  if (m_pInputStream->IsStreamType(DVDSTREAM_TYPE_BLURAY))
  {
    if(iMessage == 0)
      m_overlayContainer.Add((CDVDOverlay*)pData);
    else if(iMessage == 1)
      m_messenger.Put(new CDVDMsg(CDVDMsg::GENERAL_FLUSH));
    else if(iMessage == 2)
      m_dvd.iSelectedAudioStream = *(int*)pData;
    else if(iMessage == 3)
      m_dvd.iSelectedSPUStream = *(int*)pData;
    else if(iMessage == 4)
      m_VideoPlayerVideo->EnableSubtitle(*(int*)pData ?
true: false); else if(iMessage == 5) { if (m_dvd.state != DVDSTATE_STILL) { // else notify the player we have received a still frame m_dvd.iDVDStillTime = *(int*)pData; m_dvd.iDVDStillStartTime = XbmcThreads::SystemClockMillis(); /* adjust for the output delay in the video queue */ unsigned int time = 0; if( m_CurrentVideo.stream && m_dvd.iDVDStillTime > 0 ) { time = (unsigned int)(m_VideoPlayerVideo->GetOutputDelay() / ( DVD_TIME_BASE / 1000 )); if( time < 10000 && time > 0 ) m_dvd.iDVDStillTime += time; } m_dvd.state = DVDSTATE_STILL; CLog::Log(LOGDEBUG, "DVDNAV_STILL_FRAME - waiting %i sec, with delay of %d sec", m_dvd.iDVDStillTime, time / 1000); } } else if (iMessage == 6) { m_dvd.state = DVDSTATE_NORMAL; CLog::Log(LOGDEBUG, "CVideoPlayer::OnDVDNavResult - libbluray read error (DVDSTATE_NORMAL)"); CGUIDialogKaiToast::QueueNotification(g_localizeStrings.Get(25008), g_localizeStrings.Get(25009)); } return 0; } if (m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)) { CDVDInputStreamNavigator* pStream = (CDVDInputStreamNavigator*)m_pInputStream; switch (iMessage) { case DVDNAV_STILL_FRAME: { //CLog::Log(LOGDEBUG, "DVDNAV_STILL_FRAME"); dvdnav_still_event_t *still_event = (dvdnav_still_event_t *)pData; // should wait the specified time here while we let the player running // after that call dvdnav_still_skip(m_dvdnav); if (m_dvd.state != DVDSTATE_STILL) { // else notify the player we have received a still frame if(still_event->length < 0xff) m_dvd.iDVDStillTime = still_event->length * 1000; else m_dvd.iDVDStillTime = 0; m_dvd.iDVDStillStartTime = XbmcThreads::SystemClockMillis(); /* adjust for the output delay in the video queue */ unsigned int time = 0; if( m_CurrentVideo.stream && m_dvd.iDVDStillTime > 0 ) { time = (unsigned int)(m_VideoPlayerVideo->GetOutputDelay() / ( DVD_TIME_BASE / 1000 )); if( time < 10000 && time > 0 ) m_dvd.iDVDStillTime += time; } m_dvd.state = DVDSTATE_STILL; CLog::Log(LOGDEBUG, "DVDNAV_STILL_FRAME - waiting %i sec, with delay of %d sec", 
still_event->length, time / 1000); } return NAVRESULT_HOLD; } break; case DVDNAV_SPU_CLUT_CHANGE: { m_VideoPlayerSubtitle->SendMessage(new CDVDMsgSubtitleClutChange((uint8_t*)pData)); } break; case DVDNAV_SPU_STREAM_CHANGE: { dvdnav_spu_stream_change_event_t* event = (dvdnav_spu_stream_change_event_t*)pData; int iStream = event->physical_wide; bool visible = !(iStream & 0x80); SetSubtitleVisibleInternal(visible); if (iStream >= 0) m_dvd.iSelectedSPUStream = (iStream & ~0x80); else m_dvd.iSelectedSPUStream = -1; m_CurrentSubtitle.stream = NULL; } break; case DVDNAV_AUDIO_STREAM_CHANGE: { // This should be the correct way i think, however we don't have any streams right now // since the demuxer hasn't started so it doesn't change. not sure how to do this. dvdnav_audio_stream_change_event_t* event = (dvdnav_audio_stream_change_event_t*)pData; // Tell system what audiostream should be opened by default if (event->logical >= 0) m_dvd.iSelectedAudioStream = event->physical; else m_dvd.iSelectedAudioStream = -1; m_CurrentAudio.stream = NULL; } break; case DVDNAV_HIGHLIGHT: { //dvdnav_highlight_event_t* pInfo = (dvdnav_highlight_event_t*)pData; int iButton = pStream->GetCurrentButton(); CLog::Log(LOGDEBUG, "DVDNAV_HIGHLIGHT: Highlight button %d\n", iButton); m_VideoPlayerSubtitle->UpdateOverlayInfo((CDVDInputStreamNavigator*)m_pInputStream, LIBDVDNAV_BUTTON_NORMAL); } break; case DVDNAV_VTS_CHANGE: { //dvdnav_vts_change_event_t* vts_change_event = (dvdnav_vts_change_event_t*)pData; CLog::Log(LOGDEBUG, "DVDNAV_VTS_CHANGE"); //Make sure we clear all the old overlays here, or else old forced items are left. 
m_overlayContainer.Clear(); //Force an aspect ratio that is set in the dvdheaders if available m_CurrentVideo.hint.aspect = pStream->GetVideoAspectRatio(); if( m_VideoPlayerVideo->IsInited() ) m_VideoPlayerVideo->SendMessage(new CDVDMsgDouble(CDVDMsg::VIDEO_SET_ASPECT, m_CurrentVideo.hint.aspect)); m_SelectionStreams.Clear(STREAM_NONE, STREAM_SOURCE_NAV); m_SelectionStreams.Update(m_pInputStream, m_pDemuxer); return NAVRESULT_HOLD; } break; case DVDNAV_CELL_CHANGE: { //dvdnav_cell_change_event_t* cell_change_event = (dvdnav_cell_change_event_t*)pData; CLog::Log(LOGDEBUG, "DVDNAV_CELL_CHANGE"); if (m_dvd.state != DVDSTATE_STILL) m_dvd.state = DVDSTATE_NORMAL; } break; case DVDNAV_NAV_PACKET: { //pci_t* pci = (pci_t*)pData; // this should be possible to use to make sure we get // seamless transitions over these boundaries // if we remember the old vobunits boundaries // when a packet comes out of demuxer that has // pts values outside that boundary, it belongs // to the new vobunit, wich has new timestamps UpdatePlayState(0); } break; case DVDNAV_HOP_CHANNEL: { // This event is issued whenever a non-seamless operation has been executed. // Applications with fifos should drop the fifos content to speed up responsiveness. 
CLog::Log(LOGDEBUG, "DVDNAV_HOP_CHANNEL"); if(m_dvd.state == DVDSTATE_SEEK) m_dvd.state = DVDSTATE_NORMAL; else { bool sync = !IsInMenuInternal(); FlushBuffers(false, DVD_NOPTS_VALUE, false, sync); m_dvd.syncClock = true; m_dvd.state = DVDSTATE_NORMAL; if (m_pDemuxer) m_pDemuxer->Flush(); } return NAVRESULT_ERROR; } break; case DVDNAV_STOP: { CLog::Log(LOGDEBUG, "DVDNAV_STOP"); m_dvd.state = DVDSTATE_NORMAL; CGUIDialogKaiToast::QueueNotification(g_localizeStrings.Get(16026), g_localizeStrings.Get(16029)); } break; default: {} break; } } return NAVRESULT_NOP; } bool CVideoPlayer::ShowPVRChannelInfo(void) { bool bReturn(false); if (CSettings::GetInstance().GetInt(CSettings::SETTING_PVRMENU_DISPLAYCHANNELINFO) > 0) { g_PVRManager.ShowPlayerInfo(CSettings::GetInstance().GetInt(CSettings::SETTING_PVRMENU_DISPLAYCHANNELINFO)); bReturn = true; } return bReturn; } bool CVideoPlayer::OnAction(const CAction &action) { #define THREAD_ACTION(action) \ do { \ if (!IsCurrentThread()) { \ m_messenger.Put(new CDVDMsgType<CAction>(CDVDMsg::GENERAL_GUI_ACTION, action)); \ return true; \ } \ } while(false) CDVDInputStream::IMenus* pMenus = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream); if (pMenus) { if (m_dvd.state == DVDSTATE_STILL && m_dvd.iDVDStillTime != 0 && pMenus->GetTotalButtons() == 0) { switch(action.GetID()) { case ACTION_NEXT_ITEM: case ACTION_MOVE_RIGHT: case ACTION_MOVE_UP: case ACTION_SELECT_ITEM: { THREAD_ACTION(action); /* this will force us out of the stillframe */ CLog::Log(LOGDEBUG, "%s - User asked to exit stillframe", __FUNCTION__); m_dvd.iDVDStillStartTime = 0; m_dvd.iDVDStillTime = 1; } return true; } } switch (action.GetID()) { /* this code is disabled to allow switching playlist items (dvdimage "stacks") */ #if 0 case ACTION_PREV_ITEM: // SKIP-: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - pushed prev"); pMenus->OnPrevious(); g_infoManager.SetDisplayAfterSeek(); return true; } break; case ACTION_NEXT_ITEM: // SKIP+: { THREAD_ACTION(action); 
CLog::Log(LOGDEBUG, " - pushed next"); pMenus->OnNext(); g_infoManager.SetDisplayAfterSeek(); return true; } break; #endif case ACTION_SHOW_VIDEOMENU: // start button { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - go to menu"); pMenus->OnMenu(); if (m_playSpeed == DVD_PLAYSPEED_PAUSE) { SetPlaySpeed(DVD_PLAYSPEED_NORMAL); m_callback.OnPlayBackResumed(); } // send a message to everyone that we've gone to the menu CGUIMessage msg(GUI_MSG_VIDEO_MENU_STARTED, 0, 0); g_windowManager.SendThreadMessage(msg); return true; } break; } if (pMenus->IsInMenu()) { switch (action.GetID()) { case ACTION_NEXT_ITEM: THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - pushed next in menu, stream will decide"); pMenus->OnNext(); g_infoManager.SetDisplayAfterSeek(); return true; case ACTION_PREV_ITEM: THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - pushed prev in menu, stream will decide"); pMenus->OnPrevious(); g_infoManager.SetDisplayAfterSeek(); return true; case ACTION_PREVIOUS_MENU: case ACTION_NAV_BACK: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - menu back"); pMenus->OnBack(); } break; case ACTION_MOVE_LEFT: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - move left"); pMenus->OnLeft(); } break; case ACTION_MOVE_RIGHT: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - move right"); pMenus->OnRight(); } break; case ACTION_MOVE_UP: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - move up"); pMenus->OnUp(); } break; case ACTION_MOVE_DOWN: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - move down"); pMenus->OnDown(); } break; case ACTION_MOUSE_MOVE: case ACTION_MOUSE_LEFT_CLICK: { CRect rs, rd, rv; m_renderManager.GetVideoRect(rs, rd, rv); CPoint pt(action.GetAmount(), action.GetAmount(1)); if (!rd.PtInRect(pt)) return false; // out of bounds THREAD_ACTION(action); // convert to video coords... 
pt -= CPoint(rd.x1, rd.y1); pt.x *= rs.Width() / rd.Width(); pt.y *= rs.Height() / rd.Height(); pt += CPoint(rs.x1, rs.y1); if (action.GetID() == ACTION_MOUSE_LEFT_CLICK) { if (pMenus->OnMouseClick(pt)) return true; else { CApplicationMessenger::GetInstance().PostMsg(TMSG_GUI_ACTION, WINDOW_INVALID, -1, static_cast<void*>(new CAction(ACTION_TRIGGER_OSD))); return false; } } return pMenus->OnMouseMove(pt); } break; case ACTION_SELECT_ITEM: { THREAD_ACTION(action); CLog::Log(LOGDEBUG, " - button select"); // show button pushed overlay if(m_pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)) m_VideoPlayerSubtitle->UpdateOverlayInfo((CDVDInputStreamNavigator*)m_pInputStream, LIBDVDNAV_BUTTON_CLICKED); pMenus->ActivateButton(); } break; case REMOTE_0: case REMOTE_1: case REMOTE_2: case REMOTE_3: case REMOTE_4: case REMOTE_5: case REMOTE_6: case REMOTE_7: case REMOTE_8: case REMOTE_9: { THREAD_ACTION(action); // Offset from key codes back to button number int button = action.GetID() - REMOTE_0; CLog::Log(LOGDEBUG, " - button pressed %d", button); pMenus->SelectButton(button); } break; default: return false; break; } return true; // message is handled } } if (dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream)) { switch (action.GetID()) { case ACTION_MOVE_UP: case ACTION_NEXT_ITEM: case ACTION_CHANNEL_UP: { bool bPreview(action.GetID() == ACTION_MOVE_UP && // only up/down shows a preview, all others do switch CSettings::GetInstance().GetBool(CSettings::SETTING_PVRPLAYBACK_CONFIRMCHANNELSWITCH)); if (bPreview) m_messenger.Put(new CDVDMsg(CDVDMsg::PLAYER_CHANNEL_PREVIEW_NEXT)); else { m_messenger.Put(new CDVDMsg(CDVDMsg::PLAYER_CHANNEL_NEXT)); if (CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_CHANNELENTRYTIMEOUT) == 0) g_infoManager.SetDisplayAfterSeek(); } ShowPVRChannelInfo(); return true; } case ACTION_MOVE_DOWN: case ACTION_PREV_ITEM: case ACTION_CHANNEL_DOWN: { bool bPreview(action.GetID() == ACTION_MOVE_DOWN && // only up/down shows a preview, all 
others do switch CSettings::GetInstance().GetBool(CSettings::SETTING_PVRPLAYBACK_CONFIRMCHANNELSWITCH)); if (bPreview) m_messenger.Put(new CDVDMsg(CDVDMsg::PLAYER_CHANNEL_PREVIEW_PREV)); else { m_messenger.Put(new CDVDMsg(CDVDMsg::PLAYER_CHANNEL_PREV)); if (CSettings::GetInstance().GetInt(CSettings::SETTING_PVRPLAYBACK_CHANNELENTRYTIMEOUT) == 0) g_infoManager.SetDisplayAfterSeek(); } ShowPVRChannelInfo(); return true; } case ACTION_CHANNEL_SWITCH: { // Offset from key codes back to button number int channel = (int) action.GetAmount(); m_messenger.Put(new CDVDMsgInt(CDVDMsg::PLAYER_CHANNEL_SELECT_NUMBER, channel)); g_infoManager.SetDisplayAfterSeek(); ShowPVRChannelInfo(); return true; } break; } } switch (action.GetID()) { case ACTION_NEXT_ITEM: if (GetChapter() > 0 && GetChapter() < GetChapterCount()) { m_messenger.Put(new CDVDMsgPlayerSeekChapter(GetChapter() + 1)); g_infoManager.SetDisplayAfterSeek(); return true; } else break; case ACTION_PREV_ITEM: if (GetChapter() > 0) { m_messenger.Put(new CDVDMsgPlayerSeekChapter(GetChapter() - 1)); g_infoManager.SetDisplayAfterSeek(); return true; } else break; } // return false to inform the caller we didn't handle the message return false; } bool CVideoPlayer::IsInMenuInternal() const { CDVDInputStream::IMenus* pStream = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream); if (pStream) { if (m_dvd.state == DVDSTATE_STILL) return true; else return pStream->IsInMenu(); } return false; } bool CVideoPlayer::IsInMenu() const { CSingleLock lock(m_StateSection); return m_State.isInMenu; } bool CVideoPlayer::HasMenu() const { CSingleLock lock(m_StateSection); return m_State.hasMenu; } std::string CVideoPlayer::GetPlayerState() { CSingleLock lock(m_StateSection); return m_State.player_state; } bool CVideoPlayer::SetPlayerState(const std::string& state) { m_messenger.Put(new CDVDMsgPlayerSetState(state)); return true; } int CVideoPlayer::GetChapterCount() { CSingleLock lock(m_StateSection); return m_State.chapters.size(); } int 
CVideoPlayer::GetChapter() { CSingleLock lock(m_StateSection); return m_State.chapter; } void CVideoPlayer::GetChapterName(std::string& strChapterName, int chapterIdx) { CSingleLock lock(m_StateSection); if (chapterIdx == -1 && m_State.chapter > 0 && m_State.chapter <= (int) m_State.chapters.size()) strChapterName = m_State.chapters[m_State.chapter - 1].first; else if (chapterIdx > 0 && chapterIdx <= (int) m_State.chapters.size()) strChapterName = m_State.chapters[chapterIdx - 1].first; } int CVideoPlayer::SeekChapter(int iChapter) { if (GetChapter() > 0) { if (iChapter < 0) iChapter = 0; if (iChapter > GetChapterCount()) return 0; // Seek to the chapter. m_messenger.Put(new CDVDMsgPlayerSeekChapter(iChapter)); SynchronizeDemuxer(100); } return 0; } int64_t CVideoPlayer::GetChapterPos(int chapterIdx) { CSingleLock lock(m_StateSection); if (chapterIdx > 0 && chapterIdx <= (int) m_State.chapters.size()) return m_State.chapters[chapterIdx - 1].second; return -1; } void CVideoPlayer::AddSubtitle(const std::string& strSubPath) { m_messenger.Put(new CDVDMsgType<std::string>(CDVDMsg::SUBTITLE_ADDFILE, strSubPath)); } int CVideoPlayer::GetCacheLevel() const { CSingleLock lock(m_StateSection); return (int)(m_State.cache_level * 100); } double CVideoPlayer::GetQueueTime() { int a = m_VideoPlayerAudio->GetLevel(); int v = m_VideoPlayerVideo->GetLevel(); return std::max(a, v) * 8000.0 / 100; } void CVideoPlayer::GetVideoStreamInfo(int streamId, SPlayerVideoStreamInfo &info) { CSingleLock lock(m_SelectionStreams.m_section); if (streamId == CURRENT_STREAM) streamId = GetVideoStream(); if (streamId < 0 || streamId > GetVideoStreamCount() - 1) return; SelectionStream& s = m_SelectionStreams.Get(STREAM_VIDEO, streamId); if (s.language.length() > 0) info.language = s.language; if (s.name.length() > 0) info.name = s.name; info.bitrate = s.bitrate; info.width = s.width; info.height = s.height; info.SrcRect = s.SrcRect; info.DestRect = s.DestRect; info.videoCodecName = s.codec; 
info.videoAspectRatio = s.aspect_ratio; info.stereoMode = s.stereo_mode; } int CVideoPlayer::GetSourceBitrate() { if (m_pInputStream) return (int)m_pInputStream->GetBitstreamStats().GetBitrate(); return 0; } void CVideoPlayer::GetAudioStreamInfo(int index, SPlayerAudioStreamInfo &info) { CSingleLock lock(m_SelectionStreams.m_section); if (index == CURRENT_STREAM) index = GetAudioStream(); if (index < 0 || index > GetAudioStreamCount() - 1 ) return; SelectionStream& s = m_SelectionStreams.Get(STREAM_AUDIO, index); if(s.language.length() > 0) info.language = s.language; if(s.name.length() > 0) info.name = s.name; if(s.type == STREAM_NONE) info.name += " (Invalid)"; info.bitrate = s.bitrate; info.channels = s.channels; info.audioCodecName = s.codec; } int CVideoPlayer::AddSubtitleFile(const std::string& filename, const std::string& subfilename) { std::string ext = URIUtils::GetExtension(filename); std::string vobsubfile = subfilename; if (ext == ".idx") { if (vobsubfile.empty()) { // find corresponding .sub (e.g. 
in case of manually selected .idx sub) vobsubfile = CUtil::GetVobSubSubFromIdx(filename); if (vobsubfile.empty()) return -1; } CDVDDemuxVobsub v; if (!v.Open(filename, STREAM_SOURCE_NONE, vobsubfile)) return -1; m_SelectionStreams.Update(NULL, &v, vobsubfile); ExternalStreamInfo info; CUtil::GetExternalStreamDetailsFromFilename(m_item.GetPath(), vobsubfile, info); for (int i = 0; i < v.GetNrOfSubtitleStreams(); ++i) { int index = m_SelectionStreams.IndexOf(STREAM_SUBTITLE, m_SelectionStreams.Source(STREAM_SOURCE_DEMUX_SUB, filename), i); SelectionStream& stream = m_SelectionStreams.Get(STREAM_SUBTITLE, index); if (stream.name.empty()) stream.name = info.name; if (stream.language.empty()) stream.language = info.language; if (static_cast<CDemuxStream::EFlags>(info.flag) != CDemuxStream::FLAG_NONE) stream.flags = static_cast<CDemuxStream::EFlags>(info.flag); } return m_SelectionStreams.IndexOf(STREAM_SUBTITLE, m_SelectionStreams.Source(STREAM_SOURCE_DEMUX_SUB, filename), 0); } if(ext == ".sub") { // if this looks like vobsub file (i.e. 
.idx found), add it as such std::string vobsubidx = CUtil::GetVobSubIdxFromSub(filename); if (!vobsubidx.empty()) return AddSubtitleFile(vobsubidx, filename); } SelectionStream s; s.source = m_SelectionStreams.Source(STREAM_SOURCE_TEXT, filename); s.type = STREAM_SUBTITLE; s.id = 0; s.filename = filename; ExternalStreamInfo info; CUtil::GetExternalStreamDetailsFromFilename(m_item.GetPath(), filename, info); s.name = info.name; s.language = info.language; if (static_cast<CDemuxStream::EFlags>(info.flag) != CDemuxStream::FLAG_NONE) s.flags = static_cast<CDemuxStream::EFlags>(info.flag); m_SelectionStreams.Update(s); return m_SelectionStreams.IndexOf(STREAM_SUBTITLE, s.source, s.id); } void CVideoPlayer::UpdatePlayState(double timeout) { if(m_State.timestamp != 0 && m_State.timestamp + DVD_MSEC_TO_TIME(timeout) > CDVDClock::GetAbsoluteClock()) return; SPlayerState state(m_State); if (m_CurrentVideo.dts != DVD_NOPTS_VALUE) state.dts = m_CurrentVideo.dts; else if (m_CurrentAudio.dts != DVD_NOPTS_VALUE) state.dts = m_CurrentAudio.dts; else if (m_CurrentVideo.startpts != DVD_NOPTS_VALUE) state.dts = m_CurrentVideo.startpts; else if (m_CurrentAudio.startpts != DVD_NOPTS_VALUE) state.dts = m_CurrentAudio.startpts; if (m_pDemuxer) { if (IsInMenuInternal()) state.chapter = 0; else state.chapter = m_pDemuxer->GetChapter(); state.chapters.clear(); if (m_pDemuxer->GetChapterCount() > 0) { for (int i = 0; i < m_pDemuxer->GetChapterCount(); ++i) { std::string name; m_pDemuxer->GetChapterName(name, i + 1); state.chapters.push_back(make_pair(name, m_pDemuxer->GetChapterPos(i + 1))); } } // time = dts - m_offset_pts state.time = DVD_TIME_TO_MSEC(m_clock.GetClock(false)); state.time_offset = 0; state.time_total = m_pDemuxer->GetStreamLength(); } state.canpause = true; state.canseek = true; state.isInMenu = false; state.hasMenu = false; if (m_pInputStream) { // override from input stream if needed CDVDInputStreamPVRManager* pvrStream = 
dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream); if (pvrStream) { state.canrecord = pvrStream->CanRecord(); state.recording = pvrStream->IsRecording(); } CDVDInputStream::IDisplayTime* pDisplayTime = m_pInputStream->GetIDisplayTime(); if (pDisplayTime && pDisplayTime->GetTotalTime() > 0) { if (state.dts != DVD_NOPTS_VALUE) { // dts is correct by offset_pts, so we need to revert this correction here // the results is: time = pDisplayTime->GetTime() state.time_offset += DVD_MSEC_TO_TIME(pDisplayTime->GetTime()) - state.dts + m_offset_pts; state.time += DVD_TIME_TO_MSEC(state.time_offset); } state.time_total = pDisplayTime->GetTotalTime(); } if (CDVDInputStream::IMenus* ptr = dynamic_cast<CDVDInputStream::IMenus*>(m_pInputStream)) { if (!ptr->GetState(state.player_state)) state.player_state = ""; if (m_dvd.state == DVDSTATE_STILL) { state.time = XbmcThreads::SystemClockMillis() - m_dvd.iDVDStillStartTime; state.time_total = m_dvd.iDVDStillTime; state.isInMenu = true; } else if (IsInMenuInternal()) { state.time = pDisplayTime->GetTime(); state.time_offset = 0; state.isInMenu = true; } state.hasMenu = true; } state.canpause = m_pInputStream->CanPause(); state.canseek = m_pInputStream->CanSeek(); } if (m_Edl.HasCut()) { state.time = (double) m_Edl.RemoveCutTime(llrint(state.time)); state.time_total = (double) m_Edl.RemoveCutTime(llrint(state.time_total)); } if(state.time_total <= 0) state.canseek = false; if (m_CurrentAudio.id >= 0 && m_pDemuxer) { CDemuxStream* pStream = m_pDemuxer->GetStream(m_CurrentAudio.id); if (pStream && pStream->type == STREAM_AUDIO) state.demux_audio = ((CDemuxStreamAudio*)pStream)->GetStreamInfo(); } else state.demux_audio = ""; if (m_CurrentVideo.id >= 0 && m_pDemuxer) { CDemuxStream* pStream = m_pDemuxer->GetStream(m_CurrentVideo.id); if (pStream && pStream->type == STREAM_VIDEO) state.demux_video = ((CDemuxStreamVideo*)pStream)->GetStreamInfo(); } else state.demux_video = ""; double level, delay, offset; if(GetCachingTimes(level, 
delay, offset)) { state.cache_delay = std::max(0.0, delay); state.cache_level = std::max(0.0, std::min(1.0, level)); state.cache_offset = offset; } else { state.cache_delay = 0.0; state.cache_level = std::min(1.0, GetQueueTime() / 8000.0); state.cache_offset = GetQueueTime() / state.time_total; } XFILE::SCacheStatus status; if(m_pInputStream && m_pInputStream->GetCacheStatus(&status)) { state.cache_bytes = status.forward; if(state.time_total) state.cache_bytes += m_pInputStream->GetLength() * (int64_t) (GetQueueTime() / state.time_total); } else state.cache_bytes = 0; state.timestamp = CDVDClock::GetAbsoluteClock(); CSingleLock lock(m_StateSection); m_State = state; } void CVideoPlayer::UpdateApplication(double timeout) { if(m_UpdateApplication != 0 && m_UpdateApplication + DVD_MSEC_TO_TIME(timeout) > CDVDClock::GetAbsoluteClock()) return; CDVDInputStreamPVRManager* pStream = dynamic_cast<CDVDInputStreamPVRManager*>(m_pInputStream); if(pStream) { CFileItem item(g_application.CurrentFileItem()); if(pStream->UpdateItem(item)) { g_application.CurrentFileItem() = item; CApplicationMessenger::GetInstance().PostMsg(TMSG_UPDATE_CURRENT_ITEM, 0, -1, static_cast<void*>(new CFileItem(item))); } } m_UpdateApplication = CDVDClock::GetAbsoluteClock(); } bool CVideoPlayer::CanRecord() { CSingleLock lock(m_StateSection); return m_State.canrecord; } bool CVideoPlayer::IsRecording() { CSingleLock lock(m_StateSection); return m_State.recording; } bool CVideoPlayer::Record(bool bOnOff) { if (m_pInputStream && (m_pInputStream->IsStreamType(DVDSTREAM_TYPE_TV) || m_pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER)) ) { m_messenger.Put(new CDVDMsgBool(CDVDMsg::PLAYER_SET_RECORD, bOnOff)); return true; } return false; } bool CVideoPlayer::GetStreamDetails(CStreamDetails &details) { if (m_pDemuxer) { std::vector<SelectionStream> subs = m_SelectionStreams.Get(STREAM_SUBTITLE); std::vector<CStreamDetailSubtitle> extSubDetails; for (unsigned int i = 0; i < subs.size(); i++) { if 
(subs[i].filename == m_item.GetPath()) continue; CStreamDetailSubtitle p; p.m_strLanguage = subs[i].language; extSubDetails.push_back(p); } bool result = CDVDFileInfo::DemuxerToStreamDetails(m_pInputStream, m_pDemuxer, extSubDetails, details); if (result && details.GetStreamCount(CStreamDetail::VIDEO) > 0) // this is more correct (dvds in particular) { /* * We can only obtain the aspect & duration from VideoPlayer when the Process() thread is running * and UpdatePlayState() has been called at least once. In this case VideoPlayer duration/AR will * return 0 and we'll have to fallback to the (less accurate) info from the demuxer. */ float aspect = m_renderManager.GetAspectRatio(); if (aspect > 0.0f) ((CStreamDetailVideo*)details.GetNthStream(CStreamDetail::VIDEO,0))->m_fAspect = aspect; int64_t duration = GetTotalTime() / 1000; if (duration > 0) ((CStreamDetailVideo*)details.GetNthStream(CStreamDetail::VIDEO,0))->m_iDuration = (int) duration; } return result; } else return false; } std::string CVideoPlayer::GetPlayingTitle() { /* Currently we support only Title Name from Teletext line 30 */ TextCacheStruct_t* ttcache = m_VideoPlayerTeletext->GetTeletextCache(); if (ttcache && !ttcache->line30.empty()) return ttcache->line30; return ""; } bool CVideoPlayer::SwitchChannel(const CPVRChannelPtr &channel) { if (g_PVRManager.IsPlayingChannel(channel)) return false; // desired channel already active, nothing to do. 
if (!g_PVRManager.CheckParentalLock(channel)) return false; /* set GUI info */ if (!g_PVRManager.PerformChannelSwitch(channel, true)) return false; UpdateApplication(0); UpdatePlayState(0); /* select the new channel */ m_messenger.Put(new CDVDMsgType<CPVRChannelPtr>(CDVDMsg::PLAYER_CHANNEL_SELECT, channel)); return true; } void CVideoPlayer::FrameMove() { m_renderManager.FrameMove(); } void CVideoPlayer::FrameWait(int ms) { m_renderManager.FrameWait(ms); } bool CVideoPlayer::HasFrame() { return m_renderManager.HasFrame(); } void CVideoPlayer::Render(bool clear, uint32_t alpha, bool gui) { m_renderManager.Render(clear, 0, alpha, gui); } void CVideoPlayer::AfterRender() { m_renderManager.FrameFinish(); } void CVideoPlayer::FlushRenderer() { m_renderManager.Flush(); } void CVideoPlayer::SetRenderViewMode(int mode) { m_renderManager.SetViewMode(mode); } float CVideoPlayer::GetRenderAspectRatio() { return m_renderManager.GetAspectRatio(); } RESOLUTION CVideoPlayer::GetRenderResolution() { return g_graphicsContext.GetVideoResolution(); } void CVideoPlayer::TriggerUpdateResolution() { m_renderManager.TriggerUpdateResolution(0, 0, 0); } bool CVideoPlayer::IsRenderingVideo() { return m_renderManager.IsConfigured(); } bool CVideoPlayer::IsRenderingGuiLayer() { return m_renderManager.IsGuiLayer(); } bool CVideoPlayer::IsRenderingVideoLayer() { return m_renderManager.IsVideoLayer(); } bool CVideoPlayer::Supports(EDEINTERLACEMODE mode) { return m_renderManager.Supports(mode); } bool CVideoPlayer::Supports(EINTERLACEMETHOD method) { return m_renderManager.Supports(method); } bool CVideoPlayer::Supports(ESCALINGMETHOD method) { return m_renderManager.Supports(method); } bool CVideoPlayer::Supports(ERENDERFEATURE feature) { return m_renderManager.Supports(feature); } unsigned int CVideoPlayer::RenderCaptureAlloc() { return m_renderManager.AllocRenderCapture(); } void CVideoPlayer::RenderCapture(unsigned int captureId, unsigned int width, unsigned int height, int flags) { 
m_renderManager.StartRenderCapture(captureId, width, height, flags); } void CVideoPlayer::RenderCaptureRelease(unsigned int captureId) { m_renderManager.ReleaseRenderCapture(captureId); } bool CVideoPlayer::RenderCaptureGetPixels(unsigned int captureId, unsigned int millis, uint8_t *buffer, unsigned int size) { return m_renderManager.RenderCaptureGetPixels(captureId, millis, buffer, size); } std::string CVideoPlayer::GetRenderVSyncState() { return m_renderManager.GetVSyncState(); } void CVideoPlayer::VideoParamsChange() { m_messenger.Put(new CDVDMsg(CDVDMsg::PLAYER_AVCHANGE)); } // IDispResource interface void CVideoPlayer::OnLostDisplay() { CLog::Log(LOGNOTICE, "VideoPlayer: OnLostDisplay received"); m_displayLost = true; } void CVideoPlayer::OnResetDisplay() { CLog::Log(LOGNOTICE, "VideoPlayer: OnResetDisplay received"); m_displayLost = false; }<|fim▁end|>
bool CVideoPlayer::OpenRadioRDSStream(CDVDStreamInfo& hint) { if (!m_VideoPlayerRadioRDS->CheckStream(hint))
<|file_name|>servers.go<|end_file_name|><|fim▁begin|>package events import "encoding/json" // User is the format for the user command. type User struct { User string `json:"user"` Name string `json:"name"` } // Nick is the format for the nick command. type Nick struct { Nick string `json:"nick"` } // Quit is the format for the quit event type Quit struct { Type string `json:"type"` Status string `json:"status"` User string `json:"user"` Msg string `json:"msg"` } // Connected returns a connection event func Connected(server, msg string) string { event, err := json.Marshal(StatusTargetMsgEvent{Type: "connected", Status: "ok", Target: server, Msg: msg, }) if err != nil { return InternalError(err.Error()) }<|fim▁hole|> return string(event) } // RcvedQuit returns a quit event func RcvedQuit(user, msg string) string { event, err := json.Marshal(Quit{Type: "quit", Status: "ok", User: user, Msg: msg, }) if err != nil { return InternalError(err.Error()) } return string(event) }<|fim▁end|>
<|file_name|>compat.py<|end_file_name|><|fim▁begin|>""" The `compat` module provides support for backwards compatibility with older versions of Django/Python, and compatibility wrappers around optional packages. """ # flake8: noqa from __future__ import unicode_literals import inspect import django from django.apps import apps from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.db import connection, models, transaction from django.template import Context, RequestContext, Template from django.utils import six from django.views.generic import View try: from django.urls import ( NoReverseMatch, RegexURLPattern, RegexURLResolver, ResolverMatch, Resolver404, get_script_prefix, reverse, reverse_lazy, resolve ) except ImportError: from django.core.urlresolvers import ( # Will be removed in Django 2.0 NoReverseMatch, RegexURLPattern, RegexURLResolver, ResolverMatch, Resolver404, get_script_prefix, reverse, reverse_lazy, resolve ) try: import urlparse # Python 2.x except ImportError: import urllib.parse as urlparse def unicode_repr(instance): # Get the repr of an instance, but ensure it is a unicode string # on both python 3 (already the case) and 2 (not the case). if six.PY2: return repr(instance).decode('utf-8') return repr(instance) def unicode_to_repr(value): # Coerce a unicode string to the correct repr return type, depending on # the Python version. We wrap all our `__repr__` implementations with # this and then use unicode throughout internally. if six.PY2: return value.encode('utf-8') return value def unicode_http_header(value): # Coerce HTTP header value to unicode. 
if isinstance(value, six.binary_type): return value.decode('iso-8859-1') return value def total_seconds(timedelta): # TimeDelta.total_seconds() is only available in Python 2.7 if hasattr(timedelta, 'total_seconds'): return timedelta.total_seconds() else: return (timedelta.days * 86400.0) + float(timedelta.seconds) + (timedelta.microseconds / 1000000.0) def distinct(queryset, base): if settings.DATABASES[queryset.db]["ENGINE"] == "django.db.backends.oracle": # distinct analogue for Oracle users return base.filter(pk__in=set(queryset.values_list('pk', flat=True))) return queryset.distinct() # Obtaining manager instances and names from model options differs after 1.10. def get_names_and_managers(options): if django.VERSION >= (1, 10): # Django 1.10 onwards provides a `.managers` property on the Options. return [ (manager.name, manager) for manager in options.managers ] # For Django 1.8 and 1.9, use the three-tuple information provided # by .concrete_managers and .abstract_managers return [ (manager_info[1], manager_info[2]) for manager_info in (options.concrete_managers + options.abstract_managers) ] # field.rel is deprecated from 1.9 onwards def get_remote_field(field, **kwargs): if 'default' in kwargs: if django.VERSION < (1, 9): return getattr(field, 'rel', kwargs['default']) return getattr(field, 'remote_field', kwargs['default']) if django.VERSION < (1, 9): return field.rel return field.remote_field def _resolve_model(obj): """ Resolve supplied `obj` to a Django model class. `obj` must be a Django model class itself, or a string representation of one. Useful in situations like GH #1225 where Django may not have resolved a string-based reference to a model in another model's foreign key definition. 
String representations should have the format: 'appname.ModelName' """ if isinstance(obj, six.string_types) and len(obj.split('.')) == 2: app_name, model_name = obj.split('.') resolved_model = apps.get_model(app_name, model_name) if resolved_model is None: msg = "Django did not return a model for {0}.{1}" raise ImproperlyConfigured(msg.format(app_name, model_name)) return resolved_model elif inspect.isclass(obj) and issubclass(obj, models.Model): return obj raise ValueError("{0} is not a Django model".format(obj)) def is_authenticated(user): if django.VERSION < (1, 10): return user.is_authenticated() return user.is_authenticated def is_anonymous(user): if django.VERSION < (1, 10): return user.is_anonymous() return user.is_anonymous def get_related_model(field): if django.VERSION < (1, 9): return _resolve_model(field.rel.to) return field.remote_field.model def value_from_object(field, obj): if django.VERSION < (1, 9): return field._get_val_from_obj(obj) return field.value_from_object(obj) # contrib.postgres only supported from 1.8 onwards. try: from django.contrib.postgres import fields as postgres_fields except ImportError: postgres_fields = None # JSONField is only supported from 1.9 onwards try: from django.contrib.postgres.fields import JSONField except ImportError: JSONField = None # coreapi is optional (Note that uritemplate is a dependency of coreapi) try: import coreapi import uritemplate except (ImportError, SyntaxError): # SyntaxError is possible under python 3.2 coreapi = None uritemplate = None # coreschema is optional try: import coreschema except ImportError: coreschema = None # django-filter is optional try: import django_filters<|fim▁hole|># django-crispy-forms is optional try: import crispy_forms except ImportError: crispy_forms = None # requests is optional try: import requests except ImportError: requests = None # Django-guardian is optional. Import only if guardian is in INSTALLED_APPS # Fixes (#1712). We keep the try/except for the test suite. 
guardian = None try: if 'guardian' in settings.INSTALLED_APPS: import guardian except ImportError: pass # PATCH method is not implemented by Django if 'patch' not in View.http_method_names: View.http_method_names = View.http_method_names + ['patch'] # Markdown is optional try: import markdown if markdown.version <= '2.2': HEADERID_EXT_PATH = 'headerid' LEVEL_PARAM = 'level' elif markdown.version < '2.6': HEADERID_EXT_PATH = 'markdown.extensions.headerid' LEVEL_PARAM = 'level' else: HEADERID_EXT_PATH = 'markdown.extensions.toc' LEVEL_PARAM = 'baselevel' def apply_markdown(text): """ Simple wrapper around :func:`markdown.markdown` to set the base level of '#' style headers to <h2>. """ extensions = [HEADERID_EXT_PATH] extension_configs = { HEADERID_EXT_PATH: { LEVEL_PARAM: '2' } } md = markdown.Markdown( extensions=extensions, extension_configs=extension_configs ) return md.convert(text) except ImportError: apply_markdown = None markdown = None try: import pygments from pygments.lexers import get_lexer_by_name from pygments.formatters import HtmlFormatter def pygments_highlight(text, lang, style): lexer = get_lexer_by_name(lang, stripall=False) formatter = HtmlFormatter(nowrap=True, style=style) return pygments.highlight(text, lexer, formatter) def pygments_css(style): formatter = HtmlFormatter(style=style) return formatter.get_style_defs('.highlight') except ImportError: pygments = None def pygments_highlight(text, lang, style): return text def pygments_css(style): return None try: import pytz from pytz.exceptions import InvalidTimeError except ImportError: InvalidTimeError = Exception # `separators` argument to `json.dumps()` differs between 2.x and 3.x # See: http://bugs.python.org/issue22767 if six.PY3: SHORT_SEPARATORS = (',', ':') LONG_SEPARATORS = (', ', ': ') INDENT_SEPARATORS = (',', ': ') else: SHORT_SEPARATORS = (b',', b':') LONG_SEPARATORS = (b', ', b': ') INDENT_SEPARATORS = (b',', b': ') try: # DecimalValidator is unavailable in Django < 1.9 from 
django.core.validators import DecimalValidator except ImportError: DecimalValidator = None def set_rollback(): if hasattr(transaction, 'set_rollback'): if connection.settings_dict.get('ATOMIC_REQUESTS', False): # If running in >=1.6 then mark a rollback as required, # and allow it to be handled by Django. if connection.in_atomic_block: transaction.set_rollback(True) elif transaction.is_managed(): # Otherwise handle it explicitly if in managed mode. if transaction.is_dirty(): transaction.rollback() transaction.leave_transaction_management() else: # transaction not managed pass def template_render(template, context=None, request=None): """ Passing Context or RequestContext to Template.render is deprecated in 1.9+, see https://github.com/django/django/pull/3883 and https://github.com/django/django/blob/1.9/django/template/backends/django.py#L82-L84 :param template: Template instance :param context: dict :param request: Request instance :return: rendered template as SafeText instance """ if isinstance(template, Template): if request: context = RequestContext(request, context) else: context = Context(context) return template.render(context) # backends template, e.g. django.template.backends.django.Template else: return template.render(context, request=request) def set_many(instance, field, value): if django.VERSION < (1, 10): setattr(instance, field, value) else: field = getattr(instance, field) field.set(value) def include(module, namespace=None, app_name=None): from django.conf.urls import include if django.VERSION < (1,9): return include(module, namespace, app_name) else: return include((module, app_name), namespace)<|fim▁end|>
except ImportError: django_filters = None
<|file_name|>kekule.localize.widget.zh.js<|end_file_name|><|fim▁begin|>/** @ignore */ Kekule.LOCAL_RES = true; Kekule.Localization.setCurrModule("widget"); Kekule.Localization.addResource("zh", "WidgetTexts", { "CAPTION_OK": "确定", "CAPTION_CANCEL": "取消", "CAPTION_YES": "是", "CAPTION_NO": "否", "CAPTION_BROWSE_COLOR": "浏览颜色", "HINT_BROWSE_COLOR": "浏览更多颜色", "S_COLOR_UNSET": "(未设置)", "S_COLOR_DEFAULT": "(缺省值)", "S_COLOR_MIXED": "(多个值)", "S_COLOR_TRANSPARENT": "(透明)", "S_OBJECT_UNSET": "(无)", "S_ITEMS": "条目", "S_OBJECT": "对象", "S_VALUE_UNSET": "(未设置)", "CAPTION_MENU": "Menu", "HINT_MENU": "Open menu", "S_INSPECT_NONE": "(无)", "S_INSPECT_OBJECTS": "({0}个对象)", "S_INSPECT_ID_OBJECT": "{0}: {1}", "S_INSPECT_ANONYMOUS_OBJECT": "({0})", "CAPTION_TOGGLE_TEXTWRAP": "切换文本换行", "CAPTION_INC_TEXT_SIZE": "增大字号", "CAPTION_DEC_TEXT_SIZE": "减小字号", "HINT_TOGGLE_TEXTWRAP": "切换文本是否自动换行", "HINT_INC_TEXT_SIZE": "增大字号", "HINT_DEC_TEXT_SIZE": "减小字号", "HINT_CHOOSE_FONT_FAMILY": "选择字体", "CAPTION_FIRST_PAGE": "首页", "CAPTION_LAST_PAGE": "末页", "CAPTION_PREV_PAGE": "前一页", "CAPTION_NEXT_PAGE": "后一页", "HINT_FIRST_PAGE": "首页", "HINT_LAST_PAGE": "末页", "HINT_PREV_PAGE": "前一页", "HINT_NEXT_PAGE": "后一页", "HINT_CURR_PAGE": "当前页", "MSG_RETRIEVING_DATA": "载入数据…", "CAPTION_DATATABLE_EDIT": "编辑", "CAPTION_DATATABLE_DELETE": "删除", "CAPTION_DATATABLE_INSERT": "插入", "HINT_DATATABLE_EDIT": "编辑数据", "HINT_DATATABLE_DELETE": "删除数据", "HINT_DATATABLE_INSERT": "插入数据", "CAPTION_ADD_CELL": "+", "HINT_ADD_CELL": "添加新单元格", "CAPTION_REMOVE_CELL": "移除", "HINT_REMOVE_CELL": "移除单元格", "CAPTION_CONFIG": "设置…", "HINT_CONFIG": "修改设置" }); Kekule.Localization.addResource("zh", "ChemWidgetTexts", { "CAPTION_CLEAROBJS": "清除", "CAPTION_LOADFILE": "载入…", "CAPTION_LOADDATA": "载入…", "CAPTION_SAVEFILE": "保存…", "CAPTION_ZOOMIN": "放大", "CAPTION_ZOOMOUT": "缩小", "CAPTION_RESETZOOM": "重置缩放", "CAPTION_RESETVIEW": "重置", "CAPTION_ROTATE": "旋转", "CAPTION_ROTATELEFT": "向左旋转", "CAPTION_ROTATERIGHT": "向右旋转", "CAPTION_ROTATEX": "沿X轴旋转", 
"CAPTION_ROTATEY": "沿Y轴旋转", "CAPTION_ROTATEZ": "沿Z轴旋转", "CAPTION_MOL_DISPLAY_TYPE": "分子显示样式", "CAPTION_SKELETAL": "键线式", "CAPTION_CONDENSED": "缩写式", "CAPTION_WIRE": "单线模型", "CAPTION_STICKS": "棍式模型", "CAPTION_BALLSTICK": "球棍模型", "CAPTION_SPACEFILL": "比例模型", "CAPTION_HIDEHYDROGENS": "显示/隐藏氢原子", "CAPTION_OPENEDITOR": "编辑…", "CAPTION_EDIT_OBJ": "编辑", "HINT_CLEAROBJS": "清除对象", "HINT_LOADFILE": "自文件载入", "HINT_LOADDATA": "载入数据", "HINT_SAVEFILE": "存储到文件", "HINT_ZOOMIN": "放大", "HINT_ZOOMOUT": "缩小", "HINT_RESETZOOM": "重置缩放", "HINT_RESETVIEW": "重置缩放与旋转", "HINT_ROTATE": "旋转", "HINT_ROTATELEFT": "逆时针旋转", "HINT_ROTATERIGHT": "顺时针旋转", "HINT_ROTATEX": "沿X轴旋转", "HINT_ROTATEY": "沿Y轴旋转", "HINT_ROTATEZ": "沿Z轴旋转",<|fim▁hole|> "HINT_WIRE": "以单线模型显示", "HINT_STICKS": "以棍式模型显示", "HINT_BALLSTICK": "以球棍模型显示", "HINT_SPACEFILL": "以比例模型显示", "HINT_HIDEHYDROGENS": "模型中显示/隐藏氢原子", "HINT_OPENEDITOR": "编辑当前对象", "CAPTION_NEWDOC": "新建", "CAPTION_UNDO": "撤销", "CAPTION_REDO": "重做", "CAPTION_COPY": "复制", "CAPTION_CUT": "剪切", "CAPTION_PASTE": "粘贴", "CAPTION_CLONE_SELECTION": "克隆选区", "CAPTION_TOGGLE_INSPECTOR": "对象检视器", "CAPTION_MANIPULATE": "选取", "CAPTION_ERASE": "删除", "CAPTION_MOL_BOND": "键", "CAPTION_MOL_BOND_SINGLE": "单键", "CAPTION_MOL_BOND_DOUBLE": "双键", "CAPTION_MOL_BOND_TRIPLE": "三键", "CAPTION_MOL_BOND_WEDGEUP": "实楔线键", "CAPTION_MOL_BOND_WEDGEDOWN": "虚楔线键", "CAPTION_MOL_BOND_CLOSER": "突出(加粗)键", "CAPTION_MOL_BOND_WAVY": "波浪键", "CAPTION_MOL_BOND_DOUBLE_EITHER": "顺或反式双键", "CAPTION_MOL_ATOM": "原子", "CAPTION_MOL_FORMULA": "分子式", "CAPTION_MOL_CHARGE": "电荷", "CAPTION_MOL_CHARGE_CLEAR": "清除电荷", "CAPTION_MOL_CHARGE_POSITIVE": "正电荷", "CAPTION_MOL_CHARGE_NEGATIVE": "负电荷", "CAPTION_MOL_CHARGE_SINGLET": "单线态", "CAPTION_MOL_CHARGE_DOUBLET": "双线态自由基", "CAPTION_MOL_CHARGE_TRIPLET": "三线态", "CAPTION_TEXT_BLOCK": "文本", "CAPTION_REPOSITORY_RING": "环", "CAPTION_REPOSITORY_RING_3": "环丙烷", "CAPTION_REPOSITORY_RING_4": "环丁烷", "CAPTION_REPOSITORY_RING_5": "环戊烷", "CAPTION_REPOSITORY_RING_6": "环己烷", 
"CAPTION_REPOSITORY_RING_7": "环庚烷", "CAPTION_REPOSITORY_RING_8": "环辛烷", "CAPTION_REPOSITORY_RING_AR_6": "苯", "CAPTION_REPOSITORY_RING_AR_5": "环戊二烯", "CAPTION_REPOSITORY_ARROWLINE": "线段与箭头", "CAPTION_REPOSITORY_GLYPH": "图符", "CAPTION_REPOSITORY_GLYPH_LINE": "直线", "CAPTION_REPOSITORY_GLYPH_OPEN_ARROW_LINE": "开放箭头线", "CAPTION_REPOSITORY_GLYPH_TRIANGLE_ARROW_LINE": "三角箭头线", "CAPTION_REPOSITORY_GLYPH_DI_OPEN_ARROW_LINE": "双向开放箭头线", "CAPTION_REPOSITORY_GLYPH_DI_TRIANGLE_ARROW_LINE": "双向三角箭头线", "CAPTION_REPOSITORY_GLYPH_REV_ARROW_LINE": "可逆箭头线", "CAPTION_REPOSITORY_GLYPH_OPEN_ARROW_DILINE": "开放箭头双线", "CAPTION_REPOSITORY_HEAT_SYMBOL": "加热符号", "CAPTION_REPOSITORY_ADD_SYMBOL": "加号", "CAPTION_PICK_COLOR": "颜色", "CAPTION_TEXT_DIRECTION": "文字方向", "CAPTION_TEXT_DIRECTION_DEFAULT": "缺省", "CAPTION_TEXT_DIRECTION_LTR": "由左至右", "CAPTION_TEXT_DIRECTION_RTL": "由右至左", "CAPTION_TEXT_DIRECTION_TTB": "由上至下", "CAPTION_TEXT_DIRECTION_BTT": "由下至上", "CAPTION_TEXT_HORIZONTAL_ALIGN": "文字水平对齐", "CAPTION_TEXT_VERTICAL_ALIGN": "文字垂直对齐", "CAPTION_TEXT_ALIGN_DEFAULT": "缺省", "CAPTION_TEXT_ALIGN_LEADING": "首对齐", "CAPTION_TEXT_ALIGN_TRAILING": "尾对齐", "CAPTION_TEXT_ALIGN_CENTER": "居中对齐", "CAPTION_TEXT_ALIGN_LEFT": "左对齐", "CAPTION_TEXT_ALIGN_RIGHT": "右对齐", "CAPTION_TEXT_ALIGN_TOP": "上对齐", "CAPTION_TEXT_ALIGN_BOTTOM": "下对齐", "HINT_NEWDOC": "创建新文档", "HINT_UNDO": "撤销", "HINT_REDO": "重做", "HINT_COPY": "将选定对象复制至内部剪贴板", "HINT_CUT": "将选定对象剪切至内部剪贴板", "HINT_PASTE": "自内部剪贴板复制", "HINT_CLONE_SELECTION": "克隆当前选区", "HINT_TOGGLE_INSPECTOR": "显示或隐藏对象检视器", "HINT_MANIPULATE": "选择工具", "HINT_ERASE": "删除工具", "HINT_MOL_BOND": "化学键工具", "HINT_MOL_BOND_SINGLE": "单键", "HINT_MOL_BOND_DOUBLE": "双键", "HINT_MOL_BOND_TRIPLE": "叁键", "HINT_MOL_BOND_WEDGEUP": "实楔线键", "HINT_MOL_BOND_WEDGEDOWN": "虚楔线键", "HINT_MOL_BOND_CLOSER": "突出(加粗)键", "HINT_MOL_BOND_WAVY": "波浪键", "HINT_MOL_BOND_DOUBLE_EITHER": "顺或反式双键", "HINT_MOL_ATOM": "原子工具", "HINT_MOL_FORMULA": "分子式工具", "HINT_MOL_CHARGE": "电荷工具", "HINT_MOL_CHARGE_CLEAR": "清除电荷或自由基", 
"HINT_MOL_CHARGE_POSITIVE": "正电荷", "HINT_MOL_CHARGE_NEGATIVE": "负电荷", "HINT_MOL_CHARGE_SINGLET": "单线态自由基", "HINT_MOL_CHARGE_DOUBLET": "双线态自由基", "HINT_MOL_CHARGE_TRIPLET": "三线态自由基", "HINT_TEXT_BLOCK": "文字工具", "HINT_REPOSITORY_RING": "环工具", "HINT_REPOSITORY_RING_3": "环丙烷", "HINT_REPOSITORY_RING_4": "环丁烷", "HINT_REPOSITORY_RING_5": "环戊烷", "HINT_REPOSITORY_RING_6": "环己烷", "HINT_REPOSITORY_RING_7": "环庚烷", "HINT_REPOSITORY_RING_8": "环辛烷", "HINT_REPOSITORY_RING_AR_6": "苯", "HINT_REPOSITORY_RING_AR_5": "环戊二烯", "HINT_REPOSITORY_ARROWLINE": "线段与箭头", "HINT_REPOSITORY_GLYPH": "图符", "HINT_REPOSITORY_GLYPH_LINE": "直线", "HINT_REPOSITORY_GLYPH_OPEN_ARROW_LINE": "开放箭头线", "HINT_REPOSITORY_GLYPH_TRIANGLE_ARROW_LINE": "三角箭头线", "HINT_REPOSITORY_GLYPH_DI_OPEN_ARROW_LINE": "双向开放箭头线", "HINT_REPOSITORY_GLYPH_DI_TRIANGLE_ARROW_LINE": "双向三角箭头线", "HINT_REPOSITORY_GLYPH_REV_ARROW_LINE": "可逆箭头线", "HINT_REPOSITORY_GLYPH_OPEN_ARROW_DILINE": "开放箭头双线", "HINT_REPOSITORY_HEAT_SYMBOL": "加热符号", "HINT_REPOSITORY_ADD_SYMBOL": "加号", "HINT_FONTNAME": "设置字体", "HINT_FONTSIZE": "设置字号", "HINT_PICK_COLOR": "选择颜色", "HINT_TEXT_DIRECTION": "设置文字方向", "HINT_TEXT_HORIZONTAL_ALIGN": "设置文字水平对齐方式", "HINT_TEXT_VERTICAL_ALIGN": "设置文字水平垂直方式", "CAPTION_LOADDATA_DIALOG": "载入数据", "CAPTION_DATA_FORMAT": "数据格式:", "CAPTION_DATA_SRC": "在下方输入或粘贴数据:", "CAPTION_LOADDATA_FROM_FILE": "载入文件", "CAPTION_CHOOSEFILEFORMAT": "选择文件格式", "CAPTION_SELECT_FORMAT": "选择格式:", "CAPTION_PREVIEW_FILE_CONTENT": "预览文件内容…", "S_DEF_SAVE_FILENAME": "Unnamed", "CAPTION_ATOMLIST_PERIODIC_TABLE": "更多…", "CAPTION_RGROUP": "取代基", "CAPTION_VARIABLE_ATOM": "(包含)原子列表", "CAPTION_VARIABLE_NOT_ATOM": "(不包含)原子列表", "CAPTION_PSEUDOATOM": "赝原子(Pseudoatom)", "CAPTION_DUMMY_ATOM": "虚原子(Dummy atom)", "CAPTION_HETERO_ATOM": "杂原子", "CAPTION_ANY_ATOM": "任意原子", "CAPTION_PERIODIC_TABLE_DIALOG": "元素周期表", "CAPTION_PERIODIC_TABLE_DIALOG_SEL_ELEM": "选择元素", "CAPTION_PERIODIC_TABLE_DIALOG_SEL_ELEMS": "选择多个元素", "CAPTION_TEXTBLOCK_INIT": "在此输入文字", "LEGEND_CAPTION": "图例", 
"LEGEND_ELEM_SYMBOL": "元素符号", "LEGEND_ELEM_NAME": "名称", "LEGEND_ATOMIC_NUM": "原子序数", "LEGEND_ATOMIC_WEIGHT": "原子量", "CAPTION_2D": "2D", "CAPTION_3D": "3D", "CAPTION_AUTOSIZE": "自动尺寸", "CAPTION_AUTOFIT": "自动缩放", "CAPTION_SHOWSIZEINFO": "显示尺寸信息", "CAPTION_LABEL_SIZE": "尺寸:", "CAPTION_BACKGROUND_COLOR": "背景颜色:", "CAPTION_WIDTH_HEIGHT": "宽:{0}、高:{1}", "PLACEHOLDER_WIDTH": "宽", "PLACEHOLDER_HEIGHT": "高", "HINT_AUTOSIZE": "图形尺寸是否由对象大小自动调整", "HINT_AUTOFIT": "对象是否填满图形区域", "S_VALUE_DEFAULT": "(缺省)" }); Kekule.Localization.addResource("zh", "ErrorMsg", { "WIDGET_CLASS_NOT_FOUND": "控件类不存在", "WIDGET_CAN_NOT_BIND_TO_ELEM": "控件{0}无法绑定到HTML元素<{1}>", "LOAD_CHEMDATA_FAILED": "载入数据失败", "FILE_API_NOT_SUPPORTED": "您当前的浏览器不支持HTML文件操作,请升级浏览器。", "DRAW_BRIDGE_NOT_SUPPORTED": "您当前的浏览器不支持该绘图功能,请升级浏览器。", "COMMAND_NOT_REVERSIBLE": "命令无法撤销", "PAGE_INDEX_OUTOF_RANGE": "页面超出范围", "FETCH_DATA_TIMEOUT": "加载数据超时", "RENDER_TYPE_CHANGE_NOT_ALLOWED": "渲染类型无法改变", "CAN_NOT_CREATE_EDITOR": "创建编辑器失败", "CAN_NOT_SET_COORD_OF_CLASS": "无法设置对象{0}实例的坐标", "CAN_NOT_SET_DIMENSION_OF_CLASS": "无法设置对象{0}实例的尺寸", "CAN_NOT_MERGE_CONNECTORS": "化学键或连接符无法合并", "NOT_A_VALID_ATOM": "无效的原子", "INVALID_ATOM_SYMBOL": "原子符号无效" });<|fim▁end|>
"HINT_MOL_DISPLAY_TYPE": "改变分子显示样式", "HINT_SKELETAL": "以键线式显示", "HINT_CONDENSED": "以缩写式显示",
<|file_name|>generate.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python """Utility to generate files to benchmark""" # Copyright Abel Sinkovics (abel@sinkovics.hu) 2016. # Distributed under the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) import argparse import os import string import random import re import json import Cheetah.Template import chars def regex_to_error_msg(regex): """Format a human-readable error message from a regex""" return re.sub('([^\\\\])[()]', '\\1', regex) \ .replace('[ \t]*$', '') \ .replace('^', '') \ .replace('$', '') \ .replace('[ \t]*', ' ') \ .replace('[ \t]+', ' ') \ .replace('[0-9]+', 'X') \ \ .replace('\\[', '[') \ .replace('\\]', ']') \ .replace('\\(', '(') \ .replace('\\)', ')') \ .replace('\\.', '.') def mkdir_p(path): """mkdir -p path""" try: os.makedirs(path) except OSError: pass def in_comment(regex): """Builds a regex matching "regex" in a comment""" return '^[ \t]*//[ \t]*' + regex + '[ \t]*$' def random_chars(number): """Generate random characters""" char_map = { k: v for k, v in chars.CHARS.iteritems() if not format_character(k).startswith('\\x') } char_num = sum(char_map.values()) return ( format_character(nth_char(char_map, random.randint(0, char_num - 1))) for _ in xrange(0, number) ) def random_string(length): """Generate a random string or character list depending on the mode""" return \ 'BOOST_METAPARSE_STRING("{0}")'.format(''.join(random_chars(length))) class Mode(object): """Represents a generation mode""" def __init__(self, name): self.name = name if name == 'BOOST_METAPARSE_STRING': self.identifier = 'bmp' elif name == 'manual': self.identifier = 'man' else: raise Exception('Invalid mode: {0}'.format(name)) def description(self): """The description of the mode""" if self.identifier == 'bmp': return 'Using BOOST_METAPARSE_STRING' elif self.identifier == 'man': return 'Generating strings manually' def convert_from(self, base): 
"""Convert a BOOST_METAPARSE_STRING mode document into one with this mode""" if self.identifier == 'bmp': return base elif self.identifier == 'man': result = [] prefix = 'BOOST_METAPARSE_STRING("' while True: bmp_at = base.find(prefix) if bmp_at == -1: return ''.join(result) + base else: result.append( base[0:bmp_at] + '::boost::metaparse::string<' ) new_base = '' was_backslash = False comma = '' for i in xrange(bmp_at + len(prefix), len(base)): if was_backslash: result.append( '{0}\'\\{1}\''.format(comma, base[i]) ) was_backslash = False comma = ',' elif base[i] == '"': new_base = base[i+2:] break elif base[i] == '\\': was_backslash = True else: result.append('{0}\'{1}\''.format(comma, base[i])) comma = ',' base = new_base result.append('>') class Template(object): """Represents a loaded template""" def __init__(self, name, content): self.name = name self.content = content def instantiate(self, value_of_n): """Instantiates the template""" template = Cheetah.Template.Template( self.content, searchList={'n': value_of_n} ) template.random_string = random_string return str(template) def range(self): """Returns the range for N""" match = self._match(in_comment( 'n[ \t]+in[ \t]*\\[([0-9]+)\\.\\.([0-9]+)\\),[ \t]+' 'step[ \t]+([0-9]+)' )) return range( int(match.group(1)), int(match.group(2)), int(match.group(3)) ) def property(self, name): """Parses and returns a property""" return self._get_line(in_comment(name + ':[ \t]*(.*)')) def modes(self): """Returns the list of generation modes""" return [Mode(s.strip()) for s in self.property('modes').split(',')] def _match(self, regex): """Find the first line matching regex and return the match object""" cregex = re.compile(regex) for line in self.content.splitlines(): match = cregex.match(line) if match: return match raise Exception('No "{0}" line in {1}.cpp'.format( regex_to_error_msg(regex), self.name )) def _get_line(self, regex): """Get a line based on a regex""" return self._match(regex).group(1) def load_file(path): 
"""Returns the content of the file""" with open(path, 'rb') as in_file: return in_file.read() def templates_in(path): """Enumerate the templates found in path""" ext = '.cpp' return ( Template(f[0:-len(ext)], load_file(os.path.join(path, f))) for f in os.listdir(path) if f.endswith(ext) ) def nth_char(char_map, index): """Returns the nth character of a character->occurrence map""" for char in char_map: if index < char_map[char]: return char index = index - char_map[char] return None def format_character(char): """Returns the C-formatting of the character""" if \ char in string.ascii_letters \ or char in string.digits \ or char in [ '_', '.', ':', ';', ' ', '!', '?', '+', '-', '/', '=', '<', '>', '$', '(', ')', '@', '~', '`', '|', '#', '[', ']', '{', '}', '&', '*', '^', '%']: return char elif char in ['"', '\'', '\\']: return '\\{0}'.format(char) elif char == '\n': return '\\n' elif char == '\r': return '\\r' elif char == '\t': return '\\t' else: return '\\x{:02x}'.format(ord(char)) def write_file(filename, content): """Create the file with the given content""" print 'Generating {0}'.format(filename) with open(filename, 'wb') as out_f: out_f.write(content) def out_filename(template, n_val, mode): """Determine the output filename""" return '{0}_{1}_{2}.cpp'.format(template.name, n_val, mode.identifier) def main(): """The main function of the script""" desc = 'Generate files to benchmark' parser = argparse.ArgumentParser(description=desc) parser.add_argument( '--src', dest='src_dir', default='src', help='The directory containing the templates'<|fim▁hole|> default='generated', help='The output directory' ) parser.add_argument( '--seed', dest='seed', default='13', help='The random seed (to ensure consistent regeneration)' ) args = parser.parse_args() random.seed(int(args.seed)) mkdir_p(args.out_dir) for template in templates_in(args.src_dir): modes = template.modes() n_range = template.range() for n_value in n_range: base = template.instantiate(n_value) for mode in 
modes: write_file( os.path.join( args.out_dir, out_filename(template, n_value, mode) ), mode.convert_from(base) ) write_file( os.path.join(args.out_dir, '{0}.json'.format(template.name)), json.dumps({ 'files': { n: { m.identifier: out_filename(template, n, m) for m in modes } for n in n_range }, 'name': template.name, 'x_axis_label': template.property('x_axis_label'), 'desc': template.property('desc'), 'modes': {m.identifier: m.description() for m in modes} }) ) if __name__ == '__main__': main()<|fim▁end|>
) parser.add_argument( '--out', dest='out_dir',
<|file_name|>basepolygon.C<|end_file_name|><|fim▁begin|>// Copyright (C) 1999-2021 // Smithsonian Astrophysical Observatory, Cambridge, MA, USA // For conditions of distribution and use, see copyright notice in "copyright" #include "basepolygon.h" #include "fitsimage.h" BasePolygon::BasePolygon(Base* p, const Vector& ctr, const Vector& b) : Marker(p, ctr, 0) { } BasePolygon::BasePolygon(Base* p, const Vector& ctr, const Vector& b, const char* clr, int* dsh, int wth, const char* fnt, const char* txt, unsigned short prop, const char* cmt, const List<Tag>& tg, const List<CallBack>& cb) : Marker(p, ctr, 0, clr, dsh, wth, fnt, txt, prop, cmt, tg, cb) { } BasePolygon::BasePolygon(Base* p, const List<Vertex>& v, const char* clr, int* dsh, int wth, const char* fnt, const char* txt, unsigned short prop, const char* cmt, const List<Tag>& tg, const List<CallBack>& cb) : Marker(p, Vector(0,0), 0, clr, dsh, wth, fnt, txt, prop, cmt, tg, cb) { // Vertex list is in ref coords angle = 0; vertex = v; // find center center = Vector(0,0); vertex.head(); do center += vertex.current()->vector; while (vertex.next()); center /= vertex.count(); // vertices are relative vertex.head(); do vertex.current()->vector *= Translate(-center) * FlipY(); // no rotation while (vertex.next()); updateBBox(); } BasePolygon::BasePolygon(const BasePolygon& a) : Marker(a) { vertex = a.vertex; } void BasePolygon::createVertex(int which, const Vector& v) { // which segment (1 to n) // v is in ref coords Matrix mm = bckMatrix(); int seg = which-1; if (seg>=0 && seg<vertex.count()) { Vertex* n = new Vertex(v * mm); vertex.insert(seg,n); recalcCenter(); updateBBox(); doCallBack(CallBack::EDITCB); doCallBack(CallBack::MOVECB); // center can change } } void BasePolygon::deleteVertex(int h) { if (h>4) { int hh = h-4-1; if (vertex.count() > 3) { Vertex* v = vertex[hh]; if (v) { vertex.extractNext(v); delete v; recalcCenter(); updateBBox(); doCallBack(CallBack::EDITCB); doCallBack(CallBack::MOVECB); // center can 
change } } } } void BasePolygon::edit(const Vector& v, int h) {<|fim▁hole|> if (s1[0] != 0 && s1[1] != 0 && s2[0] != 0 && s2[1] != 0) { double a = fabs(s1[0]/s2[0]); double b = fabs(s1[1]/s2[1]); double s = a > b ? a : b; vertex.head(); do vertex.current()->vector *= Scale(s); while (vertex.next()); } updateBBox(); doCallBack(CallBack::EDITCB); } else { moveVertex(v,h); updateBBox(); doCallBack(CallBack::EDITCB); doCallBack(CallBack::MOVECB); // center can change } } void BasePolygon::moveVertex(const Vector& v, int h) { Matrix mm = bckMatrix(); if (vertex[h-5]) vertex.current()->vector = v * mm; recalcCenter(); } void BasePolygon::recalcCenter() { // recalculate center Vector nc; vertex.head(); do nc += vertex.current()->vector * Rotate(angle) * FlipY(); while (vertex.next()); nc /= vertex.count(); center += nc; // update all vertices vertex.head(); do vertex.current()->vector -= nc * FlipY() * Rotate(-angle); while (vertex.next()); } void BasePolygon::rotate(const Vector& v, int h) { if (h < 5) Marker::rotate(v,h); else { // we need to check this here, because we are really rotating if (canEdit()) { moveVertex(v,h); updateBBox(); doCallBack(CallBack::EDITCB); doCallBack(CallBack::MOVECB); // center can change } } } void BasePolygon::updateHandles() { // generate handles numHandle = 4 + vertex.count(); if (handle) delete [] handle; handle = new Vector[numHandle]; // the first four are our control handles BBox bb; vertex.head(); do bb.bound(vertex.current()->vector); while (vertex.next()); Vector zz = parent->zoom(); float r = 10/zz.length(); bb.expand(r); // give us more room handle[0] = fwdMap(bb.ll,Coord::CANVAS); handle[1] = fwdMap(bb.lr(),Coord::CANVAS); handle[2] = fwdMap(bb.ur,Coord::CANVAS); handle[3] = fwdMap(bb.ul(),Coord::CANVAS); // and the rest are vertices int i=4; vertex.head(); do handle[i++] = fwdMap(vertex.current()->vector,Coord::CANVAS); while (vertex.next()); } void BasePolygon::updateCoords(const Matrix& mx) { Scale s(mx); vertex.head(); do 
vertex.current()->vector *= s; while (vertex.next()); Marker::updateCoords(mx); } void BasePolygon::listBase(FitsImage* ptr, ostream& str, Coord::CoordSystem sys, Coord::SkyFrame sky, Coord::SkyFormat format) { Matrix mm = fwdMatrix(); str << type_ << '('; int first=1; vertex.head(); do { if (!first) str << ','; first=0; ptr->listFromRef(str,vertex.current()->vector*mm,sys,sky,format); } while (vertex.next()); str << ')'; }<|fim▁end|>
if (h < 5) { Vector s1 = v * bckMatrix(); Vector s2 = bckMap(handle[h-1],Coord::CANVAS);
<|file_name|>Install.py<|end_file_name|><|fim▁begin|># Install.py -- File system installation commands # Copyright (C) 2007-2013 CEA # # This file is part of shine # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # from __future__ import print_function import sys from Shine.Configuration.Globals import Globals from Shine.FSUtils import create_lustrefs from Shine.Lustre.FileSystem import FSRemoteError from Shine.Commands.Base.Command import Command, CommandHelpException from Shine.Commands.Base.CommandRCDefs import RC_OK, RC_FAILURE # Lustre events from Shine.Commands.Base.FSEventHandler import FSGlobalEventHandler <|fim▁hole|> """ shine install -m /path/to/model.lmf """ NAME = "install" DESCRIPTION = "Install a new file system." def execute(self): # Option sanity check self.forbidden(self.options.fsnames, "-f, see -m") self.forbidden(self.options.labels, "-l") self.forbidden(self.options.indexes, "-i") self.forbidden(self.options.failover, "-F") rc = RC_OK if not self.options.model: raise CommandHelpException("Lustre model file path" "(-m <model_file>) argument required.", self) eh = FSGlobalEventHandler(self) # Use this Shine.FSUtils convenience function. 
lmf = self.get_lmf_path() if lmf: print("Using Lustre model file %s" % lmf) else: raise CommandHelpException("Lustre model file for ``%s'' not found:" " please use filename or full LMF path.\n" "Your default model files directory (lmf_dir) is: %s" % (self.options.model, Globals().get_lmf_dir()), self) install_nodes = self.options.nodes excluded_nodes = self.options.excludes fs_conf, fs = create_lustrefs(self.get_lmf_path(), event_handler=eh, nodes=install_nodes, excluded=excluded_nodes) # Register the filesystem in backend print("Registering FS %s to backend..." % fs.fs_name) if self.options.dryrun: rc = 0 else: rc = self.register_fs(fs_conf) if rc: msg = "Error: failed to register FS to backend (rc=%d)" % rc print(msg, file=sys.stderr) else: print("Filesystem %s registered." % fs.fs_name) # Helper message. # If user specified nodes which were not used, warn him about it. actual_nodes = fs.components.managed().servers() if not self.check_valid_list(fs_conf.get_fs_name(), \ actual_nodes, "install"): return RC_FAILURE # Install file system configuration files; normally, this should # not be done by the Shine.Lustre.FileSystem object itself, but as # all proxy methods are currently handled by it, it is more # convenient this way... try: fs.install(fs_conf.get_cfg_filename(), dryrun=self.options.dryrun) tuning_conf = Globals().get_tuning_file() if tuning_conf: fs.install(tuning_conf, dryrun=self.options.dryrun) except FSRemoteError as error: print("WARNING: Due to error, installation skipped on %s" % error.nodes) rc = RC_FAILURE if not install_nodes and not excluded_nodes: # Give pointer to next user step. print("Use `shine format -f %s' to initialize the file system." % fs_conf.get_fs_name()) return rc def register_fs(self, fs_conf): # register file system configuration to the backend fs_conf.register_fs() fs_conf.register_targets()<|fim▁end|>
class Install(Command):
<|file_name|>DsReplicaObjMetaData2Ctr.py<|end_file_name|><|fim▁begin|># encoding: utf-8 # module samba.dcerpc.drsuapi # from /usr/lib/python2.7/dist-packages/samba/dcerpc/drsuapi.so # by generator 1.135 """ drsuapi DCE/RPC """ # imports import dcerpc as __dcerpc import talloc as __talloc<|fim▁hole|>class DsReplicaObjMetaData2Ctr(__talloc.Object): # no doc def __init__(self, *args, **kwargs): # real signature unknown pass @staticmethod # known case of __new__ def __new__(S, *more): # real signature unknown; restored from __doc__ """ T.__new__(S, ...) -> a new object with type S, a subtype of T """ pass array = property(lambda self: object(), lambda self, v: None, lambda self: None) # default count = property(lambda self: object(), lambda self, v: None, lambda self: None) # default enumeration_context = property(lambda self: object(), lambda self, v: None, lambda self: None) # default<|fim▁end|>
<|file_name|>test_book.py<|end_file_name|><|fim▁begin|>from quanthistling.tests import * class TestBookController(TestController): def test_index(self): response = self.app.get(url(controller='book', action='index'))<|fim▁hole|><|fim▁end|>
# Test response...
<|file_name|>test.js<|end_file_name|><|fim▁begin|>'use strict' const { describe, it, beforeEach, afterEach } = require('mocha') const Helper = require('hubot-test-helper') const { expect } = require('chai') const mock = require('mock-require') const http = require('http') const sleep = m => new Promise(resolve => setTimeout(() => resolve(), m)) const request = uri => { return new Promise((resolve, reject) => { http .get(uri, res => { const result = { statusCode: res.statusCode } if (res.statusCode !== 200) { resolve(result) } else { res.setEncoding('utf8') let rawData = '' res.on('data', chunk => { rawData += chunk }) res.on('end', () => { result.body = rawData resolve(result) }) } }) .on('error', err => reject(err)) }) } const infoRutStub = { getPersonByRut (rut) { return new Promise((resolve, reject) => { if (rut === '11111111-1') { return resolve({ name: 'Anonymous', rut }) } else if (rut === '77777777-7') { return resolve({ name: 'Sushi', rut }) } else if (rut === '22222222-2') { return resolve(null) } reject(new Error('Not found')) }) }, getEnterpriseByRut (rut) { return new Promise((resolve, reject) => { if (rut === '11111111-1') { return resolve({ name: 'Anonymous', rut }) } else if (rut === '77777777-7') { return resolve({ name: 'Sushi', rut }) } else if (rut === '22222222-2') { return resolve(null) } reject(new Error('Not found')) }) }, getPersonByName (name) { return new Promise((resolve, reject) => { if (name === 'juan perez perez') { return resolve([ { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' } ]) } else if (name === 'soto') { return resolve([ { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', name: 'Anonymous' }, { rut: '11.111.111-1', 
name: 'Anonymous' } ]) } else if (name === 'info-rut') { return resolve([]) } reject(new Error('Not found')) }) }, getEnterpriseByName (name) { return new Promise((resolve, reject) => { if (name === 'perez') { return resolve([{ rut: '11.111.111-1', name: 'Anonymous' }]) } else if (name === 'info-rut') { return resolve([]) } reject(new Error('Not found')) }) } } mock('info-rut', infoRutStub) const helper = new Helper('./../src/index.js') describe('info rut', function () { beforeEach(() => { this.room = helper.createRoom() }) afterEach(() => this.room.destroy()) describe('person rut valid', () => { const rut = '11111111-1' beforeEach(async () => { this.room.user.say('user', `hubot info-rut rut ${rut}`) await sleep(1000) }) it('should return a full name', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut rut ${rut}`], ['hubot', `Anonymous (${rut})`] ]) }) }) describe('enterprise rut valid', () => { const rut = '77777777-7' beforeEach(async () => { this.room.user.say('user', `hubot info-rut rut ${rut}`) await sleep(1000) }) it('should return a full name', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut rut ${rut}`], ['hubot', `Sushi (${rut})`] ]) }) }) describe('rut invalid', () => { const rut = '22222222-2' beforeEach(async () => { this.room.user.say('user', `hubot info-rut rut ${rut}`) await sleep(1000) }) it('should return a error', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut rut ${rut}`], ['hubot', '@user rut sin resultados'] ]) }) }) describe('rut error', () => { const rut = '1' beforeEach(async () => { this.room.user.say('user', `hubot info-rut rut ${rut}`) await sleep(1000) }) it('should return a error', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut rut ${rut}`], ['hubot', '@user ocurrio un error al consultar el rut'] ]) }) }) describe('name valid', () => { const name = 'juan perez perez' beforeEach(async () => { this.room.user.say('user', `hubot info-rut persona ${name}`) 
await sleep(1000) }) it('should return a array of results with link', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut persona ${name}`], [ 'hubot', 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Más resultados en ' + 'http://localhost:8080/info-rut?name=juan%20perez%20perez&' + 'type=persona' ] ]) }) }) describe('name valid', () => { const name = 'soto' beforeEach(async () => { this.room.user.say('user', `hubot info-rut persona ${name}`) await sleep(500) }) it('should return a array of results', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut persona ${name}`], [ 'hubot', 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)\n' + 'Anonymous (11.111.111-1)' ] ]) }) }) describe('name without results', () => { const name = 'info-rut' beforeEach(async () => { this.room.user.say('user', `hubot info-rut empresa ${name}`) await sleep(500) }) it('should return a empty results', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut empresa ${name}`], ['hubot', `@user no hay resultados para ${name}`] ]) }) }) describe('name invalid', () => { const name = 'asdf' beforeEach(async () => { this.room.user.say('user', `hubot info-rut persona ${name}`) await sleep(500) }) it('should return a empty results', () => { expect(this.room.messages).to.eql([ ['user', `hubot info-rut persona ${name}`], ['hubot', '@user ocurrio un error al consultar el nombre'] ]) }) }) describe('GET /info-rut?name=perez&type=persona', () => { beforeEach(async () => { this.response = await request( 'http://localhost:8080/info-rut?name=juan%20perez%20perez&type=persona' ) }) it('responds with status 200 and results', () => { expect(this.response.statusCode).to.equal(200) expect(this.response.body).to.equal( 'Anonymous (11.111.111-1)<br/>' + 'Anonymous (11.111.111-1)<br/>' + 
'Anonymous (11.111.111-1)<br/>' + 'Anonymous (11.111.111-1)<br/>' + 'Anonymous (11.111.111-1)<br/>' + 'Anonymous (11.111.111-1)' ) }) }) describe('GET /info-rut?name=perez&type=empresa', () => { beforeEach(async () => { this.response = await request(<|fim▁hole|> ) }) it('responds with status 200 and results', () => { expect(this.response.statusCode).to.equal(200) expect(this.response.body).to.equal('Anonymous (11.111.111-1)') }) }) describe('GET /info-rut?name=info-rut&type=persona', () => { beforeEach(async () => { this.response = await request( 'http://localhost:8080/info-rut?name=info-rut&type=persona' ) }) it('responds with status 200 and not results', () => { expect(this.response.statusCode).to.equal(200) expect(this.response.body).to.equal('no hay resultados para info-rut') }) }) describe('GET /info-rut', () => { beforeEach(async () => { this.response = await request('http://localhost:8080/info-rut') }) it('responds with status 200 and not results', () => { expect(this.response.statusCode).to.equal(200) expect(this.response.body).to.equal('faltan los parametros type y name') }) }) describe('GET /info-rut?name=asdf&type=persona', () => { beforeEach(async () => { this.response = await request( 'http://localhost:8080/info-rut?name=asdf&type=persona' ) }) it('responds with status 200 and not results', () => { expect(this.response.statusCode).to.equal(200) expect(this.response.body).to.equal( 'Ocurrio un error al consultar el nombre' ) }) }) })<|fim▁end|>
'http://localhost:8080/info-rut?name=perez&type=empresa'
<|file_name|>napalm_get_facts.py<|end_file_name|><|fim▁begin|>""" (c) 2020 Kirk Byers <ktbyers@twb-tech.com> (c) 2016 Elisa Jasinska <elisa@bigwaveit.org> This file is part of Ansible Ansible is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Ansible is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Ansible. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import unicode_literals, print_function from ansible.module_utils.basic import AnsibleModule # FIX for Ansible 2.8 moving this function and making it private # greatly simplified for napalm-ansible's use def return_values(obj): """Return native stringified values from datastructures. For use with removing sensitive values pre-jsonification.""" yield str(obj) DOCUMENTATION = """ --- module: napalm_get_facts author: "Elisa Jasinska (@fooelisa)" version_added: "2.1" short_description: "Gathers facts from a network device via napalm" description: - "Gathers facts from a network device via the Python module napalm" requirements: - napalm options: hostname: description: - IP or FQDN of the device you want to connect to required: False username: description: - Username required: False password: description: - Password required: False dev_os: description: - OS of the device required: False provider: description: - Dictionary which acts as a collection of arguments used to define the characteristics of how to connect to the device. Note - hostname, username, password and dev_os must be defined in either provider or local param Note - local param takes precedence, e.g. 
hostname is preferred to provider['hostname'] required: False timeout: description: - Time in seconds to wait for the device to respond required: False default: 60 optional_args: description: - Dictionary of additional arguments passed to underlying driver required: False default: None ignore_notimplemented: description: - "Ignores NotImplementedError for filters which aren't supported by the driver. Returns invalid filters in a list called: not_implemented" required: False default: False choices: [True, False] filter: description: - "A list of facts to retreive from a device and provided though C(ansible_facts) The list of facts available are maintained at: http://napalm.readthedocs.io/en/latest/support/ Note- not all getters are implemented on all supported device types" required: False default: ['facts'] args: description: - dictionary of kwargs arguments to pass to the filter. The outer key is the name of the getter (same as the filter) required: False default: None """ EXAMPLES = """ - name: get facts from device napalm_get_facts: hostname: '{{ inventory_hostname }}' username: '{{ user }}' dev_os: '{{ os }}' password: '{{ passwd }}' filter: ['facts'] register: result - name: print data debug: var: result - name: Getters napalm_get_facts: provider: "{{ ios_provider }}" filter: - "lldp_neighbors_detail" - "interfaces" - name: get facts from device napalm_get_facts: hostname: "{{ host }}" username: "{{ user }}" dev_os: "{{ os }}" password: "{{ password }}" optional_args: port: "{{ port }}" filter: ['facts', 'route_to', 'interfaces'] args: route_to: protocol: static destination: 8.8.8.8 """ RETURN = """ changed: description: "whether the command has been executed on the device" returned: always type: bool sample: True ansible_facts: description: "Facts gathered on the device provided via C(ansible_facts)" returned: certain keys are returned depending on filter type: dict """ napalm_found = False try: from napalm import get_network_driver from napalm.base import 
ModuleImportError napalm_found = True except ImportError: pass def main(): module = AnsibleModule( argument_spec=dict( hostname=dict(type="str", required=False, aliases=["host"]), username=dict(type="str", required=False), password=dict(type="str", required=False, no_log=True), provider=dict(type="dict", required=False), dev_os=dict(type="str", required=False), timeout=dict(type="int", required=False, default=60), ignore_notimplemented=dict(type="bool", required=False, default=False), args=dict(type="dict", required=False, default=None), optional_args=dict(type="dict", required=False, default=None), filter=dict(type="list", required=False, default=["facts"]), ), supports_check_mode=True, ) if not napalm_found: module.fail_json(msg="the python module napalm is required") provider = module.params["provider"] or {} no_log = ["password", "secret"] for param in no_log: if provider.get(param): module.no_log_values.update(return_values(provider[param])) if provider.get("optional_args") and provider["optional_args"].get(param): module.no_log_values.update( return_values(provider["optional_args"].get(param)) ) if module.params.get("optional_args") and module.params["optional_args"].get( param ): module.no_log_values.update( return_values(module.params["optional_args"].get(param)) ) # allow host or hostname provider["hostname"] = provider.get("hostname", None) or provider.get("host", None) # allow local params to override provider for param, pvalue in provider.items(): if module.params.get(param) is not False: module.params[param] = module.params.get(param) or pvalue hostname = module.params["hostname"] username = module.params["username"] dev_os = module.params["dev_os"] password = module.params["password"] timeout = module.params["timeout"] filter_list = module.params["filter"] args = module.params["args"] or {} ignore_notimplemented = module.params["ignore_notimplemented"] implementation_errors = [] argument_check = {"hostname": hostname, "username": username, "dev_os": 
dev_os} for key, val in argument_check.items(): if val is None: module.fail_json(msg=str(key) + " is required") if module.params["optional_args"] is None: optional_args = {} else: optional_args = module.params["optional_args"] try: network_driver = get_network_driver(dev_os) except ModuleImportError as e: module.fail_json(msg="Failed to import napalm driver: " + str(e)) try: device = network_driver( hostname=hostname, username=username, password=password, timeout=timeout, optional_args=optional_args, ) device.open() except Exception as e: module.fail_json(msg="cannot connect to device: " + str(e)) # retreive data from device facts = {} NAPALM_GETTERS = [ getter for getter in dir(network_driver) if getter.startswith("get_") ] # Allow NX-OS checkpoint file to be retrieved via Ansible for use with replace config NAPALM_GETTERS.append("get_checkpoint_file") <|fim▁hole|> if getter_function not in NAPALM_GETTERS: module.fail_json(msg="filter not recognized: " + getter) try: if getter_function == "get_checkpoint_file": getter_function = "_get_checkpoint_file" get_func = getattr(device, getter_function) result = get_func(**args.get(getter, {})) facts[getter] = result except NotImplementedError: if ignore_notimplemented: implementation_errors.append(getter) else: module.fail_json( msg="The filter {} is not supported in napalm-{} [get_{}()]".format( getter, dev_os, getter ) ) except Exception as e: module.fail_json( msg="[{}] cannot retrieve device data: ".format(getter) + str(e) ) # close device connection try: device.close() except Exception as e: module.fail_json(msg="cannot close device connection: " + str(e)) new_facts = {} # Prepend all facts with napalm_ for unique namespace for filter_name, filter_value in facts.items(): # Make napalm get_facts to be directly accessible as variables if filter_name == "facts": for fact_name, fact_value in filter_value.items(): napalm_fact_name = "napalm_" + fact_name new_facts[napalm_fact_name] = fact_value new_filter_name = "napalm_" 
+ filter_name new_facts[new_filter_name] = filter_value results = {"ansible_facts": new_facts} if ignore_notimplemented: results["not_implemented"] = sorted(implementation_errors) module.exit_json(**results) if __name__ == "__main__": main()<|fim▁end|>
for getter in filter_list: getter_function = "get_{}".format(getter)
<|file_name|>01c_quick_example.rs<|end_file_name|><|fim▁begin|>#[macro_use] extern crate clap; fn main() { // This example shows how to create an application with several arguments using macro builder. // It combines the simplicity of the from_usage methods and the performance of the Builder Pattern. // // The example below is functionally identical to the one in 01a_quick_example.rs and 01b_quick_example.rs // // Create an application with 5 possible arguments (2 auto generated) and 2 subcommands (1 auto generated) // - A config file // + Uses "-c filename" or "--config filename" // - An output file // + A positional argument (i.e. "$ myapp output_filename") // - A debug flag // + Uses "-d" or "--debug" // + Allows multiple occurrences of such as "-dd" (for vary levels of debugging, as an example) // - A help flag (automatically generated by clap) // + Uses "-h" or "--help" (Only autogenerated if you do NOT specify your own "-h" or "--help")<|fim▁hole|> // > A list flag // = Uses "-l" (usage is "$ myapp test -l" // > A help flag (automatically generated by clap // = Uses "-h" or "--help" (full usage "$ myapp test -h" or "$ myapp test --help") // > A version flag (automatically generated by clap // = Uses "-V" or "--version" (full usage "$ myapp test -V" or "$ myapp test --version") // - A subcommand "help" (automatically generated by clap because we specified a subcommand of our own) // + Used by "$ myapp help" (same functionality as "-h" or "--help") let matches = clap_app!(myapp => (version: "1.0") (author: "Kevin K. <kbknapp@gmail.com>") (about: "Does awesome things") (@arg CONFIG: -c --config +takes_value "Sets a custom config file") (@arg INPUT: +required "Sets the input file to use") (@arg debug: -d ... "Sets the level of debugging information") (@subcommand test => (about: "controls testing features") (version: "1.3") (author: "Someone E. 
<someone_else@other.com>") (@arg verbose: -v --verbose "Print test information verbosely") ) ).get_matches(); // Calling .unwrap() is safe here because "INPUT" is required (if "INPUT" wasn't // required we could have used an 'if let' to conditionally get the value) println!("Using input file: {}", matches.value_of("INPUT").unwrap()); // Gets a value for config if supplied by user, or defaults to "default.conf" let config = matches.value_of("CONFIG").unwrap_or("default.conf"); println!("Value for config: {}", config); // Vary the output based on how many times the user used the "debug" flag // (i.e. 'myapp -d -d -d' or 'myapp -ddd' vs 'myapp -d' match matches.occurrences_of("debug") { 0 => println!("Debug mode is off"), 1 => println!("Debug mode is kind of on"), 2 => println!("Debug mode is on"), 3 | _ => println!("Don't be crazy"), } // You can information about subcommands by requesting their matches by name // (as below), requesting just the name used, or both at the same time if let Some(matches) = matches.subcommand_matches("test") { if matches.is_present("verbose") { println!("Printing verbosely..."); } else { println!("Printing normally..."); } } // more program logic goes here... }<|fim▁end|>
// - A version flag (automatically generated by clap) // + Uses "-V" or "--version" (Only autogenerated if you do NOT specify your own "-V" or "--version") // - A subcommand "test" (subcommands behave like their own apps, with their own arguments // + Used by "$ myapp test" with the following arguments
<|file_name|>instr_mulps.rs<|end_file_name|><|fim▁begin|>use ::{BroadcastMode, Instruction, MaskReg, MergeMode, Mnemonic, OperandSize, Reg, RoundingMode};<|fim▁hole|>use ::Reg::*; use ::RegScale::*; use ::test::run_test; #[test] fn mulps_1() { run_test(&Instruction { mnemonic: Mnemonic::MULPS, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM7)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 89, 223], OperandSize::Dword) } #[test] fn mulps_2() { run_test(&Instruction { mnemonic: Mnemonic::MULPS, operand1: Some(Direct(XMM6)), operand2: Some(Indirect(EBX, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 89, 51], OperandSize::Dword) } #[test] fn mulps_3() { run_test(&Instruction { mnemonic: Mnemonic::MULPS, operand1: Some(Direct(XMM3)), operand2: Some(Direct(XMM1)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 89, 217], OperandSize::Qword) } #[test] fn mulps_4() { run_test(&Instruction { mnemonic: Mnemonic::MULPS, operand1: Some(Direct(XMM1)), operand2: Some(IndirectDisplaced(RCX, 674850177, Some(OperandSize::Xmmword), None)), operand3: None, operand4: None, lock: false, rounding_mode: None, merge_mode: None, sae: false, mask: None, broadcast: None }, &[15, 89, 137, 129, 101, 57, 40], OperandSize::Qword) }<|fim▁end|>
use ::RegType::*; use ::instruction_def::*; use ::Operand::*;
<|file_name|>3160-spec.ts<|end_file_name|><|fim▁begin|>import { Chart } from '../../src'; import { createDiv, removeDom } from '../util/dom'; describe('#3160', () => { const div = createDiv(); const chart = new Chart({ container: div, // 指定图表容器 ID height: 300, // 指定图表高度 autoFit: true, }); chart.coordinate('theta'); const data = [ { type: '1', item: 1, value: 0.1 }, { type: '2', item: 4, value: 0.4 }, { type: '1.3', item: 2, value: 0.2 }, { type: '2.5', item: 3, value: 0.3 }, ]; chart.data(data); chart .interval() .position('value') .color('type') .label('type', { layout: { type: 'pie-spider' } }) .adjust('stack'); chart.render(); it('mapping color to linear scale, label render normal', () => { const labels = chart.geometries[0].labelsContainer.getChildren(); expect(labels.length).toBe(4); }); it('spider label, render normal', () => { chart.clear(); chart .interval() .position('value')<|fim▁hole|> chart.render(); const labels = chart.geometries[0].labelsContainer.getChildren(); const label1 = labels.find((l) => l.get('id') === `1-${data[0].type}`); const label2 = labels.find((l) => l.get('id') === `1-${data[1].type}`); const label3 = labels.find((l) => l.get('id') === `1-${data[2].type}`); const label4 = labels.find((l) => l.get('id') === `1-${data[3].type}`); // @ts-ignore expect(label1.getChildren()[0].getCanvasBBox().minX).toEqual(label2.getChildren()[0].getCanvasBBox().minX); // @ts-ignore expect(label3.getChildren()[0].getCanvasBBox().maxX).toEqual(label4.getChildren()[0].getCanvasBBox().maxX); }); afterAll(() => { chart.destroy(); removeDom(div); }); });<|fim▁end|>
.color('type') .label('type', { layout: { type: 'pie-spider' } }) .adjust('stack');
<|file_name|>base.py<|end_file_name|><|fim▁begin|>"""The base command.""" from datetime import datetime from json import dumps from watches.util import ESClientProducer class Base(object): """A base command.""" TEXT_PLAIN = 'plain/text' JSON_APPLICATION = 'application/json' TRANSFORM_PARAM = '--transform' TIMESTAMP_PARAM = '--timestamp' TRANSFORM_VALUE_NESTED = 'nested' TIMESTAMP_KEY = 'timestamp' _ALL_KEYWORD = '_all' _ALL_INDICES_PLACEHOLDER = 'indices_summary' def __init__(self, options, *args, **kwargs): self.options = options self.args = args self.kwargs = kwargs if self.options["--verbose"]: print('Supplied options:', dumps(self.options, indent=2, sort_keys=True)) self.es = ESClientProducer.create_client(self.options) def run(self): # Not sure if this is the best way to convert localtime to UTC in ISO 8601 format ts = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ') data = self.getData() # Treat JSON_APPLICATION response differently than TEXT_PLAIN # JSON data can be injected timestamp and formatted if self.JSON_APPLICATION == self.getResponseContentType(): if self.options[self.TIMESTAMP_PARAM]: data[self.TIMESTAMP_KEY] = ts if self.options[self.TRANSFORM_PARAM]: data = self.transformData(data) self.printData(data) def printData(self, data): """Print the data to the output. Depending on content type the data can be formatted differently. Commands can also override this method is special treatment is needed, for example "just_*" commands. """ if self.JSON_APPLICATION == self.getResponseContentType(): if self.options["-l"]: print(dumps(data, default=lambda x: str(x))) else: print(dumps(data, indent=2, sort_keys=False, default=lambda x: str(x))) else: print(data) def getData(self): raise NotImplementedError('Method getData() not implemented') def getResponseContentType(self): """Response MIME type. 
By default we assume JSON, make sure to override if needed.""" return self.JSON_APPLICATION def transformData(self, data): """ Data can be transformed before sending to client. Currently, the only transformation type implemented is 'nested'. :param data: :return: """ transform = self.options[self.TRANSFORM_PARAM] if transform: if transform == self.TRANSFORM_VALUE_NESTED: return self.transformNestedData(data) else: raise RuntimeError('Unsupported transform type') else: return data def transformNestedData(self, data): """ If subclass supports 'nested' transformation then it needs to implement this method and it can use and override provided helper methods. By default the data is returned unchanged. :param data: :return: """ return data def nestedNodes(self, nodes): """ Helper method to transform nodes object. Subclass can override this if the default behaviour does not apply. :param nodes: :return: """ if isinstance(nodes, dict): nodesArray = [] for key in nodes: n = nodes[key] n['node'] = key nodesArray.append(n) return nodesArray return nodes def nestedNodesShardsArray(self, nodes): """ Helper method to transform nodes shards array. Subclass can override this if the default behaviour does not apply. :param nodes:<|fim▁hole|> if isinstance(nodes, dict): shardsArray = [] for node in nodes: if isinstance(nodes[node], list): for shard in nodes[node]: # shard['node'] = node # node value ^^ is already there in the dict shardsArray.append(shard) else: raise RuntimeError('shards not in expected format') else: raise RuntimeError('shards not in expected format') return shardsArray def nestedIndices(self, indices): """ Helper method to transform indices object. Subclass can override this if the default behaviour does not apply. 
:param indices: :return: """ if isinstance(indices, dict): indicesArray = [] for key in indices: i = indices[key] i['index'] = key indicesArray.append(i) return indicesArray else: return indices def nestedShards(self, shards): """ Helper method to transform shards object. Subclass can override this if the default behaviour does not apply. :param shards: :return: """ if isinstance(shards, dict): shardsArray = [] for key in shards: s = shards[key] # convert shard id to number (this is how other admin REST APIs represent it) s['shard'] = int(key) shardsArray.append(s) return shardsArray else: return shards def nestedShardsArray(self, shards): """ Helper method to transform shards array. This is useful in case REST API returns shards data in an array. :param shards: :return: """ shardsArray = [] if isinstance(shards, dict): for key in shards: if isinstance(shards[key], list): for shard in shards[key]: shard['shard'] = int(key) shardsArray.append(shard) else: raise RuntimeError('shards not in expected format') else: raise RuntimeError('shards not in expected format') return shardsArray def nestedIndicesAndShards(self, indices): """ Helper method to transform indices and shards. This method is designed for cases where index contains 'shards' key as the top level field. :param indices: :return: """ indices = self.nestedIndices(indices) for index in indices: if isinstance(index, dict): if 'shards' in index: index['shards'] = self.nestedShards(index['shards']) return indices def check_filter_path(self, args): if self.options['--filter_path'] and self.options["--filter_path"] is not None and len(self.options["--filter_path"]) > 0: args.update({ 'filter_path': self.options['--filter_path'] })<|fim▁end|>
:return: """
<|file_name|>logs.js<|end_file_name|><|fim▁begin|>var util = require( '../../utils/util.js' ) Page( { data: { projects: [ { name: 'FinalScheduler(终极排班系统)', git: "https://github.com/giscafer/FinalScheduler" }, { name: 'MoveSite(电影狙击手)', git: "https://github.com/giscafer/moviesite" }, { name: 'Ponitor(价格监控)', git: "https://github.com/giscafer/Ponitor" }, { name: 'hexo-theme-cafe(Hexo博客主题)', git: "https://github.com/giscafer/hexo-theme-cafe" }, { name: 'ife-course-demo(百度前端学院)', git: "https://github.com/giscafer/ife-course-demo" } ] }, onReady: function() { this.clickName(); }, clickName: function( e ) { var pros = this.data.projects; console.log( "#########################################################################################################" )<|fim▁hole|> console.log( "##-----------------------------------------------------------------------------------------------------##" ) pros.forEach( function( item, index ) { console.log( "## ", item.name + ":" + item.git ) }) console.log( "## ##" ) console.log( "#########################################################################################################" ) } })<|fim▁end|>
console.log( "## 其他项目 ##" )
<|file_name|>test-garbage.js<|end_file_name|><|fim▁begin|>const times = require('lodash/times'); const {GameEngine} = require('../../lib/common/engine'); const {R, G, B, _} = require('../../lib/common/panel-league/util'); module.exports.testAdd = ((test) => { const game = new GameEngine(); game.addEvent({ time: 0, type: 'addGarbage', slab: {x: 0, width: game.width, height: 1} }); game.step(); const state = game.step(); test.expect(1); test.strictEqual(state.garbage.length, 1, "No garbage slab found"); test.done(); }); module.exports.testFit = ((test) => { const game = new GameEngine({initialRows: 0, width: 6}); game.addEvent({ time: 0, type: 'addGarbage', slab: {x: 0, width: 2, height: 1} }); game.addEvent({ time: 1, type: 'addGarbage', slab: {x: 2, width: 2, height: 2} }); times(100, () => game.step()); state = game.step(); test.expect(6); for (let i = 0; i < 4; ++i) { test.ok(state.blocks[i + state.width * (state.height - 1)].color, "Empty block found"); } for (let i = 4; i < 6; ++i) { test.ok(!state.blocks[i + state.width * (state.height - 1)].color, "Non-empty block found"); } test.done(); }); module.exports.testOverhang = ((test) => { const game = new GameEngine({initialRows: 0}); game.addEvent({ time: 0, type: 'addGarbage', slab: {x: 0, width: 1, height: 2} }); game.addEvent({ time: 1, type: 'addGarbage', slab: {x: 0, width: game.width, height: 2} }); times(100, () => game.step()); state = game.step(); test.expect(2 * game.width - 1); for (let i = 1; i < game.width; ++i) { test.ok(!state.blocks[i + state.width * (state.height - 1)].color, "Non-empty block found"); } for (let i = 0; i < game.width; ++i) { test.ok(state.blocks[i + state.width * (state.height - 3)].color, "Empty block found"); } test.done(); }); module.exports.testShock = ((test) => { setup = [ _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, _, G, R, G, G, B, B, ]; const game = new GameEngine({width: 6, height: 5, colors: setup}); game.addEvent({ time: 0, type: 
'addGarbage',<|fim▁hole|> slab: {x: 0, width: game.width, height: 2} }); game.addEvent({ time: 1, type: 'addGarbage', slab: {x: 0, width: game.width, height: 2} }); game.addEvent({ time: 101, type: 'swap', index: 24 }); test.expect(2 + 2 * game.width); times(99, () => game.step()); let state = game.step(); let numGarbage = state.blocks.reduce((sum, block) => sum + block.garbage, 0); test.strictEqual(numGarbage, game.width * 4, 'Not enough garbage found'); times(99, () => game.step()); state = game.step(); numGarbage = state.blocks.reduce((sum, block) => sum + block.garbage, 0); test.strictEqual(numGarbage, game.width * 2, 'Garbage not released correctly'); // We don't control RNG here so prepare for rare matches. const shift = game.width * !state.blocks[0].color; for (let i = 0; i < game.width; ++i) { test.ok(state.blocks[i + shift].garbage, "Garbage not preserved"); test.ok(!state.blocks[i + game.width + shift].garbage, "Garbage not released"); } test.done(); });<|fim▁end|>
<|file_name|>baseAnimationNode.py<|end_file_name|><|fim▁begin|>from RGT.XML.SVG.basicSvgNode import BasicSvgNode from RGT.XML.SVG.Attribs.conditionalProcessingAttributes import ConditionalProcessingAttributes from RGT.XML.SVG.Attribs.xlinkAttributes import XlinkAttributes from RGT.XML.SVG.Attribs.animationTimingAttributes import AnimationTimingAttributes class BaseAnimationNode(BasicSvgNode): <|fim▁hole|> def __init__(self, ownerDoc, tagName): BasicSvgNode.__init__(self, ownerDoc, tagName) ConditionalProcessingAttributes.__init__(self) XlinkAttributes.__init__(self) AnimationTimingAttributes.__init__(self) self._allowedSvgChildNodes.update(self.SVG_GROUP_DESCRIPTIVE_ELEMENTS) def setExternalResourcesRequired(self, data): allowedValues = ['true', 'false'] if data is not None: if data not in allowedValues: values = '' for value in allowedValues: values += value + ', ' values = values[0: len(values) - 2] raise ValueError('Value not allowed, only ' + values + 'are allowed') else: self._setNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED, data) def getExternalResourcesRequired(self): node = self._getNodeAttribute(self.ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED) if node is not None: return node.nodeValue return None<|fim▁end|>
ATTRIBUTE_EXTERNAL_RESOURCES_REQUIRED = 'externalResourcesRequired'
<|file_name|>jquery.storage-manager.js<|end_file_name|><|fim▁begin|>;(function (window, document) { 'use strict'; /** * Global storage manager * * The storage manager provides a unified way to store items in the localStorage and sessionStorage. * It uses a polyfill that uses cookies as a fallback when no localStorage or sessionStore is available or working. * * @example * * Saving an item to localStorage: * * StorageManager.setItem('local', 'key', 'value'); * * Retrieving it: * * var item = StorageManager.getItem('local', 'key'); // item === 'value' * * Basically you can use every method of the Storage interface (http://www.w3.org/TR/webstorage/#the-storage-interface) * But notice that you have to pass the storage type ('local' | 'session') in the first parameter for every call. * * @example * * Getting the localStorage/sessionStorage (polyfill) object * * var localStorage = StorageManager.getStorage('local'); * var sessionStorage = StorageManager.getStorage('session'); * * You can also use its shorthands: * * var localStorage = StorageManager.getLocalStorage(); * var sessionStorage = StorageManager.getSessionStorage(); */ window.StorageManager = (function () { var storage = { local: window.localStorage, session: window.sessionStorage }, p; /** * Helper function to detect if cookies are enabled. * @returns {boolean} */ function hasCookiesSupport() { // if cookies are already present assume cookie support if ('cookie' in document && (document.cookie.length > 0)) { return true; } document.cookie = 'testcookie=1;'; var writeTest = (document.cookie.indexOf('testcookie') !== -1); document.cookie = 'testcookie=1' + ';expires=Sat, 01-Jan-2000 00:00:00 GMT'; return writeTest; } // test for safari's "QUOTA_EXCEEDED_ERR: DOM Exception 22" issue. 
for (p in storage) { if (!storage.hasOwnProperty(p)) { continue; } try { storage[p].setItem('storage', ''); storage[p].removeItem('storage'); } catch (err) { } } // Just return the public API instead of all available functions return { /** * Returns the storage object/polyfill of the given type. * * @returns {Storage|StoragePolyFill} */ getStorage: function (type) { return storage[type]; }, /** * Returns the sessionStorage object/polyfill. * * @returns {Storage|StoragePolyFill} */ getSessionStorage: function () { return this.getStorage('session'); }, /** * Returns the localStorage object/polyfill. * * @returns {Storage|StoragePolyFill} */ getLocalStorage: function () { return this.getStorage('local'); }, /** * Calls the clear() method of the storage from the given type. * * @param {String} type */ clear: function (type) { this.getStorage(type).clear(); }, /** * Calls the getItem() method of the storage from the given type. * * @param {String} type * @param {String} key * @returns {String} */ getItem: function (type, key) { return this.getStorage(type).getItem(key); }, /** * Calls the key() method of the storage from the given type. * * @param {String} type * @param {Number|String} i * @returns {String} */ key: function (type, i) { return this.getStorage(type).key(i); }, /** * Calls the removeItem() method of the storage from the given type. * * @param {String} type * @param {String} key */ removeItem: function (type, key) { this.getStorage(type).removeItem(key); }, /** * Calls the setItem() method of the storage from the given type. * * @param {String} type * @param {String} key * @param {String} value */ setItem: function (type, key, value) { this.getStorage(type).setItem(key, value); }, /**<|fim▁hole|> * Helper function call to check if cookies are enabled. */ hasCookiesSupport: hasCookiesSupport() }; })(); })(window, document);<|fim▁end|>
<|file_name|>ConsumerMovieRibbonApplication.java<|end_file_name|><|fim▁begin|>package com.jack.cloud.movie; <|fim▁hole|>import org.springframework.boot.autoconfigure.SpringBootApplication; import org.springframework.cloud.client.loadbalancer.LoadBalanced; import org.springframework.cloud.netflix.eureka.EnableEurekaClient; import org.springframework.cloud.netflix.ribbon.RibbonClient; import org.springframework.context.annotation.Bean; import org.springframework.web.client.RestTemplate; @SpringBootApplication @EnableEurekaClient @RibbonClient(name = "provider-user", configuration = TestConfiguration.class) public class ConsumerMovieRibbonApplication { @Bean @LoadBalanced // ribbon 注解 public RestTemplate restTemplate(){ return new RestTemplate(); } public static void main(String[] args) { SpringApplication.run(ConsumerMovieRibbonApplication.class, args); } }<|fim▁end|>
import com.jack.cloud.config.TestConfiguration; import org.springframework.boot.SpringApplication;
<|file_name|>templates.js<|end_file_name|><|fim▁begin|>(function() { const MAX_LINE_CHARS = 4000; const RE_WS = /^\s*$/; this.search_panel = function(search, type, handler) { return ( [ ['div', this['advanced_' + type + '_search'](search), 'class', 'advanced-search-controls'], ['div', ['div', 'class', 'panel-search mono'], 'class', 'panel-search-container', 'handler', handler], ]); }; this.searchbar_content = function(search) { var content = this.filters(search.controls); content[0] = 'div'; content.push('class', 'advanced-panel-search'); return content; }; this._search_input = function(name, type, value, label, is_selected, is_disabled, title) { var input = ['input', 'type', type, 'value', value, 'name', name]; if (is_selected) { input.push('checked', 'checked'); } if (is_disabled) { input.push('disabled', 'disabled'); } if (title) { input.push('title', title); } var ret = ['label', input, label]; if (title) { ret.push('title', title); } return ret; }; this.advanced_search_field = function(search) { return ( ['div', ['table', ['tr', ['td', this.default_filter(search.controls[0])], ['td', ['span', '\u00A0', 'class', 'search-info-badge'], 'width', '1px'], ['td', this.search_control(search.controls[1]), 'width', '1px'], ['td', this.search_control(search.controls[2]), 'width', '1px']], 'class', 'advanced-search-table'], 'class', 'advanced-search']); }; this.advanced_dom_search = function(search) { return ( [ this.advanced_search_field(search), ['div', ['form', this._search_input('dom-search-type', 'radio', DOMSearch.PLAIN_TEXT, ui_strings.S_LABEL_SEARCH_TYPE_TEXT, DOMSearch.PLAIN_TEXT == search.search_type), this._search_input('dom-search-type', 'radio', DOMSearch.REGEXP, ui_strings.S_LABEL_SEARCH_TYPE_REGEXP, DOMSearch.REGEXP == search.search_type), this._search_input('dom-search-type', 'radio', DOMSearch.CSS, ui_strings.S_LABEL_SEARCH_TYPE_CSS, DOMSearch.CSS == search.search_type), this._search_input('dom-search-type', 'radio', DOMSearch.XPATH, 
ui_strings.S_LABEL_SEARCH_TYPE_XPATH, DOMSearch.XPATH == search.search_type), this._search_input('dom-search-ignore-case', 'checkbox', 'ignore-case', ui_strings.S_LABEL_SEARCH_FLAG_IGNORE_CASE, search.ignore_case, !search.is_token_search), 'handler', 'dom-search-type-changed', ], ], ]); }.bind(this); this.advanced_js_search = function(search) { return ( [ this.advanced_search_field(search), ['div', ['form', this._search_input('js-search-type', 'checkbox', 'reg-exp', ui_strings.S_LABEL_SEARCH_TYPE_REGEXP, TextSearch.REGEXP == search.search_type), this._search_input('js-search-ignore-case', 'checkbox', 'ignore-case', ui_strings.S_LABEL_SEARCH_FLAG_IGNORE_CASE, search.ignore_case), this._search_input('js-search-all-files', 'checkbox', 'search-all-files', ui_strings.S_LABEL_SEARCH_ALL_FILES, search.search_all_files), this._search_input('js-search-injected-scripts', 'checkbox', 'search-injected-scripts', ui_strings.S_LABEL_SEARCH_INJECTED_SCRIPTS, search.search_injected_scripts, !search.search_all_files, ui_strings.S_LABEL_SEARCH_INJECTED_SCRIPTS_TOOLTIP), 'handler', 'js-search-type-changed', ], ], ]); }.bind(this); this.js_search_window = function() { return ['div', 'class', 'js-search-results', 'handler', 'show-script']; }; this.js_search_results = function(results, result_count, max_count) { var ret = this._search_result_init(result_count, max_count); var div = null; for (var rt_id in results) { div = ['div']; div.push(this._search_result_header(rt_id)); div.extend(results[rt_id].map(this.search_result_script, this)); div.push('class', 'js-search-results-runtime'); ret.push(div); if (this._js_search_ctx.count > this._js_search_ctx.max_count) { break; } } return ret; }; this.js_search_result_single_file = function(script, result_count, max_count) { var ret = this._search_result_init(result_count, max_count); ret.push(this.search_result_script(script)); return ret; }; this._search_result_init = function(result_count, max_count) { var ret = ['div']; this._js_search_ctx 
= {count: 0, max_count: max_count}; if (result_count > max_count) { ret.push(['div', ['div', ui_strings.S_INFO_TOO_MANY_SEARCH_RESULTS .replace('%(COUNT)s', result_count) .replace('%(MAX)s', max_count), 'class', 'info-box'], 'class', 'info-box-container']); } return ret; }; this._search_result_header = function(rt_id) { var runtime = window.runtimes.getRuntime(rt_id); var display_uri = runtime && helpers.shortenURI(runtime.uri); return ['h2', runtime && (runtime.title || display_uri.uri) || '']; }; this._format_line_no = function(line_no) { line_no = String(line_no); var padding = [' ', ' ', ' ', ' ', ' ', ' ']; return (padding[line_no.length] || '') + line_no; }; this.resource_link = function(url, text, line) { var ret = ["span", text, "handler", "open-resource-tab", "data-resource-url", url, "class", "internal-link"]; if (line) { ret.push("data-resource-line-number", String(line)); } return ret; }; this.search_result_script = function(script, show_script_uri) { var ret = ['div']; if (this._js_search_ctx.count < this._js_search_ctx.max_count) { if (typeof show_script_uri != 'boolean' || show_script_uri) { var h3 = ['h3']; if (script.uri) { h3.push(this.resource_link(script.uri, script.uri), ':'); } else if (script.script_type == "inline") { var rt = window.runtimes.getRuntime(script.runtime_id); if (rt && rt.uri) { h3.push(script.script_type + " ("); h3.push(this.resource_link(rt.uri, rt.uri)); h3.push("):"); } } else { h3.push(script.script_type + ":"); } ret.push(h3); } var line = 0, cur_line = 0, script_data = '', script_tmpl = null, cur = null; for (var i = 0; i < script.line_matches.length; i++) { if (this._js_search_ctx.count++ < this._js_search_ctx.max_count) { cur_line = script.line_matches[i]; if (cur_line != line) { line = cur_line; script_data = script.script_data.slice(script.line_arr[line - 1], script.line_arr[line]); script_tmpl = this.highlight_js_source(script_data, null, script.state_arr[line - 1], ['code'], true); if (script_tmpl.length == 2 && 
RE_WS.test(script_tmpl[1])) { script_tmpl[1] += "\u00a0";<|fim▁hole|> { script_tmpl.push(['span', '…', 'class', 'match-following-line']) } ret.push(['div', ['span', String(line), 'class', 'line-no'], script_tmpl, 'data-line-no', String(line), 'class', 'search-match js-search']); } } } ret.push('class', 'js-search-results-script js-source', 'data-script-id', String(script.script_id)); } return ret; }; }).apply(window.templates || (window.templates = {}));<|fim▁end|>
} if (script.line_offsets_length[i] && script.line_offsets[i] + script.line_offsets_length[i] > script.get_line_length(line))
<|file_name|>ip.go<|end_file_name|><|fim▁begin|>package packngo import "fmt" const ipBasePath = "/ips" // IPService interface defines available IP methods type IPService interface { Assign(deviceID string, assignRequest *IPAddressAssignRequest) (*IPAddress, *Response, error) Unassign(ipAddressID string) (*Response, error) Get(ipAddressID string) (*IPAddress, *Response, error) } // IPAddress represents a ip address type IPAddress struct { ID string `json:"id"` Address string `json:"address"` Gateway string `json:"gateway"` Network string `json:"network"` AddressFamily int `json:"address_family"` Netmask string `json:"netmask"` Public bool `json:"public"` Cidr int `json:"cidr"` AssignedTo map[string]string `json:"assigned_to"` Created string `json:"created_at,omitempty"` Updated string `json:"updated_at,omitempty"` Href string `json:"href"` Facility Facility `json:"facility,omitempty"` } // IPAddressAssignRequest represents the body if a ip assign request type IPAddressAssignRequest struct { Address string `json:"address"` } func (i IPAddress) String() string { return Stringify(i) } // IPServiceOp implements IPService type IPServiceOp struct { client *Client } // Get returns IpAddress by ID func (i *IPServiceOp) Get(ipAddressID string) (*IPAddress, *Response, error) { path := fmt.Sprintf("%s/%s", ipBasePath, ipAddressID) req, err := i.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err } ip := new(IPAddress) resp, err := i.client.Do(req, ip) if err != nil { return nil, resp, err } return ip, resp, err } // Unassign unassigns an IP address record. This will remove the relationship between an IP // and the device and will make the IP address available to be assigned to another device. 
func (i *IPServiceOp) Unassign(ipAddressID string) (*Response, error) { path := fmt.Sprintf("%s/%s", ipBasePath, ipAddressID) req, err := i.client.NewRequest("DELETE", path, nil) if err != nil { return nil, err } resp, err := i.client.Do(req, nil) return resp, err } // Assign assigns an IP address to a device. The IP address must be in one of the IP ranges assigned to the device’s project. func (i *IPServiceOp) Assign(deviceID string, assignRequest *IPAddressAssignRequest) (*IPAddress, *Response, error) { path := fmt.Sprintf("%s/%s%s", deviceBasePath, deviceID, ipBasePath) req, err := i.client.NewRequest("POST", path, assignRequest) ip := new(IPAddress) resp, err := i.client.Do(req, ip) if err != nil { return nil, resp, err } return ip, resp, err } // IP RESERVATIONS API // IPReservationService interface defines available IPReservation methods type IPReservationService interface { List(projectID string) ([]IPReservation, *Response, error) RequestMore(projectID string, ipReservationReq *IPReservationRequest) (*IPReservation, *Response, error) Get(ipReservationID string) (*IPReservation, *Response, error) Remove(ipReservationID string) (*Response, error) } // IPReservationServiceOp implements the IPReservationService interface type IPReservationServiceOp struct { client *Client } // IPReservationRequest represents the body of a reservation request type IPReservationRequest struct { Type string `json:"type"` Quantity int `json:"quantity"` Comments string `json:"comments"` } // IPReservation represent an IP reservation for a single project type IPReservation struct { ID string `json:"id"` Network string `json:"network"` Address string `json:"address"` AddressFamily int `json:"address_family"` Netmask string `json:"netmask"` Public bool `json:"public"` Cidr int `json:"cidr"` Management bool `json:"management"` Manageable bool `json:"manageable"` Addon bool `json:"addon"` Bill bool `json:"bill"` Assignments []map[string]string `json:"assignments"` Created string 
`json:"created_at,omitempty"` Updated string `json:"updated_at,omitempty"` Href string `json:"href"` } type ipReservationRoot struct { IPReservations []IPReservation `json:"ip_addresses"` } // List provides a list of IP resevations for a single project. func (i *IPReservationServiceOp) List(projectID string) ([]IPReservation, *Response, error) { path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath) req, err := i.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err } reservations := new(ipReservationRoot) resp, err := i.client.Do(req, reservations) if err != nil { return nil, resp, err } return reservations.IPReservations, resp, err } // RequestMore requests more IP space for a project in order to have additional IP addresses to assign to devices func (i *IPReservationServiceOp) RequestMore(projectID string, ipReservationReq *IPReservationRequest) (*IPReservation, *Response, error) { path := fmt.Sprintf("%s/%s%s", projectBasePath, projectID, ipBasePath) req, err := i.client.NewRequest("POST", path, &ipReservationReq) if err != nil {<|fim▁hole|> } ip := new(IPReservation) resp, err := i.client.Do(req, ip) if err != nil { return nil, resp, err } return ip, resp, err } // Get returns a single IP reservation object func (i *IPReservationServiceOp) Get(ipReservationID string) (*IPReservation, *Response, error) { path := fmt.Sprintf("%s/%s", ipBasePath, ipReservationID) req, err := i.client.NewRequest("GET", path, nil) if err != nil { return nil, nil, err } reservation := new(IPReservation) resp, err := i.client.Do(req, reservation) if err != nil { return nil, nil, err } return reservation, resp, err } // Remove removes an IP reservation from the project. 
func (i *IPReservationServiceOp) Remove(ipReservationID string) (*Response, error) { path := fmt.Sprintf("%s/%s", ipBasePath, ipReservationID) req, err := i.client.NewRequest("DELETE", path, nil) if err != nil { return nil, err } resp, err := i.client.Do(req, nil) if err != nil { return nil, err } return resp, err }<|fim▁end|>
return nil, nil, err
<|file_name|>relayUnstableBatchedUpdates.js<|end_file_name|><|fim▁begin|><|fim▁hole|>/** * Copyright 2013-present, Facebook, Inc. * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. * * @providesModule relayUnstableBatchedUpdates * * @format */ 'use strict'; module.exports = require('react-dom').unstable_batchedUpdates;<|fim▁end|>
<|file_name|>select.py<|end_file_name|><|fim▁begin|>""" Functions to spot hemy regions """ import os.path as op import datetime from collections import defaultdict, Counter from tabulate import tabulate from scipy import stats import numpy as np <|fim▁hole|>import vcf from bcbio.distributed.transaction import file_transaction from bcbio.provenance import do from bcbio.utils import append_stem, file_exists, splitext_plus, safe_makedir from bcbio.variation.vcfutils import bgzip_and_index def is_good_cpg(frmt, record): alt_depth = sum(map(int, frmt['DP4'].split(','))[2:]) ref_depth = sum(map(int, frmt['DP4'].split(','))[:2]) if record[6] != "PASS": return False if int(ref_depth) > 3 and int(alt_depth) > 3: return True def _genotype(alleles): if alleles[0] == alleles[1]: return "homoz" else: return "heteroz" def is_good_het(frmt, record): depth = sum(map(int, frmt['DP4'].split(','))[2:]) # if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 3 and depth > 3 and record[6] == "PASS": if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 3: return True def _get_strand(record): return record[7].split(";")[0].split("=")[1] def _snp_veracity_both_strand(sense, anti): """ Only if SNPs is detected in both strand with two alleles """ gen_plus = sense.keys() gen_minus = anti.keys() allels1 = [g.split(":")[0].split("/")[0] for g in gen_plus] allels2 = [g.split(":")[0] for g in gen_minus] if len(allels1) == len(allels2): return True def _read_pairs(gt): # print "read_pairs %s" % gt gt1 = gt.split(":")[0].split("/")[0] if gt.find("/") > -1: gt2 = gt.split(":")[0].split("/")[1] return (gt1, gt2) def _get_total(gts, total): return [total[_read_pairs(gts[0][1])[0]], total[_read_pairs(gts[1][1])[0]]] def _top_gt(gts): total = Counter() first = _read_pairs(gts[0][1]) top = None for gt in gts: pair = _read_pairs(gt[1]) if pair: if pair[0] != first[0] and pair[1] != first[1]: top = [gts[0], gt] total[pair[0]] += gt[0] if top: total = _get_total(top, 
total) return top, total return False, False def _above_prop(x, s, p=0.8): pvals = [] for p in [0.8, 0.9, 1.0]: pvals.append(stats.binom_test(x, s, p)) return max(pvals) > 0.70 def _prop(gt): sense_sorted = sorted(zip(gt.values(), gt.keys()), reverse=True) top_2, total = _top_gt(sense_sorted) # print "top_2 %s totla %s" % (top_2, total) if top_2: gt2_prop = float(top_2[1][0]) / total[1] gt1_prop = float(top_2[0][0]) / total[0] table = np.array([[top_2[1][0], total[1] - top_2[1][0]], [total[0] - top_2[0][0], top_2[0][0]]]) # print "table\n%s\ntotals %s %s" % (table, gt1_prop, gt2_prop) # print stats.fisher_exact(table) if stats.fisher_exact(table)[1] < 0.05 and _above_prop(top_2[0][0], total[0]) and _above_prop(top_2[1][0], total[1]): return True return False def _valid_test(link, link_as): """ Only if top2 associated nt are equally represented """ # print "link %s %s" % (link, link_as) if len(link) > 1: sense_pval = _prop(link) else: sense_pval = False # if len(link_as) > 1: # anti_pval = _prop(link_as) # else: # anti_pval = True if sense_pval: return True return False def _valid(link, link_as): """ Only if one snp allele is associated with the Cu/Cm """ if len(link) == 2: gen = link.keys() allels1 = gen[0].split(":")[0].split("/") allels2 = gen[1].split(":")[0].split("/") if allels1[0] != allels2[0] and allels1[1] != allels2[1] and _snp_veracity(link, link_as): return True def _format(link): """ Give nice format to dict with alleles and reads supporting """ cell = '' for allele in link: cell += "%s=%s;" % (allele, link[allele]) return cell def _change_to_cpg(line, tag): return line.replace(tag, "CpG%s" % tag).strip() def _change_to_snp(line, tag): return line.replace(tag, "SNP%s" % tag).strip() def _create_vcf_header(vcf_file, out_handle): """ Create header for final vcf """ print >>out_handle, "##fileformat=VCFv4.1" print >>out_handle, "##fileData=%s" % datetime.date.today().strftime('%y%m%d') with open(vcf_file) as in_handle: for line in in_handle: if 
line.startswith("##reference"): print >>out_handle, line.strip() if line.startswith("##contig"): print >>out_handle, line.strip() if line.startswith("#CHROM"): print >>out_handle, line.strip() if line.startswith("##BisSNP"): print >>out_handle, line.strip() if line.startswith("##FILTER"): print >>out_handle, line.strip() if line.startswith("##FORMAT=<ID=GT"): print >>out_handle, line.strip() if line.startswith("##INFO=<ID=DP"): print >>out_handle, line.strip() if line.startswith("##FORMAT=<ID=BRC6"): print >>out_handle, _change_to_cpg(line, 'BRC6') print >>out_handle, _change_to_snp(line, 'BRC6') if line.startswith("##FORMAT=<ID=CM"): print >>out_handle, _change_to_cpg(line, 'CM') print >>out_handle, _change_to_snp(line, 'CM') if line.startswith("##FORMAT=<ID=CU"): print >>out_handle, _change_to_cpg(line, 'CU') print >>out_handle, _change_to_snp(line, 'CU') if line.startswith("##FORMAT=<ID=CP"): print >>out_handle, _change_to_cpg(line, 'CP') print >>out_handle, _change_to_snp(line, 'CP') if line.startswith("##FORMAT=<ID=DP"): print >>out_handle, _change_to_cpg(line, 'DP') print >>out_handle, _change_to_snp(line, 'DP') if line.startswith("##INFO=<ID=CS"): print >>out_handle, line.strip() def _get_info(info, tag): """ get value from info vcf field """ return next((value.split("=")[1] for value in info.split(";") if value.startswith(tag)), None) def _get_format(header, frmt): """ get format field in dict instance """ frmt = dict(zip(header.split(":"), frmt.split(':'))) return frmt def _format_vcf_value(frmt1, frmt2, tag): return {_change_to_cpg(tag, tag): frmt1[tag], _change_to_snp(tag, tag): frmt2[tag]} def _get_vcf_line(record): """ create new vcf file with CpG and SNP information """ frmt = {} cs = _get_info(record[7], "CS") ref = "%s%s" % ("C", record[13]) alt = "%s%s" % ("C", record[14]) qual = (float(record[5]) + float(record[15])) / 2 filter = "LowQual" dp = int(_get_info(record[7], "DP")) + int(_get_info(record[17], "DP")) info = ";".join(["DP=%s" % dp, 
"CS=%s" % cs]) cpg = _get_format(record[8], record[9]) snp = _get_format(record[18], record[19]) for value in ["BRC6", "CM", "CU", "CP", "DP"]: frmt.update(_format_vcf_value(cpg, snp, value)) format = "GT:" + ":".join(frmt.keys()) sample = snp["GT"] + ":" + ":".join(frmt.values()) return record[0], record[11], ref, alt, qual, filter, info, format, sample def _correct_vcf(vcf_file): """ sort by genome/position, bgzip and index """ vcf_sort = append_stem(vcf_file, "_sort") + ".gz" if not file_exists(vcf_sort): with file_transaction(vcf_sort) as tx_out: cmd = "cat {vcf_file} |vcf-sort | bgzip > {tx_out}" do.run(cmd.format(**locals()), "sort %s" % vcf_file) do.run("tabix -f {0}".format(tx_out), "") return vcf_sort def cpg_het_pairs(cpgvcf, snpvcf, bam_file, out_file, workdir): """ Detect het close to hemi-met sites """ out_vcf = splitext_plus(out_file)[0] + ".vcf" cpg_filter = op.join(workdir, op.basename(append_stem(cpgvcf, "_filtered"))) snp_filter = op.join(workdir, op.basename(append_stem(snpvcf, "_filtered"))) if not file_exists(cpg_filter): with open(cpg_filter, 'w') as out_handle: with open(cpgvcf) as in_handle: for line in in_handle: if line.startswith("#"): continue record = line.strip().split("\t") # print record header, frmt = record[8], record[9] frmt = dict(zip(header.split(":"), frmt.split(':'))) if is_good_cpg(frmt, record): print >>out_handle, line if not file_exists(snp_filter): with open(snp_filter, 'w') as out_handle: with open(snpvcf) as in_handle: for line in in_handle: if line.startswith("#"): continue record = line.strip().split("\t") header, frmt = record[8], record[9] frmt = dict(zip(header.split(":"), frmt.split(':'))) if is_good_het(frmt, record): print >>out_handle, line if not file_exists(out_vcf): res = pybedtools.BedTool(cpg_filter).window(snp_filter, w=75) with open(out_file, 'w') as out_handle, open(out_vcf, 'w') as vcf_handle: _create_vcf_header(cpgvcf, vcf_handle) print >>out_handle, 
"chrom\tCpG_pos\tCpG_nt\tSNP_pos\tAlleles\tassociation_plus\tSNP_reads_minus" for record in res: if record[1] != record[11]: # if record[1] == "19889634": link, link_as, align = _make_linkage(bam_file, record[0], int(record[1]), int(record[11]), _get_strand(record)) res = "%s\t%s\t%s\t%s\t%s/%s\t%s\t%s" % (record[0], record[1], record[3], record[11], record[13], record[14], _format(link), _format(link_as)) chrom, pos, ref, alt, qual, filt, info, frmt, sample = _get_vcf_line(record) # print res if _valid_test(link, link_as): filt = "PASS" print >>out_handle, res # print res # print >>out_handle, '\n'.join(align) vcf_res = "{chrom}\t{pos}\t.\t{ref}\t{alt}\t{qual}\t{filt}\t{info}\t{frmt}\t{sample}".format(**locals()) print >>vcf_handle, vcf_res return _correct_vcf(out_vcf) def _complement(nt): if nt == 'a': return 't' elif nt == 't': return 'a' elif nt == 'c': return 'g' elif nt == 'g': return 'c' def _model(pileup, snp, cpg_st): c_pos = v_pos = [] for read in pileup: if len(pileup[read].keys()) == 1: continue info_snp = pileup[read]['snp'].split(":") info_cpg = pileup[read]['cpg'].split(":") if info_cpg[1] == cpg_st: if cpg_st == "+": c_pos.append(info_cpg[0].lower()) v_pos.append(info_snp[0].lower()) else: c_pos.append(_complement(info_cpg[0].lower())) v_pos.append(_complement(info_snp[0].lower())) else: if info_snp[1] == "+": v_pos.append(info_snp[0].lower()) else: v_pos.append(_complement(info_snp[0].lower())) def _make_linkage(bam_file, chrom, cpg, snp, cpg_st): start, end = [cpg-1, snp-1] if cpg-1 < snp-1 else [snp-1, cpg-1] pairs = _pairs_matrix(bam_file, [chrom, start, end], cpg-1, snp-1) link = Counter() link_as = Counter() align = [] for pair in pairs: if len(pairs[pair].keys()) == 1: continue nts = [pairs[pair]['cpg'].split(":")[0], pairs[pair]['snp'].split(":")[0]] align.append("-".join(nts) if cpg < snp else "-".join(nts[::-1])) info_snp = pairs[pair]['snp'].split(":") # if info_snp[1] == cpg_st: # print pairs[pair] if pairs[pair]['cpg']: info_cpg = 
pairs[pair]['cpg'].split(":") if info_cpg[1] == info_snp[1] and info_cpg[1] == cpg_st: link["v%s/c%s:%s" % (info_snp[0], info_cpg[0], cpg_st)] += 1 # else: # link_as["v%s:%s" % (info_snp[0], info_snp[1])] += 1 # print "LINK\n%s\n" % link return link, link_as, align def _pairs_matrix(bam_file, region, cpg, snp): """ Get reads from the cpg region and pairs cpg nt with snp nt """ pileup = defaultdict(dict) c, s, e = region samfile = pysam.AlignmentFile(bam_file, "rb") for pileupcolumn in samfile.pileup(c, s, e): if pileupcolumn.pos == cpg or pileupcolumn.pos == snp: # print ("\ncoverage at base %s = %s" % (pileupcolumn.pos, pileupcolumn.n)) for pileupread in pileupcolumn.pileups: if not pileupread.is_del and not pileupread.is_refskip: # query position is None if is_del or is_refskip is set. strand = "-" if pileupread.alignment.is_reverse else "+" tag = "cpg" if pileupcolumn.pos == cpg else "snp" nt = pileupread.alignment.query_sequence[pileupread.query_position] nt = nt.lower() if strand == "-" else nt pileup[pileupread.alignment.query_name].update({tag: nt + ":%s" % strand}) return pileup def get_het(in_vcf, region, sample, out_file): res = pybedtools.BedTool(in_vcf).intersect(b=region, wo=True) with file_transaction(out_file) as tx_out: with open(tx_out, 'w') as out_handle: # print >> out_handle, "chrom\tstart\tend\tgen\dp4\tstrand\tgene\tsample" for record in res: gene = record[-2] chrom, pos, info, header, frmt = record[0], int(record[1]), record[7], record[8], record[9] # cs = info.split(';')[0].split('=')[1] frmt = dict(zip(header.split(":"), frmt.split(':'))) # if _genotype(frmt['GT'].split("/")) == "heteroz" and int(frmt['DP']) > 10 and int(frmt['DP4']) > 10 and record[6] == "PASS": if is_good_het(frmt, record): tag = "%s-%s-%s-%s" % (frmt['GT'], frmt['DP'], gene, sample) print >> out_handle, "%s\t%s\t%s\t%s\t.\t+" % (chrom, pos, pos + 1, tag ) def post_processing(vcf_res, vcf_merged, out): """ merge list of vcf files and get stats """ if len(vcf_res) == 1: 
return vcf_res if not file_exists(vcf_merged): cmd = "bcftools merge {0} > {1}".format(" ".join(vcf_res), vcf_merged) do.run(cmd, "merge files") vcf_reader = vcf.Reader(open(vcf_merged, 'r')) samples = vcf_reader.samples num_call = Counter() num_call_sample = Counter() for record in vcf_reader: if not record.FILTER: num_call[record.num_called] += 1 # print record.num_called for sample in samples: if record.genotype(sample)['GT'] != "./.": # print record.genotype(sample)['GT'] num_call_sample[sample] += 1 with open(out + "_shared_stat.tsv", 'w') as stat_handle: print >>stat_handle, tabulate([[k, v] for k, v in num_call.iteritems()], headers=["# samples", "# of SNPs"]) with open(out + "_stat.tsv", 'w') as stat_handle: print >>stat_handle, tabulate([[k, v] for k, v in num_call_sample.iteritems()], headers=["samples", "# of SNPs"]) def detect_asm(data, args): vcf_res = [] in_vcf = data['fastq'] bam_file = data['bam'] sample = splitext_plus(op.basename(in_vcf))[0].split(".raw")[0].replace(".rawcpg", "") workdir = op.join(args.out, sample) safe_makedir(workdir) snp_file = in_vcf.replace("rawcpg", "rawsnp") assert bam_file, "No bam file associated to vcf %s" % in_vcf out_file = op.join(workdir, sample + "_pairs.tsv") vcf_res = cpg_het_pairs(in_vcf, snp_file, bam_file, out_file, workdir) data['asm'] = vcf_res return data<|fim▁end|>
import pybedtools import pysam
<|file_name|>Bubble Sort.cpp<|end_file_name|><|fim▁begin|>/* This file is part of White - Storm: Lightning (alpha). Copyright 2012 Christopher Augustus Greeley White - Storm: Lightning (alpha) is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. White - Storm: Lightning (alpha) is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with White - Storm: Lightning (alpha). If not, see <http://www.gnu.org/licenses/>. */ #include "Bubble Sort.h" void WSL::Algorithmic::BubbleSort::BubbleSortAlgorithm( WSL::Containers::PointableVector< WSL::Containers::RenderType > *toBeSorted ) { WSL::Containers::RenderType transfer; const unsigned int SIZE = toBeSorted->vector.size(); for( unsigned int i = 0; i < SIZE; ++i ) { for( unsigned int j = 0; j < SIZE; ++j ) { if( ( j + 1 ) < SIZE ) { if( toBeSorted->vector[ j ].z > toBeSorted->vector[ ( j + 1 ) ].z ) { transfer = toBeSorted->vector[ j ]; toBeSorted->vector[ j ] = toBeSorted->vector[ ( j + 1 ) ]; toBeSorted->vector[ j + 1 ] = transfer; } } else if( toBeSorted->vector[ 0 ].z > toBeSorted->vector[ j ].z )<|fim▁hole|> transfer = toBeSorted->vector[ 0 ]; toBeSorted->vector[ 0 ] = toBeSorted->vector[ j ]; toBeSorted->vector[ j ] = transfer; } } } }<|fim▁end|>
{
<|file_name|>CSS2DRenderer.js<|end_file_name|><|fim▁begin|>/** * @author mrdoob / http://mrdoob.com/ */ THREE.CSS2DObject = function ( element ) { THREE.Object3D.call( this ); this.element = element; this.element.style.position = 'absolute'; this.addEventListener( 'removed', function ( event ) { if ( this.element.parentNode !== null ) { this.element.parentNode.removeChild( this.element ); } } ); }; THREE.CSS2DObject.prototype = Object.create( THREE.Object3D.prototype ); THREE.CSS2DObject.prototype.constructor = THREE.CSS2DObject; // THREE.CSS2DRenderer = function () { console.log( 'THREE.CSS2DRenderer', THREE.REVISION ); var _width, _height; var _widthHalf, _heightHalf; var vector = new THREE.Vector3(); var viewMatrix = new THREE.Matrix4(); var viewProjectionMatrix = new THREE.Matrix4(); <|fim▁hole|> this.setSize = function ( width, height ) { _width = width; _height = height; _widthHalf = _width / 2; _heightHalf = _height / 2; domElement.style.width = width + 'px'; domElement.style.height = height + 'px'; }; var renderObject = function ( object, camera ) { if ( object instanceof THREE.CSS2DObject ) { vector.setFromMatrixPosition( object.matrixWorld ); vector.applyMatrix4( viewProjectionMatrix ); var element = object.element; var style = 'translate(-50%,-50%) translate(' + ( vector.x * _widthHalf + _widthHalf ) + 'px,' + ( - vector.y * _heightHalf + _heightHalf ) + 'px)'; element.style.WebkitTransform = style; element.style.MozTransform = style; element.style.oTransform = style; element.style.transform = style; if ( element.parentNode !== domElement ) { domElement.appendChild( element ); } } for ( var i = 0, l = object.children.length; i < l; i ++ ) { renderObject( object.children[ i ], camera ); } }; this.render = function ( scene, camera ) { scene.updateMatrixWorld(); if ( camera.parent === null ) camera.updateMatrixWorld(); viewMatrix.copy( camera.matrixWorldInverse ); viewProjectionMatrix.multiplyMatrices( camera.projectionMatrix, viewMatrix ); renderObject( 
scene, camera ); }; };<|fim▁end|>
var domElement = document.createElement( 'div' ); domElement.style.overflow = 'hidden'; this.domElement = domElement;
<|file_name|>imagedata.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::bindings::codegen::Bindings::ImageDataBinding; use crate::dom::bindings::codegen::Bindings::ImageDataBinding::ImageDataMethods; use crate::dom::bindings::error::{Error, Fallible}; use crate::dom::bindings::reflector::{reflect_dom_object, Reflector}; use crate::dom::bindings::root::DomRoot; use crate::dom::globalscope::GlobalScope; use dom_struct::dom_struct; use euclid::{Rect, Size2D}; use ipc_channel::ipc::IpcSharedMemory; use js::jsapi::{Heap, JSContext, JSObject}; use js::rust::Runtime; use js::typedarray::{CreateWith, Uint8ClampedArray}; use std::borrow::Cow; use std::default::Default; use std::ptr; use std::ptr::NonNull; use std::vec::Vec; #[dom_struct] pub struct ImageData { reflector_: Reflector, width: u32,<|fim▁hole|> data: Heap<*mut JSObject>, } impl ImageData { #[allow(unsafe_code)] pub fn new( global: &GlobalScope, width: u32, height: u32, mut data: Option<Vec<u8>>, ) -> Fallible<DomRoot<ImageData>> { let len = width * height * 4; unsafe { let cx = global.get_cx(); rooted!(in (cx) let mut js_object = ptr::null_mut::<JSObject>()); let data = match data { Some(ref mut d) => { d.resize(len as usize, 0); CreateWith::Slice(&d[..]) }, None => CreateWith::Length(len), }; Uint8ClampedArray::create(cx, data, js_object.handle_mut()).unwrap(); Self::new_with_jsobject(global, width, Some(height), Some(js_object.get())) } } #[allow(unsafe_code)] unsafe fn new_with_jsobject( global: &GlobalScope, width: u32, mut opt_height: Option<u32>, opt_jsobject: Option<*mut JSObject>, ) -> Fallible<DomRoot<ImageData>> { assert!(opt_jsobject.is_some() || opt_height.is_some()); if width == 0 { return Err(Error::IndexSize); } // checking jsobject type and verifying (height * width * 4 == jsobject.byte_len()) if 
let Some(jsobject) = opt_jsobject { let cx = global.get_cx(); typedarray!(in(cx) let array_res: Uint8ClampedArray = jsobject); let array = array_res.map_err(|_| { Error::Type("Argument to Image data is not an Uint8ClampedArray".to_owned()) })?; let byte_len = array.as_slice().len() as u32; if byte_len % 4 != 0 { return Err(Error::InvalidState); } let len = byte_len / 4; if width == 0 || len % width != 0 { return Err(Error::IndexSize); } let height = len / width; if opt_height.map_or(false, |x| height != x) { return Err(Error::IndexSize); } else { opt_height = Some(height); } } let height = opt_height.unwrap(); if height == 0 { return Err(Error::IndexSize); } let imagedata = Box::new(ImageData { reflector_: Reflector::new(), width: width, height: height, data: Heap::default(), }); if let Some(jsobject) = opt_jsobject { (*imagedata).data.set(jsobject); } else { let len = width * height * 4; let cx = global.get_cx(); rooted!(in (cx) let mut array = ptr::null_mut::<JSObject>()); Uint8ClampedArray::create(cx, CreateWith::Length(len), array.handle_mut()).unwrap(); (*imagedata).data.set(array.get()); } Ok(reflect_dom_object( imagedata, global, ImageDataBinding::Wrap, )) } // https://html.spec.whatwg.org/multipage/#pixel-manipulation:dom-imagedata-3 #[allow(unsafe_code)] pub fn Constructor(global: &GlobalScope, width: u32, height: u32) -> Fallible<DomRoot<Self>> { unsafe { Self::new_with_jsobject(global, width, Some(height), None) } } // https://html.spec.whatwg.org/multipage/#pixel-manipulation:dom-imagedata-4 #[allow(unsafe_code)] #[allow(unused_variables)] pub unsafe fn Constructor_( cx: *mut JSContext, global: &GlobalScope, jsobject: *mut JSObject, width: u32, opt_height: Option<u32>, ) -> Fallible<DomRoot<Self>> { Self::new_with_jsobject(global, width, opt_height, Some(jsobject)) } /// Nothing must change the array on the JS side while the slice is live. 
#[allow(unsafe_code)] pub unsafe fn as_slice(&self) -> &[u8] { assert!(!self.data.get().is_null()); let cx = Runtime::get(); assert!(!cx.is_null()); typedarray!(in(cx) let array: Uint8ClampedArray = self.data.get()); let array = array.as_ref().unwrap(); // NOTE(nox): This is just as unsafe as `as_slice` itself even though we // are extending the lifetime of the slice, because the data in // this ImageData instance will never change. The method is thus unsafe // because the array may be manipulated from JS while the reference // is live. let ptr = array.as_slice() as *const _; &*ptr } #[allow(unsafe_code)] pub fn to_shared_memory(&self) -> IpcSharedMemory { IpcSharedMemory::from_bytes(unsafe { self.as_slice() }) } #[allow(unsafe_code)] pub unsafe fn get_rect(&self, rect: Rect<u32>) -> Cow<[u8]> { pixels::rgba8_get_rect(self.as_slice(), self.get_size(), rect) } pub fn get_size(&self) -> Size2D<u32> { Size2D::new(self.Width(), self.Height()) } } impl ImageDataMethods for ImageData { // https://html.spec.whatwg.org/multipage/#dom-imagedata-width fn Width(&self) -> u32 { self.width } // https://html.spec.whatwg.org/multipage/#dom-imagedata-height fn Height(&self) -> u32 { self.height } #[allow(unsafe_code)] // https://html.spec.whatwg.org/multipage/#dom-imagedata-data unsafe fn Data(&self, _: *mut JSContext) -> NonNull<JSObject> { NonNull::new(self.data.get()).expect("got a null pointer") } }<|fim▁end|>
height: u32, #[ignore_malloc_size_of = "mozjs"]
<|file_name|>KubeMonitorJob.java<|end_file_name|><|fim▁begin|>package app.monitor.job; import core.framework.internal.log.LogManager; import core.framework.json.JSON; import core.framework.kafka.MessagePublisher; import core.framework.log.message.StatMessage; import core.framework.scheduler.Job; import core.framework.scheduler.JobContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.Duration; import java.time.Instant; import java.time.ZonedDateTime; import java.util.List; import java.util.Map; /** * @author neo */ public class KubeMonitorJob implements Job { public final MessagePublisher<StatMessage> publisher; public final KubeClient kubeClient; public final List<String> namespaces; private final Logger logger = LoggerFactory.getLogger(KubeMonitorJob.class); public KubeMonitorJob(List<String> namespaces, KubeClient kubeClient, MessagePublisher<StatMessage> publisher) { this.publisher = publisher; this.kubeClient = kubeClient; this.namespaces = namespaces; } @Override public void execute(JobContext context) { try { var now = ZonedDateTime.now(); for (String namespace : namespaces) { KubePodList pods = kubeClient.listPods(namespace); for (KubePodList.Pod pod : pods.items) { String errorMessage = check(pod, now); if (errorMessage != null) { publishPodFailure(pod, errorMessage); } } } } catch (Throwable e) { logger.error(e.getMessage(), e); publisher.publish(StatMessageFactory.failedToCollect(LogManager.APP_NAME, null, e)); } } String check(KubePodList.Pod pod, ZonedDateTime now) { if (pod.metadata.deletionTimestamp != null) { Duration elapsed = Duration.between(pod.metadata.deletionTimestamp, now); if (elapsed.toSeconds() >= 300) { return "pod is still in deletion, elapsed=" + elapsed; } return null; } String phase = pod.status.phase; if ("Succeeded".equals(phase)) return null; // terminated if ("Failed".equals(phase) || "Unknown".equals(phase)) return "unexpected pod phase, phase=" + phase; if ("Pending".equals(phase)) { // newly created 
pod may not have container status yet, containerStatuses is initialized as empty for (KubePodList.ContainerStatus status : pod.status.containerStatuses) { if (status.state.waiting != null && "ImagePullBackOff".equals(status.state.waiting.reason)) { return "ImagePullBackOff: " + status.state.waiting.message; } } // for unschedulable pod for (KubePodList.PodCondition condition : pod.status.conditions) { if ("PodScheduled".equals(condition.type) && "False".equals(condition.status) && Duration.between(condition.lastTransitionTime, now).toSeconds() >= 300) { return condition.reason + ": " + condition.message; } } } if ("Running".equals(phase)) {<|fim▁hole|> if (status.state.waiting != null && "CrashLoopBackOff".equals(status.state.waiting.reason)) { return "CrashLoopBackOff: " + status.state.waiting.message; } boolean containerReady = Boolean.TRUE.equals(status.ready); if (!containerReady && status.lastState != null && status.lastState.terminated != null) { var terminated = status.lastState.terminated; return "pod was terminated, reason=" + terminated.reason + ", exitCode=" + terminated.exitCode; } if (!containerReady) { ready = false; } } if (ready) return null; // all running, all ready } ZonedDateTime startTime = pod.status.startTime != null ? 
pod.status.startTime : pod.metadata.creationTimestamp; // startTime may not be populated yet if pod is just created Duration elapsed = Duration.between(startTime, now); if (elapsed.toSeconds() >= 300) { // can be: 1) took long to be ready after start, or 2) readiness check failed in the middle run return "pod is not in ready state, uptime=" + elapsed; } return null; } private void publishPodFailure(KubePodList.Pod pod, String errorMessage) { var now = Instant.now(); var message = new StatMessage(); message.id = LogManager.ID_GENERATOR.next(now); message.date = now; message.result = "ERROR"; message.app = pod.metadata.labels.getOrDefault("app", pod.metadata.name); message.host = pod.metadata.name; message.errorCode = "POD_FAILURE"; message.errorMessage = errorMessage; message.info = Map.of("pod", JSON.toJSON(pod)); publisher.publish(message); } }<|fim▁end|>
boolean ready = true; for (KubePodList.ContainerStatus status : pod.status.containerStatuses) {
<|file_name|>matrix.ones.js<|end_file_name|><|fim▁begin|>function ones(rows, columns) { columns = columns || rows; if (typeof rows === 'number' && typeof columns === 'number') { const matrix = []; for (let i = 0; i < rows; i++) { matrix.push([]); for (let j = 0; j < columns; j++) { matrix[i].push(1); } } return matrix;<|fim▁hole|>} module.exports = ones;<|fim▁end|>
} throw new TypeError('Matrix dimensions should be integers.');
<|file_name|>RF_Sensor.py<|end_file_name|><|fim▁begin|># Core imports import copy import Queue import thread import time # Package imports from ..core.Threadable import Threadable from ..reconstruction.Buffer import Buffer from ..settings import Arguments from Packet import Packet from TDMA_Scheduler import TDMA_Scheduler # pylint: disable=undefined-all-variable __all__ = [ "RF_Sensor_Simulator", "RF_Sensor_Physical_XBee", "RF_Sensor_Physical_Texas_Instruments" ] class RSSI_Validity_Request(object): """ A request to the valid location callback, containing properties that the valid location callback can use to track the measurement validity. """ def __init__(self, specification, other_id=None, other_valid=None, other_valid_pair=None, other_index=None): self._is_broadcast = specification == "rssi_broadcast" self._other_valid = other_valid self._other_id = other_id self._other_index = other_index self._other_valid_pair = other_valid_pair @property def is_broadcast(self): """ Whether the RSSI packet is a measurement broadcast packet, or that it is sent to the ground station. """ return self._is_broadcast @property def other_id(self): """ RF sensor ID of the other sensor. """ return self._other_id @property def other_valid(self): """ Whether the location of the other sensor is valid. """ return self._other_valid @property def other_valid_pair(self): """ Whether the other sensor has received a valid measurement from the current sensor. """ return self._other_valid_pair @property def other_index(self): """ The waypoint index of the other sensor. """ return self._other_index class DisabledException(Exception): """ Special exception indicating that the RF sensor was disabled during execution of the sensor loop. """ pass class RF_Sensor(Threadable): """ Base class for all RF sensors. This class is responsible for setting up the basic characteristics of an RF sensor and contains common code for the simulated and physical specializations. 
""" def __init__(self, arguments, thread_manager, location_callback, receive_callback, valid_callback): """ Initialize the RF sensor. The `arguments` parameter is used to load settings for a specific RF sensor type. The sensor has a `thread_manager`, which is a `Thread_Manager` object for registering its own thread loop. Additionally, it requires certian callbacks. The `location_callback` is called whenever the sensor<|fim▁hole|> "rssi_ground_station" private packets. The `receive_callback` is called whenever non-private packets are received and has the `Packet` object as an argument. Finally, the `valid_callback` is called shortly after the `location_callback` is called. It may be given a boolean argument indicating whether another RF sensor has a valid location, but only when creating the "rssi_ground_station" private packet. This is used by the callback to determine if measurements at a certain location are finished. Classes that inherit this base class may extend this method. """ super(RF_Sensor, self).__init__("rf_sensor", thread_manager) # Make sure that the provided callbacks are callable. for callback in [location_callback, receive_callback, valid_callback]: if not hasattr(callback, "__call__"): raise TypeError("Provided RF sensor callback is not callable") # Load settings for a specific RF sensor type. if isinstance(arguments, Arguments): self._settings = arguments.get_settings(self.type) else: raise ValueError("'arguments' must be an instance of Arguments") # Initialize common member variables. 
self._id = self._settings.get("rf_sensor_id") self._number_of_sensors = self._settings.get("number_of_sensors") self._address = None self._connection = None self._buffer = None self._scheduler = TDMA_Scheduler(self._id, arguments) self._packets = Queue.Queue() self._custom_packets = Queue.Queue() self._joined = False self._activated = False self._started = False self._loop_delay = self._settings.get("loop_delay") self._location_callback = location_callback self._receive_callback = receive_callback self._valid_callback = valid_callback @property def id(self): """ Get the ID of the RF sensor. """ return self._id @property def number_of_sensors(self): """ Get the number of sensors in the network. """ return self._number_of_sensors @property def buffer(self): """ Get the buffer of the RF sensor. """ return self._buffer @buffer.setter def buffer(self, buffer): """ Set the buffer. The `buffer` argument must be a `Buffer` object. """ if not isinstance(buffer, Buffer): raise ValueError("The `buffer` argument must be a `Buffer` object") self._buffer = buffer @property def type(self): raise NotImplementedError("Subclasses must implement the `type` property") @property def identity(self): """ Get the identity of the RF sensor, consisting of its ID, address and network join status. Classes that inherit this base class may extend this property. """ return { "id": self._id, "address": self._address, "joined": self._joined } def activate(self): """ Activate the sensor to start sending and receiving packets. Classes that inherit this base class may extend this method. """ super(RF_Sensor, self).activate() if not self._activated: self._activated = True if self._connection is None: self._setup() thread.start_new_thread(self._loop, ()) def deactivate(self): """ Deactivate the sensor to stop sending and receiving packets. Classes that inherit this base class may extend this method. 
""" super(RF_Sensor, self).deactivate() if self._activated: self._activated = False if self._connection is not None: # Close the connection and clean up so that the thread might get # the signal faster and we can correctly reactivate later on. self._connection.close() self._connection = None def start(self): """ Start the signal strength measurements (and stop sending custom packets). Classes that inherit this base class may extend this method. """ self._scheduler.update() self._packets = Queue.Queue() self._started = True def stop(self): """ Stop the signal strength measurements (and start sending custom packets). """ self._started = False # Reset the scheduler timestamp so that it updates correctly in case we # restart the sensor measurements. self._scheduler.timestamp = 0 def enqueue(self, packet, to=None): """ Enqueue a custom `packet` to send `to` another RF sensor in the network. """ if not isinstance(packet, Packet): raise TypeError("Only `Packet` objects can be enqueued") if packet.is_private(): raise ValueError("Private packets cannot be enqueued") if to is None: # No destination ID has been provided, so we broadcast the packet to # all sensors in the network except for ourself and the ground station. for to_id in xrange(1, self._number_of_sensors + 1): if to_id == self._id: continue self._custom_packets.put({ "packet": copy.deepcopy(packet), "to": to_id }) else: self._custom_packets.put({ "packet": packet, "to": to }) def discover(self, callback, required_sensors=None): """ Discover RF sensors in the network. The `callback` callable function is called when an RF sensor reports its identity. The `required_sensors` set indicates which sensors should be discovered; if it is not provided, then all RF sensors are discovered. Note that discovery may fail due to interference or disabled sensors. Classes that inherit this base class must extend this method. 
""" if not hasattr(callback, "__call__"): raise TypeError("Provided discovery callback is not callable") if isinstance(required_sensors, set): if not required_sensors.issubset(range(1, self._number_of_sensors + 1)): raise ValueError("Provided required sensors may only contain vehicle sensors") elif required_sensors is not None: raise TypeError("Provided required sensors must be a `set`") def _setup(self): raise NotImplementedError("Subclasses must implement `_setup()`") def _loop(self): """ Execute the sensor loop. This runs in a separate thread. """ try: while self._activated: self._loop_body() except DisabledException: return except: super(RF_Sensor, self).interrupt() def _loop_body(self): """ Body of the sensor loop. This is extracted into a separate method to make testing easier, as well as for keeping the `_loop` implementation in the base class. Classes that inherit this base class must extend this method. """ # If the sensor has been activated, we only send enqueued custom packets. # If the sensor has been started, we stop sending custom packets and # start performing signal strength measurements. if not self._started: self._send_custom_packets() elif self._id > 0 and self._scheduler.in_slot: self._send() self._scheduler.update() time.sleep(self._loop_delay) def _send(self): """ Send a broadcast packet to each other sensor in the network and send collected packets to the ground station. Classes that inherit this base class may extend this method. """ # Create and send the RSSI broadcast packets. for to_id in xrange(1, self._number_of_sensors + 1): if not self._scheduler.in_slot: return if to_id == self._id: continue packet = self._create_rssi_broadcast_packet(to_id) self._send_tx_frame(packet, to_id) # Send collected packets to the ground station. while not self._packets.empty() and self._scheduler.in_slot: packet = self._packets.get() self._send_tx_frame(packet, 0) def _send_custom_packets(self): """ Send custom packets to their destinations. 
""" while not self._custom_packets.empty(): item = self._custom_packets.get() self._send_tx_frame(item["packet"], item["to"]) def _send_tx_frame(self, packet, to=None): """ Send a TX frame with `packet` as payload `to` another sensor. Classes that inherit this base class must extend this method. """ if self._connection is None: raise DisabledException if not isinstance(packet, Packet): raise TypeError("Only `Packet` objects can be sent") if to is None: raise TypeError("Invalid destination '{}' has been provided".format(to)) # Introduce a short delay to give the hardware more time to send # packets when this method is called many times in a row. time.sleep(self._loop_delay) def _receive(self, packet=None): raise NotImplementedError("Subclasses must implement `_receive(packet=None)`") def _create_rssi_broadcast_packet(self, to_id): """ Create a `Packet` object according to the "rssi_broadcast" specification. The resulting packet is complete. """ location, waypoint_index = self._location_callback() request = RSSI_Validity_Request("rssi_broadcast", other_id=to_id) valid, valid_pair = self._valid_callback(request) packet = Packet() packet.set("specification", "rssi_broadcast") packet.set("latitude", location[0]) packet.set("longitude", location[1]) packet.set("valid", valid) packet.set("valid_pair", valid_pair) packet.set("waypoint_index", waypoint_index) packet.set("sensor_id", self._id) packet.set("timestamp", time.time()) return packet def _create_rssi_ground_station_packet(self, rssi_broadcast_packet): """ Create a `Packet` object according to the "rssi_ground_station" specification using data from an `rssi_broadcast_packet`. The `rssi_broadcast_packet` must be created according to the "rssi_broadcast" specification. The resulting packet is only missing RSSI information. The packet can be sent to the ground station as an indication of the signal strength between the RF sensor that sent the `rssi_broadcast_packet` and the current RF sensor. 
""" from_valid = rssi_broadcast_packet.get("valid") from_id = rssi_broadcast_packet.get("sensor_id") from_waypoint_index = rssi_broadcast_packet.get("waypoint_index") from_valid_pair = rssi_broadcast_packet.get("valid_pair") location = self._location_callback()[0] request = RSSI_Validity_Request("rssi_ground_station", other_id=from_id, other_valid=from_valid, other_valid_pair=from_valid_pair, other_index=from_waypoint_index) to_valid = self._valid_callback(request)[0] packet = Packet() packet.set("specification", "rssi_ground_station") packet.set("sensor_id", self._id) packet.set("from_latitude", rssi_broadcast_packet.get("latitude")) packet.set("from_longitude", rssi_broadcast_packet.get("longitude")) packet.set("from_valid", from_valid) packet.set("to_latitude", location[0]) packet.set("to_longitude", location[1]) packet.set("to_valid", to_valid) return packet<|fim▁end|>
needs to know its own location for the "rssi_broadcast" and the
<|file_name|>raft_handlers.go<|end_file_name|><|fim▁begin|>package main import ( "encoding/json" "github.com/coreos/go-raft" "net/http" ) //------------------------------------------------------------- // Handlers to handle raft related request via raft server port //------------------------------------------------------------- // Get all the current logs func GetLogHttpHandler(w http.ResponseWriter, req *http.Request) { debugf("[recv] GET %s/log", r.url) w.Header().Set("Content-Type", "application/json") w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(r.LogEntries()) } // Response to vote request func VoteHttpHandler(w http.ResponseWriter, req *http.Request) { rvreq := &raft.RequestVoteRequest{} err := decodeJsonRequest(req, rvreq) if err == nil { debugf("[recv] POST %s/vote [%s]", r.url, rvreq.CandidateName) if resp := r.RequestVote(rvreq); resp != nil { w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(resp) return } } warnf("[vote] ERROR: %v", err) w.WriteHeader(http.StatusInternalServerError) } // Response to append entries request func AppendEntriesHttpHandler(w http.ResponseWriter, req *http.Request) { aereq := &raft.AppendEntriesRequest{} err := decodeJsonRequest(req, aereq) if err == nil { debugf("[recv] POST %s/log/append [%d]", r.url, len(aereq.Entries)) if resp := r.AppendEntries(aereq); resp != nil { w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(resp) if !resp.Success { debugf("[Append Entry] Step back") } return } } warnf("[Append Entry] ERROR: %v", err) w.WriteHeader(http.StatusInternalServerError) } // Response to recover from snapshot request func SnapshotHttpHandler(w http.ResponseWriter, req *http.Request) { aereq := &raft.SnapshotRequest{} err := decodeJsonRequest(req, aereq) if err == nil { debugf("[recv] POST %s/snapshot/ ", r.url) if resp := r.RequestSnapshot(aereq); resp != nil { w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(resp) return } } warnf("[Snapshot] ERROR: %v", err) 
w.WriteHeader(http.StatusInternalServerError) } // Response to recover from snapshot request func SnapshotRecoveryHttpHandler(w http.ResponseWriter, req *http.Request) { aereq := &raft.SnapshotRecoveryRequest{} err := decodeJsonRequest(req, aereq) if err == nil { debugf("[recv] POST %s/snapshotRecovery/ ", r.url) if resp := r.SnapshotRecoveryRequest(aereq); resp != nil { w.WriteHeader(http.StatusOK) json.NewEncoder(w).Encode(resp) return } } warnf("[Snapshot] ERROR: %v", err) w.WriteHeader(http.StatusInternalServerError) } // Get the port that listening for etcd connecting of the server func EtcdURLHttpHandler(w http.ResponseWriter, req *http.Request) { debugf("[recv] Get %s/etcdURL/ ", r.url) w.WriteHeader(http.StatusOK) w.Write([]byte(argInfo.EtcdURL)) } // Response to the join request func JoinHttpHandler(w http.ResponseWriter, req *http.Request) error { command := &JoinCommand{} if err := decodeJsonRequest(req, command); err == nil { debugf("Receive Join Request from %s", command.Name) return dispatch(command, w, req, false) } else { w.WriteHeader(http.StatusInternalServerError) return nil } } // Response to remove request func RemoveHttpHandler(w http.ResponseWriter, req *http.Request) { if req.Method != "DELETE" { w.WriteHeader(http.StatusMethodNotAllowed) return } nodeName := req.URL.Path[len("/remove/"):] command := &RemoveCommand{ Name: nodeName, } debugf("[recv] Remove Request [%s]", command.Name) dispatch(command, w, req, false) } <|fim▁hole|> w.WriteHeader(http.StatusOK) w.Write([]byte(r.name)) } // Response to the name request func RaftVersionHttpHandler(w http.ResponseWriter, req *http.Request) { debugf("[recv] Get %s/version/ ", r.url) w.WriteHeader(http.StatusOK) w.Write([]byte(r.version)) }<|fim▁end|>
// Response to the name request func NameHttpHandler(w http.ResponseWriter, req *http.Request) { debugf("[recv] Get %s/name/ ", r.url)
<|file_name|>recipes.py<|end_file_name|><|fim▁begin|># Copyright (C) 2018 Henrique Pereira Coutada Miranda, Alejandro Molina Sanchez, Alexandre Morlet, Fulvio Paleari # # All rights reserved. # # This file is part of yambopy # # import os from operator import itemgetter from collections import OrderedDict from yambopy import * # # by Henrique Miranda. # def pack_files_in_folder(folder,save_folder=None,mask='',verbose=True): """ Pack the output files in a folder to json files """ if not save_folder: save_folder = folder #pack the files in .json files for dirpath,dirnames,filenames in os.walk(folder): #check if the folder fits the mask if mask in dirpath: #check if there are some output files in the folder if ([ f for f in filenames if 'o-' in f ]): if verbose: print(dirpath) y = YamboOut(dirpath,save_folder=save_folder) y.pack() # # by Alejandro Molina-Sanchez # def breaking_symmetries(efield1,efield2=[0,0,0],folder='.',RmTimeRev=True): """ Breaks the symmetries for a given field. Second field used in circular polarized pump configuration RmTimeRev : Remove time symmetry is set True by default """ os.system('mkdir -p %s'%folder) os.system('cp -r database/SAVE %s'%folder) os.system('cd %s; yambo'%folder) ypp = YamboIn.from_runlevel('-y -V all',executable='ypp',folder=folder,filename='ypp.in') ypp['Efield1'] = efield1 # Field in the X-direction ypp['Efield2'] = efield2 # Field in the X-direction if RmTimeRev: ypp.arguments.append('RmTimeRev') # Remove Time Symmetry ypp.write('%s/ypp.in'%folder) os.system('cd %s ; ypp_ph -F ypp.in'%folder ) os.system('cd %s ; cd FixSymm; yambo '%folder ) os.system('rm -r %s/SAVE'%folder) os.system('mv %s/FixSymm/SAVE %s/'%(folder,folder)) os.system('rm -r %s/FixSymm'%folder) # # by Alexandre Morlet & Henrique Miranda # def analyse_gw(folder,var,bandc,kpointc,bandv,kpointv,pack,text,draw,verbose=False): """ Study the convergence of GW calculations by looking at the change in band-gap value. 
The script reads from <folder> all results from <variable> calculations and display them. Use the band and k-point options (or change default values) according to the size of your k-grid and the location of the band extrema. """ print(' K-point Band') print('Conduction state %6d %6d'%(kpointc, bandc)) print(' Valence state %6d %6d'%(kpointv, bandv)) #find all ndb.QP files in the folder io = OrderedDict() for root, dirs, files in os.walk(folder): #get starting name of folder basename = os.path.basename(root) #look into folders starting with var or reference if any( [basename.startswith(v) for v in [var,'reference']] ): for filename in files: if filename != 'ndb.QP': continue #get ndb.QP file in folder io[basename] = ( YamboIn.from_file(folder=folder,filename="%s.in"%basename), YamboQPDB.from_db(folder=root,filename=filename) ) #consistency check #TODO convergence_data = [] for basename, (inp,out) in io.items(): #get input value, unit = inp[var] #get qp value # Be careful because the array of eigenvalues is defined now in another way eigenvalues_dft, eigenvalues_qp, lifetimes, z = out.get_qps() #save result qp_gap = eigenvalues_qp[kpointc-out.min_kpoint,bandc-out.min_band] - eigenvalues_qp[kpointv-out.min_kpoint,bandv-out.min_band] #check type of variable if isinstance(value,list): value = value[1] convergence_data.append([value,qp_gap]) convergence_data = np.array(sorted(convergence_data)) if convergence_data.dtype == 'object': raise ValueError('Unknown type of variable') if text: output_folder = 'analyse_%s'%folder if not os.path.isdir(output_folder): os.mkdir(output_folder) outname = os.path.join(output_folder,'%s_%s.dat'%(folder,var)) header = var+' ('+str(unit)+')' np.savetxt(outname,convergence_data,delimiter='\t',header=header) if draw: import matplotlib.pyplot as plt fig = plt.figure() ax = fig.add_subplot(1,1,1) ax.plot(convergence_data[:,0],convergence_data[:,1],'o-') ax.set_xlabel(var+' ('+unit+')') ax.set_ylabel('E_gw = E_lda + \Delta E') 
fig.savefig('%s.png'%var) # # by Alexandre Morlet #<|fim▁hole|> Using ypp, you can study the convergence of BSE calculations in 2 ways: Create a .png of all absorption spectra relevant to the variable you study Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity) The script reads from <folder> all results from <variable> calculations for processing. The resulting pictures and data files are saved in the ./analyse_<folder>/ folder. Arguments: folder -> Folder containing SAVE and convergence runs. var -> Variable tested (e.g. FFTGvecs) numbexc -> Number of excitons to read beyond threshold (default=2) intexc -> Minimum intensity for excitons to be considered bright (default=0.05) degenexc -> Energy threshold under which different peaks are merged (eV) (default=0.01) maxexc -> Energy threshold after which excitons are not read anymore (eV) (default=8.0) text -> Skips writing the .dat file (default: True) draw -> Skips drawing (plotting) the abs spectra (default: True) """ #find the save folder lat = YamboSaveDB.from_db_file(os.path.join(folder,'SAVE')) #find all ndb.BS_diago_Q01 files in the folder io = OrderedDict() for root, dirs, files in os.walk(folder): #get starting name of folder basename = os.path.basename(root) #look into folders starting with var or reference if any( [basename.startswith(v) for v in [var,'reference']] ): for filename in files: if filename != 'ndb.BS_diago_Q01': continue #get ndb.BS_diago_Q01 file in folder io[basename] = ( YamboIn.from_file(folder=folder,filename="%s.in"%basename), YamboExcitonDB.from_db_file(lat,folder=root,filename=filename) ) #TODO consistency check exciton_energies = [] exciton_spectras = [] for basename, (inp,out) in io.items(): #get input value, unit = inp[var] #get exiton energies exciton_energy = out.eigenvalues.real #get excitonic spectra exciton_spectra = out.get_chi() #check type of variable if isinstance(value,list): value = value[1] 
exciton_energies.append([value,exciton_energy]) exciton_spectras.append([value,exciton_spectra]) exciton_spectras = sorted(exciton_spectras,key=lambda x: x[0]) exciton_energies = sorted(exciton_energies,key=lambda x: x[0]) #save a file with the exciton eneergies output_folder = 'analyse_%s'%folder if not os.path.isdir(output_folder): os.mkdir(output_folder) output_file = '%s_exciton_energies.dat'%var with open(os.path.join(output_folder,output_file),'w') as f: header = "%s (%s)\n"%(var,unit) if unit else "%s\n"%var f.write(header) for value,energies in exciton_energies: f.write("{} ".format(value)+("%10.6lf "*numbexc)%tuple(energies[:numbexc])+"\n") import matplotlib.pyplot as plt ## Exciton spectra plots filename = 'exciton_spectra.png' fig = plt.figure(figsize=(6,5)) ax = fig.add_subplot(1,1,1) #plot the spectra cmap = plt.get_cmap('viridis') nspectra = len(exciton_spectras) for i,(value,(w,spectra)) in enumerate(exciton_spectras): plt.plot(w,spectra.imag,c=cmap(i/nspectra),label="{} = {} {}".format(var,value,unit)) ## Spectra plots ax.set_xlabel('$\omega$ (eV)') ax.set_ylabel('Im($\epsilon_M$)') ax.legend(frameon=False) output_file = '%s_exciton_spectra.pdf'%var fig.savefig(os.path.join(output_folder,output_file)) if draw: plt.show() # # by Fulvio Paleari & Henrique Miranda # def merge_qp(output,files,verbose=False): """ Merge the quasiparticle databases produced by yambo """ #read all the files and display main info in each of them print("=========input=========") filenames = [ f.name for f in files] datasets = [ Dataset(filename) for filename in filenames] QP_table, QP_kpts, QP_E_E0_Z = [], [], [] for d,filename in zip(datasets,filenames): _, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:])) print("filename: ", filename) if verbose: print("description:") for i in range(1,nstrings+1): print(''.join(d['DESC_strings_%05d'%i][0])) else: print("description:", ''.join(d['DESC_strings_%05d'%(nstrings)][0])) print() QP_table.append( d['QP_table'][:].T ) 
QP_kpts.append( d['QP_kpts'][:].T ) QP_E_E0_Z.append( d['QP_E_Eo_Z'][:] ) # create the QP_table QP_table_save = np.vstack(QP_table) # create the kpoints table #create a list with the bigger size of QP_table nkpoints = int(max(QP_table_save[:,2])) QP_kpts_save = np.zeros([nkpoints,3]) #iterate over the QP's and store the corresponding kpoint for qp_file,kpts in zip(QP_table,QP_kpts): #iterate over the kpoints and save the coordinates on the list for qp in qp_file: n1,n2,nk = list(map(int,qp)) QP_kpts_save[nk-1] = kpts[nk-1] # create the QPs energies table QP_E_E0_Z_save = np.concatenate(QP_E_E0_Z,axis=1) #create reference file from one of the files netcdf_format = datasets[0].data_model fin = datasets[0] fout = Dataset(output,'w',format=netcdf_format) variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z'] variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save] variables_dict = dict(list(zip(variables_update,variables_save))) PARS_save = fin['PARS'][:] PARS_save[1:3] = nkpoints,len(QP_table_save) #create the description string kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2]) bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1]) description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax) description_save = np.array([i for i in " %s"%description]) #output data print("========output=========") print("filename: ", output) print("description: ", description) #copy dimensions for dname, the_dim in list(fin.dimensions.items()): fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) #get dimensions def dimensions(array): return tuple([ 'D_%010d'%d for d in array.shape ]) #create missing dimensions for v in variables_save: for dname,d in zip( dimensions(v),v.shape ): if dname not in list(fout.dimensions.keys()): fout.createDimension(dname, d) #copy variables for v_name, varin in list(fin.variables.items()): if v_name in variables_update: #get the variable merged = variables_dict[v_name] # 
create the variable outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged)) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) #save outvar outVar[:] = merged else: # create the variable outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) if v_name=='PARS': outVar[:] = PARS_save[:] elif v_name=='DESC_strings_%05d'%(nstrings): outVar[:] = varin[:] outVar[:,:len(description_save)] = description_save.T else: outVar[:] = varin[:] fout.close() # # by Alexandre Morlet, Fulvio Paleari & Henrique Miranda # def add_qp(output,add=[],substract=[],addimg=[],verbose=False): """ Add quasiparticle lifetimes from multiple files """ # Define filenames addf=[f.name for f in add] subf=[f.name for f in substract] addimgf=[f.name for f in addimg] filenames = addf+subf+addimgf if len(filenames) is 0: raise ValueError('No files passed to function.') # Init empty lists and dics sizes=[] # contains the various 'PARS' QP_table, QP_kpts, QP_E_E0_Z = {},{},{} # read value for each file qpdic = {} # used to calculate the final E (real part) qpdici = {} # used to calculate the final E (img part) # Read the files datasets = [ Dataset(filename) for filename in filenames] print("\n Reading input files\n") for d,f in zip(datasets,filenames): print("filename: %s"%f) # read sizes _, nkpoints, nqps, _, nstrings = list(map(int,d['PARS'][:])) sizes.append((f,(nkpoints,nqps,nstrings))) # Check if the number of kpoints is consistent # (Don't forget to break symmetries on every file for RT) if nkpoints!=sizes[0][1][0]: raise ValueError('File %s does not have the same number of kpoints'%f) # printing the description string # (breaking the symmetries doesn't update the descr) if verbose: print("description:") for i in range(1,nstrings+1): print(''.join(d['DESC_strings_%05d'%i][0])) else: print("description:", 
''.join(d['DESC_strings_%05d'%(nstrings)][0])) # fill dictionaries with data for all files QP_table[f] = d['QP_table'][:].T QP_kpts[f] = d['QP_kpts'][:].T QP_E_E0_Z[f]= d['QP_E_Eo_Z'][:] # Init qpdic & qpdici (going through each file in case the number of bands is different) # For qpdici, we assume Im(Eo)=0 for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]): qpdic[(n1,n2,k)]=Eo qpdici[(n1,n2,k)]=0 print("Number of k points: %s\n"%nkpoints) # keys are sorted in the order yambo usually writes DBs qpkeys = sorted(list(qpdic.keys()),key=itemgetter(2,1)) # For E, [0,:,:] is real part and [1,:,:] is img part QP_E_E0_Z_save = np.zeros((2,len(qpkeys),3)) QP_table_save = np.zeros((len(qpkeys),3)) # create and init the QPs energies table # The E0 is simply written in the real part (is 0 in the img part) # and Z = 1 (since we merge different calculation types) for i,(n1,n2,k) in enumerate(qpkeys): QP_E_E0_Z_save[0,i,1] = qpdic[(n1,n2,k)] QP_E_E0_Z_save[0,:,2] = 1 QP_E_E0_Z_save[1,:,1] = 0 QP_E_E0_Z_save[1,:,2] = 1 # Add corrections in real part (-a files) for f in addf: print('Add E corr for real part : %s'%f) for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]): qpdic[(n1,n2,k)]+=E-Eo # Sub corrections in real part (-s files) for f in subf: print('Sub E corr for real part : %s'%f) for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][0]): qpdic[(n1,n2,k)]-=E-Eo # Add corrections in img part (-ai files) for f in addimgf: print('Add E corr for img part : %s'%f) for (n1,n2,k),(E,Eo,Z) in zip(QP_table[f],QP_E_E0_Z[f][1]): qpdici[(n1,n2,k)]+=E-Eo # create the kpoints table # We put the restriction to have the same number of k points (same grid), so any file fits QP_kpts_save = QP_kpts[filenames[0]] # Filling the E column for i,(n1,n2,k) in enumerate(qpkeys): QP_table_save[i]=[n1,n2,k] QP_E_E0_Z_save[0,i,0]+=qpdic[(n1,n2,k)] QP_E_E0_Z_save[1,i,0]+=qpdici[(n1,n2,k)] ## Output file #create reference file from one of the files netcdf_format = datasets[0].data_model fin 
= datasets[0] fout = Dataset(output,'w',format=netcdf_format) variables_update = ['QP_table', 'QP_kpts', 'QP_E_Eo_Z'] variables_save = [QP_table_save.T, QP_kpts_save.T, QP_E_E0_Z_save] variables_dict = dict(list(zip(variables_update,variables_save))) PARS_save = fin['PARS'][:] PARS_save[1:3] = sizes[0][1][0],len(QP_table_save) #create the description string kmin,kmax = np.amin(QP_table_save[:,2]),np.amax(QP_table_save[:,2]) bmin,bmax = np.amin(QP_table_save[:,1]),np.amax(QP_table_save[:,1]) description = "QP @ K %03d - %03d : b %03d - %03d"%(kmin,kmax,bmin,bmax) description_save = np.array([i for i in " %s"%description]) #output data print("\n Producing output file\n") print("filename: ", output) print("description: ", description) #copy dimensions for dname, the_dim in list(fin.dimensions.items()): fout.createDimension(dname, len(the_dim) if not the_dim.isunlimited() else None) #get dimensions def dimensions(array): return tuple([ 'D_%010d'%d for d in array.shape ]) #create missing dimensions for v in variables_save: for dname,d in zip( dimensions(v),v.shape ): if dname not in list(fout.dimensions.keys()): fout.createDimension(dname, d) #copy variables for v_name, varin in list(fin.variables.items()): if v_name in variables_update: #get the variable merged = variables_dict[v_name] # create the variable outVar = fout.createVariable(v_name, varin.datatype, dimensions(merged)) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) #save outvar outVar[:] = merged else: # create the variable outVar = fout.createVariable(v_name, varin.datatype, varin.dimensions) # Copy variable attributes outVar.setncatts({k: varin.getncattr(k) for k in varin.ncattrs()}) if v_name=='PARS': outVar[:] = PARS_save[:] elif v_name=='DESC_strings_%05d'%(nstrings): outVar[:] = varin[:] outVar[:,:len(description_save)] = description_save.T else: outVar[:] = varin[:] fout.close() # # by Henrique Miranda # def plot_excitons(filename,cut=0.2,size=20): from 
math import ceil, sqrt def get_var(dictionary,variables): """ To have compatibility with different versions of yambo We provide a list of different possible tags """ for var in variables: if var in dictionary: return dictionary[var] raise ValueError( 'Could not find the variables %s in the output file'%str(variables) ) # # read file # f = open(filename) data = json.load(f) f.close() # # plot the absorption spectra # nexcitons = len(data['excitons']) print("nexitons", nexcitons) plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPS-Im[2]' ]),label='BSE',lw=2) plt.plot(get_var(data,['E/ev','E/ev[1]']), get_var(data,['EPSo-Im[4]']),label='IP',lw=2) for n,exciton in enumerate(data['excitons']): plt.axvline(exciton['energy']) plt.xlabel('$\\omega$ (eV)') plt.ylabel('Intensity arb. units') plt.legend(frameon=False) plt.draw() # # plot excitons # #dimensions nx = int(ceil(sqrt(nexcitons))) ny = int(ceil(nexcitons*1.0/nx)) print("cols:",nx) print("rows:",ny) cmap = plt.get_cmap("gist_heat_r") fig = plt.figure(figsize=(nx*3,ny*3)) sorted_excitons = sorted(data['excitons'],key=lambda x: x['energy']) for n,exciton in enumerate(sorted_excitons): #get data w = np.array(exciton['weights']) qpt = np.array(exciton['qpts']) #plot ax = plt.subplot(ny,nx,n+1) ax.scatter(qpt[:,0], qpt[:,1], s=size, c=w, marker='H', cmap=cmap, lw=0, label="%5.2lf (eV)"%exciton['energy']) ax.text(-cut*.9,-cut*.9,"%5.2lf (eV)"%exciton['energy']) # axis plt.xlim([-cut,cut]) plt.ylim([-cut,cut]) ax.yaxis.set_major_locator(plt.NullLocator()) ax.xaxis.set_major_locator(plt.NullLocator()) ax.set_aspect('equal') plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.01, hspace=0.01) #remove extension from file figure_filename = os.path.splitext(filename)[0] plt.savefig('%s.png'%figure_filename)<|fim▁end|>
def analyse_bse(folder,var,numbexc,intexc,degenexc,maxexc,text,draw,verbose=False): """
<|file_name|>test_auth.py<|end_file_name|><|fim▁begin|>from __future__ import absolute_import from sentry.testutils import AcceptanceTestCase class AuthTest(AcceptanceTestCase): def enter_auth(self, username, password): # disable captcha as it makes these tests flakey (and requires waiting # on external resources) with self.settings(RECAPTCHA_PUBLIC_KEY=None): self.browser.get('/auth/login/') self.browser.find_element_by_id('id_username').send_keys(username) self.browser.find_element_by_id('id_password').send_keys(password) self.browser.find_element_by_xpath("//button[contains(text(), 'Login')]").click() def test_renders(self): self.browser.get('/auth/login/') self.browser.snapshot(name='login') def test_no_credentials(self): self.enter_auth('', '') self.browser.snapshot(name='login fields required') <|fim▁hole|> self.enter_auth('bad-username', 'bad-username') self.browser.snapshot(name='login fields invalid') def test_success(self): email = 'dummy@example.com' password = 'dummy' user = self.create_user(email=email) user.set_password(password) user.save() self.enter_auth(email, password) self.browser.snapshot(name='login success')<|fim▁end|>
def test_invalid_credentials(self):
<|file_name|>main.ts<|end_file_name|><|fim▁begin|><|fim▁hole|>enableProdMode(); platformBrowser().bootstrapModuleFactory(AppModuleNgFactory);<|fim▁end|>
import { platformBrowser } from '@angular/platform-browser'; import { enableProdMode } from '@angular/core'; import { AppModuleNgFactory } from '../../../temp/app/navigationbar/keyboardnavigation/app.module.ngfactory';
<|file_name|>skip_logic.ts<|end_file_name|><|fim▁begin|>import {Registry} from "../src/registry"; import {expect} from "chai"; import {SkipLogic} from "../src/skip_logic"; import {MemoryFile} from "../src/files/memory_file"; import {Config} from "../src/config"; describe("Skip logic", () => { it("normal interface, no skip", async () => { const abap = ` INTERFACE zif_bar PUBLIC. ENDINTERFACE.`; const file = new MemoryFile("zif_bar.ingf.abap", abap); const reg = new Registry().addFile(file); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(false); }); it("isGeneratedGatewayClass, true", async () => { const abap = "class /ABC/CL_Z_ABAPGIT_TEST_MPC definition\n" + " public\n" + " inheriting from /IWBEP/CL_MGW_PUSH_ABS_MODEL\n" + " create public .\n" + "public section.\n" + "protected section.\n" + "private section.\n" +<|fim▁hole|> const file = new MemoryFile("#abc#cl_z_abapgit_test_mpc.clas.abap", abap); const reg = new Registry().addFile(file); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(true); }); it("interface, isGeneratedProxy", async () => { const abap = "INTERFACE zif_foobar PUBLIC.\n" + " METHODS method1 IMPORTING foo TYPE i.\n" + "ENDINTERFACE."; const reg = new Registry().addFile(new MemoryFile("zif_foobar.intf.abap", abap)); const xml = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n" + "<abapGit version=\"v1.0.0\" serializer=\"LCL_OBJECT_INTF\" serializer_version=\"v1.0.0\">\n" + "<asx:abap xmlns:asx=\"http://www.sap.com/abapxml\" version=\"1.0\">\n" + "<asx:values>\n" + "<VSEOINTERF>\n" + "<CLSNAME>ZIF_FOOBAR</CLSNAME>\n" + "<LANGU>E</LANGU>\n" + "<DESCRIPT>Proxy Interface (generated)</DESCRIPT>\n" + "<EXPOSURE>2</EXPOSURE>\n" + "<STATE>1</STATE>\n" + "<UNICODE>X</UNICODE>\n" + "<CLSPROXY>X</CLSPROXY>\n" + "</VSEOINTERF>\n" + "</asx:values>\n" + "</asx:abap>\n" + "</abapGit>"; reg.addFile(new 
MemoryFile("zif_foobar.intf.xml", xml)); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(true); }); it("generated BOPF constants interface", async () => { const abap = ` INTERFACE zif_tt_i_projects_c PUBLIC. INTERFACES /bobf/if_lib_constants . ENDINTERFACE.`; const file = new MemoryFile("zif_tt_i_projects_c.intf.abap", abap); const reg = new Registry().addFile(file); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(true); }); it("generated DPC classes generated for a published CDS", async () => { const abap = ` class ZCL_ZTT_C_PROJECTS definition public inheriting from CL_SADL_GTK_EXPOSURE_MPC final create public. ENDCLASS. CLASS ZCL_ZTT_C_PROJECTS IMPLEMENTATION. ENDCLASS. `; const file = new MemoryFile("zcl_ztt_c_projects.clas.abap", abap); const reg = new Registry().addFile(file); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(true); }); it("skipIncludesWithoutMain, skip", async () => { const reg = new Registry(); const config = reg.getConfig().get(); config.global.skipIncludesWithoutMain = true; reg.setConfig(new Config(JSON.stringify(config))); const abap = `write bar.`; reg.addFile(new MemoryFile("zprog.prog.abap", abap)); const xml = ` <?xml version="1.0" encoding="utf-8"?> <abapGit version="v1.0.0" serializer="LCL_OBJECT_PROG" serializer_version="v1.0.0"> <asx:abap xmlns:asx="http://www.sap.com/abapxml" version="1.0"> <asx:values> <PROGDIR> <NAME>ZPROG</NAME> <SUBC>I</SUBC> <RLOAD>E</RLOAD> <UCCHECK>X</UCCHECK> </PROGDIR> </asx:values> </asx:abap> </abapGit>`; reg.addFile(new MemoryFile("zprog.prog.xml", xml)); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(true); }); it("skipIncludesWithoutMain, not an include, do not skip", async () 
=> { const reg = new Registry(); const config = reg.getConfig().get(); config.global.skipIncludesWithoutMain = true; reg.setConfig(new Config(JSON.stringify(config))); const abap = `write bar.`; reg.addFile(new MemoryFile("zprog.prog.abap", abap)); await reg.parseAsync(); expect(reg.getObjectCount()).to.equal(1); expect(new SkipLogic(reg).skip(reg.getFirstObject()!)).to.equal(false); }); });<|fim▁end|>
"ENDCLASS.\n" + "CLASS /ABC/CL_Z_ABAPGIT_TEST_MPC IMPLEMENTATION.\n" + "ENDCLASS.";
<|file_name|>test_driver_copy.py<|end_file_name|><|fim▁begin|>import pytest from tests.utils.targetdriver import TargetDriver, if_feature from tests.utils.testdriver import TestDriver def get_services(): return TargetDriver('rep1'), TestDriver('rep2') @pytest.fixture(autouse=True) def _(module_launcher_launch): pass @if_feature.copy_file_from_onitu def test_driver_copy_from_onitu(module_launcher):<|fim▁hole|> module_launcher.copy_file('default', 'copy1', 100, d_test, d_target) @if_feature.copy_file_to_onitu def test_driver_copy_to_onitu(module_launcher): d_target, d_test = module_launcher.get_services('rep1', 'rep2') module_launcher.copy_file('default', 'copy2', 100, d_target, d_test)<|fim▁end|>
d_target, d_test = module_launcher.get_services('rep1', 'rep2')
<|file_name|>image.go<|end_file_name|><|fim▁begin|>package images // import "github.com/tiborvass/docker/daemon/images" import ( "fmt" "github.com/pkg/errors" "github.com/docker/distribution/reference" "github.com/tiborvass/docker/errdefs" "github.com/tiborvass/docker/image" specs "github.com/opencontainers/image-spec/specs-go/v1" ) // ErrImageDoesNotExist is error returned when no image can be found for a reference. type ErrImageDoesNotExist struct { ref reference.Reference } func (e ErrImageDoesNotExist) Error() string { ref := e.ref if named, ok := ref.(reference.Named); ok { ref = reference.TagNameOnly(named) } return fmt.Sprintf("No such image: %s", reference.FamiliarString(ref)) } // NotFound implements the NotFound interface func (e ErrImageDoesNotExist) NotFound() {} // GetImage returns an image corresponding to the image referred to by refOrID. func (i *ImageService) GetImage(refOrID string, platform *specs.Platform) (retImg *image.Image, retErr error) { defer func() { if retErr != nil || retImg == nil || platform == nil { return } // This allows us to tell clients that we don't have the image they asked for // Where this gets hairy is the image store does not currently support multi-arch images, e.g.: // An image `foo` may have a multi-arch manifest, but the image store only fetches the image for a specific platform // The image store does not store the manifest list and image tags are assigned to architecture specific images. // So we can have a `foo` image that is amd64 but the user requested armv7. If the user looks at the list of images. // This may be confusing. 
// The alternative to this is to return a errdefs.Conflict error with a helpful message, but clients will not be // able to automatically tell what causes the conflict.<|fim▁hole|> if retImg.OS != platform.OS { retErr = errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified OS platform: wanted: %s, actual: %s", refOrID, platform.OS, retImg.OS)) retImg = nil return } if retImg.Architecture != platform.Architecture { retErr = errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform cpu architecture: wanted: %s, actual: %s", refOrID, platform.Architecture, retImg.Architecture)) retImg = nil return } // Only validate variant if retImg has a variant set. // The image variant may not be set since it's a newer field. if platform.Variant != "" && retImg.Variant != "" && retImg.Variant != platform.Variant { retErr = errdefs.NotFound(errors.Errorf("image with reference %s was found but does not match the specified platform cpu architecture variant: wanted: %s, actual: %s", refOrID, platform.Variant, retImg.Variant)) retImg = nil return } }() ref, err := reference.ParseAnyReference(refOrID) if err != nil { return nil, errdefs.InvalidParameter(err) } namedRef, ok := ref.(reference.Named) if !ok { digested, ok := ref.(reference.Digested) if !ok { return nil, ErrImageDoesNotExist{ref} } id := image.IDFromDigest(digested.Digest()) if img, err := i.imageStore.Get(id); err == nil { return img, nil } return nil, ErrImageDoesNotExist{ref} } if digest, err := i.referenceStore.Get(namedRef); err == nil { // Search the image stores to get the operating system, defaulting to host OS. 
id := image.IDFromDigest(digest) if img, err := i.imageStore.Get(id); err == nil { return img, nil } } // Search based on ID if id, err := i.imageStore.Search(refOrID); err == nil { img, err := i.imageStore.Get(id) if err != nil { return nil, ErrImageDoesNotExist{ref} } return img, nil } return nil, ErrImageDoesNotExist{ref} }<|fim▁end|>
<|file_name|>call.go<|end_file_name|><|fim▁begin|>package introspect import ( "encoding/xml" "strings" "github.com/godbus/dbus/v5" ) // Call calls org.freedesktop.Introspectable.Introspect on a remote object // and returns the introspection data. func Call(o dbus.BusObject) (*Node, error) { var xmldata string var node Node err := o.Call("org.freedesktop.DBus.Introspectable.Introspect", 0).Store(&xmldata) if err != nil { return nil, err<|fim▁hole|> if err != nil { return nil, err } if node.Name == "" { node.Name = string(o.Path()) } return &node, nil }<|fim▁end|>
} err = xml.NewDecoder(strings.NewReader(xmldata)).Decode(&node)
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>// Copyright 2015 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. //! Traits, helpers, and type definitions for core I/O functionality. //! //! > **NOTE**: This module is very much a work in progress and is under active //! > development. At this time it is still recommended to use the `old_io` //! > module while the details of this module shake out. #![unstable(feature = "io", reason = "this new I/O module is still under active development and \ APIs are subject to tweaks fairly regularly")] use cmp; use unicode::str as core_str; use error::Error as StdError; use fmt; use iter::Iterator; use marker::Sized; use ops::{Drop, FnOnce}; use option::Option::{self, Some, None}; use ptr::PtrExt; use result::Result::{Ok, Err}; use result; use slice::{self, SliceExt}; use string::String; use str::{self, StrExt}; use vec::Vec; pub use self::buffered::{BufReader, BufWriter, BufStream, LineWriter}; pub use self::buffered::IntoInnerError; pub use self::cursor::Cursor; pub use self::error::{Result, Error, ErrorKind}; pub use self::util::{copy, sink, Sink, empty, Empty, repeat, Repeat}; pub use self::stdio::{stdin, stdout, stderr, Stdin, Stdout, Stderr}; pub use self::stdio::{StdoutLock, StderrLock, StdinLock}; #[macro_use] mod lazy; pub mod prelude; mod buffered; mod cursor; mod error; mod impls; mod util; mod stdio; const DEFAULT_BUF_SIZE: usize = 64 * 1024; // Acquires a slice of the vector `v` from its length to its capacity // (uninitialized data), reads into it, and then updates the length. 
// // This function is leveraged to efficiently read some bytes into a destination // vector without extra copying and taking advantage of the space that's already // in `v`. // // The buffer we're passing down, however, is pointing at uninitialized data // (the end of a `Vec`), and many operations will be *much* faster if we don't // have to zero it out. In order to prevent LLVM from generating an `undef` // value when reads happen from this uninitialized memory, we force LLVM to // think it's initialized by sending it through a black box. This should prevent // actual undefined behavior after optimizations. fn with_end_to_cap<F>(v: &mut Vec<u8>, f: F) -> Result<usize> where F: FnOnce(&mut [u8]) -> Result<usize> { unsafe { let n = try!(f({ let base = v.as_mut_ptr().offset(v.len() as isize); black_box(slice::from_raw_parts_mut(base, v.capacity() - v.len())) })); // If the closure (typically a `read` implementation) reported that it // read a larger number of bytes than the vector actually has, we need // to be sure to clamp the vector to at most its capacity. let new_len = cmp::min(v.capacity(), v.len() + n); v.set_len(new_len); return Ok(n); } // Semi-hack used to prevent LLVM from retaining any assumptions about // `dummy` over this function call unsafe fn black_box<T>(mut dummy: T) -> T { asm!("" :: "r"(&mut dummy) : "memory"); dummy } } // A few methods below (read_to_string, read_line) will append data into a // `String` buffer, but we need to be pretty careful when doing this. The // implementation will just call `.as_mut_vec()` and then delegate to a // byte-oriented reading method, but we must ensure that when returning we never // leave `buf` in a state such that it contains invalid UTF-8 in its bounds. // // To this end, we use an RAII guard (to protect against panics) which updates // the length of the string when it is dropped. 
This guard initially truncates // the string to the prior length and only after we've validated that the // new contents are valid UTF-8 do we allow it to set a longer length. // // The unsafety in this function is twofold: // // 1. We're looking at the raw bytes of `buf`, so we take on the burden of UTF-8 // checks. // 2. We're passing a raw buffer to the function `f`, and it is expected that // the function only *appends* bytes to the buffer. We'll get undefined // behavior if existing bytes are overwritten to have non-UTF-8 data. fn append_to_string<F>(buf: &mut String, f: F) -> Result<()> where F: FnOnce(&mut Vec<u8>) -> Result<()> { struct Guard<'a> { s: &'a mut Vec<u8>, len: usize } #[unsafe_destructor] impl<'a> Drop for Guard<'a> { fn drop(&mut self) { unsafe { self.s.set_len(self.len); } } } unsafe { let mut g = Guard { len: buf.len(), s: buf.as_mut_vec() }; let ret = f(g.s); if str::from_utf8(&g.s[g.len..]).is_err() { ret.and_then(|()| { Err(Error::new(ErrorKind::InvalidInput, "stream did not contain valid UTF-8", None)) }) } else { g.len = g.s.len(); ret } } } fn read_to_end<R: Read + ?Sized>(r: &mut R, buf: &mut Vec<u8>) -> Result<()> { loop { if buf.capacity() == buf.len() { buf.reserve(DEFAULT_BUF_SIZE); } match with_end_to_cap(buf, |b| r.read(b)) { Ok(0) => return Ok(()), Ok(_) => {} Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => return Err(e), } } } /// A trait for objects which are byte-oriented sources. /// /// Readers are defined by one method, `read`. Each call to `read` will attempt /// to pull bytes from this source into a provided buffer. /// /// Readers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Read` trait. pub trait Read { /// Pull some bytes from this source into the specified buffer, returning /// how many bytes were read. 
/// /// This function does not provide any guarantees about whether it blocks /// waiting for data, but if an object needs to block for a read but cannot /// it will typically signal this via an `Err` return value. /// /// If the return value of this method is `Ok(n)`, then it must be /// guaranteed that `0 <= n <= buf.len()`. A nonzero `n` value indicates /// that the buffer `buf` has ben filled in with `n` bytes of data from this /// source. If `n` is `0`, then it can indicate one of two scenarios: /// /// 1. This reader has reached its "end of file" and will likely no longer /// be able to produce bytes. Note that this does not mean that the /// reader will *always* no longer be able to produce bytes. /// 2. The buffer specified was 0 bytes in length. /// /// No guarantees are provided about the contents of `buf` when this /// function is called, implementations cannot rely on any property of the /// contents of `buf` being true. It is recommended that implementations /// only write data to `buf` instead of reading its contents. /// /// # Errors /// /// If this function encounters any form of I/O or other error, an error /// variant will be returned. If an error is returned then it must be /// guaranteed that no bytes were read. fn read(&mut self, buf: &mut [u8]) -> Result<usize>; /// Read all bytes until EOF in this source, placing them into `buf`. /// /// All bytes read from this source will be appended to the specified buffer /// `buf`. This function will return a call to `read` either: /// /// 1. Returns `Ok(0)`. /// 2. Returns an error which is not of the kind `ErrorKind::Interrupted`. /// /// Until one of these conditions is met the function will continuously /// invoke `read` to append more data to `buf`. /// /// # Errors /// /// If this function encounters an error of the kind /// `ErrorKind::Interrupted` then the error is ignored and the operation /// will continue. 
/// /// If any other read error is encountered then this function immediately /// returns. Any bytes which have already been read will be appended to /// `buf`. fn read_to_end(&mut self, buf: &mut Vec<u8>) -> Result<()> { read_to_end(self, buf) } /// Read all bytes until EOF in this source, placing them into `buf`. /// /// # Errors /// /// If the data in this stream is *not* valid UTF-8 then an error is /// returned and `buf` is unchanged. /// /// See `read_to_end` for other error semantics. fn read_to_string(&mut self, buf: &mut String) -> Result<()> { // Note that we do *not* call `.read_to_end()` here. We are passing // `&mut Vec<u8>` (the raw contents of `buf`) into the `read_to_end` // method to fill it up. An arbitrary implementation could overwrite the // entire contents of the vector, not just append to it (which is what // we are expecting). // // To prevent extraneously checking the UTF-8-ness of the entire buffer // we pass it to our hardcoded `read_to_end` implementation which we // know is guaranteed to only read data into the end of the buffer. append_to_string(buf, |b| read_to_end(self, b)) } } /// Extension methods for all instances of `Read`, typically imported through /// `std::io::prelude::*`. #[unstable(feature = "io", reason = "may merge into the Read trait")] pub trait ReadExt: Read + Sized { /// Create a "by reference" adaptor for this instance of `Read`. /// /// The returned adaptor also implements `Read` and will simply borrow this /// current reader. #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self { self } /// Transform this `Read` instance to an `Iterator` over its bytes. /// /// The returned type implements `Iterator` where the `Item` is `Result<u8, /// R::Err>`. The yielded item is `Ok` if a byte was successfully read and /// `Err` otherwise for I/O errors. EOF is mapped to returning `None` from /// this iterator. 
#[stable(feature = "rust1", since = "1.0.0")] fn bytes(self) -> Bytes<Self> { Bytes { inner: self } } /// Transform this `Read` instance to an `Iterator` over `char`s. /// /// This adaptor will attempt to interpret this reader as an UTF-8 encoded /// sequence of characters. The returned iterator will return `None` once /// EOF is reached for this reader. Otherwise each element yielded will be a /// `Result<char, E>` where `E` may contain information about what I/O error /// occurred or where decoding failed. /// /// Currently this adaptor will discard intermediate data read, and should /// be avoided if this is not desired. #[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn chars(self) -> Chars<Self> { Chars { inner: self } } /// Create an adaptor which will chain this stream with another. /// /// The returned `Read` instance will first read all bytes from this object /// until EOF is encountered. Afterwards the output is equivalent to the /// output of `next`. #[stable(feature = "rust1", since = "1.0.0")] fn chain<R: Read>(self, next: R) -> Chain<Self, R> { Chain { first: self, second: next, done_first: false } } /// Create an adaptor which will read at most `limit` bytes from it. /// /// This function returns a new instance of `Read` which will read at most /// `limit` bytes, after which it will always return EOF (`Ok(0)`). Any /// read errors will not count towards the number of bytes read and future /// calls to `read` may succeed. #[stable(feature = "rust1", since = "1.0.0")] fn take(self, limit: u64) -> Take<Self> { Take { inner: self, limit: limit } } /// Creates a reader adaptor which will write all read data into the given /// output stream. /// /// Whenever the returned `Read` instance is read it will write the read /// data to `out`. The current semantics of this implementation imply that /// a `write` error will not report how much data was initially read. 
#[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn tee<W: Write>(self, out: W) -> Tee<Self, W> { Tee { reader: self, writer: out } } } impl<T: Read> ReadExt for T {} /// A trait for objects which are byte-oriented sinks. /// /// The `write` method will attempt to write some data into the object, /// returning how many bytes were successfully written. /// /// The `flush` method is useful for adaptors and explicit buffers themselves /// for ensuring that all buffered data has been pushed out to the "true sink". /// /// Writers are intended to be composable with one another. Many objects /// throughout the I/O and related libraries take and provide types which /// implement the `Write` trait. pub trait Write { /// Write a buffer into this object, returning how many bytes were written. /// /// This function will attempt to write the entire contents of `buf`, but /// the entire write may not succeed, or the write may also generate an /// error. A call to `write` represents *at most one* attempt to write to /// any wrapped object. /// /// Calls to `write` are not guaranteed to block waiting for data to be /// written, and a write which would otherwise block can indicated through /// an `Err` variant. /// /// If the return value is `Ok(n)` then it must be guaranteed that /// `0 <= n <= buf.len()`. A return value of `0` typically means that the /// underlying object is no longer able to accept bytes and will likely not /// be able to in the future as well, or that the buffer provided is empty. /// /// # Errors /// /// Each call to `write` may generate an I/O error indicating that the /// operation could not be completed. If an error is returned then no bytes /// in the buffer were written to this writer. /// /// It is **not** considered an error if the entire buffer could not be /// written to this writer. 
fn write(&mut self, buf: &[u8]) -> Result<usize>; /// Flush this output stream, ensuring that all intermediately buffered /// contents reach their destination. /// /// # Errors /// /// It is considered an error if not all bytes could be written due to /// I/O errors or EOF being reached. fn flush(&mut self) -> Result<()>; /// Attempts to write an entire buffer into this write. /// /// This method will continuously call `write` while there is more data to /// write. This method will not return until the entire buffer has been /// successfully written or an error occurs. The first error generated from /// this method will be returned. /// /// # Errors /// /// This function will return the first error that `write` returns. fn write_all(&mut self, mut buf: &[u8]) -> Result<()> { while buf.len() > 0 { match self.write(buf) { Ok(0) => return Err(Error::new(ErrorKind::WriteZero, "failed to write whole buffer", None)), Ok(n) => buf = &buf[n..], Err(ref e) if e.kind() == ErrorKind::Interrupted => {} Err(e) => return Err(e), } } Ok(()) } /// Writes a formatted string into this writer, returning any error /// encountered. /// /// This method is primarily used to interface with the `format_args!` /// macro, but it is rare that this should explicitly be called. The /// `write!` macro should be favored to invoke this method instead. /// /// This function internally uses the `write_all` method on this trait and /// hence will continuously write data so long as no errors are received. /// This also means that partial writes are not indicated in this signature. /// /// # Errors /// /// This function will return any I/O error reported while formatting. fn write_fmt(&mut self, fmt: fmt::Arguments) -> Result<()> { // Create a shim which translates a Write to a fmt::Write and saves // off I/O errors. 
instead of discarding them struct Adaptor<'a, T: ?Sized + 'a> { inner: &'a mut T, error: Result<()>, } impl<'a, T: Write + ?Sized> fmt::Write for Adaptor<'a, T> { fn write_str(&mut self, s: &str) -> fmt::Result { match self.inner.write_all(s.as_bytes()) { Ok(()) => Ok(()), Err(e) => { self.error = Err(e); Err(fmt::Error) } } } } let mut output = Adaptor { inner: self, error: Ok(()) }; match fmt::write(&mut output, fmt) { Ok(()) => Ok(()), Err(..) => output.error } } } /// Extension methods for all instances of `Write`, typically imported through /// `std::io::prelude::*`. #[unstable(feature = "io", reason = "may merge into the Read trait")] pub trait WriteExt: Write + Sized { /// Create a "by reference" adaptor for this instance of `Write`. /// /// The returned adaptor also implements `Write` and will simply borrow this /// current writer. #[stable(feature = "rust1", since = "1.0.0")] fn by_ref(&mut self) -> &mut Self { self } /// Creates a new writer which will write all data to both this writer and /// another writer. /// /// All data written to the returned writer will both be written to `self` /// as well as `other`. Note that the error semantics of the current /// implementation do not precisely track where errors happen. For example /// an error on the second call to `write` will not report that the first /// call to `write` succeeded. #[unstable(feature = "io", reason = "the semantics of a partial read/write \ of where errors happen is currently \ unclear and may change")] fn broadcast<W: Write>(self, other: W) -> Broadcast<Self, W> { Broadcast { first: self, second: other } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Write> WriteExt for T {} /// An object implementing `Seek` internally has some form of cursor which can /// be moved within a stream of bytes. /// /// The stream typically has a fixed size, allowing seeking relative to either /// end or the current offset. 
pub trait Seek { /// Seek to an offset, in bytes, in a stream /// /// A seek beyond the end of a stream is allowed, but seeking before offset /// 0 is an error. /// /// The behavior when seeking past the end of the stream is implementation /// defined. /// /// This method returns the new position within the stream if the seek /// operation completed successfully. /// /// # Errors /// /// Seeking to a negative offset is considered an error fn seek(&mut self, pos: SeekFrom) -> Result<u64>; } /// Enumeration of possible methods to seek within an I/O object. #[derive(Copy, PartialEq, Eq, Clone, Debug)] pub enum SeekFrom { /// Set the offset to the provided number of bytes. Start(u64), /// Set the offset to the size of this object plus the specified number of /// bytes. /// /// It is possible to seek beyond the end of an object, but is an error to /// seek before byte 0. End(i64), /// Set the offset to the current position plus the specified number of /// bytes. /// /// It is possible to seek beyond the end of an object, but is an error to /// seek before byte 0. Current(i64), } fn read_until<R: BufRead + ?Sized>(r: &mut R, delim: u8, buf: &mut Vec<u8>) -> Result<()> { loop { let (done, used) = { let available = match r.fill_buf() { Ok(n) => n, Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, Err(e) => return Err(e) }; match available.position_elem(&delim) { Some(i) => { buf.push_all(&available[..i + 1]); (true, i + 1) } None => { buf.push_all(available); (false, available.len()) } } }; r.consume(used); if done || used == 0 { return Ok(()); } } } /// A Buffer is a type of reader which has some form of internal buffering to /// allow certain kinds of reading operations to be more optimized than others. /// /// This type extends the `Read` trait with a few methods that are not /// possible to reasonably implement with purely a read interface. pub trait BufRead: Read { /// Fills the internal buffer of this object, returning the buffer contents. 
/// /// None of the contents will be "read" in the sense that later calling /// `read` may return the same contents. /// /// The `consume` function must be called with the number of bytes that are /// consumed from this buffer returned to ensure that the bytes are never /// returned twice. /// /// An empty buffer returned indicates that the stream has reached EOF. /// /// # Errors /// /// This function will return an I/O error if the underlying reader was /// read, but returned an error. fn fill_buf(&mut self) -> Result<&[u8]>; /// Tells this buffer that `amt` bytes have been consumed from the buffer, /// so they should no longer be returned in calls to `read`. fn consume(&mut self, amt: usize); /// Read all bytes until the delimiter `byte` is reached. /// /// This function will continue to read (and buffer) bytes from the /// underlying stream until the delimiter or EOF is found. Once found, all /// bytes up to, and including, the delimiter (if found) will be appended to /// `buf`. /// /// If this buffered reader is currently at EOF, then this function will not /// place any more bytes into `buf` and will return `Ok(())`. /// /// # Errors /// /// This function will ignore all instances of `ErrorKind::Interrupted` and /// will otherwise return any errors returned by `fill_buf`. /// /// If an I/O error is encountered then all bytes read so far will be /// present in `buf` and its length will have been adjusted appropriately. fn read_until(&mut self, byte: u8, buf: &mut Vec<u8>) -> Result<()> { read_until(self, byte, buf) } /// Read all bytes until a newline byte (the 0xA byte) is reached. /// /// This function will continue to read (and buffer) bytes from the /// underlying stream until the newline delimiter (the 0xA byte) or EOF is /// found. Once found, all bytes up to, and including, the delimiter (if /// found) will be appended to `buf`. /// /// If this reader is currently at EOF then this function will not modify /// `buf` and will return `Ok(())`. 
/// /// # Errors /// /// This function has the same error semantics as `read_until` and will also /// return an error if the read bytes are not valid UTF-8. If an I/O error /// is encountered then `buf` may contain some bytes already read in the /// event that all data read so far was valid UTF-8. fn read_line(&mut self, buf: &mut String) -> Result<()> { // Note that we are not calling the `.read_until` method here, but // rather our hardcoded implementation. For more details as to why, see // the comments in `read_to_end`. append_to_string(buf, |b| read_until(self, b'\n', b)) } } /// Extension methods for all instances of `BufRead`, typically imported through /// `std::io::prelude::*`. pub trait BufReadExt: BufRead + Sized { /// Returns an iterator over the contents of this reader split on the byte /// `byte`. /// /// The iterator returned from this function will return instances of /// `io::Result<Vec<u8>>`. Each vector returned will *not* have the /// delimiter byte at the end. /// /// This function will yield errors whenever `read_until` would have also /// yielded an error. #[unstable(feature = "io", reason = "may be renamed to not conflict with \ SliceExt::split")] fn split(self, byte: u8) -> Split<Self> { Split { buf: self, delim: byte } } /// Returns an iterator over the lines of this reader. /// /// The iterator returned from this function will yield instances of /// `io::Result<String>`. Each string returned will *not* have a newline /// byte (the 0xA byte) at the end. /// /// This function will yield errors whenever `read_string` would have also /// yielded an error. #[stable(feature = "rust1", since = "1.0.0")] fn lines(self) -> Lines<Self> { Lines { buf: self } } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: BufRead> BufReadExt for T {} /// A `Write` adaptor which will write data to multiple locations. /// /// For more information, see `WriteExt::broadcast`. 
pub struct Broadcast<T, U> { first: T, second: U, } impl<T: Write, U: Write> Write for Broadcast<T, U> { fn write(&mut self, data: &[u8]) -> Result<usize> { let n = try!(self.first.write(data)); // FIXME: what if the write fails? (we wrote something) try!(self.second.write_all(&data[..n])); Ok(n) } fn flush(&mut self) -> Result<()> { self.first.flush().and(self.second.flush()) } } /// Adaptor to chain together two instances of `Read`. /// /// For more information, see `ReadExt::chain`. #[stable(feature = "rust1", since = "1.0.0")] pub struct Chain<T, U> { first: T, second: U, done_first: bool, } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Read, U: Read> Read for Chain<T, U> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { if !self.done_first { match try!(self.first.read(buf)) { 0 => { self.done_first = true; } n => return Ok(n), } } self.second.read(buf) } } /// Reader adaptor which limits the bytes read from an underlying reader. /// /// For more information, see `ReadExt::take`. #[stable(feature = "rust1", since = "1.0.0")] pub struct Take<T> { inner: T, limit: u64, } #[stable(feature = "rust1", since = "1.0.0")] impl<T> Take<T> { /// Returns the number of bytes that can be read before this instance will /// return EOF. /// /// # Note /// /// This instance may reach EOF after reading fewer bytes than indicated by /// this method if the underlying `Read` instance reaches EOF. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn limit(&self) -> u64 { self.limit } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: Read> Read for Take<T> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { // Don't call into inner reader at all at EOF because it may still block if self.limit == 0 { return Ok(0); } let max = cmp::min(buf.len() as u64, self.limit) as usize; let n = try!(self.inner.read(&mut buf[..max])); self.limit -= n as u64; Ok(n) } } #[stable(feature = "rust1", since = "1.0.0")] impl<T: BufRead> BufRead for Take<T> { fn fill_buf(&mut self) -> Result<&[u8]> { let buf = try!(self.inner.fill_buf()); let cap = cmp::min(buf.len() as u64, self.limit) as usize; Ok(&buf[..cap]) } fn consume(&mut self, amt: usize) { // Don't let callers reset the limit by passing an overlarge value let amt = cmp::min(amt as u64, self.limit) as usize; self.limit -= amt as u64; self.inner.consume(amt); } } /// An adaptor which will emit all read data to a specified writer as well. /// /// For more information see `ReadExt::tee` pub struct Tee<R, W> { reader: R, writer: W, } impl<R: Read, W: Write> Read for Tee<R, W> { fn read(&mut self, buf: &mut [u8]) -> Result<usize> { let n = try!(self.reader.read(buf)); // FIXME: what if the write fails? (we read something) try!(self.writer.write_all(&buf[..n])); Ok(n) } } /// A bridge from implementations of `Read` to an `Iterator` of `u8`. /// /// See `ReadExt::bytes` for more information. #[stable(feature = "rust1", since = "1.0.0")] pub struct Bytes<R> { inner: R, } #[stable(feature = "rust1", since = "1.0.0")] impl<R: Read> Iterator for Bytes<R> { type Item = Result<u8>; fn next(&mut self) -> Option<Result<u8>> { let mut buf = [0]; match self.inner.read(&mut buf) { Ok(0) => None, Ok(..) => Some(Ok(buf[0])), Err(e) => Some(Err(e)), } } } /// A bridge from implementations of `Read` to an `Iterator` of `char`. /// /// See `ReadExt::chars` for more information. 
pub struct Chars<R> { inner: R, } /// An enumeration of possible errors that can be generated from the `Chars` /// adapter. #[derive(PartialEq, Clone, Debug)] pub enum CharsError { /// Variant representing that the underlying stream was read successfully /// but it did not contain valid utf8 data. NotUtf8, /// Variant representing that an I/O error occurred. Other(Error), } impl<R: Read> Iterator for Chars<R> { type Item = result::Result<char, CharsError>; fn next(&mut self) -> Option<result::Result<char, CharsError>> { let mut buf = [0]; let first_byte = match self.inner.read(&mut buf) { Ok(0) => return None, Ok(..) => buf[0], Err(e) => return Some(Err(CharsError::Other(e))), }; let width = core_str::utf8_char_width(first_byte); if width == 1 { return Some(Ok(first_byte as char)) } if width == 0 { return Some(Err(CharsError::NotUtf8)) } let mut buf = [first_byte, 0, 0, 0]; { let mut start = 1; while start < width { match self.inner.read(&mut buf[start..width]) { Ok(0) => return Some(Err(CharsError::NotUtf8)), Ok(n) => start += n, Err(e) => return Some(Err(CharsError::Other(e))), } } } Some(match str::from_utf8(&buf[..width]).ok() { Some(s) => Ok(s.char_at(0)), None => Err(CharsError::NotUtf8), }) } } impl StdError for CharsError { fn description(&self) -> &str { match *self { CharsError::NotUtf8 => "invalid utf8 encoding", CharsError::Other(ref e) => e.description(), } } fn cause(&self) -> Option<&StdError> { match *self { CharsError::NotUtf8 => None, CharsError::Other(ref e) => e.cause(), } } } impl fmt::Display for CharsError { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { CharsError::NotUtf8 => { "byte stream did not contain valid utf8".fmt(f) } CharsError::Other(ref e) => e.fmt(f), } } } /// An iterator over the contents of an instance of `BufRead` split on a /// particular byte. /// /// See `BufReadExt::split` for more information. 
pub struct Split<B> { buf: B, delim: u8, } impl<B: BufRead> Iterator for Split<B> { type Item = Result<Vec<u8>>; fn next(&mut self) -> Option<Result<Vec<u8>>> { let mut buf = Vec::new(); match self.buf.read_until(self.delim, &mut buf) { Ok(()) if buf.len() == 0 => None, Ok(()) => { if buf[buf.len() - 1] == self.delim { buf.pop(); } Some(Ok(buf)) } Err(e) => Some(Err(e)) } } }<|fim▁hole|> /// An iterator over the lines of an instance of `BufRead` split on a newline /// byte. /// /// See `BufReadExt::lines` for more information. #[stable(feature = "rust1", since = "1.0.0")] pub struct Lines<B> { buf: B, } #[stable(feature = "rust1", since = "1.0.0")] impl<B: BufRead> Iterator for Lines<B> { type Item = Result<String>; fn next(&mut self) -> Option<Result<String>> { let mut buf = String::new(); match self.buf.read_line(&mut buf) { Ok(()) if buf.len() == 0 => None, Ok(()) => { if buf.ends_with("\n") { buf.pop(); } Some(Ok(buf)) } Err(e) => Some(Err(e)) } } } #[cfg(test)] mod tests { use prelude::v1::*; use io::prelude::*; use io; use super::Cursor; #[test] fn read_until() { let mut buf = Cursor::new(b"12"); let mut v = Vec::new(); assert_eq!(buf.read_until(b'3', &mut v), Ok(())); assert_eq!(v, b"12"); let mut buf = Cursor::new(b"1233"); let mut v = Vec::new(); assert_eq!(buf.read_until(b'3', &mut v), Ok(())); assert_eq!(v, b"123"); v.truncate(0); assert_eq!(buf.read_until(b'3', &mut v), Ok(())); assert_eq!(v, b"3"); v.truncate(0); assert_eq!(buf.read_until(b'3', &mut v), Ok(())); assert_eq!(v, []); } #[test] fn split() { let buf = Cursor::new(b"12"); let mut s = buf.split(b'3'); assert_eq!(s.next(), Some(Ok(vec![b'1', b'2']))); assert_eq!(s.next(), None); let buf = Cursor::new(b"1233"); let mut s = buf.split(b'3'); assert_eq!(s.next(), Some(Ok(vec![b'1', b'2']))); assert_eq!(s.next(), Some(Ok(vec![]))); assert_eq!(s.next(), None); } #[test] fn read_line() { let mut buf = Cursor::new(b"12"); let mut v = String::new(); assert_eq!(buf.read_line(&mut v), Ok(())); 
assert_eq!(v, "12"); let mut buf = Cursor::new(b"12\n\n"); let mut v = String::new(); assert_eq!(buf.read_line(&mut v), Ok(())); assert_eq!(v, "12\n"); v.truncate(0); assert_eq!(buf.read_line(&mut v), Ok(())); assert_eq!(v, "\n"); v.truncate(0); assert_eq!(buf.read_line(&mut v), Ok(())); assert_eq!(v, ""); } #[test] fn lines() { let buf = Cursor::new(b"12"); let mut s = buf.lines(); assert_eq!(s.next(), Some(Ok("12".to_string()))); assert_eq!(s.next(), None); let buf = Cursor::new(b"12\n\n"); let mut s = buf.lines(); assert_eq!(s.next(), Some(Ok("12".to_string()))); assert_eq!(s.next(), Some(Ok(String::new()))); assert_eq!(s.next(), None); } #[test] fn read_to_end() { let mut c = Cursor::new(b""); let mut v = Vec::new(); assert_eq!(c.read_to_end(&mut v), Ok(())); assert_eq!(v, []); let mut c = Cursor::new(b"1"); let mut v = Vec::new(); assert_eq!(c.read_to_end(&mut v), Ok(())); assert_eq!(v, b"1"); } #[test] fn read_to_string() { let mut c = Cursor::new(b""); let mut v = String::new(); assert_eq!(c.read_to_string(&mut v), Ok(())); assert_eq!(v, ""); let mut c = Cursor::new(b"1"); let mut v = String::new(); assert_eq!(c.read_to_string(&mut v), Ok(())); assert_eq!(v, "1"); let mut c = Cursor::new(b"\xff"); let mut v = String::new(); assert!(c.read_to_string(&mut v).is_err()); } #[test] fn take_eof() { struct R; impl Read for R { fn read(&mut self, _: &mut [u8]) -> io::Result<usize> { Err(io::Error::new(io::ErrorKind::Other, "", None)) } } let mut buf = [0; 1]; assert_eq!(Ok(0), R.take(0).read(&mut buf)); } }<|fim▁end|>
<|file_name|>ex14.py<|end_file_name|><|fim▁begin|>from sys import argv script, user_name = argv prompt = '> ' print "Hi %s, I'm the %s script." % (user_name, script) print "I'd like to ask you a few questions." print "Do you like me %s?" % user_name likes = raw_input(prompt) print "Where do you live %s?" % user_name lives = raw_input(prompt) <|fim▁hole|>Alright, so you said %r about liking me. You live in %r. Not sure where that is. And you have a %r computer. Nice. """ % (likes, lives, computer)<|fim▁end|>
print "What kind of computer do you have?" computer = raw_input(prompt) print """
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use crate::context::Context; use crate::error::RenderError; use crate::json::value::ScopedJson; use crate::output::Output; use crate::registry::Registry; use crate::render::{do_escape, Helper, RenderContext}; pub use self::helper_each::EACH_HELPER; pub use self::helper_if::{IF_HELPER, UNLESS_HELPER}; pub use self::helper_log::LOG_HELPER; pub use self::helper_lookup::LOOKUP_HELPER; pub use self::helper_raw::RAW_HELPER; pub use self::helper_with::WITH_HELPER; /// A type alias for `Result<(), RenderError>` pub type HelperResult = Result<(), RenderError>; /// Helper Definition /// /// Implement `HelperDef` to create custom helpers. You can retrieve useful information from these arguments. /// /// * `&Helper`: current helper template information, contains name, params, hashes and nested template /// * `&Registry`: the global registry, you can find templates by name from registry /// * `&Context`: the whole data to render, in most case you can use data from `Helper` /// * `&mut RenderContext`: you can access data or modify variables (starts with @)/partials in render context, for example, @index of #each. See its document for detail. /// * `&mut dyn Output`: where you write output to /// /// By default, you can use a bare function as a helper definition because we have supported unboxed_closure. If you have stateful or configurable helper, you can create a struct to implement `HelperDef`. 
/// /// ## Define an inline helper /// /// ``` /// use handlebars::*; /// /// fn upper(h: &Helper<'_, '_>, _: &Handlebars<'_>, _: &Context, rc: /// &mut RenderContext<'_, '_>, out: &mut dyn Output) /// -> HelperResult { /// // get parameter from helper or throw an error /// let param = h.param(0).and_then(|v| v.value().as_str()).unwrap_or(""); /// out.write(param.to_uppercase().as_ref())?; /// Ok(()) /// } /// ``` /// /// ## Define block helper /// /// Block helper is like `#if` or `#each` which has a inner template and an optional *inverse* template (the template in else branch). You can access the inner template by `helper.template()` and `helper.inverse()`. In most cases you will just call `render` on it. /// /// ``` /// use handlebars::*; /// /// fn dummy_block<'reg, 'rc>( /// h: &Helper<'reg, 'rc>, /// r: &'reg Handlebars<'reg>, /// ctx: &'rc Context, /// rc: &mut RenderContext<'reg, 'rc>, /// out: &mut dyn Output, /// ) -> HelperResult { /// h.template() /// .map(|t| t.render(r, ctx, rc, out)) /// .unwrap_or(Ok(())) /// } /// ``` /// /// ## Define helper function using macro /// /// In most cases you just need some simple function to call from templates. We have a `handlebars_helper!` macro to simplify the job. /// /// ``` /// use handlebars::*; /// /// handlebars_helper!(plus: |x: i64, y: i64| x + y); /// /// let mut hbs = Handlebars::new(); /// hbs.register_helper("plus", Box::new(plus)); /// ``` /// pub trait HelperDef { /// A simplified api to define helper /// /// To implement your own `call_inner`, you will return a new `ScopedJson` /// which has a JSON value computed from current context. /// /// ### Calling from subexpression /// /// When calling the helper as a subexpression, the value and its type can /// be received by upper level helpers. /// /// Note that the value can be `json!(null)` which is treated as `false` in /// helpers like `if` and rendered as empty string. 
fn call_inner<'reg: 'rc, 'rc>( &self, _: &Helper<'reg, 'rc>, _: &'reg Registry<'reg>, _: &'rc Context, _: &mut RenderContext<'reg, 'rc>, ) -> Result<ScopedJson<'reg, 'rc>, RenderError> { Err(RenderError::unimplemented()) } /// A complex version of helper interface. /// /// This function offers `Output`, which you can write custom string into /// and render child template. Helpers like `if` and `each` are implemented /// with this. Because the data written into `Output` are typically without /// type information. So helpers defined by this function are not composable. /// /// ### Calling from subexpression /// /// Although helpers defined by this are not composable, when called from /// subexpression, handlebars tries to parse the string output as JSON to /// re-build its type. This can be buggy with numrical and other literal values. /// So it is not recommended to use these helpers in subexpression. fn call<'reg: 'rc, 'rc>( &self, h: &Helper<'reg, 'rc>, r: &'reg Registry<'reg>, ctx: &'rc Context, rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> HelperResult { match self.call_inner(h, r, ctx, rc) { Ok(result) => { if r.strict_mode() && result.is_missing() { Err(RenderError::strict_error(None)) } else { // auto escape according to settings let output = do_escape(r, rc, result.render()); out.write(output.as_ref())?; Ok(()) } } Err(e) => { if e.is_unimplemented() { // default implementation, do nothing Ok(()) } else { Err(e) } } } } } /// implement HelperDef for bare function so we can use function as helper impl< F: for<'reg, 'rc> Fn( &Helper<'reg, 'rc>, &'reg Registry<'reg>, &'rc Context, &mut RenderContext<'reg, 'rc>, &mut dyn Output, ) -> HelperResult, > HelperDef for F { fn call<'reg: 'rc, 'rc>( &self, h: &Helper<'reg, 'rc>, r: &'reg Registry<'reg>, ctx: &'rc Context, rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> HelperResult { (*self)(h, r, ctx, rc, out) } } mod block_util; mod helper_each; pub(crate) mod helper_extras; mod 
helper_if; mod helper_log; mod helper_lookup; mod helper_raw; mod helper_with; #[cfg(feature = "script_helper")] pub(crate) mod scripting; // pub type HelperDef = for <'a, 'b, 'c> Fn<(&'a Context, &'b Helper, &'b Registry, &'c mut RenderContext), Result<String, RenderError>>; // // pub fn helper_dummy (ctx: &Context, h: &Helper, r: &Registry, rc: &mut RenderContext) -> Result<String, RenderError> { // h.template().unwrap().render(ctx, r, rc).unwrap() // } // #[cfg(test)] mod test { use std::collections::BTreeMap; use crate::context::Context; use crate::error::RenderError; use crate::helpers::HelperDef; use crate::json::value::JsonRender; use crate::output::Output; use crate::registry::Registry; use crate::render::{Helper, RenderContext, Renderable}; #[derive(Clone, Copy)] struct MetaHelper; impl HelperDef for MetaHelper { fn call<'reg: 'rc, 'rc>( &self, h: &Helper<'reg, 'rc>, r: &'reg Registry<'reg>, ctx: &'rc Context, rc: &mut RenderContext<'reg, 'rc>, out: &mut dyn Output, ) -> Result<(), RenderError> { let v = h.param(0).unwrap(); if !h.is_block() { let output = format!("{}:{}", h.name(), v.value().render()); out.write(output.as_ref())?; } else { let output = format!("{}:{}", h.name(), v.value().render()); out.write(output.as_ref())?; out.write("->")?; h.template().unwrap().render(r, ctx, rc, out)?; }; Ok(()) } } #[test] fn test_meta_helper() { let mut handlebars = Registry::new(); assert!(handlebars .register_template_string("t0", "{{foo this}}") .is_ok()); assert!(handlebars .register_template_string("t1", "{{#bar this}}nice{{/bar}}") .is_ok()); <|fim▁hole|> let meta_helper = MetaHelper; handlebars.register_helper("helperMissing", Box::new(meta_helper)); handlebars.register_helper("blockHelperMissing", Box::new(meta_helper)); let r0 = handlebars.render("t0", &true); assert_eq!(r0.ok().unwrap(), "foo:true".to_string()); let r1 = handlebars.render("t1", &true); assert_eq!(r1.ok().unwrap(), "bar:true->nice".to_string()); } #[test] fn 
test_helper_for_subexpression() { let mut handlebars = Registry::new(); assert!(handlebars .register_template_string("t2", "{{foo value=(bar 0)}}") .is_ok()); handlebars.register_helper( "helperMissing", Box::new( |h: &Helper<'_, '_>, _: &Registry<'_>, _: &Context, _: &mut RenderContext<'_, '_>, out: &mut dyn Output| -> Result<(), RenderError> { let output = format!("{}{}", h.name(), h.param(0).unwrap().value()); out.write(output.as_ref())?; Ok(()) }, ), ); handlebars.register_helper( "foo", Box::new( |h: &Helper<'_, '_>, _: &Registry<'_>, _: &Context, _: &mut RenderContext<'_, '_>, out: &mut dyn Output| -> Result<(), RenderError> { let output = format!("{}", h.hash_get("value").unwrap().value().render()); out.write(output.as_ref())?; Ok(()) }, ), ); let mut data = BTreeMap::new(); // handlebars should never try to lookup this value because // subexpressions are now resolved as string literal data.insert("bar0".to_string(), true); let r2 = handlebars.render("t2", &data); assert_eq!(r2.ok().unwrap(), "bar0".to_string()); } }<|fim▁end|>
<|file_name|>roles.py<|end_file_name|><|fim▁begin|>""" Classes used to model the roles used in the courseware. Each role is responsible for checking membership, adding users, removing users, and listing members """ import logging from abc import ABCMeta, abstractmethod from collections import defaultdict <|fim▁hole|>from openedx.core.lib.cache_utils import get_cache from student.models import CourseAccessRole log = logging.getLogger(__name__) # A list of registered access roles. REGISTERED_ACCESS_ROLES = {} def register_access_role(cls): """ Decorator that allows access roles to be registered within the roles module and referenced by their string values. Assumes that the decorated class has a "ROLE" attribute, defining its type. """ try: role_name = cls.ROLE REGISTERED_ACCESS_ROLES[role_name] = cls except AttributeError: log.exception(u"Unable to register Access Role with attribute 'ROLE'.") return cls class BulkRoleCache(object): CACHE_NAMESPACE = u"student.roles.BulkRoleCache" CACHE_KEY = u'roles_by_user' @classmethod def prefetch(cls, users): roles_by_user = defaultdict(set) get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY] = roles_by_user for role in CourseAccessRole.objects.filter(user__in=users).select_related('user'): roles_by_user[role.user.id].add(role) users_without_roles = filter(lambda u: u.id not in roles_by_user, users) for user in users_without_roles: roles_by_user[user.id] = set() @classmethod def get_user_roles(cls, user): return get_cache(cls.CACHE_NAMESPACE)[cls.CACHE_KEY][user.id] class RoleCache(object): """ A cache of the CourseAccessRoles held by a particular user """ def __init__(self, user): try: self._roles = BulkRoleCache.get_user_roles(user) except KeyError: self._roles = set( CourseAccessRole.objects.filter(user=user).all() ) def has_role(self, role, course_id, org): """ Return whether this RoleCache contains a role with the specified role, course_id, and org """ return any( access_role.role == role and access_role.course_id == course_id 
and access_role.org == org for access_role in self._roles ) class AccessRole(object): """ Object representing a role with particular access to a resource """ __metaclass__ = ABCMeta @abstractmethod def has_user(self, user): """ Return whether the supplied django user has access to this role. """ return False @abstractmethod def add_users(self, *users): """ Add the role to the supplied django users. """ pass @abstractmethod def remove_users(self, *users): """ Remove the role from the supplied django users. """ pass @abstractmethod def users_with_role(self): """ Return a django QuerySet for all of the users with this role """ return User.objects.none() class GlobalStaff(AccessRole): """ The global staff role """ def has_user(self, user): return user.is_staff def add_users(self, *users): for user in users: if user.is_authenticated and user.is_active: user.is_staff = True user.save() def remove_users(self, *users): for user in users: # don't check is_authenticated nor is_active on purpose user.is_staff = False user.save() def users_with_role(self): raise Exception("This operation is un-indexed, and shouldn't be used") class RoleBase(AccessRole): """ Roles by type (e.g., instructor, beta_user) and optionally org, course_key """ def __init__(self, role_name, org='', course_key=None): """ Create role from required role_name w/ optional org and course_key. You may just provide a role name if it's a global role (not constrained to an org or course). Provide org if constrained to an org. Provide org and course if constrained to a course. Although, you should use the subclasses for all of these. """ super(RoleBase, self).__init__() self.org = org self.course_key = course_key self._role_name = role_name # pylint: disable=arguments-differ def has_user(self, user, check_user_activation=True): """ Check if the supplied django user has access to this role. 
Arguments: user: user to check against access to role check_user_activation: Indicating whether or not we need to check user activation while checking user roles Return: bool identifying if user has that particular role or not """ if check_user_activation and not (user.is_authenticated and user.is_active): return False # pylint: disable=protected-access if not hasattr(user, '_roles'): # Cache a list of tuples identifying the particular roles that a user has # Stored as tuples, rather than django models, to make it cheaper to construct objects for comparison user._roles = RoleCache(user) return user._roles.has_role(self._role_name, self.course_key, self.org) def add_users(self, *users): """ Add the supplied django users to this role. """ # silently ignores anonymous and inactive users so that any that are # legit get updated. from student.models import CourseAccessRole for user in users: if user.is_authenticated and user.is_active and not self.has_user(user): entry = CourseAccessRole(user=user, role=self._role_name, course_id=self.course_key, org=self.org) entry.save() if hasattr(user, '_roles'): del user._roles def remove_users(self, *users): """ Remove the supplied django users from this role. 
""" entries = CourseAccessRole.objects.filter( user__in=users, role=self._role_name, org=self.org, course_id=self.course_key ) entries.delete() for user in users: if hasattr(user, '_roles'): del user._roles def users_with_role(self): """ Return a django QuerySet for all of the users with this role """ # Org roles don't query by CourseKey, so use CourseKeyField.Empty for that query if self.course_key is None: self.course_key = CourseKeyField.Empty entries = User.objects.filter( courseaccessrole__role=self._role_name, courseaccessrole__org=self.org, courseaccessrole__course_id=self.course_key ) return entries class CourseRole(RoleBase): """ A named role in a particular course """ def __init__(self, role, course_key): """ Args: course_key (CourseKey) """ super(CourseRole, self).__init__(role, course_key.org, course_key) @classmethod def course_group_already_exists(self, course_key): return CourseAccessRole.objects.filter(org=course_key.org, course_id=course_key).exists() def __repr__(self): return '<{}: course_key={}>'.format(self.__class__.__name__, self.course_key) class OrgRole(RoleBase): """ A named role in a particular org independent of course """ def __repr__(self): return '<{}>'.format(self.__class__.__name__) @register_access_role class CourseStaffRole(CourseRole): """A Staff member of a course""" ROLE = 'staff' def __init__(self, *args, **kwargs): super(CourseStaffRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class CourseInstructorRole(CourseRole): """A course Instructor""" ROLE = 'instructor' def __init__(self, *args, **kwargs): super(CourseInstructorRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class CourseFinanceAdminRole(CourseRole): """A course staff member with privileges to review financial data.""" ROLE = 'finance_admin' def __init__(self, *args, **kwargs): super(CourseFinanceAdminRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class CourseSalesAdminRole(CourseRole): """A course 
staff member with privileges to perform sales operations. """ ROLE = 'sales_admin' def __init__(self, *args, **kwargs): super(CourseSalesAdminRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class CourseBetaTesterRole(CourseRole): """A course Beta Tester""" ROLE = 'beta_testers' def __init__(self, *args, **kwargs): super(CourseBetaTesterRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class LibraryUserRole(CourseRole): """ A user who can view a library and import content from it, but not edit it. Used in Studio only. """ ROLE = 'library_user' def __init__(self, *args, **kwargs): super(LibraryUserRole, self).__init__(self.ROLE, *args, **kwargs) class CourseCcxCoachRole(CourseRole): """A CCX Coach""" ROLE = 'ccx_coach' def __init__(self, *args, **kwargs): super(CourseCcxCoachRole, self).__init__(self.ROLE, *args, **kwargs) class OrgStaffRole(OrgRole): """An organization staff member""" def __init__(self, *args, **kwargs): super(OrgStaffRole, self).__init__('staff', *args, **kwargs) class OrgInstructorRole(OrgRole): """An organization instructor""" def __init__(self, *args, **kwargs): super(OrgInstructorRole, self).__init__('instructor', *args, **kwargs) class OrgLibraryUserRole(OrgRole): """ A user who can view any libraries in an org and import content from them, but not edit them. Used in Studio only. """ ROLE = LibraryUserRole.ROLE def __init__(self, *args, **kwargs): super(OrgLibraryUserRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class CourseCreatorRole(RoleBase): """ This is the group of people who have permission to create new courses (we may want to eventually make this an org based role). """ ROLE = "course_creator_group" def __init__(self, *args, **kwargs): super(CourseCreatorRole, self).__init__(self.ROLE, *args, **kwargs) @register_access_role class SupportStaffRole(RoleBase): """ Student support team members. 
""" ROLE = "support" def __init__(self, *args, **kwargs): super(SupportStaffRole, self).__init__(self.ROLE, *args, **kwargs) class UserBasedRole(object): """ Backward mapping: given a user, manipulate the courses and roles """ def __init__(self, user, role): """ Create a UserBasedRole accessor: for a given user and role (e.g., "instructor") """ self.user = user self.role = role def has_course(self, course_key): """ Return whether the role's user has the configured role access to the passed course """ if not (self.user.is_authenticated and self.user.is_active): return False # pylint: disable=protected-access if not hasattr(self.user, '_roles'): self.user._roles = RoleCache(self.user) return self.user._roles.has_role(self.role, course_key, course_key.org) def add_course(self, *course_keys): """ Grant this object's user the object's role for the supplied courses """ if self.user.is_authenticated and self.user.is_active: for course_key in course_keys: entry = CourseAccessRole(user=self.user, role=self.role, course_id=course_key, org=course_key.org) entry.save() if hasattr(self.user, '_roles'): del self.user._roles else: raise ValueError("user is not active. Cannot grant access to courses") def remove_courses(self, *course_keys): """ Remove the supplied courses from this user's configured role. """ entries = CourseAccessRole.objects.filter(user=self.user, role=self.role, course_id__in=course_keys) entries.delete() if hasattr(self.user, '_roles'): del self.user._roles def courses_with_role(self): """ Return a django QuerySet for all of the courses with this user x role. You can access any of these properties on each result record: * user (will be self.user--thus uninteresting) * org * course_id * role (will be self.role--thus uninteresting) """ return CourseAccessRole.objects.filter(role=self.role, user=self.user)<|fim▁end|>
from django.contrib.auth.models import User from opaque_keys.edx.django.models import CourseKeyField
<|file_name|>default.rs<|end_file_name|><|fim▁begin|>// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. use ast::{MetaItem, Item, Expr}; use codemap::Span; use ext::base::ExtCtxt; use ext::build::AstBuilder; use ext::deriving::generic::*; use ext::deriving::generic::ty::*; use parse::token::InternedString; use std::gc::Gc; pub fn expand_deriving_default(cx: &mut ExtCtxt, span: Span, mitem: Gc<MetaItem>, item: Gc<Item>, push: |Gc<Item>|) { let inline = cx.meta_word(span, InternedString::new("inline")); let attrs = vec!(cx.attribute(span, inline)); let trait_def = TraitDef { span: span, attributes: Vec::new(), path: Path::new(vec!("std", "default", "Default")), additional_bounds: Vec::new(), generics: LifetimeBounds::empty(), methods: vec!( MethodDef { name: "default", generics: LifetimeBounds::empty(), explicit_self: None, args: Vec::new(), ret_ty: Self, attributes: attrs,<|fim▁hole|> }) }) }; trait_def.expand(cx, mitem, item, push) } fn default_substructure(cx: &mut ExtCtxt, trait_span: Span, substr: &Substructure) -> Gc<Expr> { let default_ident = vec!( cx.ident_of("std"), cx.ident_of("default"), cx.ident_of("Default"), cx.ident_of("default") ); let default_call = |span| cx.expr_call_global(span, default_ident.clone(), Vec::new()); return match *substr.fields { StaticStruct(_, ref summary) => { match *summary { Unnamed(ref fields) => { if fields.is_empty() { cx.expr_ident(trait_span, substr.type_ident) } else { let exprs = fields.iter().map(|sp| default_call(*sp)).collect(); cx.expr_call_ident(trait_span, substr.type_ident, exprs) } } Named(ref fields) 
=> { let default_fields = fields.iter().map(|&(ident, span)| { cx.field_imm(span, ident, default_call(span)) }).collect(); cx.expr_struct_ident(trait_span, substr.type_ident, default_fields) } } } StaticEnum(..) => { cx.span_err(trait_span, "`Default` cannot be derived for enums, only structs"); // let compilation continue cx.expr_uint(trait_span, 0) } _ => cx.span_bug(trait_span, "Non-static method in `deriving(Default)`") }; }<|fim▁end|>
const_nonmatching: false, combine_substructure: combine_substructure(|a, b, c| { default_substructure(a, b, c)
<|file_name|>volume.go<|end_file_name|><|fim▁begin|>package set import ( "encoding/json" "errors" "fmt" "io" "os" "path" "strings" "github.com/golang/glog" "github.com/spf13/cobra" kapi "k8s.io/kubernetes/pkg/api" apierrs "k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/meta" kresource "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/unversioned" kclient "k8s.io/kubernetes/pkg/client/unversioned" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" "k8s.io/kubernetes/pkg/kubectl/resource" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/runtime" "github.com/openshift/origin/pkg/cmd/templates" cmdutil "github.com/openshift/origin/pkg/cmd/util" "github.com/openshift/origin/pkg/cmd/util/clientcmd" ) const ( volumePrefix = "volume-" storageAnnClass = "volume.beta.kubernetes.io/storage-class" ) var ( volumeLong = templates.LongDesc(` Update volumes on a pod template This command can add, update or remove volumes from containers for any object that has a pod template (deployment configs, replication controllers, or pods). You can list volumes in pod or any object that has a pod template. You can specify a single object or multiple, and alter volumes on all containers or just those that match a given name. If you alter a volume setting on a deployment config, a deployment will be triggered. Changing a replication controller will not affect running pods, and you cannot change a pod's volumes once it has been created. Volume types include: * emptydir (empty directory) *default*: A directory allocated when the pod is created on a local host, is removed when the pod is deleted and is not copied across servers * hostdir (host directory): A directory with specific path on any host (requires elevated privileges) * persistentvolumeclaim or pvc (persistent volume claim): Link the volume directory in the container to a persistent volume claim you have allocated by name - a persistent volume claim is a request to allocate storage. 
Note that if your claim hasn't been bound, your pods will not start. * secret (mounted secret): Secret volumes mount a named secret to the provided directory. For descriptions on other volume types, see https://docs.openshift.com`) volumeExample = templates.Examples(` # List volumes defined on all deployment configs in the current project %[1]s volume dc --all # Add a new empty dir volume to deployment config (dc) 'registry' mounted under # /var/lib/registry %[1]s volume dc/registry --add --mount-path=/var/lib/registry # Use an existing persistent volume claim (pvc) to overwrite an existing volume 'v1' %[1]s volume dc/registry --add --name=v1 -t pvc --claim-name=pvc1 --overwrite # Remove volume 'v1' from deployment config 'registry' %[1]s volume dc/registry --remove --name=v1 # Create a new persistent volume claim that overwrites an existing volume 'v1' %[1]s volume dc/registry --add --name=v1 -t pvc --claim-size=1G --overwrite # Change the mount point for volume 'v1' to /data %[1]s volume dc/registry --add --name=v1 -m /data --overwrite # Modify the deployment config by removing volume mount "v1" from container "c1" # (and by removing the volume "v1" if no other containers have volume mounts that reference it) %[1]s volume dc/registry --remove --name=v1 --containers=c1 # Add new volume based on a more complex volume source (Git repo, AWS EBS, GCE PD, # Ceph, Gluster, NFS, ISCSI, ...) 
%[1]s volume dc/registry --add -m /repo --source=<json-string>`) ) type VolumeOptions struct { DefaultNamespace string ExplicitNamespace bool Out io.Writer Err io.Writer Mapper meta.RESTMapper Typer runtime.ObjectTyper RESTClientFactory func(mapping *meta.RESTMapping) (resource.RESTClient, error) UpdatePodSpecForObject func(obj runtime.Object, fn func(*kapi.PodSpec) error) (bool, error) Client kclient.PersistentVolumeClaimsNamespacer Encoder runtime.Encoder // Resource selection Selector string All bool Filenames []string // Operations Add bool Remove bool List bool // Common optional params Name string Containers string Confirm bool Output string PrintObject func([]*resource.Info) error OutputVersion unversioned.GroupVersion // Add op params AddOpts *AddVolumeOptions } type AddVolumeOptions struct { Type string MountPath string Overwrite bool Path string ConfigMapName string SecretName string Source string CreateClaim bool ClaimName string ClaimSize string ClaimMode string ClaimClass string TypeChanged bool } func NewCmdVolume(fullName string, f *clientcmd.Factory, out, errOut io.Writer) *cobra.Command { addOpts := &AddVolumeOptions{} opts := &VolumeOptions{AddOpts: addOpts} cmd := &cobra.Command{ Use: "volumes RESOURCE/NAME --add|--remove|--list", Short: "Update volumes on a pod template", Long: volumeLong, Example: fmt.Sprintf(volumeExample, fullName), Aliases: []string{"volume"}, Run: func(cmd *cobra.Command, args []string) { addOpts.TypeChanged = cmd.Flag("type").Changed err := opts.Validate(cmd, args) if err != nil { kcmdutil.CheckErr(kcmdutil.UsageError(cmd, err.Error())) } err = opts.Complete(f, cmd, out, errOut) kcmdutil.CheckErr(err) err = opts.RunVolume(args) if err == cmdutil.ErrExit { os.Exit(1) } kcmdutil.CheckErr(err) }, } cmd.Flags().StringVarP(&opts.Selector, "selector", "l", "", "Selector (label query) to filter on") cmd.Flags().BoolVar(&opts.All, "all", false, "select all resources in the namespace of the specified resource types") 
cmd.Flags().StringSliceVarP(&opts.Filenames, "filename", "f", opts.Filenames, "Filename, directory, or URL to file to use to edit the resource.") cmd.Flags().BoolVar(&opts.Add, "add", false, "Add volume and/or volume mounts for containers") cmd.Flags().BoolVar(&opts.Remove, "remove", false, "Remove volume and/or volume mounts for containers") cmd.Flags().BoolVar(&opts.List, "list", false, "List volumes and volume mounts for containers") cmd.Flags().StringVar(&opts.Name, "name", "", "Name of the volume. If empty, auto generated for add operation") cmd.Flags().StringVarP(&opts.Containers, "containers", "c", "*", "The names of containers in the selected pod templates to change - may use wildcards") cmd.Flags().BoolVar(&opts.Confirm, "confirm", false, "Confirm that you really want to remove multiple volumes") cmd.Flags().StringVarP(&addOpts.Type, "type", "t", "", "Type of the volume source for add operation. Supported options: emptyDir, hostPath, secret, configmap, persistentVolumeClaim") cmd.Flags().StringVarP(&addOpts.MountPath, "mount-path", "m", "", "Mount path inside the container. Optional param for --add or --remove") cmd.Flags().BoolVar(&addOpts.Overwrite, "overwrite", false, "If true, replace existing volume source and/or volume mount for the given resource") cmd.Flags().StringVar(&addOpts.Path, "path", "", "Host path. Must be provided for hostPath volume type") cmd.Flags().StringVar(&addOpts.ConfigMapName, "configmap-name", "", "Name of the persisted config map. Must be provided for configmap volume type") cmd.Flags().StringVar(&addOpts.SecretName, "secret-name", "", "Name of the persisted secret. Must be provided for secret volume type") cmd.Flags().StringVar(&addOpts.ClaimName, "claim-name", "", "Persistent volume claim name. 
Must be provided for persistentVolumeClaim volume type") cmd.Flags().StringVar(&addOpts.ClaimClass, "claim-class", "", "StorageClass to use for the persistent volume claim") cmd.Flags().StringVar(&addOpts.ClaimSize, "claim-size", "", "If specified along with a persistent volume type, create a new claim with the given size in bytes. Accepts SI notation: 10, 10G, 10Gi") cmd.Flags().StringVar(&addOpts.ClaimMode, "claim-mode", "ReadWriteOnce", "Set the access mode of the claim to be created. Valid values are ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)") cmd.Flags().StringVar(&addOpts.Source, "source", "", "Details of volume source as json string. This can be used if the required volume type is not supported by --type option. (e.g.: '{\"gitRepo\": {\"repository\": <git-url>, \"revision\": <commit-hash>}}')") kcmdutil.AddPrinterFlags(cmd) cmd.MarkFlagFilename("filename", "yaml", "yml", "json") // deprecate --list option cmd.Flags().MarkDeprecated("list", "Volumes and volume mounts can be listed by providing a resource with no additional options.") return cmd } func (v *VolumeOptions) Validate(cmd *cobra.Command, args []string) error { if len(v.Selector) > 0 { if _, err := labels.Parse(v.Selector); err != nil { return errors.New("--selector=<selector> must be a valid label selector") } if v.All { return errors.New("you may specify either --selector or --all but not both") } } if len(v.Filenames) == 0 && len(args) < 1 { return errors.New("provide one or more resources to add, list, or delete volumes on as TYPE/NAME") } numOps := 0 if v.Add { numOps++ } if v.Remove { numOps++ } if v.List { numOps++ } switch { case numOps == 0: v.List = true case numOps > 1: return errors.New("you may only specify one operation at a time") } output := kcmdutil.GetFlagString(cmd, "output") if v.List && len(output) > 0 { return errors.New("--list and --output may not be specified together") } err := v.AddOpts.Validate(v.Add) if err != nil { return err } // Removing all 
volumes for the resource type needs confirmation if v.Remove && len(v.Name) == 0 && !v.Confirm { return errors.New("must provide --confirm for removing more than one volume") } return nil } func (a *AddVolumeOptions) Validate(isAddOp bool) error { if isAddOp { if len(a.Type) == 0 && (len(a.ClaimName) > 0 || len(a.ClaimSize) > 0) { a.Type = "persistentvolumeclaim" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.SecretName) > 0) { a.Type = "secret" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.ConfigMapName) > 0) { a.Type = "configmap" a.TypeChanged = true } if len(a.Type) == 0 && (len(a.Path) > 0) { a.Type = "hostpath" a.TypeChanged = true } if len(a.Type) == 0 { a.Type = "emptydir" } if len(a.Type) == 0 && len(a.Source) == 0 { return errors.New("must provide --type or --source for --add operation") } else if a.TypeChanged && len(a.Source) > 0 { return errors.New("either specify --type or --source but not both for --add operation") } if len(a.Type) > 0 { switch strings.ToLower(a.Type) { case "emptydir": case "hostpath": if len(a.Path) == 0 { return errors.New("must provide --path for --type=hostPath") } case "secret": if len(a.SecretName) == 0 { return errors.New("must provide --secret-name for --type=secret") } case "configmap": if len(a.ConfigMapName) == 0 { return errors.New("must provide --configmap-name for --type=configmap") } case "persistentvolumeclaim", "pvc": if len(a.ClaimName) == 0 && len(a.ClaimSize) == 0 { return errors.New("must provide --claim-name or --claim-size (to create a new claim) for --type=pvc") } default: return errors.New("invalid volume type. 
Supported types: emptyDir, hostPath, secret, persistentVolumeClaim") } } else if len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ClaimName) > 0 { return errors.New("--path|--secret-name|--claim-name are only valid for --type option") } if len(a.Source) > 0 { var source map[string]interface{} err := json.Unmarshal([]byte(a.Source), &source) if err != nil { return err } if len(source) > 1 { return errors.New("must provide only one volume for --source") } var vs kapi.VolumeSource err = json.Unmarshal([]byte(a.Source), &vs) if err != nil { return err } } if len(a.ClaimClass) > 0 { selectedLowerType := strings.ToLower(a.Type) if selectedLowerType != "persistentvolumeclaim" && selectedLowerType != "pvc" { return errors.New("must provide --type as persistentVolumeClaim") } if len(a.ClaimSize) == 0 { return errors.New("must provide --claim-size to create new pvc with claim-class") } } } else if len(a.Source) > 0 || len(a.Path) > 0 || len(a.SecretName) > 0 || len(a.ConfigMapName) > 0 || len(a.ClaimName) > 0 || a.Overwrite { return errors.New("--type|--path|--configmap-name|--secret-name|--claim-name|--source|--overwrite are only valid for --add operation") } return nil } func (v *VolumeOptions) Complete(f *clientcmd.Factory, cmd *cobra.Command, out, errOut io.Writer) error { clientConfig, err := f.ClientConfig() if err != nil { return err } v.OutputVersion, err = kcmdutil.OutputVersion(cmd, clientConfig.GroupVersion) if err != nil { return err } _, kc, err := f.Clients() if err != nil { return err } v.Client = kc cmdNamespace, explicit, err := f.DefaultNamespace() if err != nil { return err } mapper, typer := f.Object(false) v.Output = kcmdutil.GetFlagString(cmd, "output") if len(v.Output) > 0 { v.PrintObject = func(infos []*resource.Info) error { return f.PrintResourceInfos(cmd, infos, v.Out) } } v.DefaultNamespace = cmdNamespace v.ExplicitNamespace = explicit v.Out = out v.Err = errOut v.Mapper = mapper v.Typer = typer v.RESTClientFactory = f.Factory.ClientForMapping 
v.UpdatePodSpecForObject = f.UpdatePodSpecForObject v.Encoder = f.JSONEncoder() // In case of volume source ignore the default volume type if len(v.AddOpts.Source) > 0 { v.AddOpts.Type = "" } if len(v.AddOpts.ClaimSize) > 0 { v.AddOpts.CreateClaim = true if len(v.AddOpts.ClaimName) == 0 { v.AddOpts.ClaimName = kapi.SimpleNameGenerator.GenerateName("pvc-") } q, err := kresource.ParseQuantity(v.AddOpts.ClaimSize) if err != nil { return fmt.Errorf("--claim-size is not valid: %v", err) } v.AddOpts.ClaimSize = q.String() } switch strings.ToLower(v.AddOpts.ClaimMode) { case strings.ToLower(string(kapi.ReadOnlyMany)), "rom": v.AddOpts.ClaimMode = string(kapi.ReadOnlyMany) case strings.ToLower(string(kapi.ReadWriteOnce)), "rwo": v.AddOpts.ClaimMode = string(kapi.ReadWriteOnce) case strings.ToLower(string(kapi.ReadWriteMany)), "rwm": v.AddOpts.ClaimMode = string(kapi.ReadWriteMany) case "": default: return errors.New("--claim-mode must be one of ReadWriteOnce (rwo), ReadWriteMany (rwm), or ReadOnlyMany (rom)") } return nil } func (v *VolumeOptions) RunVolume(args []string) error { mapper := resource.ClientMapperFunc(v.RESTClientFactory) b := resource.NewBuilder(v.Mapper, v.Typer, mapper, kapi.Codecs.UniversalDecoder()). ContinueOnError(). NamespaceParam(v.DefaultNamespace).DefaultNamespace(). FilenameParam(v.ExplicitNamespace, false, v.Filenames...). SelectorParam(v.Selector). ResourceTypeOrNameArgs(v.All, args...). 
Flatten() singular := false infos, err := b.Do().IntoSingular(&singular).Infos() if err != nil { return err } if v.List { listingErrors := v.printVolumes(infos) if len(listingErrors) > 0 { return cmdutil.ErrExit } return nil } updateInfos := []*resource.Info{} // if a claim should be created, generate the info we'll add to the flow if v.Add && v.AddOpts.CreateClaim { claim := v.AddOpts.createClaim() m, err := v.Mapper.RESTMapping(kapi.Kind("PersistentVolumeClaim")) if err != nil { return err } client, err := mapper.ClientForMapping(m) if err != nil { return err } info := &resource.Info{ Mapping: m, Client: client, Namespace: v.DefaultNamespace, Object: claim, } infos = append(infos, info) updateInfos = append(updateInfos, info) } patches, patchError := v.getVolumeUpdatePatches(infos, singular) if patchError != nil { return patchError } if v.PrintObject != nil { return v.PrintObject(infos) } failed := false for _, info := range updateInfos { var obj runtime.Object if len(info.ResourceVersion) == 0 { obj, err = resource.NewHelper(info.Client, info.Mapping).Create(info.Namespace, false, info.Object) } else { obj, err = resource.NewHelper(info.Client, info.Mapping).Replace(info.Namespace, info.Name, true, info.Object) } if err != nil { handlePodUpdateError(v.Err, err, "volume") failed = true continue } info.Refresh(obj, true) fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name) } for _, patch := range patches { info := patch.Info if patch.Err != nil { failed = true fmt.Fprintf(v.Err, "error: %s/%s %v\n", info.Mapping.Resource, info.Name, patch.Err) continue } if string(patch.Patch) == "{}" || len(patch.Patch) == 0 { fmt.Fprintf(v.Err, "info: %s %q was not changed\n", info.Mapping.Resource, info.Name) continue } glog.V(4).Infof("Calculated patch %s", patch.Patch) obj, err := resource.NewHelper(info.Client, info.Mapping).Patch(info.Namespace, info.Name, kapi.StrategicMergePatchType, patch.Patch) if err != nil { handlePodUpdateError(v.Err, err, "volume") failed 
= true continue } info.Refresh(obj, true) kcmdutil.PrintSuccess(v.Mapper, false, v.Out, info.Mapping.Resource, info.Name, false, "updated") } if failed { return cmdutil.ErrExit } return nil } func (v *VolumeOptions) getVolumeUpdatePatches(infos []*resource.Info, singular bool) ([]*Patch, error) { skipped := 0 patches := CalculatePatches(infos, v.Encoder, func(info *resource.Info) (bool, error) { transformed := false ok, err := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error { var e error switch { case v.Add: e = v.addVolumeToSpec(spec, info, singular) transformed = true case v.Remove: e = v.removeVolumeFromSpec(spec, info) transformed = true } return e }) if !ok { skipped++ } return transformed, err }) if singular && skipped == len(infos) { patchError := fmt.Errorf("the %s %s is not a pod or does not have a pod template", infos[0].Mapping.Resource, infos[0].Name) return patches, patchError } return patches, nil } func setVolumeSourceByType(kv *kapi.Volume, opts *AddVolumeOptions) error { switch strings.ToLower(opts.Type) { case "emptydir": kv.EmptyDir = &kapi.EmptyDirVolumeSource{} case "hostpath": kv.HostPath = &kapi.HostPathVolumeSource{ Path: opts.Path, } case "secret": kv.Secret = &kapi.SecretVolumeSource{ SecretName: opts.SecretName, } case "configmap": kv.ConfigMap = &kapi.ConfigMapVolumeSource{ LocalObjectReference: kapi.LocalObjectReference{ Name: opts.ConfigMapName, }, } case "persistentvolumeclaim", "pvc": kv.PersistentVolumeClaim = &kapi.PersistentVolumeClaimVolumeSource{ ClaimName: opts.ClaimName, } default: return fmt.Errorf("invalid volume type: %s", opts.Type) } return nil } func (v *VolumeOptions) printVolumes(infos []*resource.Info) []error { listingErrors := []error{} for _, info := range infos { _, err := v.UpdatePodSpecForObject(info.Object, func(spec *kapi.PodSpec) error { return v.listVolumeForSpec(spec, info) }) if err != nil { listingErrors = append(listingErrors, err) fmt.Fprintf(v.Err, "error: %s/%s %v\n", 
info.Mapping.Resource, info.Name, err) } } return listingErrors } func (v *AddVolumeOptions) createClaim() *kapi.PersistentVolumeClaim { pvc := &kapi.PersistentVolumeClaim{ ObjectMeta: kapi.ObjectMeta{ Name: v.ClaimName, }, Spec: kapi.PersistentVolumeClaimSpec{ AccessModes: []kapi.PersistentVolumeAccessMode{kapi.PersistentVolumeAccessMode(v.ClaimMode)}, Resources: kapi.ResourceRequirements{ Requests: kapi.ResourceList{ kapi.ResourceName(kapi.ResourceStorage): kresource.MustParse(v.ClaimSize), }, }, }, } if len(v.ClaimClass) > 0 { pvc.Annotations = map[string]string{ storageAnnClass: v.ClaimClass, } } return pvc } func (v *VolumeOptions) setVolumeSource(kv *kapi.Volume) error { var err error opts := v.AddOpts if len(opts.Type) > 0 { err = setVolumeSourceByType(kv, opts) } else if len(opts.Source) > 0 { err = json.Unmarshal([]byte(opts.Source), &kv.VolumeSource) } return err } func (v *VolumeOptions) setVolumeMount(spec *kapi.PodSpec, info *resource.Info) error { opts := v.AddOpts containers, _ := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } for _, c := range containers { for _, m := range c.VolumeMounts { if path.Clean(m.MountPath) == path.Clean(opts.MountPath) && m.Name != v.Name { return fmt.Errorf("volume mount '%s' already exists for container '%s'", opts.MountPath, c.Name) } } for i, m := range c.VolumeMounts { if m.Name == v.Name { c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...) 
break } } volumeMount := &kapi.VolumeMount{ Name: v.Name, MountPath: path.Clean(opts.MountPath), } c.VolumeMounts = append(c.VolumeMounts, *volumeMount) } return nil } func (v *VolumeOptions) getVolumeName(spec *kapi.PodSpec, singleResource bool) (string, error) { opts := v.AddOpts if opts.Overwrite { // Multiple resources can have same mount-path for different volumes, // so restrict it for single resource to uniquely find the volume if !singleResource { return "", fmt.Errorf("you must specify --name for the volume name when dealing with multiple resources") } if len(opts.MountPath) > 0 { containers, _ := selectContainers(spec.Containers, v.Containers) var name string matchCount := 0 for _, c := range containers { for _, m := range c.VolumeMounts { if path.Clean(m.MountPath) == path.Clean(opts.MountPath) { name = m.Name matchCount += 1 break } } } switch matchCount { case 0: return "", fmt.Errorf("unable to find the volume for mount-path: %s", opts.MountPath) case 1: return name, nil default: return "", fmt.Errorf("found multiple volumes with same mount-path: %s", opts.MountPath) } } else { return "", fmt.Errorf("ambiguous --overwrite, specify --name or --mount-path")<|fim▁hole|> } else { // Generate volume name name := kapi.SimpleNameGenerator.GenerateName(volumePrefix) if len(v.Output) == 0 { fmt.Fprintf(v.Err, "info: Generated volume name: %s\n", name) } return name, nil } } func (v *VolumeOptions) addVolumeToSpec(spec *kapi.PodSpec, info *resource.Info, singleResource bool) error { opts := v.AddOpts if len(v.Name) == 0 { var err error v.Name, err = v.getVolumeName(spec, singleResource) if err != nil { return err } } newVolume := &kapi.Volume{ Name: v.Name, } setSource := true for i, vol := range spec.Volumes { if v.Name == vol.Name { if !opts.Overwrite { return fmt.Errorf("volume '%s' already exists. 
Use --overwrite to replace", v.Name) } if !opts.TypeChanged && len(opts.Source) == 0 { newVolume.VolumeSource = vol.VolumeSource setSource = false } spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...) break } } if setSource { err := v.setVolumeSource(newVolume) if err != nil { return err } } spec.Volumes = append(spec.Volumes, *newVolume) if len(opts.MountPath) > 0 { err := v.setVolumeMount(spec, info) if err != nil { return err } } return nil } func (v *VolumeOptions) removeSpecificVolume(spec *kapi.PodSpec, containers, skippedContainers []*kapi.Container) error { for _, c := range containers { for i, m := range c.VolumeMounts { if v.Name == m.Name { c.VolumeMounts = append(c.VolumeMounts[:i], c.VolumeMounts[i+1:]...) break } } } // Remove volume if no container is using it found := false for _, c := range skippedContainers { for _, m := range c.VolumeMounts { if v.Name == m.Name { found = true break } } if found { break } } if !found { foundVolume := false for i, vol := range spec.Volumes { if v.Name == vol.Name { spec.Volumes = append(spec.Volumes[:i], spec.Volumes[i+1:]...) 
foundVolume = true break } } if !foundVolume { return fmt.Errorf("volume '%s' not found", v.Name) } } return nil } func (v *VolumeOptions) removeVolumeFromSpec(spec *kapi.PodSpec, info *resource.Info) error { containers, skippedContainers := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } if len(v.Name) == 0 { for _, c := range containers { c.VolumeMounts = []kapi.VolumeMount{} } spec.Volumes = []kapi.Volume{} } else { err := v.removeSpecificVolume(spec, containers, skippedContainers) if err != nil { return err } } return nil } func sourceAccessMode(readOnly bool) string { if readOnly { return " read-only" } return "" } func describePersistentVolumeClaim(claim *kapi.PersistentVolumeClaim) string { if len(claim.Spec.VolumeName) == 0 { // TODO: check for other dimensions of request - IOPs, etc if val, ok := claim.Spec.Resources.Requests[kapi.ResourceStorage]; ok { return fmt.Sprintf("waiting for %sB allocation", val.String()) } return "waiting to allocate" } // TODO: check for other dimensions of capacity? 
if val, ok := claim.Status.Capacity[kapi.ResourceStorage]; ok { return fmt.Sprintf("allocated %sB", val.String()) } return "allocated unknown size" } func describeVolumeSource(source *kapi.VolumeSource) string { switch { case source.AWSElasticBlockStore != nil: return fmt.Sprintf("AWS EBS %s type=%s partition=%d%s", source.AWSElasticBlockStore.VolumeID, source.AWSElasticBlockStore.FSType, source.AWSElasticBlockStore.Partition, sourceAccessMode(source.AWSElasticBlockStore.ReadOnly)) case source.EmptyDir != nil: return "empty directory" case source.GCEPersistentDisk != nil: return fmt.Sprintf("GCE PD %s type=%s partition=%d%s", source.GCEPersistentDisk.PDName, source.GCEPersistentDisk.FSType, source.GCEPersistentDisk.Partition, sourceAccessMode(source.GCEPersistentDisk.ReadOnly)) case source.GitRepo != nil: if len(source.GitRepo.Revision) == 0 { return fmt.Sprintf("Git repository %s", source.GitRepo.Repository) } return fmt.Sprintf("Git repository %s @ %s", source.GitRepo.Repository, source.GitRepo.Revision) case source.Glusterfs != nil: return fmt.Sprintf("GlusterFS %s:%s%s", source.Glusterfs.EndpointsName, source.Glusterfs.Path, sourceAccessMode(source.Glusterfs.ReadOnly)) case source.HostPath != nil: return fmt.Sprintf("host path %s", source.HostPath.Path) case source.ISCSI != nil: return fmt.Sprintf("ISCSI %s target-portal=%s type=%s lun=%d%s", source.ISCSI.IQN, source.ISCSI.TargetPortal, source.ISCSI.FSType, source.ISCSI.Lun, sourceAccessMode(source.ISCSI.ReadOnly)) case source.NFS != nil: return fmt.Sprintf("NFS %s:%s%s", source.NFS.Server, source.NFS.Path, sourceAccessMode(source.NFS.ReadOnly)) case source.PersistentVolumeClaim != nil: return fmt.Sprintf("pvc/%s%s", source.PersistentVolumeClaim.ClaimName, sourceAccessMode(source.PersistentVolumeClaim.ReadOnly)) case source.RBD != nil: return fmt.Sprintf("Ceph RBD %v type=%s image=%s pool=%s%s", source.RBD.CephMonitors, source.RBD.FSType, source.RBD.RBDImage, source.RBD.RBDPool, 
sourceAccessMode(source.RBD.ReadOnly)) case source.Secret != nil: return fmt.Sprintf("secret/%s", source.Secret.SecretName) default: return "unknown" } } func (v *VolumeOptions) listVolumeForSpec(spec *kapi.PodSpec, info *resource.Info) error { containers, _ := selectContainers(spec.Containers, v.Containers) if len(containers) == 0 && v.Containers != "*" { fmt.Fprintf(v.Err, "warning: %s/%s does not have any containers matching %q\n", info.Mapping.Resource, info.Name, v.Containers) return nil } fmt.Fprintf(v.Out, "%s/%s\n", info.Mapping.Resource, info.Name) checkName := (len(v.Name) > 0) found := false for _, vol := range spec.Volumes { if checkName && v.Name != vol.Name { continue } found = true refInfo := "" if vol.VolumeSource.PersistentVolumeClaim != nil { claimName := vol.VolumeSource.PersistentVolumeClaim.ClaimName claim, err := v.Client.PersistentVolumeClaims(info.Namespace).Get(claimName) switch { case err == nil: refInfo = fmt.Sprintf("(%s)", describePersistentVolumeClaim(claim)) case apierrs.IsNotFound(err): refInfo = "(does not exist)" default: fmt.Fprintf(v.Err, "error: unable to retrieve persistent volume claim %s referenced in %s/%s: %v", claimName, info.Mapping.Resource, info.Name, err) } } if len(refInfo) > 0 { refInfo = " " + refInfo } fmt.Fprintf(v.Out, " %s%s as %s\n", describeVolumeSource(&vol.VolumeSource), refInfo, vol.Name) for _, c := range containers { for _, m := range c.VolumeMounts { if vol.Name != m.Name { continue } if len(spec.Containers) == 1 { fmt.Fprintf(v.Out, " mounted at %s\n", m.MountPath) } else { fmt.Fprintf(v.Out, " mounted at %s in container %s\n", m.MountPath, c.Name) } } } } if checkName && !found { return fmt.Errorf("volume %q not found", v.Name) } return nil }<|fim▁end|>
}
<|file_name|>auth.py<|end_file_name|><|fim▁begin|>AUTHENTICATION_BACKENDS = ( # Needed to login by username in Django admin, regardless of `allauth` 'django.contrib.auth.backends.ModelBackend', # `allauth` specific authentication methods, such as login by e-mail 'allauth.account.auth_backends.AuthenticationBackend', ) LOGIN_REDIRECT_URL = 'reviews' ACCOUNT_EMAIL_REQUIRED = True ACCOUNT_EMAIL_VERIFICATION = True<|fim▁hole|>ALLOW_NEW_REGISTRATIONS = True<|fim▁end|>
ACCOUNT_LOGOUT_ON_GET = True ACCOUNT_PASSWORD_MIN_LENGTH = 10
<|file_name|>space2comment.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python """ Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/) See the file 'doc/COPYING' for copying permission """ from lib.core.enums import PRIORITY __priority__ = PRIORITY.LOW def dependencies(): pass def tamper(payload, **kwargs): """ Replaces space character (' ') with comments '/**/' Tested against: * Microsoft SQL Server 2005 * MySQL 4, 5.0 and 5.5 * Oracle 10g * PostgreSQL 8.3, 8.4, 9.0 Notes: * Useful to bypass weak and bespoke web application firewalls >>> tamper('SELECT id FROM users') 'SELECT/**/id/**/FROM/**/users' """ retVal = payload if payload: retVal = "" quote, doublequote, firstspace = False, False, False for i in xrange(len(payload)): if not firstspace: if payload[i].isspace(): firstspace = True retVal += "/**/" continue <|fim▁hole|> elif payload[i] == '"': doublequote = not doublequote elif payload[i] == " " and not doublequote and not quote: retVal += "/**/" continue retVal += payload[i] return retVal<|fim▁end|>
elif payload[i] == '\'': quote = not quote
<|file_name|>Main.java<|end_file_name|><|fim▁begin|>/* * Author: Minho Kim (ISKU) * Date: March 3, 2018 * E-mail: minho.kim093@gmail.com * * https://github.com/ISKU/Algorithm * https://www.acmicpc.net/problem/13169 */ import java.util.*; public class Main { private static long[] array; private static int N; public static void main(String... args) {<|fim▁hole|> array = new long[N]; for (int i = 0; i < N; i++) array[i] = sc.nextInt(); System.out.print(sum(0, 0)); } private static long sum(int i, long value) { if (i == N) return value; return sum(i + 1, value + array[i]) ^ sum(i + 1, value); } }<|fim▁end|>
Scanner sc = new Scanner(System.in); N = sc.nextInt();
<|file_name|>bugsnag.js<|end_file_name|><|fim▁begin|>import { Meteor } from 'meteor/meteor'; import bugsnag from 'bugsnag'; import { settings } from '../../../settings'; import { Info } from '../../../utils'; settings.get('Bugsnag_api_key', (key, value) => { if (value) { bugsnag.register(value); } }); const notify = function(message, stack) { if (typeof stack === 'string') { message += ` ${ stack }`; } let options = {}; if (Info) { options = { app: { version: Info.version, info: Info } }; } const error = new Error(message); error.stack = stack;<|fim▁hole|>process.on('uncaughtException', Meteor.bindEnvironment((error) => { notify(error.message, error.stack); throw error; })); const originalMeteorDebug = Meteor._debug; Meteor._debug = function(...args) { notify(...args); return originalMeteorDebug(...args); };<|fim▁end|>
bugsnag.notify(error, options); };
<|file_name|>0039_remove_contest_is_external.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Generated by Django 1.9.7 on 2016-08-09 22:56 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('judge', '0038_profile_problem_count'), ] operations = [<|fim▁hole|> migrations.RemoveField( model_name='contest', name='is_external', ), ]<|fim▁end|>
<|file_name|>test_input.py<|end_file_name|><|fim▁begin|>from .. utils import TranspileTestCase, BuiltinFunctionTestCase <|fim▁hole|>class InputTests(TranspileTestCase): pass # FIXME: This test can't run without a redirection for stdin. # class BuiltinInputFunctionTests(BuiltinFunctionTestCase, TranspileTestCase): # functions = ["input"] # not_implemented = [ # 'test_bool', # 'test_bytearray', # 'test_bytes', # 'test_class', # 'test_complex', # 'test_dict', # 'test_float', # 'test_frozenset', # 'test_int', # 'test_list', # 'test_None', # 'test_NotImplemented', # 'test_set', # 'test_str', # 'test_tuple', # ]<|fim▁end|>
<|file_name|>service.py<|end_file_name|><|fim▁begin|>from flow.commands.service import ServiceCommand from flow.configuration.inject.broker import BrokerConfiguration from flow.configuration.inject.redis_conf import RedisConfiguration from flow.configuration.inject.service_locator import ServiceLocatorConfiguration from flow.shell_command.fork.handler import ForkShellCommandMessageHandler import logging LOG = logging.getLogger(__name__) class ForkShellCommand(ServiceCommand): injector_modules = [ BrokerConfiguration, RedisConfiguration,<|fim▁hole|> def _setup(self, *args, **kwargs): self.handlers = [self.injector.get(ForkShellCommandMessageHandler)] return ServiceCommand._setup(self, *args, **kwargs)<|fim▁end|>
ServiceLocatorConfiguration, ]
<|file_name|>kbill.ts<|end_file_name|><|fim▁begin|><!DOCTYPE TS><TS> <context> <name>KBill</name> <message><|fim▁hole|> </message> <message> <source>New game</source> <translation>FOO</translation> </message> <message> <source>Pause game</source> <translation>FOO</translation> </message> <message> <source>Story of kBill</source> <translation>FOO</translation> </message> <message> <source>Rules</source> <translation>FOO</translation> </message> <message> <source>&amp;File</source> <translation>FOO</translation> </message> <message> <source>&amp;Help</source> <translation>FOO</translation> </message> <message> <source>Pause Game</source> <translation>FOO</translation> </message> <message> <source>The story of KBill</source> <translation>FOO</translation> </message> <message> <source>The rules of KBill</source> <translation>FOO</translation> </message> <message> <source>&lt;b&gt;The Rules&lt;/b&gt;&lt;p&gt;kBill has been painstakingly designed and researched in order to make it as easy to use for the whole family as it is for little Sally. Years - nay - days of beta testing and consulting with the cheapest of human interface designers have resulted in a game that is easy to use, yet nothing at all like a Macintosh.&lt;p&gt;&lt;UL&gt;&lt;LI&gt;Whack the Bills (click)&lt;/LI&gt;&lt;LI&gt;Restart the computer (click)&lt;/LI&gt;&lt;LI&gt;Pick up stolen OSes &amp; return (drag) them to their respective computers&lt;/LI&gt;&lt;LI&gt;Drag the bucket to extinguish sparks&lt;/LI&gt;&lt;LI&gt;Scoring is based on total uptime, with bonuses for killing Bills.&lt;/LI&gt;&lt;/UL&gt;&lt;P&gt;As for the rest, you can probably work it out for yourself. We did, so it can&apos;t be too hard</source> <translation>FOO</translation> </message> <message> <source>&lt;b&gt;The Story&lt;/b&gt;&lt;p&gt;Yet again, the fate of the world rests in your hands! An evil computer hacker, known only by his handle &apos;Bill&apos;, has created the ultimate computer virus. 
A virus so powerful that it has the power to transmute an ordinary computer into a toaster oven. (oooh!) &apos;Bill&apos; has cloned himself into a billion-jillion micro-Bills. Their sole purpose is to deliver the nefarious virus, which has been cleverly disguised as a popular operating system. As System Administrator and Exterminator, your job is to keep Bill from succeeding at his task.</source> <translation type="unfinished"></translation> </message> </context> <context> <name>UI</name> <message> <source>After Level</source> <translation>FOO</translation> </message> <message> <source>Your score</source> <translation>FOO</translation> </message> </context> </TS><|fim▁end|>
<source>kBill</source> <translation>FOO</translation>
<|file_name|>Subheader.js<|end_file_name|><|fim▁begin|>'use strict'; import React, {PureComponent} from 'react'; import {StyleSheet, View, Text} from 'react-native'; import withMaterialTheme from './styles/withMaterialTheme'; import {withMeasurementForwarding} from './util'; import * as typo from './styles/typo'; import shades from './styles/shades'; /** * Section heading */ class Subheader extends PureComponent { static defaultProps = { inset: false, lines: 1, }; render() { const { materialTheme, style, textStyle, inset, text, lines, secondary, color: colorOverride, dark, light, ...textProps } = this.props; let color; if ( colorOverride ) { color = colorOverride; } else if ( dark || light ) { const theme = dark ? 'dark' : 'light'; if ( secondary ) color = shades[theme].secondaryText; else color = shades[theme].primaryText; } else { if ( secondary ) color = materialTheme.text.secondaryColor; else color = materialTheme.text.primaryColor; } return ( <View ref={this._setMeasureRef} style={[ styles.container, inset && styles.inset, style, styles.containerOverrides ]}> <Text {...textProps} numberOfLines={lines} style={[ styles.text, textStyle, {color} ]}><|fim▁hole|> </Text> </View> ); } } export default withMaterialTheme(withMeasurementForwarding(Subheader)); const styles = StyleSheet.create({ container: { height: 48, paddingHorizontal: 16, }, containerOverrides: { flexDirection: 'row', alignItems: 'center', }, inset: { paddingRight: 16, paddingLeft: 72, }, text: { ...typo.fontMedium, fontSize: 14, }, });<|fim▁end|>
{text}
<|file_name|>CalendarTableModel.java<|end_file_name|><|fim▁begin|>package bronz.accounting.bunk.ui.model; import java.util.Calendar; import java.util.GregorianCalendar; import javax.swing.table.AbstractTableModel; import bronz.accounting.bunk.ui.panel.CalendarPanel; import bronz.utilities.custom.CustomCalendar; import bronz.utilities.general.DateUtil; public class CalendarTableModel extends AbstractTableModel { private static final long serialVersionUID = 1L; private final CalendarPanel panel; private final Calendar startDate; private Object[][] data; private String[] columnNames = { "", "", "", "", "", "", "" }; public CalendarTableModel( final CalendarPanel panel , final Calendar startDate ) { this.panel = panel; this.startDate = startDate; this.data = new Object[ 7 ][ 7 ]; loadDates(); } private void loadDates() { this.data[ 0 ][ 0 ] = "SUN"; this.data[ 0 ][ 1 ] = "MON"; this.data[ 0 ][ 2 ] = "TUE"; this.data[ 0 ][ 3 ] = "WED"; this.data[ 0 ][ 4 ] = "THU"; this.data[ 0 ][ 5 ] = "FRI"; this.data[ 0 ][ 6 ] = "SAT"; int a = DateUtil.getNoOfDayInMonth( (GregorianCalendar) startDate); startDate.set( Calendar.DATE, 1 ); int day = startDate.get( Calendar.DAY_OF_WEEK ); day--; int x = 0; int y = 0; for ( int i = 0; i < 42; i++) { if( i%7 == 0) { x++; y = 0; } if( (i >= day) && i < ( a + day ) ) { this.data[ x ][ y ] = (i - day)+ 1; } y++; } } public int getColumnCount() { return columnNames.length; } public int getRowCount() { return data.length; } public String getColumnName(int col) { return columnNames[ col ]; } public Object getValueAt(int row, int col) { return data[ row ][ col ]; } public boolean isCellEditable(int row, int col) { if ( this.data[ row ][ col ] instanceof Integer ) { final int selectedDate = (Integer) this.data[ row ][ col ]; final Calendar selectedCalendar = new CustomCalendar(); selectedCalendar.set( Calendar.DATE , selectedDate ); return panel.setSelectedDate( selectedCalendar ); } else { return false;<|fim▁hole|> } public void setValueAt( final 
Object value, final int row, final int col) { } }<|fim▁end|>
}
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>""" Compatibility module. This module contains duplicated code from Python itself or 3rd party extensions, which may be included for the following reasons: <|fim▁hole|>""" import _inspect import py3k from _inspect import getargspec, formatargspec from py3k import * __all__ = [] __all__.extend(_inspect.__all__) __all__.extend(py3k.__all__)<|fim▁end|>
* compatibility * we may only need a small subset of the copied library/module
<|file_name|>driver.go<|end_file_name|><|fim▁begin|>package dbfiles import ( "encoding/csv" "io" "github.com/juju/errgo" ) type Driver interface { Extention() string Write(io.Writer, []string) error Read(io.Reader) ([][]string, error) } type CSV struct{} func (driver CSV) Extention() string { return "csv" } func (driver CSV) Write(writer io.Writer, values []string) error { csvwriter := csv.NewWriter(writer) err := csvwriter.WriteAll([][]string{values})<|fim▁hole|> return errgo.Notef(err, "can not write to csv writer") } return nil } func (driver CSV) Read(reader io.Reader) ([][]string, error) { csvreader := csv.NewReader(reader) csvreader.FieldsPerRecord = -1 var values [][]string values, err := csvreader.ReadAll() if err != nil { return nil, errgo.Notef(err, "can not read all records from file") } return values, nil }<|fim▁end|>
if err != nil {
<|file_name|>uniforms_storage.rs<|end_file_name|><|fim▁begin|>use std::cell::RefCell; use RawUniformValue; use smallvec::SmallVec; use gl; use Handle; use context::CommandContext; use version::Version; use version::Api; pub struct UniformsStorage { values: RefCell<SmallVec<[Option<RawUniformValue>; 16]>>, uniform_blocks: RefCell<SmallVec<[Option<gl::types::GLuint>; 4]>>, shader_storage_blocks: RefCell<SmallVec<[Option<gl::types::GLuint>; 4]>>, } impl UniformsStorage { /// Builds a new empty storage. pub fn new() -> UniformsStorage { UniformsStorage { values: RefCell::new(SmallVec::new()), uniform_blocks: RefCell::new(SmallVec::new()), shader_storage_blocks: RefCell::new(SmallVec::new()), } } /// Compares `value` with the value stored in this object. If the values differ, updates /// the storage and calls `glUniform`. pub fn set_uniform_value(&self, ctxt: &mut CommandContext, program: Handle, location: gl::types::GLint, value: &RawUniformValue) { let mut values = self.values.borrow_mut(); if values.len() <= location as usize { for _ in (values.len() .. location as usize + 1) { values.push(None); } } // TODO: don't assume that, instead use DSA if the program is not current assert!(ctxt.state.program == program); macro_rules! 
uniform( ($ctxt:expr, $uniform:ident, $uniform_arb:ident, $($params:expr),+) => ( unsafe { if $ctxt.version >= &Version(Api::Gl, 1, 5) || $ctxt.version >= &Version(Api::GlEs, 2, 0) { $ctxt.gl.$uniform($($params),+) } else { assert!($ctxt.extensions.gl_arb_shader_objects); $ctxt.gl.$uniform_arb($($params),+) } } ) ); match (value, &mut values[location as usize]) { (&RawUniformValue::SignedInt(a), &mut Some(RawUniformValue::SignedInt(b))) if a == b => (), (&RawUniformValue::UnsignedInt(a), &mut Some(RawUniformValue::UnsignedInt(b))) if a == b => (), (&RawUniformValue::Float(a), &mut Some(RawUniformValue::Float(b))) if a == b => (), (&RawUniformValue::Mat2(a), &mut Some(RawUniformValue::Mat2(b))) if a == b => (), (&RawUniformValue::Mat3(a), &mut Some(RawUniformValue::Mat3(b))) if a == b => (), (&RawUniformValue::Mat4(a), &mut Some(RawUniformValue::Mat4(b))) if a == b => (), (&RawUniformValue::Vec2(a), &mut Some(RawUniformValue::Vec2(b))) if a == b => (), (&RawUniformValue::Vec3(a), &mut Some(RawUniformValue::Vec3(b))) if a == b => (), (&RawUniformValue::Vec4(a), &mut Some(RawUniformValue::Vec4(b))) if a == b => (), (&RawUniformValue::SignedInt(v), target) => { *target = Some(RawUniformValue::SignedInt(v)); uniform!(ctxt, Uniform1i, Uniform1iARB, location, v); }, (&RawUniformValue::UnsignedInt(v), target) => { *target = Some(RawUniformValue::UnsignedInt(v)); // Uniform1uiARB doesn't exist unsafe { if ctxt.version >= &Version(Api::Gl, 1, 5) || ctxt.version >= &Version(Api::GlEs, 2, 0) { ctxt.gl.Uniform1ui(location, v) } else { assert!(ctxt.extensions.gl_arb_shader_objects); ctxt.gl.Uniform1iARB(location, v as gl::types::GLint) } } }, (&RawUniformValue::Float(v), target) => { *target = Some(RawUniformValue::Float(v)); uniform!(ctxt, Uniform1f, Uniform1fARB, location, v); }, (&RawUniformValue::Mat2(v), target) => { *target = Some(RawUniformValue::Mat2(v)); uniform!(ctxt, UniformMatrix2fv, UniformMatrix2fvARB, location, 1, gl::FALSE, v.as_ptr() as *const f32); }, 
(&RawUniformValue::Mat3(v), target) => { *target = Some(RawUniformValue::Mat3(v)); uniform!(ctxt, UniformMatrix3fv, UniformMatrix3fvARB, location, 1, gl::FALSE, v.as_ptr() as *const f32); }, (&RawUniformValue::Mat4(v), target) => { *target = Some(RawUniformValue::Mat4(v)); uniform!(ctxt, UniformMatrix4fv, UniformMatrix4fvARB, location, 1, gl::FALSE, v.as_ptr() as *const f32); }, (&RawUniformValue::Vec2(v), target) => { *target = Some(RawUniformValue::Vec2(v)); uniform!(ctxt, Uniform2fv, Uniform2fvARB, location, 1, v.as_ptr() as *const f32); }, (&RawUniformValue::Vec3(v), target) => { *target = Some(RawUniformValue::Vec3(v)); uniform!(ctxt, Uniform3fv, Uniform3fvARB, location, 1, v.as_ptr() as *const f32); }, (&RawUniformValue::Vec4(v), target) => { *target = Some(RawUniformValue::Vec4(v)); uniform!(ctxt, Uniform4fv, Uniform4fvARB, location, 1, v.as_ptr() as *const f32); }, } } /// Compares `value` with the value stored in this object. If the values differ, updates /// the storage and calls `glUniformBlockBinding`. pub fn set_uniform_block_binding(&self, ctxt: &mut CommandContext, program: Handle, location: gl::types::GLuint, value: gl::types::GLuint) { let mut blocks = self.uniform_blocks.borrow_mut(); if blocks.len() <= location as usize { for _ in (blocks.len() .. location as usize + 1) { blocks.push(None); } } // TODO: don't assume that, instead use DSA if the program is not current assert!(ctxt.state.program == program); match (value, &mut blocks[location as usize]) { (a, &mut Some(b)) if a == b => (), (a, target) => { *target = Some(a); match program {<|fim▁hole|> ctxt.gl.UniformBlockBinding(id, location, value); }, _ => unreachable!() } }, } } /// Compares `value` with the value stored in this object. If the values differ, updates /// the storage and calls `glShaderStorageBlockBinding`. 
pub fn set_shader_storage_block_binding(&self, ctxt: &mut CommandContext, program: Handle, location: gl::types::GLuint, value: gl::types::GLuint) { let mut blocks = self.shader_storage_blocks.borrow_mut(); if blocks.len() <= location as usize { for _ in (blocks.len() .. location as usize + 1) { blocks.push(None); } } // TODO: don't assume that, instead use DSA if the program is not current assert!(ctxt.state.program == program); match (value, &mut blocks[location as usize]) { (a, &mut Some(b)) if a == b => (), (a, target) => { *target = Some(a); match program { Handle::Id(id) => unsafe { ctxt.gl.ShaderStorageBlockBinding(id, location, value); }, _ => unreachable!() } }, } } }<|fim▁end|>
Handle::Id(id) => unsafe {
<|file_name|>CustomTableRow.spec.tsx<|end_file_name|><|fim▁begin|>import React from 'react'; import { createShallow } from '@material-ui/core/test-utils'; import { CustomTableRow } from '../components/common/CustomTableRow'; describe('CustomTextField component', () => { let shallow; beforeAll(() => { shallow = createShallow(); }); const props = { title: 'test title', children: 'test data', classes: {}, ...(global as any).eventkit_test_props, }; const getWrapper = prop => ( shallow(<CustomTableRow {...prop} />) ); it('should render a title and data', () => { const wrapper = getWrapper(props); expect(wrapper.find('.qa-CustomTableRow')).toHaveLength(1); expect(wrapper.find('.qa-CustomTableRow').find('div').at(1) .text()).toEqual('test title'); expect(wrapper.find('.qa-CustomTableRow').find('div').at(2) .text()).toEqual('test data');<|fim▁hole|><|fim▁end|>
}); });
<|file_name|>service_offerings.go<|end_file_name|><|fim▁begin|>package egoscale // ServiceOffering corresponds to the Compute Offerings // // A service offering correspond to some hardware features (CPU, RAM). // // See: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/latest/service_offerings.html type ServiceOffering struct { Authorized bool `json:"authorized,omitempty" doc:"is the account/domain authorized to use this service offering"` CPUNumber int `json:"cpunumber,omitempty" doc:"the number of CPU"` CPUSpeed int `json:"cpuspeed,omitempty" doc:"the clock rate CPU speed in Mhz"` Created string `json:"created,omitempty" doc:"the date this service offering was created"` DefaultUse bool `json:"defaultuse,omitempty" doc:"is this a default system vm offering"` DeploymentPlanner string `json:"deploymentplanner,omitempty" doc:"deployment strategy used to deploy VM."` DiskBytesReadRate int64 `json:"diskBytesReadRate,omitempty" doc:"bytes read rate of the service offering"` DiskBytesWriteRate int64 `json:"diskBytesWriteRate,omitempty" doc:"bytes write rate of the service offering"` DiskIopsReadRate int64 `json:"diskIopsReadRate,omitempty" doc:"io requests read rate of the service offering"` DiskIopsWriteRate int64 `json:"diskIopsWriteRate,omitempty" doc:"io requests write rate of the service offering"` Displaytext string `json:"displaytext,omitempty" doc:"an alternate display text of the service offering."` HostTags string `json:"hosttags,omitempty" doc:"the host tag for the service offering"` HypervisorSnapshotReserve int `json:"hypervisorsnapshotreserve,omitempty" doc:"Hypervisor snapshot reserve space as a percent of a volume (for managed storage using Xen or VMware)"` ID *UUID `json:"id" doc:"the id of the service offering"` IsCustomized bool `json:"iscustomized,omitempty" doc:"is true if the offering is customized"` IsCustomizedIops bool `json:"iscustomizediops,omitempty" doc:"true if disk offering uses custom iops, false otherwise"` IsSystem 
bool `json:"issystem,omitempty" doc:"is this a system vm offering"` IsVolatile bool `json:"isvolatile,omitempty" doc:"true if the vm needs to be volatile, i.e., on every reboot of vm from API root disk is discarded and creates a new root disk"` LimitCPUUse bool `json:"limitcpuuse,omitempty" doc:"restrict the CPU usage to committed service offering"` MaxIops int64 `json:"maxiops,omitempty" doc:"the max iops of the disk offering"` Memory int `json:"memory,omitempty" doc:"the memory in MB"` MinIops int64 `json:"miniops,omitempty" doc:"the min iops of the disk offering"` Name string `json:"name,omitempty" doc:"the name of the service offering"` NetworkRate int `json:"networkrate,omitempty" doc:"data transfer rate in megabits per second allowed."` OfferHA bool `json:"offerha,omitempty" doc:"the ha support in the service offering"` Restricted bool `json:"restricted,omitempty" doc:"is this offering restricted"` ServiceOfferingDetails map[string]string `json:"serviceofferingdetails,omitempty" doc:"additional key/value details tied with this service offering"` StorageType string `json:"storagetype,omitempty" doc:"the storage type for this service offering"` SystemVMType string `json:"systemvmtype,omitempty" doc:"is this a the systemvm type for system vm offering"` Tags string `json:"tags,omitempty" doc:"the tags for the service offering"` } // ListRequest builds the ListSecurityGroups request func (so ServiceOffering) ListRequest() (ListCommand, error) { // Restricted cannot be applied here because it really has three states req := &ListServiceOfferings{ ID: so.ID, Name: so.Name, SystemVMType: so.SystemVMType, } if so.IsSystem { req.IsSystem = &so.IsSystem } return req, nil<|fim▁hole|>} //go:generate go run generate/main.go -interface=Listable ListServiceOfferings // ListServiceOfferings represents a query for service offerings type ListServiceOfferings struct { ID *UUID `json:"id,omitempty" doc:"ID of the service offering"` IsSystem *bool `json:"issystem,omitempty" doc:"is 
this a system vm offering"` Keyword string `json:"keyword,omitempty" doc:"List by keyword"` Name string `json:"name,omitempty" doc:"name of the service offering"` Page int `json:"page,omitempty"` PageSize int `json:"pagesize,omitempty"` Restricted *bool `json:"restricted,omitempty" doc:"filter by the restriction flag: true to list only the restricted service offerings, false to list non-restricted service offerings, or nothing for all."` SystemVMType string `json:"systemvmtype,omitempty" doc:"the system VM type. Possible types are \"consoleproxy\", \"secondarystoragevm\" or \"domainrouter\"."` VirtualMachineID *UUID `json:"virtualmachineid,omitempty" doc:"the ID of the virtual machine. Pass this in if you want to see the available service offering that a virtual machine can be changed to."` _ bool `name:"listServiceOfferings" description:"Lists all available service offerings."` } // ListServiceOfferingsResponse represents a list of service offerings type ListServiceOfferingsResponse struct { Count int `json:"count"` ServiceOffering []ServiceOffering `json:"serviceoffering"` }<|fim▁end|>
<|file_name|>GetAllMarketsResponse.java<|end_file_name|><|fim▁begin|>/** * GetAllUsersResponse.java * Created by pgirard at 2:07:29 PM on Aug 19, 2010 * in the com.qagwaai.starmalaccamax.shared.services.action package * for the StarMalaccamax project */ package com.qagwaai.starmalaccamax.client.service.action; import java.util.ArrayList; import com.google.gwt.user.client.rpc.IsSerializable; import com.qagwaai.starmalaccamax.shared.model.MarketDTO; /** * @author pgirard * */ public final class GetAllMarketsResponse extends AbstractResponse implements IsSerializable { /** * */ private ArrayList<MarketDTO> markets; /** * */ private int totalMarkets; <|fim▁hole|> /** * @return the users */ public ArrayList<MarketDTO> getMarkets() { return markets; } /** * @return the totalMarkets */ public int getTotalMarkets() { return totalMarkets; } /** * @param markets * the users to set */ public void setMarkets(final ArrayList<MarketDTO> markets) { this.markets = markets; } /** * @param totalMarkets * the totalMarkets to set */ public void setTotalMarkets(final int totalMarkets) { this.totalMarkets = totalMarkets; } /** * {@inheritDoc} */ @Override public String toString() { return "GetAllMarketsResponse [markets=" + markets + ", totalMarkets=" + totalMarkets + "]"; } }<|fim▁end|>
<|file_name|>pyre_node.py<|end_file_name|><|fim▁begin|># Copyright (c) 2015-2020 Contributors as noted in the AUTHORS file # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # System imports import json import logging import re import uuid from threading import Event # Third-party imports from pyre import Pyre # Local imports from ..tools import zmq, green # , spy_call, w_spy_call, spy_object logger = logging.getLogger(__name__) class PyreNode(Pyre): def __init__(self, *args, **kwargs): # spy_object(self, class_=Pyre, except_=['name', 'uuid'], with_caller=False) # spy_call(self.__init__, args, kwargs, with_caller=False); print self._name = None self._uuid = None super(self.__class__, self).__init__(*args, **kwargs) self.request_results = {} # TODO: Fuse the two dicts self.request_events = {} self.poller = zmq.Poller() self.poller.register(self.inbox, zmq.POLLIN) self.join('SURVEY') def run(self): self.task = green.spawn(self._run, 100) def _run(self, timeout=None): self._running = True self.start() while self._running: try: # logger.debug('Polling') items = dict(self.poller.poll(timeout)) # logger.debug('polled out: %s, %s', len(items), items) while len(items) > 0: for fd, ev in items.items(): if (self.inbox == fd) and (ev == zmq.POLLIN): self._process_message() # logger.debug('quick polling') items = dict(self.poller.poll(0)) # logger.debug('qpoll: %s, %s', len(items), items) except (KeyboardInterrupt, SystemExit): logger.debug('(%s) KeyboardInterrupt or SystemExit', self.name()) break logger.debug('(%s) Exiting loop and stopping', self.name()) self.stop() def _process_message(self): logger.debug('(%s) processing message', self.name()) msg = self.recv() logger.debug('(%s) received stuff: %s', self.name(), msg) msg_type = msg.pop(0) logger.debug('(%s) msg_type: %s', self.name(), msg_type) peer_id = 
uuid.UUID(bytes=msg.pop(0)) logger.debug('(%s) peer_id: %s', self.name(), peer_id) peer_name = msg.pop(0) logger.debug('(%s) peer_name: %s', self.name(), peer_name) if msg_type == b'ENTER': self.on_peer_enter(peer_id, peer_name, msg) elif msg_type == b'EXIT': self.on_peer_exit(peer_id, peer_name, msg) elif msg_type == b'SHOUT': self.on_peer_shout(peer_id, peer_name, msg) elif msg_type == b'WHISPER': self.on_peer_whisper(peer_id, peer_name, msg) def on_peer_enter(self, peer_id, peer_name, msg): logger.debug('(%s) ZRE ENTER: %s, %s', self.name(), peer_name, peer_id) pub_endpoint = self.get_peer_endpoint(peer_id, 'pub') rpc_endpoint = self.get_peer_endpoint(peer_id, 'rpc') self.on_new_peer(peer_id, peer_name, pub_endpoint, rpc_endpoint) def on_new_peer(self, peer_id, peer_name, pub_endpoint, rpc_endpoint): pass def on_peer_exit(self, peer_id, peer_name, msg): logger.debug('(%s) ZRE EXIT: %s, %s', self.name(), peer_name, peer_id) self.on_peer_gone(peer_id, peer_name) def on_peer_gone(self, peer_id, peer_name): pass def on_peer_shout(self, peer_id, peer_name, msg): group = msg.pop(0) data = msg.pop(0) logger.debug('(%s) ZRE SHOUT: %s, %s > (%s) %s', self.name(), peer_name, peer_id, group, data) if group == b'SURVEY': self.on_survey(peer_id, peer_name, json.loads(data)) elif group == b'EVENT': self.on_event(peer_id, peer_name, json.loads(data)) def on_survey(self, peer_id, peer_name, request): pass def on_event(self, peer_id, peer_name, request): pass def on_peer_whisper(self, peer_id, peer_name, msg): logger.debug('(%s) ZRE WHISPER: %s, %s > %s', self.name(), peer_name, peer_id, msg) reply = json.loads(msg[0]) if reply['req_id'] in self.request_results: logger.debug('(%s) Received reply from %s: %s', self.name(), peer_name, reply['data']) self.request_results[reply['req_id']].append((peer_name, reply['data'])) ev, limit_peers = self.request_events[reply['req_id']] if limit_peers and (len(self.request_results[reply['req_id']]) >= limit_peers): ev.set() green.sleep(0) # 
Yield else: logger.warning( '(%s) Discarding reply from %s because the request ID is unknown', self.name(), peer_name ) def get_peer_endpoint(self, peer, prefix): pyre_endpoint = self.peer_address(peer) ip = re.search('.*://(.*):.*', pyre_endpoint).group(1) return '%s://%s:%s' % ( self.peer_header_value(peer, prefix + '_proto'), ip, self.peer_header_value(peer, prefix + '_port') ) def join_event(self): self.join('EVENT') def leave_event(self): self.leave('EVENT') def send_survey(self, request, timeout, limit_peers): # request['req_id'] = ('%x' % randint(0, 0xFFFFFFFF)).encode() self.request_results[request['req_id']] = [] <|fim▁hole|> self.shout('SURVEY', json.dumps(request).encode()) ev.wait(timeout) result = self.request_results[request['req_id']] del self.request_results[request['req_id']] del self.request_events[request['req_id']] return result def send_event(self, request): self.shout('EVENT', json.dumps(request).encode()) def reply_survey(self, peer_id, reply): self.whisper(peer_id, json.dumps(reply).encode()) def shutdown(self): self._running = False def name(self): if self._name is None: # f = w_spy_call(super(self.__class__, self).name, with_caller=False) f = super(self.__class__, self).name self._name = f() return self._name def uuid(self): if self._uuid is None: # f = w_spy_call(super(self.__class__, self).uuid, with_caller=False) f = super(self.__class__, self).uuid self._uuid = f() return self._uuid<|fim▁end|>
ev = Event() self.request_events[request['req_id']] = (ev, limit_peers)
<|file_name|>quicksorts.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # -*- coding: utf-8 -*- # quicksorts.py (C) myke, 2015 # 2015-11-08 1.1 # various versions of quicksort alogo import random TIMES = 10 SIZE = 10 RANGE = 10 # ----------------------------------------------- def qs1 (al): """ Algo quicksort for a list """ if not al: return [] return (qs1([x for x in al if x < al[0]]) + [x for x in al if x == al[0]] + qs1([x for x in al if x > al[0]])) # ----------------------------------------------- def qs2 (array): """ another longer version""" less = [] equal = [] greater = [] if len(array) > 1: pivot = array[0]<|fim▁hole|> less.append(x) if x == pivot: equal.append(x) if x > pivot: greater.append(x) return qs2(less)+equal+qs2(greater) else: return array # ----------------------------------------------- qs = qs1 # ----------------------------------------------- def main (): """ dispatcher: tests make and sort """ for i in range(TIMES): sa = [random.randint(1, RANGE) for e in range(SIZE)] print (sa, "-->", qs (sa)) main() # ----------------------------------------------- # used: http://stackoverflow.com/questions/18262306/quick-sort-with-python<|fim▁end|>
for x in array: if x < pivot:
<|file_name|>dummy_service_pb_service.ts<|end_file_name|><|fim▁begin|>// package: dummy // file: dummy/dummy_service.proto import * as dummy_dummy_service_pb from "../dummy/dummy_service_pb"; export class DummyService { static serviceName = "dummy.DummyService"; } export namespace DummyService { export class CreateGame { static readonly methodName = "CreateGame"; static readonly service = DummyService;<|fim▁hole|> } }<|fim▁end|>
static readonly requestStream = false; static readonly responseStream = false; static readonly requestType = dummy_dummy_service_pb.CreateGameRequest; static readonly responseType = dummy_dummy_service_pb.CreateGameResponse;
<|file_name|>test.py<|end_file_name|><|fim▁begin|>from test_support import *<|fim▁hole|><|fim▁end|>
do_flow(opt=["--mode=flow"])
<|file_name|>route.js<|end_file_name|><|fim▁begin|>"use strict"; /** @module router * A module that defines a class for routing * http requests to handler functions */ module.exports = { Router: Router }; var url = require('url'); function Router(db) { this.db = db; this.routeMap = { get: [], post: [] } } /** @function route * Routes an incoming request to the proper registered * request handler, or sends a 404 error if no match * is found. Modifies the request object to contain a * params associative array of tokens parsed from the * reqeust object. * @param {http.incomingRequest} req - the reqeust object * @param {http.serverResponse} res - the response object */ Router.prototype.route = function(req, res) { console.log("Routing!"); // The method is used to determine which routeMap // to search for the route in var routeMap = this.routeMap[req.method.toLowerCase()]; // The resource string from the request url will // be matched against the routeMap regular expressions var resource = url.parse(req.url).pathname; // Find the correct route for the requested resource // INVARIANT: route has not yet been found. for(var i = 0; i < routeMap.length; i++){ var match = routeMap[i].regexp.exec(resource); if(match) { // Store the parameters as an object // on the request req.params = {} routeMap[i].tokens.forEach(function(token, j){ // Each token corresponds to a capture group // from the regular expression match. These are // offset by 1, as the first entry in the match // array is the full matching string. req.params[token] = match[j+1]; }); // Trigger the handler and return, stopping execution return routeMap[i].handler(req, res); } } console.log("Resource Not Mapped"); // If we reach this point, the resource was not mapped // to a route, so send a 404 error res.statusCode = 404; res.statusMessage = "Resource not found"; res.end("Resource not found"); } /** @function get * Adds a GET route with associated handler to * the get routemap. 
* @param {string} route - the route to add * @param {function} handler - the function to * call when a match is found */ Router.prototype.get = function(route, handler) { addRoute(this.routeMap.get, route, handler); } /** @function post * Adds a POST route with associated handler to * the get routemap. * @param {string} route - the route to add * @param {function} handler - the function to * call when a match is found */ Router.prototype.post = function(route, handler) { addRoute(this.routeMap.post, route, handler); } /** @function resource * This is a shorthand method for generating restful * routes for a resource, i.e.: * GET route/ -> resource.list() * POST route/ -> resource.add() * GET route/:id -> resource.show() * POST route/:id -> resource.update() * GET route/:id/edit -> resource.edit() * POST route/:id/destroy -> resource.destroy() * @param {string} route - the resource route * @param {object} resource - an object implementing the * above methods */ Router.prototype.resource = function(route, resource) { var db = this.db; console.log("Route: "+route); if(resource.list) this.get(route, function(req, res) {resource.list(req, res, db)}); if(resource.create) this.post(route, function(req, res) {resource.create(req, res, db)}); if(resource.read) this.get(route + '/:id', function(req, res) {resource.read(req, res, db)}); if(resource.edit) this.get(route + '/:id/edit', function(req, res) {resource.read(req, res, db)});<|fim▁hole|> /** @function addRoute * This helper function adds a route to a routeMap * associative array * @param {object} routeMap - the routemap to add the route to * @param {string} route - the route to add * @param {function} handler - the handler to add */ function addRoute(routeMap, route, handler) { var tokens = []; // Convert the route into a regular expression var parts = route.split('/').map(function(part) { if(part.charAt(0) == ':') { // This is a token, so store the token and // add a regexp capture group to our parts array 
tokens.push(part.slice(1)); return '(\\w+)'; } else { // This is a static sequence of characters, // so leave it as-is return part; } }); var regexp = new RegExp('^' + parts.join('/') + '/?$'); // Store the route in the routemap routeMap.push({ regexp: regexp, tokens: tokens, handler: handler }); }<|fim▁end|>
if(resource.update) this.post(route + '/:id', function(req, res) {resource.update(req, res, db)}); if(resource.destroy) this.get(route + '/:id/destroy', function(req, res) {resource.destroy(req, res, db)}); }
<|file_name|>index.js<|end_file_name|><|fim▁begin|>/** * Created by huangyao on 14-10-1. */ var _ = require('lodash'); var color =require('colors'); var fs =require('fs'); var config = require('../config.js'); var path = require('path'); var mongoose = require("mongoose"); var lcommon = require('lush').common; console.log(config.db); mongoose.connect(config.db,function (err) { if(err){ throw new Error('db connect error!\n'+err); } console.log('db connect success!'.yellow); }); // console.log('point'); // var models = { // // init : function (callback) { // fs.readdir(path+'/module',function (err,files) { // if(err){ // throw err; // } // // console.log(files); // return callback(files.filter(function (item) { // return !(item === "index.js" || item === "." || item === ".."); // })); // }); // }, // }; // // // models.init(function (files) { // // console.log(files); // for (var item in files) { // //reuire all modules // // console.log(lcommon.literat(files[item]).slice(0,-3)); // console.log(files[item]); // item = files[item].split('.')[0]; // require('./'+ item);<|fim▁hole|>// // // _.extend(models,file.exports); // // console.log(file); // } // }); // var models = fs.readdirSync(path.resolve('.','module')); models.forEach(function(item,index){ if(item !== '.' && item !== '..' && item !== 'index.js'){ // console.log(item.indexOf('.js')); //ends with .js if(item.indexOf('.js') == (item.length-3)){ item = item.split('.')[0]; require('./'+ item); var m = lcommon.literat(item); console.log('loading and use ',m,' model'); exports[m] = mongoose.model(m); } } });<|fim▁end|>
// var m = lcommon.literat(item); // console.log('loading and use ',m,' model'); // exports[m] = mongoose.model(m);
<|file_name|>gulpfile.js<|end_file_name|><|fim▁begin|>// require(d) gulp for compatibility with sublime-gulp. var gulp = require('gulp'); const { src, dest, series, parallel } = require('gulp'); const clean = require('gulp-clean'); const eslint = require('gulp-eslint'); const csso = require('gulp-csso'); const rename = require('gulp-rename'); const babel = require("gulp-babel"); const minify_babel = require("gulp-babel-minify"); const minify = require('gulp-minify'); const rollup = require('rollup'); // Erase build directory function cleandist() { return gulp.src(['build/*'], {read: false, allowEmpty: true}) .pipe(clean()) } // Lint Task function lint() { return gulp.src('./src/*.js') .pipe(eslint()) .pipe(eslint.format()) } // Build ES6 module function build_es6() { return gulp.src('./src/*.js') .pipe(minify({ ext: { src:'.js', min:'.min.js' }, preserveComments: 'some', noSource: true })) .pipe(gulp.dest('./build/es6/chordictionary')) } // Build CommonJS module async function build_commonjs() { const bundle = await rollup.rollup({ input: './src/main.js' }); await bundle.write({ file: './tmp/chordictionary_cjs.js', format: 'cjs', name: 'chordictionary' }); await gulp.src('./tmp/chordictionary_cjs.js', { allowEmpty: true }) .pipe(clean()) .pipe(babel()) .pipe(rename('chordictionary.min.js')) .pipe(minify_babel()) .pipe(gulp.dest('./build/commonjs')) } // Build IIFE script async function build_iife() { const bundle = await rollup.rollup({ input: './src/main.js' }); await bundle.write({ file: './tmp/chordictionary_iife.js', format: 'iife', name: 'chordictionary' }); await gulp.src('./tmp/chordictionary_iife.js', { allowEmpty: true }) .pipe(clean()) .pipe(babel()) .pipe(rename('chordictionary.min.js')) .pipe(minify_babel()) .pipe(gulp.dest('./build/iife'));<|fim▁hole|> // CSS task function css() { return gulp.src('./src/chordictionary.css') .pipe(csso()) .pipe(rename({ suffix: '.min' })) .pipe(gulp.dest('./build')); } const build = gulp.series(cleandist, 
gulp.parallel(css, lint), gulp.parallel(build_es6, build_commonjs, build_iife)); exports.lint = lint; exports.css = css; exports.clean = cleandist; exports.build = build; exports.default = build;<|fim▁end|>
}
<|file_name|>server_test.go<|end_file_name|><|fim▁begin|>// Copyright 2019 The Serviced Authors. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build unit package shell import ( "fmt" "testing" "time" "github.com/stretchr/testify/assert" "github.com/control-center/serviced/domain/service" "github.com/control-center/serviced/servicedversion" "github.com/control-center/serviced/utils" ) type osMock struct { wd string env map[string]string } func (m osMock) Getwd() (string, error) { return m.wd, nil } func (m osMock) Getenv(key string) string { return m.env[key] } func makeOsMock(tz string) osMock { mock := osMock{ wd: "/home/zenoss", env: map[string]string{ "SERVICED_NOREGISTRY": "1", }, } if len(tz) > 0 { mock.env["TZ"] = tz } return mock } func makeService() service.Service { return service.Service{ ID: "abcd-1234", Name: "Abcd", ImageID: "imageAbcd", DisableShell: false, } } func makeProcessConfig(isTTY bool, saveAs string, command string) ProcessConfig { config := ProcessConfig{ IsTTY: isTTY, Mount: []string{ "/home/dir/source,/mnt/src", "/opt/zenoss", }, LogToStderr: true, LogStash: struct { Enable bool SettleTime time.Duration }{ Enable: true, SettleTime: 1000000, }, } if len(saveAs) > 0 { config.SaveAs = saveAs } if len(command) > 0 { config.Command = command } return config } func makeExpectedResult(image string, o *osMock, s *service.Service, c *ProcessConfig) []string { volumeCurrentDir := fmt.Sprintf("%s:/mnt/pwd", o.wd) volumeResources := fmt.Sprintf( 
"%s:%s", utils.ResourcesDir(), utils.RESOURCES_CONTAINER_DIRECTORY, )<|fim▁hole|> expected := []string{ "run", "-u", "root", "-w", "/", "-v", "/opt/serviced/bin/:/serviced", "-v", volumeCurrentDir, "-v", volumeResources, "-v", "/home/dir/source:/mnt/src", "-v", "/opt/zenoss:/opt/zenoss", } if len(c.SaveAs) > 0 { expected = append(expected, "--name=imageName") } else { expected = append(expected, "--rm") } if c.IsTTY { expected = append(expected, "-i", "-t") } expected = append( expected, "-e", servicedVersion, "-e", servicedNoRegistry, "-e", "SERVICED_IS_SERVICE_SHELL=true", "-e", servicedServiceImage, "-e", "SERVICED_UI_PORT=443", "-e", "SERVICED_ZOOKEEPER_ACL_USER=", "-e", "SERVICED_ZOOKEEPER_ACL_PASSWD=", ) val, ok := o.env["TZ"] if ok { expected = append(expected, "-e", fmt.Sprintf("TZ=%s", val)) } expected = append( expected, image, "/serviced/serviced-controller", "--logtostderr=true", "--autorestart=false", "--disable-metric-forwarding", "--logstash=true", "--logstash-settle-time=1ms", "abcd-1234", "0", ) if len(c.Command) > 0 { expected = append(expected, c.Command) } else { expected = append(expected, "su -") } return expected } func TestBuildDockerArgs(t *testing.T) { controller := "/opt/serviced/bin/serviced-controller" docker := "/usr/bin/docker" image := "baseImage" svc := makeService() cases := []struct{ id string mock osMock cfg ProcessConfig }{ {"Case0", makeOsMock(""), makeProcessConfig(true, "imageName", "bash")}, {"Case1", makeOsMock(""), makeProcessConfig(true, "", "bash")}, {"Case2", makeOsMock(""), makeProcessConfig(false, "", "bash")}, {"Case3", makeOsMock(""), makeProcessConfig(false, "", "")}, {"Case4", makeOsMock("TZ"), makeProcessConfig(false, "", "")}, {"Case5", makeOsMock("TZ"), makeProcessConfig(true, "", "")}, } for _, tc := range cases { t.Run(tc.id, func(t *testing.T) { expected := makeExpectedResult(image, &tc.mock, &svc, &tc.cfg) actual := buildDockerArgs(tc.mock, &svc, &tc.cfg, controller, docker, image) assert := assert.New(t) 
assert.NotNil(actual) assert.Equal(expected, actual) }) } }<|fim▁end|>
servicedVersion := fmt.Sprintf("SERVICED_VERSION=%s ", servicedversion.Version) servicedNoRegistry := fmt.Sprintf("SERVICED_NOREGISTRY=%s", o.env["SERVICED_NOREGISTRY"]) servicedServiceImage := fmt.Sprintf("SERVICED_SERVICE_IMAGE=%s", image)
<|file_name|>object_picktest.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python #Copyright (c) 2016, Buti Al Delail #All rights reserved. # #Redistribution and use in source and binary forms, with or without #modification, are permitted provided that the following conditions are met: # #* Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # #* Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # #* Neither the name of kuri_mbzirc_challenge_3 nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" #AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, import rospy import thread import threading import time import mavros import actionlib from math import * from mavros.utils import * from mavros import setpoint as SP from tf.transformations import quaternion_from_euler from kuri_msgs.msg import * class dropzone_landing: def __init__(self): self.done = False self.done_evt = threading.Event() self.isExploring = False self.progress = 0.0 self.x = 0.0 self.y = 0.0 self.z = 0.0 self.currentPoseX = 0 self.currentPoseY = 0 self.currentPoseZ = 0 self.navigating = False mavros.set_namespace('/uav_1/mavros') # publisher for mavros/setpoint_position/local self.pub = SP.get_pub_position_local(queue_size=10) # subscriber for mavros/local_position/local self.sub = rospy.Subscriber(mavros.get_topic('local_position', 'pose'), SP.PoseStamped, self.reached) self.objects_map = ObjectsMap() self.client = actionlib.SimpleActionClient('TrackingAction', TrackingAction) #client = self.client #client = self.actionServer.client print "Waiting for tracking server" self.client.wait_for_server() self.goal = TrackingGoal() self.goal.uav_id = 1 self.client.send_goal(self.goal) print "Waiting for result" self.client.wait_for_result() print "Result:" self.objects =self.client.get_result().tracked_objects.objects print self.objects try: thread.start_new_thread(self.navigate, ()) except: fault("Error: Unable to start thread") def navigate(self): rate = rospy.Rate(40) # 10hz msg = SP.PoseStamped( header=SP.Header( frame_id="base_footprint", # no matter, plugin don't use TF stamp=rospy.Time.now()), # stamp should update ) while not rospy.is_shutdown(): msg.pose.position.x = self.x msg.pose.position.y = self.y msg.pose.position.z = self.z # For demo purposes we will lock yaw/heading to north. 
yaw_degrees = 0 # North yaw = radians(yaw_degrees) quaternion = quaternion_from_euler(0, 0, yaw) msg.pose.orientation = SP.Quaternion(*quaternion) self.pub.publish(msg) rate.sleep() def setPose(self, x, y, z, delay=0, wait=True):<|fim▁hole|> self.navigating = True if wait: rate = rospy.Rate(5) while not self.done and not rospy.is_shutdown(): rate.sleep() time.sleep(delay) def takeoff(self, z, delay=0, wait=True): diff = z - self.currentPoseZ while not abs(diff)<0.2: diff = z - self.currentPoseZ if diff>0: self.setPose(self.currentPoseX,self.currentPoseY,self.currentPoseZ + 1, 0, False) else: self.setPose(self.currentPoseX,self.currentPoseY,self.currentPoseZ - 0.1, 0, False) def land(self, delay=0, wait=True): altitude = self.currentPoseZ while altitude > 0: altitude = self.currentPoseZ self.setPose(self.currentPoseX,self.currentPoseY,self.currentPoseZ - 0.5 ,2) def reached(self, topic): def is_near(msg, x, y, d): rospy.logdebug("Position %s: local: %d, target: %d, abs diff: %d", msg, x, y, abs(x - y)) return abs(x - y) < d self.currentPoseX = topic.pose.position.x self.currentPoseY = topic.pose.position.y self.currentPoseZ = topic.pose.position.z if is_near('X', topic.pose.position.x, self.x, 0.2) and \ is_near('Y', topic.pose.position.y, self.y, 0.2) and \ is_near('Z', topic.pose.position.z, self.z, 0.5): if self.navigating: self.done = True self.navigating = False self.done_evt.set() def explore(self): print 'explore started ' rate = rospy.Rate(30) self.newGoal = True if self.isExploring == False: #Change this later when we have a better exploration #self.isExploring = True while self.done == False: time.sleep(1) rospy.loginfo("Climb") self.progress += 0.1 self.takeoff(5) self.progress += 0.1 rospy.loginfo("Moving to Red_Object") self.reached_object = False red_object_id = -1 xspeed = 1 while self.reached_object == False: self.client.send_goal(self.goal) self.client.wait_for_result() self.objects = self.client.get_result().tracked_objects.objects islost = True 
for obj in self.objects: if red_object_id == -1 and (obj.color == 'RED' or obj.color == 'BLUE' or obj.color == 'GREEN'): #pick any nearby object red_object_id = obj.object_id if obj.object_id == red_object_id: islost = False print 'Moving to Drop zone', self.currentPoseX-obj.pose2.pose.position.x, self.currentPoseY-obj.pose2.pose.position.y, obj.pose.pose.position.x, obj.pose.pose.position.y if fabs(obj.pose2.pose.position.x) < 0.01 and fabs(obj.pose2.pose.position.y) > 0.01: print 'Moving Y' self.setPose(self.x, self.currentPoseY+obj.pose2.pose.position.y*xspeed, self.z, 0 , False) elif fabs(obj.pose2.pose.position.y) < 0.01 and fabs(obj.pose2.pose.position.x) > 0.01: print 'Moving X' self.setPose(self.currentPoseX-obj.pose2.pose.position.x*xspeed, self.y, self.z, 0 , False) else: print 'Moving XY' self.setPose(self.currentPoseX-obj.pose2.pose.position.x*xspeed, self.currentPoseY+obj.pose2.pose.position.y*xspeed, self.z, 0 , True) if fabs(obj.pose2.pose.position.x) < 0.3 and fabs(obj.pose2.pose.position.y) < 0.3 and self.z > 0.0: print 'Moving Z' land = 0.2 if self.z <= 3: xspeed = 1 if self.z <= 1.5: xspeed = 0.5 if self.z < 0.5: land = 0.05 self.setPose(self.x, self.y, self.z - land * xspeed, 1, False) if self.z <= 0.4: self.reached_object = True if islost == True: red_object_id = -1 if red_object_id == -1: rospy.loginfo("No object in sight, exploring") #self.setPose(self.x, self.y - 5, self.z, 1, True) rate.sleep() time.sleep(10) rospy.loginfo("Picked Object, climb") self.takeoff(1) self.takeoff(2) self.takeoff(3) self.takeoff(4) self.takeoff(5) #self.setPose(self.x, self.y, self.z) time.sleep(10) rospy.loginfo("Moving to DropZone") self.setPose(1, -21, 5) ##Go near dropzone self.progress += 0.1 self.reached_dropzone = False xspeed = 3 while self.reached_dropzone == False: self.client.send_goal(self.goal) self.client.wait_for_result() self.objects = self.client.get_result().tracked_objects.objects for obj in self.objects: if obj.color == 'DROP_ZONE': print 
'Moving to Drop zone', self.currentPoseX-obj.pose2.pose.position.x, self.currentPoseY-obj.pose2.pose.position.y, obj.pose.pose.position.x, obj.pose.pose.position.y if fabs(obj.pose2.pose.position.x) < 0.1 and fabs(obj.pose2.pose.position.y) > 0.1: print 'Moving Y' self.setPose(self.x, self.currentPoseY+obj.pose2.pose.position.y*xspeed, self.z, 0 , False) elif fabs(obj.pose2.pose.position.y) < 0.1 and fabs(obj.pose2.pose.position.x) > 0.1: print 'Moving X' self.setPose(self.currentPoseX-obj.pose2.pose.position.x*xspeed, self.y, self.z, 0 , False) else: print 'Moving XY' self.setPose(self.currentPoseX-obj.pose2.pose.position.x*xspeed, self.currentPoseY+obj.pose2.pose.position.y*xspeed, self.z, 0 , True) if fabs(obj.pose2.pose.position.x) < 0.3 and fabs(obj.pose2.pose.position.y) < 0.3 and self.z > 1: print 'Moving Z' land = 0.5 if self.z <= 3: land = 0.2 xspeed = 0.5 self.setPose(self.x, self.y, self.z - land, 1, False) if self.z < 1.5: self.reached_dropzone = True rate.sleep() self.progress += 0.1 rospy.loginfo("Landed Object, climb") self.takeoff(7) rospy.loginfo("Bye!") def main(args): '''Initializes and cleanup ros node''' rospy.init_node('dropzone_landing', anonymous=True) d = dropzone_landing() d.explore() try: rospy.spin() except KeyboardInterrupt: print "Shutting down ROS Image feature detector module" cv2.destroyAllWindows() if __name__ == '__main__': main(sys.argv)<|fim▁end|>
self.done = False self.x = x self.y = y self.z = z
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python3 # Copyright (C) 2016 Job Snijders <job@instituut.net> # # This file is part of rtrsub # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
import rtrsub version = rtrsub.__version__ import codecs import os import sys from os.path import abspath, dirname, join from setuptools import setup, find_packages here = abspath(dirname(__file__)) def parse_requirements(filename): """ load requirements from a pip requirements file """ lineiter = (line.strip() for line in open(filename)) return [line for line in lineiter if line and not line.startswith("#")] with codecs.open(join(here, 'README.md'), encoding='utf-8') as f: README = f.read() if sys.argv[-1] == 'publish': os.system('python3 setup.py sdist upload') print("You probably want to also tag the version now:") print((" git tag -a %s -m 'version %s'" % (version, version))) print(" git push --tags") sys.exit() install_reqs = parse_requirements('requirements.txt') reqs = install_reqs setup( name='rtrsub', version=version, maintainer="Job Snijders", maintainer_email='job@instituut.net', url='https://github.com/job/rtrsub', description='RTR Substitution', long_description=README, long_description_content_type="text/markdown", license='BSD 2-Clause', keywords='rpki prefix routing networking', setup_requires=reqs, install_requires=reqs, classifiers=[ 'Intended Audience :: Developers', 'Topic :: Software Development :: Libraries :: Python Modules', 'Topic :: System :: Networking',<|fim▁hole|> 'Programming Language :: Python :: 3 :: Only' ], packages=find_packages(exclude=['tests', 'tests.*']), entry_points={'console_scripts': ['rtrsub = rtrsub.rtrsub:main']}, )<|fim▁end|>
'License :: OSI Approved :: BSD License',
<|file_name|>table_datablock.py<|end_file_name|><|fim▁begin|>import attr import struct import math import re import copy from datablock import Datablock from record import Record from rowid import Rowid @attr.s class TableDatablock(Datablock): header = attr.ib(default=[]) records = attr.ib(default=[]) def get_data(self): """ Convert header and records to bytes """ records_buffer = bytearray(self.records_size()) for i in range(0,len(self.records)): struct.pack_into('%ss' % self.header[2*i+1], records_buffer, self.header[2*i], self.records[i].pack()) fmt = 'BH%sH%ss' % (len(self.header), len(records_buffer)) data = struct.pack(fmt, self.type, self.count_record, *self.header, records_buffer) return data def save_record(self, record): """ Saves a Record to the datablock """ if type(record) is not Record: raise TypeError("Wrong type for save_record()") # TODO: check if there's room in the Datablock # TODO: save to the Datablock def records_size(self): return TableDatablock.DATABLOCK_SIZE - ((len(self.header) * 2)) def free_contiguous_space(self, space_needed): if(len(self.header) == 0): return 0 last_offset = 0 for i in range(0, len(self.header), 2): if self.header[last_offset] < self.header[i]: last_offset = i #Check for space between records if(i+2 < len(self.header)): space_between = self.header[i+2]-(self.header[i]+self.header[i+1]) if(self.header[i+1] == 0): #If header wanted is deleted, ignore header space space_between += 4 if(space_needed <= space_between): return self.header[i]+self.header[i+1] #Check for space in the end if(self.records_size() -(self.header[last_offset]+self.header[last_offset+1]) >= space_needed): return self.header[last_offset]+self.header[last_offset+1] return -1 def write_data(self, record, position=None): if(position is None): position = self.free_contiguous_space(record.size()+4) if(position == -1): print('Error writing data') return False # Insert Header in the right position place = -1 for i in range(0, len(self.header), 2): 
if(self.header[i] == position and self.header[i+1] == 0): # Going to use header that was delated place = i self.header[i+1] = record.size() return self._insert_new_record(record, place, True) elif(self.header[i] > position): place = i self.header.insert(i, position) self.header.insert(i+1, record.size()) return self._insert_new_record(record, place) if(place == -1): place = len(self.header) self.header.append(position) self.header.append(record.size()) return self._insert_new_record(record, place) def update_record(self, record, desc): tmp_record = copy.copy(record) tmp_record.description = desc pos = record.rowid.pos*2 can_store = False if(pos+2 >= len(self.header)): can_store =((self.header[pos+1] + (self.records_size() - (self.header[pos]+self.header[pos+1]))) >= tmp_record.size()) else: can_store = ((self.header[pos+1]+(self.header[pos+2]-self.header[pos+1])) >= tmp_record.size()) #Check for space between records if(can_store): record.description = desc self.header[pos+1] = record.size() self._dirty = True return True else: self.delete_record(record) return None def delete_record(self, record): pos = record.rowid.pos self.header[pos*2+1] = 0 #set record removed size to 0 to mark it was removed self.records[pos].deleted = True self._dirty = True return True def search_by(self, value, field): found_records = [] for record in self.records: if(field == 'code'): if(record.code == value and not record.deleted):<|fim▁hole|> return found_records def get_record_by_pos(self, position): """ Get specific record by its position """ return self.records[position] @classmethod def from_bytes(cls, address, data=None, count_record=0): """ Creates a new TableDatablock in memory from a string of bytes """ if(count_record == 0 and data is None): return cls(address=address, count_record=count_record, type=1, header=[], records=[]) header = [] header_info, record_info = TableDatablock.unpack(count_record, data) for i in range(0, count_record * 2, 2): header.append(header_info[i+2]) 
#Get record begin position header.append(header_info[i + 2 + 1]) #Get record length records = TableDatablock.unpack_records(record_info[0], header, address) return cls(address=address, count_record=count_record, type=1, header=header, records=records) @staticmethod def unpack(count_record, data): records_size = TableDatablock.DATABLOCK_SIZE - ((count_record * 4) + 4) # Calculate the remaining space in the record data area fmt_header = 'BH%sH%sx' % (count_record * 2, records_size) fmt_record = '%ss' % records_size header = struct.unpack(fmt_header, data) # Get binary header data records = struct.unpack_from(fmt_record, data, (count_record * 4) + 4) # Get binary records data return header, records @staticmethod def unpack_records(record_str, header, address): """ Returns a list of Records included in the datablock """ records = [] for i in range(0, len(header), 2): if(header[i+1] != 0): info = struct.unpack_from('I%ss' % (header[i+1]-4), record_str, header[i]) rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) desc = re.sub(r'[^\w]', '', info[1].decode()) records.append(Record(code=info[0], description=desc, rowid=rowid)) else: rowid = Rowid(dblock=address, pos=int(math.ceil(i/2.0))) records.append(Record(code=0, description='', rowid=rowid, deleted=True)) return records def _insert_new_record(self, record, place, reuse=False): if(record.rowid is None): record.rowid = Rowid(dblock=self.address, pos=int(math.ceil(place/2.0))) if(reuse): self.records[place] = record else: self.records.insert(place, record) self._dirty = True self.count_record = len(self.records) return record<|fim▁end|>
return [record] elif(field == 'description'): if(record.description == value and not record.deleted): found_records.append(record)