Dataset Viewer
Auto-converted to Parquet Duplicate
query
stringlengths
4
15k
positive
stringlengths
5
373k
negative
stringlengths
5
289k
Start the asyncio event loop and run the application.
def main(): """Start the asyncio event loop and runs the application.""" # Helper method so that the coroutine exits cleanly if an exception # happens (which would leave resources dangling) async def _run_application(loop): try: return await cli_handler(loop) except KeyboardInterrupt: pass # User pressed Ctrl+C, just ignore it except SystemExit: pass # sys.exit() was used - do nothing except: # pylint: disable=bare-except # noqa import traceback traceback.print_exc(file=sys.stderr) sys.stderr.writelines( '\n>>> An error occurred, full stack trace above\n') return 1 try: loop = asyncio.get_event_loop() return loop.run_until_complete(_run_application(loop)) except KeyboardInterrupt: pass return 1
def run(self, loop=None):
    """Actually run the application

    :param loop: Custom event loop or None for default
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    self.loop = loop
    # Run startup and background tasks before the main task starts.
    loop.run_until_complete(self.startup())
    for func in self.tasks:
        self.start_task(func)
    try:
        task = self.start_task(self.main_task)
        loop.run_until_complete(task)
    except (KeyboardInterrupt, SystemError):
        # NOTE(review): catching SystemError here looks unusual --
        # SystemExit may have been intended; confirm before changing.
        print("Attempting graceful shutdown, press Ctrl-C again to exit", flush=True)

        # Suppress CancelledError noise for the tasks we cancel below;
        # everything else still goes to the default handler.
        def shutdown_exception_handler(_loop, context):
            if "exception" not in context or not isinstance(context["exception"], asyncio.CancelledError):
                _loop.default_exception_handler(context)
        loop.set_exception_handler(shutdown_exception_handler)
        # Cancel every started task and keep running the loop until the
        # gathered future completes; its done-callback stops the loop.
        # NOTE(review): gather(loop=...) was removed in Python 3.10 --
        # this code targets an older asyncio.
        tasks = asyncio.gather(*self._started_tasks, loop=loop, return_exceptions=True)
        tasks.add_done_callback(lambda _: loop.stop())
        tasks.cancel()
        while not tasks.done() and not loop.is_closed():
            loop.run_forever()
    finally:
        # Teardown always runs, even after a failed main task.
        loop.run_until_complete(self.shutdown())
        loop.run_until_complete(self.cleanup())
        loop.close()
Start the asyncio event loop and run the application.
def main(): """Start the asyncio event loop and runs the application.""" # Helper method so that the coroutine exits cleanly if an exception # happens (which would leave resources dangling) async def _run_application(loop): try: return await cli_handler(loop) except KeyboardInterrupt: pass # User pressed Ctrl+C, just ignore it except SystemExit: pass # sys.exit() was used - do nothing except: # pylint: disable=bare-except # noqa import traceback traceback.print_exc(file=sys.stderr) sys.stderr.writelines( '\n>>> An error occurred, full stack trace above\n') return 1 try: loop = asyncio.get_event_loop() return loop.run_until_complete(_run_application(loop)) except KeyboardInterrupt: pass return 1
def asyncio_main_run(root_runner: BaseRunner):
    """
    Create an ``asyncio`` event loop running in the main thread and watching runners

    Using ``asyncio`` to handle subprocesses requires a specific loop type
    to run in the main thread. This function sets up and runs the correct
    loop in a portable way.
    In addition, it runs a single :py:class:`~.BaseRunner` until completion
    or failure.

    .. seealso:: The `issue #8 <https://github.com/MatterMiners/cobald/issues/8>`_ for details.
    """
    # asyncio child watchers only work from the main thread.
    assert threading.current_thread() == threading.main_thread(), 'only main thread can accept asyncio subprocesses'
    if sys.platform == 'win32':
        # Windows needs the proactor loop for subprocess support.
        event_loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(event_loop)
    else:
        # On POSIX the child watcher must be attached to the loop that
        # spawns the subprocesses.
        event_loop = asyncio.get_event_loop()
        asyncio.get_child_watcher().attach_loop(event_loop)
    event_loop.run_until_complete(awaitable_runner(root_runner))
Start the asyncio event loop and run the application.
def main(): """Start the asyncio event loop and runs the application.""" # Helper method so that the coroutine exits cleanly if an exception # happens (which would leave resources dangling) async def _run_application(loop): try: return await cli_handler(loop) except KeyboardInterrupt: pass # User pressed Ctrl+C, just ignore it except SystemExit: pass # sys.exit() was used - do nothing except: # pylint: disable=bare-except # noqa import traceback traceback.print_exc(file=sys.stderr) sys.stderr.writelines( '\n>>> An error occurred, full stack trace above\n') return 1 try: loop = asyncio.get_event_loop() return loop.run_until_complete(_run_application(loop)) except KeyboardInterrupt: pass return 1
def run(self):
    """Run the event loop."""
    # Install signal handlers and listening sockets before entering the
    # loop so the process is fully wired up when it starts serving.
    self.signal_init()
    self.listen_init()
    self.logger.info('starting')
    # Blocks until the loop is stopped elsewhere.
    self.loop.start()
Start the asyncio event loop and run the application.
def main(): """Start the asyncio event loop and runs the application.""" # Helper method so that the coroutine exits cleanly if an exception # happens (which would leave resources dangling) async def _run_application(loop): try: return await cli_handler(loop) except KeyboardInterrupt: pass # User pressed Ctrl+C, just ignore it except SystemExit: pass # sys.exit() was used - do nothing except: # pylint: disable=bare-except # noqa import traceback traceback.print_exc(file=sys.stderr) sys.stderr.writelines( '\n>>> An error occurred, full stack trace above\n') return 1 try: loop = asyncio.get_event_loop() return loop.run_until_complete(_run_application(loop)) except KeyboardInterrupt: pass return 1
def _open(file, mode='r', buffering=-1, encoding=None, errors=None,
          newline=None, closefd=True, opener=None, *, loop=None, executor=None):
    """Open an asyncio file.

    Mirrors the signature of the builtin ``open``; the blocking call is
    delegated to *executor* so the event loop is never blocked.

    :param loop: event loop to use (default loop when None)
    :param executor: executor for the blocking open (loop default when None)
    :return: the opened file wrapped in an async adapter
    """
    if loop is None:
        loop = asyncio.get_event_loop()
    # Bind all arguments up front; the executor invokes the callable
    # with no arguments.
    cb = partial(sync_open, file, mode=mode, buffering=buffering,
                 encoding=encoding, errors=errors, newline=newline,
                 closefd=closefd, opener=opener)
    # NOTE(review): old-style generator coroutine (yield from); kept as-is
    # because callers may rely on the generator-based coroutine type.
    f = yield from loop.run_in_executor(executor, cb)
    return wrap(f, loop=loop, executor=executor)
Start the asyncio event loop and run the application.
def main(): """Start the asyncio event loop and runs the application.""" # Helper method so that the coroutine exits cleanly if an exception # happens (which would leave resources dangling) async def _run_application(loop): try: return await cli_handler(loop) except KeyboardInterrupt: pass # User pressed Ctrl+C, just ignore it except SystemExit: pass # sys.exit() was used - do nothing except: # pylint: disable=bare-except # noqa import traceback traceback.print_exc(file=sys.stderr) sys.stderr.writelines( '\n>>> An error occurred, full stack trace above\n') return 1 try: loop = asyncio.get_event_loop() return loop.run_until_complete(_run_application(loop)) except KeyboardInterrupt: pass return 1
// Starts the test-helper server in a child process; resolves once the
// server prints anything on stdout. Restarts cleanly if already running.
async function start() {
  if (child) {
    await stop();
  }

  return new Promise((resolve) => {
    const serverPath = path.resolve(process.cwd(), 'packages/node_modules/@webex/test-helper-server');

    // stdout is piped so we can detect readiness; stderr passes through.
    child = spawn(process.argv[0], [serverPath], {
      env: process.env,
      stdio: ['ignore', 'pipe', process.stderr]
    });

    child.stdout.on('data', (data) => {
      const message = `${data}`;
      const pattern = /.+/gi;

      // Any non-empty output is taken as "server is up".
      if (message.match(pattern)) {
        resolve();
      }
    });

    // NOTE(review): this registers a new 'exit' listener on every call;
    // repeated restarts accumulate listeners — confirm this is intended.
    process.on('exit', stop);
  });
}
Initialize the pool manager with the number of pools, the entry sizes for each pool, and the maximum depth of the free pool. @param bufferEntrySizes the memory sizes of each entry in the pools @param bufferEntryDepths the maximum number of entries in the free pool
/**
 * Initialize the pool manager: sorts the (size, depth) pairs by entry size
 * and creates one heap pool and one direct pool per size.
 *
 * @param bufferEntrySizes the memory sizes of each entry in the pools
 * @param bufferEntryDepths the maximum number of entries in the free pool
 */
public void initialize(int[] bufferEntrySizes, int[] bufferEntryDepths) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "initialize");
    }
    // order both lists from smallest to largest, based only on Entry Sizes;
    // this is an insertion sort that moves sizes and depths in lockstep.
    int len = bufferEntrySizes.length;
    int[] bSizes = new int[len];
    int[] bDepths = new int[len];
    int sizeCompare;
    int depth;
    int sizeSort;
    int j;
    for (int i = 0; i < len; i++) {
        sizeCompare = bufferEntrySizes[i];
        depth = bufferEntryDepths[i];
        // go backwards, for speed, since first Array List is
        // probably already ordered small to large
        for (j = i - 1; j >= 0; j--) {
            sizeSort = bSizes[j];
            if (sizeCompare > sizeSort) {
                // add the bigger one after the smaller one
                bSizes[j + 1] = sizeCompare;
                bDepths[j + 1] = depth;
                break;
            }
            // move current one down, since it is bigger
            bSizes[j + 1] = sizeSort;
            bDepths[j + 1] = bDepths[j];
        }
        if (j < 0) {
            // smallest so far, add it at the front of the list
            bSizes[0] = sizeCompare;
            bDepths[0] = depth;
        }
    }
    boolean tracking = trackingBuffers();
    this.pools = new WsByteBufferPool[len];
    this.poolsDirect = new WsByteBufferPool[len];
    this.poolSizes = new int[len];
    for (int i = 0; i < len; i++) {
        // make backing pool 10 times larger than local pools
        this.pools[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, false);
        this.poolsDirect[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, true);
        this.poolSizes[i] = bSizes[i];
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Number of pools created: " + this.poolSizes.length);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "initialize");
    }
}
/**
 * Re-distributes the non-required ("excess") memory segments across all
 * registered buffer pools, proportionally to each pool's remaining capacity.
 * Caller must hold {@code factoryLock}.
 *
 * @throws IOException propagated from resizing the pools
 */
private void redistributeBuffers() throws IOException {
    assert Thread.holdsLock(factoryLock);

    // All buffers, which are not among the required ones
    final int numAvailableMemorySegment = totalNumberOfMemorySegments - numTotalRequiredBuffers;

    if (numAvailableMemorySegment == 0) {
        // in this case, we need to redistribute buffers so that every pool gets its minimum
        for (LocalBufferPool bufferPool : allBufferPools) {
            bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments());
        }
        return;
    }

    /*
     * With buffer pools being potentially limited, let's distribute the available memory
     * segments based on the capacity of each buffer pool, i.e. the maximum number of segments
     * an unlimited buffer pool can take is numAvailableMemorySegment, for limited buffer pools
     * it may be less. Based on this and the sum of all these values (totalCapacity), we build
     * a ratio that we use to distribute the buffers.
     */
    long totalCapacity = 0; // long to avoid int overflow
    for (LocalBufferPool bufferPool : allBufferPools) {
        int excessMax = bufferPool.getMaxNumberOfMemorySegments() - bufferPool.getNumberOfRequiredMemorySegments();
        totalCapacity += Math.min(numAvailableMemorySegment, excessMax);
    }

    // no capacity to receive additional buffers?
    if (totalCapacity == 0) {
        return; // necessary to avoid div by zero when nothing to re-distribute
    }

    // since one of the arguments of 'min(a,b)' is a positive int, this is actually
    // guaranteed to be within the 'int' domain
    // (we use a checked downCast to handle possible bugs more gracefully).
    final int memorySegmentsToDistribute = MathUtils.checkedDownCast(
        Math.min(numAvailableMemorySegment, totalCapacity));

    long totalPartsUsed = 0; // of totalCapacity
    int numDistributedMemorySegment = 0;
    for (LocalBufferPool bufferPool : allBufferPools) {
        int excessMax = bufferPool.getMaxNumberOfMemorySegments() - bufferPool.getNumberOfRequiredMemorySegments();

        // shortcut
        if (excessMax == 0) {
            continue;
        }

        totalPartsUsed += Math.min(numAvailableMemorySegment, excessMax);

        // avoid remaining buffers by looking at the total capacity that should have been
        // re-distributed up until here
        // the downcast will always succeed, because both arguments of the subtraction are in the 'int' domain
        final int mySize = MathUtils.checkedDownCast(
            memorySegmentsToDistribute * totalPartsUsed / totalCapacity - numDistributedMemorySegment);

        numDistributedMemorySegment += mySize;
        bufferPool.setNumBuffers(bufferPool.getNumberOfRequiredMemorySegments() + mySize);
    }

    assert (totalPartsUsed == totalCapacity);
    assert (numDistributedMemorySegment == memorySegmentsToDistribute);
}
Initialize the pool manager with the number of pools, the entry sizes for each pool, and the maximum depth of the free pool. @param bufferEntrySizes the memory sizes of each entry in the pools @param bufferEntryDepths the maximum number of entries in the free pool
/**
 * Initialize the pool manager: sorts the (size, depth) pairs by entry size
 * and creates one heap pool and one direct pool per size.
 *
 * @param bufferEntrySizes the memory sizes of each entry in the pools
 * @param bufferEntryDepths the maximum number of entries in the free pool
 */
public void initialize(int[] bufferEntrySizes, int[] bufferEntryDepths) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "initialize");
    }
    // order both lists from smallest to largest, based only on Entry Sizes;
    // this is an insertion sort that moves sizes and depths in lockstep.
    int len = bufferEntrySizes.length;
    int[] bSizes = new int[len];
    int[] bDepths = new int[len];
    int sizeCompare;
    int depth;
    int sizeSort;
    int j;
    for (int i = 0; i < len; i++) {
        sizeCompare = bufferEntrySizes[i];
        depth = bufferEntryDepths[i];
        // go backwards, for speed, since first Array List is
        // probably already ordered small to large
        for (j = i - 1; j >= 0; j--) {
            sizeSort = bSizes[j];
            if (sizeCompare > sizeSort) {
                // add the bigger one after the smaller one
                bSizes[j + 1] = sizeCompare;
                bDepths[j + 1] = depth;
                break;
            }
            // move current one down, since it is bigger
            bSizes[j + 1] = sizeSort;
            bDepths[j + 1] = bDepths[j];
        }
        if (j < 0) {
            // smallest so far, add it at the front of the list
            bSizes[0] = sizeCompare;
            bDepths[0] = depth;
        }
    }
    boolean tracking = trackingBuffers();
    this.pools = new WsByteBufferPool[len];
    this.poolsDirect = new WsByteBufferPool[len];
    this.poolSizes = new int[len];
    for (int i = 0; i < len; i++) {
        // make backing pool 10 times larger than local pools
        this.pools[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, false);
        this.poolsDirect[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, true);
        this.poolSizes[i] = bSizes[i];
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Number of pools created: " + this.poolSizes.length);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "initialize");
    }
}
/**
 * Registers the buffer pool backing this result partition. May be called
 * exactly once; the pool must guarantee at least one buffer per subpartition.
 *
 * @param bufferPool the pool to register (must not be null)
 */
public void registerBufferPool(BufferPool bufferPool) {
    checkArgument(bufferPool.getNumberOfRequiredMemorySegments() >= getNumberOfSubpartitions(),
        "Bug in result partition setup logic: Buffer pool has not enough guaranteed buffers for this result partition.");

    checkState(this.bufferPool == null, "Bug in result partition setup logic: Already registered buffer pool.");

    this.bufferPool = checkNotNull(bufferPool);
}
Initialize the pool manager with the number of pools, the entry sizes for each pool, and the maximum depth of the free pool. @param bufferEntrySizes the memory sizes of each entry in the pools @param bufferEntryDepths the maximum number of entries in the free pool
/**
 * Initialize the pool manager: sorts the (size, depth) pairs by entry size
 * and creates one heap pool and one direct pool per size.
 *
 * @param bufferEntrySizes the memory sizes of each entry in the pools
 * @param bufferEntryDepths the maximum number of entries in the free pool
 */
public void initialize(int[] bufferEntrySizes, int[] bufferEntryDepths) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "initialize");
    }
    // order both lists from smallest to largest, based only on Entry Sizes;
    // this is an insertion sort that moves sizes and depths in lockstep.
    int len = bufferEntrySizes.length;
    int[] bSizes = new int[len];
    int[] bDepths = new int[len];
    int sizeCompare;
    int depth;
    int sizeSort;
    int j;
    for (int i = 0; i < len; i++) {
        sizeCompare = bufferEntrySizes[i];
        depth = bufferEntryDepths[i];
        // go backwards, for speed, since first Array List is
        // probably already ordered small to large
        for (j = i - 1; j >= 0; j--) {
            sizeSort = bSizes[j];
            if (sizeCompare > sizeSort) {
                // add the bigger one after the smaller one
                bSizes[j + 1] = sizeCompare;
                bDepths[j + 1] = depth;
                break;
            }
            // move current one down, since it is bigger
            bSizes[j + 1] = sizeSort;
            bDepths[j + 1] = bDepths[j];
        }
        if (j < 0) {
            // smallest so far, add it at the front of the list
            bSizes[0] = sizeCompare;
            bDepths[0] = depth;
        }
    }
    boolean tracking = trackingBuffers();
    this.pools = new WsByteBufferPool[len];
    this.poolsDirect = new WsByteBufferPool[len];
    this.poolSizes = new int[len];
    for (int i = 0; i < len; i++) {
        // make backing pool 10 times larger than local pools
        this.pools[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, false);
        this.poolsDirect[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, true);
        this.poolSizes[i] = bSizes[i];
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Number of pools created: " + this.poolSizes.length);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "initialize");
    }
}
/**
 * Grows the backing array so it can hold at least {@code size} elements.
 * Growth is geometric (at least doubling) to keep amortized cost low; a
 * no-op when the array is already large enough.
 */
public void ensureCapacity(int size) {
    if (size <= elements.length) {
        return;
    }
    reallocate(Math.max(2 * elements.length, size));
}
Initialize the pool manager with the number of pools, the entry sizes for each pool, and the maximum depth of the free pool. @param bufferEntrySizes the memory sizes of each entry in the pools @param bufferEntryDepths the maximum number of entries in the free pool
/**
 * Initialize the pool manager: sorts the (size, depth) pairs by entry size
 * and creates one heap pool and one direct pool per size.
 *
 * @param bufferEntrySizes the memory sizes of each entry in the pools
 * @param bufferEntryDepths the maximum number of entries in the free pool
 */
public void initialize(int[] bufferEntrySizes, int[] bufferEntryDepths) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "initialize");
    }
    // order both lists from smallest to largest, based only on Entry Sizes;
    // this is an insertion sort that moves sizes and depths in lockstep.
    int len = bufferEntrySizes.length;
    int[] bSizes = new int[len];
    int[] bDepths = new int[len];
    int sizeCompare;
    int depth;
    int sizeSort;
    int j;
    for (int i = 0; i < len; i++) {
        sizeCompare = bufferEntrySizes[i];
        depth = bufferEntryDepths[i];
        // go backwards, for speed, since first Array List is
        // probably already ordered small to large
        for (j = i - 1; j >= 0; j--) {
            sizeSort = bSizes[j];
            if (sizeCompare > sizeSort) {
                // add the bigger one after the smaller one
                bSizes[j + 1] = sizeCompare;
                bDepths[j + 1] = depth;
                break;
            }
            // move current one down, since it is bigger
            bSizes[j + 1] = sizeSort;
            bDepths[j + 1] = bDepths[j];
        }
        if (j < 0) {
            // smallest so far, add it at the front of the list
            bSizes[0] = sizeCompare;
            bDepths[0] = depth;
        }
    }
    boolean tracking = trackingBuffers();
    this.pools = new WsByteBufferPool[len];
    this.poolsDirect = new WsByteBufferPool[len];
    this.poolSizes = new int[len];
    for (int i = 0; i < len; i++) {
        // make backing pool 10 times larger than local pools
        this.pools[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, false);
        this.poolsDirect[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, true);
        this.poolSizes[i] = bSizes[i];
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Number of pools created: " + this.poolSizes.length);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "initialize");
    }
}
/**
 * Validates and applies the initial state of the circular byte buffer.
 *
 * @param bufferCapacity total capacity of the backing array (at least 2)
 * @param bufferSize current number of valid bytes, 0..bufferCapacity
 * @param readPosition initial read index, 0..bufferSize
 * @param writePosition initial write index, 0..bufferSize
 * @throws IllegalArgumentException when any argument is out of range
 */
private void circularByteBufferInitializer(int bufferCapacity, int bufferSize, int readPosition, int writePosition) {
    if (bufferCapacity < 2) {
        // The check rejects capacities below 2, so the message must say
        // "at least 2" -- the previous text wrongly claimed "greater than 2".
        throw new IllegalArgumentException("Buffer capacity must be at least 2 !");
    }
    if ((bufferSize < 0) || (bufferSize > bufferCapacity)) {
        throw new IllegalArgumentException("Buffer size must be a value between 0 and "+bufferCapacity+" !");
    }
    if ((readPosition < 0) || (readPosition > bufferSize)) {
        throw new IllegalArgumentException("Buffer read position must be a value between 0 and "+bufferSize+" !");
    }
    if ((writePosition < 0) || (writePosition > bufferSize)) {
        throw new IllegalArgumentException("Buffer write position must be a value between 0 and "+bufferSize+" !");
    }
    this.buffer = new byte[bufferCapacity];
    this.currentBufferSize = bufferSize;
    this.currentReadPosition = readPosition;
    this.currentWritePosition = writePosition;
}
Initialize the pool manager with the number of pools, the entry sizes for each pool, and the maximum depth of the free pool. @param bufferEntrySizes the memory sizes of each entry in the pools @param bufferEntryDepths the maximum number of entries in the free pool
/**
 * Initialize the pool manager: sorts the (size, depth) pairs by entry size
 * and creates one heap pool and one direct pool per size.
 *
 * @param bufferEntrySizes the memory sizes of each entry in the pools
 * @param bufferEntryDepths the maximum number of entries in the free pool
 */
public void initialize(int[] bufferEntrySizes, int[] bufferEntryDepths) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.entry(tc, "initialize");
    }
    // order both lists from smallest to largest, based only on Entry Sizes;
    // this is an insertion sort that moves sizes and depths in lockstep.
    int len = bufferEntrySizes.length;
    int[] bSizes = new int[len];
    int[] bDepths = new int[len];
    int sizeCompare;
    int depth;
    int sizeSort;
    int j;
    for (int i = 0; i < len; i++) {
        sizeCompare = bufferEntrySizes[i];
        depth = bufferEntryDepths[i];
        // go backwards, for speed, since first Array List is
        // probably already ordered small to large
        for (j = i - 1; j >= 0; j--) {
            sizeSort = bSizes[j];
            if (sizeCompare > sizeSort) {
                // add the bigger one after the smaller one
                bSizes[j + 1] = sizeCompare;
                bDepths[j + 1] = depth;
                break;
            }
            // move current one down, since it is bigger
            bSizes[j + 1] = sizeSort;
            bDepths[j + 1] = bDepths[j];
        }
        if (j < 0) {
            // smallest so far, add it at the front of the list
            bSizes[0] = sizeCompare;
            bDepths[0] = depth;
        }
    }
    boolean tracking = trackingBuffers();
    this.pools = new WsByteBufferPool[len];
    this.poolsDirect = new WsByteBufferPool[len];
    this.poolSizes = new int[len];
    for (int i = 0; i < len; i++) {
        // make backing pool 10 times larger than local pools
        this.pools[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, false);
        this.poolsDirect[i] = new WsByteBufferPool(bSizes[i], bDepths[i] * 10, tracking, true);
        this.poolSizes[i] = bSizes[i];
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "Number of pools created: " + this.poolSizes.length);
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) {
        Tr.exit(tc, "initialize");
    }
}
/**
 * Allocates a heap {@link ByteBuffer} of the given capacity whose limit is
 * preset to zero, i.e. it starts out with no readable content.
 *
 * @param capacity capacity of the new buffer in bytes
 * @return the empty buffer
 */
public static ByteBuffer allocate(int capacity) {
    final ByteBuffer empty = ByteBuffer.allocate(capacity);
    empty.limit(0);
    return empty;
}
// List lists all of the documents in an index. The documents are returned in // increasing ID order.
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, limit: -1, } if opts != nil { t.listStartID = opts.StartID if opts.Limit > 0 { t.limit = opts.Limit } t.idsOnly = opts.IDsOnly } return t }
func (tx *Tx) Indexes() ([]string, error) { if tx.db == nil { return nil, ErrTxClosed } names := make([]string, 0, len(tx.db.idxs)) for name := range tx.db.idxs { names = append(names, name) } sort.Strings(names) return names, nil }
// List lists all of the documents in an index. The documents are returned in // increasing ID order.
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, limit: -1, } if opts != nil { t.listStartID = opts.StartID if opts.Limit > 0 { t.limit = opts.Limit } t.idsOnly = opts.IDsOnly } return t }
func (o *SortOption) Indexes() []int { var ret []int for _, x := range o.orderByList { ret = append(ret, x.Index) } return ret }
// List lists all of the documents in an index. The documents are returned in // increasing ID order.
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, limit: -1, } if opts != nil { t.listStartID = opts.StartID if opts.Limit > 0 { t.limit = opts.Limit } t.idsOnly = opts.IDsOnly } return t }
func (list *TargetList) List() []TargetID { list.RLock() defer list.RUnlock() keys := []TargetID{} for k := range list.targets { keys = append(keys, k) } return keys }
// List lists all of the documents in an index. The documents are returned in // increasing ID order.
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, limit: -1, } if opts != nil { t.listStartID = opts.StartID if opts.Limit > 0 { t.limit = opts.Limit } t.idsOnly = opts.IDsOnly } return t }
// List returns up to limit messages starting at offset start, sorted by
// descending creation time (newest first). Only summary fields plus the
// raw message are selected from each document.
func (mongo *MongoDB) List(start int, limit int) (*data.Messages, error) {
	messages := &data.Messages{}
	err := mongo.Collection.Find(bson.M{}).Skip(start).Limit(limit).Sort("-created").Select(bson.M{
		"id":              1,
		"_id":             1,
		"from":            1,
		"to":              1,
		"content.headers": 1,
		"content.size":    1,
		"created":         1,
		"raw":             1,
	}).All(messages)
	if err != nil {
		log.Printf("Error loading messages: %s", err)
		return nil, err
	}
	return messages, nil
}
// List lists all of the documents in an index. The documents are returned in // increasing ID order.
func (x *Index) List(c context.Context, opts *ListOptions) *Iterator { t := &Iterator{ c: c, index: x, count: -1, listInclusive: true, more: moreList, limit: -1, } if opts != nil { t.listStartID = opts.StartID if opts.Limit > 0 { t.limit = opts.Limit } t.idsOnly = opts.IDsOnly } return t }
func (db *NodeSet) NodeList() NodeList { db.lock.RLock() defer db.lock.RUnlock() var values NodeList for _, key := range db.order { values = append(values, db.nodes[key]) } return values }
Loads the entity that holds the item type data when an Item is loaded. @param Item $item @param LifecycleEventArgs $event
/**
 * Loads the entity that holds the item type data when an Item is loaded.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function postLoad(Item $item, LifecycleEventArgs $event)
{
    // Resolve the concrete item type from the Item's MIME type, then load
    // the matching type entity from that type's own repository.
    $type = $this->itemDefinitions->getConvertedType($item->getMimeType());
    $definition = $this->itemDefinitions->get($type);

    $repository = $event
        ->getEntityManager()
        ->getRepository($definition->getEntityClass());

    /** @var \UJM\ExoBundle\Entity\ItemType\AbstractItem $typeEntity */
    $typeEntity = $repository->findOneBy([
        'question' => $item,
    ]);

    // Items without a stored type entity are left untouched.
    if (!empty($typeEntity)) {
        $item->setInteraction($typeEntity);
    }
}
/**
 * Cascades persistence to the Item's interaction entity, if it has one.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function prePersist(Item $item, LifecycleEventArgs $event)
{
    $interaction = $item->getInteraction();
    if ($interaction !== null) {
        $event->getEntityManager()->persist($interaction);
    }
}
Loads the entity that holds the item type data when an Item is loaded. @param Item $item @param LifecycleEventArgs $event
/**
 * Loads the entity that holds the item type data when an Item is loaded.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function postLoad(Item $item, LifecycleEventArgs $event)
{
    // Resolve the concrete item type from the Item's MIME type, then load
    // the matching type entity from that type's own repository.
    $type = $this->itemDefinitions->getConvertedType($item->getMimeType());
    $definition = $this->itemDefinitions->get($type);

    $repository = $event
        ->getEntityManager()
        ->getRepository($definition->getEntityClass());

    /** @var \UJM\ExoBundle\Entity\ItemType\AbstractItem $typeEntity */
    $typeEntity = $repository->findOneBy([
        'question' => $item,
    ]);

    // Items without a stored type entity are left untouched.
    if (!empty($typeEntity)) {
        $item->setInteraction($typeEntity);
    }
}
/**
 * Notifies listeners that class metadata has finished loading.
 * A no-op when no event dispatcher is configured.
 */
protected function dispatchClassMetadataLoadedEvent(ClassMetadataInterface $classMetadata): void
{
    if (null !== $this->eventDispatcher) {
        $this->eventDispatcher->dispatch(
            ClassMetadataLoadedEvent::LOADED_EVENT,
            new ClassMetadataLoadedEvent($classMetadata)
        );
    }
}
Loads the entity that holds the item type data when an Item is loaded. @param Item $item @param LifecycleEventArgs $event
/**
 * Loads the entity that holds the item type data when an Item is loaded.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function postLoad(Item $item, LifecycleEventArgs $event)
{
    // Resolve the concrete item type from the Item's MIME type, then load
    // the matching type entity from that type's own repository.
    $type = $this->itemDefinitions->getConvertedType($item->getMimeType());
    $definition = $this->itemDefinitions->get($type);

    $repository = $event
        ->getEntityManager()
        ->getRepository($definition->getEntityClass());

    /** @var \UJM\ExoBundle\Entity\ItemType\AbstractItem $typeEntity */
    $typeEntity = $repository->findOneBy([
        'question' => $item,
    ]);

    // Items without a stored type entity are left untouched.
    if (!empty($typeEntity)) {
        $item->setInteraction($typeEntity);
    }
}
/**
 * Records a loaded resource against the collector of the resource currently
 * being profiled. A no-op outside of debug mode, or when the loaded
 * resource is the current one itself.
 */
public function onLoaded(ResourceInterface $resource)
{
    if (!$this->isDebugging() || $this->current === $resource) {
        return;
    }

    $this->ensureCollector($this->current);
    $this->meta[$this->current]->addResource($resource);
}
Loads the entity that holds the item type data when an Item is loaded. @param Item $item @param LifecycleEventArgs $event
/**
 * Loads the entity that holds the item type data when an Item is loaded.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function postLoad(Item $item, LifecycleEventArgs $event)
{
    // Resolve the concrete item type from the Item's MIME type, then load
    // the matching type entity from that type's own repository.
    $type = $this->itemDefinitions->getConvertedType($item->getMimeType());
    $definition = $this->itemDefinitions->get($type);

    $repository = $event
        ->getEntityManager()
        ->getRepository($definition->getEntityClass());

    /** @var \UJM\ExoBundle\Entity\ItemType\AbstractItem $typeEntity */
    $typeEntity = $repository->findOneBy([
        'question' => $item,
    ]);

    // Items without a stored type entity are left untouched.
    if (!empty($typeEntity)) {
        $item->setInteraction($typeEntity);
    }
}
/**
 * Builds an Event instance for dispatching.
 *
 * @param string|array $eventName Event name, or a [name, subject] pair.
 * @param array $data Optional event payload.
 * @return \Cake\Event\Event
 */
protected function _prepareEvent($eventName, array $data = [])
{
    if (is_array($eventName)) {
        // A [name, subject] pair supplies its own subject.
        list($eventName, $subject) = $eventName;
    } else {
        // Default subject when only a name is given.
        $subject = new EventDispatcher();
    }

    return new Event($eventName, $subject, $data);
}
Loads the entity that holds the item type data when an Item is loaded. @param Item $item @param LifecycleEventArgs $event
/**
 * Loads the entity that holds the item type data when an Item is loaded.
 *
 * @param Item               $item
 * @param LifecycleEventArgs $event
 */
public function postLoad(Item $item, LifecycleEventArgs $event)
{
    // Resolve the concrete item type from the Item's MIME type, then load
    // the matching type entity from that type's own repository.
    $type = $this->itemDefinitions->getConvertedType($item->getMimeType());
    $definition = $this->itemDefinitions->get($type);

    $repository = $event
        ->getEntityManager()
        ->getRepository($definition->getEntityClass());

    /** @var \UJM\ExoBundle\Entity\ItemType\AbstractItem $typeEntity */
    $typeEntity = $repository->findOneBy([
        'question' => $item,
    ]);

    // Items without a stored type entity are left untouched.
    if (!empty($typeEntity)) {
        $item->setInteraction($typeEntity);
    }
}
/**
 * Opens and registers the store resource for the given instance.
 *
 * @param _instance instance whose type defines the store to use
 * @param _event    store event the resource is opened for
 * @return the opened store resource (also tracked in {@code this.storeStore})
 * @throws EFapsException on any eFaps-level failure
 */
public Resource getStoreResource(final Instance _instance, final Resource.StoreEvent _event) throws EFapsException {
    final Store store = Store.get(_instance.getType().getStoreId());
    // The resource is always assigned before use, so the former redundant
    // null pre-initialization of the local was dropped.
    final Resource storeRsrc = store.getResource(_instance);
    storeRsrc.open(_event);
    this.storeStore.add(storeRsrc);
    return storeRsrc;
}
Extract the normalized text from within an element @param fromElement @return extracted Text node (could be null)
/**
 * Extracts the first TEXT child of the (normalized) element.
 *
 * @param fromElement element to search
 * @return extracted Text node, or null when the element has no text child
 */
protected Text extractText(Element fromElement) {
    fromElement.normalize();
    NodeList children = fromElement.getChildNodes();
    final int count = children.getLength();
    for (int index = 0; index < count; ++index) {
        Node child = children.item(index);
        if (Node.TEXT_NODE == child.getNodeType()) {
            return (Text) child;
        }
    }
    return null;
}
/**
 * Returns the trimmed text of the element's first child, or the empty
 * string when the element has no leading text.
 *
 * @param element element whose leading text is wanted
 * @return trimmed text, or "" when there is none
 */
private static String getText(Element element) {
    Node first = element.getFirstChild();
    // Guard against a non-text first child (e.g. a nested element), which
    // previously caused a ClassCastException; also covers the null case.
    if (!(first instanceof Text)) {
        return "";
    }
    return ((Text) first).getData().trim();
}
Extract the normalized text from within an element @param fromElement @return extracted Text node (could be null)
/**
 * Extracts the first TEXT child of the (normalized) element.
 *
 * @param fromElement element to search
 * @return extracted Text node, or null when the element has no text child
 */
protected Text extractText(Element fromElement) {
    fromElement.normalize();
    NodeList children = fromElement.getChildNodes();
    final int count = children.getLength();
    for (int index = 0; index < count; ++index) {
        Node child = children.item(index);
        if (Node.TEXT_NODE == child.getNodeType()) {
            return (Text) child;
        }
    }
    return null;
}
/**
 * Returns the element's leading text when its first child is a text node;
 * otherwise serializes the whole element back to XML via {@code toXml}.
 */
private String parseXmlElement(Element element) {
    if (element.getFirstChild() instanceof Text) {
        Text text = (Text) element.getFirstChild();
        return text.getData();
    }
    return toXml(element);
}
Extract the normalized text from within an element @param fromElement @return extracted Text node (could be null)
/**
 * Extracts the first TEXT child of the (normalized) element.
 *
 * @param fromElement element to search
 * @return extracted Text node, or null when the element has no text child
 */
protected Text extractText(Element fromElement) {
    fromElement.normalize();
    NodeList children = fromElement.getChildNodes();
    final int count = children.getLength();
    for (int index = 0; index < count; ++index) {
        Node child = children.item(index);
        if (Node.TEXT_NODE == child.getNodeType()) {
            return (Text) child;
        }
    }
    return null;
}
/**
 * Returns the concatenation of all direct text-node children of this node.
 * Non-text children are skipped (their nested text is NOT included).
 */
public @Nonnull String text() {
    return new NodeListSpliterator(node.getChildNodes()).stream()
        .filter(it -> it instanceof Text)
        .map(it -> ((Text) it).getNodeValue())
        .collect(joining());
}
Extract the normalized text from within an element @param fromElement @return extracted Text node (could be null)
/**
 * Extracts the first TEXT child of the (normalized) element.
 *
 * @param fromElement element to search
 * @return extracted Text node, or null when the element has no text child
 */
protected Text extractText(Element fromElement) {
    fromElement.normalize();
    NodeList children = fromElement.getChildNodes();
    final int count = children.getLength();
    for (int index = 0; index < count; ++index) {
        Node child = children.item(index);
        if (Node.TEXT_NODE == child.getNodeType()) {
            return (Text) child;
        }
    }
    return null;
}
/**
 * Returns the concatenated text of all direct TEXT children of this
 * element's DOM node.
 *
 * @return combined text content, "" when there is none
 */
public String getContent() {
    // StringBuilder avoids the quadratic cost of repeated String +=.
    StringBuilder content = new StringBuilder();
    NodeList list = dom.getChildNodes();
    for (int i = 0; i < list.getLength(); i++) {
        if (list.item(i) instanceof Text) {
            content.append(list.item(i).getNodeValue());
        }
    }
    return content.toString();
}
Extract the normalized text from within an element @param fromElement @return extracted Text node (could be null)
/**
 * Extracts the first TEXT child of the (normalized) element.
 *
 * @param fromElement element to search
 * @return extracted Text node, or null when the element has no text child
 */
protected Text extractText(Element fromElement) {
    fromElement.normalize();
    NodeList children = fromElement.getChildNodes();
    final int count = children.getLength();
    for (int index = 0; index < count; ++index) {
        Node child = children.item(index);
        if (Node.TEXT_NODE == child.getNodeType()) {
            return (Text) child;
        }
    }
    return null;
}
/**
 * Unmarshalls an embedded XML document carried as a text child of
 * {@code domElement}. The first text child that parses into a document
 * wins; when none does, the element itself is unmarshalled instead.
 */
@Override
public XMLObject unmarshall(Element domElement) throws UnmarshallingException {
    Document newDocument = null;
    Node childNode = domElement.getFirstChild();
    while (childNode != null) {
        if (childNode.getNodeType() != Node.TEXT_NODE) {
            // We skip everything except for a text node.
            log.info("Ignoring node {} - it is not a text node", childNode.getNodeName());
        } else {
            newDocument = parseContents((Text) childNode, domElement);
            if (newDocument != null) {
                break;
            }
        }
        childNode = childNode.getNextSibling();
    }
    return super.unmarshall(newDocument != null ? newDocument.getDocumentElement() : domElement);
}
// handle processes a request from a Multiwatcher to the storeManager.
func (sm *storeManager) handle(req *request) {
	if req.w.stopped {
		// The watcher has previously been stopped: refuse the request
		// (or just drop a stop request, which has no reply channel).
		if req.reply != nil {
			select {
			case req.reply <- false:
			case <-sm.tomb.Dying():
			}
		}
		return
	}
	if req.reply == nil {
		// This is a request to stop the watcher. Reject every request
		// still pending for it; a false reply tells the waiter to give
		// up. Note the loop variable shadows the parameter on purpose.
		for req := sm.waiting[req.w]; req != nil; req = req.next {
			select {
			case req.reply <- false:
			case <-sm.tomb.Dying():
			}
		}
		delete(sm.waiting, req.w)
		req.w.stopped = true
		sm.leave(req.w)
		return
	}
	// Add request to head of list.
	req.next = sm.waiting[req.w]
	sm.waiting[req.w] = req
}
func NewAckingResourceMutatorWrapper(mutator ResourceMutator, nodeToID NodeToIDFunc) *AckingResourceMutatorWrapper { return &AckingResourceMutatorWrapper{ mutator: mutator, nodeToID: nodeToID, pendingCompletions: make(map[*completion.Completion]*pendingCompletion), } }
// handle processes a request from a Multiwatcher to the storeManager.
func (sm *storeManager) handle(req *request) { if req.w.stopped { // The watcher has previously been stopped. if req.reply != nil { select { case req.reply <- false: case <-sm.tomb.Dying(): } } return } if req.reply == nil { // This is a request to stop the watcher. for req := sm.waiting[req.w]; req != nil; req = req.next { select { case req.reply <- false: case <-sm.tomb.Dying(): } } delete(sm.waiting, req.w) req.w.stopped = true sm.leave(req.w) return } // Add request to head of list. req.next = sm.waiting[req.w] sm.waiting[req.w] = req }
func newStore() *multiwatcherStore { return &multiwatcherStore{ entities: make(map[interface{}]*list.Element), list: list.New(), } }
// handle processes a request from a Multiwatcher to the storeManager.
func (sm *storeManager) handle(req *request) { if req.w.stopped { // The watcher has previously been stopped. if req.reply != nil { select { case req.reply <- false: case <-sm.tomb.Dying(): } } return } if req.reply == nil { // This is a request to stop the watcher. for req := sm.waiting[req.w]; req != nil; req = req.next { select { case req.reply <- false: case <-sm.tomb.Dying(): } } delete(sm.waiting, req.w) req.w.stopped = true sm.leave(req.w) return } // Add request to head of list. req.next = sm.waiting[req.w] sm.waiting[req.w] = req }
func New(s store.Store, ctx context.Context) (watcher.Watcher, error) { return watcher.New(s, ctx, KEY, convert, invertKey) }
// handle processes a request from a Multiwatcher to the storeManager.
func (sm *storeManager) handle(req *request) { if req.w.stopped { // The watcher has previously been stopped. if req.reply != nil { select { case req.reply <- false: case <-sm.tomb.Dying(): } } return } if req.reply == nil { // This is a request to stop the watcher. for req := sm.waiting[req.w]; req != nil; req = req.next { select { case req.reply <- false: case <-sm.tomb.Dying(): } } delete(sm.waiting, req.w) req.w.stopped = true sm.leave(req.w) return } // Add request to head of list. req.next = sm.waiting[req.w] sm.waiting[req.w] = req }
func (c *Cacher) Watch(ctx context.Context, key string, resourceVersion string, pred storage.SelectionPredicate) (watch.Interface, error) { watchRV, err := c.versioner.ParseResourceVersion(resourceVersion) if err != nil { return nil, err } c.ready.wait() triggerValue, triggerSupported := "", false // TODO: Currently we assume that in a given Cacher object, any <predicate> that is // passed here is aware of exactly the same trigger (at most one). // Thus, either 0 or 1 values will be returned. if matchValues := pred.MatcherIndex(); len(matchValues) > 0 { triggerValue, triggerSupported = matchValues[0].Value, true } // If there is triggerFunc defined, but triggerSupported is false, // we can't narrow the amount of events significantly at this point. // // That said, currently triggerFunc is defined only for Pods and Nodes, // and there is only constant number of watchers for which triggerSupported // is false (excluding those issues explicitly by users). // Thus, to reduce the risk of those watchers blocking all watchers of a // given resource in the system, we increase the sizes of buffers for them. chanSize := 10 if c.triggerFunc != nil && !triggerSupported { // TODO: We should tune this value and ideally make it dependent on the // number of objects of a given type and/or their churn. chanSize = 1000 } // Determine watch timeout('0' means deadline is not set, ignore checking) deadline, _ := ctx.Deadline() // Create a watcher here to reduce memory allocations under lock, // given that memory allocation may trigger GC and block the thread. // Also note that emptyFunc is a placeholder, until we will be able // to compute watcher.forget function (which has to happen under lock). watcher := newCacheWatcher(chanSize, filterWithAttrsFunction(key, pred), emptyFunc, c.versioner, deadline, pred.AllowWatchBookmarks, c.objectType) // We explicitly use thread unsafe version and do locking ourself to ensure that // no new events will be processed in the meantime. 
The watchCache will be unlocked // on return from this function. // Note that we cannot do it under Cacher lock, to avoid a deadlock, since the // underlying watchCache is calling processEvent under its lock. c.watchCache.RLock() defer c.watchCache.RUnlock() initEvents, err := c.watchCache.GetAllEventsSinceThreadUnsafe(watchRV) if err != nil { // To match the uncached watch implementation, once we have passed authn/authz/admission, // and successfully parsed a resource version, other errors must fail with a watch event of type ERROR, // rather than a directly returned error. return newErrWatcher(err), nil } // With some events already sent, update resourceVersion so that // events that were buffered and not yet processed won't be delivered // to this watcher second time causing going back in time. if len(initEvents) > 0 { watchRV = initEvents[len(initEvents)-1].ResourceVersion } func() { c.Lock() defer c.Unlock() // Update watcher.forget function once we can compute it. watcher.forget = forgetWatcher(c, c.watcherIdx, triggerValue, triggerSupported) c.watchers.addWatcher(watcher, c.watcherIdx, triggerValue, triggerSupported) // Add it to the queue only when server and client support watch bookmarks. if c.watchBookmarkEnabled && watcher.allowWatchBookmarks { c.bookmarkWatchers.addWatcher(watcher) } c.watcherIdx++ }() go watcher.process(ctx, initEvents, watchRV) return watcher, nil }
// handle processes a request from a Multiwatcher to the storeManager.
func (sm *storeManager) handle(req *request) { if req.w.stopped { // The watcher has previously been stopped. if req.reply != nil { select { case req.reply <- false: case <-sm.tomb.Dying(): } } return } if req.reply == nil { // This is a request to stop the watcher. for req := sm.waiting[req.w]; req != nil; req = req.next { select { case req.reply <- false: case <-sm.tomb.Dying(): } } delete(sm.waiting, req.w) req.w.stopped = true sm.leave(req.w) return } // Add request to head of list. req.next = sm.waiting[req.w] sm.waiting[req.w] = req }
func (mux *ServeMux) Handler(r *Request) Handler { var matcher Matcher var entry *muxEntry // Acquire a read lock to ensure that list would not // be modified during the search of registered handler. mux.mu.RLock() var matched bool // Try to match the processing request to any of the registered // filters. for matcher, entry = range mux.handlers { if matched = matcher.Match(r); matched { break } } mux.mu.RUnlock() // Use the DefaultHandler when there are no matching entries in the list. if !matched { return DefaultHandler } // If the retrieved entry is not disposable one, we will // return it as is without any processing. if !entry.once { return entry.handler } // But when the entry is disposable, we need to remove it // from the list. // We need to acquire the write lock in order to remove the // entry from the root. This procedure does not guarantee, // that handler will process the first message. mux.mu.Lock() defer mux.mu.Unlock() // If the concurrent message have already started the message // processing, it will be no longer presented in the list. if _, ok := mux.handlers[matcher]; !ok { return DiscardHandler } // Remove the entry from the list if it is marked as disposable. delete(mux.handlers, matcher) return entry.handler }
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool { // Calculate the number of ns that have passed since our last call now := unixNano() passed := now - atomic.SwapUint64(&rl.lastCheck, now) // Add them to our allowance rate := atomic.LoadUint64(&rl.rate) current := atomic.AddUint64(&rl.allowance, passed*rate) // Ensure our allowance is not over maximum if max := atomic.LoadUint64(&rl.max); current > max { atomic.AddUint64(&rl.allowance, max-current) current = max } // If our allowance is less than one unit, rate-limit! if current < rl.unit { return true } // Not limited, subtract a unit atomic.AddUint64(&rl.allowance, -rl.unit) return false }
func LimitFuncHandler(lmt *limiter.Limiter, nextFunc func(http.ResponseWriter, *http.Request)) http.Handler { return LimitHandler(lmt, http.HandlerFunc(nextFunc)) }
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool { // Calculate the number of ns that have passed since our last call now := unixNano() passed := now - atomic.SwapUint64(&rl.lastCheck, now) // Add them to our allowance rate := atomic.LoadUint64(&rl.rate) current := atomic.AddUint64(&rl.allowance, passed*rate) // Ensure our allowance is not over maximum if max := atomic.LoadUint64(&rl.max); current > max { atomic.AddUint64(&rl.allowance, max-current) current = max } // If our allowance is less than one unit, rate-limit! if current < rl.unit { return true } // Not limited, subtract a unit atomic.AddUint64(&rl.allowance, -rl.unit) return false }
func (r *RateLimiter) Limit(dataSize int) time.Duration { r.lock.Lock() defer r.lock.Unlock() // update time var duration time.Duration = time.Duration(0) if r.bandwidth == 0 { return duration } current := time.Now() elapsedTime := current.Sub(r.lastUpdate) r.lastUpdate = current allowance := r.allowance + float64(elapsedTime)*r.bandwidth // allowance can't exceed bandwidth if allowance > r.maxAllowance { allowance = r.maxAllowance } allowance -= float64(dataSize) if allowance < 0 { // sleep until allowance is back to 0 duration = time.Duration(-allowance / r.bandwidth) // rate limiting was applied, record stats r.count++ r.duration += duration } r.allowance = allowance return duration }
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool { // Calculate the number of ns that have passed since our last call now := unixNano() passed := now - atomic.SwapUint64(&rl.lastCheck, now) // Add them to our allowance rate := atomic.LoadUint64(&rl.rate) current := atomic.AddUint64(&rl.allowance, passed*rate) // Ensure our allowance is not over maximum if max := atomic.LoadUint64(&rl.max); current > max { atomic.AddUint64(&rl.allowance, max-current) current = max } // If our allowance is less than one unit, rate-limit! if current < rl.unit { return true } // Not limited, subtract a unit atomic.AddUint64(&rl.allowance, -rl.unit) return false }
func UsingRateLimit(rps float64) Option { return func(api *API) error { // because ratelimiter doesnt do any windowing // setting burst makes it difficult to enforce a fixed rate // so setting it equal to 1 this effectively disables bursting // this doesn't check for sensible values, ultimately the api will enforce that the value is ok api.rateLimiter = rate.NewLimiter(rate.Limit(rps), 1) return nil } }
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool { // Calculate the number of ns that have passed since our last call now := unixNano() passed := now - atomic.SwapUint64(&rl.lastCheck, now) // Add them to our allowance rate := atomic.LoadUint64(&rl.rate) current := atomic.AddUint64(&rl.allowance, passed*rate) // Ensure our allowance is not over maximum if max := atomic.LoadUint64(&rl.max); current > max { atomic.AddUint64(&rl.allowance, max-current) current = max } // If our allowance is less than one unit, rate-limit! if current < rl.unit { return true } // Not limited, subtract a unit atomic.AddUint64(&rl.allowance, -rl.unit) return false }
func isResourceGuaranteed(container *api.Container, resource api.ResourceName) bool { // A container resource is guaranteed if its request == limit. // If request == limit, the user is very confident of resource consumption. req, hasReq := container.Resources.Requests[resource] limit, hasLimit := container.Resources.Limits[resource] if !hasReq || !hasLimit { return false } return req.Cmp(limit) == 0 && req.Value() != 0 }
// Limit returns true if rate was exceeded
func (rl *RateLimiter) Limit() bool { // Calculate the number of ns that have passed since our last call now := unixNano() passed := now - atomic.SwapUint64(&rl.lastCheck, now) // Add them to our allowance rate := atomic.LoadUint64(&rl.rate) current := atomic.AddUint64(&rl.allowance, passed*rate) // Ensure our allowance is not over maximum if max := atomic.LoadUint64(&rl.max); current > max { atomic.AddUint64(&rl.allowance, max-current) current = max } // If our allowance is less than one unit, rate-limit! if current < rl.unit { return true } // Not limited, subtract a unit atomic.AddUint64(&rl.allowance, -rl.unit) return false }
func admitImage(size int64, limit corev1.LimitRangeItem) error { if limit.Type != imagev1.LimitTypeImage { return nil } limitQuantity, ok := limit.Max[corev1.ResourceStorage] if !ok { return nil } imageQuantity := resource.NewQuantity(size, resource.BinarySI) if limitQuantity.Cmp(*imageQuantity) < 0 { // image size is larger than the permitted limit range max size, image is forbidden return newLimitExceededError(imagev1.LimitTypeImage, corev1.ResourceStorage, imageQuantity, &limitQuantity) } return nil }
Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def previous(self, day_of_week=None): """ Modify to the previous occurrence of a given day of the week. If no day_of_week is provided, modify to the previous occurrence of the current day of the week. Use the supplied consts to indicate the desired day_of_week, ex. pendulum.MONDAY. :param day_of_week: The previous day of week to reset to. :type day_of_week: int or None :rtype: Date """ if day_of_week is None: day_of_week = self.day_of_week if day_of_week < SUNDAY or day_of_week > SATURDAY: raise ValueError("Invalid day of week") dt = self.subtract(days=1) while dt.day_of_week != day_of_week: dt = dt.subtract(days=1) return dt
Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def next(self, day_of_week=None, keep_time=False): """ Modify to the next occurrence of a given day of the week. If no day_of_week is provided, modify to the next occurrence of the current day of the week. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :param day_of_week: The next day of week to reset to. :type day_of_week: int or None :param keep_time: Whether to keep the time information or not. :type keep_time: bool :rtype: DateTime """ if day_of_week is None: day_of_week = self.day_of_week if day_of_week < SUNDAY or day_of_week > SATURDAY: raise ValueError("Invalid day of week") if keep_time: dt = self else: dt = self.start_of("day") dt = dt.add(days=1) while dt.day_of_week != day_of_week: dt = dt.add(days=1) return dt
Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def day(self, num): """Return the given day of week as a date object. Day 0 is the Monday.""" d = date(self.year, 1, 4) # The Jan 4th must be in week 1 according to ISO return d + timedelta(weeks=self.week-1, days=-d.weekday() + num)
Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def get_period_last_3_months() -> str: """ Returns the last week as a period string """ today = Datum() today.today() # start_date = today - timedelta(weeks=13) start_date = today.clone() start_date.subtract_months(3) period = get_period(start_date.date, today.date) return period
Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime
def _last_of_quarter(self, day_of_week=None): """ Modify to the last occurrence of a given day of the week in the current quarter. If no day_of_week is provided, modify to the last day of the quarter. Use the supplied consts to indicate the desired day_of_week, ex. DateTime.MONDAY. :type day_of_week: int or None :rtype: DateTime """ return self.on(self.year, self.quarter * 3, 1).last_of("month", day_of_week)
def get_last_weekday_in_month(year, month, weekday): """Get the last weekday in a given month. e.g: >>> # the last monday in Jan 2013 >>> Calendar.get_last_weekday_in_month(2013, 1, MON) datetime.date(2013, 1, 28) """ day = date(year, month, monthrange(year, month)[1]) while True: if day.weekday() == weekday: break day = day - timedelta(days=1) return day
Compile Sass content using Leafo's ScssPhp compiler. @link https://github.com/leafo/scssphp @param string $content Content to compile. @param array $directories The import directories to make available when compiling. @return string Compiled Sass content.
private static function compileSassContentWithLeafoScss(string $content, array $directories) { if (empty($content)) { return ''; } if (self::$sassCompilerInstance == null) { self::$sassCompilerInstance = self::getLeafoScssInstance(); } self::updateLeafoScssInstance(self::$sassCompilerInstance, $directories); return self::$sassCompilerInstance->compile($content); }
def compile_sass(self, sass_filename, sass_fileurl): """ Compile the given SASS file into CSS """ compile_kwargs = { 'filename': sass_filename, 'include_paths': SassProcessor.include_paths + APPS_INCLUDE_DIRS, 'custom_functions': get_custom_functions(), } if self.sass_precision: compile_kwargs['precision'] = self.sass_precision if self.sass_output_style: compile_kwargs['output_style'] = self.sass_output_style content = sass.compile(**compile_kwargs) self.save_to_destination(content, sass_filename, sass_fileurl) self.processed_files.append(sass_filename) if self.verbosity > 1: self.stdout.write("Compiled SASS/SCSS file: '{0}'\n".format(sass_filename))
Compile Sass content using Leafo's ScssPhp compiler. @link https://github.com/leafo/scssphp @param string $content Content to compile. @param array $directories The import directories to make available when compiling. @return string Compiled Sass content.
private static function compileSassContentWithLeafoScss(string $content, array $directories) { if (empty($content)) { return ''; } if (self::$sassCompilerInstance == null) { self::$sassCompilerInstance = self::getLeafoScssInstance(); } self::updateLeafoScssInstance(self::$sassCompilerInstance, $directories); return self::$sassCompilerInstance->compile($content); }
def compile_scss_normal Dir["#{@path}.scss"].select { |f| File.file? f }.each do |file| next if File.basename(file).chr == '_' scss_file = File.open(file, 'rb') { |f| f.read } output_file = File.open( file.split('.').reverse.drop(1).reverse.join('.'), "w" ) output_file << Sass::Engine.new(scss_file, { syntax: :scss, quiet: true, style: :compressed }).render output_file.close end end
Compile Sass content using Leafo's ScssPhp compiler. @link https://github.com/leafo/scssphp @param string $content Content to compile. @param array $directories The import directories to make available when compiling. @return string Compiled Sass content.
private static function compileSassContentWithLeafoScss(string $content, array $directories) { if (empty($content)) { return ''; } if (self::$sassCompilerInstance == null) { self::$sassCompilerInstance = self::getLeafoScssInstance(); } self::updateLeafoScssInstance(self::$sassCompilerInstance, $directories); return self::$sassCompilerInstance->compile($content); }
public static function css_files(array $files) { if (empty($files)) { return ''; } $compressed = array(); foreach ($files as $file) { $content = file_get_contents($file); if ($content === false) { $compressed[] = "\n\n/* Cannot read CSS file ".basename(dirname(dirname($file))).'/'.basename(dirname($file)).'/'.basename($file)."*/\n\n"; continue; } $compressed[] = self::css($content); } return implode("\n", $compressed); }
Compile Sass content using Leafo's ScssPhp compiler. @link https://github.com/leafo/scssphp @param string $content Content to compile. @param array $directories The import directories to make available when compiling. @return string Compiled Sass content.
private static function compileSassContentWithLeafoScss(string $content, array $directories) { if (empty($content)) { return ''; } if (self::$sassCompilerInstance == null) { self::$sassCompilerInstance = self::getLeafoScssInstance(); } self::updateLeafoScssInstance(self::$sassCompilerInstance, $directories); return self::$sassCompilerInstance->compile($content); }
public function process($source, $force = true) { list ($webRoot, $source) = $this->_findWebRoot($source); $lessFile = FS::clean($webRoot . Configure::read('App.lessBaseUrl') . $source, '/'); $this->_setForce($force); $less = new Less($this->_config); if (!FS::isFile($lessFile)) { return null; } list($source, $isExpired) = $less->compile($lessFile); if ($isExpired) { $cacheId = FS::firstLine($source); $comment = '/* resource:' . str_replace(FS::clean(ROOT, '/'), '', $lessFile) . ' */' . PHP_EOL; $fileHead = implode('', [$cacheId, Str::low($comment)]); $css = $this->_normalizeContent($source, $fileHead); $this->_write($source, $css); } $source = str_replace(FS::clean(APP_ROOT . '/' . Configure::read('App.webroot'), '/'), '', $source); return $source; }
Compile Sass content using Leafo's ScssPhp compiler. @link https://github.com/leafo/scssphp @param string $content Content to compile. @param array $directories The import directories to make available when compiling. @return string Compiled Sass content.
private static function compileSassContentWithLeafoScss(string $content, array $directories) { if (empty($content)) { return ''; } if (self::$sassCompilerInstance == null) { self::$sassCompilerInstance = self::getLeafoScssInstance(); } self::updateLeafoScssInstance(self::$sassCompilerInstance, $directories); return self::$sassCompilerInstance->compile($content); }
def render_css(self, fn=None, text=None, margin='', indent='\t'): """output css using the Sass processor""" fn = fn or os.path.splitext(self.fn)[0]+'.css' if not os.path.exists(os.path.dirname(fn)): os.makedirs(os.path.dirname(fn)) curdir = os.path.abspath(os.curdir) os.chdir(os.path.dirname(fn)) # needed in order for scss to relative @import text = text or self.render_styles() if text != '': text = sass.compile(string=text) os.chdir(curdir) return CSS(fn=fn, text=text)
Reset the date to the last day of the decade. :rtype: Date
def _end_of_decade(self): """ Reset the date to the last day of the decade. :rtype: Date """ year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1 return self.set(year, 12, 31)
private Date[] getBeforeDays(int year, int month) { Calendar cal = Calendar.getInstance(); cal.set(year, month, 1); int dayNum = getDayIndex(year, month, 1) - 1; Date[] date = new Date[dayNum]; if (dayNum > 0) for (int i = 0; i < dayNum; i++) { cal.add(Calendar.DAY_OF_MONTH, -1); date[i] = cal.getTime(); } return date; }
Reset the date to the last day of the decade. :rtype: Date
def _end_of_decade(self): """ Reset the date to the last day of the decade. :rtype: Date """ year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1 return self.set(year, 12, 31)
def yesterday(date=None): """yesterday once more""" if not date: return _date - datetime.timedelta(days=1) else: current_date = parse(date) return current_date - datetime.timedelta(days=1)
Reset the date to the last day of the decade. :rtype: Date
def _end_of_decade(self): """ Reset the date to the last day of the decade. :rtype: Date """ year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1 return self.set(year, 12, 31)
def rollforward(self, date): """Roll date forward to nearest start of year""" if self.onOffset(date): return date else: return date + YearBegin(month=self.month)
Reset the date to the last day of the decade. :rtype: Date
def _end_of_decade(self): """ Reset the date to the last day of the decade. :rtype: Date """ year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1 return self.set(year, 12, 31)
def today(year=None): """this day, last year""" return datetime.date(int(year), _date.month, _date.day) if year else _date
Reset the date to the last day of the decade. :rtype: Date
def _end_of_decade(self): """ Reset the date to the last day of the decade. :rtype: Date """ year = self.year - self.year % YEARS_PER_DECADE + YEARS_PER_DECADE - 1 return self.set(year, 12, 31)
def gday_of_year(self): """Return the number of days since January 1 of the given year.""" return (self.date - dt.date(self.date.year, 1, 1)).days
Puts an archive data row in an index.
private function putRowInIndex(&$index, $metadataNamesToIndexBy, $row, $idSite, $period) { $currentLevel = & $index; foreach ($metadataNamesToIndexBy as $metadataName) { if ($metadataName == DataTableFactory::TABLE_METADATA_SITE_INDEX) { $key = $idSite; } elseif ($metadataName == DataTableFactory::TABLE_METADATA_PERIOD_INDEX) { $key = $period; } else { $key = $row[self::METADATA_CONTAINER_ROW_KEY][$metadataName]; } if (!isset($currentLevel[$key])) { $currentLevel[$key] = array(); } $currentLevel = & $currentLevel[$key]; } $currentLevel = $row; }
def _insert_row(self, i, index): """ Insert a new row in the Series. :param i: index location to insert :param index: index value to insert into the index list :return: nothing """ if i == len(self._index): self._add_row(index) else: self._index.insert(i, index) self._data.insert(i, None)
Puts an archive data row in an index.
private function putRowInIndex(&$index, $metadataNamesToIndexBy, $row, $idSite, $period) { $currentLevel = & $index; foreach ($metadataNamesToIndexBy as $metadataName) { if ($metadataName == DataTableFactory::TABLE_METADATA_SITE_INDEX) { $key = $idSite; } elseif ($metadataName == DataTableFactory::TABLE_METADATA_PERIOD_INDEX) { $key = $period; } else { $key = $row[self::METADATA_CONTAINER_ROW_KEY][$metadataName]; } if (!isset($currentLevel[$key])) { $currentLevel[$key] = array(); } $currentLevel = & $currentLevel[$key]; } $currentLevel = $row; }
def _index(self, row): """Add a row to the internal list of rows without writing it to disk. This function should keep the data structure consistent so it's usable for both adding new rows, and loading pre-existing histories. """ self.rows.append(row) self._keys.update(row.keys()) self._steps += 1
Puts an archive data row in an index.
private function putRowInIndex(&$index, $metadataNamesToIndexBy, $row, $idSite, $period) { $currentLevel = & $index; foreach ($metadataNamesToIndexBy as $metadataName) { if ($metadataName == DataTableFactory::TABLE_METADATA_SITE_INDEX) { $key = $idSite; } elseif ($metadataName == DataTableFactory::TABLE_METADATA_PERIOD_INDEX) { $key = $period; } else { $key = $row[self::METADATA_CONTAINER_ROW_KEY][$metadataName]; } if (!isset($currentLevel[$key])) { $currentLevel[$key] = array(); } $currentLevel = & $currentLevel[$key]; } $currentLevel = $row; }
func (dt *DbfTable) InsertRecord() int { if row := dt.findSpot(); row > -1 { // undelete selected row dt.dataStore[dt.getRowOffset(row)] = 0x20 return row } return dt.AddRecord() }
Puts an archive data row in an index.
private function putRowInIndex(&$index, $metadataNamesToIndexBy, $row, $idSite, $period) { $currentLevel = & $index; foreach ($metadataNamesToIndexBy as $metadataName) { if ($metadataName == DataTableFactory::TABLE_METADATA_SITE_INDEX) { $key = $idSite; } elseif ($metadataName == DataTableFactory::TABLE_METADATA_PERIOD_INDEX) { $key = $period; } else { $key = $row[self::METADATA_CONTAINER_ROW_KEY][$metadataName]; } if (!isset($currentLevel[$key])) { $currentLevel[$key] = array(); } $currentLevel = & $currentLevel[$key]; } $currentLevel = $row; }
private static void writeLineToMasterIndex(FSDataOutputStream stream, long startHash, long endHash, long indexStartPos, long indexEndPos) throws IOException { String toWrite = startHash + " " + endHash + " " + indexStartPos + " " + indexEndPos + "\n"; stream.write(toWrite.getBytes()); }
Puts an archive data row in an index.
private function putRowInIndex(&$index, $metadataNamesToIndexBy, $row, $idSite, $period) { $currentLevel = & $index; foreach ($metadataNamesToIndexBy as $metadataName) { if ($metadataName == DataTableFactory::TABLE_METADATA_SITE_INDEX) { $key = $idSite; } elseif ($metadataName == DataTableFactory::TABLE_METADATA_PERIOD_INDEX) { $key = $period; } else { $key = $row[self::METADATA_CONTAINER_ROW_KEY][$metadataName]; } if (!isset($currentLevel[$key])) { $currentLevel[$key] = array(); } $currentLevel = & $currentLevel[$key]; } $currentLevel = $row; }
public void writeIndexes(JobKey jobKey) throws IOException { // Defensive coding if (jobKey != null) { Table historyByJobIdTable = null; try { historyByJobIdTable = hbaseConnection .getTable(TableName.valueOf(Constants.HISTORY_BY_JOBID_TABLE)); byte[] jobKeyBytes = jobKeyConv.toBytes(jobKey); byte[] rowKeyBytes = jobIdConv.toBytes( new QualifiedJobId(jobKey.getCluster(), jobKey.getJobId())); // Insert (or update) row with jobid as the key Put p = new Put(rowKeyBytes); p.addColumn(Constants.INFO_FAM_BYTES, Constants.ROWKEY_COL_BYTES, jobKeyBytes); historyByJobIdTable.put(p); } finally { if (historyByJobIdTable != null) { historyByJobIdTable.close(); } } } }
Is the point near any points in the multi lat lng @param point point @param multiLatLng multi lat lng @param tolerance distance tolerance @return true if near
public static boolean isPointNearMultiLatLng(LatLng point, MultiLatLng multiLatLng, double tolerance) { boolean near = false; for (LatLng multiPoint : multiLatLng.getLatLngs()) { near = isPointNearPoint(point, multiPoint, tolerance); if (near) { break; } } return near; }
public boolean isCloseTo(GeoPoint point, double tolerance, MapView mapView) { return getCloseTo(point, tolerance, mapView) != null; }
Is the point near any points in the multi lat lng @param point point @param multiLatLng multi lat lng @param tolerance distance tolerance @return true if near
public static boolean isPointNearMultiLatLng(LatLng point, MultiLatLng multiLatLng, double tolerance) { boolean near = false; for (LatLng multiPoint : multiLatLng.getLatLngs()) { near = isPointNearPoint(point, multiPoint, tolerance); if (near) { break; } } return near; }
private function _isInTolerance($basePointIndex, $subjectPointIndex) { $radius = $this->distanceBetweenPoints( $this->points[$basePointIndex], $this->points[$subjectPointIndex] ); return $radius < $this->_tolerance; }
Is the point near any points in the multi lat lng @param point point @param multiLatLng multi lat lng @param tolerance distance tolerance @return true if near
public static boolean isPointNearMultiLatLng(LatLng point, MultiLatLng multiLatLng, double tolerance) { boolean near = false; for (LatLng multiPoint : multiLatLng.getLatLngs()) { near = isPointNearPoint(point, multiPoint, tolerance); if (near) { break; } } return near; }
public static boolean isPointOnPolyline(LatLng point, PolylineOptions polyline, boolean geodesic, double tolerance) { return PolyUtil.isLocationOnPath(point, polyline.getPoints(), geodesic, tolerance); }
Is the point near any points in the multi lat lng @param point point @param multiLatLng multi lat lng @param tolerance distance tolerance @return true if near
public static boolean isPointNearMultiLatLng(LatLng point, MultiLatLng multiLatLng, double tolerance) { boolean near = false; for (LatLng multiPoint : multiLatLng.getLatLngs()) { near = isPointNearPoint(point, multiPoint, tolerance); if (near) { break; } } return near; }
int subSimplify(PointList points, int fromIndex, int lastIndex) { if (lastIndex - fromIndex < 2) { return 0; } int indexWithMaxDist = -1; double maxDist = -1; double firstLat = points.getLatitude(fromIndex); double firstLon = points.getLongitude(fromIndex); double lastLat = points.getLatitude(lastIndex); double lastLon = points.getLongitude(lastIndex); for (int i = fromIndex + 1; i < lastIndex; i++) { double lat = points.getLatitude(i); if (Double.isNaN(lat)) { continue; } double lon = points.getLongitude(i); double dist = calc.calcNormalizedEdgeDistance(lat, lon, firstLat, firstLon, lastLat, lastLon); if (maxDist < dist) { indexWithMaxDist = i; maxDist = dist; } } if (indexWithMaxDist < 0) { throw new IllegalStateException("maximum not found in [" + fromIndex + "," + lastIndex + "]"); } int counter = 0; if (maxDist < normedMaxDist) { for (int i = fromIndex + 1; i < lastIndex; i++) { points.set(i, Double.NaN, Double.NaN, Double.NaN); counter++; } } else { counter = subSimplify(points, fromIndex, indexWithMaxDist); counter += subSimplify(points, indexWithMaxDist, lastIndex); } return counter; }
Is the point near any points in the multi lat lng @param point point @param multiLatLng multi lat lng @param tolerance distance tolerance @return true if near
public static boolean isPointNearMultiLatLng(LatLng point, MultiLatLng multiLatLng, double tolerance) { boolean near = false; for (LatLng multiPoint : multiLatLng.getLatLngs()) { near = isPointNearPoint(point, multiPoint, tolerance); if (near) { break; } } return near; }
public static double getLatitudeDistance(double minLatitude, double maxLatitude) { LatLng lowerMiddle = new LatLng(minLatitude, 0); LatLng upperMiddle = new LatLng(maxLatitude, 0); double latDistance = SphericalUtil.computeDistanceBetween(lowerMiddle, upperMiddle); return latDistance; }
Gives support for Rails date_select, datetime_select, time_select helpers.
def process_attributes(attributes = nil) return attributes if attributes.blank? multi_parameter_attributes = {} new_attributes = {} attributes.each_pair do |key, value| if key.match(DATE_KEY_REGEX) match = key.to_s.match(DATE_KEY_REGEX) found_key = match[1] index = match[2].to_i (multi_parameter_attributes[found_key] ||= {})[index] = value.empty? ? nil : value.send("to_#{$3}") else new_attributes[key] = value end end multi_parameter_attributes.empty? ? new_attributes : process_multiparameter_attributes(multi_parameter_attributes, new_attributes) end
def select_date(element, value) suffixes = { :year => '1i', :month => '2i', :day => '3i', :hour => '4i', :minute => '5i' } date = value.respond_to?(:year) ? value : Date.parse(value) browser.select "jquery=#{element}_#{suffixes[:year]}", date.year browser.select "jquery=#{element}_#{suffixes[:month]}", date.strftime('%B') browser.select "jquery=#{element}_#{suffixes[:day]}", date.day end
Gives support for Rails date_select, datetime_select, time_select helpers.
def process_attributes(attributes = nil) return attributes if attributes.blank? multi_parameter_attributes = {} new_attributes = {} attributes.each_pair do |key, value| if key.match(DATE_KEY_REGEX) match = key.to_s.match(DATE_KEY_REGEX) found_key = match[1] index = match[2].to_i (multi_parameter_attributes[found_key] ||= {})[index] = value.empty? ? nil : value.send("to_#{$3}") else new_attributes[key] = value end end multi_parameter_attributes.empty? ? new_attributes : process_multiparameter_attributes(multi_parameter_attributes, new_attributes) end
def select_simple_date(date_input, value, format = nil) value = value.strftime format unless format.nil? date_input.set "#{value}\e" end
Gives support for Rails date_select, datetime_select, time_select helpers.
def process_attributes(attributes = nil) return attributes if attributes.blank? multi_parameter_attributes = {} new_attributes = {} attributes.each_pair do |key, value| if key.match(DATE_KEY_REGEX) match = key.to_s.match(DATE_KEY_REGEX) found_key = match[1] index = match[2].to_i (multi_parameter_attributes[found_key] ||= {})[index] = value.empty? ? nil : value.send("to_#{$3}") else new_attributes[key] = value end end multi_parameter_attributes.empty? ? new_attributes : process_multiparameter_attributes(multi_parameter_attributes, new_attributes) end
def draw_datetime(f, col_or_sym, options={}) col_name = get_column_name(col_or_sym) f.text_field(col_name, value: datetime_fmt(f.object.send(col_name))) end
Gives support for Rails date_select, datetime_select, time_select helpers.
def process_attributes(attributes = nil) return attributes if attributes.blank? multi_parameter_attributes = {} new_attributes = {} attributes.each_pair do |key, value| if key.match(DATE_KEY_REGEX) match = key.to_s.match(DATE_KEY_REGEX) found_key = match[1] index = match[2].to_i (multi_parameter_attributes[found_key] ||= {})[index] = value.empty? ? nil : value.send("to_#{$3}") else new_attributes[key] = value end end multi_parameter_attributes.empty? ? new_attributes : process_multiparameter_attributes(multi_parameter_attributes, new_attributes) end
def parse_timestamps super @next_capture_at = Time.at(next_capture_at) if next_capture_at @canceled_at = Time.at(canceled_at) if canceled_at @trial_start = Time.at(trial_start) if trial_start @trial_end = Time.at(trial_end) if trial_end end
Gives support for Rails date_select, datetime_select, time_select helpers.
def process_attributes(attributes = nil) return attributes if attributes.blank? multi_parameter_attributes = {} new_attributes = {} attributes.each_pair do |key, value| if key.match(DATE_KEY_REGEX) match = key.to_s.match(DATE_KEY_REGEX) found_key = match[1] index = match[2].to_i (multi_parameter_attributes[found_key] ||= {})[index] = value.empty? ? nil : value.send("to_#{$3}") else new_attributes[key] = value end end multi_parameter_attributes.empty? ? new_attributes : process_multiparameter_attributes(multi_parameter_attributes, new_attributes) end
public static function date_field_types() { static $field_types = null; if ( null === $field_types ) { $field_types = array( 'date', 'datetime', 'time' ); $field_types = apply_filters( 'pods_tableless_field_types', $field_types ); } return $field_types; }
read-only @language=en Set text CSS font style. @param {String} font Text CSS font style to set. @returns {Text} the Text object, chained call supported.
function(font){ var me = this; if(me.font !== font){ me.font = font; me._fontHeight = Text.measureFontHeight(font); } return me; }
function setFontFamily(fontFamily) { var editor = EditorManager.getCurrentFullEditor(); if (currFontFamily === fontFamily) { return; } _removeDynamicFontFamily(); if (fontFamily) { _addDynamicFontFamily(fontFamily); } exports.trigger("fontFamilyChange", fontFamily, currFontFamily); currFontFamily = fontFamily; prefs.set("fontFamily", fontFamily); if (editor) { editor.refreshAll(); } }
read-only @language=en Set text CSS font style. @param {String} font Text CSS font style to set. @returns {Text} the Text object, chained call supported.
function(font){ var me = this; if(me.font !== font){ me.font = font; me._fontHeight = Text.measureFontHeight(font); } return me; }
function fontSet(alias, className) { config.fontSets.push({ alias: alias, fontSet: className || alias }); return this; }
read-only @language=en Set text CSS font style. @param {String} font Text CSS font style to set. @returns {Text} the Text object, chained call supported.
function(font){ var me = this; if(me.font !== font){ me.font = font; me._fontHeight = Text.measureFontHeight(font); } return me; }
function font() { return src([`${svgDir}/*.svg`]) .pipe( iconfontCss({ fontName: config.name, path: template, targetPath: '../src/index.less', normalize: true, firstGlyph: 0xf000, cssClass: fontName // this is a trick to pass fontName to template }) ) .pipe( iconfont({ fontName, formats }) ) .pipe(dest(srcDir)); }
read-only @language=en Set text CSS font style. @param {String} font Text CSS font style to set. @returns {Text} the Text object, chained call supported.
function(font){ var me = this; if(me.font !== font){ me.font = font; me._fontHeight = Text.measureFontHeight(font); } return me; }
def _update_fontcolor(self, fontcolor): """Updates text font color button Parameters ---------- fontcolor: Integer \tText color in integer RGB format """ textcolor = wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOWTEXT) textcolor.SetRGB(fontcolor) self.textcolor_choice.SetColour(textcolor)
read-only @language=en Set text CSS font style. @param {String} font Text CSS font style to set. @returns {Text} the Text object, chained call supported.
function(font){ var me = this; if(me.font !== font){ me.font = font; me._fontHeight = Text.measureFontHeight(font); } return me; }
def OnTextFont(self, event): """Text font choice event handler""" fontchoice_combobox = event.GetEventObject() idx = event.GetInt() try: font_string = fontchoice_combobox.GetString(idx) except AttributeError: font_string = event.GetString() post_command_event(self, self.FontMsg, font=font_string)
Generate a new Key in the NFVO. @param name the name of the new Key @return the private Key @throws SDKException if the request fails
@Help(help = "Generate a new Key in the NFVO") public String generateKey(String name) throws SDKException { return (String) requestPost("generate", name); }
@Help(help = "Import a Key into the NFVO by providing name and public key") public Key importKey(String name, String publicKey) throws SDKException { Key key = new Key(); key.setName(name); key.setPublicKey(publicKey); return (Key) requestPost(key); }
Generate a new Key in the NFVO. @param name the name of the new Key @return the private Key @throws SDKException if the request fails
@Help(help = "Generate a new Key in the NFVO") public String generateKey(String name) throws SDKException { return (String) requestPost("generate", name); }
private NameGenerator getNameGenerator(String namePrefix) { synchronized(this) { if (_nameGenerators == null) _nameGenerators = new HashMap<String,NameGenerator>(); NameGenerator nameGenerator = _nameGenerators.get(namePrefix); if (nameGenerator == null) { nameGenerator = new NameGenerator(namePrefix); _nameGenerators.put(namePrefix, nameGenerator); } return nameGenerator; } }
Generate a new Key in the NFVO. @param name the name of the new Key @return the private Key @throws SDKException if the request fails
@Help(help = "Generate a new Key in the NFVO") public String generateKey(String name) throws SDKException { return (String) requestPost("generate", name); }
public static void generateSecretKey(KeyConfig config) throws NoSuchAlgorithmException, KeyStoreException, CertificateException, IOException { if (config == null || config.getKeyStoreFile() == null || StringUtils.isEmpty(config.getKeyEntryName()) || config.getAlgorithm() == null) { throw new KeyStoreException( "Missing parameters, unable to create keystore."); } SecureRandom random = new SecureRandom(); KeyGenerator keygen = KeyGenerator.getInstance(config.getAlgorithm() .getName(), new BouncyCastleProvider()); keygen.init(config.getKeySize(), random); SecretKey key = keygen.generateKey(); KeyStore keyStore = KeyStore.getInstance("JCEKS"); FileInputStream fis = null; if (config.getKeyStoreFile().exists() && FileUtils.sizeOf(config.getKeyStoreFile()) > 0) { fis = new FileInputStream(config.getKeyStoreFile()); } keyStore.load(fis, config.getKeyStorePassword().toCharArray()); KeyStore.ProtectionParameter protectionParameter = new KeyStore.PasswordProtection( config.getKeyStorePassword().toCharArray()); KeyStore.SecretKeyEntry secretKeyEntry = new KeyStore.SecretKeyEntry( key); keyStore.setEntry(config.getKeyEntryName(), secretKeyEntry, protectionParameter); if (fis != null) { fis.close(); } FileOutputStream fos = new FileOutputStream(config.getKeyStoreFile()); keyStore.store(fos, config.getKeyStorePassword().toCharArray()); fos.close(); }
Generate a new Key in the NFVO. @param name the name of the new Key @return the private Key @throws SDKException if the request fails
@Help(help = "Generate a new Key in the NFVO") public String generateKey(String name) throws SDKException { return (String) requestPost("generate", name); }
public PBKey getPBKey() { if (pbKey == null) { this.pbKey = new PBKey(this.getJcdAlias(), this.getUserName(), this.getPassWord()); } return pbKey; }
Generate a new Key in the NFVO. @param name the name of the new Key @return the private Key @throws SDKException if the request fails
@Help(help = "Generate a new Key in the NFVO") public String generateKey(String name) throws SDKException { return (String) requestPost("generate", name); }
def generate_semantic_data_key(used_semantic_keys): """ Create a new and unique semantic data key :param list used_semantic_keys: Handed list of keys already in use :rtype: str :return: semantic_data_id """ semantic_data_id_counter = -1 while True: semantic_data_id_counter += 1 if "semantic data key " + str(semantic_data_id_counter) not in used_semantic_keys: break return "semantic data key " + str(semantic_data_id_counter)
hack for NS62 bug
function jsUnitFixTop() { var tempTop = top; if (!tempTop) { tempTop = window; while (tempTop.parent) { tempTop = tempTop.parent; if (tempTop.top && tempTop.top.jsUnitTestSuite) { tempTop = tempTop.top; break; } } } try { window.top = tempTop; } catch (e) { } }
@Fix(io.sarl.lang.validation.IssueCodes.INVALID_IMPLEMENTED_TYPE) public void fixInvalidImplementedType(final Issue issue, IssueResolutionAcceptor acceptor) { ImplementedTypeRemoveModification.accept(this, issue, acceptor, RemovalType.OTHER); }
hack for NS62 bug
function jsUnitFixTop() { var tempTop = top; if (!tempTop) { tempTop = window; while (tempTop.parent) { tempTop = tempTop.parent; if (tempTop.top && tempTop.top.jsUnitTestSuite) { tempTop = tempTop.top; break; } } } try { window.top = tempTop; } catch (e) { } }
public boolean shouldInvalidate (Object id, int sourceOfInvalidation, int causeOfInvalidation) { boolean retVal = true; if (preInvalidationListenerCount > 0) { // In external implementation, catch any exceptions and process try { retVal = currentPreInvalidationListener.shouldInvalidate(id, sourceOfInvalidation, causeOfInvalidation); } catch (Throwable t) { com.ibm.ws.ffdc.FFDCFilter.processException(t, "com.ibm.ws.cache.DCEventSource.shouldInvalidate", "120", this); if (tc.isDebugEnabled()) { Tr.debug(tc, "Exception thrown in shouldInvalidate method of PreInvalidationListener\n" + t.toString()); } } } return retVal; //invalidate }
hack for NS62 bug
function jsUnitFixTop() { var tempTop = top; if (!tempTop) { tempTop = window; while (tempTop.parent) { tempTop = tempTop.parent; if (tempTop.top && tempTop.top.jsUnitTestSuite) { tempTop = tempTop.top; break; } } } try { window.top = tempTop; } catch (e) { } }
def remove(spy) if @constant_spies[spy.constant_name] == spy @constant_spies.delete(spy.constant_name) else raise NoSpyError, "#{spy.constant_name} was not stubbed on #{base_module.name}" end self end
hack for NS62 bug
function jsUnitFixTop() { var tempTop = top; if (!tempTop) { tempTop = window; while (tempTop.parent) { tempTop = tempTop.parent; if (tempTop.top && tempTop.top.jsUnitTestSuite) { tempTop = tempTop.top; break; } } } try { window.top = tempTop; } catch (e) { } }
def test(): """Test for ReverseDNS class""" dns = ReverseDNS() print(dns.lookup('192.168.0.1')) print(dns.lookup('8.8.8.8')) # Test cache print(dns.lookup('8.8.8.8'))
hack for NS62 bug
function jsUnitFixTop() { var tempTop = top; if (!tempTop) { tempTop = window; while (tempTop.parent) { tempTop = tempTop.parent; if (tempTop.top && tempTop.top.jsUnitTestSuite) { tempTop = tempTop.top; break; } } } try { window.top = tempTop; } catch (e) { } }
import re def debug(s): return re.sub(r'bug(?!s)', '', s)
Convert a file into a raid file format
public void raidFile(INode sourceINodes[], String source, RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks) throws IOException { waitForReady(); long now = FSNamesystem.now(); unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks, now); fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl, now); }
boolean reconstructFile(Path srcPath, Context context) throws IOException, InterruptedException { Progressable progress = context; if (progress == null) { progress = RaidUtils.NULL_PROGRESSABLE; } FileSystem fs = srcPath.getFileSystem(getConf()); FileStatus srcStat = null; try { srcStat = fs.getFileStatus(srcPath); } catch (FileNotFoundException ex) { return false; } if (RaidNode.isParityHarPartFile(srcPath)) { return processParityHarPartFile(srcPath, progress); } // Reconstruct parity file for (Codec codec : Codec.getCodecs()) { if (isParityFile(srcPath, codec)) { Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); return processParityFile(srcPath, decoder, context); } } // Reconstruct source file without connecting to stripe store for (Codec codec : Codec.getCodecs()) { ParityFilePair ppair = ParityFilePair.getParityFile( codec, srcStat, getConf()); if (ppair != null) { Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); return processFile(srcPath, ppair, decoder, false, context); } } // Reconstruct source file through stripe store for (Codec codec : Codec.getCodecs()) { if (!codec.isDirRaid) { continue; } try { // try to fix through the stripe store. Decoder decoder = new Decoder(getConf(), codec); decoder.connectToStore(srcPath); if (processFile(srcPath, null, decoder, true, context)) { return true; } } catch (Exception ex) { LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec, srcPath, -1, LOGTYPES.OFFLINE_RECONSTRUCTION_USE_STRIPE, fs, ex, context); } } return false; }
Convert a file into a raid file format
public void raidFile(INode sourceINodes[], String source, RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks) throws IOException { waitForReady(); long now = FSNamesystem.now(); unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks, now); fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl, now); }
@Override public INodeRaidStorage convertToRaidStorage(BlockInfo[] parityBlocks, RaidCodec codec, int[] checksums, BlocksMap blocksMap, short replication, INodeFile inode) throws IOException { if (codec == null) { throw new IOException("Codec is null"); } else { return new INodeRaidStorage(codec.convertToRaidStorage(parityBlocks, blocks, checksums, blocksMap, replication, inode), codec); } }
Convert a file into a raid file format
public void raidFile(INode sourceINodes[], String source, RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks) throws IOException { waitForReady(); long now = FSNamesystem.now(); unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks, now); fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl, now); }
private static BlockLocation[] getParityBlocks(final Path filePath, final long blockSize, final long numStripes, final RaidInfo raidInfo) throws IOException { FileSystem parityFS = raidInfo.parityPair.getFileSystem(); // get parity file metadata FileStatus parityFileStatus = raidInfo.parityPair.getFileStatus(); long parityFileLength = parityFileStatus.getLen(); if (parityFileLength != numStripes * raidInfo.parityBlocksPerStripe * blockSize) { throw new IOException("expected parity file of length" + (numStripes * raidInfo.parityBlocksPerStripe * blockSize) + " but got parity file of length " + parityFileLength); } BlockLocation[] parityBlocks = parityFS.getFileBlockLocations(parityFileStatus, 0L, parityFileLength); if (parityFS instanceof DistributedFileSystem || parityFS instanceof DistributedRaidFileSystem) { long parityBlockSize = parityFileStatus.getBlockSize(); if (parityBlockSize != blockSize) { throw new IOException("file block size is " + blockSize + " but parity file block size is " + parityBlockSize); } } else if (parityFS instanceof HarFileSystem) { LOG.debug("HAR FS found"); } else { LOG.warn("parity file system is not of a supported type"); } return parityBlocks; }
Convert a file into a raid file format
public void raidFile(INode sourceINodes[], String source, RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks) throws IOException { waitForReady(); long now = FSNamesystem.now(); unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks, now); fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl, now); }
def getpart(self, ix): """ Returns a fileobject for the specified section. This method optionally decompresses the data found in the .idb file, and returns a file-like object, with seek, read, tell. """ if self.offsets[ix] == 0: return comp, ofs, size, checksum = self.getsectioninfo(ix) fh = FileSection(self.fh, ofs, ofs + size) if comp == 2: import zlib # very old databases used a different compression scheme: wbits = -15 if self.magic == 'IDA0' else 15 fh = makeStringIO(zlib.decompress(fh.read(size), wbits)) elif comp == 0: pass else: raise Exception("unsupported section encoding: %02x" % comp) return fh
Convert a file into a raid file format
public void raidFile(INode sourceINodes[], String source, RaidCodec codec, short expectedSourceRepl, Block[] parityBlocks) throws IOException { waitForReady(); long now = FSNamesystem.now(); unprotectedRaidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks, now); fsImage.getEditLog().logRaidFile(source, codec.id, expectedSourceRepl, now); }
def read_raid(self, controller=None): """Get the current RAID configuration from the system. :param controller: If controller model its post-create read else post-delete :returns: current raid config. """ if controller: if not self.logical_drives: msg = ('No logical drives found on the controller') LOG.debug(msg) raise exception.IloLogicalDriveNotFoundError(msg) raid_op = 'create_raid' else: raid_op = 'delete_raid' result, raid_message = self._check_smart_storage_message() if result: configured_raid_settings = self._conn.get(self.settings_uri) raid_data = { 'logical_disks': self._generic_format( configured_raid_settings.json(), controller=controller)} return raid_data else: if self.physical_drives is None or not raid_message: # This controller is not configured or controller # not used in raid operation return else: msg = ('Failed to perform the %(opr)s operation ' 'successfully. Error - %(error)s' % {'opr': raid_op, 'error': str(raid_message)}) raise exception.IloError(msg)
Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object
def manage_conflict(self, item, name): """ Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object """ if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
def cmpname(name1, name2): """ Compare two CIM names for equality and ordering. The comparison is performed case-insensitively. One or both of the items may be `None`, and `None` is considered the lowest possible value. The implementation delegates to the '==' and '<' operators of the name datatypes. If name1 == name2, 0 is returned. If name1 < name2, -1 is returned. Otherwise, +1 is returned. """ if name1 is None and name2 is None: return 0 if name1 is None: return -1 if name2 is None: return 1 lower_name1 = name1.lower() lower_name2 = name2.lower() if lower_name1 == lower_name2: return 0 return -1 if lower_name1 < lower_name2 else 1
Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object
def manage_conflict(self, item, name): """ Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object """ if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
def is_fullfilled_by(self, other): """ Checks if the other index already fulfills all the indexing and constraint needs of the current one. :param other: The other index :type other: Index :rtype: bool """ # allow the other index to be equally large only. It being larger is an option # but it creates a problem with scenarios of the kind PRIMARY KEY(foo,bar) UNIQUE(foo) if len(other.get_columns()) != len(self.get_columns()): return False # Check if columns are the same, and even in the same order if not self.spans_columns(other.get_columns()): return False if not self.same_partial_index(other): return False if self.is_simple_index(): # this is a special case: If the current key is neither primary or unique, # any unique or primary key will always have the same effect # for the index and there cannot be any constraint overlaps. # This means a primary or unique index can always fulfill # the requirements of just an index that has no constraints. return True if other.is_primary() != self.is_primary(): return False if other.is_unique() != self.is_unique(): return False return True
Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object
def manage_conflict(self, item, name): """ Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object """ if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
def insert_right(self, item): 'Insert a new item. If equal keys are found, add to the right' k = self._key(item) i = bisect_right(self._keys, k) self._keys.insert(i, k) self._items.insert(i, item)
Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object
def manage_conflict(self, item, name): """ Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object """ if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
def has_key(cls, *args): """ Check whether flyweight object with specified key has already been created. Returns: bool: True if already created, False if not """ key = args if len(args) > 1 else args[0] return key in cls._instances
Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object
def manage_conflict(self, item, name): """ Checks if an object holding the same name already exists in the index. If so, it compares their definition order: the lowest definition order is kept. If definition order equal, an error is risen.Item The method returns the item that should be added after it has decided which one should be kept. If the new item has precedence over the New existing one, the existing is removed for the new to replace it. :param item: object to check for conflict :type item: alignak.objects.item.Item :param name: name of the object :type name: str :return: 'item' parameter modified :rtype: object """ if item.is_tpl(): existing = self.name_to_template[name] else: existing = self.name_to_item[name] if existing == item: return item existing_prio = getattr( existing, "definition_order", existing.properties["definition_order"].default) item_prio = getattr( item, "definition_order", item.properties["definition_order"].default) if existing_prio < item_prio: # Existing item has lower priority, so it has precedence. return existing if existing_prio > item_prio: # New item has lower priority, so it has precedence. # Existing item will be deleted below pass else: # Don't know which one to keep, lastly defined has precedence objcls = getattr(self.inner_class, "my_type", "[unknown]") mesg = "duplicate %s '%s', from: '%s' and '%s', using lastly defined. " \ "You may manually set the definition_order parameter to avoid this message." \ % (objcls, name, item.imported_from, existing.imported_from) item.configuration_warnings.append(mesg) if item.is_tpl(): self.remove_template(existing) else: self.remove_item(existing) return item
public KType indexReplace(int index, KType equivalentKey) { assert index >= 0 : "The index must point at an existing key."; assert index <= mask || (index == mask + 1 && hasEmptyKey); assert Intrinsics.equals(this, keys[index], equivalentKey); KType previousValue = Intrinsics.<KType> cast(keys[index]); keys[index] = equivalentKey; return previousValue; }
Convert the alias. @param string @return LibBaseTemplateFilterAlias
public function write(&$text) { $matches = array(); if (strpos($text, '<html')) { //add language $text = str_replace('<html', '<html lang="'.$this->getService('anahita:language')->getTag().'"', $text); //render the styles $text = str_replace('</head>', $this->_renderHead().$this->_renderStyles().'</head>', $text); //render the scripts $text = str_replace('</body>', $this->_renderScripts().'</body>', $text); } }
protected function normalizeAlias($alias) { $normalized = preg_replace('/[^a-zA-Z0-9]/', ' ', $alias); $normalized = 'new' . str_replace(' ', '', ucwords($normalized)); return $normalized; }
Convert the alias. @param string @return LibBaseTemplateFilterAlias
public function write(&$text) { $matches = array(); if (strpos($text, '<html')) { //add language $text = str_replace('<html', '<html lang="'.$this->getService('anahita:language')->getTag().'"', $text); //render the styles $text = str_replace('</head>', $this->_renderHead().$this->_renderStyles().'</head>', $text); //render the scripts $text = str_replace('</body>', $this->_renderScripts().'</body>', $text); } }
public function getConvertedValue($alias = null) { if ($alias && isset($this->convertedAliasValue[$alias])) { return $this->convertedAliasValue[$alias]; } else { return $this->convertedValue; } }
Convert the alias. @param string @return LibBaseTemplateFilterAlias
public function write(&$text) { $matches = array(); if (strpos($text, '<html')) { //add language $text = str_replace('<html', '<html lang="'.$this->getService('anahita:language')->getTag().'"', $text); //render the styles $text = str_replace('</head>', $this->_renderHead().$this->_renderStyles().'</head>', $text); //render the scripts $text = str_replace('</body>', $this->_renderScripts().'</body>', $text); } }
private static String aliasAsUrlPattern(final String alias) { String urlPattern = alias; if (urlPattern != null && !urlPattern.equals("/") && !urlPattern.contains("*")) { if (urlPattern.endsWith("/")) { urlPattern = urlPattern + "*"; } else { urlPattern = urlPattern + "/*"; } } return urlPattern; }
Convert the alias. @param string @return LibBaseTemplateFilterAlias
public function write(&$text) { $matches = array(); if (strpos($text, '<html')) { //add language $text = str_replace('<html', '<html lang="'.$this->getService('anahita:language')->getTag().'"', $text); //render the styles $text = str_replace('</head>', $this->_renderHead().$this->_renderStyles().'</head>', $text); //render the scripts $text = str_replace('</body>', $this->_renderScripts().'</body>', $text); } }
public static String getAlias(String queryString) { Matcher m = ALIAS_PATTERN.matcher(queryString); return m.find() ? m.group(1) : null; }
Convert the alias. @param string @return LibBaseTemplateFilterAlias
public function write(&$text) { $matches = array(); if (strpos($text, '<html')) { //add language $text = str_replace('<html', '<html lang="'.$this->getService('anahita:language')->getTag().'"', $text); //render the styles $text = str_replace('</head>', $this->_renderHead().$this->_renderStyles().'</head>', $text); //render the scripts $text = str_replace('</body>', $this->_renderScripts().'</body>', $text); } }
public function add($original, $alias) { $alias = Input::checkAlias($alias); $this->aliases[$alias] = $original; }
Create an object URL. Given a base URL and an object name, create an object URL. This is useful because object names can contain certain characters (namely slashes (`/`)) that are normally URLencoded when they appear inside of path sequences. @note Swift does not distinguish between @c %2F and a slash character, so this is not strictly necessary. @param string $base The base URL. This is not altered; it is just prepended to the returned string. @param string $oname The name of the object. @retval string @return string The URL to the object. Characters that need escaping will be escaped, while slash characters are not. Thus, the URL will look pathy.
public static function objectUrl($base, $oname) { if (strpos($oname, '/') === FALSE) { return $base . '/' . rawurlencode($oname); } $oParts = explode('/', $oname); $buffer = array(); foreach ($oParts as $part) { $buffer[] = rawurlencode($part); } $newname = implode('/', $buffer); return $base . '/' . $newname; }
public function getAddUrlFor(array $params = array()) { $params = array_merge($params, $this->getExtraParameters()); $friendlyName = explode('\\', $this->getEntityName()); $friendlyName = array_pop($friendlyName); $re = '/(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])/'; $a = preg_split($re, $friendlyName); $superFriendlyName = implode(' ', $a); return array( $superFriendlyName => array( 'path' => $this->getPathByConvention($this::SUFFIX_ADD), 'params' => $params, ), ); }
Create an object URL. Given a base URL and an object name, create an object URL. This is useful because object names can contain certain characters (namely slashes (`/`)) that are normally URLencoded when they appear inside of path sequences. @note Swift does not distinguish between @c %2F and a slash character, so this is not strictly necessary. @param string $base The base URL. This is not altered; it is just prepended to the returned string. @param string $oname The name of the object. @retval string @return string The URL to the object. Characters that need escaping will be escaped, while slash characters are not. Thus, the URL will look pathy.
public static function objectUrl($base, $oname) { if (strpos($oname, '/') === FALSE) { return $base . '/' . rawurlencode($oname); } $oParts = explode('/', $oname); $buffer = array(); foreach ($oParts as $part) { $buffer[] = rawurlencode($part); } $newname = implode('/', $buffer); return $base . '/' . $newname; }
public function standardizeUri(string $uri): string { if ($uri == '') { throw new BadMethodCallException('URI is empty'); } if (substr($uri, 0, 1) === '/') { $uri = substr($uri, 1); } $uri = preg_replace('|^https?://[^/]+/rest/(tx:[-0-9a-zA-Z]+/)?|', '', $uri); $uri = $this->apiUrl . '/' . $uri; return $uri; }
Create an object URL. Given a base URL and an object name, create an object URL. This is useful because object names can contain certain characters (namely slashes (`/`)) that are normally URLencoded when they appear inside of path sequences. @note Swift does not distinguish between @c %2F and a slash character, so this is not strictly necessary. @param string $base The base URL. This is not altered; it is just prepended to the returned string. @param string $oname The name of the object. @retval string @return string The URL to the object. Characters that need escaping will be escaped, while slash characters are not. Thus, the URL will look pathy.
public static function objectUrl($base, $oname) { if (strpos($oname, '/') === FALSE) { return $base . '/' . rawurlencode($oname); } $oParts = explode('/', $oname); $buffer = array(); foreach ($oParts as $part) { $buffer[] = rawurlencode($part); } $newname = implode('/', $buffer); return $base . '/' . $newname; }
public function baseUrl($targetPath = null) { if (!isset($this->baseUrl)) { throw new RuntimeException(sprintf( 'The base URI is not defined for [%s]', get_class($this) )); } if ($targetPath !== null) { return $this->createAbsoluteUrl($this->baseUrl, $targetPath); } return rtrim($this->baseUrl, '/').'/'; }
Create an object URL. Given a base URL and an object name, create an object URL. This is useful because object names can contain certain characters (namely slashes (`/`)) that are normally URLencoded when they appear inside of path sequences. @note Swift does not distinguish between @c %2F and a slash character, so this is not strictly necessary. @param string $base The base URL. This is not altered; it is just prepended to the returned string. @param string $oname The name of the object. @retval string @return string The URL to the object. Characters that need escaping will be escaped, while slash characters are not. Thus, the URL will look pathy.
public static function objectUrl($base, $oname) { if (strpos($oname, '/') === FALSE) { return $base . '/' . rawurlencode($oname); } $oParts = explode('/', $oname); $buffer = array(); foreach ($oParts as $part) { $buffer[] = rawurlencode($part); } $newname = implode('/', $buffer); return $base . '/' . $newname; }
public static function alias( $alias, $params = array(), $retain = false ) { $route_params = array(); // to handle the suffix after a slash in an alias define $suffix = ''; if ( strpos( $alias, '/' ) !== false && $alias !== '/' ) { // slashes in aliases get appended as suffix list( $alias, $suffix ) = explode( '/', $alias ); $suffix = '/'.$suffix; } // get the parameters with the numeric keys so we can // pass them as route parameters like [any]-[num]-[num]/[any]/ foreach( $params as $key => $value ) { if ( is_int( $key ) ) { $route_params[] = $value; unset( $params[$key] ); } } return CCUrl::to( CCRouter::alias( $alias, $route_params ).$suffix, $params, $retain ); }
Create an object URL. Given a base URL and an object name, create an object URL. This is useful because object names can contain certain characters (namely slashes (`/`)) that are normally URLencoded when they appear inside of path sequences. @note Swift does not distinguish between @c %2F and a slash character, so this is not strictly necessary. @param string $base The base URL. This is not altered; it is just prepended to the returned string. @param string $oname The name of the object. @retval string @return string The URL to the object. Characters that need escaping will be escaped, while slash characters are not. Thus, the URL will look pathy.
public static function objectUrl($base, $oname) { if (strpos($oname, '/') === FALSE) { return $base . '/' . rawurlencode($oname); } $oParts = explode('/', $oname); $buffer = array(); foreach ($oParts as $part) { $buffer[] = rawurlencode($part); } $newname = implode('/', $buffer); return $base . '/' . $newname; }
public function qualify($url, $base) { if(!parse_url($url, PHP_URL_SCHEME)) { if ($url[0] != '/') { //Relative path $url = dirname($base) . '/' . $url; } else { //Absolute path $url = parse_url($base, PHP_URL_SCHEME) . ':/' . $url; } } return $this->normalise($url); }
End of preview. Expand in Data Studio
README.md exists but content is empty.
Downloads last month
46