#include <linux/mm.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/vmpressure.h>
#include <linux/vmstat.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>	/* for try_to_release_page(),
					buffer_heads_over_limit */
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/memcontrol.h>
#include <linux/delayacct.h>
#include <linux/sysctl.h>
#include <linux/oom.h>
#include <linux/prefetch.h>
#include <linux/debugfs.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>

#include <linux/swapops.h>
#include <linux/balloon_compaction.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>

struct scan_control {
	/* Incremented by the number of inactive pages that were scanned */
	unsigned long nr_scanned;

	/* Number of pages freed so far during a call to shrink_zones() */
	unsigned long nr_reclaimed;

	/* How many pages shrink_list() should reclaim */
	unsigned long nr_to_reclaim;

	unsigned long hibernation_mode;

	/* This context's GFP mask */
	gfp_t gfp_mask;

	int may_writepage;

	/* Can mapped pages be reclaimed? */
	int may_unmap;

	/* Can pages be swapped as part of reclaim? */
	int may_swap;

	int order;

	/* Scan (total_size >> priority) pages at once */
	int priority;

	/*
	 * The memory cgroup that hit its limit and as a result is the
	 * primary target of this reclaim invocation.
	 */
	struct mem_cgroup *target_mem_cgroup;

	/*
	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
	 * are scanned.
	 */
	nodemask_t	*nodemask;
};

#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))

#ifdef ARCH_HAS_PREFETCH
#define prefetch_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetch(&prev->_field);			\
		}							\
	} while (0)
#else
#define prefetch_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

#ifdef ARCH_HAS_PREFETCHW
#define prefetchw_prev_lru_page(_page, _base, _field)			\
	do {								\
		if ((_page)->lru.prev != _base) {			\
			struct page *prev;				\
									\
			prev = lru_to_page(&(_page->lru));		\
			prefetchw(&prev->_field);		\
		}							\
	} while (0)
#else
#define prefetchw_prev_lru_page(_page, _base, _field) do { } while (0)
#endif

/*
 * From 0 .. 100.  Higher means more swappy.
 */
int vm_swappiness = 60;
unsigned long vm_total_pages;	/* The total number of pages which the VM controls */

static LIST_HEAD(shrinker_list);
static DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static bool global_reclaim(struct scan_control *sc)
{
	return !sc->target_mem_cgroup;
}
#else
static bool global_reclaim(struct scan_control *sc)
{
	return true;
}
#endif

static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	if (!mem_cgroup_disabled())
		return mem_cgroup_get_lru_size(lruvec, lru);

	return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
}

struct dentry *debug_file;

static int debug_shrinker_show(struct seq_file *s, void *unused)
{
	struct shrinker *shrinker;
	struct shrink_control sc;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	down_read(&shrinker_rwsem);
	list_for_each_entry(shrinker, &shrinker_list, list) {
		int num_objs;

		num_objs = shrinker->shrink(shrinker, &sc);
		seq_printf(s, "%pf %d\n", shrinker->shrink, num_objs);
	}
	up_read(&shrinker_rwsem);
	return 0;
}

static int debug_shrinker_open(struct inode *inode, struct file *file)
{
	return single_open(file, debug_shrinker_show, inode->i_private);
}

static const struct file_operations debug_shrinker_fops = {
	.open = debug_shrinker_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/*
 * Add a shrinker callback to be called from the vm
 */
void register_shrinker(struct shrinker *shrinker)
{
	atomic_long_set(&shrinker->nr_in_batch, 0);
	down_write(&shrinker_rwsem);
	list_add_tail(&shrinker->list, &shrinker_list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(register_shrinker);

static int __init add_shrinker_debug(void)
{
	debugfs_create_file("shrinker", 0644, NULL, NULL,
			    &debug_shrinker_fops);
	return 0;
}

late_initcall(add_shrinker_debug);

/*
 * Remove one
 */
void unregister_shrinker(struct shrinker *shrinker)
{
	down_write(&shrinker_rwsem);
	list_del(&shrinker->list);
	up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(unregister_shrinker);

static inline int do_shrinker_shrink(struct shrinker *shrinker,
				     struct shrink_control *sc,
				     unsigned long nr_to_scan)
{
	sc->nr_to_scan = nr_to_scan;
	return (*shrinker->shrink)(shrinker, sc);
}

#define SHRINK_BATCH 128
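/*
 * Illustrative sketch (not part of the original file): a minimal client of
 * the register_shrinker()/unregister_shrinker() API above, using the legacy
 * ->shrink() callback that shrink_slab() drives through do_shrinker_shrink().
 * The example_cache_* helpers and names are hypothetical.  A ->shrink()
 * callback is queried with nr_to_scan == 0 for a count of freeable objects,
 * asked to evict otherwise, and may return -1 when it cannot make progress
 * under the given gfp_mask.
 */
#if 0	/* example only */
static int example_shrink(struct shrinker *s, struct shrink_control *sc)
{
	if (sc->nr_to_scan) {
		if (!(sc->gfp_mask & __GFP_FS))
			return -1;	/* must not recurse into the FS */
		example_cache_evict(sc->nr_to_scan);
	}
	return example_cache_count();	/* freeable objects remaining */
}

static struct shrinker example_shrinker = {
	.shrink	= example_shrink,
	.seeks	= DEFAULT_SEEKS,	/* cost to recreate one object */
	.batch	= 0,			/* 0 means SHRINK_BATCH (128) */
};

static int __init example_init(void)
{
	register_shrinker(&example_shrinker);
	return 0;
}

static void __exit example_exit(void)
{
	unregister_shrinker(&example_shrinker);
}
#endif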
/*
 * Call the shrink functions to age shrinkable caches
 *
 * Here we assume it costs one seek to replace a lru page and that it also
 * takes a seek to recreate a cache object.  With this in mind we age equal
 * percentages of the lru and ageable caches.  This should balance the seeks
 * generated by these structures.
 *
 * If the vm encountered mapped pages on the LRU it increases the pressure on
 * slab to avoid swapping.
 *
 * We do weird things to avoid (scanned*seeks*entries) overflowing 32 bits.
 *
 * `lru_pages' represents the number of on-LRU pages in all the zones which
 * are eligible for the caller's allocation attempt.  It is used for balancing
 * slab reclaim versus page reclaim.
 *
 * Returns the number of slab objects which we shrunk.
 */
unsigned long shrink_slab(struct shrink_control *shrink,
			  unsigned long nr_pages_scanned,
			  unsigned long lru_pages)
{
	struct shrinker *shrinker;
	unsigned long ret = 0;

	if (nr_pages_scanned == 0)
		nr_pages_scanned = SWAP_CLUSTER_MAX;

	if (!down_read_trylock(&shrinker_rwsem)) {
		/* Assume we'll be able to shrink next time */
		ret = 1;
		goto out;
	}

	list_for_each_entry(shrinker, &shrinker_list, list) {
		unsigned long long delta;
		long total_scan;
		long max_pass;
		int shrink_ret = 0;
		long nr;
		long new_nr;
		long batch_size = shrinker->batch ? shrinker->batch
						  : SHRINK_BATCH;

		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
		if (max_pass <= 0)
			continue;

		/*
		 * copy the current shrinker scan count into a local variable
		 * and zero it so that other concurrent shrinker invocations
		 * don't also do this scanning work.
		 */
		nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);

		total_scan = nr;
		delta = (4 * nr_pages_scanned) / shrinker->seeks;
		delta *= max_pass;
		do_div(delta, lru_pages + 1);
		total_scan += delta;
		if (total_scan < 0) {
			printk(KERN_ERR "shrink_slab: %pF negative objects to "
			       "delete nr=%ld\n",
			       shrinker->shrink, total_scan);
			total_scan = max_pass;
		}

		/*
		 * We need to avoid excessive windup on filesystem shrinkers
		 * due to large numbers of GFP_NOFS allocations causing the
		 * shrinkers to return -1 all the time. This results in a large
		 * nr being built up so when a shrink that can do some work
		 * comes along it empties the entire cache due to nr >>>
		 * max_pass.  This is bad for sustaining a working set in
		 * memory.
		 *
		 * Hence only allow the shrinker to scan the entire cache when
		 * a large delta change is calculated directly.
		 */
		if (delta < max_pass / 4)
			total_scan = min(total_scan, max_pass / 2);

		/*
		 * Avoid risking looping forever due to too large nr value:
		 * never try to free more than twice the estimate number of
		 * freeable entries.
		 */
		if (total_scan > max_pass * 2)
			total_scan = max_pass * 2;

		trace_mm_shrink_slab_start(shrinker, shrink, nr,
					nr_pages_scanned, lru_pages,
					max_pass, delta, total_scan);

		while (total_scan >= batch_size) {
			int nr_before;

			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
			shrink_ret = do_shrinker_shrink(shrinker, shrink,
							batch_size);
			if (shrink_ret == -1)
				break;
			if (shrink_ret < nr_before)
				ret += nr_before - shrink_ret;
			count_vm_events(SLABS_SCANNED, batch_size);
			total_scan -= batch_size;

			cond_resched();
		}

		/*
		 * move the unused scan count back into the shrinker in a
		 * manner that handles concurrent updates. If we exhausted the
		 * scan, there is no need to do an update.
		 */
		if (total_scan > 0)
			new_nr = atomic_long_add_return(total_scan,
					&shrinker->nr_in_batch);
		else
			new_nr = atomic_long_read(&shrinker->nr_in_batch);

		trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
	}
	up_read(&shrinker_rwsem);
out:
	cond_resched();
	return ret;
}
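/*
 * Worked example (not part of the original file) of the scan-pressure
 * arithmetic in shrink_slab() above, under assumed sample numbers: the cache
 * is asked to give up roughly the same proportion of its objects as the
 * fraction of the LRU that was just scanned, scaled by 2/seeks.
 */
#if 0	/* example only */
static unsigned long long example_shrink_slab_delta(void)
{
	/* assumed sample numbers, not from the original file */
	unsigned long nr_pages_scanned = 1024;	/* LRU pages just scanned */
	unsigned long lru_pages = 1048576;	/* 4GB worth of LRU pages */
	long max_pass = 10000;			/* freeable slab objects */
	int seeks = 2;				/* DEFAULT_SEEKS */
	unsigned long long delta;

	delta = (4 * nr_pages_scanned) / seeks;	/* 2048 */
	delta *= max_pass;			/* 20480000 */
	do_div(delta, lru_pages + 1);		/* ~19 objects to scan */
	return delta;
}
#endif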
static inline int is_page_cache_freeable(struct page *page)
{
	/*
	 * A freeable page cache page is referenced only by the caller
	 * that isolated the page, the page cache radix tree and
	 * optional buffer heads at page->private.
	 */
	return page_count(page) - page_has_private(page) == 2;
}

static int may_write_to_queue(struct backing_dev_info *bdi,
			      struct scan_control *sc)
{
	if (current->flags & PF_SWAPWRITE)
		return 1;
	if (!bdi_write_congested(bdi))
		return 1;
	if (bdi == current->backing_dev_info)
		return 1;
	return 0;
}

/*
 * We detected a synchronous write error writing a page out.  Probably
 * -ENOSPC.  We need to propagate that into the address_space for a subsequent
 * fsync(), msync() or close().
 *
 * The tricky part is that after writepage we cannot touch the mapping: nothing
 * prevents it from being freed up.  But we have a ref on the page and once
 * that page is locked, the mapping is pinned.
 *
 * We're allowed to run sleeping lock_page() here because we know the caller
 * has __GFP_FS.
 */
static void handle_write_error(struct address_space *mapping,
				struct page *page, int error)
{
	lock_page(page);
	if (page_mapping(page) == mapping)
		mapping_set_error(mapping, error);
	unlock_page(page);
}

/* possible outcome of pageout() */
typedef enum {
	/* failed to write page out, page is locked */
	PAGE_KEEP,
	/* move page to the active list, page is locked */
	PAGE_ACTIVATE,
	/* page has been sent to the disk successfully, page is unlocked */
	PAGE_SUCCESS,
	/* page is clean and locked */
	PAGE_CLEAN,
} pageout_t;

/*
 * pageout is called by shrink_page_list() for each dirty page.
 * Calls ->writepage().
 */
static pageout_t pageout(struct page *page, struct address_space *mapping,
			 struct scan_control *sc)
{
	/*
	 * If the page is dirty, only perform writeback if that write
	 * will be non-blocking.  To prevent this allocation from being
	 * stalled by pagecache activity.  But note that there may be
	 * stalls if we need to run get_block().  We could test
	 * PagePrivate for that.
	 *
	 * If this process is currently in __generic_file_aio_write() against
	 * this page's queue, we can perform writeback even if that
	 * will block.
	 *
	 * If the page is swapcache, write it back even if that would
	 * block, for some throttling.  This happens by accident, because
	 * swap_backing_dev_info is bust: it doesn't reflect the
	 * congestion state of the swapdevs.  Easy to fix, if needed.
	 */
	if (!is_page_cache_freeable(page))
		return PAGE_KEEP;
	if (!mapping) {
		/*
		 * Some data journaling orphaned pages can have
		 * page->mapping == NULL while being dirty with clean buffers.
		 */
		if (page_has_private(page)) {
			if (try_to_free_buffers(page)) {
				ClearPageDirty(page);
				printk("%s: orphaned page\n", __func__);
				return PAGE_CLEAN;
			}
		}
		return PAGE_KEEP;
	}
	if (mapping->a_ops->writepage == NULL)
		return PAGE_ACTIVATE;
	if (!may_write_to_queue(mapping->backing_dev_info, sc))
		return PAGE_KEEP;

	if (clear_page_dirty_for_io(page)) {
		int res;
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};

		SetPageReclaim(page);
		res = mapping->a_ops->writepage(page, &wbc);
		if (res < 0)
			handle_write_error(mapping, page, res);
		if (res == AOP_WRITEPAGE_ACTIVATE) {
			ClearPageReclaim(page);
			return PAGE_ACTIVATE;
		}

		if (!PageWriteback(page)) {
			/* synchronous write or broken a_ops? */
			ClearPageReclaim(page);
		}
		trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
		inc_zone_page_state(page, NR_VMSCAN_WRITE);
		return PAGE_SUCCESS;
	}

	return PAGE_CLEAN;
}
/*
 * Same as remove_mapping, but if the page is removed from the mapping, it
 * gets returned with a refcount of 0.
 */
static int __remove_mapping(struct address_space *mapping, struct page *page)
{
	BUG_ON(!PageLocked(page));
	BUG_ON(mapping != page_mapping(page));

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * The non racy check for a busy page.
	 *
	 * Must be careful with the order of the tests. When someone has
	 * a ref to the page, it may be possible that they dirty it then
	 * drop the reference. So if PageDirty is tested before page_count
	 * here, then the following race may occur:
	 *
	 * get_user_pages(&page);
	 * [user mapping goes away]
	 * write_to(page);
	 *				!PageDirty(page)    [good]
	 * SetPageDirty(page);
	 * put_page(page);
	 *				!page_count(page)   [good, discard it]
	 *
	 * [oops, our write_to data is lost]
	 *
	 * Reversing the order of the tests ensures such a situation cannot
	 * escape unnoticed. The smp_rmb is needed to ensure the page->flags
	 * load is not satisfied before that of page->_count.
	 *
	 * Note that if SetPageDirty is always performed via set_page_dirty,
	 * and thus under tree_lock, then this ordering is not required.
	 */
	if (!page_freeze_refs(page, 2))
		goto cannot_free;
	/* note: atomic_cmpxchg in page_freeze_refs provides the smp_rmb */
	if (unlikely(PageDirty(page))) {
		page_unfreeze_refs(page, 2);
		goto cannot_free;
	}

	if (PageSwapCache(page)) {
		swp_entry_t swap = { .val = page_private(page) };
		__delete_from_swap_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		swapcache_free(swap, page);
	} else {
		void (*freepage)(struct page *);

		freepage = mapping->a_ops->freepage;

		__delete_from_page_cache(page);
		spin_unlock_irq(&mapping->tree_lock);
		mem_cgroup_uncharge_cache_page(page);

		if (freepage != NULL)
			freepage(page);
	}

	return 1;

cannot_free:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

/*
 * Attempt to detach a locked page from its ->mapping.  If it is dirty or if
 * someone else has a ref on the page, abort and return 0.  If it was
 * successfully detached, return 1.  Assumes the caller has a single ref on
 * this page.
 */
int remove_mapping(struct address_space *mapping, struct page *page)
{
	if (__remove_mapping(mapping, page)) {
		/*
		 * Unfreezing the refcount with 1 rather than 2 effectively
		 * drops the pagecache ref for us without requiring another
		 * atomic operation.
		 */
		page_unfreeze_refs(page, 1);
		return 1;
	}
	return 0;
}

/**
 * putback_lru_page - put previously isolated page onto appropriate LRU list
 * @page: page to be put back to appropriate lru list
 *
 * Add previously isolated @page to appropriate LRU list.
 * Page may still be unevictable for other reasons.
 *
 * lru_lock must not be held, interrupts must be enabled.
 */
void putback_lru_page(struct page *page)
{
	int lru;
	int active = !!TestClearPageActive(page);
	int was_unevictable = PageUnevictable(page);

	VM_BUG_ON(PageLRU(page));

redo:
	ClearPageUnevictable(page);

	if (page_evictable(page)) {
		/*
		 * For evictable pages, we can use the cache.
		 * In event of a race, worst case is we end up with an
		 * unevictable page on [in]active list.
		 * We know how to handle that.
		 */
		lru = active + page_lru_base_type(page);
		lru_cache_add_lru(page, lru);
	} else {
		/*
		 * Put unevictable pages directly on zone's unevictable
		 * list.
		 */
		lru = LRU_UNEVICTABLE;
		add_page_to_unevictable_list(page);
		/*
		 * When racing with an mlock or AS_UNEVICTABLE clearing
		 * (page is unlocked) make sure that if the other thread
		 * does not observe our setting of PG_lru and fails
		 * isolation/check_move_unevictable_pages,
		 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
		 * the page back to the evictable list.
		 *
		 * The other side is TestClearPageMlocked() or shmem_lock().
		 */
		smp_mb();
	}

	/*
	 * page's status can change while we move it among lru. If an evictable
	 * page is on unevictable list, it never be freed. To avoid that,
	 * check after we added it to the list, again.
	 */
	if (lru == LRU_UNEVICTABLE && page_evictable(page)) {
		if (!isolate_lru_page(page)) {
			put_page(page);
			goto redo;
		}
		/* This means someone else dropped this page from LRU
		 * So, it will be freed or putback to LRU again. There is
		 * nothing to do here.
		 */
	}

	if (was_unevictable && lru != LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGRESCUED);
	else if (!was_unevictable && lru == LRU_UNEVICTABLE)
		count_vm_event(UNEVICTABLE_PGCULLED);

	put_page(page);		/* drop ref from isolate */
}

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_RECLAIM_CLEAN,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references page_check_references(struct page *page,
						  struct scan_control *sc)
{
	int referenced_ptes, referenced_page;
	unsigned long vm_flags;

	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
					  &vm_flags);
	referenced_page = TestClearPageReferenced(page);

	/*
	 * Mlock lost the isolation race with us.  Let try_to_unmap()
	 * move the page to the unevictable list.
	 */
	if (vm_flags & VM_LOCKED)
		return PAGEREF_RECLAIM;

	if (referenced_ptes) {
		if (PageSwapBacked(page))
			return PAGEREF_ACTIVATE;
		/*
		 * All mapped pages start out with page table
		 * references from the instantiating fault, so we need
		 * to look twice if a mapped file page is used more
		 * than once.
		 *
		 * Mark it and spare it for another trip around the
		 * inactive list.  Another page table reference will
		 * lead to its activation.
		 *
		 * Note: the mark is set for activated pages as well
		 * so that recently deactivated but used pages are
		 * quickly recovered.
		 */
		SetPageReferenced(page);

		if (referenced_page || referenced_ptes > 1)
			return PAGEREF_ACTIVATE;

		/*
		 * Activate file-backed executable pages after first usage.
		 */
		if (vm_flags & VM_EXEC)
			return PAGEREF_ACTIVATE;

		return PAGEREF_KEEP;
	}

	/* Reclaim if clean, defer dirty pages to writeback */
	if (referenced_page && !PageSwapBacked(page))
		return PAGEREF_RECLAIM_CLEAN;

	return PAGEREF_RECLAIM;
}

/*
 * shrink_page_list() returns the number of reclaimed pages
 */
static unsigned long shrink_page_list(struct list_head *page_list,
				      struct zone *zone,
				      struct scan_control *sc,
				      enum ttu_flags ttu_flags,
				      unsigned long *ret_nr_dirty,
				      unsigned long *ret_nr_writeback,
				      bool force_reclaim)
{
	LIST_HEAD(ret_pages);
	LIST_HEAD(free_pages);
	int pgactivate = 0;
	unsigned long nr_dirty = 0;
	unsigned long nr_congested = 0;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_writeback = 0;

	cond_resched();

	mem_cgroup_uncharge_start();
	while (!list_empty(page_list)) {
		struct address_space *mapping;
		struct page *page;
		int may_enter_fs;
		enum page_references references = PAGEREF_RECLAIM_CLEAN;

		cond_resched();

		page = lru_to_page(page_list);
		list_del(&page->lru);

		if (!trylock_page(page))
			goto keep;

		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(page_zone(page) != zone);

		sc->nr_scanned++;

		if (unlikely(!page_evictable(page)))
			goto cull_mlocked;

		if (!sc->may_unmap && page_mapped(page))
			goto keep_locked;

		/* Double the slab pressure for mapped and swapcache pages */
		if (page_mapped(page) || PageSwapCache(page))
			sc->nr_scanned++;

		may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
			(PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));

		if (PageWriteback(page)) {
			/*
			 * memcg doesn't have any dirty pages throttling so we
			 * could easily OOM just because too many pages are in
			 * writeback and there is nothing else to reclaim.
			 *
			 * Require may_enter_fs to wait on writeback, because
			 * fs may not have submitted IO yet. And a loop driver
			 * thread might enter reclaim, and deadlock if it waits
			 * on a page for which it is needed to do the write
			 * (loop masks off __GFP_IO|__GFP_FS for this reason);
			 * but more thought would probably show more reasons.
			 */
			if (global_reclaim(sc) ||
			    !PageReclaim(page) || !may_enter_fs) {
				/*
				 * This is slightly racy - end_page_writeback()
				 * might have just cleared PageReclaim, then
				 * setting PageReclaim here end up interpreted
				 * as PageReadahead - but that does not matter
				 * enough to care.  What we do want is for this
				 * page to have PageReclaim set next time memcg
				 * reclaim reaches the tests above, so it will
				 * then wait_on_page_writeback() to avoid OOM;
				 * and it's also appropriate in global reclaim.
				 */
				SetPageReclaim(page);
				nr_writeback++;
				goto keep_locked;
			}
			wait_on_page_writeback(page);
		}

		if (!force_reclaim)
			references = page_check_references(page, sc);

		switch (references) {
		case PAGEREF_ACTIVATE:
			goto activate_locked;
		case PAGEREF_KEEP:
			goto keep_locked;
		case PAGEREF_RECLAIM:
		case PAGEREF_RECLAIM_CLEAN:
			; /* try to reclaim the page below */
		}

		/*
		 * Anonymous process memory has backing store?
		 * Try to allocate it some swap space here.
		 */
		if (PageAnon(page) && !PageSwapCache(page)) {
			if (!(sc->gfp_mask & __GFP_IO))
				goto keep_locked;
			if (!add_to_swap(page, page_list))
				goto activate_locked;
			may_enter_fs = 1;
		}

		mapping = page_mapping(page);

		/*
		 * The page is mapped into the page tables of one or more
		 * processes. Try to unmap it here.
		 */
		if (page_mapped(page) && mapping) {
			switch (try_to_unmap(page, ttu_flags)) {
			case SWAP_FAIL:
				goto activate_locked;
			case SWAP_AGAIN:
				goto keep_locked;
			case SWAP_MLOCK:
				goto cull_mlocked;
			case SWAP_SUCCESS:
				; /* try to free the page below */
			}
		}

		if (PageDirty(page)) {
			nr_dirty++;

			/*
			 * Only kswapd can writeback filesystem pages to
			 * avoid risk of stack overflow but do not writeback
			 * unless under significant pressure.
			 */
			if (page_is_file_cache(page) &&
			    (!current_is_kswapd() ||
			     sc->priority >= DEF_PRIORITY - 2)) {
				/*
				 * Immediately reclaim when written back.
				 * Similar in principle to deactivate_page()
				 * except we already have the page isolated
				 * and know it's dirty
				 */
				inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
				SetPageReclaim(page);

				goto keep_locked;
			}

			if (references == PAGEREF_RECLAIM_CLEAN)
				goto keep_locked;
			if (!may_enter_fs)
				goto keep_locked;
			if (!sc->may_writepage)
				goto keep_locked;

			/* Page is dirty, try to write it out here */
			switch (pageout(page, mapping, sc)) {
			case PAGE_KEEP:
				nr_congested++;
				goto keep_locked;
			case PAGE_ACTIVATE:
				goto activate_locked;
			case PAGE_SUCCESS:
				if (PageWriteback(page))
					goto keep;
				if (PageDirty(page))
					goto keep;

				/*
				 * A synchronous write - probably a ramdisk.  Go
				 * ahead and try to reclaim the page.
				 */
				if (!trylock_page(page))
					goto keep;
				if (PageDirty(page) || PageWriteback(page))
					goto keep_locked;
				mapping = page_mapping(page);
			case PAGE_CLEAN:
				; /* try to free the page below */
			}
		}

		/*
		 * If the page has buffers, try to free the buffer mappings
		 * associated with this page. If we succeed we try to free
		 * the page as well.
		 *
		 * We do this even if the page is PageDirty().
		 * try_to_release_page() does not perform I/O, but it is
		 * possible for a page to have PageDirty set, but it is actually
		 * clean (all its buffers are clean).  This happens if the
		 * buffers were written out directly, with submit_bh(). ext3
		 * will do this, as well as the blockdev mapping.
		 * try_to_release_page() will discover that cleanness and will
		 * drop the buffers and mark the page clean - it can be freed.
		 *
		 * Rarely, pages can have buffers and no ->mapping.  These are
		 * the pages which were not successfully invalidated in
		 * truncate_complete_page().  We try to drop those buffers here
		 * and if that worked, and the page is no longer mapped into
		 * process address space (page_count == 1) it can be freed.
		 * Otherwise, leave the page on the LRU so it is swappable.
		 */
		if (page_has_private(page)) {
			if (!try_to_release_page(page, sc->gfp_mask))
				goto activate_locked;
			if (!mapping && page_count(page) == 1) {
				unlock_page(page);
				if (put_page_testzero(page))
					goto free_it;
				else {
					/*
					 * rare race with speculative reference.
					 * the speculative reference will free
					 * this page shortly, so we may
					 * increment nr_reclaimed here (and
					 * leave it off the LRU).
					 */
					nr_reclaimed++;
					continue;
				}
			}
		}

		if (!mapping || !__remove_mapping(mapping, page))
			goto keep_locked;

		/*
		 * At this point, we have no other references and there is
		 * no way to pick any more up (removed from LRU, removed
		 * from pagecache). Can use non-atomic bitops now (and
		 * we obviously don't have to worry about waking up a process
		 * waiting on the page lock, because there are no references.
		 */
		__clear_page_locked(page);
free_it:
		nr_reclaimed++;

		/*
		 * Is there need to periodically free_page_list? It would
		 * appear not as the counts should be low
		 */
		list_add(&page->lru, &free_pages);
		continue;

cull_mlocked:
		if (PageSwapCache(page))
			try_to_free_swap(page);
		unlock_page(page);
		list_add(&page->lru, &ret_pages);
		continue;

activate_locked:
		/* Not a candidate for swapping, so reclaim swap space. */
		if (PageSwapCache(page) && vm_swap_full())
			try_to_free_swap(page);
		VM_BUG_ON(PageActive(page));
		SetPageActive(page);
		pgactivate++;
keep_locked:
		unlock_page(page);
keep:
		list_add(&page->lru, &ret_pages);
		VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
	}

	/*
	 * Tag a zone as congested if all the dirty pages encountered were
	 * backed by a congested BDI. In this case, reclaimers should just
	 * back off and wait for congestion to clear because further reclaim
	 * will encounter the same problem
	 */
	if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
		zone_set_flag(zone, ZONE_CONGESTED);

	free_hot_cold_page_list(&free_pages, 1);

	list_splice(&ret_pages, page_list);
	count_vm_events(PGACTIVATE, pgactivate);
	mem_cgroup_uncharge_end();
	*ret_nr_dirty += nr_dirty;
	*ret_nr_writeback += nr_writeback;
	return nr_reclaimed;
}

unsigned long reclaim_clean_pages_from_list(struct zone *zone,
					    struct list_head *page_list)
{
	struct scan_control sc = {
		.gfp_mask = GFP_KERNEL,
		.priority = DEF_PRIORITY,
		.may_unmap = 1,
	};
	unsigned long ret, dummy1, dummy2;
	struct page *page, *next;
	LIST_HEAD(clean_pages);

	list_for_each_entry_safe(page, next, page_list, lru) {
		if (page_is_file_cache(page) && !PageDirty(page) &&
		    !isolated_balloon_page(page)) {
			ClearPageActive(page);
			list_move(&page->lru, &clean_pages);
		}
	}

	ret = shrink_page_list(&clean_pages, zone, &sc,
			       TTU_UNMAP|TTU_IGNORE_ACCESS,
			       &dummy1, &dummy2, true);
	list_splice(&clean_pages, page_list);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
	return ret;
}
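/*
 * Illustrative sketch (not part of the original file): the kind of caller
 * reclaim_clean_pages_from_list() is written for - code that already holds
 * a private list of isolated pages (e.g. a contiguous-range allocator
 * emptying a pageblock) and would rather drop clean file pages than migrate
 * them.  example_drop_clean_pages() is hypothetical.
 */
#if 0	/* example only */
static void example_drop_clean_pages(struct zone *zone,
				     struct list_head *isolated)
{
	unsigned long nr_dropped;

	/* reclaims clean file pages on the list in place */
	nr_dropped = reclaim_clean_pages_from_list(zone, isolated);

	/*
	 * whatever survives on "isolated" must still be migrated or
	 * handed back via putback_lru_page() by the caller
	 */
	pr_debug("dropped %lu clean pages\n", nr_dropped);
}
#endif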
/*
 * Attempt to remove the specified page from its LRU.  Only take this page
 * if it is of the appropriate PageActive status.  Pages which are being
 * freed elsewhere are also ignored.
 *
 * page:	page to consider
 * mode:	one of the LRU isolation modes defined above
 *
 * returns 0 on success, -ve errno on failure.
 */
int __isolate_lru_page(struct page *page, isolate_mode_t mode)
{
	int ret = -EINVAL;

	/* Only take pages on the LRU. */
	if (!PageLRU(page))
		return ret;

	/* Compaction should not handle unevictable pages but CMA can do so */
	if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
		return ret;

	ret = -EBUSY;

	/*
	 * To minimise LRU disruption, the caller can indicate that it only
	 * wants to isolate pages it will be able to operate on without
	 * blocking - clean pages for the most part.
	 *
	 * ISOLATE_CLEAN means that only clean pages should be isolated. This
	 * is used by reclaim when it cannot write to backing storage.
	 *
	 * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
	 * that it is possible to migrate without blocking.
	 */
	if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
		/* All the caller can do on PageWriteback is block */
		if (PageWriteback(page))
			return ret;

		if (PageDirty(page)) {
			struct address_space *mapping;

			/* ISOLATE_CLEAN means only clean pages */
			if (mode & ISOLATE_CLEAN)
				return ret;

			/*
			 * Only pages without mappings or that have a
			 * ->migratepage callback are possible to migrate
			 * without blocking
			 */
			mapping = page_mapping(page);
			if (mapping && !mapping->a_ops->migratepage)
				return ret;
		}
	}

	if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
		return ret;

	if (likely(get_page_unless_zero(page))) {
		/*
		 * Be careful not to clear PageLRU until after we're
		 * sure the page is not being freed elsewhere -- the
		 * page release code relies on it.
		 */
		ClearPageLRU(page);
		ret = 0;
	}

	return ret;
}

/*
 * zone->lru_lock is heavily contended.  Some of the functions that
 * shrink the lists perform better by taking out a batch of pages
 * and working on them outside the LRU lock.
 *
 * For pagecache intensive workloads, this function is the hottest
 * spot in the kernel (apart from copy_*_user functions).
 *
 * Appropriate locks must be held before calling this function.
 *
 * @nr_to_scan:	The number of pages to look through on the list.
 * @lruvec:	The LRU vector to pull pages from.
 * @dst:	The temp list to put pages on to.
 * @nr_scanned:	The number of pages that were scanned.
 * @sc:		The scan_control struct for this reclaim session
 * @mode:	One of the LRU isolation modes
 * @lru:	LRU list id for isolating
 *
 * returns how many pages were moved onto *@dst.
 */
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
		struct lruvec *lruvec, struct list_head *dst,
		unsigned long *nr_scanned, struct scan_control *sc,
		isolate_mode_t mode, enum lru_list lru)
{
	struct list_head *src = &lruvec->lists[lru];
	unsigned long nr_taken = 0;
	unsigned long scan;

	for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
		struct page *page;
		int nr_pages;

		page = lru_to_page(src);
		prefetchw_prev_lru_page(page, src, flags);

		VM_BUG_ON(!PageLRU(page));

		switch (__isolate_lru_page(page, mode)) {
		case 0:
			nr_pages = hpage_nr_pages(page);
			mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
			list_move(&page->lru, dst);
			nr_taken += nr_pages;
			break;

		case -EBUSY:
			/* else it is being freed elsewhere */
			list_move(&page->lru, src);
			continue;

		default:
			BUG();
		}
	}

	*nr_scanned = scan;
	trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
				    nr_taken, mode, is_file_lru(lru));
	return nr_taken;
}
/**
 * isolate_lru_page - tries to isolate a page from its LRU list
 * @page: page to isolate from its LRU list
 *
 * Isolates a @page from an LRU list, clears PageLRU and adjusts the
 * vmstat statistic corresponding to whatever LRU list the page was on.
 *
 * Returns 0 if the page was removed from an LRU list.
 * Returns -EBUSY if the page was not on an LRU list.
 *
 * The returned page will have PageLRU() cleared.  If it was found on
 * the active list, it will have PageActive set.  If it was found on
 * the unevictable list, it will have the PageUnevictable bit set. That flag
 * may need to be cleared by the caller before letting the page go.
 *
 * The vmstat statistic corresponding to the list on which the page was
 * found will be decremented.
 *
 * Restrictions:
 * (1) Must be called with an elevated refcount on the page. This is a
 *     fundamental difference from isolate_lru_pages (which is called
 *     without a stable reference).
 * (2) the lru_lock must not be held.
 * (3) interrupts must be enabled.
 */
int isolate_lru_page(struct page *page)
{
	int ret = -EBUSY;

	VM_BUG_ON(!page_count(page));

	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;

		spin_lock_irq(&zone->lru_lock);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		if (PageLRU(page)) {
			int lru = page_lru(page);
			get_page(page);
			ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, lru);
			ret = 0;
		}
		spin_unlock_irq(&zone->lru_lock);
	}
	return ret;
}
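/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * isolate_lru_page() with putback_lru_page(), honouring the restrictions
 * documented above (caller holds a reference, no lru_lock, IRQs enabled).
 * example_process_page() is hypothetical.
 */
#if 0	/* example only */
static int example_take_page_off_lru(struct page *page)
{
	int err;

	get_page(page);			/* (1): elevated refcount */
	err = isolate_lru_page(page);	/* may fail with -EBUSY */
	if (err) {
		put_page(page);
		return err;
	}

	example_process_page(page);	/* page is off the LRU here */

	/* re-adds the page to the right list, drops the isolation ref */
	putback_lru_page(page);
	put_page(page);			/* drop our own reference */
	return 0;
}
#endif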
/*
 * A direct reclaimer may isolate SWAP_CLUSTER_MAX pages from the LRU list and
 * then get rescheduled. When there are massive number of tasks doing page
 * allocation, such sleeping direct reclaimers may keep piling up on each CPU,
 * the LRU list will go small and be scanned faster than necessary, leading to
 * unnecessary swapping, thrashing and OOM.
 */
static int too_many_isolated(struct zone *zone, int file,
			     struct scan_control *sc)
{
	unsigned long inactive, isolated;

	if (current_is_kswapd())
		return 0;

	if (!global_reclaim(sc))
		return 0;

	if (file) {
		inactive = zone_page_state(zone, NR_INACTIVE_FILE);
		isolated = zone_page_state(zone, NR_ISOLATED_FILE);
	} else {
		inactive = zone_page_state(zone, NR_INACTIVE_ANON);
		isolated = zone_page_state(zone, NR_ISOLATED_ANON);
	}

	/*
	 * GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
	 * won't get blocked by normal direct-reclaimers, forming a circular
	 * deadlock.
	 */
	if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
		inactive >>= 3;

	return isolated > inactive;
}

static noinline_for_stack void
putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	struct zone *zone = lruvec_zone(lruvec);
	LIST_HEAD(pages_to_free);

	/*
	 * Put back any unfreeable pages.
	 */
	while (!list_empty(page_list)) {
		struct page *page = lru_to_page(page_list);
		int lru;

		VM_BUG_ON(PageLRU(page));
		list_del(&page->lru);
		if (unlikely(!page_evictable(page))) {
			spin_unlock_irq(&zone->lru_lock);
			putback_lru_page(page);
			spin_lock_irq(&zone->lru_lock);
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		SetPageLRU(page);
		lru = page_lru(page);
		add_page_to_lru_list(page, lruvec, lru);

		if (is_active_lru(lru)) {
			int file = is_file_lru(lru);
			int numpages = hpage_nr_pages(page);
			reclaim_stat->recent_rotated[file] += numpages;
		}
		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, &pages_to_free);
		}
	}

	/*
	 * To save our caller's stack, now use input list for pages to free.
	 */
	list_splice(&pages_to_free, page_list);
}

/*
 * shrink_inactive_list() is a helper for shrink_zone().  It returns the number
 * of reclaimed pages
 */
static noinline_for_stack unsigned long
shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
		     struct scan_control *sc, enum lru_list lru)
{
	LIST_HEAD(page_list);
	unsigned long nr_scanned;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_taken;
	unsigned long nr_dirty = 0;
	unsigned long nr_writeback = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	while (unlikely(too_many_isolated(zone, file, sc))) {
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/* We are about to die and free our memory. Return now. */
		if (fatal_signal_pending(current))
			return SWAP_CLUSTER_MAX;
	}

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
				     &nr_scanned, sc, isolate_mode, lru);

	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);

	if (global_reclaim(sc)) {
		zone->pages_scanned += nr_scanned;
		if (current_is_kswapd())
			__count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
		else
			__count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
	}
	spin_unlock_irq(&zone->lru_lock);

	if (nr_taken == 0)
		return 0;

	nr_reclaimed = shrink_page_list(&page_list, zone, sc, TTU_UNMAP,
					&nr_dirty, &nr_writeback, false);

	spin_lock_irq(&zone->lru_lock);

	reclaim_stat->recent_scanned[file] += nr_taken;

	if (global_reclaim(sc)) {
		if (current_is_kswapd())
			__count_zone_vm_events(PGSTEAL_KSWAPD, zone,
					       nr_reclaimed);
		else
			__count_zone_vm_events(PGSTEAL_DIRECT, zone,
					       nr_reclaimed);
	}

	putback_inactive_pages(lruvec, &page_list);

	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);

	spin_unlock_irq(&zone->lru_lock);

	free_hot_cold_page_list(&page_list, 1);

	/*
	 * If reclaim is isolating dirty pages under writeback, it implies
	 * that the long-lived page allocation rate is exceeding the page
	 * laundering rate. Either the global limits are not being effective
	 * at throttling processes due to the page distribution throughout
	 * zones or there is heavy usage of a slow backing device. The
	 * only option is to throttle from reclaim context which is not ideal
	 * as there is no guarantee the dirtying process is throttled in the
	 * same way balance_dirty_pages() manages.
	 *
	 * This scales the number of dirty pages that must be under writeback
	 * before throttling depending on priority. It is a simple backoff
	 * function that has the most effect in the range DEF_PRIORITY to
	 * DEF_PRIORITY-2, the range in which reclaim is considered to be in
	 * trouble.
	 *
	 * DEF_PRIORITY   100% isolated pages must be PageWriteback to throttle
	 * DEF_PRIORITY-1  50% must be PageWriteback
	 * DEF_PRIORITY-2  25% must be PageWriteback, kswapd in trouble
	 * ...
	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
	 * isolated page is PageWriteback
	 */
	if (nr_writeback && nr_writeback >=
			(nr_taken >> (DEF_PRIORITY - sc->priority)))
		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);

	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
		zone_idx(zone),
		nr_scanned, nr_reclaimed,
		sc->priority,
		trace_shrink_flags(file));
	return nr_reclaimed;
}
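/*
 * Worked example (not part of the original file) of the throttling check at
 * the end of shrink_inactive_list().  With DEF_PRIORITY == 12 and
 * nr_taken == 32 isolated pages:
 *
 *	priority 12: threshold = 32 >> 0 = 32 (all pages under writeback)
 *	priority 11: threshold = 32 >> 1 = 16 (half)
 *	priority 10: threshold = 32 >> 2 =  8 (a quarter)
 *	priority  6: threshold = 32 >> 6 =  0 (any writeback page throttles)
 *
 * so the deeper reclaim has to dig, the less writeback it tolerates before
 * calling wait_iff_congested().
 */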
/*
 * This moves pages from the active list to the inactive list.
 *
 * We move them the other way if the page is referenced by one or more
 * processes, from rmap.
 *
 * If the pages are mostly unmapped, the processing is fast and it is
 * appropriate to hold zone->lru_lock across the whole operation.  But if
 * the pages are mapped, the processing is slow (page_referenced()) so we
 * should drop zone->lru_lock around each page.  It's impossible to balance
 * this, so instead we remove the pages from the LRU while processing them.
 * It is safe to rely on PG_active against the non-LRU pages in here because
 * nobody will play with that bit on a non-LRU page.
 *
 * The downside is that we have to touch page->_count against each page.
 * But we had to alter page->flags anyway.
 */
static void move_active_pages_to_lru(struct lruvec *lruvec,
				     struct list_head *list,
				     struct list_head *pages_to_free,
				     enum lru_list lru)
{
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long pgmoved = 0;
	struct page *page;
	int nr_pages;

	while (!list_empty(list)) {
		page = lru_to_page(list);
		lruvec = mem_cgroup_page_lruvec(page, zone);

		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);

		nr_pages = hpage_nr_pages(page);
		mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
		list_move(&page->lru, &lruvec->lists[lru]);
		pgmoved += nr_pages;

		if (put_page_testzero(page)) {
			__ClearPageLRU(page);
			__ClearPageActive(page);
			del_page_from_lru_list(page, lruvec, lru);

			if (unlikely(PageCompound(page))) {
				spin_unlock_irq(&zone->lru_lock);
				(*get_compound_page_dtor(page))(page);
				spin_lock_irq(&zone->lru_lock);
			} else
				list_add(&page->lru, pages_to_free);
		}
	}
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
	if (!is_active_lru(lru))
		__count_vm_events(PGDEACTIVATE, pgmoved);
}

static void shrink_active_list(unsigned long nr_to_scan,
			       struct lruvec *lruvec,
			       struct scan_control *sc,
			       enum lru_list lru)
{
	unsigned long nr_taken;
	unsigned long nr_scanned;
	unsigned long vm_flags;
	LIST_HEAD(l_hold);	/* The pages which were snipped off */
	LIST_HEAD(l_active);
	LIST_HEAD(l_inactive);
	struct page *page;
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	unsigned long nr_rotated = 0;
	isolate_mode_t isolate_mode = 0;
	int file = is_file_lru(lru);
	struct zone *zone = lruvec_zone(lruvec);

	lru_add_drain();

	if (!sc->may_unmap)
		isolate_mode |= ISOLATE_UNMAPPED;
	if (!sc->may_writepage)
		isolate_mode |= ISOLATE_CLEAN;

	spin_lock_irq(&zone->lru_lock);

	nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
				     &nr_scanned, sc, isolate_mode, lru);
	if (global_reclaim(sc))
		zone->pages_scanned += nr_scanned;

	reclaim_stat->recent_scanned[file] += nr_taken;

	__count_zone_vm_events(PGREFILL, zone, nr_scanned);
	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	while (!list_empty(&l_hold)) {
		cond_resched();
		page = lru_to_page(&l_hold);
		list_del(&page->lru);

		if (unlikely(!page_evictable(page))) {
			putback_lru_page(page);
			continue;
		}

		if (unlikely(buffer_heads_over_limit)) {
			if (page_has_private(page) && trylock_page(page)) {
				if (page_has_private(page))
					try_to_release_page(page, 0);
				unlock_page(page);
			}
		}

		if (page_referenced(page, 0, sc->target_mem_cgroup,
				    &vm_flags)) {
			nr_rotated += hpage_nr_pages(page);
			/*
			 * Identify referenced, file-backed active pages and
			 * give them one more trip around the active list. So
			 * that executable code gets better chances to stay in
			 * memory under moderate memory pressure.  Anon pages
			 * are not likely to be evicted by use-once streaming
			 * IO, plus JVM can create lots of anon VM_EXEC pages,
			 * so we ignore them here.
			 */
			if ((vm_flags & VM_EXEC) && page_is_file_cache(page)) {
				list_add(&page->lru, &l_active);
				continue;
			}
		}

		ClearPageActive(page);	/* we are de-activating */
		list_add(&page->lru, &l_inactive);
	}

	/*
	 * Move pages back to the lru list.
	 */
	spin_lock_irq(&zone->lru_lock);
	/*
	 * Count referenced pages from currently used mappings as rotated,
	 * even though only some of them are actually re-activated.  This
	 * helps balance scan pressure between file and anonymous pages in
	 * get_scan_ratio.
	 */
	reclaim_stat->recent_rotated[file] += nr_rotated;

	move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
	move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
	spin_unlock_irq(&zone->lru_lock);

	free_hot_cold_page_list(&l_hold, 1);
}
#ifdef CONFIG_SWAP
static int inactive_anon_is_low_global(struct zone *zone)
{
	unsigned long active, inactive;

	active = zone_page_state(zone, NR_ACTIVE_ANON);
	inactive = zone_page_state(zone, NR_INACTIVE_ANON);

	if (inactive * zone->inactive_ratio < active)
		return 1;

	return 0;
}

/**
 * inactive_anon_is_low - check if anonymous pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * Returns true if the zone does not have enough inactive anon pages,
 * meaning some active anon pages need to be deactivated.
 */
static int inactive_anon_is_low(struct lruvec *lruvec)
{
	/*
	 * If we don't have swap space, anonymous page deactivation
	 * is pointless.
	 */
	if (!total_swap_pages)
		return 0;

	if (!mem_cgroup_disabled())
		return mem_cgroup_inactive_anon_is_low(lruvec);

	return inactive_anon_is_low_global(lruvec_zone(lruvec));
}
#else
static inline int inactive_anon_is_low(struct lruvec *lruvec)
{
	return 0;
}
#endif

/**
 * inactive_file_is_low - check if file pages need to be deactivated
 * @lruvec: LRU vector to check
 *
 * When the system is doing streaming IO, memory pressure here
 * ensures that active file pages get deactivated, until more
 * than half of the file pages are on the inactive list.
 *
 * Once we get to that situation, protect the system's working
 * set from being evicted by disabling active file page aging.
 *
 * This uses a different ratio than the anonymous pages, because
 * the page cache uses a use-once replacement algorithm.
 */
static int inactive_file_is_low(struct lruvec *lruvec)
{
	unsigned long inactive;
	unsigned long active;

	inactive = get_lru_size(lruvec, LRU_INACTIVE_FILE);
	active = get_lru_size(lruvec, LRU_ACTIVE_FILE);

	return active > inactive;
}

static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
{
	if (is_file_lru(lru))
		return inactive_file_is_low(lruvec);
	else
		return inactive_anon_is_low(lruvec);
}

static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
				 struct lruvec *lruvec, struct scan_control *sc)
{
	if (is_active_lru(lru)) {
		if (inactive_list_is_low(lruvec, lru))
			shrink_active_list(nr_to_scan, lruvec, sc, lru);
		return 0;
	}

	return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
}

static int vmscan_swappiness(struct scan_control *sc)
{
	if (global_reclaim(sc))
		return vm_swappiness;
	return mem_cgroup_swappiness(sc->target_mem_cgroup);
}
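/*
 * Worked example (not part of the original file) of the balance checks
 * above.  For file pages the target is a 1:1 split: with 600MB of active
 * file and 400MB of inactive file, active > inactive, so
 * inactive_file_is_low() returns true and shrink_list() deactivates some
 * active file pages.  For anon the zone's precomputed inactive_ratio is
 * used instead; assuming inactive_ratio == 3 (it is sized elsewhere based
 * on zone size), roughly 1GB of active anon is tolerated per ~341MB of
 * inactive anon before deactivation kicks in.
 */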
enum scan_balance {
	SCAN_EQUAL,
	SCAN_FRACT,
	SCAN_ANON,
	SCAN_FILE,
};

/*
 * Determine how aggressively the anon and file LRU lists should be
 * scanned.  The relative value of each set of LRU lists is determined
 * by looking at the fraction of the pages scanned we did rotate back
 * onto the active list instead of evict.
 *
 * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
 * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
 */
static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
			   unsigned long *nr)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
	u64 fraction[2];
	u64 denominator = 0;	/* gcc */
	struct zone *zone = lruvec_zone(lruvec);
	unsigned long anon_prio, file_prio;
	enum scan_balance scan_balance;
	unsigned long anon, file, free;
	bool force_scan = false;
	unsigned long ap, fp;
	enum lru_list lru;

	/*
	 * If the zone or memcg is small, nr[l] can be 0.  This
	 * results in no scanning on this priority and a potential
	 * priority drop.  Global direct reclaim can go to the next
	 * zone and tends to have no problems. Global kswapd is for
	 * zone balancing and it needs to scan a minimum amount. When
	 * reclaiming for a memcg, a priority drop can cause high
	 * latencies, so it's better to scan a minimum amount there as
	 * well.
	 */
	if (current_is_kswapd() && zone->all_unreclaimable)
		force_scan = true;
	if (!global_reclaim(sc))
		force_scan = true;

	/* If we have no swap space, do not bother scanning anon pages. */
	if (!sc->may_swap || (get_nr_swap_pages() <= 0)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Global reclaim will swap to prevent OOM even with no
	 * swappiness, but memcg users want to use this knob to
	 * disable swapping for individual groups completely when
	 * using the memory controller's swap limit feature would be
	 * too expensive.
	 */
	if (!global_reclaim(sc) && !vmscan_swappiness(sc)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	/*
	 * Do not apply any pressure balancing cleverness when the
	 * system is close to OOM, scan both anon and file equally
	 * (unless the swappiness setting disagrees with swapping).
	 */
	if (!sc->priority && vmscan_swappiness(sc)) {
		scan_balance = SCAN_EQUAL;
		goto out;
	}

	anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
		get_lru_size(lruvec, LRU_INACTIVE_ANON);
	file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
		get_lru_size(lruvec, LRU_INACTIVE_FILE);

	/*
	 * If it's foreseeable that reclaiming the file cache won't be
	 * enough to get the zone back into a desirable shape, we have
	 * to swap.  Better start now and leave the - probably heavily
	 * thrashing - remaining file pages alone.
	 */
	if (global_reclaim(sc)) {
		free = zone_page_state(zone, NR_FREE_PAGES);
		if (unlikely(file + free <= high_wmark_pages(zone))) {
			scan_balance = SCAN_ANON;
			goto out;
		}
	}

	/*
	 * There is enough inactive page cache, do not reclaim
	 * anything from the anonymous working set right now.
	 */
	if (!inactive_file_is_low(lruvec)) {
		scan_balance = SCAN_FILE;
		goto out;
	}

	scan_balance = SCAN_FRACT;

	/*
	 * With swappiness at 100, anonymous and file have the same priority.
	 * This scanning priority is essentially the inverse of IO cost.
	 */
	anon_prio = vmscan_swappiness(sc);
	file_prio = 200 - anon_prio;

	/*
	 * OK, so we have swap space and a fair amount of page cache
	 * pages.  We use the recently rotated / recently scanned
	 * ratios to determine how valuable each cache is.
	 *
	 * Because workloads change over time (and to avoid overflow)
	 * we keep these statistics as a floating average, which ends
	 * up weighing recent references more than old ones.
	 *
	 * anon in [0], file in [1]
	 */
	spin_lock_irq(&zone->lru_lock);
	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
		reclaim_stat->recent_scanned[0] /= 2;
		reclaim_stat->recent_rotated[0] /= 2;
	}

	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
		reclaim_stat->recent_scanned[1] /= 2;
		reclaim_stat->recent_rotated[1] /= 2;
	}

	/*
	 * The amount of pressure on anon vs file pages is inversely
	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
	 */
	ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
	ap /= reclaim_stat->recent_rotated[0] + 1;

	fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
	fp /= reclaim_stat->recent_rotated[1] + 1;
	spin_unlock_irq(&zone->lru_lock);

	fraction[0] = ap;
	fraction[1] = fp;
	denominator = ap + fp + 1;
out:
	for_each_evictable_lru(lru) {
		int file = is_file_lru(lru);
		unsigned long size;
		unsigned long scan;

		size = get_lru_size(lruvec, lru);
		scan = size >> sc->priority;

		if (!scan && force_scan)
			scan = min(size, SWAP_CLUSTER_MAX);

		switch (scan_balance) {
		case SCAN_EQUAL:
			/* Scan lists relative to size */
			break;
		case SCAN_FRACT:
			/*
			 * Scan types proportional to swappiness and
			 * their relative recent reclaim efficiency.
			 */
			scan = div64_u64(scan * fraction[file], denominator);
			break;
		case SCAN_FILE:
		case SCAN_ANON:
			/* Scan one type exclusively */
			if ((scan_balance == SCAN_FILE) != file)
				scan = 0;
			break;
		default:
			/* Look ma, no brain */
			BUG();
		}
		nr[lru] = scan;
	}
}

/*
 * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
 */
static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
{
	unsigned long nr[NR_LRU_LISTS];
	unsigned long nr_to_scan;
	enum lru_list lru;
	unsigned long nr_reclaimed = 0;
	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
	struct blk_plug plug;

	get_scan_count(lruvec, sc, nr);

	blk_start_plug(&plug);
	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
					nr[LRU_INACTIVE_FILE]) {
		for_each_evictable_lru(lru) {
			if (nr[lru]) {
				nr_to_scan = min(nr[lru], SWAP_CLUSTER_MAX);
				nr[lru] -= nr_to_scan;

				nr_reclaimed += shrink_list(lru, nr_to_scan,
							    lruvec, sc);
			}
		}
		/*
		 * On large memory systems, scan >> priority can become
		 * really large. This is fine for the starting priority;
		 * we want to put equal scanning pressure on each zone.
		 * However, if the VM has a harder time of freeing pages,
		 * with multiple processes reclaiming pages, the total
		 * freeing target can get unreasonably large.
		 */
		if (nr_reclaimed >= nr_to_reclaim &&
		    sc->priority < DEF_PRIORITY)
			break;
	}
	blk_finish_plug(&plug);
	sc->nr_reclaimed += nr_reclaimed;

	/*
	 * Even if we did not try to evict anon pages at all, we want to
	 * rebalance the anon lru active/inactive ratio.
	 */
	if (inactive_anon_is_low(lruvec))
		shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
				   sc, LRU_ACTIVE_ANON);

	throttle_vm_writeout(sc->gfp_mask);
}
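/*
 * Worked example (not part of the original file) of the SCAN_FRACT math in
 * get_scan_count(), with assumed sample numbers: swappiness 60 gives
 * anon_prio = 60, file_prio = 140.  Suppose recent_scanned/recent_rotated
 * are 1000/500 for anon and 1000/100 for file:
 *
 *	ap = 60  * (1000 + 1) / (500 + 1) ~=  119
 *	fp = 140 * (1000 + 1) / (100 + 1) ~= 1387
 *
 * denominator = 119 + 1387 + 1 = 1507, so file pages receive roughly
 * 1387/1507 ~= 92% of the scan pressure: the list that rotates back less
 * (is referenced less) and is cheaper to evict gets scanned harder.
 */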
/* Use reclaim/compaction for costly allocs or under memory pressure */
static bool in_reclaim_compaction(struct scan_control *sc)
{
	if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
			 sc->priority < DEF_PRIORITY - 2))
		return true;

	return false;
}

/*
 * Reclaim/compaction is used for high-order allocation requests. It reclaims
 * order-0 pages before compacting the zone. should_continue_reclaim() returns
 * true if more pages should be reclaimed such that when the page allocator
 * calls try_to_compact_zone() that it will have enough free pages to succeed.
 * It will give up earlier than that if there is difficulty reclaiming pages.
 */
static inline bool should_continue_reclaim(struct zone *zone,
					unsigned long nr_reclaimed,
					unsigned long nr_scanned,
					struct scan_control *sc)
{
	unsigned long pages_for_compaction;
	unsigned long inactive_lru_pages;

	/* If not in reclaim/compaction mode, stop */
	if (!in_reclaim_compaction(sc))
		return false;

	/* Consider stopping depending on scan and reclaim activity */
	if (sc->gfp_mask & __GFP_REPEAT) {
		/*
		 * For __GFP_REPEAT allocations, stop reclaiming if the
		 * full LRU list has been scanned and we are still failing
		 * to reclaim pages. This full LRU scan is potentially
		 * expensive but a __GFP_REPEAT caller really wants to succeed
		 */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/*
		 * For non-__GFP_REPEAT allocations which can presumably
		 * fail without consequence, stop if we failed to reclaim
		 * any pages from the last SWAP_CLUSTER_MAX number of
		 * pages that were scanned. This returns to the caller
		 * faster at the risk that reclaim/compaction and the
		 * resulting allocation attempt fail.
		 */
		if (!nr_reclaimed)
			return false;
	}

	/*
	 * If we have not reclaimed enough pages for compaction and the
	 * inactive lists are large enough, continue reclaiming
	 */
	pages_for_compaction = (2UL << sc->order);
	inactive_lru_pages = zone_page_state(zone, NR_INACTIVE_FILE);
	if (get_nr_swap_pages() > 0)
		inactive_lru_pages += zone_page_state(zone, NR_INACTIVE_ANON);
	if (sc->nr_reclaimed < pages_for_compaction &&
			inactive_lru_pages > pages_for_compaction)
		return true;

	/* If compaction would go ahead or the allocation would succeed, stop */
	switch (compaction_suitable(zone, sc->order)) {
	case COMPACT_PARTIAL:
	case COMPACT_CONTINUE:
		return false;
	default:
		return true;
	}
}

static void shrink_zone(struct zone *zone, struct scan_control *sc)
{
	unsigned long nr_reclaimed, nr_scanned;

	do {
		struct mem_cgroup *root = sc->target_mem_cgroup;
		struct mem_cgroup_reclaim_cookie reclaim = {
			.zone = zone,
			.priority = sc->priority,
		};
		struct mem_cgroup *memcg;

		nr_reclaimed = sc->nr_reclaimed;
		nr_scanned = sc->nr_scanned;

		memcg = mem_cgroup_iter(root, NULL, &reclaim);
		do {
			struct lruvec *lruvec;

			lruvec = mem_cgroup_zone_lruvec(zone, memcg);

			shrink_lruvec(lruvec, sc);

			/*
			 * Direct reclaim and kswapd have to scan all memory
			 * cgroups to fulfill the overall scan target for the
			 * zone.
			 *
			 * Limit reclaim, on the other hand, only cares about
			 * nr_to_reclaim pages to be reclaimed and it will
			 * retry with decreasing priority if one round over the
			 * whole hierarchy is not sufficient.
			 */
			if (!global_reclaim(sc) &&
					sc->nr_reclaimed >= sc->nr_to_reclaim) {
				mem_cgroup_iter_break(root, memcg);
				break;
			}
			memcg = mem_cgroup_iter(root, memcg, &reclaim);
		} while (memcg);

		vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
			   sc->nr_scanned - nr_scanned,
			   sc->nr_reclaimed - nr_reclaimed);

	} while (should_continue_reclaim(zone, sc->nr_reclaimed - nr_reclaimed,
					 sc->nr_scanned - nr_scanned, sc));
}

/* Returns true if compaction should go ahead for a high-order request */
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
	unsigned long balance_gap, watermark;
	bool watermark_ok;

	/* Do not consider compaction for orders reclaim is meant to satisfy */
	if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * Compaction takes time to run and there are potentially other
	 * callers using the pages just freed. Continue reclaiming until
	 * there is a buffer of free pages available to give compaction
	 * a reasonable chance of completing and allocating the page
	 */
	balance_gap = min(low_wmark_pages(zone),
		(zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
			KSWAPD_ZONE_BALANCE_GAP_RATIO);
	watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);

	/*
	 * If compaction is deferred, reclaim up to a point where
	 * compaction will have a chance of success when re-enabled
	 */
	if (compaction_deferred(zone, sc->order))
		return watermark_ok;

	/* If compaction is not ready to start, keep reclaiming */
	if (!compaction_suitable(zone, sc->order))
		return false;

	return watermark_ok;
}
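/*
 * Worked example (not part of the original file) of compaction_ready()'s
 * watermark, taking KSWAPD_ZONE_BALANCE_GAP_RATIO as 100 (its value in
 * kernels of this vintage) and assuming a zone of 1048576 managed pages
 * (4GB), a high watermark of 12288 pages, a low watermark of 8192 pages
 * and an order-9 request:
 *
 *	balance_gap = min(8192, (1048576 + 99) / 100)
 *		    = min(8192, 10486) = 8192 pages
 *	watermark   = 12288 + 8192 + (2 << 9) = 21504 pages (~84MB)
 *
 * i.e. reclaim keeps going until there is a comfortable buffer above the
 * high watermark before the caller is told to try compaction instead.
 */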
/*
 * This is the direct reclaim path, for page-allocating processes.  We only
 * try to reclaim pages from zones which will satisfy the caller's allocation
 * request.
 *
 * We reclaim from a zone even if that zone is over high_wmark_pages(zone).
 * Because:
 * a) The caller may be trying to free *extra* pages to satisfy a higher-order
 *    allocation or
 * b) The target zone may be at high_wmark_pages(zone) but the lower zones
 *    must go *over* high_wmark_pages(zone) to satisfy the `incremental min'
 *    zone defense algorithm.
 *
 * If a zone is deemed to be full of pinned pages then just give it a light
 * scan then give up on it.
 *
 * This function returns true if a zone is being reclaimed for a costly
 * high-order allocation and compaction is ready to begin. This indicates to
 * the caller that it should consider retrying the allocation instead of
 * further reclaim.
 */
static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;
	unsigned long nr_soft_reclaimed;
	unsigned long nr_soft_scanned;
	bool aborted_reclaim = false;

	/*
	 * If the number of buffer_heads in the machine exceeds the maximum
	 * allowed level, force direct reclaim to scan the highmem zone as
	 * highmem pages could be pinning lowmem pages storing buffer_heads
	 */
	if (buffer_heads_over_limit)
		sc->gfp_mask |= __GFP_HIGHMEM;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(sc->gfp_mask), sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		/*
		 * Take care memory controller reclaiming has small influence
		 * to global LRU.
		 */
		if (global_reclaim(sc)) {
			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
				continue;
			if (zone->all_unreclaimable &&
					sc->priority != DEF_PRIORITY)
				continue;	/* Let kswapd poll it */
			if (IS_ENABLED(CONFIG_COMPACTION)) {
				/*
				 * If we already have plenty of memory free for
				 * compaction in this zone, don't free any more.
				 * Even though compaction is invoked for any
				 * non-zero order, only frequent costly order
				 * reclamation is disruptive enough to become a
				 * noticeable problem, like transparent huge
				 * page allocations.
				 */
				if (compaction_ready(zone, sc)) {
					aborted_reclaim = true;
					continue;
				}
			}
			/*
			 * This steals pages from memory cgroups over softlimit
			 * and returns the number of reclaimed pages and
			 * scanned pages. This works for global memory pressure
			 * and balancing, not for a memcg's limit.
			 */
			nr_soft_scanned = 0;
			nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone,
						sc->order, sc->gfp_mask,
						&nr_soft_scanned);
			sc->nr_reclaimed += nr_soft_reclaimed;
			sc->nr_scanned += nr_soft_scanned;
			/* need some check for avoid more shrink_zone() */
		}

		shrink_zone(zone, sc);
	}

	return aborted_reclaim;
}

static unsigned long zone_reclaimable_pages(struct zone *zone)
{
	int nr;

	nr = zone_page_state(zone, NR_ACTIVE_FILE) +
	     zone_page_state(zone, NR_INACTIVE_FILE);

	if (get_nr_swap_pages() > 0)
		nr += zone_page_state(zone, NR_ACTIVE_ANON) +
		      zone_page_state(zone, NR_INACTIVE_ANON);

	return nr;
}

static bool zone_reclaimable(struct zone *zone)
{
	return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
}

/* All zones in zonelist are unreclaimable? */
static bool all_unreclaimable(struct zonelist *zonelist,
		struct scan_control *sc)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist_nodemask(zone, z, zonelist,
			gfp_zone(sc->gfp_mask), sc->nodemask) {
		if (!populated_zone(zone))
			continue;
		if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
			continue;
		if (!zone->all_unreclaimable)
			return false;
	}

	return true;
}
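/*
 * Note on the heuristic above (not part of the original file):
 * zone_reclaimable() treats a zone as still worth scanning until it has been
 * scanned six times over without progress, e.g. a zone with 100000
 * reclaimable pages stops looking reclaimable once zone->pages_scanned
 * reaches 600000 (the counter is reset when pages are freed back to the
 * zone).  kswapd uses this, elsewhere in this file, to decide when to set
 * zone->all_unreclaimable, which all_unreclaimable() checks above so the
 * direct-reclaim path can conclude that only the OOM killer can help.
 */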
/*
 * This is the main entry point to direct page reclaim.
 *
 * If a full scan of the inactive list fails to free enough memory then we
 * are "out of memory" and something needs to be killed.
 *
 * If the caller is !__GFP_FS then the probability of a failure is reasonably
 * high - the zone may be full of dirty or under-writeback pages, which this
 * caller can't do much about.  We kick the writeback threads and take explicit
 * naps in the hope that some of these pages can be written.  But if the
 * allocating task holds filesystem locks which prevent writeout this might not
 * work, and the allocation attempt will fail.
 *
 * returns:	0, if no pages reclaimed
 * 		else, the number of pages reclaimed
 */
static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
					struct scan_control *sc,
					struct shrink_control *shrink)
{
	unsigned long total_scanned = 0;
	struct reclaim_state *reclaim_state = current->reclaim_state;
	struct zoneref *z;
	struct zone *zone;
	unsigned long writeback_threshold;
	bool aborted_reclaim;

	delayacct_freepages_start();

	if (global_reclaim(sc))
		count_vm_event(ALLOCSTALL);

	do {
		vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
				sc->priority);
		sc->nr_scanned = 0;
		aborted_reclaim = shrink_zones(zonelist, sc);

		/*
		 * Don't shrink slabs when reclaiming memory from
		 * over limit cgroups
		 */
		if (global_reclaim(sc)) {
			unsigned long lru_pages = 0;
			for_each_zone_zonelist(zone, z, zonelist,
					gfp_zone(sc->gfp_mask)) {
				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
					continue;

				lru_pages += zone_reclaimable_pages(zone);
			}

			shrink_slab(shrink, sc->nr_scanned, lru_pages);
			if (reclaim_state) {
				sc->nr_reclaimed += reclaim_state->reclaimed_slab;
				reclaim_state->reclaimed_slab = 0;
			}
		}
		total_scanned += sc->nr_scanned;
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			goto out;

		/*
		 * If we're getting trouble reclaiming, start doing
		 * writepage even in laptop mode.
		 */
		if (sc->priority < DEF_PRIORITY - 2)
			sc->may_writepage = 1;

		/*
		 * Try to write back as many pages as we just scanned.  This
		 * tends to cause slow streaming writers to write data to the
		 * disk smoothly, at the dirtying rate, which is nice.   But
		 * that's undesirable in laptop mode, where we *want* lumpy
		 * writeout.  So in laptop mode, write out the whole world.
		 */
		writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
		if (total_scanned > writeback_threshold) {
			wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
						WB_REASON_TRY_TO_FREE_PAGES);
			sc->may_writepage = 1;
		}

		/* Take a nap, wait for some writeback to complete */
		if (!sc->hibernation_mode && sc->nr_scanned &&
		    sc->priority < DEF_PRIORITY - 2) {
			struct zone *preferred_zone;

			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
						&cpuset_current_mems_allowed,
						&preferred_zone);
			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
		}
	} while (--sc->priority >= 0);

out:
	delayacct_freepages_end();

	if (sc->nr_reclaimed)
		return sc->nr_reclaimed;

	/*
	 * As hibernation is going on, kswapd is frozen so that it can't mark
	 * the zone into all_unreclaimable. Thus bypassing all_unreclaimable
	 * check.
	 */
	if (oom_killer_disabled)
		return 0;

	/* Aborted reclaim to try compaction? don't OOM, then */
	if (aborted_reclaim)
		return 1;

	/* top priority shrink_zones still had more to do? don't OOM, then */
	if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc))
		return 1;

	return 0;
}
don't OOM, then */ if (global_reclaim(sc) && !all_unreclaimable(zonelist, sc)) return 1; return 0; } static bool <API key>(pg_data_t *pgdat) { struct zone *zone; unsigned long pfmemalloc_reserve = 0; unsigned long free_pages = 0; int i; bool wmark_ok; for (i = 0; i <= ZONE_NORMAL; i++) { zone = &pgdat->node_zones[i]; if (!populated_zone(zone)) continue; pfmemalloc_reserve += min_wmark_pages(zone); free_pages += zone_page_state(zone, NR_FREE_PAGES); } /* If there are no reserves (unexpected config) then do not throttle */ if (!pfmemalloc_reserve) return true; wmark_ok = free_pages > pfmemalloc_reserve / 2; /* kswapd must be awake if processes are being throttled */ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { pgdat->classzone_idx = min(pgdat->classzone_idx, (enum zone_type)ZONE_NORMAL); <API key>(&pgdat->kswapd_wait); } return wmark_ok; } /* * Throttle direct reclaimers if backing storage is backed by the network * and the PFMEMALLOC reserve for the preferred node is getting dangerously * depleted. kswapd will continue to make progress and wake the processes * when the low watermark is reached. * * Returns true if a fatal signal was delivered during throttling. If this * happens, the page allocator should not consider triggering the OOM killer. */ static bool <API key>(gfp_t gfp_mask, struct zonelist *zonelist, nodemask_t *nodemask) { struct zoneref *z; struct zone *zone; pg_data_t *pgdat = NULL; /* * Kernel threads should not be throttled as they may be indirectly * responsible for cleaning pages necessary for reclaim to make forward * progress. kjournald for example may enter direct reclaim while * committing a transaction where throttling it could forcing other * processes to block on log_wait_commit(). */ if (current->flags & PF_KTHREAD) goto out; /* * If a fatal signal is pending, this process should not throttle. * It should return quickly so it can exit and free its memory */ if (<API key>(current)) goto out; /* * Check if the pfmemalloc reserves are ok by finding the first node * with a usable ZONE_NORMAL or lower zone. The expectation is that * GFP_KERNEL will be required for allocating network buffers when * swapping over the network so ZONE_HIGHMEM is unusable. * * Throttling is based on the first usable node and throttled processes * wait on a queue until kswapd makes progress and wakes them. There * is an affinity then between processes waking up and where reclaim * progress has been made assuming the process wakes on the same node. * More importantly, processes running on remote nodes will not compete * for remote pfmemalloc reserves and processes on different nodes * should make reasonable progress. */ <API key>(zone, z, zonelist, gfp_mask, nodemask) { if (zone_idx(zone) > ZONE_NORMAL) continue; /* Throttle based on the first usable node */ pgdat = zone->zone_pgdat; if (<API key>(pgdat)) goto out; break; } /* If no zone was usable by the allocation flags then do not throttle */ if (!pgdat) goto out; /* Account for the throttling */ count_vm_event(<API key>); /* * If the caller cannot enter the filesystem, it's possible that it * is due to the caller holding an FS lock or performing a journal * transaction in the case of a filesystem like ext[3|4]. In this case, * it is not safe to block on pfmemalloc_wait as kswapd could be * blocked waiting on the same lock. Instead, throttle for up to a * second before continuing. 
*/ if (!(gfp_mask & __GFP_FS)) { <API key>(pgdat->pfmemalloc_wait, <API key>(pgdat), HZ); goto check_pending; } /* Throttle until kswapd wakes the process */ wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, <API key>(pgdat)); check_pending: if (<API key>(current)) return true; out: return false; } unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask) { unsigned long nr_reclaimed; struct scan_control sc = { .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .may_writepage = !laptop_mode, .nr_to_reclaim = SWAP_CLUSTER_MAX, .may_unmap = 1, .may_swap = 1, .order = order, .priority = DEF_PRIORITY, .target_mem_cgroup = NULL, .nodemask = nodemask, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; /* * Do not enter reclaim if fatal signal was delivered while throttled. * 1 is returned so that the page allocator does not OOM kill at this * point. */ if (<API key>(gfp_mask, zonelist, nodemask)) return 1; <API key>(order, sc.may_writepage, gfp_mask); nr_reclaimed = <API key>(zonelist, &sc, &shrink); <API key>(nr_reclaimed); return nr_reclaimed; } #ifdef CONFIG_MEMCG unsigned long <API key>(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap, struct zone *zone, unsigned long *nr_scanned) { struct scan_control sc = { .nr_scanned = 0, .nr_to_reclaim = SWAP_CLUSTER_MAX, .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, .order = 0, .priority = 0, .target_mem_cgroup = memcg, }; struct lruvec *lruvec = <API key>(zone, memcg); sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (<API key> & ~GFP_RECLAIM_MASK); <API key>(sc.order, sc.may_writepage, sc.gfp_mask); /* * NOTE: Although we can get the priority field, using it * here is not a good idea, since it limits the pages we can scan. * if we don't reclaim here, the shrink_zone from balance_pgdat * will pick up pages from other mem cgroup's as well. We hack * the priority and make it zero. */ shrink_lruvec(lruvec, &sc); <API key>(sc.nr_reclaimed); *nr_scanned = sc.nr_scanned; return sc.nr_reclaimed; } unsigned long <API key>(struct mem_cgroup *memcg, gfp_t gfp_mask, bool noswap) { struct zonelist *zonelist; unsigned long nr_reclaimed; int nid; struct scan_control sc = { .may_writepage = !laptop_mode, .may_unmap = 1, .may_swap = !noswap, .nr_to_reclaim = SWAP_CLUSTER_MAX, .order = 0, .priority = DEF_PRIORITY, .target_mem_cgroup = memcg, .nodemask = NULL, /* we don't care the placement */ .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (<API key> & ~GFP_RECLAIM_MASK), }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; /* * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't * take care of from where we get pages. So the node where we start the * scan does not need to be the current node. 
*/ nid = <API key>(memcg); zonelist = NODE_DATA(nid)->node_zonelists; <API key>(0, sc.may_writepage, sc.gfp_mask); nr_reclaimed = <API key>(zonelist, &sc, &shrink); <API key>(nr_reclaimed); return nr_reclaimed; } #endif static void age_active_anon(struct zone *zone, struct scan_control *sc) { struct mem_cgroup *memcg; if (!total_swap_pages) return; memcg = mem_cgroup_iter(NULL, NULL, NULL); do { struct lruvec *lruvec = <API key>(zone, memcg); if (<API key>(lruvec)) shrink_active_list(SWAP_CLUSTER_MAX, lruvec, sc, LRU_ACTIVE_ANON); memcg = mem_cgroup_iter(NULL, memcg, NULL); } while (memcg); } static bool zone_balanced(struct zone *zone, int order, unsigned long balance_gap, int classzone_idx) { if (!<API key>(zone, order, high_wmark_pages(zone) + balance_gap, classzone_idx, 0)) return false; if (IS_ENABLED(CONFIG_COMPACTION) && order && !compaction_suitable(zone, order)) return false; return true; } /* * pgdat_balanced() is used when checking if a node is balanced. * * For order-0, all zones must be balanced! * * For high-order allocations only zones that meet watermarks and are in a * zone allowed by the callers classzone_idx are added to balanced_pages. The * total of balanced pages must be at least 25% of the zones allowed by * classzone_idx for the node to be considered balanced. Forcing all zones to * be balanced for high orders can cause excessive reclaim when there are * imbalanced zones. * The choice of 25% is due to * o a 16M DMA zone that is balanced will not balance a zone on any * reasonable sized machine * o On all other machines, the top zone must be at least a reasonable * percentage of the middle zones. For example, on 32-bit x86, highmem * would need to be at least 256M for it to be balance a whole node. * Similarly, on x86-64 the Normal zone would need to be at least 1G * to balance a node on its own. These seemed like reasonable ratios. */ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx) { unsigned long managed_pages = 0; unsigned long balanced_pages = 0; int i; /* Check the watermark levels */ for (i = 0; i <= classzone_idx; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; managed_pages += zone->managed_pages; /* * A special case here: * * balance_pgdat() skips over all_unreclaimable after * DEF_PRIORITY. Effectively, it considers them balanced so * they must be considered balanced here as well! */ if (zone->all_unreclaimable) { balanced_pages += zone->managed_pages; continue; } if (zone_balanced(zone, order, 0, i)) balanced_pages += zone->managed_pages; else if (!order) return false; } if (order) return balanced_pages >= (managed_pages >> 2); else return true; } /* * Prepare kswapd for sleeping. This verifies that there are no processes * waiting in <API key>() and that watermarks have been met. * * Returns true if kswapd is ready to sleep */ static bool <API key>(pg_data_t *pgdat, int order, long remaining, int classzone_idx) { /* If a direct reclaimer woke kswapd within HZ/10, it's premature */ if (remaining) return false; /* * The throttled processes are normally woken up in balance_pgdat() as * soon as <API key>() is true. But there is a potential * race between when kswapd checks the watermarks and a process gets * throttled. There is also a potential race if processes get * throttled, kswapd wakes, a large process exits thereby balancing the * zones, which causes kswapd to exit balance_pgdat() before reaching * the wake up checks. 
If kswapd is going to sleep, no process should * be sleeping on pfmemalloc_wait, so wake them now if necessary. If * the wake up is premature, processes will wake kswapd and get * throttled again. The difference from wake ups in balance_pgdat() is * that here we are under prepare_to_wait(). */ if (waitqueue_active(&pgdat->pfmemalloc_wait)) wake_up_all(&pgdat->pfmemalloc_wait); return pgdat_balanced(pgdat, order, classzone_idx); } /* * For kswapd, balance_pgdat() will work across all this node's zones until * they are all at high_wmark_pages(zone). * * Returns the final order kswapd was reclaiming at * * There is special handling here for zones which are full of pinned pages. * This can happen if the pages are all mlocked, or if they are all used by * device drivers (say, ZONE_DMA). Or if they are all in use by hugetlb. * What we do is to detect the case where all pages in the zone have been * scanned twice and there has been zero successful reclaim. Mark the zone as * dead and from now on, only perform a short scan. Basically we're polling * the zone for when the problem goes away. * * kswapd scans the zones in the highmem->normal->dma direction. It skips * zones which have free_pages > high_wmark_pages(zone), but once a zone is * found to have free_pages <= high_wmark_pages(zone), we scan that zone and the * lower zones regardless of the number of free pages in the lower zones. This * interoperates with the page allocator fallback scheme to ensure that aging * of pages is balanced across the zones. */ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, int *classzone_idx) { bool pgdat_is_balanced = false; int i; int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ struct reclaim_state *reclaim_state = current->reclaim_state; unsigned long nr_soft_reclaimed; unsigned long nr_soft_scanned; struct scan_control sc = { .gfp_mask = GFP_KERNEL, .may_unmap = 1, .may_swap = 1, /* * kswapd doesn't want to be bailed out while reclaim. because * we want to put equal scanning pressure on each zone. */ .nr_to_reclaim = ULONG_MAX, .order = order, .target_mem_cgroup = NULL, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; loop_again: sc.priority = DEF_PRIORITY; sc.nr_reclaimed = 0; sc.may_writepage = !laptop_mode; count_vm_event(PAGEOUTRUN); do { unsigned long lru_pages = 0; /* * Scan in the highmem->dma direction for the highest * zone which needs scanning */ for (i = pgdat->nr_zones - 1; i >= 0; i--) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && sc.priority != DEF_PRIORITY) continue; /* * Do some background aging of the anon list, to give * pages a chance to be referenced before reclaiming. */ age_active_anon(zone, &sc); /* * If the number of buffer_heads in the machine * exceeds the maximum allowed level and this node * has a highmem zone, force kswapd to reclaim from * it to relieve lowmem pressure. */ if (buffer_heads_over_limit && is_highmem_idx(i)) { end_zone = i; break; } if (!zone_balanced(zone, order, 0, 0)) { end_zone = i; break; } else { /* If balanced, clear the congested flag */ zone_clear_flag(zone, ZONE_CONGESTED); } } if (i < 0) { pgdat_is_balanced = true; goto out; } for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; lru_pages += zone_reclaimable_pages(zone); } /* * Now scan the zone in the dma->highmem direction, stopping * at the last zone which needs scanning. * * We do this because the page allocator works in the opposite * direction.
This prevents the page allocator from allocating * pages behind kswapd's direction of progress, which would * cause too much scanning of the lower zones. */ for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; int nr_slab, testorder; unsigned long balance_gap; if (!populated_zone(zone)) continue; if (zone->all_unreclaimable && sc.priority != DEF_PRIORITY) continue; sc.nr_scanned = 0; nr_soft_scanned = 0; /* * Call soft limit reclaim before calling shrink_zone. */ nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask, &nr_soft_scanned); sc.nr_reclaimed += nr_soft_reclaimed; /* * We put equal pressure on every zone, unless * one zone has way too many pages free * already. The "too many pages" is defined * as the high wmark plus a "gap" where the * gap is either the low watermark or 1% * of the zone, whichever is smaller. */ balance_gap = min(low_wmark_pages(zone), (zone->managed_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / KSWAPD_ZONE_BALANCE_GAP_RATIO); /* * Kswapd reclaims only single pages with compaction * enabled. Trying too hard to reclaim until contiguous * free pages have become available can hurt performance * by evicting too much useful data from memory. * Do not reclaim more than needed for compaction. */ testorder = order; if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, order) != COMPACT_SKIPPED) testorder = 0; if ((buffer_heads_over_limit && is_highmem_idx(i)) || !zone_balanced(zone, testorder, balance_gap, end_zone)) { shrink_zone(zone, &sc); reclaim_state->reclaimed_slab = 0; nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); sc.nr_reclaimed += reclaim_state->reclaimed_slab; if (nr_slab == 0 && !zone_reclaimable(zone)) zone->all_unreclaimable = 1; } /* * If we're getting trouble reclaiming, start doing * writepage even in laptop mode. */ if (sc.priority < DEF_PRIORITY - 2) sc.may_writepage = 1; if (zone->all_unreclaimable) { if (end_zone && end_zone == i) end_zone--; continue; } if (zone_balanced(zone, testorder, 0, end_zone)) /* * If a zone reaches its high watermark, * consider it to be no longer congested. It's * possible there are dirty pages backed by * congested BDIs but as pressure is relieved, * speculatively avoid congestion waits */ zone_clear_flag(zone, ZONE_CONGESTED); } /* * If the low watermark is met there is no need for processes * to be throttled on pfmemalloc_wait as they should not be * able to safely make forward progress. Wake them */ if (waitqueue_active(&pgdat->pfmemalloc_wait) && pfmemalloc_watermark_ok(pgdat)) wake_up(&pgdat->pfmemalloc_wait); if (pgdat_balanced(pgdat, order, *classzone_idx)) { pgdat_is_balanced = true; break; /* kswapd: all done */ } /* * We do this so kswapd doesn't build up large priorities for * example when it is freeing in parallel with allocators. It * matches the direct reclaim path behaviour in terms of impact * on zone->*_priority. */ if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) break; } while (--sc.priority >= 0); out: if (!pgdat_is_balanced) { cond_resched(); try_to_freeze(); /* * Fragmentation may mean that the system cannot be * rebalanced for high-order allocations in all zones. * At this point, if nr_reclaimed < SWAP_CLUSTER_MAX, * it means the zones have been fully scanned and are still * not balanced. For high-order allocations, there is * little point trying all over again as kswapd may * infinite loop. * * Instead, recheck all watermarks at order-0 as they * are the most important. If watermarks are ok, kswapd will go * back to sleep. High-order users can still perform direct * reclaim if they wish.
*/ if (sc.nr_reclaimed < SWAP_CLUSTER_MAX) order = sc.order = 0; goto loop_again; } /* * If kswapd was reclaiming at a higher order, it has the option of * sleeping without all zones being balanced. Before it does, it must * ensure that the watermarks for order-0 on *all* zones are met and * that the congestion flags are cleared. The congestion flag must * be cleared as kswapd is the only mechanism that clears the flag * and it is potentially going to sleep here. */ if (order) { int <API key> = 1; for (i = 0; i <= end_zone; i++) { struct zone *zone = pgdat->node_zones + i; if (!populated_zone(zone)) continue; /* Check if the memory needs to be defragmented. */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), *classzone_idx, 0)) <API key> = 0; } if (<API key>) compact_pgdat(pgdat, order); } /* * Return the order we were reclaiming at so <API key>() * makes a decision on the order we were last reclaiming at. However, * if another caller entered the allocator slow path while kswapd * was awake, order will remain at the higher level */ *classzone_idx = end_zone; return order; } static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx) { long remaining = 0; DEFINE_WAIT(wait); if (freezing(current) || kthread_should_stop()) return; prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); /* Try to sleep for a short interval */ if (<API key>(pgdat, order, remaining, classzone_idx)) { remaining = schedule_timeout(HZ/10); finish_wait(&pgdat->kswapd_wait, &wait); prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); } /* * After a short sleep, check if it was a premature sleep. If not, then * go fully to sleep until explicitly woken up. */ if (<API key>(pgdat, order, remaining, classzone_idx)) { <API key>(pgdat->node_id); /* * vmstat counters are not perfectly accurate and the estimated * value for counters such as NR_FREE_PAGES can deviate from the * true value by nr_online_cpus * threshold. To avoid the zone * watermarks being breached while under pressure, we reduce the * per-cpu vmstat threshold while kswapd is awake and restore * them before going back to sleep. */ <API key>(pgdat, <API key>); /* * Compaction records what page blocks it recently failed to * isolate pages from and skips them in the future scanning. * When kswapd is going to sleep, it is reasonable to assume * that pages and compaction may succeed so reset the cache. */ <API key>(pgdat); if (!kthread_should_stop()) schedule(); <API key>(pgdat, <API key>); } else { if (remaining) count_vm_event(<API key>); else count_vm_event(<API key>); } finish_wait(&pgdat->kswapd_wait, &wait); } /* * The background pageout daemon, started as a kernel thread * from the init process. * * This basically trickles out pages so that we have _some_ * free memory available even if there is no other activity * that frees anything up. This is needed for things like routing * etc, where we otherwise might have all activity going on in * asynchronous contexts that cannot page things out. * * If there are applications that are active memory-allocators * (most normal use), this basically shouldn't matter. 
*/ static int kswapd(void *p) { unsigned long order, new_order; unsigned balanced_order; int classzone_idx, new_classzone_idx; int <API key>; pg_data_t *pgdat = (pg_data_t*)p; struct task_struct *tsk = current; struct reclaim_state reclaim_state = { .reclaimed_slab = 0, }; const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); <API key>(GFP_KERNEL); if (!cpumask_empty(cpumask)) <API key>(tsk, cpumask); current->reclaim_state = &reclaim_state; /* * Tell the memory management that we're a "memory allocator", * and that if we need more memory we should get access to it * regardless (see "__alloc_pages()"). "kswapd" should * never get caught in the normal page freeing logic. * * (Kswapd normally doesn't need memory anyway, but sometimes * you need a small amount of memory in order to be able to * page out something else, and this flag essentially protects * us from recursively trying to free more memory as we're * trying to free the first piece of memory in the first place). */ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD; set_freezable(); order = new_order = 0; balanced_order = 0; classzone_idx = new_classzone_idx = pgdat->nr_zones - 1; <API key> = classzone_idx; for ( ; ; ) { bool ret; /* * If the last balance_pgdat was unsuccessful it's unlikely a * new request of a similar or harder type will succeed soon * so consider going to sleep on the basis we reclaimed at */ if (<API key> >= new_classzone_idx && balanced_order == new_order) { new_order = pgdat->kswapd_max_order; new_classzone_idx = pgdat->classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; } if (order < new_order || classzone_idx > new_classzone_idx) { /* * Don't sleep if someone wants a larger 'order' * allocation or has tigher zone constraints */ order = new_order; classzone_idx = new_classzone_idx; } else { kswapd_try_to_sleep(pgdat, balanced_order, <API key>); order = pgdat->kswapd_max_order; classzone_idx = pgdat->classzone_idx; new_order = order; new_classzone_idx = classzone_idx; pgdat->kswapd_max_order = 0; pgdat->classzone_idx = pgdat->nr_zones - 1; } ret = try_to_freeze(); if (kthread_should_stop()) break; /* * We can speed up thawing tasks if we don't call balance_pgdat * after returning from the refrigerator */ if (!ret) { <API key>(pgdat->node_id, order); <API key> = classzone_idx; balanced_order = balance_pgdat(pgdat, order, &<API key>); } } tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD); current->reclaim_state = NULL; <API key>(); return 0; } /* * A zone is low on free memory, so wake its kswapd task to service it. */ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx) { pg_data_t *pgdat; if (!populated_zone(zone)) return; if (!<API key>(zone, GFP_KERNEL)) return; pgdat = zone->zone_pgdat; if (pgdat->kswapd_max_order < order) { pgdat->kswapd_max_order = order; pgdat->classzone_idx = min(pgdat->classzone_idx, classzone_idx); } if (!waitqueue_active(&pgdat->kswapd_wait)) return; if (<API key>(zone, order, low_wmark_pages(zone), 0, 0)) return; <API key>(pgdat->node_id, zone_idx(zone), order); <API key>(&pgdat->kswapd_wait); } #ifdef CONFIG_HIBERNATION /* * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of * freed pages. 
* * Rather than trying to age LRUs the aim is to preserve the overall * LRU order by reclaiming preferentially * inactive > active > active referenced > active mapped */ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) { struct reclaim_state reclaim_state; struct scan_control sc = { .gfp_mask = <API key>, .may_swap = 1, .may_unmap = 1, .may_writepage = 1, .nr_to_reclaim = nr_to_reclaim, .hibernation_mode = 1, .order = 0, .priority = DEF_PRIORITY, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask); struct task_struct *p = current; unsigned long nr_reclaimed; p->flags |= PF_MEMALLOC; <API key>(sc.gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; nr_reclaimed = <API key>(zonelist, &sc, &shrink); p->reclaim_state = NULL; <API key>(); p->flags &= ~PF_MEMALLOC; return nr_reclaimed; } #endif /* CONFIG_HIBERNATION */ /* It's optimal to keep kswapds on the same CPUs as their memory, but not required for correctness. So if the last cpu in a node goes away, we get changed to run anywhere: as the first one comes back, restore their cpu bindings. */ static int cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int nid; if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) { for_each_node_state(nid, N_MEMORY) { pg_data_t *pgdat = NODE_DATA(nid); const struct cpumask *mask; mask = cpumask_of_node(pgdat->node_id); if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids) /* One of our CPUs online: restore mask */ <API key>(pgdat->kswapd, mask); } } return NOTIFY_OK; } /* * This kswapd start function will be called by init and node-hot-add. * On node-hot-add, kswapd will moved to proper cpus if cpus are hot-added. */ int kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); int ret = 0; if (pgdat->kswapd) return 0; pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); if (IS_ERR(pgdat->kswapd)) { /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); pr_err("Failed to start kswapd on node %d\n", nid); ret = PTR_ERR(pgdat->kswapd); pgdat->kswapd = NULL; } return ret; } /* * Called by memory hotplug when all memory in a node is offlined. Caller must * hold lock_memory_hotplug(). */ void kswapd_stop(int nid) { struct task_struct *kswapd = NODE_DATA(nid)->kswapd; if (kswapd) { kthread_stop(kswapd); NODE_DATA(nid)->kswapd = NULL; } } static int __init kswapd_init(void) { int nid; swap_setup(); for_each_node_state(nid, N_MEMORY) kswapd_run(nid); hotcpu_notifier(cpu_callback, 0); return 0; } module_init(kswapd_init) #ifdef CONFIG_NUMA /* * Zone reclaim mode * * If non-zero call zone_reclaim when the number of free pages falls below * the watermarks. */ int zone_reclaim_mode __read_mostly; #define RECLAIM_OFF 0 #define RECLAIM_ZONE (1<<0) /* Run <API key> on the zone */ #define RECLAIM_WRITE (1<<1) /* Writeout pages during reclaim */ #define RECLAIM_SWAP (1<<2) /* Swap pages out during reclaim */ /* * Priority for ZONE_RECLAIM. This determines the fraction of pages * of a node considered for each zone_reclaim. 4 scans 1/16th of * a zone. */ #define <API key> 4 /* * Percentage of pages in a zone that must be unmapped for zone_reclaim to * occur. */ int <API key> = 1; /* * If the number of slab pages in a zone grows beyond this percentage then * slab reclaim needs to occur. 
*/ int <API key> = 5; static inline unsigned long <API key>(struct zone *zone) { unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED); unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) + zone_page_state(zone, NR_ACTIVE_FILE); /* * It's possible for there to be more file mapped pages than * accounted for by the pages on the file LRU lists because * tmpfs pages accounted for as ANON can also be FILE_MAPPED */ return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; } /* Work out how many page cache pages we can reclaim in this reclaim_mode */ static long <API key>(struct zone *zone) { long <API key>; long delta = 0; /* * If RECLAIM_SWAP is set, then all file pages are considered * potentially reclaimable. Otherwise, we have to worry about * pages like swapcache and <API key>() provides * a better estimate */ if (zone_reclaim_mode & RECLAIM_SWAP) <API key> = zone_page_state(zone, NR_FILE_PAGES); else <API key> = <API key>(zone); /* If we can't clean pages, remove dirty pages from consideration */ if (!(zone_reclaim_mode & RECLAIM_WRITE)) delta += zone_page_state(zone, NR_FILE_DIRTY); /* Watch for any possible underflows due to delta */ if (unlikely(delta > <API key>)) delta = <API key>; return <API key> - delta; } /* * Try to free up some pages from this zone through reclaim. */ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { /* Minimum pages needed in order to stay on node */ const unsigned long nr_pages = 1 << order; struct task_struct *p = current; struct reclaim_state reclaim_state; struct scan_control sc = { .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), .may_swap = 1, .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask = memalloc_noio_flags(gfp_mask)), .order = order, .priority = <API key>, }; struct shrink_control shrink = { .gfp_mask = sc.gfp_mask, }; unsigned long nr_slab_pages0, nr_slab_pages1; cond_resched(); /* * We need to be able to allocate from the reserves for RECLAIM_SWAP * and we also need to be able to write out pages for RECLAIM_WRITE * and RECLAIM_SWAP. */ p->flags |= PF_MEMALLOC | PF_SWAPWRITE; <API key>(gfp_mask); reclaim_state.reclaimed_slab = 0; p->reclaim_state = &reclaim_state; if (<API key>(zone) > zone->min_unmapped_pages) { /* * Free memory by calling shrink zone with increasing * priorities until we have enough memory freed. */ do { shrink_zone(zone, &sc); } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); } nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages0 > zone->min_slab_pages) { /* * shrink_slab() does not currently allow us to determine how * many pages were freed in this zone. So we take the current * number of slab pages and shake the slab until it is reduced * by the same nr_pages that we used for reclaiming unmapped * pages. * * Note that shrink_slab will free memory on all zones and may * take a long time. */ for (;;) { unsigned long lru_pages = <API key>(zone); /* No reclaimable slab or very low memory pressure */ if (!shrink_slab(&shrink, sc.nr_scanned, lru_pages)) break; /* Freed enough memory */ nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages1 + nr_pages <= nr_slab_pages0) break; } /* * Update nr_reclaimed by the number of slab pages we * reclaimed from this zone. 
*/ nr_slab_pages1 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); if (nr_slab_pages1 < nr_slab_pages0) sc.nr_reclaimed += nr_slab_pages0 - nr_slab_pages1; } p->reclaim_state = NULL; current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); <API key>(); return sc.nr_reclaimed >= nr_pages; } int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) { int node_id; int ret; /* * Zone reclaim reclaims unmapped file backed pages and * slab pages if we are over the defined limits. * * A small portion of unmapped file backed pages is needed for * file I/O otherwise pages read by file I/O will be immediately * thrown out if the zone is overallocated. So we do not reclaim * if less than a specified percentage of the zone is used by * unmapped file backed pages. */ if (<API key>(zone) <= zone->min_unmapped_pages && zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages) return ZONE_RECLAIM_FULL; if (zone->all_unreclaimable) return ZONE_RECLAIM_FULL; /* * Do not scan if the allocation should not be delayed. */ if (!(gfp_mask & __GFP_WAIT) || (current->flags & PF_MEMALLOC)) return ZONE_RECLAIM_NOSCAN; /* * Only run zone reclaim on the local zone or on zones that do not * have associated processors. This will favor the local processor * over remote processors and spread off node memory allocations * as wide as possible. */ node_id = zone_to_nid(zone); if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; if (<API key>(zone, ZONE_RECLAIM_LOCKED)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); if (!ret) count_vm_event(<API key>); return ret; } #endif /* * page_evictable - test whether a page is evictable * @page: the page to test * * Test whether page is evictable--i.e., should be placed on active/inactive * lists vs unevictable list. * * Reasons page might not be evictable: * (1) page's mapping marked unevictable * (2) page is part of an mlocked VMA * */ int page_evictable(struct page *page) { return !mapping_unevictable(page_mapping(page)) && !PageMlocked(page); } #ifdef CONFIG_SHMEM /** * <API key> - check pages for evictability and move to appropriate zone lru list * @pages: array of pages to check * @nr_pages: number of pages to check * * Checks pages for evictability and moves them to the appropriate lru list. * * This function is only used for SysV IPC SHM_UNLOCK. */ void <API key>(struct page **pages, int nr_pages) { struct lruvec *lruvec; struct zone *zone = NULL; int pgscanned = 0; int pgrescued = 0; int i; for (i = 0; i < nr_pages; i++) { struct page *page = pages[i]; struct zone *pagezone; pgscanned++; pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irq(&zone->lru_lock); zone = pagezone; spin_lock_irq(&zone->lru_lock); } lruvec = <API key>(page, zone); if (!PageLRU(page) || !PageUnevictable(page)) continue; if (page_evictable(page)) { enum lru_list lru = page_lru_base_type(page); VM_BUG_ON(PageActive(page)); <API key>(page); <API key>(page, lruvec, LRU_UNEVICTABLE); <API key>(page, lruvec, lru); pgrescued++; } } if (zone) { __count_vm_events(<API key>, pgrescued); __count_vm_events(<API key>, pgscanned); spin_unlock_irq(&zone->lru_lock); } } #endif /* CONFIG_SHMEM */ static void <API key>(void) { printk_once(KERN_WARNING "%s: The <API key> sysctl/node-interface has been " "disabled for lack of a legitimate use case. If you have " "one, please send an email to linux-mm@kvack.org.\n", current->comm); } /* * <API key> [vm] sysctl handler. 
On demand re-scan of * all nodes' unevictable lists for evictable pages */ unsigned long scan_unevictable_pages; int scan_unevictable_handler(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { warn_scan_unevictable_pages(); proc_doulongvec_minmax(table, write, buffer, length, ppos); scan_unevictable_pages = 0; return 0; } #ifdef CONFIG_NUMA /* * per node 'scan_unevictable_pages' attribute. On demand re-scan of * a specified node's per zone unevictable lists for evictable pages. */ static ssize_t read_scan_unevictable_node(struct device *dev, struct device_attribute *attr, char *buf) { warn_scan_unevictable_pages(); return sprintf(buf, "0\n"); /* always zero; should fit... */ } static ssize_t write_scan_unevictable_node(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { warn_scan_unevictable_pages(); return 1; } static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, read_scan_unevictable_node, write_scan_unevictable_node); int scan_unevictable_register_node(struct node *node) { return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); } void scan_unevictable_unregister_node(struct node *node) { device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); } #endif
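/*
 * Illustrative sketch, not part of the file above: the page cache accounting
 * that zone_reclaim relies on, in isolation. With RECLAIM_SWAP every file
 * page is potentially reclaimable; otherwise only file-LRU pages in excess
 * of the mapped ones count, and without RECLAIM_WRITE dirty pages are
 * subtracted as unreclaimable. All *_example identifiers are hypothetical
 * stand-ins for the zone counters used above.
 */
#define RECLAIM_WRITE_EXAMPLE (1 << 1)
#define RECLAIM_SWAP_EXAMPLE  (1 << 2)

struct zone_counts_example {
	unsigned long file_pages;	/* NR_FILE_PAGES */
	unsigned long file_lru;		/* NR_ACTIVE_FILE + NR_INACTIVE_FILE */
	unsigned long file_mapped;	/* NR_FILE_MAPPED */
	unsigned long file_dirty;	/* NR_FILE_DIRTY */
};

static long pagecache_reclaimable_example(const struct zone_counts_example *z,
					  int reclaim_mode)
{
	long pagecache, delta = 0;

	if (reclaim_mode & RECLAIM_SWAP_EXAMPLE)
		pagecache = z->file_pages;
	else
		pagecache = z->file_lru > z->file_mapped ?
			    z->file_lru - z->file_mapped : 0;

	if (!(reclaim_mode & RECLAIM_WRITE_EXAMPLE))
		delta += z->file_dirty;

	if (delta > pagecache)	/* guard against underflow */
		delta = pagecache;
	return pagecache - delta;
}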
#ifndef _MAPTREE_H #define _MAPTREE_H #include "Platform/Define.h" #include "Utilities/UnorderedMapSet.h" #include "BIH.h" namespace VMAP { class ModelInstance; class GroupModel; class VMapManager2; struct LocationInfo { LocationInfo(): hitInstance(0), hitModel(0), ground_Z(-G3D::inf()) {}; const ModelInstance* hitInstance; const GroupModel* hitModel; float ground_Z; }; class StaticMapTree { typedef UNORDERED_MAP<uint32, bool> loadedTileMap; typedef UNORDERED_MAP<uint32, uint32> loadedSpawnMap; private: uint32 iMapID; bool iIsTiled; BIH iTree; ModelInstance* iTreeValues; // the tree entries uint32 iNTreeValues; // Store all the map tile idents that are loaded for that map // some maps are not splitted into tiles and we have to make sure, not removing the map before all tiles are removed // empty tiles have no tile file, hence map with bool instead of just a set (consistency check) loadedTileMap iLoadedTiles; // stores <tree_index, reference_count> to invalidate tree values, unload map, and to be able to report errors loadedSpawnMap iLoadedSpawns; std::string iBasePath; private: bool getIntersectionTime(const G3D::Ray& pRay, float& pMaxDist, bool pStopAtFirstHit, bool isLosCheck) const; // bool <API key>(unsigned int pTileIdent) const { return(iLoadedMapTiles.containsKey(pTileIdent)); } public: static std::string getTileFileName(uint32 mapID, uint32 tileX, uint32 tileY); static uint32 packTileID(uint32 tileX, uint32 tileY) { return tileX << 16 | tileY; } static void unpackTileID(uint32 ID, uint32& tileX, uint32& tileY) { tileX = ID >> 16; tileY = ID & 0xFF; } static bool CanLoadMap(const std::string& basePath, uint32 mapID, uint32 tileX, uint32 tileY); StaticMapTree(uint32 mapID, const std::string& basePath); ~StaticMapTree(); bool isInLineOfSight(const G3D::Vector3& pos1, const G3D::Vector3& pos2) const; ModelInstance* FindCollisionModel(const G3D::Vector3& pos1, const G3D::Vector3& pos2); bool getObjectHitPos(const G3D::Vector3& pos1, const G3D::Vector3& pos2, G3D::Vector3& pResultHitPos, float pModifyDist) const; float getHeight(const G3D::Vector3& pPos, float maxSearchDist) const; bool getAreaInfo(G3D::Vector3& pos, uint32& flags, int32& adtId, int32& rootId, int32& groupId) const; bool isUnderModel(G3D::Vector3& pos, float* outDist = NULL, float* inDist = NULL) const; bool GetLocationInfo(const Vector3& pos, LocationInfo& info) const; bool InitMap(const std::string& fname, VMapManager2* vm); void UnloadMap(VMapManager2* vm); bool LoadMapTile(uint32 tileX, uint32 tileY, VMapManager2* vm); void UnloadMapTile(uint32 tileX, uint32 tileY, VMapManager2* vm); bool isTiled() const { return iIsTiled; } uint32 numLoadedTiles() const { return iLoadedTiles.size(); } #ifdef MMAP_GENERATOR public: void getModelInstances(ModelInstance*& models, uint32& count); #endif }; struct AreaInfo { AreaInfo(): result(false), ground_Z(-G3D::inf()) {}; bool result; float ground_Z; uint32 flags; int32 adtId; int32 rootId; int32 groupId; }; } // VMAP #endif // _MAPTREE_H
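/*
 * Illustrative sketch, not part of MapTree.h: the tile ID packing used by
 * StaticMapTree above. Note that unpackTileID() masks tileY with 0xFF while
 * packTileID() stores it in the low 16 bits; that round-trips only because
 * vmap tile coordinates stay below 64. A strict inverse would mask with
 * 0xFFFF, as sketched here.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t pack_tile_id(uint32_t tile_x, uint32_t tile_y)
{
	return tile_x << 16 | tile_y;
}

static void unpack_tile_id(uint32_t id, uint32_t *tile_x, uint32_t *tile_y)
{
	*tile_x = id >> 16;
	*tile_y = id & 0xFFFF;	/* the header uses 0xFF; enough for y < 64 */
}

int main(void)
{
	uint32_t x, y;

	unpack_tile_id(pack_tile_id(31, 47), &x, &y);
	assert(x == 31 && y == 47);
	return 0;
}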
<?php /* vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4: */ class Net_DNS2_RR_LOC extends Net_DNS2_RR { /* * the LOC version- should only ever be 0 */ public $version; /* * The diameter of a sphere enclosing the described entity */ public $size; /* * The horizontal precision of the data */ public $horiz_pre; /* * The vertical precision of the data */ public $vert_pre; /* * The latitude - stored in decimal degrees */ public $latitude; /* * The longitude - stored in decimal degrees */ public $longitude; /* * The altitude - stored in decimal */ public $altitude; /* * used for quick power-of-ten lookups */ private $_powerOfTen = array(1, 10, 100, 1000, 10000, 100000, 1000000,10000000,100000000,1000000000); /* * some conversion values */ const CONV_SEC = 1000; const CONV_MIN = 60000; const CONV_DEG = 3600000; const REFERENCE_ALT = 10000000; const REFERENCE_LATLON = 2147483648; /** * method to return the rdata portion of the packet as a string * * @return string * @access protected * */ protected function rrToString() { if ($this->version == 0) { return $this->_d2Dms($this->latitude, 'LAT') . ' ' . $this->_d2Dms($this->longitude, 'LNG') . ' ' . sprintf('%.2fm', $this->altitude) . ' ' . sprintf('%.2fm', $this->size) . ' ' . sprintf('%.2fm', $this->horiz_pre) . ' ' . sprintf('%.2fm', $this->vert_pre); } return ''; } /** * parses the rdata portion from a standard DNS config line * * @param array $rdata a string split line of values for the rdata * * @return boolean * @access protected * */ protected function rrFromString(array $rdata) { // format as defined by RFC1876 section 3 // d1 [m1 [s1]] {"N"|"S"} d2 [m2 [s2]] {"E"|"W"} alt["m"] // [siz["m"] [hp["m"] [vp["m"]]]] $res = preg_match( '/^(\d+) \s+((\d+) \s+)?(([\d.]+) \s+)?(N|S) \s+(\d+) ' . '\s+((\d+) \s+)?(([\d.]+) \s+)?(E|W) \s+(-?[\d.]+) m?(\s+ ' . '([\d.]+) m?)?(\s+ ([\d.]+) m?)?(\s+ ([\d.]+) m?)?/ix', implode(' ', $rdata), $x ); if ($res) { // latitude $latdeg = $x[1]; $latmin = (isset($x[3])) ? $x[3] : 0; $latsec = (isset($x[5])) ? $x[5] : 0; $lathem = strtoupper($x[6]); $this->latitude = $this->_dms2d($latdeg, $latmin, $latsec, $lathem); // longitude $londeg = $x[7]; $lonmin = (isset($x[9])) ? $x[9] : 0; $lonsec = (isset($x[11])) ? $x[11] : 0; $lonhem = strtoupper($x[12]); $this->longitude = $this->_dms2d($londeg, $lonmin, $lonsec, $lonhem); // the rest of teh values $version = 0; $this->size = (isset($x[15])) ? $x[15] : 1; $this->horiz_pre = ((isset($x[17])) ? $x[17] : 10000); $this->vert_pre = ((isset($x[19])) ? 
$x[19] : 10); $this->altitude = $x[13]; return true; } return false; } /** * parses the rdata of the Net_DNS2_Packet object * * @param Net_DNS2_Packet &$packet a Net_DNS2_Packet packet to parse the RR from * * @return boolean * @access protected * */ protected function rrSet(Net_DNS2_Packet &$packet) { if ($this->rdlength > 0) { // unpack all the values $x = unpack( 'Cver/Csize/Choriz_pre/Cvert_pre/Nlatitude/Nlongitude/Naltitude', $this->rdata ); // version must be 0 per RFC 1876 section 2 $this->version = $x['ver']; if ($this->version == 0) { $this->size = $this->_precsizeNtoA($x['size']); $this->horiz_pre = $this->_precsizeNtoA($x['horiz_pre']); $this->vert_pre = $this->_precsizeNtoA($x['vert_pre']); // convert the latitude and longitude to degress in decimal if ($x['latitude'] < 0) { $this->latitude = ($x['latitude'] + self::REFERENCE_LATLON) / self::CONV_DEG; } else { $this->latitude = ($x['latitude'] - self::REFERENCE_LATLON) / self::CONV_DEG; } if ($x['longitude'] < 0) { $this->longitude = ($x['longitude'] + self::REFERENCE_LATLON) / self::CONV_DEG; } else { $this->longitude = ($x['longitude'] - self::REFERENCE_LATLON) / self::CONV_DEG; } // convert down the altitude $this->altitude = ($x['altitude'] - self::REFERENCE_ALT) / 100; return true; } else { return false; } return true; } return false; } /** * returns the rdata portion of the DNS packet * * @param Net_DNS2_Packet &$packet a Net_DNS2_Packet packet use for * compressed names * * @return mixed either returns a binary packed * string or null on failure * @access protected * */ protected function rrGet(Net_DNS2_Packet &$packet) { if ($this->version == 0) { $lat = 0; $lng = 0; if ($this->latitude < 0) { $lat = ($this->latitude * self::CONV_DEG) - self::REFERENCE_LATLON; } else { $lat = ($this->latitude * self::CONV_DEG) + self::REFERENCE_LATLON; } if ($this->longitude < 0) { $lng = ($this->longitude * self::CONV_DEG) - self::REFERENCE_LATLON; } else { $lng = ($this->longitude * self::CONV_DEG) + self::REFERENCE_LATLON; } return pack( 'CCCCNNN', $this->version, $this->_precsizeAtoN($this->size), $this->_precsizeAtoN($this->horiz_pre), $this->_precsizeAtoN($this->vert_pre), $lat, $lng, ($this->altitude * 100) + self::REFERENCE_ALT ); } return null; } /** * takes an XeY precision/size value, returns a string representation. * shamlessly stolen from RFC1876 Appendix A * * @param integer $prec the value to convert * * @return string * @access private * */ private function _precsizeNtoA($prec) { $mantissa = (($prec >> 4) & 0x0f) % 10; $exponent = (($prec >> 0) & 0x0f) % 10; return $mantissa * $this->_powerOfTen[$exponent]; } /** * converts ascii size/precision X * 10**Y(cm) to 0xXY. * shamlessly stolen from RFC1876 Appendix A * * @param string $prec the value to convert * * @return integer * @access private * */ private function _precsizeAtoN($prec) { $exponent = 0; while ($prec >= 10) { $prec /= 10; ++$exponent; } return ($prec << 4) | ($exponent & 0x0f); } /** * convert lat/lng in deg/min/sec/hem to decimal value * * @param integer $deg the degree value * @param integer $min the minutes value * @param integer $sec the seconds value * @param string $hem the hemisphere (N/E/S/W) * * @return float the decinmal value * @access private * */ private function _dms2d($deg, $min, $sec, $hem) { $deg = $deg - 0; $min = $min - 0; $sign = ($hem == 'W' || $hem == 'S') ? 
-1 : 1; return ((($sec/60+$min)/60)+$deg) * $sign; } /** * convert lat/lng in decimal to deg/min/sec/hem * * @param float $data the decimal value * @param string $latlng either LAT or LNG so we can determine the HEM value * * @return string * @access private * */ private function _d2Dms($data, $latlng) { $deg = 0; $min = 0; $sec = 0; $msec = 0; $hem = ''; if ($latlng == 'LAT') { $hem = ($data > 0) ? 'N' : 'S'; } else { $hem = ($data > 0) ? 'E' : 'W'; } $data = abs($data); $deg = (int)$data; $min = (int)(($data - $deg) * 60); $sec = (int)(((($data - $deg) * 60) - $min) * 60); $msec = round((((((($data - $deg) * 60) - $min) * 60) - $sec) * 1000)); return sprintf('%d %02d %02d.%03d %s', $deg, $min, $sec, round($msec), $hem); } } /* * Local variables: * tab-width: 4 * c-basic-offset: 4 * <API key>: nil * End: */ ?>
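/*
 * Illustrative sketch, not part of Net_DNS2: the RFC 1876 size/precision
 * byte that _precsizeNtoA()/_precsizeAtoN() above convert. The high nibble
 * is a base-10 mantissa and the low nibble a base-10 exponent, so a single
 * byte encodes X * 10^Y centimeters.
 */
#include <stdio.h>

static unsigned long precsize_ntoa(unsigned char prec)
{
	static const unsigned long pow10[10] = {
		1UL, 10UL, 100UL, 1000UL, 10000UL, 100000UL,
		1000000UL, 10000000UL, 100000000UL, 1000000000UL
	};
	return (unsigned long)(((prec >> 4) & 0x0f) % 10) *
	       pow10[(prec & 0x0f) % 10];
}

static unsigned char precsize_aton(unsigned long val)
{
	unsigned char exponent = 0;

	while (val >= 10) {	/* normalise to a single mantissa digit */
		val /= 10;
		++exponent;
	}
	return (unsigned char)((val << 4) | (exponent & 0x0f));
}

int main(void)
{
	/* 10,000,000 cm (100 km) encodes as 0x17 and decodes back intact */
	printf("0x%02x -> %lu cm\n", precsize_aton(10000000UL),
	       precsize_ntoa(precsize_aton(10000000UL)));
	return 0;
}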
;; @file ; IPRT - ASMBitFirstClear(). ; ; ; Copyright (C) 2006-2010 Oracle Corporation ; ; This file is part of VirtualBox Open Source Edition (OSE), as ; available from http://www.virtualbox.org. This file is free software; ; you can redistribute it and/or modify it under the terms of the GNU ; General Public License (GPL) as published by the Free Software ; Foundation, in version 2 as it comes in the "COPYING" file of the ; VirtualBox OSE distribution. VirtualBox OSE is distributed in the ; hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. ; ; The contents of this file may alternatively be used under the terms ; of the Common Development and Distribution License Version 1.0 ; (CDDL) only, as it comes in the "COPYING.CDDL" file of the ; VirtualBox OSE distribution, in which case the provisions of the ; CDDL are applicable instead of those of the GPL. ; ; You may elect to license modified versions of this file under the ; terms and conditions of either the GPL or the CDDL or both. ; ;******************************************************************************* ;* Header Files * ;******************************************************************************* %include "iprt/asmdefs.mac" BEGINCODE ;; ; Finds the first clear bit in a bitmap. ; ; @returns eax Index of the first zero bit. ; @returns eax -1 if no clear bit was found. ; @param rcx pvBitmap Pointer to the bitmap. ; @param edx cBits The number of bits in the bitmap. Multiple of 32. ; BEGINPROC_EXPORTED ASMBitFirstClear ;if (cBits) or edx, edx jz short .failed ;{ push rdi ; asm {...} mov rdi, rcx ; rdi = start of scasd mov ecx, edx add ecx, 31 ; 32 bit aligned shr ecx, 5 ; number of dwords to scan. mov rdx, rdi ; rdx = saved pvBitmap mov eax, 0ffffffffh repe scasd ; Scan for the first dword with any clear bit. je .failed_restore ; find the bit in question lea rdi, [rdi - 4] ; one step back. xor eax, [rdi] ; eax = NOT [rdi] sub rdi, rdx shl edi, 3 ; calc bit offset. mov ecx, 0ffffffffh bsf ecx, eax add ecx, edi mov eax, ecx ; return success pop rdi ret ; failure ;} ;return -1; .failed_restore: pop rdi ret .failed: mov eax, 0ffffffffh ret ENDPROC ASMBitFirstClear
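/*
 * Illustrative sketch, not part of IPRT: the same scan ASMBitFirstClear
 * performs above, in portable C. Walk the bitmap one 32-bit word at a time
 * looking for a word that is not all ones, then return the index of its
 * lowest zero bit, or -1 when every bit is set. As documented above, cBits
 * is assumed to be a multiple of 32.
 */
#include <stdint.h>

static int32_t bit_first_clear(const void *pvBitmap, uint32_t cBits)
{
	const uint32_t *pu32 = (const uint32_t *)pvBitmap;
	uint32_t iWord;

	for (iWord = 0; iWord < cBits / 32; iWord++) {
		uint32_t u32 = ~pu32[iWord];	/* invert: hunt a set bit */

		if (u32) {
			uint32_t iBit = 0;

			while (!(u32 & 1)) {	/* portable stand-in for bsf */
				u32 >>= 1;
				iBit++;
			}
			return (int32_t)(iWord * 32 + iBit);
		}
	}
	return -1;
}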
#include <linux/kernel.h> #include <linux/module.h> #include <linux/random.h> #include <linux/vmalloc.h> #include <linux/hardirq.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" enum { <API key> = 2 * HZ, MAX_MISSES = 3, }; enum { <API key> = 0x1, <API key> = 0x7, <API key> = 0x8, <API key> = 0x9, <API key> = 0xa, <API key> = 0xb, <API key> = 0xc, <API key> = 0xd, <API key> = 0xe, <API key> = 0xf, <API key> = 0x10 }; enum { MLX5_NIC_IFC_FULL = 0, <API key> = 1, <API key> = 2, <API key> = 3 }; enum { <API key>, <API key>, }; static u8 get_nic_state(struct mlx5_core_dev *dev) { return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 3; } static void <API key>(struct mlx5_core_dev *dev) { unsigned long flags; u64 vector; /* wait for pending handlers to complete */ synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD)); spin_lock_irqsave(&dev->cmd.alloc_lock, flags); vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); if (!vector) goto no_trig; vector |= <API key>; <API key>(&dev->cmd.alloc_lock, flags); mlx5_core_dbg(dev, "vector 0x%llx\n", vector); <API key>(dev, vector, true); return; no_trig: <API key>(&dev->cmd.alloc_lock, flags); } static int in_fatal(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; if (get_nic_state(dev) == <API key>) return 1; if (ioread32be(&h->fw_ver) == 0xffffffff) return 1; return 0; } void <API key>(struct mlx5_core_dev *dev, bool force) { mutex_lock(&dev->intf_state_mutex); if (dev->state == <API key>) goto unlock; mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = <API key>; <API key>(dev); } mlx5_core_event(dev, <API key>, 0); mlx5_core_err(dev, "end\n"); unlock: mutex_unlock(&dev->intf_state_mutex); } static void <API key>(struct mlx5_core_dev *dev) { u8 nic_interface = get_nic_state(dev); switch (nic_interface) { case MLX5_NIC_IFC_FULL: mlx5_core_warn(dev, "Expected to see disabled NIC but it is full driver\n"); break; case <API key>: mlx5_core_warn(dev, "starting teardown\n"); break; case <API key>: mlx5_core_warn(dev, "Expected to see disabled NIC but it is no dram nic\n"); break; default: mlx5_core_warn(dev, "Expected to see disabled NIC but it is has invalid value %d\n", nic_interface); } mlx5_disable_device(dev); } static void health_recover(struct work_struct *work) { struct mlx5_core_health *health; struct delayed_work *dwork; struct mlx5_core_dev *dev; struct mlx5_priv *priv; u8 nic_state; dwork = container_of(work, struct delayed_work, work); health = container_of(dwork, struct mlx5_core_health, recover_work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); nic_state = get_nic_state(dev); if (nic_state == <API key>) { dev_err(&dev->pdev->dev, "health recovery flow aborted since the nic state is invalid\n"); return; } dev_err(&dev->pdev->dev, "starting health recovery flow\n"); mlx5_recover_device(dev); } /* How much time to wait until health resetting the driver (in msecs) */ #define <API key> 60000 static void health_care(struct work_struct *work) { unsigned long recover_delay = msecs_to_jiffies(<API key>); struct mlx5_core_health *health; struct mlx5_core_dev *dev; struct mlx5_priv *priv; unsigned long flags; health = container_of(work, struct mlx5_core_health, work); priv = container_of(health, struct mlx5_priv, health); dev = container_of(priv, struct mlx5_core_dev, priv); mlx5_core_warn(dev, 
"handling bad device here\n"); <API key>(dev); spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(<API key>, &health->flags)) <API key>(&health->recover_work, recover_delay); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); <API key>(&health->wq_lock, flags); } static const char *hsynd_str(u8 synd) { switch (synd) { case <API key>: return "firmware internal error"; case <API key>: return "irisc not responding"; case <API key>: return "unrecoverable hardware error"; case <API key>: return "firmware CRC error"; case <API key>: return "ICM fetch PCI error"; case <API key>: return "HW fatal error\n"; case <API key>: return "async EQ buffer overrun"; case <API key>: return "EQ error"; case <API key>: return "Invalid EQ referenced"; case <API key>: return "FFSER error"; case <API key>: return "High temperature"; default: return "unrecognized error"; } } static void print_health_info(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; struct health_buffer __iomem *h = health->health; char fw_str[18]; u32 fw; int i; /* If the syndrom is 0, the device is OK and no need to print buffer */ if (!ioread8(&h->synd)) return; for (i = 0; i < ARRAY_SIZE(h->assert_var); i++) dev_err(&dev->pdev->dev, "assert_var[%d] 0x%08x\n", i, ioread32be(h->assert_var + i)); dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr)); dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra)); sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev)); dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str); dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id)); dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index)); dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd))); dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd)); fw = ioread32be(&h->fw_ver); dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw); } static unsigned long <API key>(void) { unsigned long next; get_random_bytes(&next, sizeof(next)); next %= HZ; next += jiffies + <API key>; return next; } void <API key>(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); if (!test_bit(<API key>, &health->flags)) queue_work(health->wq, &health->work); else dev_err(&dev->pdev->dev, "new health works are not permitted at this stage\n"); <API key>(&health->wq_lock, flags); } static void poll_health(struct timer_list *t) { struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer); struct mlx5_core_health *health = &dev->priv.health; u32 count; if (dev->state == <API key>) goto out; count = ioread32be(health->health_counter); if (count == health->prev) ++health->miss_counter; else health->miss_counter = 0; health->prev = count; if (health->miss_counter == MAX_MISSES) { dev_err(&dev->pdev->dev, "device's health compromised - reached miss count\n"); print_health_info(dev); } if (in_fatal(dev) && !health->sick) { health->sick = true; print_health_info(dev); <API key>(dev); } out: mod_timer(&health->timer, <API key>()); } void <API key>(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; timer_setup(&health->timer, poll_health, 0); health->sick = 0; clear_bit(<API key>, &health->flags); clear_bit(<API key>, &health->flags); health->health = &dev->iseg->health; health->health_counter = &dev->iseg->health_counter; health->timer.expires 
= round_jiffies(jiffies + <API key>); add_timer(&health->timer); } void <API key>(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; del_timer_sync(&health->timer); } void <API key>(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(<API key>, &health->flags); set_bit(<API key>, &health->flags); <API key>(&health->wq_lock, flags); <API key>(&health->recover_work); cancel_work_sync(&health->work); } void <API key>(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; unsigned long flags; spin_lock_irqsave(&health->wq_lock, flags); set_bit(<API key>, &health->flags); <API key>(&health->wq_lock, flags); <API key>(&dev->priv.health.recover_work); } void mlx5_health_cleanup(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; destroy_workqueue(health->wq); } int mlx5_health_init(struct mlx5_core_dev *dev) { struct mlx5_core_health *health; char *name; health = &dev->priv.health; name = kmalloc(64, GFP_KERNEL); if (!name) return -ENOMEM; strcpy(name, "mlx5_health"); strcat(name, dev_name(&dev->pdev->dev)); health->wq = <API key>(name); kfree(name); if (!health->wq) return -ENOMEM; spin_lock_init(&health->wq_lock); INIT_WORK(&health->work, health_care); INIT_DELAYED_WORK(&health->recover_work, health_recover); return 0; }
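/*
 * Illustrative sketch, not part of mlx5: the watchdog pattern poll_health()
 * implements above. Firmware increments a counter; the driver samples it on
 * a timer and treats MAX_MISSES consecutive unchanged samples as a sign that
 * device health is compromised. The *_example names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_MISSES_EXAMPLE 3

struct health_watch_example {
	uint32_t prev;
	int miss_counter;
};

/* Call once per poll tick; returns true when the counter has stalled. */
static bool health_poll_example(struct health_watch_example *w, uint32_t count)
{
	if (count == w->prev)
		++w->miss_counter;
	else
		w->miss_counter = 0;
	w->prev = count;

	return w->miss_counter >= MAX_MISSES_EXAMPLE;
}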
/* { dg-do compile } */ /* { dg-require-effective-target arm_dsp } */ /* Ensure the smlatb doesn't get generated when reading the Q flag from ACLE. */ #include <arm_acle.h> int foo (int x, int in, int32_t c) { short a = in & 0xffff; short b = (in & 0xffff0000) >> 16; int res = x + b * a + __ssat (c, 24); return res + __saturation_occurred (); } /* { dg-final { scan-assembler-not "smlatb\\t" } } */
<?php defined('_JEXEC') or die('RESTRICTED'); ?> <dl class="adminformlist"> <dt><?php echo WFText::_('<API key>');?></dt> <dd> <label for="install" class="tooltip" title="<?php echo WFText::_('<API key>'); ?>::<?php echo WFText::_('<API key>'); ?>"><?php echo WFText::_('<API key>'); ?></label> <span> <input type="file" name="install" id="upload" placeholder="<?php echo $this->state->get('install.directory'); ?>" /> <button id="install_button"><?php echo WFText::_('WF_INSTALLER_UPLOAD'); ?></button> </span> </dd> </dl>
#! /usr/bin/env python # encoding: utf-8 import re from waflib import Utils,Task,TaskGen,Logs from waflib.TaskGen import feature,before_method,after_method,extension from waflib.Configure import conf INC_REGEX="""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" USE_REGEX="""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" MOD_REGEX="""(?:^|;)\s*MODULE(?!\s*PROCEDURE)(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" re_inc=re.compile(INC_REGEX,re.I) re_use=re.compile(USE_REGEX,re.I) re_mod=re.compile(MOD_REGEX,re.I) class fortran_parser(object): def __init__(self,incpaths): self.seen=[] self.nodes=[] self.names=[] self.incpaths=incpaths def find_deps(self,node): txt=node.read() incs=[] uses=[] mods=[] for line in txt.splitlines(): m=re_inc.search(line) if m: incs.append(m.group(1)) m=re_use.search(line) if m: uses.append(m.group(1)) m=re_mod.search(line) if m: mods.append(m.group(1)) return(incs,uses,mods) def start(self,node): self.waiting=[node] while self.waiting: nd=self.waiting.pop(0) self.iter(nd) def iter(self,node): path=node.abspath() incs,uses,mods=self.find_deps(node) for x in incs: if x in self.seen: continue self.seen.append(x) self.tryfind_header(x) for x in uses: name="USE@%s"%x if not name in self.names: self.names.append(name) for x in mods: name="MOD@%s"%x if not name in self.names: self.names.append(name) def tryfind_header(self,filename): found=None for n in self.incpaths: found=n.find_resource(filename) if found: self.nodes.append(found) self.waiting.append(found) break if not found: if not filename in self.names: self.names.append(filename)
#include <rthw.h> #include <rtthread.h> #include <s3c24x0.h> #ifdef RT_USING_RTGUI #include <rtgui/rtgui_system.h> #include <rtgui/rtgui_server.h> #include <rtgui/event.h> #endif #include "lcd.h" #include "touch.h" /* ADCCON Register Bits */ #define <API key> (1<<15) #define <API key> (1<<14) #define <API key>(x) (((x)&0xFF)<<6) #define <API key> (0xFF<<6) #define <API key>(x) (((x)&0x7)<<3) #define <API key> (0x7<<3) #define <API key> (1<<2) #define <API key> (1<<1) #define <API key> (1<<0) #define <API key> (0x3<<0) /* ADCTSC Register Bits */ #define <API key> (1<<8) /* ghcstop add for s3c2440a */ #define <API key> (1<<7) #define <API key> (1<<6) #define <API key> (1<<5) #define <API key> (1<<4) #define <API key> (1<<3) #define <API key> (1<<2) #define <API key>(x) (((x)&0x3)<<0) /* ADCDAT0 Bits */ #define <API key> (1<<15) #define <API key> (1<<14) #define <API key> (0x3<<12) #define <API key> (0x03FF) /* ADCDAT1 Bits */ #define <API key> (1<<15) #define <API key> (1<<14) #define <API key> (0x3<<12) #define <API key> (0x03FF) #define WAIT4INT(x) (((x)<<8) | \ <API key> | <API key> | <API key> | \ <API key>(3)) #define AUTOPST (<API key> | <API key> | <API key> | \ <API key> | <API key>(0)) #define X_MIN 74 #define X_MAX 934 #define Y_MIN 920 #define Y_MAX 89 struct s3c2410ts { long xp; long yp; int count; int shift; int delay; int presc; char phys[32]; }; static struct s3c2410ts ts; struct rtgui_touch_device { struct rt_device parent; rt_timer_t poll_timer; rt_uint16_t x, y; rt_bool_t calibrating; <API key> calibration_func; <API key> eventpost_func; void *eventpost_param; rt_uint16_t min_x, max_x; rt_uint16_t min_y, max_y; rt_uint16_t width; rt_uint16_t height; rt_bool_t first_down_report; }; static struct rtgui_touch_device *touch = RT_NULL; #ifdef RT_USING_RTGUI static void report_touch_input(int updown) { struct rtgui_event_mouse emouse; <API key>(&emouse); emouse.wid = RT_NULL; /* set emouse button */ emouse.button = <API key>; emouse.parent.sender = RT_NULL; if (updown) { ts.xp = ts.xp / ts.count; ts.yp = ts.yp / ts.count;; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { touch->x = ts.xp; touch->y = ts.yp; } else { if (touch->max_x > touch->min_x) { touch->x = touch->width * (ts.xp-touch->min_x)/(touch->max_x-touch->min_x); } else { touch->x = touch->width * ( touch->min_x - ts.xp ) / (touch->min_x-touch->max_x); } if (touch->max_y > touch->min_y) { touch->y = touch->height * ( ts.yp - touch->min_y ) / (touch->max_y-touch->min_y); } else { touch->y = touch->height * ( touch->min_y - ts.yp ) / (touch->min_y-touch->max_y); } } emouse.x = touch->x; emouse.y = touch->y; if (touch->first_down_report == RT_TRUE) { emouse.parent.type = <API key>; emouse.button |= <API key>; } else { emouse.parent.type = <API key>; emouse.button = 0; } } else { emouse.x = touch->x; emouse.y = touch->y; emouse.parent.type = <API key>; emouse.button |= <API key>; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { /* callback function */ touch->calibration_func(emouse.x, emouse.y); } } /* rt_kprintf("touch %s: ts.x: %d, ts.y: %d\n", updown? 
"down" : "up", touch->x, touch->y); */ /* send event to server */ if (touch->calibrating != RT_TRUE) { <API key>((&emouse.parent), sizeof(emouse)); } } #else static void report_touch_input(int updown) { struct rt_touch_event touch_event; if (updown) { ts.xp = ts.xp / ts.count; ts.yp = ts.yp / ts.count; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { touch->x = ts.xp; touch->y = ts.yp; } else { if (touch->max_x > touch->min_x) { touch->x = touch->width * ( ts.xp - touch->min_x ) / (touch->max_x-touch->min_x); } else { touch->x = touch->width * ( touch->min_x - ts.xp ) / (touch->min_x-touch->max_x); } if (touch->max_y > touch->min_y) { touch->y = touch->height * ( ts.yp - touch->min_y ) / (touch->max_y-touch->min_y); } else { touch->y = touch->height * ( touch->min_y - ts.yp ) / (touch->min_y-touch->max_y); } } touch_event.x = touch->x; touch_event.y = touch->y; touch_event.pressed = 1; if (touch->first_down_report == RT_TRUE) { if (touch->calibrating != RT_TRUE && touch->eventpost_func) { touch->eventpost_func(touch->eventpost_param, &touch_event); } } } else { touch_event.x = touch->x; touch_event.y = touch->y; touch_event.pressed = 0; if ((touch->calibrating == RT_TRUE) && (touch->calibration_func != RT_NULL)) { /* callback function */ touch->calibration_func(touch_event.x, touch_event.y); } if (touch->calibrating != RT_TRUE && touch->eventpost_func) { touch->eventpost_func(touch->eventpost_param, &touch_event); } } } #endif static void touch_timer_fire(void *parameter) { rt_uint32_t data0; rt_uint32_t data1; int updown; data0 = ADCDAT0; data1 = ADCDAT1; updown = (!(data0 & <API key>)) && (!(data1 & <API key>)); if (updown) { if (ts.count != 0) { report_touch_input(updown); } ts.xp = 0; ts.yp = 0; ts.count = 0; ADCTSC = <API key> | AUTOPST; ADCCON |= <API key>; } } static void <API key>(void) { rt_uint32_t data0; rt_uint32_t data1; data0 = ADCDAT0; data1 = ADCDAT1; ts.xp += data0 & <API key>; ts.yp += data1 & <API key>; ts.count ++; if (ts.count < (1<<ts.shift)) { ADCTSC = <API key> | AUTOPST; ADCCON |= <API key>; } else { if (touch->first_down_report) { report_touch_input(1); ts.xp = 0; ts.yp = 0; ts.count = 0; touch->first_down_report = 0; } /* start timer */ rt_timer_start(touch->poll_timer); ADCTSC = WAIT4INT(1); } SUBSRCPND |= BIT_SUB_ADC; } static void <API key>(void) { rt_uint32_t data0; rt_uint32_t data1; int updown; data0 = ADCDAT0; data1 = ADCDAT1; updown = (!(data0 & <API key>)) && (!(data1 & <API key>)); /* rt_kprintf("stylus: %s\n", updown? 
"down" : "up"); */ if (updown) { touch_timer_fire(0); } else { /* stop timer */ rt_timer_stop(touch->poll_timer); touch->first_down_report = RT_TRUE; if (ts.xp >= 0 && ts.yp >= 0) { report_touch_input(updown); } ts.count = 0; ADCTSC = WAIT4INT(0); } SUBSRCPND |= BIT_SUB_TC; } static void rt_touch_handler(int irqno) { if (SUBSRCPND & BIT_SUB_ADC) { /* INT_SUB_ADC */ <API key>(); } if (SUBSRCPND & BIT_SUB_TC) { /* INT_SUB_TC */ <API key>(); } /* clear interrupt */ INTPND |= (1ul << INTADC); } /* RT-Thread Device Interface */ static rt_err_t rtgui_touch_init(rt_device_t dev) { /* init touch screen structure */ rt_memset(&ts, 0, sizeof(struct s3c2410ts)); ts.delay = 50000; ts.presc = 9; ts.shift = 2; ts.count = 0; ts.xp = ts.yp = 0; ADCCON = <API key> | <API key>(ts.presc); ADCDLY = ts.delay; ADCTSC = WAIT4INT(0); <API key>(INTADC, rt_touch_handler, RT_NULL , "INTADC"); <API key>(INTADC); /* clear interrupt */ INTPND |= (1ul << INTADC); SUBSRCPND |= BIT_SUB_TC; SUBSRCPND |= BIT_SUB_ADC; /* install interrupt handler */ INTSUBMSK &= ~BIT_SUB_ADC; INTSUBMSK &= ~BIT_SUB_TC; touch->first_down_report = RT_TRUE; return RT_EOK; } static rt_err_t rtgui_touch_control(rt_device_t dev, rt_uint8_t cmd, void *args) { switch (cmd) { case <API key>: touch->calibrating = RT_TRUE; touch->calibration_func = (<API key>)args; break; case RT_TOUCH_NORMAL: touch->calibrating = RT_FALSE; break; case <API key>: { struct calibration_data *data; data = (struct calibration_data *)args; /* update */ touch->min_x = data->min_x; touch->max_x = data->max_x; touch->min_y = data->min_y; touch->max_y = data->max_y; /* rt_kprintf("min_x = %d, max_x = %d, min_y = %d, max_y = %d\n", touch->min_x, touch->max_x, touch->min_y, touch->max_y); */ } break; case RT_TOUCH_EVENTPOST: touch->eventpost_func = (<API key>)args; break; case <API key>: touch->eventpost_param = args; break; } return RT_EOK; } void rtgui_touch_hw_init(void) { rt_err_t result = RT_FALSE; rt_device_t device = RT_NULL; struct <API key> info; touch = (struct rtgui_touch_device *)rt_malloc(sizeof(struct rtgui_touch_device)); if (touch == RT_NULL) return; /* no memory yet */ /* clear device structure */ rt_memset(&(touch->parent), 0, sizeof(struct rt_device)); touch->calibrating = RT_FALSE; touch->min_x = X_MIN; touch->max_x = X_MAX; touch->min_y = Y_MIN; touch->max_y = Y_MAX; touch->eventpost_func = RT_NULL; touch->eventpost_param = RT_NULL; /* init device structure */ touch->parent.type = <API key>; touch->parent.init = rtgui_touch_init; touch->parent.control = rtgui_touch_control; touch->parent.user_data = RT_NULL; device = rt_device_find("lcd"); if (device == RT_NULL) return; /* no this device */ /* get graphic device info */ result = rt_device_control(device, <API key>, &info); if (result != RT_EOK) { /* get device information failed */ return; } touch->width = info.width; touch->height = info.height; /* create 1/8 second timer */ touch->poll_timer = rt_timer_create("touch", touch_timer_fire, RT_NULL, RT_TICK_PER_SECOND/8, <API key>); /* register touch device to RT-Thread */ rt_device_register(&(touch->parent), "touch", RT_DEVICE_FLAG_RDWR); }
#ifndef _BSP_DRV_IPC_H_
#define _BSP_DRV_IPC_H_

#include <asm/io.h>
#include "<API key>.h"
#include "soc_irqs.h"
#include "soc_ipc_interface.h"

#ifdef __cplusplus
extern "C" {
#endif

typedef enum <API key>
{
	<API key> = 32,
} <API key>;

#define SIZE_4K      (4096)
#define IPC_REG_SIZE (SIZE_4K)

#define BSP_RegRd(uwAddr)          (*((volatile int *)(uwAddr)))
#define BSP_RegWr(uwAddr, uwValue) (*((volatile int *)(uwAddr)) = (uwValue))

#define IPC_CHECK_PARA(ulLvl) \
	do { \
		if ((ulLvl) >= 32) \
		{ \
			printk("Wrong para, line:%d\n", __LINE__); \
			return -1; \
		} \
	} while (0)

int <API key>(void);
void <API key>(unsigned int enTarget, unsigned int enIntSrc);
int <API key>(unsigned int ulLvl);
int <API key>(<API key> ulLvl);

#ifdef __cplusplus
}
#endif

#endif /* end #define _BSP_DRV_IPC_H_ */
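/*
 * A minimal user-space demonstration of the BSP_RegRd()/BSP_RegWr()
 * accessors from the header above: casting an address to
 * (volatile int *) forces the compiler to perform the load/store on
 * every use, which is what device-register access requires. A plain
 * variable stands in for an MMIO register so this sketch can run.
 */
#include <stdio.h>

#define BSP_RegRd(uwAddr)          (*((volatile int *)(uwAddr)))
#define BSP_RegWr(uwAddr, uwValue) (*((volatile int *)(uwAddr)) = (uwValue))

int main(void)
{
	int fake_reg = 0; /* stand-in for a hardware register */
	BSP_RegWr(&fake_reg, 0x5a5a);
	printf("reg = 0x%x\n", BSP_RegRd(&fake_reg));
	return 0;
}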
/* #define DEBUG */ /* #define VERBOSE_DEBUG */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/wait.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/types.h> #include <linux/file.h> #include <linux/device.h> #include <linux/miscdevice.h> #include <linux/usb.h> #include <linux/usb_usual.h> #include <linux/usb/ch9.h> #include <linux/usb/f_mtp.h> #define <API key> 16384 #define INTR_BUFFER_SIZE 28 /* String IDs */ #define <API key> 0 /* values for mtp_dev.state */ #define STATE_OFFLINE 0 /* initial state, disconnected */ #define STATE_READY 1 /* ready for userspace calls */ #define STATE_BUSY 2 /* processing userspace calls */ #define STATE_CANCELED 3 /* transaction canceled by host */ #define STATE_ERROR 4 /* error from completion routine */ /* number of tx and rx requests to allocate */ #define TX_REQ_MAX 4 #define RX_REQ_MAX 2 #define INTR_REQ_MAX 5 /* ID for Microsoft MTP OS String */ #define MTP_OS_STRING_ID 0xEE /* MTP class reqeusts */ #define MTP_REQ_CANCEL 0x64 #define <API key> 0x65 #define MTP_REQ_RESET 0x66 #define <API key> 0x67 /* constants for device status */ #define MTP_RESPONSE_OK 0x2001 #define <API key> 0x2019 static const char mtp_shortname[] = "mtp_usb"; struct mtp_dev { struct usb_function function; struct usb_composite_dev *cdev; spinlock_t lock; struct usb_ep *ep_in; struct usb_ep *ep_out; struct usb_ep *ep_intr; int state; /* synchronize access to our device file */ atomic_t open_excl; /* to enforce only one ioctl at a time */ atomic_t ioctl_excl; struct list_head tx_idle; struct list_head intr_idle; wait_queue_head_t read_wq; wait_queue_head_t write_wq; wait_queue_head_t intr_wq; struct usb_request *rx_req[RX_REQ_MAX]; int rx_done; /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and * <API key> ioctls on a work queue */ struct workqueue_struct *wq; struct work_struct send_file_work; struct work_struct receive_file_work; struct file *xfer_file; loff_t xfer_file_offset; int64_t xfer_file_length; unsigned xfer_send_header; uint16_t xfer_command; uint32_t xfer_transaction_id; int xfer_result; int zlp_maxpacket; }; static struct <API key> mtp_interface_desc = { .bLength = <API key>, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = <API key>, .bInterfaceSubClass = <API key>, .bInterfaceProtocol = 0, }; static struct <API key> ptp_interface_desc = { .bLength = <API key>, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 0, .bNumEndpoints = 3, .bInterfaceClass = <API key>, .bInterfaceSubClass = 1, .bInterfaceProtocol = 1, }; static struct <API key> <API key> = { .bLength = <API key>, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = <API key>, .wMaxPacketSize = <API key>(512), }; static struct <API key> <API key> = { .bLength = <API key>, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = <API key>, .wMaxPacketSize = <API key>(512), }; static struct <API key> <API key> = { .bLength = <API key>, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = <API key>, }; static struct <API key> <API key> = { .bLength = <API key>, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = <API key>, }; static struct <API key> mtp_intr_desc = { .bLength = <API key>, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = <API key>, .wMaxPacketSize = <API key>(INTR_BUFFER_SIZE), .bInterval = 
6, }; static struct <API key> *fs_mtp_descs[] = { (struct <API key> *) &mtp_interface_desc, (struct <API key> *) &<API key>, (struct <API key> *) &<API key>, (struct <API key> *) &mtp_intr_desc, NULL, }; static struct <API key> *hs_mtp_descs[] = { (struct <API key> *) &mtp_interface_desc, (struct <API key> *) &<API key>, (struct <API key> *) &<API key>, (struct <API key> *) &mtp_intr_desc, NULL, }; static struct <API key> *fs_ptp_descs[] = { (struct <API key> *) &ptp_interface_desc, (struct <API key> *) &<API key>, (struct <API key> *) &<API key>, (struct <API key> *) &mtp_intr_desc, NULL, }; static struct <API key> *hs_ptp_descs[] = { (struct <API key> *) &ptp_interface_desc, (struct <API key> *) &<API key>, (struct <API key> *) &<API key>, (struct <API key> *) &mtp_intr_desc, NULL, }; static struct usb_string mtp_string_defs[] = { /* Naming interface "MTP" so libmtp will recognize us */ [<API key>].s = "MTP", { }, /* end of list */ }; static struct usb_gadget_strings mtp_string_table = { .language = 0x0409, /* en-US */ .strings = mtp_string_defs, }; static struct usb_gadget_strings *mtp_strings[] = { &mtp_string_table, NULL, }; /* Microsoft MTP OS String */ static u8 mtp_os_string[] = { 18, /* sizeof(mtp_os_string) */ USB_DT_STRING, /* Signature field: "MSFT100" */ 'M', 0, 'S', 0, 'F', 0, 'T', 0, '1', 0, '0', 0, '0', 0, /* vendor code */ 1, /* padding */ 0 }; /* Microsoft Extended Configuration Descriptor Header Section */ struct <API key> { __le32 dwLength; __u16 bcdVersion; __le16 wIndex; __u8 bCount; __u8 reserved[7]; }; /* Microsoft Extended Configuration Descriptor Function Section */ struct <API key> { __u8 <API key>; __u8 bInterfaceCount; __u8 compatibleID[8]; __u8 subCompatibleID[8]; __u8 reserved[6]; }; /* MTP Extended Configuration Descriptor */ struct { struct <API key> header; struct <API key> function; } mtp_ext_config_desc = { .header = { .dwLength = <API key>(sizeof(mtp_ext_config_desc)), .bcdVersion = <API key>(0x0100), .wIndex = <API key>(4), .bCount = <API key>(1), }, .function = { .<API key> = 0, .bInterfaceCount = 1, .compatibleID = { 'M', 'T', 'P' }, }, }; struct mtp_device_status { __le16 wLength; __le16 wCode; }; /* temporary variable used between mtp_open() and mtp_gadget_bind() */ static struct mtp_dev *_mtp_dev; static inline struct mtp_dev *func_to_mtp(struct usb_function *f) { return container_of(f, struct mtp_dev, function); } static struct usb_request *mtp_request_new(struct usb_ep *ep, int buffer_size) { struct usb_request *req = <API key>(ep, GFP_KERNEL); if (!req) return NULL; /* now allocate buffers for the requests */ req->buf = kmalloc(buffer_size, GFP_KERNEL); if (!req->buf) { usb_ep_free_request(ep, req); return NULL; } return req; } static void mtp_request_free(struct usb_request *req, struct usb_ep *ep) { if (req) { kfree(req->buf); usb_ep_free_request(ep, req); } } static inline int mtp_lock(atomic_t *excl) { if (atomic_inc_return(excl) == 1) { return 0; } else { atomic_dec(excl); return -1; } } static inline void mtp_unlock(atomic_t *excl) { atomic_dec(excl); } /* add a request to the tail of a list */ static void mtp_req_put(struct mtp_dev *dev, struct list_head *head, struct usb_request *req) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); list_add_tail(&req->list, head); <API key>(&dev->lock, flags); } /* remove a request from the head of a list */ static struct usb_request *mtp_req_get(struct mtp_dev *dev, struct list_head *head) { unsigned long flags; struct usb_request *req; spin_lock_irqsave(&dev->lock, flags); if 
(list_empty(head)) { req = 0; } else { req = list_first_entry(head, struct usb_request, list); list_del(&req->list); } <API key>(&dev->lock, flags); return req; } static void mtp_complete_in(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; if (req->status != 0) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->tx_idle, req); wake_up(&dev->write_wq); } static void mtp_complete_out(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; dev->rx_done = 1; if (req->status != 0) dev->state = STATE_ERROR; wake_up(&dev->read_wq); } static void mtp_complete_intr(struct usb_ep *ep, struct usb_request *req) { struct mtp_dev *dev = _mtp_dev; if (req->status != 0) dev->state = STATE_ERROR; mtp_req_put(dev, &dev->intr_idle, req); wake_up(&dev->intr_wq); } static int <API key>(struct mtp_dev *dev, struct <API key> *in_desc, struct <API key> *out_desc, struct <API key> *intr_desc) { struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; struct usb_ep *ep; int i; DBG(cdev, "<API key> dev: %p\n", dev); ep = usb_ep_autoconfig(cdev->gadget, in_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_in failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for ep_in got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_in = ep; ep = usb_ep_autoconfig(cdev->gadget, out_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_out failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_out got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_out = ep; ep = usb_ep_autoconfig(cdev->gadget, intr_desc); if (!ep) { DBG(cdev, "usb_ep_autoconfig for ep_intr failed\n"); return -ENODEV; } DBG(cdev, "usb_ep_autoconfig for mtp ep_intr got %s\n", ep->name); ep->driver_data = dev; /* claim the endpoint */ dev->ep_intr = ep; /* now allocate requests for our endpoints */ for (i = 0; i < TX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_in, <API key>); if (!req) goto fail; req->complete = mtp_complete_in; mtp_req_put(dev, &dev->tx_idle, req); } for (i = 0; i < RX_REQ_MAX; i++) { req = mtp_request_new(dev->ep_out, <API key>); if (!req) goto fail; req->complete = mtp_complete_out; dev->rx_req[i] = req; } for (i = 0; i < INTR_REQ_MAX; i++) { req = mtp_request_new(dev->ep_intr, INTR_BUFFER_SIZE); if (!req) goto fail; req->complete = mtp_complete_intr; mtp_req_put(dev, &dev->intr_idle, req); } return 0; fail: printk(KERN_ERR "mtp_bind() could not allocate requests\n"); return -1; } static ssize_t mtp_read(struct file *fp, char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req; int r = count, xfer; int ret = 0; DBG(cdev, "mtp_read(%d)\n", count); if (count > <API key>) return -EINVAL; /* we will block until we're online */ DBG(cdev, "mtp_read: waiting for online state\n"); ret = <API key>(dev->read_wq, dev->state != STATE_OFFLINE); if (ret < 0) { r = ret; goto done; } spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); requeue_req: /* queue a request */ req = dev->rx_req[0]; req->length = count; dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, req, GFP_KERNEL); if (ret < 0) { r = -EIO; goto done; } else { DBG(cdev, "rx %p queue\n", req); } /* wait for a request to complete */ ret = <API key>(dev->read_wq, dev->rx_done); if (ret < 0) { r 
= ret; usb_ep_dequeue(dev->ep_out, req); goto done; } if (dev->state == STATE_BUSY) { /* If we got a 0-len packet, throw it back and try again. */ if (req->actual == 0) goto requeue_req; DBG(cdev, "rx %p %d\n", req, req->actual); xfer = (req->actual < count) ? req->actual : count; r = xfer; if (copy_to_user(buf, req->buf, xfer)) r = -EFAULT; } else r = -EIO; done: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_read returning %d\n", r); return r; } static ssize_t mtp_write(struct file *fp, const char __user *buf, size_t count, loff_t *pos) { struct mtp_dev *dev = fp->private_data; struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; int r = count, xfer; int sendZLP = 0; int ret; DBG(cdev, "mtp_write(%d)\n", count); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); return -ECANCELED; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); return -ENODEV; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. */ if ((count & (dev->zlp_maxpacket - 1)) == 0) sendZLP = 1; while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; if (dev->state != STATE_BUSY) { DBG(cdev, "mtp_write dev->error\n"); r = -EIO; break; } /* get an idle tx request to use */ req = 0; ret = <API key>(dev->write_wq, ((req = mtp_req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY)); if (!req) { r = ret; break; } if (count > <API key>) xfer = <API key>; else xfer = count; if (xfer && copy_from_user(req->buf, buf, xfer)) { r = -EFAULT; break; } req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "mtp_write: xfer error %d\n", ret); r = -EIO; break; } buf += xfer; count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) mtp_req_put(dev, &dev->tx_idle, req); spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) r = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); DBG(cdev, "mtp_write returning %d\n", r); return r; } /* read from a local file and write to USB */ static void send_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *req = 0; struct mtp_data_header *header; struct file *filp; loff_t offset; int64_t count; int xfer, ret, hdr_size; int r = 0; int sendZLP = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "send_file_work(%lld %lld)\n", offset, count); if (dev->xfer_send_header) { hdr_size = sizeof(struct mtp_data_header); count += hdr_size; } else { hdr_size = 0; } /* we need to send a zero length packet to signal the end of transfer * if the transfer size is aligned to a packet boundary. 
*/ if ((count & (dev->zlp_maxpacket - 1)) == 0) sendZLP = 1; while (count > 0 || sendZLP) { /* so we exit after sending ZLP */ if (count == 0) sendZLP = 0; /* get an idle tx request to use */ req = 0; ret = <API key>(dev->write_wq, (req = mtp_req_get(dev, &dev->tx_idle)) || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; break; } if (!req) { r = ret; break; } if (count > <API key>) xfer = <API key>; else xfer = count; if (hdr_size) { /* prepend MTP data header */ header = (struct mtp_data_header *)req->buf; header->length = __cpu_to_le32(count); header->type = __cpu_to_le16(2); /* data packet */ header->command = __cpu_to_le16(dev->xfer_command); header->transaction_id = __cpu_to_le32(dev->xfer_transaction_id); } ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, &offset); if (ret < 0) { r = ret; break; } xfer = ret + hdr_size; hdr_size = 0; req->length = xfer; ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL); if (ret < 0) { DBG(cdev, "send_file_work: xfer error %d\n", ret); dev->state = STATE_ERROR; r = -EIO; break; } count -= xfer; /* zero this so we don't try to free it on error exit */ req = 0; } if (req) mtp_req_put(dev, &dev->tx_idle, req); DBG(cdev, "send_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } /* read from USB and write to a local file */ static void receive_file_work(struct work_struct *data) { struct mtp_dev *dev = container_of(data, struct mtp_dev, receive_file_work); struct usb_composite_dev *cdev = dev->cdev; struct usb_request *read_req = NULL, *write_req = NULL; struct file *filp; loff_t offset; int64_t count; int ret, cur_buf = 0; int r = 0; /* read our parameters */ smp_rmb(); filp = dev->xfer_file; offset = dev->xfer_file_offset; count = dev->xfer_file_length; DBG(cdev, "receive_file_work(%lld)\n", count); while (count > 0 || write_req) { if (count > 0) { /* queue a request */ read_req = dev->rx_req[cur_buf]; cur_buf = (cur_buf + 1) % RX_REQ_MAX; read_req->length = (count > <API key> ? 
<API key> : count); dev->rx_done = 0; ret = usb_ep_queue(dev->ep_out, read_req, GFP_KERNEL); if (ret < 0) { r = -EIO; dev->state = STATE_ERROR; break; } } if (write_req) { DBG(cdev, "rx %p %d\n", write_req, write_req->actual); ret = vfs_write(filp, write_req->buf, write_req->actual, &offset); DBG(cdev, "vfs_write %d\n", ret); if (ret != write_req->actual) { r = -EIO; dev->state = STATE_ERROR; break; } write_req = NULL; } if (read_req) { /* wait for our last read to complete */ ret = <API key>(dev->read_wq, dev->rx_done || dev->state != STATE_BUSY); if (dev->state == STATE_CANCELED) { r = -ECANCELED; if (!dev->rx_done) usb_ep_dequeue(dev->ep_out, read_req); break; } /* if xfer_file_length is 0xFFFFFFFF, then we read until * we get a zero length packet */ if (count != 0xFFFFFFFF) count -= read_req->actual; if (read_req->actual < read_req->length) { /* short packet is used to signal EOF for sizes > 4 gig */ DBG(cdev, "got short packet\n"); count = 0; } write_req = read_req; read_req = NULL; } } DBG(cdev, "receive_file_work returning %d\n", r); /* write the result */ dev->xfer_result = r; smp_wmb(); } static int mtp_send_event(struct mtp_dev *dev, struct mtp_event *event) { struct usb_request *req= NULL; int ret; int length = event->length; DBG(dev->cdev, "mtp_send_event(%d)\n", event->length); if (length < 0 || length > INTR_BUFFER_SIZE) return -EINVAL; if (dev->state == STATE_OFFLINE) return -ENODEV; ret = <API key>(dev->intr_wq, (req = mtp_req_get(dev, &dev->intr_idle)), msecs_to_jiffies(1000)); if (!req) return -ETIME; if (copy_from_user(req->buf, (void __user *)event->data, length)) { mtp_req_put(dev, &dev->intr_idle, req); return -EFAULT; } req->length = length; ret = usb_ep_queue(dev->ep_intr, req, GFP_KERNEL); if (ret) mtp_req_put(dev, &dev->intr_idle, req); return ret; } static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value) { struct mtp_dev *dev = fp->private_data; struct file *filp = NULL; int ret = -EINVAL; if (mtp_lock(&dev->ioctl_excl)) return -EBUSY; switch (code) { case MTP_SEND_FILE: case MTP_RECEIVE_FILE: case <API key>: { struct mtp_file_range mfr; struct work_struct *work; spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) { /* report cancelation to userspace */ dev->state = STATE_READY; spin_unlock_irq(&dev->lock); ret = -ECANCELED; goto out; } if (dev->state == STATE_OFFLINE) { spin_unlock_irq(&dev->lock); ret = -ENODEV; goto out; } dev->state = STATE_BUSY; spin_unlock_irq(&dev->lock); if (copy_from_user(&mfr, (void __user *)value, sizeof(mfr))) { ret = -EFAULT; goto fail; } /* hold a reference to the file while we are working with it */ filp = fget(mfr.fd); if (!filp) { ret = -EBADF; goto fail; } /* write the parameters */ dev->xfer_file = filp; dev->xfer_file_offset = mfr.offset; dev->xfer_file_length = mfr.length; smp_wmb(); if (code == <API key>) { work = &dev->send_file_work; dev->xfer_send_header = 1; dev->xfer_command = mfr.command; dev->xfer_transaction_id = mfr.transaction_id; } else if (code == MTP_SEND_FILE) { work = &dev->send_file_work; dev->xfer_send_header = 0; } else { work = &dev->receive_file_work; } /* We do the file transfer on a work queue so it will run * in kernel context, which is necessary for vfs_read and * vfs_write to use our buffers in the kernel address space. 
*/ queue_work(dev->wq, work); /* wait for operation to complete */ flush_workqueue(dev->wq); fput(filp); /* read the result */ smp_rmb(); ret = dev->xfer_result; break; } case MTP_SEND_EVENT: { struct mtp_event event; /* return here so we don't change dev->state below, * which would interfere with bulk transfer state. */ if (copy_from_user(&event, (void __user *)value, sizeof(event))) ret = -EFAULT; else ret = mtp_send_event(dev, &event); goto out; } } fail: spin_lock_irq(&dev->lock); if (dev->state == STATE_CANCELED) ret = -ECANCELED; else if (dev->state != STATE_OFFLINE) dev->state = STATE_READY; spin_unlock_irq(&dev->lock); out: mtp_unlock(&dev->ioctl_excl); DBG(dev->cdev, "ioctl returning %d\n", ret); return ret; } static int mtp_open(struct inode *ip, struct file *fp) { struct <API key> **descriptors; printk(KERN_INFO "mtp_open\n"); if (!_mtp_dev->cdev) { WARN(1, "_mtp_dev->cdev is NULL in mtp_open\n"); return -ENODEV; } if (mtp_lock(&_mtp_dev->open_excl)) return -EBUSY; /* clear any error condition */ if (_mtp_dev->state != STATE_OFFLINE) _mtp_dev->state = STATE_READY; if (_mtp_dev->cdev->gadget->speed == USB_SPEED_HIGH) descriptors = _mtp_dev->function.hs_descriptors; else descriptors = _mtp_dev->function.descriptors; /* find mtp ep_in descriptor */ for (; *descriptors; ++descriptors) { struct <API key> *ep; ep = (struct <API key> *)*descriptors; if (ep->bDescriptorType == USB_DT_ENDPOINT && (ep->bEndpointAddress & USB_DIR_IN) && ep->bmAttributes == <API key>) { _mtp_dev->zlp_maxpacket = __le16_to_cpu(ep->wMaxPacketSize); fp->private_data = _mtp_dev; return 0; } } return -ENODEV; } static int mtp_release(struct inode *ip, struct file *fp) { printk(KERN_INFO "mtp_release\n"); mtp_unlock(&_mtp_dev->open_excl); return 0; } /* file operations for /dev/mtp_usb */ static const struct file_operations mtp_fops = { .owner = THIS_MODULE, .read = mtp_read, .write = mtp_write, .unlocked_ioctl = mtp_ioctl, .open = mtp_open, .release = mtp_release, }; static struct miscdevice mtp_device = { .minor = MISC_DYNAMIC_MINOR, .name = mtp_shortname, .fops = &mtp_fops, }; static int mtp_ctrlrequest(struct usb_composite_dev *cdev, const struct usb_ctrlrequest *ctrl) { struct mtp_dev *dev = _mtp_dev; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); unsigned long flags; VDBG(cdev, "mtp_ctrlrequest " "%02x.%02x v%04x i%04x l%u\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); /* Handle MTP OS string */ if (ctrl->bRequestType == (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE) && ctrl->bRequest == <API key> && (w_value >> 8) == USB_DT_STRING && (w_value & 0xFF) == MTP_OS_STRING_ID) { value = (w_length < sizeof(mtp_os_string) ? w_length : sizeof(mtp_os_string)); memcpy(cdev->req->buf, mtp_os_string, value); } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_VENDOR) { /* Handle MTP OS descriptor */ DBG(cdev, "vendor request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (ctrl->bRequest == 1 && (ctrl->bRequestType & USB_DIR_IN) && (w_index == 4 || w_index == 5)) { value = (w_length < sizeof(mtp_ext_config_desc) ? 
w_length : sizeof(mtp_ext_config_desc)); memcpy(cdev->req->buf, &mtp_ext_config_desc, value); } } else if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_CLASS) { DBG(cdev, "class request: %d index: %d value: %d length: %d\n", ctrl->bRequest, w_index, w_value, w_length); if (ctrl->bRequest == MTP_REQ_CANCEL && w_index == 0 && w_value == 0) { DBG(cdev, "MTP_REQ_CANCEL\n"); spin_lock_irqsave(&dev->lock, flags); if (dev->state == STATE_BUSY) { dev->state = STATE_CANCELED; wake_up(&dev->read_wq); wake_up(&dev->write_wq); } <API key>(&dev->lock, flags); /* We need to queue a request to read the remaining * bytes, but we don't actually need to look at * the contents. */ value = w_length; } else if (ctrl->bRequest == <API key> && w_index == 0 && w_value == 0) { struct mtp_device_status *status = cdev->req->buf; status->wLength = <API key>(sizeof(*status)); DBG(cdev, "<API key>\n"); spin_lock_irqsave(&dev->lock, flags); /* device status is "busy" until we report * the cancelation to userspace */ if (dev->state == STATE_CANCELED) status->wCode = __cpu_to_le16(<API key>); else status->wCode = __cpu_to_le16(MTP_RESPONSE_OK); <API key>(&dev->lock, flags); value = sizeof(*status); } } /* respond with data transfer or status phase? */ if (value >= 0) { int rc; cdev->req->zero = value < w_length; cdev->req->length = value; rc = usb_ep_queue(cdev->gadget->ep0, cdev->req, GFP_ATOMIC); if (rc < 0) ERROR(cdev, "%s setup response queue error\n", __func__); } return value; } static int mtp_function_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct mtp_dev *dev = func_to_mtp(f); int id; int ret; dev->cdev = cdev; DBG(cdev, "mtp_function_bind dev: %p\n", dev); /* allocate interface ID(s) */ id = usb_interface_id(c, f); if (id < 0) return id; mtp_interface_desc.bInterfaceNumber = id; /* allocate endpoints */ ret = <API key>(dev, &<API key>, &<API key>, &mtp_intr_desc); if (ret) return ret; /* support high speed hardware */ if (gadget_is_dualspeed(c->cdev->gadget)) { <API key>.bEndpointAddress = <API key>.bEndpointAddress; <API key>.bEndpointAddress = <API key>.bEndpointAddress; } DBG(cdev, "%s speed %s: IN/%s, OUT/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", f->name, dev->ep_in->name, dev->ep_out->name); return 0; } static void mtp_function_unbind(struct usb_configuration *c, struct usb_function *f) { struct mtp_dev *dev = func_to_mtp(f); struct usb_request *req; int i; while ((req = mtp_req_get(dev, &dev->tx_idle))) mtp_request_free(req, dev->ep_in); for (i = 0; i < RX_REQ_MAX; i++) mtp_request_free(dev->rx_req[i], dev->ep_out); while ((req = mtp_req_get(dev, &dev->intr_idle))) mtp_request_free(req, dev->ep_intr); dev->state = STATE_OFFLINE; } static int <API key>(struct usb_function *f, unsigned intf, unsigned alt) { struct mtp_dev *dev = func_to_mtp(f); struct usb_composite_dev *cdev = f->config->cdev; int ret; DBG(cdev, "<API key> intf: %d alt: %d\n", intf, alt); ret = usb_ep_enable(dev->ep_in, ep_choose(cdev->gadget, &<API key>, &<API key>)); if (ret) return ret; ret = usb_ep_enable(dev->ep_out, ep_choose(cdev->gadget, &<API key>, &<API key>)); if (ret) { usb_ep_disable(dev->ep_in); return ret; } ret = usb_ep_enable(dev->ep_intr, &mtp_intr_desc); if (ret) { usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_in); return ret; } dev->state = STATE_READY; /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); return 0; } static void <API key>(struct usb_function *f) { struct mtp_dev *dev = func_to_mtp(f); struct usb_composite_dev *cdev = dev->cdev; DBG(cdev, "<API key>\n"); dev->state = STATE_OFFLINE; usb_ep_disable(dev->ep_in); usb_ep_disable(dev->ep_out); usb_ep_disable(dev->ep_intr); /* readers may be blocked waiting for us to go online */ wake_up(&dev->read_wq); VDBG(cdev, "%s disabled\n", dev->function.name); } static int mtp_bind_config(struct usb_configuration *c, bool ptp_config) { struct mtp_dev *dev = _mtp_dev; int ret = 0; printk(KERN_INFO "mtp_bind_config\n"); /* allocate a string ID for our interface */ if (mtp_string_defs[<API key>].id == 0) { ret = usb_string_id(c->cdev); if (ret < 0) return ret; mtp_string_defs[<API key>].id = ret; mtp_interface_desc.iInterface = ret; } dev->cdev = c->cdev; dev->function.name = "mtp"; dev->function.strings = mtp_strings; if (ptp_config) { dev->function.descriptors = fs_ptp_descs; dev->function.hs_descriptors = hs_ptp_descs; } else { dev->function.descriptors = fs_mtp_descs; dev->function.hs_descriptors = hs_mtp_descs; } dev->function.bind = mtp_function_bind; dev->function.unbind = mtp_function_unbind; dev->function.set_alt = <API key>; dev->function.disable = <API key>; return usb_add_function(c, &dev->function); } static int mtp_setup(void) { struct mtp_dev *dev; int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; spin_lock_init(&dev->lock); init_waitqueue_head(&dev->read_wq); init_waitqueue_head(&dev->write_wq); init_waitqueue_head(&dev->intr_wq); atomic_set(&dev->open_excl, 0); atomic_set(&dev->ioctl_excl, 0); INIT_LIST_HEAD(&dev->tx_idle); INIT_LIST_HEAD(&dev->intr_idle); dev->wq = <API key>("f_mtp"); if (!dev->wq) { ret = -ENOMEM; goto err1; } INIT_WORK(&dev->send_file_work, send_file_work); INIT_WORK(&dev->receive_file_work, receive_file_work); _mtp_dev = dev; ret = misc_register(&mtp_device); if (ret) goto err2; return 0; err2: destroy_workqueue(dev->wq); err1: _mtp_dev = NULL; kfree(dev); printk(KERN_ERR "mtp gadget driver failed to initialize\n"); return ret; } static void mtp_cleanup(void) { struct mtp_dev *dev = _mtp_dev; if (!dev) return; misc_deregister(&mtp_device); destroy_workqueue(dev->wq); _mtp_dev = NULL; kfree(dev); }
#include <linux/cpufreq.h> static struct <API key> meson_freq_table[] = { {0, 96000 }, {1, 192000 }, {2, 312000 }, {3, 408000 }, {4, 504000 }, {5, 600000 }, {6, 696000 }, {7, 816000 }, {8, 912000 }, {9, 1008000 }, {10, 1104000 }, {11, 1200000 }, {12, 1296000 }, {13, 1416000 }, {14, 1512000 }, {15, 1608000 }, {16, 1800000 }, {17, 1992000 }, {18, CPUFREQ_TABLE_END}, };
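/*
 * A standalone sketch of how a cpufreq driver walks meson_freq_table
 * above: entries are (index, frequency-in-kHz) pairs and iteration stops
 * at the CPUFREQ_TABLE_END sentinel in the frequency field. The struct
 * here mirrors only the two fields this table uses; the shortened table
 * is an example, not the full operating-point list.
 */
#include <stdio.h>

#define CPUFREQ_TABLE_END (~0u)

struct freq_entry { unsigned index; unsigned frequency; };

static const struct freq_entry table[] = {
	{0,  96000}, {1, 192000}, {2, 312000},
	{3, CPUFREQ_TABLE_END},
};

int main(void)
{
	const struct freq_entry *e;
	for (e = table; e->frequency != CPUFREQ_TABLE_END; e++)
		printf("op %u: %u kHz (%.3f GHz)\n",
		       e->index, e->frequency, e->frequency / 1e6);
	return 0;
}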
<?php class AWPCP_FeeType extends <API key> { const TYPE = 'fee'; public function __construct() { parent::__construct(_x('Fee', 'payment term type', 'AWPCP'), self::TYPE, ''); add_action('<API key>', array($this, 'update_buys_count'), 10, 2); } public function update_buys_count($transaction, $status) { if ($transaction->is_completed() && $transaction-><API key>()) { if ($transaction->get('payment-term-type', false) !== self::TYPE) return; $term = self::find_by_id($transaction->get('payment-term-id')); if (is_null($term)) return; $term->buys = $term->buys + 1; $term->save(); } } public function find_by_id($id) { if (absint($id) === 0) return $this-><API key>(); return AWPCP_Fee::find_by_id($id); } private function <API key>() { return new AWPCP_Fee(array( 'id' => 0, 'name' => __('Free Listing', 'AWPCP'), 'description' => '', 'duration_amount' => get_awpcp_option('addurationfreemode'), 'duration_interval' => AWPCP_Fee::INTERVAL_DAY, 'price' => 0, 'credits' => 0, 'categories' => array(), 'images' => get_awpcp_option('imagesallowedfree'), 'ads' => 1, 'characters' => get_awpcp_option( '<API key>' ), 'title_characters' => get_awpcp_option( '<API key>' ), 'buys' => 0, 'featured' => 0, 'private' => 0, )); } public function get_payment_terms() { global $wpdb; if (!awpcp_payments_api()->payments_enabled()) { return array($this-><API key>()); } $order = get_awpcp_option( 'fee-order' ); $direction = get_awpcp_option( 'fee-order-direction' ); switch ($order) { case 1: $orderby = array( 'adterm_name', $direction ); break; case 2: $orderby = array( "amount $direction, adterm_name", $direction ); break; case 3: $orderby = array( "imagesallowed $direction, adterm_name", $direction ); break; case 5: $orderby = array( "_duration_interval $direction, rec_period $direction, adterm_name", $direction ); break; } if ( <API key>() ) { $args = array( 'orderby' => $orderby[0], 'order' => $orderby[1], ); } else { $args = array( 'where' => 'private = 0', 'orderby' => $orderby[0], 'order' => $orderby[1], ); } return AWPCP_Fee::query($args); } public function <API key>($user_id) { static $terms = null; if ( is_null( $terms ) ) { $terms = $this->get_payment_terms(); } return $terms; } }
#ifndef LOAD_LIB_H #define LOAD_LIB_H #include "Define.h" #include "CascHandles.h" #include <map> #include <string> #define FILE_FORMAT_VERSION 18 #pragma pack(push, 1) union u_map_fcc { char fcc_txt[4]; uint32 fcc; }; // File version chunk struct file_MVER { union{ uint32 fcc; char fcc_txt[4]; }; uint32 size; uint32 ver; }; struct file_MWMO { u_map_fcc fcc; uint32 size; char FileList[1]; }; class FileChunk { public: FileChunk(uint8* data_, uint32 size_) : data(data_), size(size_) { } ~FileChunk(); uint8* data; uint32 size; template<class T> T* As() { return (T*)data; } void parseSubChunks(); std::multimap<std::string, FileChunk*> subchunks; FileChunk* GetSubChunk(std::string const& name); }; class ChunkedFile { public: uint8 *data; uint32 data_size; uint8 *GetData() { return data; } uint32 GetDataSize() { return data_size; } ChunkedFile(); virtual ~ChunkedFile(); bool prepareLoadedData(); bool loadFile(CASC::StorageHandle const& mpq, std::string const& fileName, bool log = true); void free(); void parseChunks(); std::multimap<std::string, FileChunk*> chunks; FileChunk* GetChunk(std::string const& name); }; #pragma pack(pop) #endif
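/*
 * A standalone sketch of the u_map_fcc trick in the header above: with
 * packed layout, the same four bytes can be read either as a chunk-tag
 * string or as a single uint32 for fast comparison. The integer value
 * depends on host byte order, which is why such readers keep both views
 * in one union. "MVER" is the file-version chunk tag from this header.
 */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#pragma pack(push, 1)
union u_map_fcc {
	char fcc_txt[4];
	uint32_t fcc;
};
#pragma pack(pop)

int main(void)
{
	union u_map_fcc tag;
	memcpy(tag.fcc_txt, "MVER", 4);
	printf("fcc = 0x%08" PRIx32 " (%.4s)\n", tag.fcc, tag.fcc_txt);
	return 0;
}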
#!/bin/bash
# This file is part of PyBOMBS
#
# PyBOMBS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# PyBOMBS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyBOMBS; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.

# Apparently an external-diff helper: of the seven arguments git passes
# (path old-file old-hex old-mode new-file new-hex new-mode), only the
# two file paths are handed to meld.
meld "$2" "$5"
#include "MovementGenerator.h"
#include "Unit.h"

MovementGenerator::~MovementGenerator()
{
}

bool MovementGenerator::IsActive(Unit& u)
{
    // When the movement generator list is modified from Update(), the movegen
    // object is erased with a delay, so this pointer is still valid here and
    // can safely be used for the check.
    return !u.GetMotionMaster()->empty() && u.GetMotionMaster()->top() == this;
}
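/*
 * A standalone sketch of the IsActive() test above: a movement generator
 * counts as active only while it is the top element of the unit's motion
 * stack, so a raw pointer comparison against top() suffices. The tiny
 * stack type below is an assumption standing in for MotionMaster.
 */
#include <stdbool.h>
#include <stdio.h>

struct gen { const char *name; };
struct motion_stack { struct gen *slots[8]; int depth; };

static bool is_active(const struct motion_stack *ms, const struct gen *g)
{
	return ms->depth > 0 && ms->slots[ms->depth - 1] == g;
}

int main(void)
{
	struct gen idle = {"idle"}, chase = {"chase"};
	struct motion_stack ms = {{&idle, &chase}, 2};
	printf("chase active: %d, idle active: %d\n",
	       is_active(&ms, &chase), is_active(&ms, &idle));
	return 0;
}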
UPDATE `realmlist` SET `gamebuild`=30993 WHERE `gamebuild`=30706; ALTER TABLE `realmlist` CHANGE `gamebuild` `gamebuild` int(10) unsigned NOT NULL DEFAULT '30993';
/* * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR * policies) */ #include "sched.h" #include <linux/slab.h> #include <trace/events/sched.h> int sched_rr_timeslice = RR_TIMESLICE; static int <API key>(struct rt_bandwidth *rt_b, int overrun); struct rt_bandwidth def_rt_bandwidth; static enum hrtimer_restart <API key>(struct hrtimer *timer) { struct rt_bandwidth *rt_b = container_of(timer, struct rt_bandwidth, rt_period_timer); ktime_t now; int overrun; int idle = 0; for (;;) { now = hrtimer_cb_get_time(timer); overrun = hrtimer_forward(timer, now, rt_b->rt_period); if (!overrun) break; idle = <API key>(rt_b, overrun); } return idle ? HRTIMER_NORESTART : HRTIMER_RESTART; } void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) { rt_b->rt_period = ns_to_ktime(period); rt_b->rt_runtime = runtime; raw_spin_lock_init(&rt_b->rt_runtime_lock); hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rt_b->rt_period_timer.function = <API key>; } static void start_rt_bandwidth(struct rt_bandwidth *rt_b) { if (!<API key>() || rt_b->rt_runtime == RUNTIME_INF) return; if (hrtimer_active(&rt_b->rt_period_timer)) return; raw_spin_lock(&rt_b->rt_runtime_lock); <API key>(&rt_b->rt_period_timer, rt_b->rt_period); raw_spin_unlock(&rt_b->rt_runtime_lock); } void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq) { struct rt_prio_array *array; int i; array = &rt_rq->active; for (i = 0; i < MAX_RT_PRIO; i++) { INIT_LIST_HEAD(array->queue + i); __clear_bit(i, array->bitmap); } /* delimiter for bitsearch: */ __set_bit(MAX_RT_PRIO, array->bitmap); #if defined CONFIG_SMP rt_rq->highest_prio.curr = MAX_RT_PRIO; rt_rq->highest_prio.next = MAX_RT_PRIO; rt_rq->rt_nr_migratory = 0; rt_rq->overloaded = 0; plist_head_init(&rt_rq->pushable_tasks); #endif rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; rt_rq->rt_runtime = 0; raw_spin_lock_init(&rt_rq->rt_runtime_lock); } #ifdef <API key> static void <API key>(struct rt_bandwidth *rt_b) { hrtimer_cancel(&rt_b->rt_period_timer); } #define rt_entity_is_task(rt_se) (!(rt_se)->my_q) static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se) { #ifdef CONFIG_SCHED_DEBUG WARN_ON_ONCE(!rt_entity_is_task(rt_se)); #endif return container_of(rt_se, struct task_struct, rt); } static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) { return rt_rq->rq; } static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se) { return rt_se->rt_rq; } void free_rt_sched_group(struct task_group *tg) { int i; if (tg->rt_se) <API key>(&tg->rt_bandwidth); <API key>(i) { if (tg->rt_rq) kfree(tg->rt_rq[i]); if (tg->rt_se) kfree(tg->rt_se[i]); } kfree(tg->rt_rq); kfree(tg->rt_se); } void init_tg_rt_entry(struct task_group *tg, struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int cpu, struct sched_rt_entity *parent) { struct rq *rq = cpu_rq(cpu); rt_rq->highest_prio.curr = MAX_RT_PRIO; rt_rq->rt_nr_boosted = 0; rt_rq->rq = rq; rt_rq->tg = tg; tg->rt_rq[cpu] = rt_rq; tg->rt_se[cpu] = rt_se; if (!rt_se) return; if (!parent) rt_se->rt_rq = &rq->rt; else rt_se->rt_rq = parent->my_q; rt_se->my_q = rt_rq; rt_se->parent = parent; INIT_LIST_HEAD(&rt_se->run_list); } int <API key>(struct task_group *tg, struct task_group *parent) { struct rt_rq *rt_rq; struct sched_rt_entity *rt_se; int i; tg->rt_rq = kzalloc(sizeof(rt_rq) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_rq) goto err; tg->rt_se = kzalloc(sizeof(rt_se) * nr_cpu_ids, GFP_KERNEL); if (!tg->rt_se) goto err; init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(def_rt_bandwidth.rt_period), 
0);

	<API key>(i) {
		rt_rq = kzalloc_node(sizeof(struct rt_rq),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_rq)
			goto err;

		rt_se = kzalloc_node(sizeof(struct sched_rt_entity),
				     GFP_KERNEL, cpu_to_node(i));
		if (!rt_se)
			goto err_free_rq;

		init_rt_rq(rt_rq, cpu_rq(i));
		rt_rq->rt_runtime = tg->rt_bandwidth.rt_runtime;
		init_tg_rt_entry(tg, rt_rq, rt_se, i, parent->rt_se[i]);
	}

	return 1;

err_free_rq:
	kfree(rt_rq);
err:
	return 0;
}

#else /* <API key> */

#define rt_entity_is_task(rt_se) (1)

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

void free_rt_sched_group(struct task_group *tg) { }

int <API key>(struct task_group *tg, struct task_group *parent)
{
	return 1;
}
#endif /* <API key> */

#ifdef CONFIG_SMP

static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	cpumask_set_cpu(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	if (!rq->online)
		return;

	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpumask_clear_cpu(rq->cpu, rq->rd->rto_mask);
}

static void update_rt_migration(struct rt_rq *rt_rq)
{
	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
		if (!rt_rq->overloaded) {
			rt_set_overload(rq_of_rt_rq(rt_rq));
			rt_rq->overloaded = 1;
		}
	} else if (rt_rq->overloaded) {
		rt_clear_overload(rq_of_rt_rq(rt_rq));
		rt_rq->overloaded = 0;
	}
}

static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total++;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory++;

	update_rt_migration(rt_rq);
}

static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
{
	struct task_struct *p;

	if (!rt_entity_is_task(rt_se))
		return;

	p = rt_task_of(rt_se);
	rt_rq = &rq_of_rt_rq(rt_rq)->rt;

	rt_rq->rt_nr_total--;
	if (p->nr_cpus_allowed > 1)
		rt_rq->rt_nr_migratory--;

	update_rt_migration(rt_rq);
}

static inline int has_pushable_tasks(struct rq *rq)
{
	return !plist_head_empty(&rq->rt.pushable_tasks);
}

static void <API key>(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
	plist_node_init(&p->pushable_tasks, p->prio);
	plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the highest prio pushable task */
	if (p->prio < rq->rt.highest_prio.next)
		rq->rt.highest_prio.next = p->prio;
}

static void <API key>(struct rq *rq, struct task_struct *p)
{
	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);

	/* Update the new highest prio pushable task */
	if (has_pushable_tasks(rq)) {
		p = plist_first_entry(&rq->rt.pushable_tasks,
				      struct task_struct, pushable_tasks);
		rq->rt.highest_prio.next = p->prio;
	} else
		rq->rt.highest_prio.next = MAX_RT_PRIO;
}

#else

static inline void <API key>(struct rq *rq, struct task_struct *p)
{
}

static inline void <API key>(struct rq
*rq, struct task_struct *p) { } static inline void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { } static inline void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { } #endif /* CONFIG_SMP */ static inline int on_rt_rq(struct sched_rt_entity *rt_se) { return !list_empty(&rt_se->run_list); } #ifdef <API key> static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { if (!rt_rq->tg) return RUNTIME_INF; return rt_rq->rt_runtime; } static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(rt_rq->tg->rt_bandwidth.rt_period); } typedef struct task_group *rt_rq_iter_t; static inline struct task_group *next_task_group(struct task_group *tg) { do { tg = list_entry_rcu(tg->list.next, typeof(struct task_group), list); } while (&tg->list != &task_groups && <API key>(tg)); if (&tg->list == &task_groups) tg = NULL; return tg; } #define for_each_rt_rq(rt_rq, iter, rq) \ for (iter = container_of(&task_groups, typeof(*iter), list); \ (iter = next_task_group(iter)) && \ (rt_rq = iter->rt_rq[cpu_of(rq)]);) static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { list_add_rcu(&rt_rq->leaf_rt_rq_list, &rq_of_rt_rq(rt_rq)->leaf_rt_rq_list); } static inline void list_del_leaf_rt_rq(struct rt_rq *rt_rq) { list_del_rcu(&rt_rq->leaf_rt_rq_list); } #define for_each_leaf_rt_rq(rt_rq, rq) \ <API key>(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list) #define <API key>(rt_se) \ for (; rt_se; rt_se = rt_se->parent) static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return rt_se->my_q; } static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head); static void dequeue_rt_entity(struct sched_rt_entity *rt_se); static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; struct sched_rt_entity *rt_se; int cpu = cpu_of(rq_of_rt_rq(rt_rq)); rt_se = rt_rq->tg->rt_se[cpu]; if (rt_rq->rt_nr_running) { if (rt_se && !on_rt_rq(rt_se)) enqueue_rt_entity(rt_se, false); if (rt_rq->highest_prio.curr < curr->prio) resched_task(curr); } } static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { struct sched_rt_entity *rt_se; int cpu = cpu_of(rq_of_rt_rq(rt_rq)); rt_se = rt_rq->tg->rt_se[cpu]; if (rt_se && on_rt_rq(rt_se)) dequeue_rt_entity(rt_se); } static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled && !rt_rq->rt_nr_boosted; } static int rt_se_boosted(struct sched_rt_entity *rt_se) { struct rt_rq *rt_rq = group_rt_rq(rt_se); struct task_struct *p; if (rt_rq) return !!rt_rq->rt_nr_boosted; p = rt_task_of(rt_se); return p->prio != p->normal_prio; } #ifdef CONFIG_SMP static inline const struct cpumask *<API key>(void) { return cpu_rq(smp_processor_id())->rd->span; } #else static inline const struct cpumask *<API key>(void) { return cpu_online_mask; } #endif static inline struct rt_rq *<API key>(struct rt_bandwidth *rt_b, int cpu) { return container_of(rt_b, struct task_group, rt_bandwidth)->rt_rq[cpu]; } static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) { return &rt_rq->tg->rt_bandwidth; } #else /* !<API key> */ static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) { return rt_rq->rt_runtime; } static inline u64 sched_rt_period(struct rt_rq *rt_rq) { return ktime_to_ns(def_rt_bandwidth.rt_period); } typedef struct rt_rq *rt_rq_iter_t; #define for_each_rt_rq(rt_rq, iter, rq) \ for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) static inline void list_add_leaf_rt_rq(struct rt_rq *rt_rq) { } static inline void list_del_leaf_rt_rq(struct 
rt_rq *rt_rq) { } #define for_each_leaf_rt_rq(rt_rq, rq) \ for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL) #define <API key>(rt_se) \ for (; rt_se; rt_se = NULL) static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) { return NULL; } static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) { if (rt_rq->rt_nr_running) resched_task(rq_of_rt_rq(rt_rq)->curr); } static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) { } static inline int rt_rq_throttled(struct rt_rq *rt_rq) { return rt_rq->rt_throttled; } static inline const struct cpumask *<API key>(void) { return cpu_online_mask; } static inline struct rt_rq *<API key>(struct rt_bandwidth *rt_b, int cpu) { return &cpu_rq(cpu)->rt; } static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) { return &def_rt_bandwidth; } #endif /* <API key> */ #ifdef CONFIG_SMP /* * We ran out of runtime, see if we can borrow some from our neighbours. */ static int do_balance_runtime(struct rt_rq *rt_rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd; int i, weight, more = 0; u64 rt_period; weight = cpumask_weight(rd->span); raw_spin_lock(&rt_b->rt_runtime_lock); rt_period = ktime_to_ns(rt_b->rt_period); for_each_cpu(i, rd->span) { struct rt_rq *iter = <API key>(rt_b, i); s64 diff; if (iter == rt_rq) continue; raw_spin_lock(&iter->rt_runtime_lock); /* * Either all rqs have inf runtime and there's nothing to steal * or __disable_runtime() below sets a specific rq to inf to * indicate its been disabled and disalow stealing. */ if (iter->rt_runtime == RUNTIME_INF) goto next; /* * From runqueues with spare time, take 1/n part of their * spare time, but no more than our period. */ diff = iter->rt_runtime - iter->rt_time; if (diff > 0) { diff = div_u64((u64)diff, weight); if (rt_rq->rt_runtime + diff > rt_period) diff = rt_period - rt_rq->rt_runtime; iter->rt_runtime -= diff; rt_rq->rt_runtime += diff; more = 1; if (rt_rq->rt_runtime == rt_period) { raw_spin_unlock(&iter->rt_runtime_lock); break; } } next: raw_spin_unlock(&iter->rt_runtime_lock); } raw_spin_unlock(&rt_b->rt_runtime_lock); return more; } /* * Ensure this RQ takes back all the runtime it lend to its neighbours. */ static void __disable_runtime(struct rq *rq) { struct root_domain *rd = rq->rd; rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) return; for_each_rt_rq(rt_rq, iter, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); s64 want; int i; raw_spin_lock(&rt_b->rt_runtime_lock); raw_spin_lock(&rt_rq->rt_runtime_lock); /* * Either we're all inf and nobody needs to borrow, or we're * already disabled and thus have nothing to do, or we have * exactly the right amount of runtime to take out. */ if (rt_rq->rt_runtime == RUNTIME_INF || rt_rq->rt_runtime == rt_b->rt_runtime) goto balanced; raw_spin_unlock(&rt_rq->rt_runtime_lock); /* * Calculate the difference between what we started out with * and what we current have, that's the amount of runtime * we lend and now have to reclaim. */ want = rt_b->rt_runtime - rt_rq->rt_runtime; /* * Greedy reclaim, take back as much as we can. */ for_each_cpu(i, rd->span) { struct rt_rq *iter = <API key>(rt_b, i); s64 diff; /* * Can't reclaim from ourselves or disabled runqueues. 
*/ if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) continue; raw_spin_lock(&iter->rt_runtime_lock); if (want > 0) { diff = min_t(s64, iter->rt_runtime, want); iter->rt_runtime -= diff; want -= diff; } else { iter->rt_runtime -= want; want -= want; } raw_spin_unlock(&iter->rt_runtime_lock); if (!want) break; } raw_spin_lock(&rt_rq->rt_runtime_lock); /* * We cannot be left wanting - that would mean some runtime * leaked out of the system. */ BUG_ON(want); balanced: /* * Disable all the borrow logic by pretending we have inf * runtime - in which case borrowing doesn't make sense. */ rt_rq->rt_runtime = RUNTIME_INF; rt_rq->rt_throttled = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock); } } static void __enable_runtime(struct rq *rq) { rt_rq_iter_t iter; struct rt_rq *rt_rq; if (unlikely(!scheduler_running)) return; /* * Reset each runqueue's bandwidth settings */ for_each_rt_rq(rt_rq, iter, rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); raw_spin_lock(&rt_b->rt_runtime_lock); raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_runtime = rt_b->rt_runtime; rt_rq->rt_time = 0; rt_rq->rt_throttled = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock); } } static int balance_runtime(struct rt_rq *rt_rq) { int more = 0; if (!sched_feat(RT_RUNTIME_SHARE)) return more; if (rt_rq->rt_time > rt_rq->rt_runtime) { raw_spin_unlock(&rt_rq->rt_runtime_lock); more = do_balance_runtime(rt_rq); raw_spin_lock(&rt_rq->rt_runtime_lock); } return more; } #else /* !CONFIG_SMP */ static inline int balance_runtime(struct rt_rq *rt_rq) { return 0; } #endif /* CONFIG_SMP */ static int <API key>(struct rt_bandwidth *rt_b, int overrun) { int i, idle = 1, throttled = 0; const struct cpumask *span; span = <API key>(); #ifdef <API key> /* * FIXME: isolated CPUs should really leave the root task group, * whether they are isolcpus or were isolated via cpusets, lest * the timer run on a CPU which does not service all runqueues, * potentially leaving other CPUs indefinitely throttled. If * isolation is really required, the user will turn the throttle * off to kill the perturbations it causes anyway. Meanwhile, * this maintains functionality for boot and/or troubleshooting. */ if (rt_b == &root_task_group.rt_bandwidth) span = cpu_online_mask; #endif for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = <API key>(rt_b, i); struct rq *rq = rq_of_rt_rq(rt_rq); raw_spin_lock(&rq->lock); if (rt_rq->rt_time) { u64 runtime; raw_spin_lock(&rt_rq->rt_runtime_lock); if (rt_rq->rt_throttled) balance_runtime(rt_rq); runtime = rt_rq->rt_runtime; rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime); if (rt_rq->rt_throttled && rt_rq->rt_time < runtime) { rt_rq->rt_throttled = 0; enqueue = 1; /* * Force a clock update if the CPU was idle, * lest wakeup -> unthrottle time accumulate. 
*/ if (rt_rq->rt_nr_running && rq->curr == rq->idle) rq->skip_clock_update = -1; } if (rt_rq->rt_time || rt_rq->rt_nr_running) idle = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); } else if (rt_rq->rt_nr_running) { idle = 0; if (!rt_rq_throttled(rt_rq)) enqueue = 1; } if (rt_rq->rt_throttled) throttled = 1; if (enqueue) sched_rt_rq_enqueue(rt_rq); raw_spin_unlock(&rq->lock); } if (!throttled && (!<API key>() || rt_b->rt_runtime == RUNTIME_INF)) return 1; return idle; } static inline int rt_se_prio(struct sched_rt_entity *rt_se) { #ifdef <API key> struct rt_rq *rt_rq = group_rt_rq(rt_se); if (rt_rq) return rt_rq->highest_prio.curr; #endif return rt_task_of(rt_se)->prio; } static void <API key>(struct rt_rq *rt_rq) { struct rt_prio_array *array = &rt_rq->active; struct sched_rt_entity *rt_se; char buf[500]; char *pos = buf; char *end = buf + sizeof(buf); int idx; pos += snprintf(pos, sizeof(buf), "sched: RT throttling activated for rt_rq %p (cpu %d)\n", rt_rq, cpu_of(rq_of_rt_rq(rt_rq))); if (bitmap_empty(array->bitmap, MAX_RT_PRIO)) goto out; pos += snprintf(pos, end - pos, "potential CPU hogs:\n"); idx = <API key>(array->bitmap); while (idx < MAX_RT_PRIO) { list_for_each_entry(rt_se, array->queue + idx, run_list) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) continue; p = rt_task_of(rt_se); if (pos < end) pos += snprintf(pos, end - pos, "\t%s (%d)\n", p->comm, p->pid); } idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx + 1); } out: #ifdef <API key> /* * Use pr_err() in the BUG() case since printk_sched() will * not get flushed and deadlock is not a concern. */ pr_err("%s", buf); BUG(); #else printk_deferred("%s", buf); #endif } static int <API key>(struct rt_rq *rt_rq) { u64 runtime = sched_rt_runtime(rt_rq); if (rt_rq->rt_throttled) return rt_rq_throttled(rt_rq); if (runtime >= sched_rt_period(rt_rq)) return 0; balance_runtime(rt_rq); runtime = sched_rt_runtime(rt_rq); if (runtime == RUNTIME_INF) return 0; if (rt_rq->rt_time > runtime) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); /* * Don't actually throttle groups that have no runtime assigned * but accrue some time due to boosting. */ if (likely(rt_b->rt_runtime)) { static bool once = false; rt_rq->rt_throttled = 1; if (!once) { once = true; <API key>(rt_rq); } } else { /* * In case we did anyway, make it go away, * replenishment is a joke, since it will replenish us * with exactly 0 ns. */ rt_rq->rt_time = 0; } if (rt_rq_throttled(rt_rq)) { sched_rt_rq_dequeue(rt_rq); return 1; } } return 0; } /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. 
*/ static void update_curr_rt(struct rq *rq) { struct task_struct *curr = rq->curr; struct sched_rt_entity *rt_se = &curr->rt; struct rt_rq *rt_rq = rt_rq_of_se(rt_se); u64 delta_exec; if (curr->sched_class != &rt_sched_class) return; delta_exec = rq->clock_task - curr->se.exec_start; if (unlikely((s64)delta_exec <= 0)) return; schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec)); curr->se.sum_exec_runtime += delta_exec; <API key>(curr, delta_exec); curr->se.exec_start = rq->clock_task; cpuacct_charge(curr, delta_exec); sched_rt_avg_update(rq, delta_exec); if (!<API key>()) return; <API key>(rt_se) { rt_rq = rt_rq_of_se(rt_se); if (sched_rt_runtime(rt_rq) != RUNTIME_INF) { raw_spin_lock(&rt_rq->rt_runtime_lock); rt_rq->rt_time += delta_exec; if (<API key>(rt_rq)) resched_task(curr); raw_spin_unlock(&rt_rq->rt_runtime_lock); } } } #if defined CONFIG_SMP static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); #ifdef <API key> /* * Change rq's cpupri only if rt_rq is the top queue. */ if (&rq->rt != rt_rq) return; #endif if (rq->online && prio < prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } static void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); #ifdef <API key> /* * Change rq's cpupri only if rt_rq is the top queue. */ if (&rq->rt != rt_rq) return; #endif if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } #else /* CONFIG_SMP */ static inline void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} static inline void dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) {} #endif /* CONFIG_SMP */ #if defined CONFIG_SMP || defined <API key> static void inc_rt_prio(struct rt_rq *rt_rq, int prio) { int prev_prio = rt_rq->highest_prio.curr; if (prio < prev_prio) rt_rq->highest_prio.curr = prio; inc_rt_prio_smp(rt_rq, prio, prev_prio); } static void dec_rt_prio(struct rt_rq *rt_rq, int prio) { int prev_prio = rt_rq->highest_prio.curr; if (rt_rq->rt_nr_running) { WARN_ON(prio < prev_prio); /* * This may have been our highest task, and therefore * we may have some recomputation to do */ if (prio == prev_prio) { struct rt_prio_array *array = &rt_rq->active; rt_rq->highest_prio.curr = <API key>(array->bitmap); } } else rt_rq->highest_prio.curr = MAX_RT_PRIO; dec_rt_prio_smp(rt_rq, prio, prev_prio); } #else static inline void inc_rt_prio(struct rt_rq *rt_rq, int prio) {} static inline void dec_rt_prio(struct rt_rq *rt_rq, int prio) {} #endif /* CONFIG_SMP || <API key> */ #ifdef <API key> static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { if (rt_se_boosted(rt_se)) rt_rq->rt_nr_boosted++; if (rt_rq->tg) start_rt_bandwidth(&rt_rq->tg->rt_bandwidth); } static void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { if (rt_se_boosted(rt_se)) rt_rq->rt_nr_boosted--; WARN_ON(!rt_rq->rt_nr_running && rt_rq->rt_nr_boosted); } #else /* <API key> */ static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { start_rt_bandwidth(&def_rt_bandwidth); } static inline void dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) {} #endif /* <API key> */ #ifdef CONFIG_SCHED_HMP static void <API key>(struct rq *rq, struct task_struct *p) { <API key>(&rq->hmp_stats, p); } static void <API key>(struct rq *rq, struct task_struct *p) { <API key>(&rq->hmp_stats, p); } #else /* CONFIG_SCHED_HMP */ static inline
void <API key>(struct rq *rq, struct task_struct *p) { } static inline void <API key>(struct rq *rq, struct task_struct *p) { } #endif /* CONFIG_SCHED_HMP */ static inline void inc_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { int prio = rt_se_prio(rt_se); WARN_ON(!rt_prio(prio)); rt_rq->rt_nr_running++; inc_rt_prio(rt_rq, prio); inc_rt_migration(rt_se, rt_rq); inc_rt_group(rt_se, rt_rq); } static inline void dec_rt_tasks(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { WARN_ON(!rt_prio(rt_se_prio(rt_se))); WARN_ON(!rt_rq->rt_nr_running); rt_rq->rt_nr_running--; dec_rt_prio(rt_rq, rt_se_prio(rt_se)); dec_rt_migration(rt_se, rt_rq); dec_rt_group(rt_se, rt_rq); } static void __enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; struct rt_rq *group_rq = group_rt_rq(rt_se); struct list_head *queue = array->queue + rt_se_prio(rt_se); /* * Don't enqueue the group if it's throttled, or when empty. * The latter is a consequence of the former when a child group * gets throttled and the current group doesn't have any other * active members. */ if (group_rq && (rt_rq_throttled(group_rq) || !group_rq->rt_nr_running)) return; if (!rt_rq->rt_nr_running) list_add_leaf_rt_rq(rt_rq); if (head) list_add(&rt_se->run_list, queue); else list_add_tail(&rt_se->run_list, queue); __set_bit(rt_se_prio(rt_se), array->bitmap); inc_rt_tasks(rt_se, rt_rq); } static void __dequeue_rt_entity(struct sched_rt_entity *rt_se) { struct rt_rq *rt_rq = rt_rq_of_se(rt_se); struct rt_prio_array *array = &rt_rq->active; list_del_init(&rt_se->run_list); if (list_empty(array->queue + rt_se_prio(rt_se))) __clear_bit(rt_se_prio(rt_se), array->bitmap); dec_rt_tasks(rt_se, rt_rq); if (!rt_rq->rt_nr_running) list_del_leaf_rt_rq(rt_rq); } /* * Because the prio of an upper entry depends on the lower * entries, we must remove entries top - down. */ static void dequeue_rt_stack(struct sched_rt_entity *rt_se) { struct sched_rt_entity *back = NULL; <API key>(rt_se) { rt_se->back = back; back = rt_se; } for (rt_se = back; rt_se; rt_se = rt_se->back) { if (on_rt_rq(rt_se)) __dequeue_rt_entity(rt_se); } } static void enqueue_rt_entity(struct sched_rt_entity *rt_se, bool head) { dequeue_rt_stack(rt_se); <API key>(rt_se) __enqueue_rt_entity(rt_se, head); } static void dequeue_rt_entity(struct sched_rt_entity *rt_se) { dequeue_rt_stack(rt_se); <API key>(rt_se) { struct rt_rq *rt_rq = group_rt_rq(rt_se); if (rt_rq && rt_rq->rt_nr_running) __enqueue_rt_entity(rt_se, false); } } /* * Adding/removing a task to/from a priority array: */ static void enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; if (flags & ENQUEUE_WAKEUP) rt_se->timeout = 0; enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); if (!task_current(rq, p) && p->nr_cpus_allowed > 1) <API key>(rq, p); inc_nr_running(rq); <API key>(rq, p); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); dequeue_rt_entity(rt_se); <API key>(rq, p); dec_nr_running(rq); <API key>(rq, p); } /* * Put task to the head or the end of the run list without the overhead of * dequeue followed by enqueue.
*/ static void requeue_rt_entity(struct rt_rq *rt_rq, struct sched_rt_entity *rt_se, int head) { if (on_rt_rq(rt_se)) { struct rt_prio_array *array = &rt_rq->active; struct list_head *queue = array->queue + rt_se_prio(rt_se); if (head) list_move(&rt_se->run_list, queue); else list_move_tail(&rt_se->run_list, queue); } } static void requeue_task_rt(struct rq *rq, struct task_struct *p, int head) { struct sched_rt_entity *rt_se = &p->rt; struct rt_rq *rt_rq; <API key>(rt_se) { rt_rq = rt_rq_of_se(rt_se); requeue_rt_entity(rt_rq, rt_se, head); } } static void yield_task_rt(struct rq *rq) { requeue_task_rt(rq, rq->curr, 0); } #ifdef CONFIG_SMP static int find_lowest_rq(struct task_struct *task); static int <API key>(struct task_struct *p, int sd_flag, int flags) { int cpu, target; cpu = task_cpu(p); rcu_read_lock(); target = find_lowest_rq(p); if (target != -1) cpu = target; rcu_read_unlock(); return cpu; } static int select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) { struct task_struct *curr; struct rq *rq; int cpu; cpu = task_cpu(p); if (p->nr_cpus_allowed == 1) goto out; if (sched_enable_hmp) return <API key>(p, sd_flag, flags); /* For anything but wake ups, just return the task_cpu */ if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) goto out; rq = cpu_rq(cpu); rcu_read_lock(); curr = ACCESS_ONCE(rq->curr); /* unlocked access */ /* * If the current task on @p's runqueue is an RT task, then * try to see if we can wake this RT task up on another * runqueue. Otherwise simply start this RT task * on its current runqueue. * * We want to avoid overloading runqueues. If the woken * task is a higher priority, then it will stay on this CPU * and the lower prio task should be moved to another CPU. * Even though this will probably make the lower prio task * lose its cache, we do not want to bounce a higher task * around just because it gave up its CPU, perhaps for a * lock? * * For equal prio tasks, we just let the scheduler sort it out. * * Otherwise, just let it ride on the affined RQ and the * post-schedule router will push the preempted task away * * This test is optimistic, if we get it wrong the load-balancer * will have to sort it out. */ if (curr && unlikely(rt_task(curr)) && (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio) && (p->nr_cpus_allowed > 1)) { int target = find_lowest_rq(p); if (target != -1) cpu = target; } rcu_read_unlock(); out: return cpu; } static void <API key>(struct rq *rq, struct task_struct *p) { if (rq->curr->nr_cpus_allowed == 1) return; if (p->nr_cpus_allowed != 1 && cpupri_find(&rq->rd->cpupri, p, NULL)) return; if (!cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) return; /* * There appears to be other cpus that can accept * current and none to run 'p', so lets reschedule * to try and push current away: */ requeue_task_rt(rq, p, 1); resched_task(rq->curr); } #endif /* CONFIG_SMP */ /* * Preempt the current task with a newly woken task if needed: */ static void <API key>(struct rq *rq, struct task_struct *p, int flags) { if (p->prio < rq->curr->prio) { resched_task(rq->curr); return; } #ifdef CONFIG_SMP /* * If: * * - the newly woken task is of equal priority to the current task * - the newly woken task is non-migratable while current is migratable * - current will be preempted on the next reschedule * * we should check to see if current can readily move to a different * cpu. If so, we will reschedule to allow the push logic to try * to move current somewhere else, making room for our non-migratable * task. 
*/ if (p->prio == rq->curr->prio && !<API key>(rq->curr)) <API key>(rq, p); #endif } static struct sched_rt_entity *pick_next_rt_entity(struct rq *rq, struct rt_rq *rt_rq) { struct rt_prio_array *array = &rt_rq->active; struct sched_rt_entity *next = NULL; struct list_head *queue; int idx; idx = <API key>(array->bitmap); BUG_ON(idx >= MAX_RT_PRIO); queue = array->queue + idx; next = list_entry(queue->next, struct sched_rt_entity, run_list); return next; } static struct task_struct *_pick_next_task_rt(struct rq *rq) { struct sched_rt_entity *rt_se; struct task_struct *p; struct rt_rq *rt_rq; rt_rq = &rq->rt; if (!rt_rq->rt_nr_running) return NULL; if (rt_rq_throttled(rt_rq)) return NULL; do { rt_se = pick_next_rt_entity(rq, rt_rq); BUG_ON(!rt_se); rt_rq = group_rt_rq(rt_se); } while (rt_rq); /* * Force update of rq->clock_task in case we failed to do so in * put_prev_task. A stale value can cause us to over-charge execution * time to real-time task, that could trigger throttling unnecessarily */ if (rq->skip_clock_update > 0) rq->skip_clock_update = 0; update_rq_clock(rq); p = rt_task_of(rt_se); p->se.exec_start = rq->clock_task; return p; } static struct task_struct *pick_next_task_rt(struct rq *rq) { struct task_struct *p = _pick_next_task_rt(rq); /* The running task is never eligible for pushing */ if (p) <API key>(rq, p); #ifdef CONFIG_SMP /* * We detect this state here so that we can avoid taking the RQ * lock again later if there is no need to push */ rq->post_schedule = has_pushable_tasks(rq); #endif return p; } static void put_prev_task_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); /* * The previous task needs to be made eligible for pushing * if it is still active */ if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) <API key>(rq, p); } #ifdef CONFIG_SMP /* Only try algorithms three times */ #define RT_MAX_TRIES 3 static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) return 1; return 0; } /* Return the second highest RT task, NULL otherwise */ static struct task_struct *<API key>(struct rq *rq, int cpu) { struct task_struct *next = NULL; struct sched_rt_entity *rt_se; struct rt_prio_array *array; struct rt_rq *rt_rq; int idx; for_each_leaf_rt_rq(rt_rq, rq) { array = &rt_rq->active; idx = <API key>(array->bitmap); next_idx: if (idx >= MAX_RT_PRIO) continue; if (next && next->prio <= idx) continue; list_for_each_entry(rt_se, array->queue + idx, run_list) { struct task_struct *p; if (!rt_entity_is_task(rt_se)) continue; p = rt_task_of(rt_se); if (pick_rt_task(rq, p, cpu)) { next = p; break; } } if (!next) { idx = find_next_bit(array->bitmap, MAX_RT_PRIO, idx+1); goto next_idx; } } return next; } static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask); #ifdef CONFIG_SCHED_HMP static int find_lowest_rq_hmp(struct task_struct *task) { struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); int cpu_cost, min_cost = INT_MAX; int best_cpu = -1; int i; /* Make sure the mask is initialized first */ if (unlikely(!lowest_mask)) return best_cpu; if (task->nr_cpus_allowed == 1) return best_cpu; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) return best_cpu; /* No targets found */ /* * At this point we have built a mask of cpus representing the * lowest priority tasks in the system. Now we want to elect * the best one based on our affinity and topology. */ /* Skip performance considerations and optimize for power. 
* Worst case we'll be iterating over all CPUs here. CPU * online mask should be taken care of when constructing * the lowest_mask. */ for_each_cpu(i, lowest_mask) { struct rq *rq = cpu_rq(i); cpu_cost = power_cost_at_freq(i, ACCESS_ONCE(rq->min_freq)); <API key>(rq, idle_cpu(i), mostly_idle_cpu(i), sched_irqload(i), cpu_cost, cpu_temp(i)); if (sched_boost() && capacity(rq) != max_capacity) continue; if (cpu_cost < min_cost && !<API key>(i)) { min_cost = cpu_cost; best_cpu = i; } } return best_cpu; } #else /* CONFIG_SCHED_HMP */ static int find_lowest_rq_hmp(struct task_struct *task) { return -1; } #endif /* CONFIG_SCHED_HMP */ static int find_lowest_rq(struct task_struct *task) { struct sched_domain *sd; struct cpumask *lowest_mask = __get_cpu_var(local_cpu_mask); int this_cpu = smp_processor_id(); int cpu = task_cpu(task); if (sched_enable_hmp) return find_lowest_rq_hmp(task); /* Make sure the mask is initialized first */ if (unlikely(!lowest_mask)) return -1; if (task->nr_cpus_allowed == 1) return -1; /* No other targets possible */ if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) return -1; /* No targets found */ /* * At this point we have built a mask of cpus representing the * lowest priority tasks in the system. Now we want to elect * the best one based on our affinity and topology. * * We prioritize the last cpu that the task executed on since * it is most likely cache-hot in that location. */ if (cpumask_test_cpu(cpu, lowest_mask)) return cpu; /* * Otherwise, we consult the sched_domains span maps to figure * out which cpu is logically closest to our hot cache data. */ if (!cpumask_test_cpu(this_cpu, lowest_mask)) this_cpu = -1; /* Skip this_cpu opt if not among lowest */ rcu_read_lock(); for_each_domain(cpu, sd) { if (sd->flags & SD_WAKE_AFFINE) { int best_cpu; /* * "this_cpu" is cheaper to preempt than a * remote processor. */ if (this_cpu != -1 && cpumask_test_cpu(this_cpu, sched_domain_span(sd))) { rcu_read_unlock(); return this_cpu; } best_cpu = cpumask_first_and(lowest_mask, sched_domain_span(sd)); if (best_cpu < nr_cpu_ids) { rcu_read_unlock(); return best_cpu; } } } rcu_read_unlock(); /* * And finally, if there were no matches within the domains * just give the caller *something* to work with from the compatible * locations. */ if (this_cpu != -1) return this_cpu; cpu = cpumask_any(lowest_mask); if (cpu < nr_cpu_ids) return cpu; return -1; } /* Will lock the rq it finds */ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) { struct rq *lowest_rq = NULL; int tries; int cpu; for (tries = 0; tries < RT_MAX_TRIES; tries++) { cpu = find_lowest_rq(task); if ((cpu == -1) || (cpu == rq->cpu)) break; lowest_rq = cpu_rq(cpu); /* if the prio of this runqueue changed, try again */ if (double_lock_balance(rq, lowest_rq)) { /* * We had to unlock the run queue. In * the mean time, task could have * migrated already or had its affinity changed. * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || !cpumask_test_cpu(lowest_rq->cpu, tsk_cpus_allowed(task)) || task_running(rq, task) || !task->on_rq)) { <API key>(rq, lowest_rq); lowest_rq = NULL; break; } } /* If this rq is still suitable use it. 
*/ if (lowest_rq->rt.highest_prio.curr > task->prio) break; /* try again */ <API key>(rq, lowest_rq); lowest_rq = NULL; } return lowest_rq; } static struct task_struct *<API key>(struct rq *rq) { struct task_struct *p; if (!has_pushable_tasks(rq)) return NULL; p = plist_first_entry(&rq->rt.pushable_tasks, struct task_struct, pushable_tasks); BUG_ON(rq->cpu != task_cpu(p)); BUG_ON(task_current(rq, p)); BUG_ON(p->nr_cpus_allowed <= 1); BUG_ON(!p->on_rq); BUG_ON(!rt_task(p)); return p; } /* * If the current CPU has more than one RT task, see if the non * running task can migrate over to a CPU that is running a task * of lesser priority. */ static int push_rt_task(struct rq *rq) { struct task_struct *next_task; struct rq *lowest_rq; int ret = 0; if (!rq->rt.overloaded) return 0; next_task = <API key>(rq); if (!next_task) return 0; retry: if (unlikely(next_task == rq->curr)) { WARN_ON(1); return 0; } /* * It's possible that the next_task slipped in of * higher priority than current. If that's the case * just reschedule current. */ if (unlikely(next_task->prio < rq->curr->prio)) { resched_task(rq->curr); return 0; } /* We might release rq lock */ get_task_struct(next_task); /* find_lock_lowest_rq locks the rq if found */ lowest_rq = find_lock_lowest_rq(next_task, rq); if (!lowest_rq) { struct task_struct *task; /* * find_lock_lowest_rq releases rq->lock * so it is possible that next_task has migrated. * * We need to make sure that the task is still on the same * run-queue and is also still the next task eligible for * pushing. */ task = <API key>(rq); if (task_cpu(next_task) == rq->cpu && task == next_task) { /* * The task hasn't migrated, and is still the next * eligible task, but we failed to find a run-queue * to push it to. Do not retry in this case, since * other cpus will pull from us when ready. */ goto out; } if (!task) /* No more tasks, just exit */ goto out; /* * Something has shifted, try again. */ put_task_struct(next_task); next_task = task; goto retry; } deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); ret = 1; resched_task(lowest_rq->curr); <API key>(rq, lowest_rq); out: put_task_struct(next_task); return ret; } static void push_rt_tasks(struct rq *rq) { /* push_rt_task will return true if it moved an RT */ while (push_rt_task(rq)) ; } static int pull_rt_task(struct rq *this_rq) { int this_cpu = this_rq->cpu, ret = 0, cpu; struct task_struct *p; struct rq *src_rq; if (likely(!rt_overloaded(this_rq))) return 0; for_each_cpu(cpu, this_rq->rd->rto_mask) { if (this_cpu == cpu) continue; src_rq = cpu_rq(cpu); /* * Don't bother taking the src_rq->lock if the next highest * task is known to be lower-priority than our current task. * This may look racy, but if this value is about to go * logically higher, the src_rq will push this task away. * And if its going logically lower, we do not care */ if (src_rq->rt.highest_prio.next >= this_rq->rt.highest_prio.curr) continue; /* * We can potentially drop this_rq's lock in * double_lock_balance, and another CPU could * alter this_rq */ double_lock_balance(this_rq, src_rq); /* * Are there still pullable RT tasks? */ if (src_rq->rt.rt_nr_running <= 1) goto skip; p = <API key>(src_rq, this_cpu); /* * Do we have an RT task that preempts * the to-be-scheduled task? */ if (p && (p->prio < this_rq->rt.highest_prio.curr)) { WARN_ON(p == src_rq->curr); WARN_ON(!p->on_rq); /* * There's a chance that p is higher in priority * than what's currently running on its cpu. 
* This is just that p is waking up and hasn't * had a chance to schedule. We only pull * p if it is lower in priority than the * current task on the run queue */ if (p->prio < src_rq->curr->prio) goto skip; ret = 1; deactivate_task(src_rq, p, 0); set_task_cpu(p, this_cpu); activate_task(this_rq, p, 0); /* * We continue with the search, just in * case there's an even higher prio task * in another runqueue. (low likelihood * but possible) */ } skip: <API key>(this_rq, src_rq); } return ret; } static void pre_schedule_rt(struct rq *rq, struct task_struct *prev) { /* Try to pull RT tasks here if we lower this rq's prio */ if (rq->rt.highest_prio.curr > prev->prio) pull_rt_task(rq); } static void post_schedule_rt(struct rq *rq) { push_rt_tasks(rq); } /* * If we are not running and we are not going to reschedule soon, we should * try to push tasks away now */ static void task_woken_rt(struct rq *rq, struct task_struct *p) { if (!task_running(rq, p) && !<API key>(rq->curr) && has_pushable_tasks(rq) && p->nr_cpus_allowed > 1 && rt_task(rq->curr) && (rq->curr->nr_cpus_allowed < 2 || rq->curr->prio <= p->prio)) push_rt_tasks(rq); } static void set_cpus_allowed_rt(struct task_struct *p, const struct cpumask *new_mask) { struct rq *rq; int weight; BUG_ON(!rt_task(p)); if (!p->on_rq) return; weight = cpumask_weight(new_mask); /* * Only update if the process changes its state from whether it * can migrate or not. */ if ((p->nr_cpus_allowed > 1) == (weight > 1)) return; rq = task_rq(p); /* * The process used to be able to migrate OR it can now migrate */ if (weight <= 1) { if (!task_current(rq, p)) <API key>(rq, p); BUG_ON(!rq->rt.rt_nr_migratory); rq->rt.rt_nr_migratory--; } else { if (!task_current(rq, p)) <API key>(rq, p); rq->rt.rt_nr_migratory++; } update_rt_migration(&rq->rt); } /* Assumes rq->lock is held */ static void rq_online_rt(struct rq *rq) { if (rq->rt.overloaded) rt_set_overload(rq); __enable_runtime(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); } /* Assumes rq->lock is held */ static void rq_offline_rt(struct rq *rq) { if (rq->rt.overloaded) rt_clear_overload(rq); __disable_runtime(rq); cpupri_set(&rq->rd->cpupri, rq->cpu, CPUPRI_INVALID); } /* * When switching from the rt queue, we bring ourselves to a position * that we might want to pull RT tasks from other runqueues. */ static void switched_from_rt(struct rq *rq, struct task_struct *p) { /* * If there are other RT tasks then we will reschedule * and the scheduling of the other RT tasks will handle * the balancing. But if we are the last RT task * we may need to handle the pulling of RT tasks * now. */ if (!p->on_rq || rq->rt.rt_nr_running) return; if (pull_rt_task(rq)) resched_task(rq->curr); } void init_sched_rt_class(void) { unsigned int i; <API key>(i) { <API key>(&per_cpu(local_cpu_mask, i), GFP_KERNEL, cpu_to_node(i)); } } #endif /* CONFIG_SMP */ /* * When switching a task to RT, we may overload the runqueue * with RT tasks. In this case we try to push them off to * other runqueues. */ static void switched_to_rt(struct rq *rq, struct task_struct *p) { int check_resched = 1; /* * If we are already running, then there's nothing * that needs to be done. But if we are not running * we may need to preempt the current running task. * If that current running task is also an RT task * then see if we can move to another run queue.
*/ if (p->on_rq && rq->curr != p) { #ifdef CONFIG_SMP if (rq->rt.overloaded && push_rt_task(rq) && /* Don't resched if we changed runqueues */ rq != task_rq(p)) check_resched = 0; #endif /* CONFIG_SMP */ if (check_resched && p->prio < rq->curr->prio) resched_task(rq->curr); } } /* * Priority of the task has changed. This may cause * us to initiate a push or pull. */ static void prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio) { if (!p->on_rq) return; if (rq->curr == p) { #ifdef CONFIG_SMP /* * If our priority decreases while running, we * may need to pull tasks to this runqueue. */ if (oldprio < p->prio) pull_rt_task(rq); /* * If there's a higher priority task waiting to run * then reschedule. Note, the above pull_rt_task * can release the rq lock and p could migrate. * Only reschedule if p is still on the same runqueue. */ if (p->prio > rq->rt.highest_prio.curr && rq->curr == p) resched_task(p); #else /* For UP simply resched on drop of prio */ if (oldprio < p->prio) resched_task(p); #endif /* CONFIG_SMP */ } else { /* * This task is not running, but if it is * greater than the current running task * then reschedule. */ if (p->prio < rq->curr->prio) resched_task(rq->curr); } } static void watchdog(struct rq *rq, struct task_struct *p) { unsigned long soft, hard; /* max may change after cur was read, this will be fixed next tick */ soft = task_rlimit(p, RLIMIT_RTTIME); hard = task_rlimit_max(p, RLIMIT_RTTIME); if (soft != RLIM_INFINITY) { unsigned long next; if (p->rt.watchdog_stamp != jiffies) { p->rt.timeout++; p->rt.watchdog_stamp = jiffies; } next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); if (p->rt.timeout > next) p->cputime_expires.sched_exp = p->se.sum_exec_runtime; } } static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) { struct sched_rt_entity *rt_se = &p->rt; update_curr_rt(rq); watchdog(rq, p); /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. 
*/ if (p->policy != SCHED_RR) return; if (--p->rt.time_slice) return; p->rt.time_slice = sched_rr_timeslice; /* * Requeue to the end of queue if we (and all of our ancestors) are the * only element on the queue */ <API key>(rt_se) { if (rt_se->run_list.prev != rt_se->run_list.next) { requeue_task_rt(rq, p, 0); <API key>(p); return; } } } static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; p->se.exec_start = rq->clock_task; /* The running task is never eligible for pushing */ <API key>(rq, p); } static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task) { /* * Time slice is 0 for SCHED_FIFO tasks */ if (task->policy == SCHED_RR) return sched_rr_timeslice; else return 0; } const struct sched_class rt_sched_class = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, .check_preempt_curr = <API key>, .pick_next_task = pick_next_task_rt, .put_prev_task = put_prev_task_rt, #ifdef CONFIG_SMP .select_task_rq = select_task_rq_rt, .set_cpus_allowed = set_cpus_allowed_rt, .rq_online = rq_online_rt, .rq_offline = rq_offline_rt, .pre_schedule = pre_schedule_rt, .post_schedule = post_schedule_rt, .task_woken = task_woken_rt, .switched_from = switched_from_rt, #endif .set_curr_task = set_curr_task_rt, .task_tick = task_tick_rt, .get_rr_interval = get_rr_interval_rt, .prio_changed = prio_changed_rt, .switched_to = switched_to_rt, #ifdef CONFIG_SCHED_HMP .inc_hmp_sched_stats = <API key>, .dec_hmp_sched_stats = <API key>, #endif }; #ifdef CONFIG_SCHED_DEBUG extern void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq); void print_rt_stats(struct seq_file *m, int cpu) { rt_rq_iter_t iter; struct rt_rq *rt_rq; rcu_read_lock(); for_each_rt_rq(rt_rq, iter, cpu_rq(cpu)) print_rt_rq(m, cpu, rt_rq); rcu_read_unlock(); } #endif /* CONFIG_SCHED_DEBUG */
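/*
 * Illustrative sketch (not kernel code): the arithmetic behind
 * do_balance_runtime() above. Each neighbour runqueue with spare RT
 * runtime donates 1/weight of its spare (weight = number of CPUs in the
 * root domain), and the borrower is capped at one full period. All names
 * below are hypothetical; this is plain C99 compilable on the host.
 */
#include <stdio.h>
#include <stdint.h>

/* Returns how much the neighbour donates; updates the borrower's runtime. */
static int64_t borrow_step(int64_t *my_runtime, int64_t iter_runtime,
			   int64_t iter_time, int weight, int64_t period)
{
	int64_t diff = iter_runtime - iter_time;	/* neighbour's spare */

	if (diff <= 0)
		return 0;

	diff /= weight;				/* take only a 1/n share */
	if (*my_runtime + diff > period)	/* never exceed one period */
		diff = period - *my_runtime;
	*my_runtime += diff;
	return diff;
}

int main(void)
{
	int64_t mine = 950000000;		/* 0.95 s of runtime so far */
	int64_t got = borrow_step(&mine, 950000000, 600000000, 4, 1000000000);

	/*
	 * The neighbour had 0.35 s spare; a 1/4 share is 87.5 ms, but the
	 * cap at one period (1 s) limits the transfer to 50 ms.
	 */
	printf("borrowed %lld ns, runtime now %lld ns\n",
	       (long long)got, (long long)mine);
	return 0;
}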
<?php /** * @file * @ingroup SMWSpecialPage * @ingroup SpecialPage * * A factbox-like view on an article, implemented by a special page. * * @author Denny Vrandecic */ /** * A factbox view on one specific article, showing all the Semantic data about it * * @ingroup SMWSpecialPage * @ingroup SpecialPage */ class SMWSpecialBrowse extends SpecialPage { /* int How many incoming values should be asked for */ static public $incomingvaluescount = 8; /* int How many incoming properties should be asked for */ static public $<API key> = 21; /* SMWDataValue Topic of this page */ private $subject = null; /* Text to be set in the query form */ private $articletext = ""; /* bool To display outgoing values? */ private $showoutgoing = true; /* bool To display incoming values? */ private $showincoming = false; /* int At which incoming property are we currently? */ private $offset = 0; /** * Constructor */ public function __construct() { global $smwgBrowseShowAll; parent::__construct( 'Browse', '', true, false, 'default', true ); if ( $smwgBrowseShowAll ) { SMWSpecialBrowse::$incomingvaluescount = 21; SMWSpecialBrowse::$<API key> = - 1; } } /** * Main entry point for Special Pages * * @param[in] $query string Given by MediaWiki */ public function execute( $query ) { global $wgRequest, $wgOut, $smwgBrowseShowAll; $this->setHeaders(); // get the GET parameters $this->articletext = $wgRequest->getVal( 'article' ); // no GET parameters? Then try the URL if ( is_null( $this->articletext ) ) { $params = SMWInfolink::decodeParameters( $query, false ); reset( $params ); $this->articletext = current( $params ); } $this->subject = SMWDataValueFactory::newTypeIDValue( '_wpg', $this->articletext ); $offsettext = $wgRequest->getVal( 'offset' ); $this->offset = ( is_null( $offsettext ) ) ? 0 : intval( $offsettext ); $dir = $wgRequest->getVal( 'dir' ); if ( $smwgBrowseShowAll ) { $this->showoutgoing = true; $this->showincoming = true; } if ( $dir === 'both' || $dir === 'in' ) { $this->showincoming = true; } if ( $dir === 'in' ) { $this->showoutgoing = false; } if ( $dir === 'out' ) { $this->showincoming = false; } $wgOut->addHTML( $this->displayBrowse() ); SMWOutputs::commitToOutputPage( $wgOut ); // make sure locally collected output data is pushed to the output! } /** * Create and output HTML including the complete factbox, based on the extracted * parameters in the execute comment. * * @return string A HTML string with the factbox */ private function displayBrowse() { global $wgContLang, $wgOut; $html = "\n"; $leftside = !( $wgContLang->isRTL() ); // For right to left languages, all is mirrored if ( $this->subject->isValid() ) { $html .= $this->displayHead(); if ( $this->showoutgoing ) { $data = smwfGetStore()->getSemanticData( $this->subject->getDataItem() ); $html .= $this->displayData( $data, $leftside ); $html .= $this->displayCenter(); } if ( $this->showincoming ) { list( $indata, $more ) = $this->getInData(); global $<API key>; if ( !$<API key> ) { $leftside = !$leftside; } $html .= $this->displayData( $indata, $leftside, true ); $html .= $this->displayBottom( $more ); } $this->articletext = $this->subject->getWikiValue(); // Add a bit of space between the factbox and the query form if ( !$this->including() ) { $html .= "<p> &#160; </p>\n"; } } if ( !$this->including() ) { $html .= $this->queryForm(); } $wgOut->addHTML( $html ); } /** * Creates the HTML table displaying the data of one subject. * * @param[in] $data SMWSemanticData The data to be displayed * @param[in] $left bool Should properties be displayed on the left side?
* @param[in] $incoming bool Is this an incoming? Or an outgoing? * * @return A string containing the HTML with the factbox */ private function displayData( SMWSemanticData $data, $left = true, $incoming = false ) { // Some of the CSS classes are different for the left or the right side. // In this case, there is an "i" after the "smwb-". This is set here. $ccsPrefix = $left ? 'smwb-' : 'smwb-i'; $html = "<table class=\"{$ccsPrefix}factbox\" cellpadding=\"0\" cellspacing=\"0\">\n"; $diProperties = $data->getProperties(); $noresult = true; foreach ( $diProperties as $key => $diProperty ) { $dvProperty = SMWDataValueFactory::newDataItemValue( $diProperty, null ); if ( $dvProperty->isVisible() ) { $dvProperty->setCaption( $this->getPropertyLabel( $dvProperty, $incoming ) ); $proptext = $dvProperty->getShortHTMLText( smwfGetLinker() ) . "\n"; } elseif ( $diProperty->getKey() == '_INST' ) { $proptext = smwfGetLinker()->specialLink( 'Categories' ); } elseif ( $diProperty->getKey() == '_REDI' ) { $proptext = smwfGetLinker()->specialLink( 'Listredirects', 'isredirect' ); } else { continue; // skip this line } $head = '<th>' . $proptext . "</th>\n"; $body = "<td>\n"; $values = $data->getPropertyValues( $diProperty ); if ( $incoming && ( count( $values ) >= SMWSpecialBrowse::$incomingvaluescount ) ) { $moreIncoming = true; array_pop( $values ); } else { $moreIncoming = false; } $first = true; foreach ( $values as /* SMWDataItem */ $di ) { if ( $first ) { $first = false; } else { $body .= ', '; } if ( $incoming ) { $dv = SMWDataValueFactory::newDataItemValue( $di, null ); } else { $dv = SMWDataValueFactory::newDataItemValue( $di, $diProperty ); } $body .= "<span class=\"{$ccsPrefix}value\">" . $this->displayValue( $dvProperty, $dv, $incoming ) . "</span>\n"; } if ( $moreIncoming ) { // link to the remaining incoming pages: $body .= Html::element( 'a', array( 'href' => SpecialPage::getSafeTitleFor( 'SearchByProperty' )->getLocalURL( array( 'property' => $dvProperty->getWikiValue(), 'value' => $this->subject->getWikiValue() ) ) ), wfMessage( 'smw_browse_more' )->text() ); } $body .= "</td>\n"; // display row $html .= "<tr class=\"{$ccsPrefix}propvalue\">\n" . ( $left ? ( $head . $body ):( $body . $head ) ) . "</tr>\n"; $noresult = false; } // end foreach properties if ( $noresult ) { $html .= "<tr class=\"smwb-propvalue\"><th> &#160; </th><td><em>" . wfMessage( $incoming ? '<API key>':'<API key>' )->text() . "</em></td></tr>\n"; } $html .= "</table>\n"; return $html; } /** * Displays a value, including all relevant links (browse and search by property) * * @param[in] $property SMWPropertyValue The property this value is linked to the subject with * @param[in] $value SMWDataValue The actual value * @param[in] $incoming bool If this is an incoming or outgoing link * * @return string HTML with the link to the article, browse, and search pages */ private function displayValue( SMWPropertyValue $property, SMWDataValue $dataValue, $incoming ) { $linker = smwfGetLinker(); $html = $dataValue->getLongHTMLText( $linker ); if ( $dataValue->getTypeID() == '_wpg' ) { $html .= "&#160;" . SMWInfolink::newBrowsingLink( '+', $dataValue->getLongWikiText() )->getHTML( $linker ); } elseif ( $incoming && $property->isVisible() ) { $html .= "&#160;" . 
SMWInfolink::<API key>( '+', $dataValue->getTitle(), $property->getDataItem()->getLabel(), 'smwsearch' )->getHTML( $linker ); } else { $html .= $dataValue->getInfolinkText( SMW_OUTPUT_HTML, $linker ); } return $html; } /** * Displays the subject that is currently being browsed to. * * @return A string containing the HTML with the subject line */ private function displayHead() { global $wgOut; $wgOut->setHTMLTitle( $this->subject->getTitle() ); $html = "<table class=\"smwb-factbox\" cellpadding=\"0\" cellspacing=\"0\">\n" . "<tr class=\"smwb-title\"><td colspan=\"2\">\n" . $this->subject->getLongHTMLText( smwfGetLinker() ) . "\n" . "</td></tr>\n</table>\n"; return $html; } /** * Creates the HTML for the center bar including the links with further navigation options. * * @return string HTMl with the center bar */ private function displayCenter() { return "<a name=\"smw_browse_incoming\"></a>\n" . "<table class=\"smwb-factbox\" cellpadding=\"0\" cellspacing=\"0\">\n" . "<tr class=\"smwb-center\"><td colspan=\"2\">\n" . ( $this->showincoming ? $this->linkHere( wfMessage( '<API key>' )->text(), true, false, 0 ): $this->linkHere( wfMessage( '<API key>' )->text(), true, true, $this->offset ) ) . "&#160;\n" . "</td></tr>\n" . "</table>\n"; } /** * Creates the HTML for the bottom bar including the links with further navigation options. * * @param[in] $more bool Are there more inproperties to be displayed? * @return string HTMl with the bottom bar */ private function displayBottom( $more ) { $html = "<table class=\"smwb-factbox\" cellpadding=\"0\" cellspacing=\"0\">\n" . "<tr class=\"smwb-center\"><td colspan=\"2\">\n"; global $smwgBrowseShowAll; if ( !$smwgBrowseShowAll ) { if ( ( $this->offset > 0 ) || $more ) { $offset = max( $this->offset - SMWSpecialBrowse::$<API key> + 1, 0 ); $html .= ( $this->offset == 0 ) ? wfMessage( 'smw_result_prev' )->text(): $this->linkHere( wfMessage( 'smw_result_prev' )->text(), $this->showoutgoing, true, $offset ); $offset = $this->offset + SMWSpecialBrowse::$<API key> - 1; // @todo FIXME: i18n patchwork. $html .= " &#160;&#160;&#160; <strong>" . wfMessage( 'smw_result_results' )->text() . " " . ( $this->offset + 1 ) . " – " . ( $offset ) . "</strong> &#160;&#160;&#160; "; $html .= $more ? $this->linkHere( wfMessage( 'smw_result_next' )->text(), $this->showoutgoing, true, $offset ):wfMessage( 'smw_result_next' )->text(); } } $html .= "&#160;\n" . "</td></tr>\n" . "</table>\n"; return $html; } /** * Creates the HTML for a link to this page, with some parameters set. * * @param[in] $text string The anchor text for the link * @param[in] $out bool Should the linked to page include outgoing properties? * @param[in] $in bool Should the linked to page include incoming properties? * @param[in] $offset int What is the offset for the incoming properties? * * @return string HTML with the link to this page */ private function linkHere( $text, $out, $in, $offset ) { $frag = ( $text == wfMessage( '<API key>' )->text() ) ? '#smw_browse_incoming' : ''; return Html::element( 'a', array( 'href' => SpecialPage::getSafeTitleFor( 'Browse' )->getLocalURL( array( 'offset' => $offset, 'dir' => $out ? ( $in ? 'both' : 'out' ) : 'in', 'article' => $this->subject->getLongWikiText() ) ) . $frag ), $text ); } /** * Creates a Semantic Data object with the incoming properties instead of the * usual outproperties. 
* * @return array(SMWSemanticData, bool) The semantic data including all inproperties, and if there are more inproperties left */ private function getInData() { $indata = new SMWSemanticData( $this->subject->getDataItem() ); $options = new SMWRequestOptions(); $options->sort = true; $options->limit = SMWSpecialBrowse::$<API key>; if ( $this->offset > 0 ) $options->offset = $this->offset; $inproperties = smwfGetStore()->getInProperties( $this->subject->getDataItem(), $options ); if ( count( $inproperties ) == SMWSpecialBrowse::$<API key> ) { $more = true; array_pop( $inproperties ); // drop the last one } else { $more = false; } $valoptions = new SMWRequestOptions(); $valoptions->sort = true; $valoptions->limit = SMWSpecialBrowse::$incomingvaluescount; foreach ( $inproperties as $property ) { $values = smwfGetStore()->getPropertySubjects( $property, $this->subject->getDataItem(), $valoptions ); foreach ( $values as $value ) { $indata-><API key>( $property, $value ); } } return array( $indata, $more ); } /** * Figures out the label of the property to be used. For outgoing ones it is just * the text, for incoming ones we try to figure out the inverse one if needed, * either by looking for an explicitly stated one or by creating a default one. * * @param[in] $property SMWPropertyValue The property of interest * @param[in] $incoming bool If it is an incoming property * * @return string The label of the property */ private function getPropertyLabel( SMWPropertyValue $property, $incoming = false ) { global $<API key>; if ( $incoming && $<API key> ) { $oppositeprop = SMWPropertyValue::makeUserProperty( wfMessage( '<API key>' )->text() ); $labelarray = &smwfGetStore()->getPropertyValues( $property->getDataItem()->getDiWikiPage(), $oppositeprop->getDataItem() ); $rv = ( count( $labelarray ) > 0 ) ? $labelarray[0]->getLongWikiText(): wfMessage( '<API key>', $property->getWikiValue() )->text(); } else { $rv = $property->getWikiValue(); } return $this->unbreak( $rv ); } /** * Creates the query form in order to quickly switch to a specific article. * * @return A string containing the HTML for the form */ private function queryForm() { SMWOutputs::requireResource( 'ext.smw.browse' ); $title = SpecialPage::getTitleFor( 'Browse' ); return ' <form name="smwbrowse" action="' . htmlspecialchars( $title->getLocalURL() ) . '" method="get">' . "\n" . ' <input type="hidden" name="title" value="' . $title->getPrefixedText() . '"/>' . wfMessage( 'smw_browse_article' )->text() . "<br />\n" . ' <input type="text" name="article" id="page_input_box" value="' . htmlspecialchars( $this->articletext ) . '" />' . "\n" . ' <input type="submit" value="' . wfMessage( 'smw_browse_go' )->text() . "\"/>\n" . " </form>\n"; } /** * Replace the last two space characters with unbreakable spaces for beautification. * * @param[in] $text string Text to be transformed. Does not need to have spaces * @return string Transformed text */ private function unbreak( $text ) { $nonBreakingSpace = html_entity_decode( '&#160;', ENT_NOQUOTES, 'UTF-8' ); $text = preg_replace( '/[\s]/u', $nonBreakingSpace, $text, - 1, $count ); return $count > 2 ? preg_replace( "/($nonBreakingSpace)/u", ' ', $text, max( 0, $count - 2 ) ):$text; } }
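<?php
/*
 * Standalone sketch (hypothetical helper, not part of Semantic MediaWiki)
 * of the pagination trick used by getInData() above: query exactly the
 * configured limit, and when that many rows come back, treat the last one
 * as a sentinel -- pop it and report that a "next" link is needed. This
 * avoids a separate COUNT query.
 */
function paginate( array $rows, $offset, $limit ) {
	$page = array_slice( $rows, $offset, $limit );
	$more = false;
	if ( count( $page ) == $limit ) {
		$more = true;
		array_pop( $page ); // drop the sentinel row
	}
	return array( $page, $more );
}

list( $page, $more ) = paginate( range( 1, 50 ), 0, 21 );
echo count( $page ) . ( $more ? " shown, more available\n" : " shown, no more\n" );
// prints "20 shown, more available"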
<?php namespace Symfony\Component\Validator\Constraints; use Symfony\Component\Validator\Constraint; use Symfony\Component\Validator\ConstraintValidator; use Symfony\Component\Validator\Exception\<API key>; class CardSchemeValidator extends ConstraintValidator { protected $schemes = [ // American Express card numbers start with 34 or 37 and have 15 digits. 'AMEX' => [ '/^3[47][0-9]{13}$/', ], // China UnionPay cards start with 62 and have between 16 and 19 digits. // Please note that these cards do not follow Luhn Algorithm as a checksum. 'CHINA_UNIONPAY' => [ '/^62[0-9]{14,17}$/', ], // Diners Club card numbers begin with 300 through 305, 36 or 38. All have 14 digits. // There are Diners Club cards that begin with 5 and have 16 digits. // These are a joint venture between Diners Club and MasterCard, and should be processed like a MasterCard. 'DINERS' => [ '/^3(?:0[0-5]|[68][0-9])[0-9]{11}$/', ], // Discover card numbers begin with 6011, 622126 through 622925, 644 through 649 or 65. // All have 16 digits. 'DISCOVER' => [ '/^6011[0-9]{12}$/', '/^64[4-9][0-9]{13}$/', '/^65[0-9]{14}$/', '/^622(12[6-9]|1[3-9][0-9]|[2-8][0-9][0-9]|91[0-9]|92[0-5])[0-9]{10}$/', ], // InstaPayment cards begin with 637 through 639 and have 16 digits. 'INSTAPAYMENT' => [ '/^63[7-9][0-9]{13}$/', ], // JCB cards beginning with 2131 or 1800 have 15 digits. // JCB cards beginning with 35 have 16 digits. 'JCB' => [ '/^(?:2131|1800|35[0-9]{3})[0-9]{11}$/', ], // Laser cards begin with either 6304, 6706, 6709 or 6771 and have between 16 and 19 digits. 'LASER' => [ '/^(6304|670[69]|6771)[0-9]{12,15}$/', ], // Maestro international cards begin with 675900..675999 and have between 12 and 19 digits. // Maestro UK cards begin with either 500000..509999 or 560000..699999 and have between 12 and 19 digits. 'MAESTRO' => [ '/^(6759[0-9]{2})[0-9]{6,13}$/', '/^(50[0-9]{4})[0-9]{6,13}$/', '/^5[6-9][0-9]{10,17}$/', '/^6[0-9]{11,18}$/', ], // All MasterCard numbers start with the numbers 51 through 55. All have 16 digits. // October 2016 MasterCard numbers can also start with 222100 through 272099. 'MASTERCARD' => [ '/^5[1-5][0-9]{14}$/', '/^2(22[1-9][0-9]{12}|2[3-9][0-9]{13}|[3-6][0-9]{14}|7[0-1][0-9]{13}|720[0-9]{12})$/', ], // All Visa card numbers start with a 4 and have a length of 13, 16, or 19 digits. 'VISA' => [ '/^4([0-9]{12}|[0-9]{15}|[0-9]{18})$/', ], ]; /** * Validates a creditcard belongs to a specified scheme. * * @param mixed $value * @param Constraint $constraint */ public function validate($value, Constraint $constraint) { if (!$constraint instanceof CardScheme) { throw new <API key>($constraint, __NAMESPACE__.'\CardScheme'); } if (null === $value || '' === $value) { return; } if (!is_numeric($value)) { $this->context->buildViolation($constraint->message) ->setParameter('{{ value }}', $this->formatValue($value)) ->setCode(CardScheme::NOT_NUMERIC_ERROR) ->addViolation(); return; } $schemes = array_flip((array) $constraint->schemes); $schemeRegexes = array_intersect_key($this->schemes, $schemes); foreach ($schemeRegexes as $regexes) { foreach ($regexes as $regex) { if (preg_match($regex, $value)) { return; } } } $this->context->buildViolation($constraint->message) ->setParameter('{{ value }}', $this->formatValue($value)) ->setCode(CardScheme::<API key>) ->addViolation(); } }
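<?php
/*
 * Standalone sketch of the matching loop above: a value is accepted as soon
 * as any regex of any requested scheme matches. The patterns are copied from
 * the validator's table; 4111111111111111 is the well-known public Visa test
 * number, not real card data.
 */
$schemes = array(
	'VISA'       => array( '/^4([0-9]{12}|[0-9]{15}|[0-9]{18})$/' ),
	'MASTERCARD' => array( '/^5[1-5][0-9]{14}$/' ),
);

function matchesAnyScheme( $value, array $regexLists ) {
	foreach ( $regexLists as $regexes ) {
		foreach ( $regexes as $regex ) {
			if ( preg_match( $regex, $value ) ) {
				return true;
			}
		}
	}
	return false;
}

var_dump( matchesAnyScheme( '4111111111111111', $schemes ) );                // bool(true)
var_dump( matchesAnyScheme( '9999', array( 'VISA' => $schemes['VISA'] ) ) ); // bool(false)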
<?php class <API key> extends AddThis_addjs{ var $jsAfterAdd; function <API key>(){ if (! did_action('admin_init') && ! current_filter('admin_init')) { _doing_it_wrong('<API key>', 'This function should only be called on an admin page load and no earlier than admin_init', 1); return null; } if (apply_filters('<API key>', '__return_true')) { $plugins = get_plugins(); if (empty($this->_atInstalled)) { foreach($plugins as $plugin) { if (substr($plugin['Name'], 0, 7) === 'AddThis') array_push($this->_atInstalled, $plugin['Name']); } } $keys = array_keys($this->_atPlugins); $uninstalled = array_diff( $keys, $this->_atInstalled); if (empty($uninstalled)) return false; // Get rid of our keys, we just want the names which are the keys elsewhere $uninstalled = array_values($uninstalled); $string = __('Want to increase your site traffic? AddThis also has '); $count = count($uninstalled); if ($count == 1){ $string .= __('a plugin for ', 'addthis'); $string .= __( sprintf('<a href="%s" target="_blank">' .$this->_atPlugins[$uninstalled[0]][1] .'</a>', $this->_atPlugins[$uninstalled[0]][0]), 'addthis'); } else { $string .= __('plugins for '); for ($i = 0; $i < $count; $i++) { $string .= __( sprintf('<strong><a href="%s" target="_blank" >' .$this->_atPlugins[$uninstalled[$i]][1] .'</a></strong>', $this->_atPlugins[$uninstalled[$i]][0]), 'addthis'); if ($i < ($count - 2)) $string .= ', '; else if ($i == ($count -2)) $string .= ' and '; else if ($i == ($count -1)) $string .= ' plugins available.'; } } return '<p class="addthis_more_promo">' .$string . '</p>'; } } function addAfterScript($newData){ $this->jsAfterAdd .= $newData; } function addAfterToJs(){ if (! empty($this->jsAfterAdd)) $this->jsToAdd .= '<script type="text/javascript">' . $this->jsAfterAdd . '</script>'; } function output_script(){ if ($this->_js_added != true) { $this->wrapJs(); $this->addWidgetToJs(); $this->addAfterToJs(); echo $this->jsToAdd; $this->_js_added = true; } } function <API key>($content){ if ($this->_js_added != true && ! is_admin() && ! is_feed() ) { $this->wrapJs(); $this->addWidgetToJs(); $this->addAfterToJs(); $content = $content . $this->jsToAdd; $this->_js_added = true; } return $content; } }
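<?php
/*
 * Sketch (hypothetical helper, not plugin code) of the list-joining rule the
 * promo-string builder above implements with its index arithmetic: separate
 * items with ", ", but put " and " before the final item.
 */
function join_natural( array $items ) {
	$count = count( $items );
	if ( $count == 0 ) {
		return '';
	}
	if ( $count == 1 ) {
		return $items[0];
	}
	return implode( ', ', array_slice( $items, 0, $count - 1 ) )
		. ' and ' . $items[ $count - 1 ];
}

echo join_natural( array( 'Sharing', 'Follow', 'Welcome' ) ) . "\n";
// prints "Sharing, Follow and Welcome"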
<?php namespace eZ\Publish\Core\MVC\Symfony\Locale; /** * Interface for locale converters. * eZ Publish uses <ISO639-2/B>-<ISO3166-Alpha2> locale format (mostly, some supported locales being out of this format, e.g. cro-HR). * Symfony uses the standard POSIX locale format (<ISO639-1>_<ISO3166-Alpha2>), which is supported by Intl PHP extension. * * Locale converters are meant to convert in those 2 formats back and forth. */ interface <API key> { /** * Converts a locale in eZ Publish internal format to POSIX format. * Returns null if conversion cannot be made. * * @param string $ezpLocale * * @return string|null */ public function convertToPOSIX($ezpLocale); /** * Converts a locale in POSIX format to eZ Publish internal format. * Returns null if conversion cannot be made. * * @param string $posixLocale * * @return string|null */ public function convertToEz($posixLocale); }
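<?php
/*
 * Minimal sketch of an implementation of the interface above (hypothetical;
 * eZ's real converter is configuration-driven). A lookup table is used in
 * both directions because locales such as cro-HR do not follow the
 * <ISO639-2/B>-<ISO3166-Alpha2> pattern, so pure string manipulation is not
 * enough. The map entries are illustrative only.
 */
class ArrayLocaleConverter
{
	private $ezToPosix = array(
		'eng-GB' => 'en_GB',
		'fre-FR' => 'fr_FR',
		'cro-HR' => 'hr_HR', // irregular eZ locale, hence the map
	);

	public function convertToPOSIX( $ezpLocale )
	{
		return isset( $this->ezToPosix[$ezpLocale] )
			? $this->ezToPosix[$ezpLocale] : null;
	}

	public function convertToEz( $posixLocale )
	{
		$posixToEz = array_flip( $this->ezToPosix );
		return isset( $posixToEz[$posixLocale] )
			? $posixToEz[$posixLocale] : null;
	}
}

$converter = new ArrayLocaleConverter();
var_dump( $converter->convertToPOSIX( 'cro-HR' ) ); // string(5) "hr_HR"
var_dump( $converter->convertToEz( 'xx_XX' ) );     // NULL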
#include <common.h> #include <mpc5xxx.h> #include <pci.h> #include <asm/processor.h> #ifdef CONFIG_VIDEO_SM501 #include <sm501.h> #endif #if defined(CONFIG_MPC5200_DDR) #include "mt46v16m16-75.h" #else #include "mt48lc16m16a2-75.h" #endif #ifdef CONFIG_PS2MULT void ps2mult_early_init(void); #endif #ifndef CFG_RAMBOOT static void sdram_start (int hi_addr) { long hi_addr_bit = hi_addr ? 0x01000000 : 0; /* unlock mode register */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000000 | hi_addr_bit; __asm__ volatile ("sync"); /* precharge all banks */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000002 | hi_addr_bit; __asm__ volatile ("sync"); #if SDRAM_DDR /* set mode register: extended mode */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_EMODE; __asm__ volatile ("sync"); /* set mode register: reset DLL */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_MODE | 0x04000000; __asm__ volatile ("sync"); #endif /* precharge all banks */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000002 | hi_addr_bit; __asm__ volatile ("sync"); /* auto refresh */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | 0x80000004 | hi_addr_bit; __asm__ volatile ("sync"); /* set mode register */ *(vu_long *)MPC5XXX_SDRAM_MODE = SDRAM_MODE; __asm__ volatile ("sync"); /* normal operation */ *(vu_long *)MPC5XXX_SDRAM_CTRL = SDRAM_CONTROL | hi_addr_bit; __asm__ volatile ("sync"); } #endif /* * ATTENTION: Although partially referenced initdram does NOT make real use * use of CFG_SDRAM_BASE. The code does not work if CFG_SDRAM_BASE * is something else than 0x00000000. */ #if defined(CONFIG_MPC5200) long int initdram (int board_type) { ulong dramsize = 0; ulong dramsize2 = 0; uint svr, pvr; #ifndef CFG_RAMBOOT ulong test1, test2; /* setup SDRAM chip selects */ *(vu_long *)<API key> = 0x0000001c; /* 512MB at 0x0 */ *(vu_long *)<API key> = 0x40000000; /* disabled */ __asm__ volatile ("sync"); /* setup config registers */ *(vu_long *)<API key> = SDRAM_CONFIG1; *(vu_long *)<API key> = SDRAM_CONFIG2; __asm__ volatile ("sync"); #if SDRAM_DDR /* set tap delay */ *(vu_long *)MPC5XXX_CDM_PORCFG = SDRAM_TAPDELAY; __asm__ volatile ("sync"); #endif /* find RAM size using SDRAM CS0 only */ sdram_start(0); test1 = get_ram_size((long *)CFG_SDRAM_BASE, 0x20000000); sdram_start(1); test2 = get_ram_size((long *)CFG_SDRAM_BASE, 0x20000000); if (test1 > test2) { sdram_start(0); dramsize = test1; } else { dramsize = test2; } /* memory smaller than 1MB is impossible */ if (dramsize < (1 << 20)) { dramsize = 0; } /* set SDRAM CS0 size according to the amount of RAM found */ if (dramsize > 0) { *(vu_long *)<API key> = 0x13 + __builtin_ffs(dramsize >> 20) - 1; } else { *(vu_long *)<API key> = 0; /* disabled */ } /* let SDRAM CS1 start right after CS0 */ *(vu_long *)<API key> = dramsize + 0x0000001c; /* 512MB */ /* find RAM size using SDRAM CS1 only */ sdram_start(0); test1 = get_ram_size((long *)(CFG_SDRAM_BASE + dramsize), 0x20000000); sdram_start(1); test2 = get_ram_size((long *)(CFG_SDRAM_BASE + dramsize), 0x20000000); if (test1 > test2) { sdram_start(0); dramsize2 = test1; } else { dramsize2 = test2; } /* memory smaller than 1MB is impossible */ if (dramsize2 < (1 << 20)) { dramsize2 = 0; } /* set SDRAM CS1 size according to the amount of RAM found */ if (dramsize2 > 0) { *(vu_long *)<API key> = dramsize | (0x13 + __builtin_ffs(dramsize2 >> 20) - 1); } else { *(vu_long *)<API key> = dramsize; /* disabled */ } #else /* CFG_RAMBOOT */ /* retrieve size of memory connected to SDRAM CS0 */ dramsize = *(vu_long *)<API key> & 0xFF; if (dramsize >= 
0x13) { dramsize = (1 << (dramsize - 0x13)) << 20; } else { dramsize = 0; } /* retrieve size of memory connected to SDRAM CS1 */ dramsize2 = *(vu_long *)<API key> & 0xFF; if (dramsize2 >= 0x13) { dramsize2 = (1 << (dramsize2 - 0x13)) << 20; } else { dramsize2 = 0; } #endif /* CFG_RAMBOOT */ /* * On MPC5200B we need to set the special configuration delay in the * DDR controller. Please refer to Freescale's AN3221 "MPC5200B SDRAM * Initialization and Configuration", 3.3.1 SDelay--MBAR + 0x0190: * * "The SDelay should be written to a value of 0x00000004. It is * required to account for changes caused by normal wafer processing * parameters." */ svr = get_svr(); pvr = get_pvr(); if ((SVR_MJREV(svr) >= 2) && (PVR_MAJ(pvr) == 1) && (PVR_MIN(pvr) == 4)) { *(vu_long *)<API key> = 0x04; __asm__ volatile ("sync"); } #if defined(CONFIG_TQM5200_B) return dramsize + dramsize2; #else return dramsize; #endif /* CONFIG_TQM5200_B */ } #elif defined(CONFIG_MGT5100) long int initdram (int board_type) { ulong dramsize = 0; #ifndef CFG_RAMBOOT ulong test1, test2; /* setup and enable SDRAM chip selects */ *(vu_long *)MPC5XXX_SDRAM_START = 0x00000000; *(vu_long *)MPC5XXX_SDRAM_STOP = 0x0000ffff; *(vu_long *)MPC5XXX_ADDECR |= (1 << 22); /* Enable SDRAM */ __asm__ volatile ("sync"); /* setup config registers */ *(vu_long *)<API key> = SDRAM_CONFIG1; *(vu_long *)<API key> = SDRAM_CONFIG2; /* address select register */ *(vu_long *)<API key> = SDRAM_ADDRSEL; __asm__ volatile ("sync"); /* find RAM size */ sdram_start(0); test1 = get_ram_size((ulong *)CFG_SDRAM_BASE, 0x80000000); sdram_start(1); test2 = get_ram_size((ulong *)CFG_SDRAM_BASE, 0x80000000); if (test1 > test2) { sdram_start(0); dramsize = test1; } else { dramsize = test2; } /* set SDRAM end address according to size */ *(vu_long *)MPC5XXX_SDRAM_STOP = ((dramsize - 1) >> 15); #else /* CFG_RAMBOOT */ /* Retrieve amount of SDRAM available */ dramsize = ((*(vu_long *)MPC5XXX_SDRAM_STOP + 1) << 15); #endif /* CFG_RAMBOOT */ return dramsize; } #else #error Neither CONFIG_MPC5200 or CONFIG_MGT5100 defined #endif int checkboard (void) { #if defined(CONFIG_AEVFIFO) puts ("Board: AEVFIFO\n"); return 0; #endif #if defined(CONFIG_TQM5200S) # define MODULE_NAME "TQM5200S" #else # define MODULE_NAME "TQM5200" #endif #if defined(CONFIG_STK52XX) # define CARRIER_NAME "STK52xx" #elif defined(CONFIG_TB5200) # define CARRIER_NAME "TB5200" #elif defined(CONFIG_CAM5200) # define CARRIER_NAME "Cam5200" #else # error "Unknown carrier board" #endif puts ( "Board: " MODULE_NAME " (TQ-Components GmbH)\n" " on a " CARRIER_NAME " carrier board\n"); return 0; } #undef MODULE_NAME #undef CARRIER_NAME void flash_preinit(void) { /* * Now, when we are in RAM, enable flash write * access for detection process. * Note that CS_BOOT cannot be cleared when * executing in flash. 
*/ #if defined(CONFIG_MGT5100) *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 25); /* disable CS_BOOT */ *(vu_long *)MPC5XXX_ADDECR |= (1 << 16); /* enable CS0 */ #endif *(vu_long *)MPC5XXX_BOOTCS_CFG &= ~0x1; /* clear RO */ } #ifdef CONFIG_PCI static struct pci_controller hose; extern void pci_mpc5xxx_init(struct pci_controller *); void pci_init_board(void) { pci_mpc5xxx_init(&hose); } #endif #if defined (CFG_CMD_IDE) && defined (CONFIG_IDE_RESET) #if defined (CONFIG_MINIFAP) #define <API key> 0x00000040UL #define <API key> 0x00000048UL #define <API key> 0x00000040UL #define <API key> 0x0001000CUL #define <API key> 0x00010004UL #define SM501_GPIO_51 0x00080000UL #else #define GPIO_PSC1_4 0x01000000UL #endif void init_ide_reset (void) { debug ("init_ide_reset\n"); #if defined (CONFIG_MINIFAP) /* Configure GPIO_51 of the SM501 grafic controller as ATA reset */ /* enable GPIO control (in both power modes) */ *(vu_long *) (SM501_MMIO_BASE+<API key>) |= <API key>; *(vu_long *) (SM501_MMIO_BASE+<API key>) |= <API key>; /* configure GPIO51 as output */ *(vu_long *) (SM501_MMIO_BASE+<API key>) |= SM501_GPIO_51; #else /* Configure PSC1_4 as GPIO output for ATA reset */ *(vu_long *) <API key> |= GPIO_PSC1_4; *(vu_long *) MPC5XXX_WU_GPIO_DIR |= GPIO_PSC1_4; #endif } void ide_set_reset (int idereset) { debug ("ide_reset(%d)\n", idereset); #if defined (CONFIG_MINIFAP) if (idereset) { *(vu_long *) (SM501_MMIO_BASE+<API key>) &= ~SM501_GPIO_51; } else { *(vu_long *) (SM501_MMIO_BASE+<API key>) |= SM501_GPIO_51; } #else if (idereset) { *(vu_long *) <API key> &= ~GPIO_PSC1_4; } else { *(vu_long *) <API key> |= GPIO_PSC1_4; } #endif } #endif /* defined (CFG_CMD_IDE) && defined (CONFIG_IDE_RESET) */ #ifdef CONFIG_POST /* * Reads GPIO pin PSC6_3. A keypress is reported, if PSC6_3 is low. If PSC6_3 * is left open, no keypress is detected. */ int <API key>(void) { struct mpc5xxx_gpio *gpio; gpio = (struct mpc5xxx_gpio*) MPC5XXX_GPIO; /* * Configure PSC6_1 and PSC6_3 as GPIO. PSC6 then couldn't be used in * CODEC or UART mode. Consumer IrDA should still be possible. */ gpio->port_config &= ~(0x07000000); gpio->port_config |= 0x03000000; /* Enable GPIO for GPIO_IRDA_1 (IR_USB_CLK pin) = PSC6_3 */ gpio->simple_gpioe |= 0x20000000; /* Configure GPIO_IRDA_1 as input */ gpio->simple_ddr &= ~(0x20000000); return ((gpio->simple_ival & 0x20000000) ? 0 : 1); } #endif #if defined(CONFIG_POST) || defined(CONFIG_LOGBUFFER) void post_word_store (ulong a) { volatile ulong *save_addr = (volatile ulong *)(MPC5XXX_SRAM + <API key>); *save_addr = a; } ulong post_word_load (void) { volatile ulong *save_addr = (volatile ulong *)(MPC5XXX_SRAM + <API key>); return *save_addr; } #endif /* CONFIG_POST || CONFIG_LOGBUFFER*/ #ifdef CONFIG_PS2MULT #ifdef <API key> int board_early_init_r (void) { ps2mult_early_init(); return (0); } #endif #endif /* CONFIG_PS2MULT */ int last_stage_init (void) { /* * auto scan for really existing devices and re-set chip select * configuration. */ u16 save, tmp; int restore; /* * Check for SRAM and SRAM size */ /* save original SRAM content */ save = *(volatile u16 *)CFG_CS2_START; restore = 1; /* write test pattern to SRAM */ *(volatile u16 *)CFG_CS2_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! 
possible error in SRAM detection\n"); if (*(volatile u16 *)CFG_CS2_START != 0xA5A5) { /* no SRAM at all, disable cs */ *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 18); *(vu_long *)MPC5XXX_CS2_START = 0x0000FFFF; *(vu_long *)MPC5XXX_CS2_STOP = 0x0000FFFF; restore = 0; __asm__ volatile ("sync"); } else if (*(volatile u16 *)(CFG_CS2_START + (1<<19)) == 0xA5A5) { /* make sure that we access a mirrored address */ *(volatile u16 *)CFG_CS2_START = 0x1111; __asm__ volatile ("sync"); if (*(volatile u16 *)(CFG_CS2_START + (1<<19)) == 0x1111) { /* SRAM size = 512 kByte */ *(vu_long *)MPC5XXX_CS2_STOP = STOP_REG(CFG_CS2_START, 0x80000); __asm__ volatile ("sync"); puts ("SRAM: 512 kB\n"); } else puts ("!! possible error in SRAM detection\n"); } else { puts ("SRAM: 1 MB\n"); } /* restore original SRAM content */ if (restore) { *(volatile u16 *)CFG_CS2_START = save; __asm__ volatile ("sync"); } /* * Check for Graphic Controller */ /* save original FB content */ save = *(volatile u16 *)CFG_CS1_START; restore = 1; /* write test pattern to FB memory */ *(volatile u16 *)CFG_CS1_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! possible error in graphic controller detection\n"); if (*(volatile u16 *)CFG_CS1_START != 0xA5A5) { /* no graphic controller at all, disable cs */ *(vu_long *)MPC5XXX_ADDECR &= ~(1 << 17); *(vu_long *)MPC5XXX_CS1_START = 0x0000FFFF; *(vu_long *)MPC5XXX_CS1_STOP = 0x0000FFFF; restore = 0; __asm__ volatile ("sync"); } else { puts ("VGA: SMI501 (Voyager) with 8 MB\n"); } /* restore original FB content */ if (restore) { *(volatile u16 *)CFG_CS1_START = save; __asm__ volatile ("sync"); } return 0; } #ifdef CONFIG_VIDEO_SM501 #define DISPLAY_WIDTH 640 #define DISPLAY_HEIGHT 480 #ifdef <API key> #error <API key> not supported. #endif /* <API key> */ #ifdef <API key> #error <API key> not supported. #endif /* <API key> */ #ifdef <API key> static const SMI_REGS init_regs [] = { #if 0 /* CRT only */ {0x00004, 0x0}, {0x00048, 0x00021807}, {0x0004C, 0x10090a01}, {0x00054, 0x1}, {0x00040, 0x00021807}, {0x00044, 0x10090a01}, {0x00054, 0x0}, {0x80200, 0x00010000}, {0x80204, 0x0}, {0x80208, 0x0A000A00}, {0x8020C, 0x02fa027f}, {0x80210, 0x004a028b}, {0x80214, 0x020c01df}, {0x80218, 0x000201e9}, {0x80200, 0x00013306}, #else /* panel + CRT */ {0x00004, 0x0}, {0x00048, 0x00021807}, {0x0004C, 0x091a0a01}, {0x00054, 0x1}, {0x00040, 0x00021807}, {0x00044, 0x091a0a01}, {0x00054, 0x0}, {0x80000, 0x0f013106}, {0x80004, 0xc428bb17}, {0x8000C, 0x00000000}, {0x80010, 0x0a000a00}, {0x80014, 0x02800000}, {0x80018, 0x01e00000}, {0x8001C, 0x00000000}, {0x80020, 0x01e00280}, {0x80024, 0x02fa027f}, {0x80028, 0x004a028b}, {0x8002C, 0x020c01df}, {0x80030, 0x000201e9}, {0x80200, 0x00010000}, #endif {0, 0} }; #endif /* <API key> */ #ifdef <API key> /* * Return text to be printed beside the logo. */ void video_get_info_str (int line_number, char *info) { if (line_number == 1) { strcpy (info, " Board: TQM5200 (TQ-Components GmbH)"); #if defined (CONFIG_STK52XX) || defined (CONFIG_TB5200) } else if (line_number == 2) { #if defined (CONFIG_STK52XX) strcpy (info, " on a STK52xx carrier board"); #endif #if defined (CONFIG_TB5200) strcpy (info, " on a TB5200 carrier board"); #endif #endif } else { info [0] = '\0'; } } #endif /* * Returns SM501 register base address. First thing called in the * driver. Checks if SM501 is physically present. 
*/ unsigned int board_video_init (void) { u16 save, tmp; int restore, ret; /* * Check for Graphic Controller */ /* save original FB content */ save = *(volatile u16 *)CFG_CS1_START; restore = 1; /* write test pattern to FB memory */ *(volatile u16 *)CFG_CS1_START = 0xA5A5; __asm__ volatile ("sync"); /* * Put a different pattern on the data lines: otherwise they may float * long enough to read back what we wrote. */ tmp = *(volatile u16 *)CFG_FLASH_BASE; if (tmp == 0xA5A5) puts ("!! possible error in graphic controller detection\n"); if (*(volatile u16 *)CFG_CS1_START != 0xA5A5) { /* no graphic controller found */ restore = 0; ret = 0; } else { ret = SM501_MMIO_BASE; } if (restore) { *(volatile u16 *)CFG_CS1_START = save; __asm__ volatile ("sync"); } return ret; } /* * Returns SM501 framebuffer address */ unsigned int board_video_get_fb (void) { return SM501_FB_BASE; } /* * Called after initializing the SM501 and before clearing the screen. */ void <API key> (unsigned int base) { } /* * Return a pointer to the initialization sequence. */ const SMI_REGS *board_get_regs (void) { return init_regs; } int board_get_width (void) { return DISPLAY_WIDTH; } int board_get_height (void) { return DISPLAY_HEIGHT; } #endif /* CONFIG_VIDEO_SM501 */
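The SRAM and graphic-controller checks above repeat one probe idiom: save the word at the chip-select base, write a test pattern, read from a different chip select so the data lines stop floating at the pattern, then read back and compare. The following is a minimal self-contained sketch of that idiom; the probe_bus() helper and its parameters are hypothetical (the board code uses CFG_CS1_START/CFG_CS2_START against CFG_FLASH_BASE), and the "sync" barrier assumes a PowerPC target like the code above.

#include <stdint.h>

#define TEST_PATTERN 0xA5A5

typedef volatile uint16_t vu16;

/* Returns 1 if writable memory responds at 'probe', 0 otherwise. */
static int probe_bus(vu16 *probe, vu16 *other_cs)
{
	uint16_t save = *probe;		/* preserve the original content */
	int present;

	*probe = TEST_PATTERN;
	__asm__ volatile ("sync");	/* order the write (PowerPC) */

	/*
	 * Touch a different chip select: otherwise the data lines may
	 * float at the test pattern long enough that a nonexistent
	 * device appears to "read back" exactly what we wrote.
	 */
	(void)*other_cs;

	present = (*probe == TEST_PATTERN);
	if (present)
		*probe = save;		/* restore the original content */
	return present;
}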
<?php defined('_JEXEC') or die; ?> <?php if ( $this->params->def( 'show_page_title', 1 ) ) : ?> <div class="componentheading<?php echo $this->params->get( 'pageclass_sfx' ); ?>"> <?php echo $this->escape($this->params->get('page_title')); ?> </div> <?php endif; ?> <form action="index.php?option=com_user&amp;task=remindusername" method="post" class="josForm form-validate"> <table cellpadding="0" cellspacing="0" border="0" width="100%" class="contentpane"> <tr> <td colspan="2" height="40"> <p><?php echo JText::_('<API key>'); ?></p> </td> </tr> <tr> <td height="40"> <label for="email" class="hasTip" title="<?php echo JText::_('<API key>'); ?>::<?php echo JText::_('<API key>'); ?>"><?php echo JText::_('Email Address'); ?>:</label> </td> <td> <input id="email" name="email" type="text" class="required validate-email" /> </td> </tr> </table> <button type="submit" class="validate"><?php echo JText::_('Submit'); ?></button> <?php echo JHTML::_( 'form.token' ); ?> </form>
# Jockey is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2, or (at your option) any # later version. # Jockey is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. import tick_mark import line_style import pychart_util import error_bar import chart_object import legend import object_set import line_plot_doc import theme from pychart_types import * from types import * default_width = 1.2 line_style_itr = None _keys = { 'data' : (AnyType, None, pychart_util.data_desc), 'label': (StringType, '???', pychart_util.label_desc), 'data_label_offset': (CoordType, (0, 5), """The location of data labels relative to the sample point. Meaningful only when data_label_format != None."""), 'data_label_format': (FormatType, None, """The format string for the label printed beside a sample point. It can be a `printf' style format string, or a two-parameter function that takes the (x, y) values and returns a string. """ + pychart_util.string_desc), 'xcol' : (IntType, 0, pychart_util.xcol_desc), 'ycol': (IntType, 1, pychart_util.ycol_desc), 'y_error_minus_col': (IntType, 2, """The column (within "data") from which the depth of the errorbar is extracted. Meaningful only when error_bar != None. <<error_bar>>"""), 'y_error_plus_col': (IntType, -1, """The column (within "data") from which the height of the errorbar is extracted. Meaningful only when error_bar != None. <<error_bar>>"""), 'y_qerror_minus_col': (IntType, -1, '<<error_bar>>'), 'y_qerror_plus_col': (IntType, -1, '<<error_bar>>'), 'line_style': (line_style.T, lambda: line_style_itr.next(), pychart_util.line_desc, "By default, a style is picked from standard styles round-robin. <<line_style>>"), 'tick_mark': (tick_mark.T, None, pychart_util.tick_mark_desc), 'error_bar': (error_bar.T, None, 'The style of the error bar. <<error_bar>>'), } class T(chart_object.T): __doc__ = line_plot_doc.doc keys = _keys def check_integrity(self): assert chart_object.T.check_integrity(self) ##AUTOMATICALLY GENERATED ##END AUTOMATICALLY GENERATED def get_data_range(self, which): if which == 'X': return pychart_util.get_data_range(self.data, self.xcol) else: return pychart_util.get_data_range(self.data, self.ycol) def get_legend_entry(self): if self.label: line_style = self.line_style if not line_style and self.error_bar: line_style = getattr(self.error_bar, 'line_style', None) or \ getattr(self.error_bar, 'hline_style', None) or \ getattr(self.error_bar, 'vline_style', None) if not line_style: raise Exception, 'Line plot has label, but an empty line style and error bar.' 
return legend.Entry(line_style=line_style, tick_mark=self.tick_mark, fill_style=None, label=self.label) return None def draw(self, ar, can): # Draw the line clipbox = theme.adjust_bounding_box([ar.loc[0], ar.loc[1], ar.loc[0] + ar.size[0], ar.loc[1] + ar.size[1]]); can.clip(clipbox[0],clipbox[1],clipbox[2],clipbox[3]) if self.line_style: points = [] for pair in self.data: yval = pychart_util.get_sample_val(pair, self.ycol) xval = pair[self.xcol] if None not in (xval, yval): points.append((ar.x_pos(xval), ar.y_pos(yval))) can.lines(self.line_style, points) can.endclip() # Draw tick marks and error bars can.clip(ar.loc[0] - 10, ar.loc[1] - 10, ar.loc[0] + ar.size[0] + 10, ar.loc[1] + ar.size[1] + 10) for pair in self.data: x = pair[self.xcol] y = pychart_util.get_sample_val(pair, self.ycol) if None in (x, y): continue x_pos = ar.x_pos(x) y_pos = ar.y_pos(y) if self.error_bar: plus = pair[self.y_error_plus_col or self.y_error_minus_col] minus = pair[self.y_error_minus_col or self.y_error_plus_col] if self.y_qerror_minus_col or self.y_qerror_plus_col: q_plus = pair[self.y_qerror_plus_col or self.y_qerror_minus_col] q_minus = pair[self.y_qerror_minus_col or self.y_qerror_plus_col] if None not in (minus,plus,q_minus,q_plus): self.error_bar.draw(can, (x_pos, y_pos), ar.y_pos(y - minus), ar.y_pos(y + plus), ar.y_pos(y - q_minus), ar.y_pos(y + q_plus)) else: if None not in (minus,plus): #PDS self.error_bar.draw(can, (x_pos, y_pos), ar.y_pos(y - minus), ar.y_pos(y + plus)) if self.tick_mark: self.tick_mark.draw(can, x_pos, y_pos) if self.data_label_format: can.show(x_pos + self.data_label_offset[0], y_pos + self.data_label_offset[1], '/hC' + pychart_util.apply_format(self.data_label_format, (x, y), 1)) can.endclip() def init(): global line_style_itr line_styles = object_set.T() for org_style in line_style.standards.list(): style = line_style.T(width = default_width, color = org_style.color, dash = org_style.dash) line_styles.add(style) line_style_itr = line_styles.iterate() theme.<API key>(init)
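The 'line_style' default above is the interesting part: init() wraps the standard styles in an iterator, and every plot created without an explicit line_style pulls the next style from it, round-robin. Below is a minimal sketch of that allocation pattern, written in C for illustration rather than pychart's Python; the style table and names are made up.

#include <stdio.h>

/* a stand-in for line_style.standards */
struct line_style { const char *color; int dash; };

static const struct line_style standards[] = {
	{ "black", 0 }, { "red", 1 }, { "blue", 2 },
};
static unsigned int next_style;	/* iterator state, like line_style_itr */

/* Each plot without an explicit style gets the next one, wrapping around. */
static const struct line_style *pick_default_style(void)
{
	size_t n = sizeof(standards) / sizeof(standards[0]);
	return &standards[next_style++ % n];
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("plot %d -> %s\n", i, pick_default_style()->color);
	return 0;
}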
<html lang="en"> <head> <title>ARM-Instruction-Set - Using as</title> <meta http-equiv="Content-Type" content="text/html"> <meta name="description" content="Using as"> <meta name="generator" content="makeinfo 4.13"> <link title="Top" rel="start" href="index.html#Top"> <link rel="up" href="ARM-Syntax.html#ARM-Syntax" title="ARM Syntax"> <link rel="next" href="ARM_002dChars.html#ARM_002dChars" title="ARM-Chars"> <link href="http: <!-- This file documents the GNU Assembler "as". Copyright (C) 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. A copy of the license is included in the section entitled ``GNU Free Documentation License''. --> <meta http-equiv="Content-Style-Type" content="text/css"> <style type="text/css"><!-- pre.display { font-family:inherit } pre.format { font-family:inherit } pre.smalldisplay { font-family:inherit; font-size:smaller } pre.smallformat { font-family:inherit; font-size:smaller } pre.smallexample { font-size:smaller } pre.smalllisp { font-size:smaller } span.sc { font-variant:small-caps } span.roman { font-family:serif; font-weight:normal; } span.sansserif { font-family:sans-serif; font-weight:normal; } --></style> <link rel="stylesheet" type="text/css" href="../cs.css"> </head> <body> <div class="node"> <a name="ARM-Instruction-Set"></a> <a name="<API key>"></a> <p> Next:&nbsp;<a rel="next" accesskey="n" href="ARM_002dChars.html#ARM_002dChars">ARM-Chars</a>, Up:&nbsp;<a rel="up" accesskey="u" href="ARM-Syntax.html#ARM-Syntax">ARM Syntax</a> <hr> </div> <h5 class="subsubsection">9.3.2.1 Instruction Set Syntax</h5> <p>Two slightly different syntaxes are supported for ARM and THUMB instructions. The default, <code>divided</code>, uses the old style where ARM and THUMB instructions had their own, separate syntaxes. The new, <code>unified</code> syntax can be selected via the <code>.syntax</code> directive, and has the following main features: <dl> <dt>&bull;<dd>Immediate operands do not require a <code>#</code> prefix. <br><dt>&bull;<dd>The <code>IT</code> instruction may appear, and if it does it is validated against subsequent conditional affixes. In ARM mode it does not generate machine code, in THUMB mode it does. <br><dt>&bull;<dd>For ARM instructions the conditional affixes always appear at the end of the instruction. For THUMB instructions conditional affixes can be used, but only inside the scope of an <code>IT</code> instruction. <br><dt>&bull;<dd>All of the instructions new to the V6T2 architecture (and later) are available. (Only a few such instructions can be written in the <code>divided</code> syntax). <br><dt>&bull;<dd>The <code>.N</code> and <code>.W</code> suffixes are recognized and honored. <br><dt>&bull;<dd>All instructions set the flags if and only if they have an <code>s</code> affix. </dl> </body></html>
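As a concrete illustration of the directive described above (a sketch, not taken from the manual): GCC inline assembly can switch GAS into the unified syntax, where the '#' on immediates becomes optional and the conditional affix stays at the end of the mnemonic. This assumes an A32 (ARM-state) target, and the function and operand names are hypothetical.

#include <stdint.h>

/* r = (a == b) ? a + 4 : a */
static inline uint32_t add4_if_equal(uint32_t a, uint32_t b)
{
	uint32_t r;
	__asm__(".syntax unified\n\t"
		"cmp   %[a], %[b]\n\t"
		"addeq %[r], %[a], 4\n\t"	/* unified: '#4' may be written as '4' */
		"movne %[r], %[a]\n\t"
		".syntax divided"		/* restore the default syntax */
		: [r] "=&r" (r)
		: [a] "r" (a), [b] "r" (b)
		: "cc");
	return r;
}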
#ifndef AMDISPLAY_UTILS_H #define AMDISPLAY_UTILS_H #ifdef __cplusplus extern "C" { #endif int <API key>(int *width, int *height); int <API key>(int *width, int *height); /* scale OSD mode; only supports x1 and x2 */ int <API key>(int scale_wx, int scale_hx); #ifdef __cplusplus } #endif #endif
<?php class FTPException extends Exception { /** * Error Message if no native FTP support is available */ const FTP_SUPPORT_ERROR = 'The FTP functions are not available on this system!'; /** * Error Message if the given Host does not respond */ const <API key> = 'The specified host could not be contacted!'; /** * Error Message if no SSL-FTP is available and no fallback is used */ const <API key> = 'The connection via SSL could not be established!'; /** * Error Message if the given login information is not valid */ const <API key> = 'The credentials for the FTP connection are incorrect!'; /** * Error Message if the FTP server OS could not be determined. */ const CONNECT_UNKNOWN_OS = 'The operating system of the FTP server could not be identified!'; /** * Constructor */ public function __construct( $meldung, $code = 0 ) { parent::__construct( $meldung, $code ); } } ?>
using SmartStore.Web.Framework.Modelling; namespace SmartStore.PayPal.Models { public class <API key> : ModelBase { public <API key>() { } public bool CurrentPageIsBasket { get; set; } public string <API key> { get; set; } } }
<?php namespace app\models; use app\properties\HasProperties; use devgroup\TagDependencyHelper\ActiveRecordHelper; use Yii; use yii\behaviors\AttributeBehavior; use yii\caching\TagDependency; use yii\data\ActiveDataProvider; use yii\db\ActiveRecord; /** * This is the model class for table "property_group". * * @property integer $id * @property integer $object_id * @property string $name * @property integer $sort_order * @property integer $is_internal * @property integer $hidden_group_title */ class PropertyGroup extends ActiveRecord { private static $identity_map = []; private static $groups_by_object_id = []; /** * @inheritdoc */ public function behaviors() { return [ [ 'class' => AttributeBehavior::className(), 'attributes' => [ ActiveRecord::EVENT_BEFORE_INSERT => 'sort_order', ], 'value' => 0, ], [ 'class' => ActiveRecordHelper::className(), ], ]; } /** * @inheritdoc */ public static function tableName() { return '{{%property_group}}'; } /** * @inheritdoc */ public function rules() { return [ [['object_id', 'name'], 'required'], [['object_id', 'sort_order', 'is_internal', 'hidden_group_title'], 'integer'], [['name'], 'string'] ]; } /** * @inheritdoc */ public function attributeLabels() { return [ 'id' => Yii::t('app', 'ID'), 'object_id' => Yii::t('app', 'Object ID'), 'name' => Yii::t('app', 'Name'), 'sort_order' => Yii::t('app', 'Sort Order'), 'is_internal' => Yii::t('app', 'Is Internal'), 'hidden_group_title' => Yii::t('app', 'Hidden Group Title'), ]; } /** * Relation to \app\models\Object * @return \yii\db\ActiveQuery */ public function getObject() { return $this->hasOne(Object::className(), ['id' => 'object_id']); } /** * Search tasks * @param $params * @return ActiveDataProvider */ public function search($params) { /* @var $query \yii\db\ActiveQuery */ $query = self::find(); $dataProvider = new ActiveDataProvider( [ 'query' => $query, 'pagination' => [ 'pageSize' => 10, ], ] ); if (!($this->load($params))) { return $dataProvider; } $query->andFilterWhere(['id' => $this->id]); $query->andFilterWhere(['like', 'name', $this->name]); $query->andFilterWhere(['object_id' => $this->object_id]); $query->andFilterWhere(['is_internal' => $this->is_internal]); $query->andFilterWhere(['hidden_group_title' => $this->hidden_group_title]); return $dataProvider; } public static function findById($id) { if (!isset(static::$identity_map[$id])) { $cacheKey = "PropertyGroup:$id"; if (false === $group = Yii::$app->cache->get($cacheKey)) { if (null !== $group = static::findOne($id)) { Yii::$app->cache->set( $cacheKey, $group, 0, new TagDependency( [ 'tags' => [ ActiveRecordHelper::getObjectTag(static::className(), $id), ], ] ) ); } } static::$identity_map[$id] = $group; } return static::$identity_map[$id]; } /** * Relation to properties * @return \yii\db\ActiveQuery */ public function getProperties() { return $this->hasMany(Property::className(), ['property_group_id' => 'id'])->orderBy('sort_order'); } /** * @param $object_id * @param bool $withProperties * @return PropertyGroup[] */ public static function getForObjectId($object_id, $withProperties = false) { if (null === $object_id) { return []; } if (!isset(static::$groups_by_object_id[$object_id])) { $cacheKey = 'PropertyGroup:objectId:'.$object_id; static::$groups_by_object_id[$object_id] = Yii::$app->cache->get($cacheKey); if (!is_array(static::$groups_by_object_id[$object_id])) { $query = static::find() ->where(['object_id'=>$object_id]) ->orderBy('sort_order'); if ($withProperties === true) { $query = $query->with('properties'); } 
static::$groups_by_object_id[$object_id] = $query->all(); if (null !== $object = Object::findById($object_id)) { $tags = [ ActiveRecordHelper::getObjectTag($object, $object_id) ]; foreach (static::$groups_by_object_id[$object_id] as $propertyGroup){ $tags[] = ActiveRecordHelper::getObjectTag($propertyGroup, $propertyGroup->id); if ($withProperties === true) { foreach ($propertyGroup->properties as $prop) { if (isset(Property::$<API key>[$propertyGroup->id]) === false) { Property::$<API key>[$propertyGroup->id]=[]; } Property::$<API key>[$propertyGroup->id][] = $prop->id; Property::$identity_map[$prop->id] = $prop; } } } Yii::$app->cache->set( $cacheKey, static::$groups_by_object_id[$object_id], 0, new TagDependency( [ 'tags' => $tags, ] ) ); } } } return static::$groups_by_object_id[$object_id]; } /** * @param int $object_id * @param int $object_model_id * @return null|\yii\db\ActiveRecord[] */ public static function getForModel($object_id, $object_model_id) { $cacheKey = "PropertyGroupBy:$object_id:$object_model_id"; if (false === $groups = Yii::$app->cache->get($cacheKey)) { $group_ids = ObjectPropertyGroup::find() ->select('property_group_id') ->where([ 'object_id' => $object_id, 'object_model_id' => $object_model_id, ])->column(); if (null === $group_ids) { return null; } if (null === $groups = static::find()->where(['in', 'id', $group_ids])->all()) { return null; } if (null !== $object = Object::findById($object_id)) { Yii::$app->cache->set( $cacheKey, $groups, 0, new TagDependency( [ 'tags' => [ ActiveRecordHelper::getObjectTag($object, $object_id), ActiveRecordHelper::getObjectTag($object->object_class, $object_model_id), ], ] ) ); } } return $groups; } public function beforeDelete() { if (!parent::beforeDelete()) { return false; } $properties = Property::findAll(['property_group_id' => $this->id]); foreach ($properties as $prop) { $prop->delete(); } return true; } public function afterDelete() { ObjectPropertyGroup::deleteAll(['property_group_id' => $this->id]); parent::afterDelete(); } /** * @param ActiveRecord|HasProperties $model * @param string $idAttribute * @return bool */ public function appendToObjectModel(ActiveRecord $model, $idAttribute = 'id') { $object = Object::getForClass($model::className()); if (null === $object || !$model->hasAttribute($idAttribute)) { return false; } $link = new ObjectPropertyGroup(); $link->object_id = $object->id; $link->object_model_id = $model->$idAttribute; $link->property_group_id = $this->id; $result = $link->save(); $model-><API key>(); return $result; } } ?>
#ifndef <API key> #define <API key> #include <string> #include "catch_result_type.h" #include "catch_common.h" namespace Catch { class TestCase; class AssertionResult; struct AssertionInfo; struct SectionInfo; struct MessageInfo; class <API key>; struct Counts; struct IResultCapture { virtual ~IResultCapture(); virtual void assertionEnded( AssertionResult const& result ) = 0; virtual bool sectionStarted( SectionInfo const& sectionInfo, Counts& assertions ) = 0; virtual void sectionEnded( SectionInfo const& name, Counts const& assertions, double _durationInSeconds ) = 0; virtual void pushScopedMessage( MessageInfo const& message ) = 0; virtual void popScopedMessage( MessageInfo const& message ) = 0; virtual std::string getCurrentTestName() const = 0; virtual const AssertionResult* getLastResult() const = 0; virtual void <API key>( std::string const& message ) = 0; }; IResultCapture& getResultCapture(); } #endif // <API key>
package org.jnbt; /** * The <code>TAG_Float</code> tag. * @author Graham Edgecombe * */ public final class FloatTag extends Tag { /** * The value. */ private final float value; /** * Creates the tag. * @param name The name. * @param value The value. */ public FloatTag(String name, float value) { super(name); this.value = value; } @Override public Float getValue() { return value; } @Override public String toString() { String name = getName(); String append = ""; if(name != null && !name.equals("")) { append = "(\"" + this.getName() + "\")"; } return "TAG_Float" + append + ": " + value; } }
#ifndef <API key> #define <API key> #include <gnuradio/comedi/sink_s.h> #include <string> #include <comedilib.h> #include <stdexcept> namespace gr { namespace comedi { class sink_s_impl : public sink_s { private: // typedef for pointer to class work method typedef int (sink_s::*work_t)(int noutput_items, <API key> &input_items, gr_vector_void_star &output_items); unsigned int d_sampling_freq; std::string d_device_name; comedi_t *d_dev; int d_subdevice; int d_n_chan; void *d_map; int d_buffer_size; unsigned d_buf_front; unsigned d_buf_back; // random stats int d_nunderuns; // count of underruns void output_error_msg(const char *msg, int err); void bail(const char *msg, int err) throw (std::runtime_error); public: sink_s_impl(int sampling_freq, const std::string device_name); ~sink_s_impl(); bool check_topology(int ninputs, int noutputs); int work(int noutput_items, <API key> &input_items, gr_vector_void_star &output_items); }; } /* namespace comedi */ } /* namespace gr */ #endif /* <API key> */
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ var gTestfile = 'regress-89443.js'; var BUGNUMBER = 89443; var summary = 'Testing this script will compile without stack overflow'; printBugNumber(BUGNUMBER); printStatus (summary); // I don't know what these functions are supposed to be; use dummies - function isPlainHostName() { } function dnsDomainIs() { } // Here's the big function - function FindProxyForURL(url, host) { if (isPlainHostName(host) || dnsDomainIs(host, ".hennepin.lib.mn.us") || dnsDomainIs(host, ".hclib.org") ) return "DIRECT"; else if (isPlainHostName(host) // subscription database access || dnsDomainIs(host, ".asahi.com") || dnsDomainIs(host, ".2facts.com") || dnsDomainIs(host, ".oclc.org") || dnsDomainIs(host, ".collegesource.com") || dnsDomainIs(host, ".cq.com") || dnsDomainIs(host, ".grolier.com") || dnsDomainIs(host, ".groveart.com") || dnsDomainIs(host, ".groveopera.com") || dnsDomainIs(host, ".fsonline.com") || dnsDomainIs(host, ".carl.org") || dnsDomainIs(host, ".newslibrary.com") || dnsDomainIs(host, ".pioneerplanet.com") || dnsDomainIs(host, ".startribune.com") || dnsDomainIs(host, ".poemfinder.com") || dnsDomainIs(host, ".umi.com") || dnsDomainIs(host, ".referenceusa.com") || dnsDomainIs(host, ".sirs.com") || dnsDomainIs(host, ".krmediastream.com") || dnsDomainIs(host, ".gale.com") || dnsDomainIs(host, ".galenet.com") || dnsDomainIs(host, ".galegroup.com") || dnsDomainIs(host, ".facts.com") || dnsDomainIs(host, ".eb.com") || dnsDomainIs(host, ".worldbookonline.com") || dnsDomainIs(host, ".galegroup.com") || dnsDomainIs(host, ".accessscience.com") || dnsDomainIs(host, ".booksinprint.com") || dnsDomainIs(host, ".infolearning.com") || dnsDomainIs(host, ".standardpoor.com") // image servers || dnsDomainIs(host, ".akamaitech.net") || dnsDomainIs(host, ".akamai.net") || dnsDomainIs(host, ".yimg.com") || dnsDomainIs(host, ".imgis.com") || dnsDomainIs(host, ".ibsys.com") // KidsClick-linked kids search engines || dnsDomainIs(host, ".edview.com") || dnsDomainIs(host, ".searchopolis.com") || dnsDomainIs(host, ".onekey.com") || dnsDomainIs(host, ".askjeeves.com") // Non-subscription Reference Tools URLs from the RecWebSites DBData table || dnsDomainIs(host, "www.cnn.com") || dnsDomainIs(host, "www.emulateme.com") || dnsDomainIs(host, "terraserver.microsoft.com") || dnsDomainIs(host, "www.theodora.com") || dnsDomainIs(host, "www.3datlas.com") || dnsDomainIs(host, "www.infoplease.com") || dnsDomainIs(host, "www.switchboard.com") || dnsDomainIs(host, "www.bartleby.com") || dnsDomainIs(host, "www.mn-politics.com") || dnsDomainIs(host, "www.thesaurus.com") || dnsDomainIs(host, "www.usnews.com") || dnsDomainIs(host, "www.petersons.com") || dnsDomainIs(host, "www.collegenet.com") || dnsDomainIs(host, "www.m-w.com") || dnsDomainIs(host, "clever.net") || dnsDomainIs(host, "maps.expedia.com") || dnsDomainIs(host, "www.CollegeEdge.com") || dnsDomainIs(host, "www.homeworkcentral.com") || dnsDomainIs(host, "www.studyweb.com") || dnsDomainIs(host, "www.mnpro.com") // custom URLs for local and other access || dnsDomainIs(host, ".dsdukes.com") || dnsDomainIs(host, ".spsaints.com") || dnsDomainIs(host, ".mnzoo.com") || dnsDomainIs(host, ".realaudio.com") || dnsDomainIs(host, ".co.hennepin.mn.us") || dnsDomainIs(host, ".gov") || dnsDomainIs(host, ".org") || dnsDomainIs(host, ".edu") || dnsDomainIs(host, ".fox29.com") || dnsDomainIs(host, ".wcco.com") || dnsDomainIs(host, ".kstp.com") || dnsDomainIs(host, ".kmsp.com") || dnsDomainIs(host, ".kare11.com") || 
dnsDomainIs(host, ".macromedia.com") || dnsDomainIs(host, ".shockwave.com") || dnsDomainIs(host, ".wwf.com") || dnsDomainIs(host, ".wwfsuperstars.com") || dnsDomainIs(host, ".summerslam.com") || dnsDomainIs(host, ".yahooligans.com") || dnsDomainIs(host, ".mhoob.com") || dnsDomainIs(host, "www.hmonginternet.com") || dnsDomainIs(host, "www.hmongonline.com") || dnsDomainIs(host, ".yahoo.com") || dnsDomainIs(host, ".pokemon.com") || dnsDomainIs(host, ".bet.com") || dnsDomainIs(host, ".smallworld.com") || dnsDomainIs(host, ".cartoonnetwork.com") || dnsDomainIs(host, ".carmensandiego.com") || dnsDomainIs(host, ".disney.com") || dnsDomainIs(host, ".powerpuffgirls.com") || dnsDomainIs(host, ".aol.com") // Smithsonian || dnsDomainIs(host, "160.111.100.190") // Hotmail || dnsDomainIs(host, ".passport.com") || dnsDomainIs(host, ".hotmail.com") || dnsDomainIs(host, "216.33.236.24") || dnsDomainIs(host, "216.32.182.251") || dnsDomainIs(host, ".hotmail.msn.com") // K12 schools || dnsDomainIs(host, ".k12.al.us") || dnsDomainIs(host, ".k12.ak.us") || dnsDomainIs(host, ".k12.ar.us") || dnsDomainIs(host, ".k12.az.us") || dnsDomainIs(host, ".k12.ca.us") || dnsDomainIs(host, ".k12.co.us") || dnsDomainIs(host, ".k12.ct.us") || dnsDomainIs(host, ".k12.dc.us") || dnsDomainIs(host, ".k12.de.us") || dnsDomainIs(host, ".k12.fl.us") || dnsDomainIs(host, ".k12.ga.us") || dnsDomainIs(host, ".k12.hi.us") || dnsDomainIs(host, ".k12.id.us") || dnsDomainIs(host, ".k12.il.us") || dnsDomainIs(host, ".k12.in.us") || dnsDomainIs(host, ".k12.ia.us") || dnsDomainIs(host, ".k12.ks.us") || dnsDomainIs(host, ".k12.ky.us") || dnsDomainIs(host, ".k12.la.us") || dnsDomainIs(host, ".k12.me.us") || dnsDomainIs(host, ".k12.md.us") || dnsDomainIs(host, ".k12.ma.us") || dnsDomainIs(host, ".k12.mi.us") || dnsDomainIs(host, ".k12.mn.us") || dnsDomainIs(host, ".k12.ms.us") || dnsDomainIs(host, ".k12.mo.us") || dnsDomainIs(host, ".k12.mt.us") || dnsDomainIs(host, ".k12.ne.us") || dnsDomainIs(host, ".k12.nv.us") || dnsDomainIs(host, ".k12.nh.us") || dnsDomainIs(host, ".k12.nj.us") || dnsDomainIs(host, ".k12.nm.us") || dnsDomainIs(host, ".k12.ny.us") || dnsDomainIs(host, ".k12.nc.us") || dnsDomainIs(host, ".k12.nd.us") || dnsDomainIs(host, ".k12.oh.us") || dnsDomainIs(host, ".k12.ok.us") || dnsDomainIs(host, ".k12.or.us") || dnsDomainIs(host, ".k12.pa.us") || dnsDomainIs(host, ".k12.ri.us") || dnsDomainIs(host, ".k12.sc.us") || dnsDomainIs(host, ".k12.sd.us") || dnsDomainIs(host, ".k12.tn.us") || dnsDomainIs(host, ".k12.tx.us") || dnsDomainIs(host, ".k12.ut.us") || dnsDomainIs(host, ".k12.vt.us") || dnsDomainIs(host, ".k12.va.us") || dnsDomainIs(host, ".k12.wa.us") || dnsDomainIs(host, ".k12.wv.us") || dnsDomainIs(host, ".k12.wi.us") || dnsDomainIs(host, ".k12.wy.us") // U.S. 
Libraries || dnsDomainIs(host, ".lib.al.us") || dnsDomainIs(host, ".lib.ak.us") || dnsDomainIs(host, ".lib.ar.us") || dnsDomainIs(host, ".lib.az.us") || dnsDomainIs(host, ".lib.ca.us") || dnsDomainIs(host, ".lib.co.us") || dnsDomainIs(host, ".lib.ct.us") || dnsDomainIs(host, ".lib.dc.us") || dnsDomainIs(host, ".lib.de.us") || dnsDomainIs(host, ".lib.fl.us") || dnsDomainIs(host, ".lib.ga.us") || dnsDomainIs(host, ".lib.hi.us") || dnsDomainIs(host, ".lib.id.us") || dnsDomainIs(host, ".lib.il.us") || dnsDomainIs(host, ".lib.in.us") || dnsDomainIs(host, ".lib.ia.us") || dnsDomainIs(host, ".lib.ks.us") || dnsDomainIs(host, ".lib.ky.us") || dnsDomainIs(host, ".lib.la.us") || dnsDomainIs(host, ".lib.me.us") || dnsDomainIs(host, ".lib.md.us") || dnsDomainIs(host, ".lib.ma.us") || dnsDomainIs(host, ".lib.mi.us") || dnsDomainIs(host, ".lib.mn.us") || dnsDomainIs(host, ".lib.ms.us") || dnsDomainIs(host, ".lib.mo.us") || dnsDomainIs(host, ".lib.mt.us") || dnsDomainIs(host, ".lib.ne.us") || dnsDomainIs(host, ".lib.nv.us") || dnsDomainIs(host, ".lib.nh.us") || dnsDomainIs(host, ".lib.nj.us") || dnsDomainIs(host, ".lib.nm.us") || dnsDomainIs(host, ".lib.ny.us") || dnsDomainIs(host, ".lib.nc.us") || dnsDomainIs(host, ".lib.nd.us") || dnsDomainIs(host, ".lib.oh.us") || dnsDomainIs(host, ".lib.ok.us") || dnsDomainIs(host, ".lib.or.us") || dnsDomainIs(host, ".lib.pa.us") || dnsDomainIs(host, ".lib.ri.us") || dnsDomainIs(host, ".lib.sc.us") || dnsDomainIs(host, ".lib.sd.us") || dnsDomainIs(host, ".lib.tn.us") || dnsDomainIs(host, ".lib.tx.us") || dnsDomainIs(host, ".lib.ut.us") || dnsDomainIs(host, ".lib.vt.us") || dnsDomainIs(host, ".lib.va.us") || dnsDomainIs(host, ".lib.wa.us") || dnsDomainIs(host, ".lib.wv.us") || dnsDomainIs(host, ".lib.wi.us") || dnsDomainIs(host, ".lib.wy.us") // U.S. Cities || dnsDomainIs(host, ".ci.al.us") || dnsDomainIs(host, ".ci.ak.us") || dnsDomainIs(host, ".ci.ar.us") || dnsDomainIs(host, ".ci.az.us") || dnsDomainIs(host, ".ci.ca.us") || dnsDomainIs(host, ".ci.co.us") || dnsDomainIs(host, ".ci.ct.us") || dnsDomainIs(host, ".ci.dc.us") || dnsDomainIs(host, ".ci.de.us") || dnsDomainIs(host, ".ci.fl.us") || dnsDomainIs(host, ".ci.ga.us") || dnsDomainIs(host, ".ci.hi.us") || dnsDomainIs(host, ".ci.id.us") || dnsDomainIs(host, ".ci.il.us") || dnsDomainIs(host, ".ci.in.us") || dnsDomainIs(host, ".ci.ia.us") || dnsDomainIs(host, ".ci.ks.us") || dnsDomainIs(host, ".ci.ky.us") || dnsDomainIs(host, ".ci.la.us") || dnsDomainIs(host, ".ci.me.us") || dnsDomainIs(host, ".ci.md.us") || dnsDomainIs(host, ".ci.ma.us") || dnsDomainIs(host, ".ci.mi.us") || dnsDomainIs(host, ".ci.mn.us") || dnsDomainIs(host, ".ci.ms.us") || dnsDomainIs(host, ".ci.mo.us") || dnsDomainIs(host, ".ci.mt.us") || dnsDomainIs(host, ".ci.ne.us") || dnsDomainIs(host, ".ci.nv.us") || dnsDomainIs(host, ".ci.nh.us") || dnsDomainIs(host, ".ci.nj.us") || dnsDomainIs(host, ".ci.nm.us") || dnsDomainIs(host, ".ci.ny.us") || dnsDomainIs(host, ".ci.nc.us") || dnsDomainIs(host, ".ci.nd.us") || dnsDomainIs(host, ".ci.oh.us") || dnsDomainIs(host, ".ci.ok.us") || dnsDomainIs(host, ".ci.or.us") || dnsDomainIs(host, ".ci.pa.us") || dnsDomainIs(host, ".ci.ri.us") || dnsDomainIs(host, ".ci.sc.us") || dnsDomainIs(host, ".ci.sd.us") || dnsDomainIs(host, ".ci.tn.us") || dnsDomainIs(host, ".ci.tx.us") || dnsDomainIs(host, ".ci.ut.us") || dnsDomainIs(host, ".ci.vt.us") || dnsDomainIs(host, ".ci.va.us") || dnsDomainIs(host, ".ci.wa.us") || dnsDomainIs(host, ".ci.wv.us") || dnsDomainIs(host, ".ci.wi.us") || dnsDomainIs(host, ".ci.wy.us") // U.S. 
Counties || dnsDomainIs(host, ".co.al.us") || dnsDomainIs(host, ".co.ak.us") || dnsDomainIs(host, ".co.ar.us") || dnsDomainIs(host, ".co.az.us") || dnsDomainIs(host, ".co.ca.us") || dnsDomainIs(host, ".co.co.us") || dnsDomainIs(host, ".co.ct.us") || dnsDomainIs(host, ".co.dc.us") || dnsDomainIs(host, ".co.de.us") || dnsDomainIs(host, ".co.fl.us") || dnsDomainIs(host, ".co.ga.us") || dnsDomainIs(host, ".co.hi.us") || dnsDomainIs(host, ".co.id.us") || dnsDomainIs(host, ".co.il.us") || dnsDomainIs(host, ".co.in.us") || dnsDomainIs(host, ".co.ia.us") || dnsDomainIs(host, ".co.ks.us") || dnsDomainIs(host, ".co.ky.us") || dnsDomainIs(host, ".co.la.us") || dnsDomainIs(host, ".co.me.us") || dnsDomainIs(host, ".co.md.us") || dnsDomainIs(host, ".co.ma.us") || dnsDomainIs(host, ".co.mi.us") || dnsDomainIs(host, ".co.mn.us") || dnsDomainIs(host, ".co.ms.us") || dnsDomainIs(host, ".co.mo.us") || dnsDomainIs(host, ".co.mt.us") || dnsDomainIs(host, ".co.ne.us") || dnsDomainIs(host, ".co.nv.us") || dnsDomainIs(host, ".co.nh.us") || dnsDomainIs(host, ".co.nj.us") || dnsDomainIs(host, ".co.nm.us") || dnsDomainIs(host, ".co.ny.us") || dnsDomainIs(host, ".co.nc.us") || dnsDomainIs(host, ".co.nd.us") || dnsDomainIs(host, ".co.oh.us") || dnsDomainIs(host, ".co.ok.us") || dnsDomainIs(host, ".co.or.us") || dnsDomainIs(host, ".co.pa.us") || dnsDomainIs(host, ".co.ri.us") || dnsDomainIs(host, ".co.sc.us") || dnsDomainIs(host, ".co.sd.us") || dnsDomainIs(host, ".co.tn.us") || dnsDomainIs(host, ".co.tx.us") || dnsDomainIs(host, ".co.ut.us") || dnsDomainIs(host, ".co.vt.us") || dnsDomainIs(host, ".co.va.us") || dnsDomainIs(host, ".co.wa.us") || dnsDomainIs(host, ".co.wv.us") || dnsDomainIs(host, ".co.wi.us") || dnsDomainIs(host, ".co.wy.us") // U.S. States || dnsDomainIs(host, ".state.al.us") || dnsDomainIs(host, ".state.ak.us") || dnsDomainIs(host, ".state.ar.us") || dnsDomainIs(host, ".state.az.us") || dnsDomainIs(host, ".state.ca.us") || dnsDomainIs(host, ".state.co.us") || dnsDomainIs(host, ".state.ct.us") || dnsDomainIs(host, ".state.dc.us") || dnsDomainIs(host, ".state.de.us") || dnsDomainIs(host, ".state.fl.us") || dnsDomainIs(host, ".state.ga.us") || dnsDomainIs(host, ".state.hi.us") || dnsDomainIs(host, ".state.id.us") || dnsDomainIs(host, ".state.il.us") || dnsDomainIs(host, ".state.in.us") || dnsDomainIs(host, ".state.ia.us") || dnsDomainIs(host, ".state.ks.us") || dnsDomainIs(host, ".state.ky.us") || dnsDomainIs(host, ".state.la.us") || dnsDomainIs(host, ".state.me.us") || dnsDomainIs(host, ".state.md.us") || dnsDomainIs(host, ".state.ma.us") || dnsDomainIs(host, ".state.mi.us") || dnsDomainIs(host, ".state.mn.us") || dnsDomainIs(host, ".state.ms.us") || dnsDomainIs(host, ".state.mo.us") || dnsDomainIs(host, ".state.mt.us") || dnsDomainIs(host, ".state.ne.us") || dnsDomainIs(host, ".state.nv.us") || dnsDomainIs(host, ".state.nh.us") || dnsDomainIs(host, ".state.nj.us") || dnsDomainIs(host, ".state.nm.us") || dnsDomainIs(host, ".state.ny.us") || dnsDomainIs(host, ".state.nc.us") || dnsDomainIs(host, ".state.nd.us") || dnsDomainIs(host, ".state.oh.us") || dnsDomainIs(host, ".state.ok.us") || dnsDomainIs(host, ".state.or.us") || dnsDomainIs(host, ".state.pa.us") || dnsDomainIs(host, ".state.ri.us") || dnsDomainIs(host, ".state.sc.us") || dnsDomainIs(host, ".state.sd.us") || dnsDomainIs(host, ".state.tn.us") || dnsDomainIs(host, ".state.tx.us") || dnsDomainIs(host, ".state.ut.us") || dnsDomainIs(host, ".state.vt.us") || dnsDomainIs(host, ".state.va.us") || dnsDomainIs(host, ".state.wa.us") || dnsDomainIs(host, 
".state.wv.us") || dnsDomainIs(host, ".state.wi.us") || dnsDomainIs(host, ".state.wy.us") // KidsClick URLs || dnsDomainIs(host, "12.16.163.163") || dnsDomainIs(host, "128.59.173.136") || dnsDomainIs(host, "165.112.78.61") || dnsDomainIs(host, "216.55.23.140") || dnsDomainIs(host, "63.111.53.150") || dnsDomainIs(host, "64.94.206.8") || dnsDomainIs(host, "abc.go.com") || dnsDomainIs(host, "acmepet.petsmart.com") || dnsDomainIs(host, "adver-net.com") || dnsDomainIs(host, "aint-it-cool-news.com") || dnsDomainIs(host, "akidsheart.com") || dnsDomainIs(host, "alabanza.com") || dnsDomainIs(host, "allerdays.com") || dnsDomainIs(host, "allgame.com") || dnsDomainIs(host, "allowancenet.com") || dnsDomainIs(host, "amish-heartland.com") || dnsDomainIs(host, "ancienthistory.about.com") || dnsDomainIs(host, "animals.about.com") || dnsDomainIs(host, "antenna.nl") || dnsDomainIs(host, "arcweb.sos.state.or.us") || dnsDomainIs(host, "artistmummer.homestead.com") || dnsDomainIs(host, "artists.vh1.com") || dnsDomainIs(host, "arts.lausd.k12.ca.us") || dnsDomainIs(host, "asiatravel.com") || dnsDomainIs(host, "asterius.com") || dnsDomainIs(host, "atlas.gc.ca") || dnsDomainIs(host, "atschool.eduweb.co.uk") || dnsDomainIs(host, "ayya.pd.net") || dnsDomainIs(host, "babelfish.altavista.com") || dnsDomainIs(host, "babylon5.warnerbros.com") || dnsDomainIs(host, "banzai.neosoft.com") || dnsDomainIs(host, "barneyonline.com") || dnsDomainIs(host, "baroque-music.com") || dnsDomainIs(host, "barsoom.msss.com") || dnsDomainIs(host, "baseball-almanac.com") || dnsDomainIs(host, "bcadventure.com") || dnsDomainIs(host, "beadiecritters.hosting4less.com") || dnsDomainIs(host, "beverlyscrafts.com") || dnsDomainIs(host, "biology.about.com") || dnsDomainIs(host, "birding.about.com") || dnsDomainIs(host, "boatsafe.com") || dnsDomainIs(host, "bombpop.com") || dnsDomainIs(host, "boulter.com") || dnsDomainIs(host, "<API key>.com") || dnsDomainIs(host, "buckman.pps.k12.or.us") || dnsDomainIs(host, "buffalobills.com") || dnsDomainIs(host, "bvsd.k12.co.us") || dnsDomainIs(host, "cagle.slate.msn.com") || dnsDomainIs(host, "calc.entisoft.com") || dnsDomainIs(host, "canada.gc.ca") || dnsDomainIs(host, "candleandsoap.about.com") || dnsDomainIs(host, "caselaw.lp.findlaw.com") || dnsDomainIs(host, "catalog.com") || dnsDomainIs(host, "catalog.socialstudies.com") || dnsDomainIs(host, "cavern.com") || dnsDomainIs(host, "cbs.sportsline.com") || dnsDomainIs(host, "cc.matsuyama-u.ac.jp") || dnsDomainIs(host, "celt.net") || dnsDomainIs(host, "cgfa.kelloggcreek.com") || dnsDomainIs(host, "channel4000.com") || dnsDomainIs(host, "chess.delorie.com") || dnsDomainIs(host, "chess.liveonthenet.com") || dnsDomainIs(host, "childfun.com") || dnsDomainIs(host, "christmas.com") || dnsDomainIs(host, "citystar.com") || dnsDomainIs(host, "claim.goldrush.com") || dnsDomainIs(host, "clairerosemaryjane.com") || dnsDomainIs(host, "clevermedia.com") || dnsDomainIs(host, "cobblestonepub.com") || dnsDomainIs(host, "codebrkr.infopages.net") || dnsDomainIs(host, "colitz.com") || dnsDomainIs(host, "collections.ic.gc.ca") || dnsDomainIs(host, "coloquio.com") || dnsDomainIs(host, "come.to") || dnsDomainIs(host, "coombs.anu.edu.au") || dnsDomainIs(host, "crafterscommunity.com") || dnsDomainIs(host, "craftsforkids.about.com") || dnsDomainIs(host, "creativity.net") || dnsDomainIs(host, "cslewis.drzeus.net") || dnsDomainIs(host, "cust.idl.com.au") || dnsDomainIs(host, "cvs.anu.edu.au") || dnsDomainIs(host, "cybersleuth-kids.com") || dnsDomainIs(host, "cybertown.com") || 
dnsDomainIs(host, "darkfish.com") || dnsDomainIs(host, "datadragon.com") || dnsDomainIs(host, "davesite.com") || dnsDomainIs(host, "dbertens.www.cistron.nl") || dnsDomainIs(host, "detnews.com") || dnsDomainIs(host, "dhr.dos.state.fl.us") || dnsDomainIs(host, "dialspace.dial.pipex.com") || dnsDomainIs(host, "dictionaries.travlang.com") || dnsDomainIs(host, "disney.go.com") || dnsDomainIs(host, "disneyland.disney.go.com") || dnsDomainIs(host, "district.gresham.k12.or.us") || dnsDomainIs(host, "dmarie.com") || dnsDomainIs(host, "dreamwater.com") || dnsDomainIs(host, "duke.fuse.net") || dnsDomainIs(host, "earlyamerica.com") || dnsDomainIs(host, "earthsky.com") || dnsDomainIs(host, "easyweb.easynet.co.uk") || dnsDomainIs(host, "ecards1.bansheeweb.com") || dnsDomainIs(host, "edugreen.teri.res.in") || dnsDomainIs(host, "edwardlear.tripod.com") || dnsDomainIs(host, "eelink.net") || dnsDomainIs(host, "elizabethsings.com") || dnsDomainIs(host, "enature.com") || dnsDomainIs(host, "encarta.msn.com") || dnsDomainIs(host, "endangeredspecie.com") || dnsDomainIs(host, "enterprise.america.com") || dnsDomainIs(host, "ericae.net") || dnsDomainIs(host, "esl.about.com") || dnsDomainIs(host, "eveander.com") || dnsDomainIs(host, "exn.ca") || dnsDomainIs(host, "fallscam.niagara.com") || dnsDomainIs(host, "family.go.com") || dnsDomainIs(host, "family2.go.com") || dnsDomainIs(host, "familyeducation.com") || dnsDomainIs(host, "finditquick.com") || dnsDomainIs(host, "fln-bma.yazigi.com.br") || dnsDomainIs(host, "fln-con.yazigi.com.br") || dnsDomainIs(host, "food.epicurious.com") || dnsDomainIs(host, "forums.sympatico.ca") || dnsDomainIs(host, "fotw.vexillum.com") || dnsDomainIs(host, "fox.nstn.ca") || dnsDomainIs(host, "framingham.com") || dnsDomainIs(host, "freevote.com") || dnsDomainIs(host, "freeweb.pdq.net") || dnsDomainIs(host, "games.yahoo.com") || dnsDomainIs(host, "gardening.sierrahome.com") || dnsDomainIs(host, "gardenofpraise.com") || dnsDomainIs(host, "gcclearn.gcc.cc.va.us") || dnsDomainIs(host, "genealogytoday.com") || dnsDomainIs(host, "genesis.ne.mediaone.net") || dnsDomainIs(host, "geniefind.com") || dnsDomainIs(host, "geography.about.com") || dnsDomainIs(host, "gf.state.wy.us") || dnsDomainIs(host, "gi.grolier.com") || dnsDomainIs(host, "golf.com") || dnsDomainIs(host, "greatseal.com") || dnsDomainIs(host, "guardians.net") || dnsDomainIs(host, "hamlet.hypermart.net") || dnsDomainIs(host, "happypuppy.com") || dnsDomainIs(host, "harcourt.fsc.follett.com") || dnsDomainIs(host, "haringkids.com") || dnsDomainIs(host, "harrietmaysavitz.com") || dnsDomainIs(host, "harrypotter.warnerbros.com") || dnsDomainIs(host, "hca.gilead.org.il") || dnsDomainIs(host, "header.future.easyspace.com") || dnsDomainIs(host, "historymedren.about.com") || dnsDomainIs(host, "home.att.net") || dnsDomainIs(host, "home.austin.rr.com") || dnsDomainIs(host, "home.capu.net") || dnsDomainIs(host, "home.cfl.rr.com") || dnsDomainIs(host, "home.clara.net") || dnsDomainIs(host, "home.clear.net.nz") || dnsDomainIs(host, "home.earthlink.net") || dnsDomainIs(host, "home.eznet.net") || dnsDomainIs(host, "home.flash.net") || dnsDomainIs(host, "home.hiwaay.net") || dnsDomainIs(host, "home.hkstar.com") || dnsDomainIs(host, "home.ici.net") || dnsDomainIs(host, "home.inreach.com") || dnsDomainIs(host, "home.interlynx.net") || dnsDomainIs(host, "home.istar.ca") || dnsDomainIs(host, "home.mira.net") || dnsDomainIs(host, "home.nycap.rr.com") || dnsDomainIs(host, "home.online.no") || dnsDomainIs(host, "home.pb.net") || dnsDomainIs(host, 
"home2.pacific.net.sg") || dnsDomainIs(host, "homearts.com") || dnsDomainIs(host, "homepage.mac.com") || dnsDomainIs(host, "hometown.aol.com") || dnsDomainIs(host, "homiliesbyemail.com") || dnsDomainIs(host, "hotei.fix.co.jp") || dnsDomainIs(host, "hotwired.lycos.com") || dnsDomainIs(host, "hp.vector.co.jp") || dnsDomainIs(host, "hum.amu.edu.pl") || dnsDomainIs(host, "i-cias.com") || dnsDomainIs(host, "icatapults.freeservers.com") || dnsDomainIs(host, "ind.cioe.com") || dnsDomainIs(host, "info.ex.ac.uk") || dnsDomainIs(host, "infocan.gc.ca") || dnsDomainIs(host, "infoservice.gc.ca") || dnsDomainIs(host, "interoz.com") || dnsDomainIs(host, "ireland.iol.ie") || dnsDomainIs(host, "is.dal.ca") || dnsDomainIs(host, "itss.raytheon.com") || dnsDomainIs(host, "iul.com") || dnsDomainIs(host, "jameswhitcombriley.com") || dnsDomainIs(host, "jellieszone.com") || dnsDomainIs(host, "jordan.sportsline.com") || dnsDomainIs(host, "judyanddavid.com") || dnsDomainIs(host, "jurai.murdoch.edu.au") || dnsDomainIs(host, "just.about.com") || dnsDomainIs(host, "kayleigh.tierranet.com") || dnsDomainIs(host, "kcwingwalker.tripod.com") || dnsDomainIs(host, "kidexchange.about.com") || dnsDomainIs(host, "kids-world.colgatepalmolive.com") || dnsDomainIs(host, "kids.mysterynet.com") || dnsDomainIs(host, "kids.ot.com") || dnsDomainIs(host, "kidsartscrafts.about.com") || dnsDomainIs(host, "kidsastronomy.about.com") || dnsDomainIs(host, "kidscience.about.com") || dnsDomainIs(host, "kidscience.miningco.com") || dnsDomainIs(host, "kidscollecting.about.com") || dnsDomainIs(host, "kidsfun.co.uk") || dnsDomainIs(host, "kidsinternet.about.com") || dnsDomainIs(host, "kidslangarts.about.com") || dnsDomainIs(host, "kidspenpals.about.com") || dnsDomainIs(host, "kitecast.com") || dnsDomainIs(host, "knight.city.ba.k12.md.us") || dnsDomainIs(host, "kodak.com") || dnsDomainIs(host, "kwanzaa4kids.homestead.com") || dnsDomainIs(host, "lagos.africaonline.com") || dnsDomainIs(host, "lancearmstrong.com") || dnsDomainIs(host, "landru.i-link-2.net") || dnsDomainIs(host, "lang.nagoya-u.ac.jp") || dnsDomainIs(host, "lascala.milano.it") || dnsDomainIs(host, "latinoculture.about.com") || dnsDomainIs(host, "litcal.yasuda-u.ac.jp") || dnsDomainIs(host, "littlebit.com") || dnsDomainIs(host, "live.edventures.com") || dnsDomainIs(host, "look.net") || dnsDomainIs(host, "lycoskids.infoplease.com") || dnsDomainIs(host, "lynx.uio.no") || dnsDomainIs(host, "macdict.dict.mq.edu.au") || dnsDomainIs(host, "maori.culture.co.nz") || dnsDomainIs(host, "marktwain.about.com") || dnsDomainIs(host, "marktwain.miningco.com") || dnsDomainIs(host, "mars2030.net") || dnsDomainIs(host, "martin.parasitology.mcgill.ca") || dnsDomainIs(host, "martinlutherking.8m.com") || dnsDomainIs(host, "mastercollector.com") || dnsDomainIs(host, "mathcentral.uregina.ca") || dnsDomainIs(host, "members.aol.com") || dnsDomainIs(host, "members.carol.net") || dnsDomainIs(host, "members.cland.net") || dnsDomainIs(host, "members.cruzio.com") || dnsDomainIs(host, "members.easyspace.com") || dnsDomainIs(host, "members.eisa.net.au") || dnsDomainIs(host, "members.home.net") || dnsDomainIs(host, "members.iinet.net.au") || dnsDomainIs(host, "members.nbci.com") || dnsDomainIs(host, "members.ozemail.com.au") || dnsDomainIs(host, "members.surfsouth.com") || dnsDomainIs(host, "members.theglobe.com") || dnsDomainIs(host, "members.tripod.com") || dnsDomainIs(host, "mexplaza.udg.mx") || dnsDomainIs(host, "mgfx.com") || dnsDomainIs(host, "microimg.com") || dnsDomainIs(host, "midusa.net") || dnsDomainIs(host, 
"mildan.com") || dnsDomainIs(host, "millennianet.com") || dnsDomainIs(host, "mindbreakers.e-fun.nu") || dnsDomainIs(host, "missjanet.xs4all.nl") || dnsDomainIs(host, "mistral.culture.fr") || dnsDomainIs(host, "mobileation.com") || dnsDomainIs(host, "mrshowbiz.go.com") || dnsDomainIs(host, "ms.simplenet.com") || dnsDomainIs(host, "museum.gov.ns.ca") || dnsDomainIs(host, "music.excite.com") || dnsDomainIs(host, "musicfinder.yahoo.com") || dnsDomainIs(host, "my.freeway.net") || dnsDomainIs(host, "mytrains.com") || dnsDomainIs(host, "nativeauthors.com") || dnsDomainIs(host, "nba.com") || dnsDomainIs(host, "nch.ari.net") || dnsDomainIs(host, "neonpeach.tripod.com") || dnsDomainIs(host, "net.indra.com") || dnsDomainIs(host, "ngeorgia.com") || dnsDomainIs(host, "ngp.ngpc.state.ne.us") || dnsDomainIs(host, "nhd.heinle.com") || dnsDomainIs(host, "nick.com") || dnsDomainIs(host, "normandy.eb.com") || dnsDomainIs(host, "northshore.shore.net") || dnsDomainIs(host, "now2000.com") || dnsDomainIs(host, "npc.nunavut.ca") || dnsDomainIs(host, "ns2.carib-link.net") || dnsDomainIs(host, "ntl.sympatico.ca") || dnsDomainIs(host, "oceanographer.navy.mil") || dnsDomainIs(host, "oddens.geog.uu.nl") || dnsDomainIs(host, "officialcitysites.com") || dnsDomainIs(host, "oneida-nation.net") || dnsDomainIs(host, "onlinegeorgia.com") || dnsDomainIs(host, "originator_2.tripod.com") || dnsDomainIs(host, "ortech-engr.com") || dnsDomainIs(host, "osage.voorhees.k12.nj.us") || dnsDomainIs(host, "osiris.sund.ac.uk") || dnsDomainIs(host, "ourworld.compuserve.com") || dnsDomainIs(host, "outdoorphoto.com") || dnsDomainIs(host, "pages.map.com") || dnsDomainIs(host, "pages.prodigy.com") || dnsDomainIs(host, "pages.prodigy.net") || dnsDomainIs(host, "pages.tca.net") || dnsDomainIs(host, "parcsafari.qc.ca") || dnsDomainIs(host, "parenthoodweb.com") || dnsDomainIs(host, "pathfinder.com") || dnsDomainIs(host, "people.clarityconnect.com") || dnsDomainIs(host, "people.enternet.com.au") || dnsDomainIs(host, "people.ne.mediaone.net") || dnsDomainIs(host, "phonics.jazzles.com") || dnsDomainIs(host, "pibburns.com") || dnsDomainIs(host, "pilgrims.net") || dnsDomainIs(host, "pinenet.com") || dnsDomainIs(host, "place.scholastic.com") || dnsDomainIs(host, "playground.kodak.com") || dnsDomainIs(host, "politicalgraveyard.com") || dnsDomainIs(host, "polk.ga.net") || dnsDomainIs(host, "pompstory.home.mindspring.com") || dnsDomainIs(host, "popularmechanics.com") || dnsDomainIs(host, "projects.edtech.sandi.net") || dnsDomainIs(host, "psyche.usno.navy.mil") || dnsDomainIs(host, "pubweb.parc.xerox.com") || dnsDomainIs(host, "puzzlemaker.school.discovery.com") || dnsDomainIs(host, "quest.classroom.com") || dnsDomainIs(host, "quilting.about.com") || dnsDomainIs(host, "rabbitmoon.home.mindspring.com") || dnsDomainIs(host, "radio.cbc.ca") || dnsDomainIs(host, "rats2u.com") || dnsDomainIs(host, "rbcm1.rbcm.gov.bc.ca") || dnsDomainIs(host, "readplay.com") || dnsDomainIs(host, "recipes4children.homestead.com") || dnsDomainIs(host, "redsox.com") || dnsDomainIs(host, "renaissance.district96.k12.il.us") || dnsDomainIs(host, "rhyme.lycos.com") || dnsDomainIs(host, "rhythmweb.com") || dnsDomainIs(host, "riverresource.com") || dnsDomainIs(host, "rockhoundingar.com") || dnsDomainIs(host, "rockies.mlb.com") || dnsDomainIs(host, "rosecity.net") || dnsDomainIs(host, "rr-vs.informatik.uni-ulm.de") || dnsDomainIs(host, "rubens.anu.edu.au") || dnsDomainIs(host, "rummelplatz.uni-mannheim.de") || dnsDomainIs(host, "sandbox.xerox.com") || dnsDomainIs(host, "sarah.fredart.com") 
|| dnsDomainIs(host, "schmidel.com") || dnsDomainIs(host, "scholastic.com") || dnsDomainIs(host, "school.discovery.com") || dnsDomainIs(host, "schoolcentral.com") || dnsDomainIs(host, "seattletimes.nwsource.com") || dnsDomainIs(host, "sericulum.com") || dnsDomainIs(host, "sf.airforce.com") || dnsDomainIs(host, "shop.usps.com") || dnsDomainIs(host, "showcase.netins.net") || dnsDomainIs(host, "sikids.com") || dnsDomainIs(host, "sites.huji.ac.il") || dnsDomainIs(host, "sjliving.com") || dnsDomainIs(host, "skullduggery.com") || dnsDomainIs(host, "skyways.lib.ks.us") || dnsDomainIs(host, "snowdaymovie.nick.com") || dnsDomainIs(host, "sosa21.hypermart.net") || dnsDomainIs(host, "soundamerica.com") || dnsDomainIs(host, "spaceboy.nasda.go.jp") || dnsDomainIs(host, "sports.nfl.com") || dnsDomainIs(host, "sportsillustrated.cnn.com") || dnsDomainIs(host, "starwars.hasbro.com") || dnsDomainIs(host, "statelibrary.dcr.state.nc.us") || dnsDomainIs(host, "streetplay.com") || dnsDomainIs(host, "sts.gsc.nrcan.gc.ca") || dnsDomainIs(host, "sunniebunniezz.com") || dnsDomainIs(host, "sunsite.nus.edu.sg") || dnsDomainIs(host, "sunsite.sut.ac.jp") || dnsDomainIs(host, "superm.bart.nl") || dnsDomainIs(host, "surf.to") || dnsDomainIs(host, "svinet2.fs.fed.us") || dnsDomainIs(host, "swiminfo.com") || dnsDomainIs(host, "tabletennis.about.com") || dnsDomainIs(host, "teacher.scholastic.com") || dnsDomainIs(host, "theforce.net") || dnsDomainIs(host, "thejessicas.homestead.com") || dnsDomainIs(host, "themes.editthispage.com") || dnsDomainIs(host, "theory.uwinnipeg.ca") || dnsDomainIs(host, "theshadowlands.net") || dnsDomainIs(host, "thinks.com") || dnsDomainIs(host, "thryomanes.tripod.com") || dnsDomainIs(host, "time_zone.tripod.com") || dnsDomainIs(host, "titania.cobuild.collins.co.uk") || dnsDomainIs(host, "torre.duomo.pisa.it") || dnsDomainIs(host, "touregypt.net") || dnsDomainIs(host, "toycollecting.about.com") || dnsDomainIs(host, "trace.ntu.ac.uk") || dnsDomainIs(host, "travelwithkids.about.com") || dnsDomainIs(host, "tukids.tucows.com") || dnsDomainIs(host, "tv.yahoo.com") || dnsDomainIs(host, "tycho.usno.navy.mil") || dnsDomainIs(host, "ubl.artistdirect.com") || dnsDomainIs(host, "uk-pages.net") || dnsDomainIs(host, "ukraine.uazone.net") || dnsDomainIs(host, "unmuseum.mus.pa.us") || dnsDomainIs(host, "us.imdb.com") || dnsDomainIs(host, "userpage.chemie.fu-berlin.de") || dnsDomainIs(host, "userpage.fu-berlin.de") || dnsDomainIs(host, "userpages.aug.com") || dnsDomainIs(host, "users.aol.com") || dnsDomainIs(host, "users.bigpond.net.au") || dnsDomainIs(host, "users.breathemail.net") || dnsDomainIs(host, "users.erols.com") || dnsDomainIs(host, "users.imag.net") || dnsDomainIs(host, "users.inetw.net") || dnsDomainIs(host, "users.massed.net") || dnsDomainIs(host, "users.skynet.be") || dnsDomainIs(host, "users.uniserve.com") || dnsDomainIs(host, "venus.spaceports.com") || dnsDomainIs(host, "vgstrategies.about.com") || dnsDomainIs(host, "victorian.fortunecity.com") || dnsDomainIs(host, "vilenski.com") || dnsDomainIs(host, "village.infoweb.ne.jp") || dnsDomainIs(host, "virtual.finland.fi") || dnsDomainIs(host, "vrml.fornax.hu") || dnsDomainIs(host, "vvv.com") || dnsDomainIs(host, "w1.xrefer.com") || dnsDomainIs(host, "w3.one.net") || dnsDomainIs(host, "w3.rz-berlin.mpg.de") || dnsDomainIs(host, "w3.trib.com") || dnsDomainIs(host, "wallofsound.go.com") || dnsDomainIs(host, "web.aimnet.com") || dnsDomainIs(host, "web.ccsd.k12.wy.us") || dnsDomainIs(host, "web.cs.ualberta.ca") || dnsDomainIs(host, "web.idirect.com") || 
dnsDomainIs(host, "web.kyoto-inet.or.jp") || dnsDomainIs(host, "web.macam98.ac.il") || dnsDomainIs(host, "web.massvacation.com") || dnsDomainIs(host, "web.one.net.au") || dnsDomainIs(host, "web.qx.net") || dnsDomainIs(host, "web.uvic.ca") || dnsDomainIs(host, "web2.airmail.net") || dnsDomainIs(host, "webcoast.com") || dnsDomainIs(host, "webgames.kalisto.com") || dnsDomainIs(host, "webhome.idirect.com") || dnsDomainIs(host, "webpages.homestead.com") || dnsDomainIs(host, "webrum.uni-mannheim.de") || dnsDomainIs(host, "webusers.anet-stl.com") || dnsDomainIs(host, "welcome.to") || dnsDomainIs(host, "wgntv.com") || dnsDomainIs(host, "whales.magna.com.au") || dnsDomainIs(host, "wildheart.com") || dnsDomainIs(host, "wilstar.net") || dnsDomainIs(host, "winter-wonderland.com") || dnsDomainIs(host, "women.com") || dnsDomainIs(host, "woodrow.mpls.frb.fed.us") || dnsDomainIs(host, "wordzap.com") || dnsDomainIs(host, "worldkids.net") || dnsDomainIs(host, "worldwideguide.net") || dnsDomainIs(host, "ww3.bay.k12.fl.us") || dnsDomainIs(host, "ww3.sportsline.com") || dnsDomainIs(host, "www-groups.dcs.st-and.ac.uk") || dnsDomainIs(host, "www-public.rz.uni-duesseldorf.de") || dnsDomainIs(host, "www.1stkids.com") || dnsDomainIs(host, "www.2020tech.com") || dnsDomainIs(host, "www.21stcenturytoys.com") || dnsDomainIs(host, "www.4adventure.com") || dnsDomainIs(host, "www.50states.com") || dnsDomainIs(host, "www.800padutch.com") || dnsDomainIs(host, "www.88.com") || dnsDomainIs(host, "www.a-better.com") || dnsDomainIs(host, "www.aaa.com.au") || dnsDomainIs(host, "www.aacca.com") || dnsDomainIs(host, "www.aalbc.com") || dnsDomainIs(host, "www.aardman.com") || dnsDomainIs(host, "www.aardvarkelectric.com") || dnsDomainIs(host, "www.aawc.com") || dnsDomainIs(host, "www.ababmx.com") || dnsDomainIs(host, "www.abbeville.com") || dnsDomainIs(host, "www.abc.net.au") || dnsDomainIs(host, "www.abcb.com") || dnsDomainIs(host, "www.abctooncenter.com") || dnsDomainIs(host, "www.about.ch") || dnsDomainIs(host, "www.accessart.org.uk") || dnsDomainIs(host, "www.accu.or.jp") || dnsDomainIs(host, "www.accuweather.com") || dnsDomainIs(host, "www.achuka.co.uk") || dnsDomainIs(host, "www.acmecity.com") || dnsDomainIs(host, "www.acorn-group.com") || dnsDomainIs(host, "www.acs.ucalgary.ca") || dnsDomainIs(host, "www.actden.com") || dnsDomainIs(host, "www.actionplanet.com") || dnsDomainIs(host, "www.activityvillage.co.uk") || dnsDomainIs(host, "www.actwin.com") || dnsDomainIs(host, "www.adequate.com") || dnsDomainIs(host, "www.adidas.com") || dnsDomainIs(host, "www.advent-calendars.com") || dnsDomainIs(host, "www.aegis.com") || dnsDomainIs(host, "www.af.mil") || dnsDomainIs(host, "www.africaindex.africainfo.no") || dnsDomainIs(host, "www.africam.com") || dnsDomainIs(host, "www.africancrafts.com") || dnsDomainIs(host, "www.aggressive.com") || dnsDomainIs(host, "www.aghines.com") || dnsDomainIs(host, "www.agirlsworld.com") || dnsDomainIs(host, "www.agora.stm.it") || dnsDomainIs(host, "www.agriculture.com") || dnsDomainIs(host, "www.aikidofaq.com") || dnsDomainIs(host, "www.ajkids.com") || dnsDomainIs(host, "www.akfkoala.gil.com.au") || dnsDomainIs(host, "www.akhlah.com") || dnsDomainIs(host, "www.alabamainfo.com") || dnsDomainIs(host, "www.aland.fi") || dnsDomainIs(host, "www.albion.com") || dnsDomainIs(host, "www.alcoholismhelp.com") || dnsDomainIs(host, "www.alcottweb.com") || dnsDomainIs(host, "www.alfanet.it") || dnsDomainIs(host, "www.alfy.com") || dnsDomainIs(host, "www.algebra-online.com") || dnsDomainIs(host, 
"www.alienexplorer.com") || dnsDomainIs(host, "www.aliensatschool.com") || dnsDomainIs(host, "www.all-links.com") || dnsDomainIs(host, "www.alldetroit.com") || dnsDomainIs(host, "www.allexperts.com") || dnsDomainIs(host, "www.allmixedup.com") || dnsDomainIs(host, "www.allmusic.com") || dnsDomainIs(host, "www.almanac.com") || dnsDomainIs(host, "www.almaz.com") || dnsDomainIs(host, "www.almondseed.com") || dnsDomainIs(host, "www.aloha.com") || dnsDomainIs(host, "www.aloha.net") || dnsDomainIs(host, "www.altonweb.com") || dnsDomainIs(host, "www.alyeska-pipe.com") || dnsDomainIs(host, "www.am-wood.com") || dnsDomainIs(host, "www.amazingadventure.com") || dnsDomainIs(host, "www.amazon.com") || dnsDomainIs(host, "www.americancheerleader.com") || dnsDomainIs(host, "www.americancowboy.com") || dnsDomainIs(host, "www.americangirl.com") || dnsDomainIs(host, "www.americanparknetwork.com") || dnsDomainIs(host, "www.americansouthwest.net") || dnsDomainIs(host, "www.americanwest.com") || dnsDomainIs(host, "www.ameritech.net") || dnsDomainIs(host, "www.amtexpo.com") || dnsDomainIs(host, "www.anbg.gov.au") || dnsDomainIs(host, "www.anc.org.za") || dnsDomainIs(host, "www.ancientegypt.co.uk") || dnsDomainIs(host, "www.angelfire.com") || dnsDomainIs(host, "www.angelsbaseball.com") || dnsDomainIs(host, "www.anholt.co.uk") || dnsDomainIs(host, "www.animabets.com") || dnsDomainIs(host, "www.animalnetwork.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.anime-genesis.com") || dnsDomainIs(host, "www.annefrank.com") || dnsDomainIs(host, "www.annefrank.nl") || dnsDomainIs(host, "www.annie75.com") || dnsDomainIs(host, "www.antbee.com") || dnsDomainIs(host, "www.antiquetools.com") || dnsDomainIs(host, "www.antiquetoy.com") || dnsDomainIs(host, "www.anzsbeg.org.au") || dnsDomainIs(host, "www.aol.com") || dnsDomainIs(host, "www.aone.com") || dnsDomainIs(host, "www.aphids.com") || dnsDomainIs(host, "www.apl.com") || dnsDomainIs(host, "www.aplusmath.com") || dnsDomainIs(host, "www.applebookshop.co.uk") || dnsDomainIs(host, "www.appropriatesoftware.com") || dnsDomainIs(host, "www.appukids.com") || dnsDomainIs(host, "www.april-joy.com") || dnsDomainIs(host, "www.arab.net") || dnsDomainIs(host, "www.aracnet.com") || dnsDomainIs(host, "www.arborday.com") || dnsDomainIs(host, "www.arcadevillage.com") || dnsDomainIs(host, "www.archiecomics.com") || dnsDomainIs(host, "www.archives.state.al.us") || dnsDomainIs(host, "www.arctic.ca") || dnsDomainIs(host, "www.ardenjohnson.com") || dnsDomainIs(host, "www.aristotle.net") || dnsDomainIs(host, "www.arizhwys.com") || dnsDomainIs(host, "www.arizonaguide.com") || dnsDomainIs(host, "www.arlingtoncemetery.com") || dnsDomainIs(host, "www.armory.com") || dnsDomainIs(host, "www.armwrestling.com") || dnsDomainIs(host, "www.arnprior.com") || dnsDomainIs(host, "www.artabunga.com") || dnsDomainIs(host, "www.artcarte.com") || dnsDomainIs(host, "www.artchive.com") || dnsDomainIs(host, "www.artcontest.com") || dnsDomainIs(host, "www.artcyclopedia.com") || dnsDomainIs(host, "www.artisandevelopers.com") || dnsDomainIs(host, "www.artlex.com") || dnsDomainIs(host, "www.artsandkids.com") || dnsDomainIs(host, "www.artyastro.com") || dnsDomainIs(host, "www.arwhead.com") || dnsDomainIs(host, "www.asahi-net.or.jp") || dnsDomainIs(host, "www.asap.unimelb.edu.au") || dnsDomainIs(host, "www.ascpl.lib.oh.us") || dnsDomainIs(host, "www.asia-art.net") || dnsDomainIs(host, "www.asiabigtime.com") || dnsDomainIs(host, "www.asianart.com") || dnsDomainIs(host, "www.asiatour.com") || 
dnsDomainIs(host, "www.asiaweek.com") || dnsDomainIs(host, "www.askanexpert.com") || dnsDomainIs(host, "www.askbasil.com") || dnsDomainIs(host, "www.assa.org.au") || dnsDomainIs(host, "www.ast.cam.ac.uk") || dnsDomainIs(host, "www.astronomy.com") || dnsDomainIs(host, "www.astros.com") || dnsDomainIs(host, "www.atek.com") || dnsDomainIs(host, "www.athlete.com") || dnsDomainIs(host, "www.athropolis.com") || dnsDomainIs(host, "www.atkielski.com") || dnsDomainIs(host, "www.atlantabraves.com") || dnsDomainIs(host, "www.atlantafalcons.com") || dnsDomainIs(host, "www.atlantathrashers.com") || dnsDomainIs(host, "www.atlanticus.com") || dnsDomainIs(host, "www.atm.ch.cam.ac.uk") || dnsDomainIs(host, "www.atom.co.jp") || dnsDomainIs(host, "www.atomicarchive.com") || dnsDomainIs(host, "www.att.com") || dnsDomainIs(host, "www.audreywood.com") || dnsDomainIs(host, "www.auntannie.com") || dnsDomainIs(host, "www.auntie.com") || dnsDomainIs(host, "www.avi-writer.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.awhitehorse.com") || dnsDomainIs(host, "www.axess.com") || dnsDomainIs(host, "www.ayles.com") || dnsDomainIs(host, "www.ayn.ca") || dnsDomainIs(host, "www.azcardinals.com") || dnsDomainIs(host, "www.azdiamondbacks.com") || dnsDomainIs(host, "www.azsolarcenter.com") || dnsDomainIs(host, "www.azstarnet.com") || dnsDomainIs(host, "www.aztecafoods.com") || dnsDomainIs(host, "www.b-witched.com") || dnsDomainIs(host, "www.baberuthmuseum.com") || dnsDomainIs(host, "www.backstreetboys.com") || dnsDomainIs(host, "www.bagheera.com") || dnsDomainIs(host, "www.bahamas.com") || dnsDomainIs(host, "www.baileykids.com") || dnsDomainIs(host, "www.baldeagleinfo.com") || dnsDomainIs(host, "www.balloonhq.com") || dnsDomainIs(host, "www.balloonzone.com") || dnsDomainIs(host, "www.ballparks.com") || dnsDomainIs(host, "www.balmoralsoftware.com") || dnsDomainIs(host, "www.banja.com") || dnsDomainIs(host, "www.banph.com") || dnsDomainIs(host, "www.barbie.com") || dnsDomainIs(host, "www.barkingbuddies.com") || dnsDomainIs(host, "www.barnsdle.demon.co.uk") || dnsDomainIs(host, "www.barrysclipart.com") || dnsDomainIs(host, "www.bartleby.com") || dnsDomainIs(host, "www.baseplate.com") || dnsDomainIs(host, "www.batman-superman.com") || dnsDomainIs(host, "www.batmanbeyond.com") || dnsDomainIs(host, "www.bbc.co.uk") || dnsDomainIs(host, "www.bbhighway.com") || dnsDomainIs(host, "www.bboy.com") || dnsDomainIs(host, "www.bcit.tec.nj.us") || dnsDomainIs(host, "www.bconnex.net") || dnsDomainIs(host, "www.bcpl.net") || dnsDomainIs(host, "www.beach-net.com") || dnsDomainIs(host, "www.beachboys.com") || dnsDomainIs(host, "www.beakman.com") || dnsDomainIs(host, "www.beano.co.uk") || dnsDomainIs(host, "www.beans.demon.co.uk") || dnsDomainIs(host, "www.beartime.com") || dnsDomainIs(host, "www.bearyspecial.co.uk") || dnsDomainIs(host, "www.bedtime.com") || dnsDomainIs(host, "www.beingme.com") || dnsDomainIs(host, "www.belizeexplorer.com") || dnsDomainIs(host, "www.bell-labs.com") || dnsDomainIs(host, "www.bemorecreative.com") || dnsDomainIs(host, "www.bengals.com") || dnsDomainIs(host, "www.benjerry.com") || dnsDomainIs(host, "www.bennygoodsport.com") || dnsDomainIs(host, "www.berenstainbears.com") || dnsDomainIs(host, "www.beringia.com") || dnsDomainIs(host, "www.beritsbest.com") || dnsDomainIs(host, "www.berksweb.com") || dnsDomainIs(host, "www.best.com") || dnsDomainIs(host, "www.betsybyars.com") || dnsDomainIs(host, "www.bfro.net") || dnsDomainIs(host, "www.bgmm.com") || dnsDomainIs(host, "www.bibliography.com") 
|| dnsDomainIs(host, "www.bigblue.com.au") || dnsDomainIs(host, "www.bigchalk.com") || dnsDomainIs(host, "www.bigidea.com") || dnsDomainIs(host, "www.bigtop.com") || dnsDomainIs(host, "www.bikecrawler.com") || dnsDomainIs(host, "www.billboard.com") || dnsDomainIs(host, "www.billybear4kids.com") || dnsDomainIs(host, "www.biography.com") || dnsDomainIs(host, "www.birdnature.com") || dnsDomainIs(host, "www.birdsnways.com") || dnsDomainIs(host, "www.birdtimes.com") || dnsDomainIs(host, "www.birminghamzoo.com") || dnsDomainIs(host, "www.birthdaypartyideas.com") || dnsDomainIs(host, "www.bis.arachsys.com") || dnsDomainIs(host, "www.bkgm.com") || dnsDomainIs(host, "www.blackbaseball.com") || dnsDomainIs(host, "www.blackbeardthepirate.com") || dnsDomainIs(host, "www.blackbeltmag.com") || dnsDomainIs(host, "www.blackfacts.com") || dnsDomainIs(host, "www.blackfeetnation.com") || dnsDomainIs(host, "www.blackhills-info.com") || dnsDomainIs(host, "www.blackholegang.com") || dnsDomainIs(host, "www.blaque.net") || dnsDomainIs(host, "www.blarg.net") || dnsDomainIs(host, "www.blasternaut.com") || dnsDomainIs(host, "www.blizzard.com") || dnsDomainIs(host, "www.blocksite.com") || dnsDomainIs(host, "www.bluejackets.com") || dnsDomainIs(host, "www.bluejays.ca") || dnsDomainIs(host, "www.bluemountain.com") || dnsDomainIs(host, "www.blupete.com") || dnsDomainIs(host, "www.blyton.co.uk") || dnsDomainIs(host, "www.boatnerd.com") || dnsDomainIs(host, "www.boatsafe.com") || dnsDomainIs(host, "www.bonus.com") || dnsDomainIs(host, "www.boowakwala.com") || dnsDomainIs(host, "www.bostonbruins.com") || dnsDomainIs(host, "www.braceface.com") || dnsDomainIs(host, "www.bracesinfo.com") || dnsDomainIs(host, "www.bradkent.com") || dnsDomainIs(host, "www.brainium.com") || dnsDomainIs(host, "www.brainmania.com") || dnsDomainIs(host, "www.brainpop.com") || dnsDomainIs(host, "www.bridalcave.com") || dnsDomainIs(host, "www.brightmoments.com") || dnsDomainIs(host, "www.britannia.com") || dnsDomainIs(host, "www.britannica.com") || dnsDomainIs(host, "www.british-museum.ac.uk") || dnsDomainIs(host, "www.brookes.ac.uk") || dnsDomainIs(host, "www.brookfieldreader.com") || dnsDomainIs(host, "www.btinternet.com") || dnsDomainIs(host, "www.bubbledome.co.nz") || dnsDomainIs(host, "www.buccaneers.com") || dnsDomainIs(host, "www.buffy.com") || dnsDomainIs(host, "www.bullying.co.uk") || dnsDomainIs(host, "www.bumply.com") || dnsDomainIs(host, "www.bungi.com") || dnsDomainIs(host, "www.burlco.lib.nj.us") || dnsDomainIs(host, "www.burlingamepezmuseum.com") || dnsDomainIs(host, "www.bus.ualberta.ca") || dnsDomainIs(host, "www.busprod.com") || dnsDomainIs(host, "www.butlerart.com") || dnsDomainIs(host, "www.butterflies.com") || dnsDomainIs(host, "www.butterflyfarm.co.cr") || dnsDomainIs(host, "www.bway.net") || dnsDomainIs(host, "www.bydonovan.com") || dnsDomainIs(host, "www.ca-mall.com") || dnsDomainIs(host, "www.cabinessence.com") || dnsDomainIs(host, "www.cablecarmuseum.com") || dnsDomainIs(host, "www.cadbury.co.uk") || dnsDomainIs(host, "www.calendarzone.com") || dnsDomainIs(host, "www.calgaryflames.com") || dnsDomainIs(host, "www.californiamissions.com") || dnsDomainIs(host, "www.camalott.com") || dnsDomainIs(host, "www.camelotintl.com") || dnsDomainIs(host, "www.campbellsoup.com") || dnsDomainIs(host, "www.camvista.com") || dnsDomainIs(host, "www.canadiens.com") || dnsDomainIs(host, "www.canals.state.ny.us") || dnsDomainIs(host, "www.candlelightstories.com") || dnsDomainIs(host, "www.candles-museum.com") || dnsDomainIs(host, 
"www.candystand.com") || dnsDomainIs(host, "www.caneshockey.com") || dnsDomainIs(host, "www.canismajor.com") || dnsDomainIs(host, "www.canucks.com") || dnsDomainIs(host, "www.capecod.net") || dnsDomainIs(host, "www.capital.net") || dnsDomainIs(host, "www.capstonestudio.com") || dnsDomainIs(host, "www.cardblvd.com") || dnsDomainIs(host, "www.caro.net") || dnsDomainIs(host, "www.carolhurst.com") || dnsDomainIs(host, "www.carr.lib.md.us") || dnsDomainIs(host, "www.cartooncorner.com") || dnsDomainIs(host, "www.cartooncritters.com") || dnsDomainIs(host, "www.cartoonnetwork.com") || dnsDomainIs(host, "www.carvingpatterns.com") || dnsDomainIs(host, "www.cashuniversity.com") || dnsDomainIs(host, "www.castles-of-britain.com") || dnsDomainIs(host, "www.castlewales.com") || dnsDomainIs(host, "www.catholic-forum.com") || dnsDomainIs(host, "www.catholic.net") || dnsDomainIs(host, "www.cattle.guelph.on.ca") || dnsDomainIs(host, "www.cavedive.com") || dnsDomainIs(host, "www.caveofthewinds.com") || dnsDomainIs(host, "www.cbc4kids.ca") || dnsDomainIs(host, "www.ccer.ggl.ruu.nl") || dnsDomainIs(host, "www.ccnet.com") || dnsDomainIs(host, "www.celineonline.com") || dnsDomainIs(host, "www.cellsalive.com") || dnsDomainIs(host, "www.centuryinshoes.com") || dnsDomainIs(host, "www.cfl.ca") || dnsDomainIs(host, "www.channel4.com") || dnsDomainIs(host, "www.channel8.net") || dnsDomainIs(host, "www.chanukah99.com") || dnsDomainIs(host, "www.charged.com") || dnsDomainIs(host, "www.chargers.com") || dnsDomainIs(host, "www.charlotte.com") || dnsDomainIs(host, "www.chaseday.com") || dnsDomainIs(host, "www.chateauversailles.fr") || dnsDomainIs(host, "www.cheatcc.com") || dnsDomainIs(host, "www.cheerleading.net") || dnsDomainIs(host, "www.cheese.com") || dnsDomainIs(host, "www.chem4kids.com") || dnsDomainIs(host, "www.chemicool.com") || dnsDomainIs(host, "www.cherbearsden.com") || dnsDomainIs(host, "www.chesskids.com") || dnsDomainIs(host, "www.chessvariants.com") || dnsDomainIs(host, "www.cheungswingchun.com") || dnsDomainIs(host, "www.chevroncars.com") || dnsDomainIs(host, "www.chibi.simplenet.com") || dnsDomainIs(host, "www.chicagobears.com") || dnsDomainIs(host, "www.chicagoblackhawks.com") || dnsDomainIs(host, "www.chickasaw.net") || dnsDomainIs(host, "www.childrensmusic.co.uk") || dnsDomainIs(host, "www.childrenssoftware.com") || dnsDomainIs(host, "www.childrenstory.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.chinapage.com") || dnsDomainIs(host, "www.chinatoday.com") || dnsDomainIs(host, "www.chinavista.com") || dnsDomainIs(host, "www.chinnet.net") || dnsDomainIs(host, "www.chiquita.com") || dnsDomainIs(host, "www.chisox.com") || dnsDomainIs(host, "www.chivalry.com") || dnsDomainIs(host, "www.christiananswers.net") || dnsDomainIs(host, "www.christianity.com") || dnsDomainIs(host, "www.christmas.com") || dnsDomainIs(host, "www.christmas98.com") || dnsDomainIs(host, "www.chron.com") || dnsDomainIs(host, "www.chronique.com") || dnsDomainIs(host, "www.chuckecheese.com") || dnsDomainIs(host, "www.chucklebait.com") || dnsDomainIs(host, "www.chunkymonkey.com") || dnsDomainIs(host, "www.ci.chi.il.us") || dnsDomainIs(host, "www.ci.nyc.ny.us") || dnsDomainIs(host, "www.ci.phoenix.az.us") || dnsDomainIs(host, "www.ci.san-diego.ca.us") || dnsDomainIs(host, "www.cibc.com") || dnsDomainIs(host, "www.ciderpresspottery.com") || dnsDomainIs(host, "www.cincinnatireds.com") || dnsDomainIs(host, "www.circusparade.com") || dnsDomainIs(host, "www.circusweb.com") || dnsDomainIs(host, "www.cirquedusoleil.com") 
|| dnsDomainIs(host, "www.cit.state.vt.us") || dnsDomainIs(host, "www.citycastles.com") || dnsDomainIs(host, "www.cityu.edu.hk") || dnsDomainIs(host, "www.civicmind.com") || dnsDomainIs(host, "www.civil-war.net") || dnsDomainIs(host, "www.civilization.ca") || dnsDomainIs(host, "www.cl.cam.ac.uk") || dnsDomainIs(host, "www.clantongang.com") || dnsDomainIs(host, "www.clark.net") || dnsDomainIs(host, "www.classicgaming.com") || dnsDomainIs(host, "www.claus.com") || dnsDomainIs(host, "www.clayz.com") || dnsDomainIs(host, "www.clearcf.uvic.ca") || dnsDomainIs(host, "www.clearlight.com") || dnsDomainIs(host, "www.clemusart.com") || dnsDomainIs(host, "www.clevelandbrowns.com") || dnsDomainIs(host, "www.clipartcastle.com") || dnsDomainIs(host, "www.clubi.ie") || dnsDomainIs(host, "www.cnn.com") || dnsDomainIs(host, "www.co.henrico.va.us") || dnsDomainIs(host, "www.coax.net") || dnsDomainIs(host, "www.cocacola.com") || dnsDomainIs(host, "www.cocori.com") || dnsDomainIs(host, "www.codesmiths.com") || dnsDomainIs(host, "www.codetalk.fed.us") || dnsDomainIs(host, "www.coin-gallery.com") || dnsDomainIs(host, "www.colinthompson.com") || dnsDomainIs(host, "www.collectoronline.com") || dnsDomainIs(host, "www.colonialhall.com") || dnsDomainIs(host, "www.coloradoavalanche.com") || dnsDomainIs(host, "www.coloradorockies.com") || dnsDomainIs(host, "www.colormathpink.com") || dnsDomainIs(host, "www.colts.com") || dnsDomainIs(host, "www.comet.net") || dnsDomainIs(host, "www.cometsystems.com") || dnsDomainIs(host, "www.comicbookresources.com") || dnsDomainIs(host, "www.comicspage.com") || dnsDomainIs(host, "www.compassnet.com") || dnsDomainIs(host, "www.compleatbellairs.com") || dnsDomainIs(host, "www.comptons.com") || dnsDomainIs(host, "www.concentric.net") || dnsDomainIs(host, "www.congogorillaforest.com") || dnsDomainIs(host, "www.conjuror.com") || dnsDomainIs(host, "www.conk.com") || dnsDomainIs(host, "www.conservation.state.mo.us") || dnsDomainIs(host, "www.contracostatimes.com") || dnsDomainIs(host, "www.control.chalmers.se") || dnsDomainIs(host, "www.cookierecipe.com") || dnsDomainIs(host, "www.cooljapanesetoys.com") || dnsDomainIs(host, "www.cooper.com") || dnsDomainIs(host, "www.corpcomm.net") || dnsDomainIs(host, "www.corrietenboom.com") || dnsDomainIs(host, "www.corynet.com") || dnsDomainIs(host, "www.corypaints.com") || dnsDomainIs(host, "www.cosmosmith.com") || dnsDomainIs(host, "www.countdown2000.com") || dnsDomainIs(host, "www.cowboy.net") || dnsDomainIs(host, "www.cowboypal.com") || dnsDomainIs(host, "www.cowcreek.com") || dnsDomainIs(host, "www.cowgirl.net") || dnsDomainIs(host, "www.cowgirls.com") || dnsDomainIs(host, "www.cp.duluth.mn.us") || dnsDomainIs(host, "www.cpsweb.com") || dnsDomainIs(host, "www.craftideas.com") || dnsDomainIs(host, "www.craniamania.com") || dnsDomainIs(host, "www.crater.lake.national-park.com") || dnsDomainIs(host, "www.crayoncrawler.com") || dnsDomainIs(host, "www.crazybone.com") || dnsDomainIs(host, "www.crazybones.com") || dnsDomainIs(host, "www.crd.ge.com") || dnsDomainIs(host, "www.create4kids.com") || dnsDomainIs(host, "www.creativemusic.com") || dnsDomainIs(host, "www.crocodilian.com") || dnsDomainIs(host, "www.crop.cri.nz") || dnsDomainIs(host, "www.cruzio.com") || dnsDomainIs(host, "www.crwflags.com") || dnsDomainIs(host, "www.cryptograph.com") || dnsDomainIs(host, "www.cryst.bbk.ac.uk") || dnsDomainIs(host, "www.cs.bilkent.edu.tr") || dnsDomainIs(host, "www.cs.man.ac.uk") || dnsDomainIs(host, "www.cs.sfu.ca") || dnsDomainIs(host, "www.cs.ubc.ca") || 
dnsDomainIs(host, "www.csd.uu.se") || dnsDomainIs(host, "www.csmonitor.com") || dnsDomainIs(host, "www.csse.monash.edu.au") || dnsDomainIs(host, "www.cstone.net") || dnsDomainIs(host, "www.csu.edu.au") || dnsDomainIs(host, "www.cubs.com") || dnsDomainIs(host, "www.culture.fr") || dnsDomainIs(host, "www.cultures.com") || dnsDomainIs(host, "www.curtis-collection.com") || dnsDomainIs(host, "www.cut-the-knot.com") || dnsDomainIs(host, "www.cws-scf.ec.gc.ca") || dnsDomainIs(host, "www.cyber-dyne.com") || dnsDomainIs(host, "www.cyberbee.com") || dnsDomainIs(host, "www.cyberbee.net") || dnsDomainIs(host, "www.cybercom.net") || dnsDomainIs(host, "www.cybercomm.net") || dnsDomainIs(host, "www.cybercomm.nl") || dnsDomainIs(host, "www.cybercorp.co.nz") || dnsDomainIs(host, "www.cybercs.com") || dnsDomainIs(host, "www.cybergoal.com") || dnsDomainIs(host, "www.cyberkids.com") || dnsDomainIs(host, "www.cyberspaceag.com") || dnsDomainIs(host, "www.cyberteens.com") || dnsDomainIs(host, "www.cybertours.com") || dnsDomainIs(host, "www.cybiko.com") || dnsDomainIs(host, "www.czweb.com") || dnsDomainIs(host, "www.d91.k12.id.us") || dnsDomainIs(host, "www.dailygrammar.com") || dnsDomainIs(host, "www.dakidz.com") || dnsDomainIs(host, "www.dalejarrettonline.com") || dnsDomainIs(host, "www.dallascowboys.com") || dnsDomainIs(host, "www.dallasdogndisc.com") || dnsDomainIs(host, "www.dallasstars.com") || dnsDomainIs(host, "www.damnyankees.com") || dnsDomainIs(host, "www.danceart.com") || dnsDomainIs(host, "www.daniellesplace.com") || dnsDomainIs(host, "www.dare-america.com") || dnsDomainIs(host, "www.darkfish.com") || dnsDomainIs(host, "www.darsbydesign.com") || dnsDomainIs(host, "www.datadragon.com") || dnsDomainIs(host, "www.davidreilly.com") || dnsDomainIs(host, "www.dccomics.com") || dnsDomainIs(host, "www.dcn.davis.ca.us") || dnsDomainIs(host, "www.deepseaworld.com") || dnsDomainIs(host, "www.<API key>.nsn.us") || dnsDomainIs(host, "www.demon.co.uk") || dnsDomainIs(host, "www.denverbroncos.com") || dnsDomainIs(host, "www.denverpost.com") || dnsDomainIs(host, "www.dep.state.pa.us") || dnsDomainIs(host, "www.desert-fairy.com") || dnsDomainIs(host, "www.desert-storm.com") || dnsDomainIs(host, "www.desertusa.com") || dnsDomainIs(host, "www.designltd.com") || dnsDomainIs(host, "www.designsbykat.com") || dnsDomainIs(host, "www.detnews.com") || dnsDomainIs(host, "www.detroitlions.com") || dnsDomainIs(host, "www.detroitredwings.com") || dnsDomainIs(host, "www.detroittigers.com") || dnsDomainIs(host, "www.deutsches-museum.de") || dnsDomainIs(host, "www.devilray.com") || dnsDomainIs(host, "www.dhorse.com") || dnsDomainIs(host, "www.diana-ross.co.uk") || dnsDomainIs(host, "www.<API key>.net") || dnsDomainIs(host, "www.diaryproject.com") || dnsDomainIs(host, "www.dickbutkus.com") || dnsDomainIs(host, "www.dickshovel.com") || dnsDomainIs(host, "www.dictionary.com") || dnsDomainIs(host, "www.didyouknow.com") || dnsDomainIs(host, "www.diegorivera.com") || dnsDomainIs(host, "www.digitalcentury.com") || dnsDomainIs(host, "www.digitaldog.com") || dnsDomainIs(host, "www.digiweb.com") || dnsDomainIs(host, "www.dimdima.com") || dnsDomainIs(host, "www.dinodon.com") || dnsDomainIs(host, "www.dinosauria.com") || dnsDomainIs(host, "www.discovereso.com") || dnsDomainIs(host, "www.discovergalapagos.com") || dnsDomainIs(host, "www.discovergames.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.discoveringmontana.com") || dnsDomainIs(host, "www.discoverlearning.com") || dnsDomainIs(host, "www.discovery.com") || 
dnsDomainIs(host, "www.disknet.com") || dnsDomainIs(host, "www.disney.go.com") || dnsDomainIs(host, "www.distinguishedwomen.com") || dnsDomainIs(host, "www.dkonline.com") || dnsDomainIs(host, "www.dltk-kids.com") || dnsDomainIs(host, "www.dmgi.com") || dnsDomainIs(host, "www.dnr.state.md.us") || dnsDomainIs(host, "www.dnr.state.mi.us") || dnsDomainIs(host, "www.dnr.state.wi.us") || dnsDomainIs(host, "www.dodgers.com") || dnsDomainIs(host, "www.dodoland.com") || dnsDomainIs(host, "www.dog-play.com") || dnsDomainIs(host, "www.dogbreedinfo.com") || dnsDomainIs(host, "www.doginfomat.com") || dnsDomainIs(host, "www.dole5aday.com") || dnsDomainIs(host, "www.dollart.com") || dnsDomainIs(host, "www.dolliedish.com") || dnsDomainIs(host, "www.dome2000.co.uk") || dnsDomainIs(host, "www.domtar.com") || dnsDomainIs(host, "www.donegal.k12.pa.us") || dnsDomainIs(host, "www.dorneypark.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.dougweb.com") || dnsDomainIs(host, "www.dps.state.ak.us") || dnsDomainIs(host, "www.draw3d.com") || dnsDomainIs(host, "www.dreamgate.com") || dnsDomainIs(host, "www.dreamkitty.com") || dnsDomainIs(host, "www.dreamscape.com") || dnsDomainIs(host, "www.dreamtime.net.au") || dnsDomainIs(host, "www.drpeppermuseum.com") || dnsDomainIs(host, "www.drscience.com") || dnsDomainIs(host, "www.drseward.com") || dnsDomainIs(host, "www.drtoy.com") || dnsDomainIs(host, "www.dse.nl") || dnsDomainIs(host, "www.dtic.mil") || dnsDomainIs(host, "www.duracell.com") || dnsDomainIs(host, "www.dustbunny.com") || dnsDomainIs(host, "www.dynanet.com") || dnsDomainIs(host, "www.eagerreaders.com") || dnsDomainIs(host, "www.eaglekids.com") || dnsDomainIs(host, "www.earthcalendar.net") || dnsDomainIs(host, "www.earthday.net") || dnsDomainIs(host, "www.earthdog.com") || dnsDomainIs(host, "www.earthwatch.com") || dnsDomainIs(host, "www.ease.com") || dnsDomainIs(host, "www.eastasia.ws") || dnsDomainIs(host, "www.easytype.com") || dnsDomainIs(host, "www.eblewis.com") || dnsDomainIs(host, "www.ebs.hw.ac.uk") || dnsDomainIs(host, "www.eclipse.net") || dnsDomainIs(host, "www.eco-pros.com") || dnsDomainIs(host, "www.edbydesign.com") || dnsDomainIs(host, "www.eddytheeco-dog.com") || dnsDomainIs(host, "www.edgate.com") || dnsDomainIs(host, "www.edmontonoilers.com") || dnsDomainIs(host, "www.edu-source.com") || dnsDomainIs(host, "www.edu.gov.on.ca") || dnsDomainIs(host, "www.edu4kids.com") || dnsDomainIs(host, "www.educ.uvic.ca") || dnsDomainIs(host, "www.educate.org.uk") || dnsDomainIs(host, "www.education-world.com") || dnsDomainIs(host, "www.edunet.com") || dnsDomainIs(host, "www.eduplace.com") || dnsDomainIs(host, "www.edupuppy.com") || dnsDomainIs(host, "www.eduweb.com") || dnsDomainIs(host, "www.ee.ryerson.ca") || dnsDomainIs(host, "www.ee.surrey.ac.uk") || dnsDomainIs(host, "www.eeggs.com") || dnsDomainIs(host, "www.efes.com") || dnsDomainIs(host, "www.egalvao.com") || dnsDomainIs(host, "www.egypt.com") || dnsDomainIs(host, "www.egyptology.com") || dnsDomainIs(host, "www.ehobbies.com") || dnsDomainIs(host, "www.ehow.com") || dnsDomainIs(host, "www.eia.brad.ac.uk") || dnsDomainIs(host, "www.elbalero.gob.mx") || dnsDomainIs(host, "www.eliki.com") || dnsDomainIs(host, "www.elnino.com") || dnsDomainIs(host, "www.elok.com") || dnsDomainIs(host, "www.emf.net") || dnsDomainIs(host, "www.emsphone.com") || dnsDomainIs(host, "www.emulateme.com") || dnsDomainIs(host, "www.en.com") || dnsDomainIs(host, "www.enature.com") || dnsDomainIs(host, "www.enchantedlearning.com") || dnsDomainIs(host, 
"www.encyclopedia.com") || dnsDomainIs(host, "www.endex.com") || dnsDomainIs(host, "www.enjoyillinois.com") || dnsDomainIs(host, "www.enn.com") || dnsDomainIs(host, "www.enriqueig.com") || dnsDomainIs(host, "www.enteract.com") || dnsDomainIs(host, "www.epals.com") || dnsDomainIs(host, "www.equine-world.co.uk") || dnsDomainIs(host, "www.eric-carle.com") || dnsDomainIs(host, "www.ericlindros.net") || dnsDomainIs(host, "www.escape.com") || dnsDomainIs(host, "www.eskimo.com") || dnsDomainIs(host, "www.essentialsofmusic.com") || dnsDomainIs(host, "www.etch-a-sketch.com") || dnsDomainIs(host, "www.ethanallen.together.com") || dnsDomainIs(host, "www.etoys.com") || dnsDomainIs(host, "www.eurekascience.com") || dnsDomainIs(host, "www.euronet.nl") || dnsDomainIs(host, "www.everyrule.com") || dnsDomainIs(host, "www.ex.ac.uk") || dnsDomainIs(host, "www.excite.com") || dnsDomainIs(host, "www.execpc.com") || dnsDomainIs(host, "www.execulink.com") || dnsDomainIs(host, "www.exn.net") || dnsDomainIs(host, "www.expa.hvu.nl") || dnsDomainIs(host, "www.expage.com") || dnsDomainIs(host, "www.explode.to") || dnsDomainIs(host, "www.explorescience.com") || dnsDomainIs(host, "www.explorezone.com") || dnsDomainIs(host, "www.extremescience.com") || dnsDomainIs(host, "www.eyelid.co.uk") || dnsDomainIs(host, "www.eyeneer.com") || dnsDomainIs(host, "www.eyesofachild.com") || dnsDomainIs(host, "www.eyesofglory.com") || dnsDomainIs(host, "www.ezschool.com") || dnsDomainIs(host, "www.f1-live.com") || dnsDomainIs(host, "www.fables.co.uk") || dnsDomainIs(host, "www.factmonster.com") || dnsDomainIs(host, "www.fairygodmother.com") || dnsDomainIs(host, "www.familybuzz.com") || dnsDomainIs(host, "www.familygames.com") || dnsDomainIs(host, "www.familygardening.com") || dnsDomainIs(host, "www.familyinternet.com") || dnsDomainIs(host, "www.familymoney.com") || dnsDomainIs(host, "www.familyplay.com") || dnsDomainIs(host, "www.famousbirthdays.com") || dnsDomainIs(host, "www.fandom.com") || dnsDomainIs(host, "www.fansites.com") || dnsDomainIs(host, "www.faoschwarz.com") || dnsDomainIs(host, "www.fbe.unsw.edu.au") || dnsDomainIs(host, "www.fcps.k12.va.us") || dnsDomainIs(host, "www.fellersartsfactory.com") || dnsDomainIs(host, "www.ferrari.it") || dnsDomainIs(host, "www.fertnel.com") || dnsDomainIs(host, "www.fh-konstanz.de") || dnsDomainIs(host, "www.fhw.gr") || dnsDomainIs(host, "www.fibblesnork.com") || dnsDomainIs(host, "www.fidnet.com") || dnsDomainIs(host, "www.fieldhockey.com") || dnsDomainIs(host, "www.fieldhockeytraining.com") || dnsDomainIs(host, "www.fieler.com") || dnsDomainIs(host, "www.finalfour.net") || dnsDomainIs(host, "www.finifter.com") || dnsDomainIs(host, "www.fireworks-safety.com") || dnsDomainIs(host, "www.firstcut.com") || dnsDomainIs(host, "www.firstnations.com") || dnsDomainIs(host, "www.fishbc.com") || dnsDomainIs(host, "www.fisher-price.com") || dnsDomainIs(host, "www.fisheyeview.com") || dnsDomainIs(host, "www.fishgeeks.com") || dnsDomainIs(host, "www.fishindex.com") || dnsDomainIs(host, "www.fitzgeraldstudio.com") || dnsDomainIs(host, "www.flags.net") || dnsDomainIs(host, "www.flail.com") || dnsDomainIs(host, "www.flamarlins.com") || dnsDomainIs(host, "www.flausa.com") || dnsDomainIs(host, "www.floodlight-findings.com") || dnsDomainIs(host, "www.floridahistory.com") || dnsDomainIs(host, "www.floridapanthers.com") || dnsDomainIs(host, "www.fng.fi") || dnsDomainIs(host, "www.foodsci.uoguelph.ca") || dnsDomainIs(host, "www.foremost.com") || dnsDomainIs(host, "www.fortress.am") || dnsDomainIs(host, 
"www.fortunecity.com") || dnsDomainIs(host, "www.fosterclub.com") || dnsDomainIs(host, "www.foundus.com") || dnsDomainIs(host, "www.fourmilab.ch") || dnsDomainIs(host, "www.fox.com") || dnsDomainIs(host, "www.foxfamilychannel.com") || dnsDomainIs(host, "www.foxhome.com") || dnsDomainIs(host, "www.foxkids.com") || dnsDomainIs(host, "www.franceway.com") || dnsDomainIs(host, "www.fred.net") || dnsDomainIs(host, "www.fredpenner.com") || dnsDomainIs(host, "www.freedomknot.com") || dnsDomainIs(host, "www.freejigsawpuzzles.com") || dnsDomainIs(host, "www.freenet.edmonton.ab.ca") || dnsDomainIs(host, "www.frii.com") || dnsDomainIs(host, "www.frisbee.com") || dnsDomainIs(host, "www.fritolay.com") || dnsDomainIs(host, "www.frogsonice.com") || dnsDomainIs(host, "www.frontiernet.net") || dnsDomainIs(host, "www.fs.fed.us") || dnsDomainIs(host, "www.funattic.com") || dnsDomainIs(host, ".funbrain.com") || dnsDomainIs(host, "www.fundango.com") || dnsDomainIs(host, "www.funisland.com") || dnsDomainIs(host, "www.funkandwagnalls.com") || dnsDomainIs(host, "www.funorama.com") || dnsDomainIs(host, "www.funschool.com") || dnsDomainIs(host, "www.funster.com") || dnsDomainIs(host, "www.furby.com") || dnsDomainIs(host, "www.fusion.org.uk") || dnsDomainIs(host, "www.futcher.com") || dnsDomainIs(host, "www.futurescan.com") || dnsDomainIs(host, "www.fyi.net") || dnsDomainIs(host, "www.gailgibbons.com") || dnsDomainIs(host, "www.galegroup.com") || dnsDomainIs(host, "www.gambia.com") || dnsDomainIs(host, "www.gamecabinet.com") || dnsDomainIs(host, "www.gamecenter.com") || dnsDomainIs(host, "www.gamefaqs.com") || dnsDomainIs(host, "www.garfield.com") || dnsDomainIs(host, "www.garyharbo.com") || dnsDomainIs(host, "www.gatefish.com") || dnsDomainIs(host, "www.gateway-va.com") || dnsDomainIs(host, "www.gazillionaire.com") || dnsDomainIs(host, "www.gearhead.com") || dnsDomainIs(host, "www.genesplicing.com") || dnsDomainIs(host, "www.genhomepage.com") || dnsDomainIs(host, "www.geobop.com") || dnsDomainIs(host, "www.geocities.com") || dnsDomainIs(host, "www.geographia.com") || dnsDomainIs(host, "www.georgeworld.com") || dnsDomainIs(host, "www.georgian.net") || dnsDomainIs(host, "www.german-way.com") || dnsDomainIs(host, "www.germanfortravellers.com") || dnsDomainIs(host, "www.germantown.k12.il.us") || dnsDomainIs(host, "www.germany-tourism.de") || dnsDomainIs(host, "www.getmusic.com") || dnsDomainIs(host, "www.gettysburg.com") || dnsDomainIs(host, "www.ghirardellisq.com") || dnsDomainIs(host, "www.ghosttowngallery.com") || dnsDomainIs(host, "www.ghosttownsusa.com") || dnsDomainIs(host, "www.giants.com") || dnsDomainIs(host, "www.gibraltar.gi") || dnsDomainIs(host, "www.gigglepoetry.com") || dnsDomainIs(host, "www.gilchriststudios.com") || dnsDomainIs(host, "www.gillslap.freeserve.co.uk") || dnsDomainIs(host, "www.gilmer.net") || dnsDomainIs(host, "www.gio.gov.tw") || dnsDomainIs(host, "www.girltech.com") || dnsDomainIs(host, "www.girlzone.com") || dnsDomainIs(host, "www.globalgang.org.uk") || dnsDomainIs(host, "www.globalindex.com") || dnsDomainIs(host, "www.globalinfo.com") || dnsDomainIs(host, "www.gloriafan.com") || dnsDomainIs(host, "www.gms.ocps.k12.fl.us") || dnsDomainIs(host, "www.go-go-diggity.com") || dnsDomainIs(host, "www.goals.com") || dnsDomainIs(host, "www.godiva.com") || dnsDomainIs(host, "www.golden-retriever.com") || dnsDomainIs(host, "www.goldenbooks.com") || dnsDomainIs(host, "www.goldeneggs.com.au") || dnsDomainIs(host, "www.golfonline.com") || dnsDomainIs(host, "www.goobo.com") || dnsDomainIs(host, 
"www.goodearthgraphics.com") || dnsDomainIs(host, "www.goodyear.com") || dnsDomainIs(host, "www.gopbi.com") || dnsDomainIs(host, "www.gorge.net") || dnsDomainIs(host, "www.gorp.com") || dnsDomainIs(host, "www.got-milk.com") || dnsDomainIs(host, "www.gov.ab.ca") || dnsDomainIs(host, "www.gov.nb.ca") || dnsDomainIs(host, "www.grammarbook.com") || dnsDomainIs(host, "www.grammarlady.com") || dnsDomainIs(host, "www.grandparents-day.com") || dnsDomainIs(host, "www.granthill.com") || dnsDomainIs(host, "www.grayweb.com") || dnsDomainIs(host, "www.greatbuildings.com") || dnsDomainIs(host, "www.greatkids.com") || dnsDomainIs(host, "www.greatscience.com") || dnsDomainIs(host, "www.greeceny.com") || dnsDomainIs(host, "www.greenkeepers.com") || dnsDomainIs(host, "www.greylabyrinth.com") || dnsDomainIs(host, "www.grimmy.com") || dnsDomainIs(host, "www.gsrg.nmh.ac.uk") || dnsDomainIs(host, "www.gti.net") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.guitar.net") || dnsDomainIs(host, "www.guitarplaying.com") || dnsDomainIs(host, "www.gumbyworld.com") || dnsDomainIs(host, "www.gurlwurld.com") || dnsDomainIs(host, "www.gwi.net") || dnsDomainIs(host, "www.gymn-forum.com") || dnsDomainIs(host, "www.gzkidzone.com") || dnsDomainIs(host, "www.haemibalgassi.com") || dnsDomainIs(host, "www.hairstylist.com") || dnsDomainIs(host, "www.halcyon.com") || dnsDomainIs(host, "www.halifax.cbc.ca") || dnsDomainIs(host, "www.halloween-online.com") || dnsDomainIs(host, "www.halloweenkids.com") || dnsDomainIs(host, "www.halloweenmagazine.com") || dnsDomainIs(host, "www.hamill.co.uk") || dnsDomainIs(host, "www.hamsterdance2.com") || dnsDomainIs(host, "www.hamsters.co.uk") || dnsDomainIs(host, "www.hamstertours.com") || dnsDomainIs(host, "www.handsonmath.com") || dnsDomainIs(host, "www.handspeak.com") || dnsDomainIs(host, "www.hansonline.com") || dnsDomainIs(host, "www.happychild.org.uk") || dnsDomainIs(host, "www.happyfamilies.com") || dnsDomainIs(host, "www.happytoy.com") || dnsDomainIs(host, "www.harley-davidson.com") || dnsDomainIs(host, "www.harmonicalessons.com") || dnsDomainIs(host, "www.harperchildrens.com") || dnsDomainIs(host, "www.harvey.com") || dnsDomainIs(host, "www.hasbro-interactive.com") || dnsDomainIs(host, "www.haynet.net") || dnsDomainIs(host, "www.hbc.com") || dnsDomainIs(host, "www.hblewis.com") || dnsDomainIs(host, "www.hbook.com") || dnsDomainIs(host, "www.he.net") || dnsDomainIs(host, "www.headbone.com") || dnsDomainIs(host, "www.healthatoz.com") || dnsDomainIs(host, "www.healthypet.com") || dnsDomainIs(host, "www.heartfoundation.com.au") || dnsDomainIs(host, "www.heatersworld.com") || dnsDomainIs(host, "www.her-online.com") || dnsDomainIs(host, "www.heroesofhistory.com") || dnsDomainIs(host, "www.hersheypa.com") || dnsDomainIs(host, "www.hersheys.com") || dnsDomainIs(host, "www.hevanet.com") || dnsDomainIs(host, "www.heynetwork.com") || dnsDomainIs(host, "www.hgo.com") || dnsDomainIs(host, "www.hhof.com") || dnsDomainIs(host, "www.hideandseekpuppies.com") || dnsDomainIs(host, "www.hifusion.com") || dnsDomainIs(host, "www.highbridgepress.com") || dnsDomainIs(host, "www.his.com") || dnsDomainIs(host, "www.history.navy.mil") || dnsDomainIs(host, "www.historychannel.com") || dnsDomainIs(host, "www.historyhouse.com") || dnsDomainIs(host, "www.historyplace.com") || dnsDomainIs(host, "www.hisurf.com") || dnsDomainIs(host, "www.hiyah.com") || dnsDomainIs(host, "www.hmnet.com") || dnsDomainIs(host, "www.hoboes.com") || dnsDomainIs(host, "www.hockeydb.com") || dnsDomainIs(host, 
"www.hohnerusa.com") || dnsDomainIs(host, "www.holidaychannel.com") || dnsDomainIs(host, "www.holidayfestival.com") || dnsDomainIs(host, "www.holidays.net") || dnsDomainIs(host, "www.hollywood.com") || dnsDomainIs(host, "www.holoworld.com") || dnsDomainIs(host, "www.homepagers.com") || dnsDomainIs(host, "www.homeschoolzone.com") || dnsDomainIs(host, "www.homestead.com") || dnsDomainIs(host, "www.homeworkspot.com") || dnsDomainIs(host, "www.hompro.com") || dnsDomainIs(host, "www.honey.com") || dnsDomainIs(host, "www.hooked.net") || dnsDomainIs(host, "www.hoophall.com") || dnsDomainIs(host, "www.hooverdam.com") || dnsDomainIs(host, "www.hopepaul.com") || dnsDomainIs(host, "www.horse-country.com") || dnsDomainIs(host, "www.horsechat.com") || dnsDomainIs(host, "www.horsefun.com") || dnsDomainIs(host, "www.horus.ics.org.eg") || dnsDomainIs(host, "www.hotbraille.com") || dnsDomainIs(host, "www.hotwheels.com") || dnsDomainIs(host, "www.howstuffworks.com") || dnsDomainIs(host, "www.hpdigitalbookclub.com") || dnsDomainIs(host, "www.hpj.com") || dnsDomainIs(host, "www.hpl.hp.com") || dnsDomainIs(host, "www.hpl.lib.tx.us") || dnsDomainIs(host, "www.hpnetwork.f2s.com") || dnsDomainIs(host, "www.hsswp.com") || dnsDomainIs(host, "www.hsx.com") || dnsDomainIs(host, "www.humboldt1.com") || dnsDomainIs(host, "www.humongous.com") || dnsDomainIs(host, "www.humph3.freeserve.co.uk") || dnsDomainIs(host, "www.humphreybear.com ") || dnsDomainIs(host, "www.hurricanehunters.com") || dnsDomainIs(host, "www.hyperhistory.com") || dnsDomainIs(host, "www.i2k.com") || dnsDomainIs(host, "www.ibhof.com") || dnsDomainIs(host, "www.ibiscom.com") || dnsDomainIs(host, "www.ibm.com") || dnsDomainIs(host, "www.icangarden.com") || dnsDomainIs(host, "www.icecreamusa.com") || dnsDomainIs(host, "www.icn.co.uk") || dnsDomainIs(host, "www.icomm.ca") || dnsDomainIs(host, "www.idfishnhunt.com") || dnsDomainIs(host, "www.iditarod.com") || dnsDomainIs(host, "www.iei.net") || dnsDomainIs(host, "www.iemily.com") || dnsDomainIs(host, "www.iir.com") || dnsDomainIs(host, "www.ika.com") || dnsDomainIs(host, "www.ikoala.com") || dnsDomainIs(host, "www.iln.net") || dnsDomainIs(host, "www.imagine5.com") || dnsDomainIs(host, "www.imes.boj.or.jp") || dnsDomainIs(host, "www.inch.com") || dnsDomainIs(host, "www.incwell.com") || dnsDomainIs(host, "www.indian-river.fl.us") || dnsDomainIs(host, "www.indians.com") || dnsDomainIs(host, "www.indo.com") || dnsDomainIs(host, "www.indyracingleague.com") || dnsDomainIs(host, "www.indyzoo.com") || dnsDomainIs(host, "www.info-canada.com") || dnsDomainIs(host, "www.infomagic.net") || dnsDomainIs(host, "www.infoplease.com") || dnsDomainIs(host, "www.infoporium.com") || dnsDomainIs(host, "www.infostuff.com") || dnsDomainIs(host, "www.inhandmuseum.com") || dnsDomainIs(host, "www.inil.com") || dnsDomainIs(host, "www.inkspot.com") || dnsDomainIs(host, "www.inkyfingers.com") || dnsDomainIs(host, "www.innerauto.com") || dnsDomainIs(host, "www.innerbody.com") || dnsDomainIs(host, "www.inqpub.com") || dnsDomainIs(host, "www.insecta-inspecta.com") || dnsDomainIs(host, "www.insectclopedia.com") || dnsDomainIs(host, "www.inside-mexico.com") || dnsDomainIs(host, "www.insiders.com") || dnsDomainIs(host, "www.insteam.com") || dnsDomainIs(host, "www.intel.com") || dnsDomainIs(host, "www.intellicast.com") || dnsDomainIs(host, "www.interads.co.uk") || dnsDomainIs(host, "www.intercot.com") || dnsDomainIs(host, "www.intergraffix.com") || dnsDomainIs(host, "www.interknowledge.com") || dnsDomainIs(host, "www.interlog.com") || 
dnsDomainIs(host, "www.internet4kids.com") || dnsDomainIs(host, "www.intersurf.com") || dnsDomainIs(host, "www.inthe80s.com") || dnsDomainIs(host, "www.inventorsmuseum.com") || dnsDomainIs(host, "www.inwap.com") || dnsDomainIs(host, "www.ioa.com") || dnsDomainIs(host, "www.ionet.net") || dnsDomainIs(host, "www.iowacity.com") || dnsDomainIs(host, "www.ireland-now.com") || dnsDomainIs(host, "www.ireland.com") || dnsDomainIs(host, "www.irelandseye.com") || dnsDomainIs(host, "www.irlgov.ie") || dnsDomainIs(host, "www.isd.net") || dnsDomainIs(host, "www.islandnet.com") || dnsDomainIs(host, "www.isomedia.com") || dnsDomainIs(host, "www.itftennis.com") || dnsDomainIs(host, "www.itpi.dpi.state.nc.us") || dnsDomainIs(host, "www.itskwanzaatime.com") || dnsDomainIs(host, "www.itss.raytheon.com") || dnsDomainIs(host, "www.iuma.com") || dnsDomainIs(host, "www.iwaynet.net") || dnsDomainIs(host, "www.iwc.com") || dnsDomainIs(host, "www.iwight.gov.uk") || dnsDomainIs(host, "www.ixpres.com") || dnsDomainIs(host, "www.j.b.allen.btinternet.co.uk") || dnsDomainIs(host, "www.jabuti.com") || dnsDomainIs(host, "www.jackinthebox.com") || dnsDomainIs(host, "www.jaffebros.com") || dnsDomainIs(host, "www.jaguars.com") || dnsDomainIs(host, "www.jamaica-gleaner.com") || dnsDomainIs(host, "www.jamm.com") || dnsDomainIs(host, "www.janbrett.com") || dnsDomainIs(host, "www.janetstevens.com") || dnsDomainIs(host, "www.japan-guide.com") || dnsDomainIs(host, "www.jargon.net") || dnsDomainIs(host, "www.javelinamx.com") || dnsDomainIs(host, "www.jayjay.com") || dnsDomainIs(host, "www.jazclass.aust.com") || dnsDomainIs(host, "www.jedinet.com") || dnsDomainIs(host, "www.jenniferlopez.com") || dnsDomainIs(host, "www.jlpanagopoulos.com") || dnsDomainIs(host, "www.jmarshall.com") || dnsDomainIs(host, "www.jmccall.demon.co.uk") || dnsDomainIs(host, "www.jmts.com") || dnsDomainIs(host, "www.joesherlock.com") || dnsDomainIs(host, "www.<API key>.co.uk") || dnsDomainIs(host, "www.joycecarolthomas.com") || dnsDomainIs(host, "www.joycone.com") || dnsDomainIs(host, "www.joyrides.com") || dnsDomainIs(host, "www.jps.net") || dnsDomainIs(host, "www.jspub.com") || dnsDomainIs(host, "www.judaica.com") || dnsDomainIs(host, "www.judyblume.com") || dnsDomainIs(host, "www.julen.net") || dnsDomainIs(host, "www.june29.com") || dnsDomainIs(host, "www.juneteenth.com") || dnsDomainIs(host, "www.justuskidz.com") || dnsDomainIs(host, "www.justwomen.com") || dnsDomainIs(host, "www.jwindow.net") || dnsDomainIs(host, "www.k9web.com") || dnsDomainIs(host, "www.kaercher.de") || dnsDomainIs(host, "www.kaleidoscapes.com") || dnsDomainIs(host, "www.kapili.com") || dnsDomainIs(host, "www.kcchiefs.com") || dnsDomainIs(host, "www.kcpl.lib.mo.us") || dnsDomainIs(host, "www.kcroyals.com") || dnsDomainIs(host, "www.kcsd.k12.pa.us") || dnsDomainIs(host, "www.kdu.com") || dnsDomainIs(host, "www.kelloggs.com") || dnsDomainIs(host, "www.<API key>.com") || dnsDomainIs(host, "www.kenyaweb.com") || dnsDomainIs(host, "www.keypals.com") || dnsDomainIs(host, "www.kfn.com") || dnsDomainIs(host, "www.kid-at-art.com") || dnsDomainIs(host, "www.kid-channel.com") || dnsDomainIs(host, "www.kidallergy.com") || dnsDomainIs(host, "www.kidbibs.com") || dnsDomainIs(host, "www.kidcomics.com") || dnsDomainIs(host, "www.kiddesafety.com") || dnsDomainIs(host, "www.kiddiecampus.com") || dnsDomainIs(host, "www.kididdles.com") || dnsDomainIs(host, "www.kidnews.com") || dnsDomainIs(host, "www.kidocracy.com") || dnsDomainIs(host, "www.kidport.com") || dnsDomainIs(host, "www.kids-channel.co.uk") || 
dnsDomainIs(host, "www.kids-drawings.com") || dnsDomainIs(host, "www.kids-in-mind.com") || dnsDomainIs(host, "www.kids4peace.com") || dnsDomainIs(host, "www.kidsandcomputers.com") || dnsDomainIs(host, "www.kidsart.co.uk") || dnsDomainIs(host, "www.kidsastronomy.com") || dnsDomainIs(host, "www.kidsbank.com") || dnsDomainIs(host, "www.kidsbookshelf.com") || dnsDomainIs(host, "www.kidsclick.com") || dnsDomainIs(host, "www.kidscom.com") || dnsDomainIs(host, "www.kidscook.com") || dnsDomainIs(host, "www.kidsdoctor.com") || dnsDomainIs(host, "www.kidsdomain.com") || dnsDomainIs(host, "www.kidsfarm.com") || dnsDomainIs(host, "www.kidsfreeware.com") || dnsDomainIs(host, "www.kidsfun.tv") || dnsDomainIs(host, "www.kidsgolf.com") || dnsDomainIs(host, "www.kidsgowild.com") || dnsDomainIs(host, "www.kidsjokes.com") || dnsDomainIs(host, "www.kidsloveamystery.com") || dnsDomainIs(host, "www.kidsmoneycents.com") || dnsDomainIs(host, "www.kidsnewsroom.com") || dnsDomainIs(host, "www.kidsource.com") || dnsDomainIs(host, "www.kidsparties.com") || dnsDomainIs(host, "www.kidsplaytown.com") || dnsDomainIs(host, "www.kidsreads.com") || dnsDomainIs(host, "www.kidsreport.com") || dnsDomainIs(host, "www.kidsrunning.com") || dnsDomainIs(host, "www.kidstamps.com") || dnsDomainIs(host, "www.kidsvideogames.com") || dnsDomainIs(host, "www.kidsway.com") || dnsDomainIs(host, "www.kidswithcancer.com") || dnsDomainIs(host, "www.kidszone.ourfamily.com") || dnsDomainIs(host, "www.kidzup.com") || dnsDomainIs(host, "www.kinderart.com") || dnsDomainIs(host, "www.kineticcity.com") || dnsDomainIs(host, "www.kings.k12.ca.us") || dnsDomainIs(host, "www.kiplinger.com") || dnsDomainIs(host, "www.kiwirecovery.org.nz") || dnsDomainIs(host, "www.klipsan.com") || dnsDomainIs(host, "www.klutz.com") || dnsDomainIs(host, "www.kn.pacbell.com") || dnsDomainIs(host, "www.knex.com") || dnsDomainIs(host, "www.knowledgeadventure.com") || dnsDomainIs(host, "www.knto.or.kr") || dnsDomainIs(host, "www.kodak.com") || dnsDomainIs(host, "www.konica.co.jp") || dnsDomainIs(host, "www.kraftfoods.com") || dnsDomainIs(host, "www.kudzukids.com") || dnsDomainIs(host, "www.kulichki.com") || dnsDomainIs(host, "www.kuttu.com") || dnsDomainIs(host, "www.kv5.com") || dnsDomainIs(host, "www.kyes-world.com") || dnsDomainIs(host, "www.kyohaku.go.jp") || dnsDomainIs(host, "www.kyrene.k12.az.us") || dnsDomainIs(host, "www.kz") || dnsDomainIs(host, "www.la-hq.org.uk") || dnsDomainIs(host, "www.labs.net") || dnsDomainIs(host, "www.labyrinth.net.au") || dnsDomainIs(host, "www.laffinthedark.com") || dnsDomainIs(host, "www.lakhota.com") || dnsDomainIs(host, "www.lakings.com") || dnsDomainIs(host, "www.lam.mus.ca.us") || dnsDomainIs(host, "www.lampstras.k12.pa.us") || dnsDomainIs(host, "www.lams.losalamos.k12.nm.us") || dnsDomainIs(host, "www.landofcadbury.ca") || dnsDomainIs(host, "www.larry-boy.com") || dnsDomainIs(host, "www.lasersite.com") || dnsDomainIs(host, "www.last-word.com") || dnsDomainIs(host, "www.latimes.com") || dnsDomainIs(host, "www.laughon.com") || dnsDomainIs(host, "www.laurasmidiheaven.com") || dnsDomainIs(host, "www.lausd.k12.ca.us") || dnsDomainIs(host, "www.learn2.com") || dnsDomainIs(host, "www.learn2type.com") || dnsDomainIs(host, "www.learnfree-hobbies.com") || dnsDomainIs(host, "www.learningkingdom.com") || dnsDomainIs(host, "www.learningplanet.com") || dnsDomainIs(host, "www.leftjustified.com") || dnsDomainIs(host, "www.legalpadjr.com") || dnsDomainIs(host, "www.legendarysurfers.com") || dnsDomainIs(host, "www.legends.dm.net") || dnsDomainIs(host, 
"www.legis.state.wi.us") || dnsDomainIs(host, "www.legis.state.wv.us") || dnsDomainIs(host, "www.lego.com") || dnsDomainIs(host, "www.leje.com") || dnsDomainIs(host, "www.leonardodicaprio.com") || dnsDomainIs(host, "www.lessonplanspage.com") || dnsDomainIs(host, "www.letour.fr") || dnsDomainIs(host, "www.levins.com") || dnsDomainIs(host, "www.levistrauss.com") || dnsDomainIs(host, "www.libertystatepark.com") || dnsDomainIs(host, "www.libraryspot.com") || dnsDomainIs(host, "www.lifelong.com") || dnsDomainIs(host, "www.lighthouse.cc") || dnsDomainIs(host, "www.lightlink.com") || dnsDomainIs(host, "www.lightspan.com") || dnsDomainIs(host, "www.lil-fingers.com") || dnsDomainIs(host, "www.linc.or.jp") || dnsDomainIs(host, "www.lindsaysbackyard.com") || dnsDomainIs(host, "www.lindtchocolate.com") || dnsDomainIs(host, "www.lineone.net") || dnsDomainIs(host, "www.lionel.com") || dnsDomainIs(host, "www.lisafrank.com") || dnsDomainIs(host, "www.lissaexplains.com") || dnsDomainIs(host, "www.literacycenter.net") || dnsDomainIs(host, "www.littleartist.com") || dnsDomainIs(host, "www.littlechiles.com") || dnsDomainIs(host, "www.littlecritter.com") || dnsDomainIs(host, "www.littlecrowtoys.com") || dnsDomainIs(host, "www.littlehousebooks.com") || dnsDomainIs(host, "www.littlejason.com") || dnsDomainIs(host, "www.littleplanettimes.com") || dnsDomainIs(host, "www.liveandlearn.com") || dnsDomainIs(host, "www.loadstar.prometeus.net") || dnsDomainIs(host, "www.localaccess.com") || dnsDomainIs(host, "www.lochness.co.uk") || dnsDomainIs(host, "www.lochness.scotland.net") || dnsDomainIs(host, "www.logos.it") || dnsDomainIs(host, "www.lonelyplanet.com") || dnsDomainIs(host, "www.looklearnanddo.com") || dnsDomainIs(host, "www.loosejocks.com") || dnsDomainIs(host, "www.lost-worlds.com") || dnsDomainIs(host, "www.love-story.com") || dnsDomainIs(host, "www.lpga.com") || dnsDomainIs(host, "www.lsjunction.com") || dnsDomainIs(host, "www.lucasarts.com") || dnsDomainIs(host, "www.lucent.com") || dnsDomainIs(host, "www.lucie.com") || dnsDomainIs(host, "www.lunaland.co.za") || dnsDomainIs(host, "www.luth.se") || dnsDomainIs(host, "www.lyricalworks.com") || dnsDomainIs(host, "www.infoporium.com") || dnsDomainIs(host, "www.infostuff.com") || dnsDomainIs(host, "www.inhandmuseum.com") || dnsDomainIs(host, "www.inil.com") || dnsDomainIs(host, "www.inkspot.com") || dnsDomainIs(host, "www.inkyfingers.com") || dnsDomainIs(host, "www.innerauto.com") || dnsDomainIs(host, "www.innerbody.com") || dnsDomainIs(host, "www.inqpub.com") || dnsDomainIs(host, "www.insecta-inspecta.com") || dnsDomainIs(host, "www.insectclopedia.com") || dnsDomainIs(host, "www.inside-mexico.com") || dnsDomainIs(host, "www.insiders.com") || dnsDomainIs(host, "www.insteam.com") || dnsDomainIs(host, "www.intel.com") || dnsDomainIs(host, "www.intellicast.com") || dnsDomainIs(host, "www.interads.co.uk") || dnsDomainIs(host, "www.intercot.com") || dnsDomainIs(host, "www.intergraffix.com") || dnsDomainIs(host, "www.interknowledge.com") || dnsDomainIs(host, "www.interlog.com") || dnsDomainIs(host, "www.internet4kids.com") || dnsDomainIs(host, "www.intersurf.com") || dnsDomainIs(host, "www.inthe80s.com") || dnsDomainIs(host, "www.inventorsmuseum.com") || dnsDomainIs(host, "www.inwap.com") || dnsDomainIs(host, "www.ioa.com") || dnsDomainIs(host, "www.ionet.net") || dnsDomainIs(host, "www.iowacity.com") || dnsDomainIs(host, "www.ireland-now.com") || dnsDomainIs(host, "www.ireland.com") || dnsDomainIs(host, "www.irelandseye.com") || dnsDomainIs(host, "www.irlgov.ie") || 
dnsDomainIs(host, "www.isd.net") || dnsDomainIs(host, "www.islandnet.com") || dnsDomainIs(host, "www.isomedia.com") || dnsDomainIs(host, "www.itftennis.com") || dnsDomainIs(host, "www.itpi.dpi.state.nc.us") || dnsDomainIs(host, "www.itskwanzaatime.com") || dnsDomainIs(host, "www.itss.raytheon.com") || dnsDomainIs(host, "www.iuma.com") || dnsDomainIs(host, "www.iwaynet.net") || dnsDomainIs(host, "www.iwc.com") || dnsDomainIs(host, "www.iwight.gov.uk") || dnsDomainIs(host, "www.ixpres.com") || dnsDomainIs(host, "www.j.b.allen.btinternet.co.uk") || dnsDomainIs(host, "www.jabuti.com") || dnsDomainIs(host, "www.jackinthebox.com") || dnsDomainIs(host, "www.jaffebros.com") || dnsDomainIs(host, "www.jaguars.com") || dnsDomainIs(host, "www.jamaica-gleaner.com") || dnsDomainIs(host, "www.jamm.com") || dnsDomainIs(host, "www.janbrett.com") || dnsDomainIs(host, "www.janetstevens.com") || dnsDomainIs(host, "www.japan-guide.com") || dnsDomainIs(host, "www.jargon.net") || dnsDomainIs(host, "www.javelinamx.com") || dnsDomainIs(host, "www.jayjay.com") || dnsDomainIs(host, "www.jazclass.aust.com") ) return "PROXY proxy.hclib.org:80"; else return "PROXY 172.16.100.20:8080"; } reportCompare('No Crash', 'No Crash', '');
package aws

import (
	"errors"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func dataSourceAwsNetworkAcls() *schema.Resource {
	return &schema.Resource{
		Read: dataSourceAwsNetworkAclsRead,

		Schema: map[string]*schema.Schema{
			"filter": dataSourceFiltersSchema(),
			"tags":   tagsSchemaComputed(),
			"vpc_id": {
				Type:     schema.TypeString,
				Optional: true,
			},
			"ids": {
				Type:     schema.TypeSet,
				Computed: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
				Set:      schema.HashString,
			},
		},
	}
}

func dataSourceAwsNetworkAclsRead(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).ec2conn

	req := &ec2.DescribeNetworkAclsInput{}

	if v, ok := d.GetOk("vpc_id"); ok {
		req.Filters = buildEC2AttributeFilterList(
			map[string]string{
				"vpc-id": v.(string),
			},
		)
	}

	filters, filtersOk := d.GetOk("filter")
	tags, tagsOk := d.GetOk("tags")

	if tagsOk {
		req.Filters = append(req.Filters, buildEC2TagFilterList(
			tagsFromMap(tags.(map[string]interface{})),
		)...)
	}

	if filtersOk {
		req.Filters = append(req.Filters, buildEC2CustomFilterList(
			filters.(*schema.Set),
		)...)
	}

	if len(req.Filters) == 0 {
		// Don't send an empty filters list; the EC2 API won't accept it.
		req.Filters = nil
	}

	log.Printf("[DEBUG] DescribeNetworkAcls %s\n", req)
	resp, err := conn.DescribeNetworkAcls(req)
	if err != nil {
		return err
	}

	if resp == nil || len(resp.NetworkAcls) == 0 {
		return errors.New("no matching network ACLs found")
	}

	networkAcls := make([]string, 0)
	for _, networkAcl := range resp.NetworkAcls {
		networkAcls = append(networkAcls, aws.StringValue(networkAcl.NetworkAclId))
	}

	d.SetId(resource.UniqueId())
	if err := d.Set("ids", networkAcls); err != nil {
		return fmt.Errorf("Error setting network ACL ids: %s", err)
	}

	return nil
}
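# The Go file above implements the EC2 network ACLs data source for the AWS
# Terraform provider (registered elsewhere in the provider; "aws_network_acls"
# upstream). A hypothetical 0.11-era configuration exercising it; the VPC
# resource name and the tag key/value are assumptions, not from the source:
data "aws_network_acls" "example" {
  vpc_id = "${aws_vpc.main.id}"

  tags {
    Tier = "private"
  }
}

# The data source exports only the computed "ids" set.
output "network_acl_ids" {
  value = "${data.aws_network_acls.example.ids}"
}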
/*
 * test_ocspchecker.c
 *
 * Test OcspChecker function
 *
 */

#include "testutil.h"
#include "testutil_nss.h"

static void *plContext = NULL;

static void
printUsage(void)
{
    (void)printf("\nUSAGE:\nOcspChecker -d <certStoreDirectory> TestName "
                 "[ENE|EE] <certLocationDirectory> <trustedCert> "
                 "<targetCert>\n\n");
    (void)printf("Validates a chain of certificates between "
                 "<trustedCert> and <targetCert>\n"
                 "using the certs and CRLs in <certLocationDirectory> and "
                 "pkcs11 db from <certStoreDirectory>. "
                 "If ENE is specified,\n"
                 "then an Error is Not Expected. "
                 "If EE is specified, an Error is Expected.\n");
}

static char *
createFullPathName(
    char *dirName,
    char *certFile,
    void *plContext)
{
    PKIX_UInt32 certFileLen;
    PKIX_UInt32 dirNameLen;
    char *certPathName = NULL;

    PKIX_TEST_STD_VARS();

    certFileLen = PL_strlen(certFile);
    dirNameLen = PL_strlen(dirName);

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Malloc(dirNameLen + certFileLen + 2,
                                             (void **)&certPathName,
                                             plContext));

    PL_strcpy(certPathName, dirName);
    PL_strcat(certPathName, "/");
    PL_strcat(certPathName, certFile);
    printf("certPathName = %s\n", certPathName);

cleanup:

    PKIX_TEST_RETURN();

    return (certPathName);
}

static PKIX_Error *
testDefaultCertStore(PKIX_ValidateParams *valParams, char *crlDir)
{
    PKIX_PL_String *dirString = NULL;
    PKIX_CertStore *certStore = NULL;
    PKIX_ProcessingParams *procParams = NULL;
    PKIX_PL_Date *validity = NULL;
    PKIX_List *revCheckers = NULL;
    PKIX_RevocationChecker *revChecker = NULL;
    PKIX_PL_Object *revCheckerContext = NULL;
    PKIX_OcspChecker *ocspChecker = NULL;

    PKIX_TEST_STD_VARS();

    subTest("PKIX_PL_CollectionCertStore_Create");

    /* Create CollectionCertStore */

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_String_Create(PKIX_ESCASCII, crlDir, 0, &dirString, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_CollectionCertStore_Create(dirString, &certStore, plContext));

    /* Create CertStore */

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ValidateParams_GetProcessingParams(valParams, &procParams, plContext));

    subTest("PKIX_ProcessingParams_AddCertStore");
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_AddCertStore(procParams, certStore, plContext));

    subTest("PKIX_ProcessingParams_SetRevocationEnabled");
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetRevocationEnabled(procParams, PKIX_FALSE, plContext));

    /* create current Date */
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Date_CreateFromPRTime(PR_Now(), &validity, plContext));

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&revCheckers, plContext));

    /* create revChecker */
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_OcspChecker_Initialize(validity,
                                                          NULL, /* pwArg */
                                                          NULL, /* Use default responder */
                                                          &revChecker,
                                                          plContext));

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_RevocationChecker_GetRevCheckerContext(revChecker, &revCheckerContext, plContext));

    /* Check that this object is a ocsp checker */
    PKIX_TEST_EXPECT_NO_ERROR(pkix_CheckType(revCheckerContext, PKIX_OCSPCHECKER_TYPE, plContext));

    ocspChecker = (PKIX_OcspChecker *)revCheckerContext;

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_OcspChecker_SetVerifyFcn(ocspChecker, PKIX_PL_OcspResponse_UseBuildChain, plContext));

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(revCheckers, (PKIX_PL_Object *)revChecker, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetRevocationCheckers(procParams, revCheckers, plContext));

cleanup:

    PKIX_TEST_DECREF_AC(dirString);
    PKIX_TEST_DECREF_AC(procParams);
    PKIX_TEST_DECREF_AC(certStore);
    PKIX_TEST_DECREF_AC(revCheckers);
    PKIX_TEST_DECREF_AC(revChecker);
    PKIX_TEST_DECREF_AC(ocspChecker);
    PKIX_TEST_DECREF_AC(validity);

    PKIX_TEST_RETURN();

    return (0);
}

int
test_ocsp(int argc, char *argv[])
{
    PKIX_ValidateParams *valParams = NULL;
    PKIX_ProcessingParams *procParams = NULL;
    PKIX_ComCertSelParams *certSelParams = NULL;
    PKIX_CertSelector *certSelector = NULL;
    PKIX_ValidateResult *valResult = NULL;
    PKIX_UInt32 actualMinorVersion;
    PKIX_UInt32 j = 0;
    PKIX_UInt32 k = 0;
    PKIX_UInt32 chainLength = 0;
    PKIX_Boolean testValid = PKIX_TRUE;
    PKIX_List *chainCerts = NULL;
    PKIX_VerifyNode *verifyTree = NULL;
    PKIX_PL_String *verifyString = NULL;
    PKIX_PL_Cert *dirCert = NULL;
    PKIX_PL_Cert *trustedCert = NULL;
    PKIX_PL_Cert *targetCert = NULL;
    PKIX_TrustAnchor *anchor = NULL;
    PKIX_List *anchors = NULL;
    char *dirCertName = NULL;
    char *anchorCertName = NULL;
    char *dirName = NULL;
    char *databaseDir = NULL;

    PKIX_TEST_STD_VARS();

    if (argc < 5) {
        printUsage();
        return (0);
    }

    startTests("OcspChecker");

    PKIX_TEST_EXPECT_NO_ERROR(
        PKIX_PL_NssContext_Create(0, PKIX_FALSE, NULL, &plContext));

    /* ENE = expect no error; EE = expect error */
    if (PORT_Strcmp(argv[2 + j], "ENE") == 0) {
        testValid = PKIX_TRUE;
    } else if (PORT_Strcmp(argv[2 + j], "EE") == 0) {
        testValid = PKIX_FALSE;
    } else {
        printUsage();
        return (0);
    }

    subTest(argv[1 + j]);

    dirName = argv[3 + j];

    chainLength = argc - j - 5;

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&chainCerts, plContext));

    for (k = 0; k < chainLength; k++) {
        dirCert = createCert(dirName, argv[5 + k + j], plContext);

        if (k == 0) {
            PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Object_IncRef((PKIX_PL_Object *)dirCert, plContext));
            targetCert = dirCert;
        }

        PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(chainCerts, (PKIX_PL_Object *)dirCert, plContext));

        PKIX_TEST_DECREF_BC(dirCert);
    }

    /* create processing params with list of trust anchors */

    anchorCertName = argv[4 + j];
    trustedCert = createCert(dirName, anchorCertName, plContext);

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_TrustAnchor_CreateWithCert(trustedCert, &anchor, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_Create(&anchors, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_List_AppendItem(anchors, (PKIX_PL_Object *)anchor, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_Create(anchors, &procParams, plContext));

    /* create CertSelector with target certificate in params */

    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ComCertSelParams_Create(&certSelParams, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ComCertSelParams_SetCertificate(certSelParams, targetCert, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_CertSelector_Create(NULL, NULL, &certSelector, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_CertSelector_SetCommonCertSelectorParams(certSelector, certSelParams, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ProcessingParams_SetTargetCertConstraints(procParams, certSelector, plContext));
    PKIX_TEST_EXPECT_NO_ERROR(PKIX_ValidateParams_Create(procParams, chainCerts, &valParams, plContext));

    testDefaultCertStore(valParams, dirName);

    pkixTestErrorResult = PKIX_ValidateChain(valParams, &valResult, &verifyTree, plContext);

    if (pkixTestErrorResult) {
        if (testValid == PKIX_FALSE) {
            (void)printf("EXPECTED ERROR RECEIVED!\n");
        } else { /* ENE */
            testError("UNEXPECTED ERROR RECEIVED");
        }
        PKIX_TEST_DECREF_BC(pkixTestErrorResult);
    } else {
        if (testValid == PKIX_TRUE) { /* ENE */
            (void)printf("EXPECTED SUCCESSFUL VALIDATION!\n");
        } else {
            (void)printf("UNEXPECTED SUCCESSFUL VALIDATION!\n");
        }
    }

    subTest("Displaying VerifyTree");

    if (verifyTree == NULL) {
        (void)printf("VerifyTree is NULL\n");
    } else {
        PKIX_TEST_EXPECT_NO_ERROR(PKIX_PL_Object_ToString((PKIX_PL_Object *)verifyTree, &verifyString, plContext));
        (void)printf("verifyTree is\n%s\n", verifyString->escAsciiString);
        PKIX_TEST_DECREF_BC(verifyString);
        PKIX_TEST_DECREF_BC(verifyTree);
    }

cleanup:

    PKIX_TEST_DECREF_AC(valParams);
    PKIX_TEST_DECREF_AC(procParams);
    PKIX_TEST_DECREF_AC(certSelParams);
    PKIX_TEST_DECREF_AC(certSelector);
    PKIX_TEST_DECREF_AC(chainCerts);
    PKIX_TEST_DECREF_AC(anchors);
    PKIX_TEST_DECREF_AC(anchor);
    PKIX_TEST_DECREF_AC(trustedCert);
    PKIX_TEST_DECREF_AC(targetCert);
    PKIX_TEST_DECREF_AC(valResult);

    PKIX_Shutdown(plContext);

    PKIX_TEST_RETURN();

    endTests("OcspChecker");

    return (0);
}
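/*
 * Example invocation, going strictly by printUsage() above (all paths and
 * certificate file names here are hypothetical placeholders; ENE means the
 * chain is expected to validate):
 *
 *     OcspChecker -d ./certdb OcspChecker-Test ENE ./certs anchor.crt ee.crt
 */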
/* DO NOT MODIFY THIS HEADER */ /* MOOSE - Multiphysics Object Oriented Simulation Environment */ /* Prepared by Battelle Energy Alliance, LLC */ /* Under Contract No. DE-AC07-05ID14517 */ /* With the U. S. Department of Energy */ #include "ExampleApp.h" #include "Moose.h" #include "AppFactory.h" #include "MooseSyntax.h" // Example 13 Includes #include "ExampleFunction.h" template<> InputParameters validParams<ExampleApp>() { InputParameters params = validParams<MooseApp>(); params.set<bool>("<API key>") = false; params.set<bool>("<API key>") = false; return params; } ExampleApp::ExampleApp(InputParameters parameters) : MooseApp(parameters) { srand(processor_id()); Moose::registerObjects(_factory); ExampleApp::registerObjects(_factory); Moose::associateSyntax(_syntax, _action_factory); ExampleApp::associateSyntax(_syntax, _action_factory); } ExampleApp::~ExampleApp() { } void ExampleApp::registerApps() { registerApp(ExampleApp); } void ExampleApp::registerObjects(Factory & factory) { registerFunction(ExampleFunction); } void ExampleApp::associateSyntax(Syntax & /*syntax*/, ActionFactory & /*action_factory*/) { }
using System;
using System.Collections.Generic;
using System.Linq;
using System.Web;
using System.Web.UI;
using System.Web.UI.WebControls;

namespace WingtipToys
{
    public partial class About : Page
    {
        protected void Page_Load(object sender, EventArgs e)
        {
        }
    }
}
HandlebarsIntl.__addLocaleData({"locale":"guz","pluralRuleFunction":function (n,ord){if(ord)return"other";return"other"},"fields":{"year":{"displayName":"Omwaka","relative":{"0":"this year","1":"next year","-1":"last year"},"relativeTime":{"future":{"other":"+{0} y"},"past":{"other":"-{0} y"}}},"month":{"displayName":"Omotienyi","relative":{"0":"this month","1":"next month","-1":"last month"},"relativeTime":{"future":{"other":"+{0} m"},"past":{"other":"-{0} m"}}},"day":{"displayName":"Rituko","relative":{"0":"Rero","1":"Mambia","-1":"Igoro"},"relativeTime":{"future":{"other":"+{0} d"},"past":{"other":"-{0} d"}}},"hour":{"displayName":"Ensa","relativeTime":{"future":{"other":"+{0} h"},"past":{"other":"-{0} h"}}},"minute":{"displayName":"Edakika","relativeTime":{"future":{"other":"+{0} min"},"past":{"other":"-{0} min"}}},"second":{"displayName":"Esekendi","relative":{"0":"now"},"relativeTime":{"future":{"other":"+{0} s"},"past":{"other":"-{0} s"}}}}}); HandlebarsIntl.__addLocaleData({"locale":"guz-KE","parentLocale":"guz"});
// Test that the lambda kind is inferred correctly as a return
// expression

fn unique() -> proc():'static { proc() () }

pub fn main() {
}
<?php require_once(dirname(dirname(__FILE__)) . '/libextinc/OAuth.php'); class <API key> extends OAuthDataStore { private $store; private $config; private $defaultversion = '1.0'; protected $_store_tables = array( 'consumers' => 'consumer = array with consumer attributes', 'nonce' => 'nonce+consumer_key = -boolean-', 'requesttorequest' => 'requestToken.key = array(version,callback,consumerKey,)', 'authorized' => 'requestToken.key, verifier = array(<API key>)', 'access' => 'accessToken.key+consumerKey = accestoken', 'request' => 'requestToken.key+consumerKey = requesttoken', ); function __construct() { $this->store = new <API key>('oauth'); $this->config = <API key>::getOptionalConfig('module_oauth.php'); } /** * Attach the data to the token, and establish the Callback URL and verifier * @param $requestTokenKey RequestToken that was authorized * @param $data Data that is authorized and to be attached to the requestToken * @return array(string:url, string:verifier) ; empty verifier for 1.0-response */ public function authorize($requestTokenKey, $data) { $url = null; $verifier = ''; $version = $this->defaultversion; // See whether to remember values from the original requestToken request: $request_attributes = $this->store->get('requesttorequest', $requestTokenKey, ''); // must be there .. if ($request_attributes['value']) { // establish version to work with $v = $request_attributes['value']['version']; if ($v) $version = $v; // establish callback to use if ($request_attributes['value']['callback']) { $url = $request_attributes['value']['callback']; } } // Is there a callback registered? This is leading, even over a supplied o<API key> $oConsumer = $this->lookup_consumer($request_attributes['value']['consumerKey']); if ($oConsumer && ($oConsumer->callback_url)) $url = $oConsumer->callback_url; $verifier = SimpleSAML\Utils\Random::generateID(); $url = \SimpleSAML\Utils\HTTP::addURLParameters($url, array("oauth_verifier"=>$verifier)); $this->store->set('authorized', $requestTokenKey, $verifier, $data, $this->config->getValue('<API key>', 60*30) ); return array($url, $verifier); } /** * Perform lookup whether a given token exists in the list of authorized tokens; if a verifier is * passed as well, the verifier *must* match the verifier that was registered with the token<br/> * Note that an accessToken should never be stored with a verifier * @param $requestToken * @param $verifier * @return unknown_type */ public function isAuthorized($requestToken, $verifier='') { SimpleSAML_Logger::info('OAuth isAuthorized(' . $requestToken . ')'); return $this->store->exists('authorized', $requestToken, $verifier); } public function getAuthorizedData($token, $verifier = '') { SimpleSAML_Logger::info('O<API key>(' . $token . ')'); $data = $this->store->get('authorized', $token, $verifier); return $data['value']; } public function moveAuthorizedData($requestToken, $verifier, $accessTokenKey) { SimpleSAML_Logger::info('O<API key>(' . $requestToken . ', ' . $accessTokenKey . 
')'); // Retrieve authorizedData from authorized.requestToken (with provider verifier) $authorizedData = $this->getAuthorizedData($requestToken, $verifier); // Remove the requesttoken+verifier from authorized store $this->store->remove('authorized', $requestToken, $verifier); // Add accesstoken with authorizedData to authorized store (with empty verifier) // accessTokenKey+consumer => accessToken is already registered in 'access'-table $this->store->set('authorized', $accessTokenKey, '', $authorizedData, $this->config->getValue('accessTokenDuration', 60*60*24)); } public function lookup_consumer($consumer_key) { SimpleSAML_Logger::info('OAuth lookup_consumer(' . $consumer_key . ')'); if (! $this->store->exists('consumers', $consumer_key, '')) return NULL; $consumer = $this->store->get('consumers', $consumer_key, ''); $callback = NULL; if ($consumer['value']['callback_url']) $callback = $consumer['value']['callback_url']; if ($consumer['value']['RSAcertificate']) { return new OAuthConsumer($consumer['value']['key'], $consumer['value']['RSAcertificate'], $callback); } else { return new OAuthConsumer($consumer['value']['key'], $consumer['value']['secret'], $callback); } } function lookup_token($consumer, $tokenType = 'default', $token) { SimpleSAML_Logger::info('OAuth lookup_token(' . $consumer->key . ', ' . $tokenType. ',' . $token . ')'); $data = $this->store->get($tokenType, $token, $consumer->key); if ($data == NULL) throw new Exception('Could not find token'); return $data['value']; } function lookup_nonce($consumer, $token, $nonce, $timestamp) { SimpleSAML_Logger::info('OAuth lookup_nonce(' . $consumer . ', ' . $token. ',' . $nonce . ')'); if ($this->store->exists('nonce', $nonce, $consumer->key)) return TRUE; $this->store->set('nonce', $nonce, $consumer->key, TRUE, $this->config->getValue('nonceCache', 60*60*24*14)); return FALSE; } function new_request_token($consumer, $callback = null, $version = null) { SimpleSAML_Logger::info('O<API key>(' . $consumer . ')'); $lifetime = $this->config->getValue('<API key>', 60*30); $token = new OAuthToken(SimpleSAML\Utils\Random::generateID(), SimpleSAML\Utils\Random::generateID()); $token->callback = $callback; // OAuth1.0-RevA $this->store->set('request', $token->key, $consumer->key, $token, $lifetime); // also store in requestToken->key => array('callback'=>CallbackURL, 'version'=>oauth_version $request_attributes = array( 'callback' => $callback, 'version' => ($version?$version:$this->defaultversion), 'consumerKey' => $consumer->key, ); $this->store->set('requesttorequest', $token->key, '', $request_attributes, $lifetime); // also store in requestToken->key => Consumer->key (enables consumer-lookup during <API key> stage) $this->store->set('requesttoconsumer', $token->key, '', $consumer->key, $lifetime); return $token; } function new_access_token($requestToken, $consumer, $verifier = null) { SimpleSAML_Logger::info('O<API key>(' . $requestToken . ',' . $consumer . ')'); $accestoken = new OAuthToken(SimpleSAML\Utils\Random::generateID(), SimpleSAML\Utils\Random::generateID()); $this->store->set('access', $accestoken->key, $consumer->key, $accestoken, $this->config->getValue('accessTokenDuration', 60*60*24) ); return $accestoken; } /** * Return O<API key> that a given requestToken was issued to * @param $requestTokenKey * @return unknown_type */ public function <API key>($requestTokenKey) { SimpleSAML_Logger::info('O<API key>(' . $requestTokenKey . ')'); if (! 
$this->store->exists('requesttorequest', $requestTokenKey, '')) return NULL; $request = $this->store->get('requesttorequest', $requestTokenKey, ''); $consumerKey = $request['value']['consumerKey']; if (! $consumerKey) { return NULL; } $consumer = $this->store->get('consumers', $consumerKey['value'], ''); return $consumer['value']; } }
/// <reference path='fourslash.ts' />

// Exercises completions for hidden files (ie: those beginning with '.')

// @Filename: f.ts
////
// @Filename: d1/g.ts
////
// @Filename: d1/d2/h.ts
////
// @Filename: d1/d2/d3/i.ts
//// /// <reference path=".\..\..\

// @Filename: test.ts
//// /// <reference path="
//// /// <reference path="./
//// /// <reference path=".\
//// /// <reference path="[|.|]
//// /// <reference path="d1/
//// /// <reference path="d1/./
//// /// <reference path="d1/.\
//// /// <reference path="d1/[|.|]
//// /// <reference path="d1\
//// /// <reference path="d1\./
//// /// <reference path="d1\.\
//// /// <reference path="d1\[|.|]
//// /// <reference path="d1/d2/
//// /// <reference path="d1/d2/./
//// /// <reference path="d1/d2/.\
//// /// <reference path="d1/d2/[|.|]
//// /// <reference path="d1/d2\
//// /// <reference path="d1/d2\./
//// /// <reference path="d1/d2\.\
//// /// <reference path="d1/d2\[|.|]
//// /// <reference path="d1\d2/
//// /// <reference path="d1\d2/./
//// /// <reference path="d1\d2/.\
//// /// <reference path="d1\d2/[|.|]
//// /// <reference path="d1\d2\
//// /// <reference path="d1\d2\./
//// /// <reference path="d1\d2\.\
//// /// <reference path="d1\d2\[|.|]

testBlock(0, 'f.ts', "d1");
testBlock(4, 'g.ts', "d2");
testBlock(8, 'g.ts', "d2");
testBlock(12, 'h.ts', "d3");
testBlock(16, 'h.ts', "d3");
testBlock(20, 'h.ts', "d3");
testBlock(24, 'h.ts', "d3");
verify.completions({ marker: "28", exact: ["g.ts", "d2"], <API key>: true });

function testBlock(offset: number, fileName: string, dir: string) {
    const names = [fileName, dir];
    verify.completions(
        {
            marker: [offset, offset + 1, offset + 2].map(String),
            exact: names,
            <API key>: true,
        },
        {
            marker: String(offset + 3),
            exact: names.map(name => ({ name, replacementSpan: test.ranges()[offset / 4] })),
            <API key>: true,
        });
}
package terraform import ( "github.com/hashicorp/terraform/addrs" "github.com/hashicorp/terraform/configs/configschema" "github.com/hashicorp/terraform/dag" ) // <API key> is a GraphTransformer that expands the count // out for a specific resource. // This assumes that the count is already interpolated. type <API key> struct { Concrete <API key> Schema *configschema.Block // Count is either the number of indexed instances to create, or -1 to // indicate that count is not set at all and thus a no-key instance should // be created. Count int Addr addrs.AbsResource } func (t *<API key>) Transform(g *Graph) error { if t.Count < 0 { // Negative count indicates that count is not set at all. addr := t.Addr.Instance(addrs.NoKey) abstract := <API key>(addr) abstract.Schema = t.Schema var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } g.Add(node) return nil } // For each count, build and add the node for i := 0; i < t.Count; i++ { key := addrs.IntKey(i) addr := t.Addr.Instance(key) abstract := <API key>(addr) abstract.Schema = t.Schema var node dag.Vertex = abstract if f := t.Concrete; f != nil { node = f(abstract) } g.Add(node) } return nil }
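The transformer above reduces to one small decision: a negative `Count` means "count not set", producing a single instance with no index key, while any non-negative count produces one node per index. A minimal standalone Go sketch of that expansion rule, with a plain string standing in for the terraform `addrs` types (the `expand` helper and the resource name are invented for illustration; only the -1 convention comes from the code above):

```go
package main

import "fmt"

// expand applies the transformer's rule: a negative count means "count not
// set", which yields exactly one instance with no index key; otherwise it
// yields one indexed instance per value in [0, count).
func expand(resource string, count int) []string {
	if count < 0 {
		// Count not set at all: one instance without an index.
		return []string{resource}
	}
	out := make([]string, 0, count)
	for i := 0; i < count; i++ {
		out = append(out, fmt.Sprintf("%s[%d]", resource, i))
	}
	return out
}

func main() {
	fmt.Println(expand("aws_instance.web", -1)) // [aws_instance.web]
	fmt.Println(expand("aws_instance.web", 3))  // [aws_instance.web[0] aws_instance.web[1] aws_instance.web[2]]
	fmt.Println(expand("aws_instance.web", 0))  // []: count = 0 expands to no instances
}
```

Note that `count = 0` and "count not set" behave differently, which is why the transformer keys on -1 rather than treating zero as the unset value.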
/** @file
 * @brief IPv4 Autoconfiguration
 */

#ifndef <API key>
#define <API key>

/** Current state of IPv4 Autoconfiguration */
enum <API key> {
	<API key>,
	<API key>,
	<API key>,
	<API key>,
	<API key>,
};

/**
 * @brief Initialize IPv4 auto configuration engine.
 */
#if defined(<API key>)
void <API key>(void);
#else
#define <API key>(...)
#endif

#endif /* <API key> */
package builds

import (
	"fmt"
	"path/filepath"
	"strings"
	"time"

	g "github.com/onsi/ginkgo"
	o "github.com/onsi/gomega"
	"k8s.io/kubernetes/test/e2e"

	exutil "github.com/openshift/origin/test/extended/util"
)

var _ = g.Describe("default: S2I incremental build with push and pull to authenticated registry", func() {
	defer g.GinkgoRecover()
	var (
		templateFixture = exutil.FixturePath("fixtures", "<API key>.json")
		oc              = exutil.NewCLI("build-sti-env", exutil.KubeConfigPath())
	)

	g.JustBeforeEach(func() {
		g.By("waiting for builder service account")
		err := exutil.<API key>(oc.KubeREST().ServiceAccounts(oc.Namespace()))
		o.Expect(err).NotTo(o.HaveOccurred())
	})

	g.Describe("Building from a template", func() {
		g.It(fmt.Sprintf("should create a build from %q template and run it", templateFixture), func() {
			oc.SetOutputDir(exutil.TestContext.OutputDir)

			g.By(fmt.Sprintf("calling oc new-app -f %q", templateFixture))
			err := oc.Run("new-app").Args("-f", templateFixture).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build")
			buildName, err := oc.Run("start-build").Args("initial-build").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.<API key>, exutil.<API key>)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("starting a test build using the image produced by the last build")
			buildName, err = oc.Run("start-build").Args("internal-build").Output()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the build is in Complete phase")
			err = exutil.WaitForABuild(oc.REST().Builds(oc.Namespace()), buildName, exutil.<API key>, exutil.<API key>)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("getting the Docker image reference from ImageStream")
			imageName, err := exutil.<API key>(oc.REST().ImageStreams(oc.Namespace()), "internal-image", "latest")
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("writing the pod definition to a file")
			outputPath := filepath.Join(exutil.TestContext.OutputDir, oc.Namespace()+"-sample-pod.json")
			pod := exutil.CreatePodForImage(imageName)
			err = exutil.WriteObjectToFile(pod, outputPath)
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By(fmt.Sprintf("calling oc create -f %q", outputPath))
			err = oc.Run("create").Args("-f", outputPath).Execute()
			o.Expect(err).NotTo(o.HaveOccurred())

			g.By("expecting the pod to be running")
			err = oc.KubeFramework().WaitForPodRunning(pod.Name)
			o.Expect(err).NotTo(o.HaveOccurred())

			// even though the pod is running, the app isn't always started
			// so wait until webrick output is complete before curling.
			logs := ""
			count := 0
			for !strings.Contains(logs, "8080") && count < 10 {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				time.Sleep(time.Second)
				count++
			}

			g.By("expecting the pod container has saved artifacts")
			out, err := oc.Run("exec").Args("-p", pod.Name, "--", "curl", "http://0.0.0.0:8080").Output()
			o.Expect(err).NotTo(o.HaveOccurred())
			if !strings.Contains(out, "artifacts exist") {
				logs, _ = oc.Run("logs").Args(pod.Name).Output()
				e2e.Failf("Pod %q does not contain expected artifacts: %q\n%q", pod.Name, out, logs)
			}
		})
	})
})
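The log-polling loop above (originally written as `for strings.Contains(...)`, which never ran because `logs` starts empty; fixed to `!strings.Contains`) is an instance of a generic poll-until-ready pattern. A small self-contained Go sketch of that pattern; the `waitFor` helper and the simulated log source are illustrative, not part of the test utilities:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

// waitFor polls fetch until ok accepts its output, up to attempts tries,
// sleeping interval between tries. It returns the last output seen and
// whether the condition was met, mirroring the wait-for-webrick loop above.
func waitFor(fetch func() string, ok func(string) bool, attempts int, interval time.Duration) (string, bool) {
	var out string
	for i := 0; i < attempts; i++ {
		out = fetch()
		if ok(out) {
			return out, true
		}
		time.Sleep(interval)
	}
	return out, false
}

func main() {
	// Simulated log source that only mentions the port on the third read.
	reads := []string{"", "booting", "INFO WEBrick::HTTPServer#start: port=8080"}
	i := 0
	fetch := func() string {
		s := reads[i%len(reads)]
		i++
		return s
	}

	logs, ready := waitFor(fetch,
		func(s string) bool { return strings.Contains(s, "8080") },
		10, time.Millisecond)
	fmt.Println(ready, logs) // true INFO WEBrick::HTTPServer#start: port=8080
}
```

Bounding the loop by both attempts and a readiness predicate, as the test does, keeps a broken pod from hanging the suite while still tolerating slow startup.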
// This source file is part of the Swift.org open source project

#define DEBUG_TYPE "gsil-gen"
#include "swift/AST/SILOptions.h"
#include "swift/SIL/SILPrintContext.h"
#include "swift/SIL/SILModule.h"
#include "swift/SILOptimizer/PassManager/Transforms.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"

using namespace swift;

namespace {

/// A pass for generating debug info on SIL level.
///
/// This pass is only enabled if SILOptions::<API key> is set (i.e. if the
/// -gsil command line option is specified). The pass writes all SIL
/// functions into one or multiple output files, depending on the size of
/// the SIL. The names of the output files are derived from the main output
/// file:
///
///     output file name = <<API key>>.gsil_<n>.sil
///
/// Where <n> is a consecutive number. The files are stored in the same
/// directory as the main output file.
///
/// The debug locations and scopes of all functions and instructions are
/// changed to point to the generated SIL output files. This enables
/// debugging and profiling on SIL level.
class <API key> : public SILModuleTransform {

  enum {
    /// To prevent extra large output files, e.g. when compiling the stdlib.
    LineLimitPerFile = 10000
  };

  /// A stream for counting line numbers.
  struct LineCountStream : public llvm::raw_ostream {
    llvm::raw_ostream &Underlying;
    int LineNum = 1;
    uint64_t Pos = 0;

    void write_impl(const char *Ptr, size_t Size) override {
      for (size_t Idx = 0; Idx < Size; Idx++) {
        char c = Ptr[Idx];
        if (c == '\n')
          ++LineNum;
      }
      Underlying.write(Ptr, Size);
      Pos += Size;
    }

    uint64_t current_pos() const override { return Pos; }

    LineCountStream(llvm::raw_ostream &Underlying) :
      llvm::raw_ostream(/* unbuffered = */ true),
      Underlying(Underlying) { }

    ~LineCountStream() {
      flush();
    }
  };

  /// A print context which records the line numbers where instructions are
  /// printed.
  struct PrintContext : public SILPrintContext {

    LineCountStream LCS;

    llvm::DenseMap<const SILInstruction *, int> LineNums;

    void <API key>(const SILInstruction *I) override {
      // Record the current line number of the instruction.
      LineNums[I] = LCS.LineNum;
    }

    PrintContext(llvm::raw_ostream &OS) : SILPrintContext(LCS), LCS(OS) { }

    virtual ~PrintContext() { }
  };

  void run() override {
    SILModule *M = getModule();
    StringRef FileBaseName = M->getOptions().<API key>;
    if (FileBaseName.empty())
      return;

    DEBUG(llvm::dbgs() << "** <API key> **\n");

    std::vector<SILFunction *> PrintedFuncs;
    int FileIdx = 0;
    auto FIter = M->begin();
    while (FIter != M->end()) {

      std::string FileName;
      llvm::raw_string_ostream NameOS(FileName);
      NameOS << FileBaseName << ".gsil_" << FileIdx++ << ".sil";
      NameOS.flush();

      char *FileNameBuf = (char *)M->allocate(FileName.size() + 1, 1);
      strcpy(FileNameBuf, FileName.c_str());

      DEBUG(llvm::dbgs() << "Write debug SIL file " << FileName << '\n');

      std::error_code EC;
      llvm::raw_fd_ostream OutFile(FileName, EC,
                                   llvm::sys::fs::OpenFlags::F_None);
      assert(!OutFile.has_error() && !EC && "Can't write SIL debug file");

      PrintContext Ctx(OutFile);

      // Write functions until we reach the LineLimitPerFile.
      do {
        SILFunction *F = &*FIter++;
        PrintedFuncs.push_back(F);

        // Set the debug scope for the function.
        SILLocation::DebugLoc DL(Ctx.LCS.LineNum, 1, FileNameBuf);
        RegularLocation Loc(DL);
        SILDebugScope *Scope = new (*M) SILDebugScope(Loc, F);
        F->setDebugScope(Scope);

        // Ensure that the function is visible for debugging.
        F->setBare(IsNotBare);

        // Print it to the output file.
        F->print(Ctx);
      } while (FIter != M->end() && Ctx.LCS.LineNum < LineLimitPerFile);

      // Set the debug locations of all instructions.
for (SILFunction *F : PrintedFuncs) { const SILDebugScope *Scope = F->getDebugScope(); for (SILBasicBlock &BB : *F) { for (SILInstruction &I : BB) { SILLocation Loc = I.getLoc(); SILLocation::DebugLoc DL(Ctx.LineNums[&I], 1, FileNameBuf); assert(DL.Line && "no line set for instruction"); if (Loc.is<ReturnLocation>() || Loc.is<<API key>>()) { Loc.setDebugInfoLoc(DL); I.setDebugLocation(SILDebugLocation(Loc, Scope)); } else { RegularLocation RLoc(DL); I.setDebugLocation(SILDebugLocation(RLoc, Scope)); } } } } PrintedFuncs.clear(); } } StringRef getName() override { return "<API key>"; } }; } // end anonymous namespace SILTransform *swift::<API key>() { return new <API key>(); }
from Child import Child from Node import Node # noqa: I201 AVAILABILITY_NODES = [ # <API key> -> availability-entry <API key>? Node('<API key>', kind='SyntaxCollection', element='<API key>'), # Wrapper for all the different entries that may occur inside @available # availability-entry -> '*' ','? # | identifier ','? # | <API key> ','? # | <API key> ','? Node('<API key>', kind='Syntax', description=''' A single argument to an `@available` argument like `*`, `iOS 10.1`, \ or `message: "This has been deprecated"`. ''', children=[ Child('Entry', kind='Syntax', description='The actual argument', node_choices=[ Child('Star', kind='<API key>', text_choices=['*']), Child('<API key>', kind='IdentifierToken'), Child('<API key>', kind='<API key>'), Child('<API key>', kind='<API key>'), ]), Child('TrailingComma', kind='CommaToken', is_optional=True, description=''' A trailing comma if the argument is followed by another \ argument '''), ]), # Representation of 'deprecated: 2.3', 'message: "Hello world"' etc. # <API key> -> identifier ':' version-tuple Node('<API key>', kind='Syntax', description=''' A argument to an `@available` attribute that consists of a label and \ a value, e.g. `message: "This has been deprecated"`. ''', children=[ Child('Label', kind='IdentifierToken', description='The label of the argument'), Child('Colon', kind='ColonToken', description='The colon separating label and value'), Child('Value', kind='Syntax', node_choices=[ Child('String', 'StringLiteralToken'), Child('Version', 'VersionTuple'), ], description='The value of this labeled argument',), ]), # Representation for 'iOS 10', 'swift 3.4' etc. # <API key> -> identifier version-tuple Node('<API key>', kind='Syntax', description=''' An argument to `@available` that restricts the availability on a \ certain platform to a version, e.g. `iOS 10` or `swift 3.4`. ''', children=[ Child('Platform', kind='IdentifierToken', classification='Keyword', description=''' The name of the OS on which the availability should be \ restricted or 'swift' if the availability should be \ restricted based on a Swift version. '''), Child('Version', kind='VersionTuple'), ]), # version-tuple -> integer-literal # | float-literal # | float-literal '.' integer-literal Node('VersionTuple', kind='Syntax', description=''' A version number of the form major.minor.patch in which the minor \ and patch part may be ommited. ''', children=[ Child('MajorMinor', kind='Syntax', node_choices=[ Child('Major', kind='IntegerLiteralToken'), Child('MajorMinor', kind='<API key>') ], description=''' In case the version consists only of the major version, an \ integer literal that specifies the major version. In case \ the version consists of major and minor version number, a \ floating literal in which the decimal part is interpreted \ as the minor version. '''), Child('PatchPeriod', kind='PeriodToken', is_optional=True, description=''' If the version contains a patch number, the period \ separating the minor from the patch number. '''), Child('PatchVersion', kind='IntegerLiteralToken', is_optional=True, description=''' The patch version if specified. '''), ]), ]
// +build integration package storage import ( "bytes" "encoding/base64" "errors" "fmt" "io/ioutil" "log" "net/http" "os" "strings" "testing" "golang.org/x/net/context" "golang.org/x/oauth2" "golang.org/x/oauth2/google" "google.golang.org/api/googleapi" storage "google.golang.org/api/storage/v1" ) type object struct { name, contents string } var ( projectID string bucket string objects = []object{ {"obj1", testContents}, {"obj2", testContents}, {"obj/with/slashes", testContents}, {"resumable", testContents}, {"large", strings.Repeat("a", 514)}, // larger than the first section of content that is sniffed by ContentSniffer. } aclObjects = []string{"acl1", "acl2"} copyObj = "copy-object" ) const ( envProject = "<API key>" envPrivateKey = "<API key>" // NOTE that running this test on a bucket deletes ALL contents of the bucket! envBucket = "<API key>" testContents = "some text that will be saved to a bucket object" ) func verifyAcls(obj *storage.Object, wantDomainRole, wantAllUsersRole string) (err error) { var gotDomainRole, gotAllUsersRole string for _, acl := range obj.Acl { if acl.Entity == "domain-google.com" { gotDomainRole = acl.Role } if acl.Entity == "allUsers" { gotAllUsersRole = acl.Role } } if gotDomainRole != wantDomainRole { err = fmt.Errorf("domain-google.com role = %q; want %q", gotDomainRole, wantDomainRole) } if gotAllUsersRole != wantAllUsersRole { err = fmt.Errorf("allUsers role = %q; want %q; %v", gotAllUsersRole, wantAllUsersRole, err) } return err } // TODO(gmlewis): Move this to a common location. func tokenSource(ctx context.Context, scopes ...string) (oauth2.TokenSource, error) { keyFile := os.Getenv(envPrivateKey) if keyFile == "" { return nil, errors.New(envPrivateKey + " not set") } jsonKey, err := ioutil.ReadFile(keyFile) if err != nil { return nil, fmt.Errorf("unable to read %q: %v", keyFile, err) } conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) if err != nil { return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) } return conf.TokenSource(ctx), nil } const defaultType = "text/plain; charset=utf-8" // writeObject writes some data and default metadata to the specified object. // Resumable upload is used if resumable is true. // The written data is returned. 
func writeObject(s *storage.Service, bucket, obj string, resumable bool, contents string) error {
	o := &storage.Object{
		Bucket:          bucket,
		Name:            obj,
		ContentType:     defaultType,
		ContentEncoding: "utf-8",
		ContentLanguage: "en",
		Metadata:        map[string]string{"foo": "bar"},
	}
	f := strings.NewReader(contents)
	insert := s.Objects.Insert(bucket, o)
	if resumable {
		insert.ResumableMedia(context.Background(), f, int64(len(contents)), defaultType)
	} else {
		insert.Media(f)
	}
	_, err := insert.Do()
	return err
}

func checkMetadata(t *testing.T, s *storage.Service, bucket, obj string) {
	o, err := s.Objects.Get(bucket, obj).Do()
	if err != nil {
		t.Error(err)
	}
	if got, want := o.Name, obj; got != want {
		t.Errorf("name of %q = %q; want %q", obj, got, want)
	}
	if got, want := o.ContentType, defaultType; got != want {
		t.Errorf("contentType of %q = %q; want %q", obj, got, want)
	}
	if got, want := o.Metadata["foo"], "bar"; got != want {
		t.Errorf("metadata entry foo of %q = %q; want %q", obj, got, want)
	}
}

func createService() *storage.Service {
	if projectID = os.Getenv(envProject); projectID == "" {
		log.Print("no project ID specified")
		return nil
	}
	if bucket = os.Getenv(envBucket); bucket == "" {
		log.Print("no bucket specified")
		return nil
	}
	ctx := context.Background()
	ts, err := tokenSource(ctx, storage.<API key>)
	if err != nil {
		log.Printf("createService: %v", err)
		return nil
	}
	client := oauth2.NewClient(ctx, ts)
	s, err := storage.New(client)
	if err != nil {
		log.Printf("unable to create service: %v", err)
		return nil
	}
	return s
}

func TestMain(m *testing.M) {
	if err := cleanup(); err != nil {
		log.Fatalf("Pre-test cleanup failed: %v", err)
	}
	exit := m.Run()
	if err := cleanup(); err != nil {
		log.Fatalf("Post-test cleanup failed: %v", err)
	}
	os.Exit(exit)
}

func TestContentType(t *testing.T) {
	s := createService()
	if s == nil {
		t.Fatal("Could not create service")
	}

	type testCase struct {
		objectContentType string
		<API key>         bool
		optionContentType string
		wantContentType   string
	}

	// The Media method will use resumable upload if the supplied data is
	// larger than googleapi.<API key>. We run the following
	// tests with two different file contents: one that will trigger
	// resumable upload, and one that won't.
	forceResumableData := bytes.Repeat([]byte("a"), googleapi.<API key>+1)
	smallData := bytes.Repeat([]byte("a"), 2)

	// In the following test, the content type, if any, in the Object struct is always "text/plain".
	// The content type configured via googleapi.ContentType, if any, is always "text/html".
	for _, tc := range []testCase{
		// With content type specified in the object struct
		{
			objectContentType: "text/plain",
			<API key>:         true,
			optionContentType: "text/html",
			wantContentType:   "text/html",
		},
		{
			objectContentType: "text/plain",
			<API key>:         true,
			optionContentType: "",
			wantContentType:   "text/plain",
		},
		{
			objectContentType: "text/plain",
			<API key>:         false,
			wantContentType:   "text/plain; charset=utf-8", // sniffed.
		},
		// Without content type specified in the object struct
		{
			<API key>:         true,
			optionContentType: "text/html",
			wantContentType:   "text/html",
		},
		{
			<API key>:         true,
			optionContentType: "",
			wantContentType:   "", // Result is an object without a content type.
		},
		{
			<API key>:       false,
			wantContentType: "text/plain; charset=utf-8", // sniffed.
		},
	} {
		// The behavior should be the same, regardless of whether resumable upload is used or not.
for _, data := range [][]byte{smallData, forceResumableData} { o := &storage.Object{ Bucket: bucket, Name: "test-content-type", ContentType: tc.objectContentType, } call := s.Objects.Insert(bucket, o) var opts []googleapi.MediaOption if tc.<API key> { opts = append(opts, googleapi.ContentType(tc.optionContentType)) } call.Media(bytes.NewReader(data), opts...) _, err := call.Do() if err != nil { t.Fatalf("unable to insert object %q: %v", o.Name, err) } readObj, err := s.Objects.Get(bucket, o.Name).Do() if err != nil { t.Error(err) } if got, want := readObj.ContentType, tc.wantContentType; got != want { t.Errorf("contentType of %q; got %q; want %q", o.Name, got, want) } } } } func TestFunctions(t *testing.T) { s := createService() if s == nil { t.Fatal("Could not create service") } t.Logf("Listing buckets for project %q", projectID) var numBuckets int pageToken := "" for { call := s.Buckets.List(projectID) if pageToken != "" { call.PageToken(pageToken) } resp, err := call.Do() if err != nil { t.Fatalf("unable to list buckets for project %q: %v", projectID, err) } numBuckets += len(resp.Items) if pageToken = resp.NextPageToken; pageToken == "" { break } } if numBuckets == 0 { t.Fatalf("no buckets found for project %q", projectID) } for _, obj := range objects { t.Logf("Writing %q", obj.name) // TODO(mcgreevy): stop relying on "resumable" name to determine whether to // do a resumable upload. err := writeObject(s, bucket, obj.name, obj.name == "resumable", obj.contents) if err != nil { t.Fatalf("unable to insert object %q: %v", obj.name, err) } } for _, obj := range objects { t.Logf("Reading %q", obj.name) resp, err := s.Objects.Get(bucket, obj.name).Download() if err != nil { t.Fatalf("unable to get object %q: %v", obj.name, err) } slurp, err := ioutil.ReadAll(resp.Body) if err != nil { t.Errorf("unable to read response body %q: %v", obj.name, err) } resp.Body.Close() if got, want := string(slurp), obj.contents; got != want { t.Errorf("contents of %q = %q; want %q", obj.name, got, want) } } name := "obj-not-exists" if _, err := s.Objects.Get(bucket, name).Download(); !isError(err, http.StatusNotFound) { t.Errorf("object %q should not exist, err = %v", name, err) } else { t.Log("Successfully tested StatusNotFound.") } for _, obj := range objects { t.Logf("Checking %q metadata", obj.name) checkMetadata(t, s, bucket, obj.name) } name = objects[0].name t.Logf("Rewriting %q to %q", name, copyObj) copy, err := s.Objects.Rewrite(bucket, name, bucket, copyObj, nil).Do() if err != nil { t.Errorf("unable to rewrite object %q to %q: %v", name, copyObj, err) } if copy.Resource.Name != copyObj { t.Errorf("copy object's name = %q; want %q", copy.Resource.Name, copyObj) } if copy.Resource.Bucket != bucket { t.Errorf("copy object's bucket = %q; want %q", copy.Resource.Bucket, bucket) } // Note that arrays such as ACLs below are completely overwritten using Patch // semantics, so these must be updated in a read-modify-write sequence of operations. // See https://cloud.google.com/storage/docs/json_api/v1/how-tos/performance#patch-semantics // for more details. 
t.Logf("Updating attributes of %q", name) obj, err := s.Objects.Get(bucket, name).Projection("full").Fields("acl").Do() if err != nil { t.Errorf("Objects.Get(%q, %q): %v", bucket, name, err) } if err := verifyAcls(obj, "", ""); err != nil { t.Errorf("before update ACLs: %v", err) } obj.ContentType = "text/html" for _, entity := range []string{"domain-google.com", "allUsers"} { obj.Acl = append(obj.Acl, &storage.ObjectAccessControl{Entity: entity, Role: "READER"}) } updated, err := s.Objects.Patch(bucket, name, obj).Projection("full").Fields("contentType", "acl").Do() if err != nil { t.Errorf("Objects.Patch(%q, %q, %#v) failed with %v", bucket, name, obj, err) } if want := "text/html"; updated.ContentType != want { t.Errorf("updated.ContentType == %q; want %q", updated.ContentType, want) } if err := verifyAcls(updated, "READER", "READER"); err != nil { t.Errorf("after update ACLs: %v", err) } t.Log("Testing checksums") checksumCases := []struct { name string contents string size uint64 md5 string crc32c uint32 }{ { name: "checksum-object", contents: "helloworld", size: 10, md5: "<API key>", crc32c: 1456190592, }, { name: "zero-object", contents: "", size: 0, md5: "<API key>", crc32c: 0, }, } for _, c := range checksumCases { f := strings.NewReader(c.contents) o := &storage.Object{ Bucket: bucket, Name: c.name, ContentType: defaultType, ContentEncoding: "utf-8", ContentLanguage: "en", } obj, err := s.Objects.Insert(bucket, o).Media(f).Do() if err != nil { t.Fatalf("unable to insert object %q: %v", obj, err) } if got, want := obj.Size, c.size; got != want { t.Errorf("object %q size = %v; want %v", c.name, got, want) } md5, err := base64.StdEncoding.DecodeString(obj.Md5Hash) if err != nil { t.Errorf("object %q base64 decode of MD5 %q: %v", c.name, obj.Md5Hash, err) } if got, want := fmt.Sprintf("%x", md5), c.md5; got != want { t.Errorf("object %q MD5 = %q; want %q", c.name, got, want) } var crc32c uint32 d, err := base64.StdEncoding.DecodeString(obj.Crc32c) if err != nil { t.Errorf("object %q base64 decode of CRC32 %q: %v", c.name, obj.Crc32c, err) } if err == nil && len(d) == 4 { crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3]) } if got, want := crc32c, c.crc32c; got != want { t.Errorf("object %q CRC32C = %v; want %v", c.name, got, want) } } } // cleanup destroys ALL objects in the bucket! func cleanup() error { s := createService() if s == nil { return errors.New("Could not create service") } var pageToken string var failed bool for { call := s.Objects.List(bucket) if pageToken != "" { call.PageToken(pageToken) } resp, err := call.Do() if err != nil { return fmt.Errorf("cleanup list failed: %v", err) } for _, obj := range resp.Items { log.Printf("Cleanup deletion of %q", obj.Name) if err := s.Objects.Delete(bucket, obj.Name).Do(); err != nil { // Print the error out, but keep going. log.Printf("Cleanup deletion of %q failed: %v", obj.Name, err) failed = true } if _, err := s.Objects.Get(bucket, obj.Name).Download(); !isError(err, http.StatusNotFound) { log.Printf("object %q should not exist, err = %v", obj.Name, err) failed = true } else { log.Printf("Successfully deleted %q.", obj.Name) } } if pageToken = resp.NextPageToken; pageToken == "" { break } } if failed { return errors.New("Failed to delete at least one object") } return nil } func isError(err error, code int) bool { if err == nil { return false } ae, ok := err.(*googleapi.Error) return ok && ae.Code == code }
class Prefixsuffix < Formula desc "GUI batch renaming utility" homepage "https://github.com/murraycu/prefixsuffix" url "https://download.gnome.org/sources/prefixsuffix/0.6/prefixsuffix-0.6.9.tar.xz" sha256 "<SHA256-like>" bottle do sha256 "<SHA256-like>" => :sierra sha256 "<SHA256-like>" => :el_capitan sha256 "<SHA256-like>" => :yosemite sha256 "<SHA256-like>" => :mavericks end depends_on "pkg-config" => :build depends_on "intltool" => :build depends_on "gtkmm3" needs :cxx11 def install ENV.cxx11 system "./configure", "--<API key>", "--<API key>", "--prefix=#{prefix}", "--<API key>" system "make", "install" end def post_install system "#{Formula["glib"].opt_bin}/<API key>", "#{HOMEBREW_PREFIX}/share/glib-2.0/schemas" end test do system "#{bin}/prefixsuffix", "--version" end end
This unofficial harmony branch has been removed. Esprima 2.x continues to add support for ES6 features (#1099). It is recommended to use the latest 2.x release, e.g. from its official [npm module](https:
class Multimarkdown < Formula
  desc "Turn marked-up plain text into well-formatted documents"
  homepage "http://fletcherpenney.net/multimarkdown/"
  # Use git tag instead of the tarball to get submodules
  url "https://github.com/fletcher/MultiMarkdown-5.git",
      :tag => "5.4.0",
      :revision => "<SHA1-like>"
  head "https://github.com/fletcher/MultiMarkdown-5.git"

  bottle do
    cellar :any_skip_relocation
    sha256 "<SHA256-like>" => :sierra
    sha256 "<SHA256-like>" => :el_capitan
    sha256 "<SHA256-like>" => :yosemite
    sha256 "<SHA256-like>" => :mavericks
  end

  depends_on "cmake" => :build

  conflicts_with "mtools", :because => "both install `mmd` binaries"
  conflicts_with "markdown", :because => "both install `markdown` binaries"
  conflicts_with "discount", :because => "both install `markdown` binaries"

  def install
    system "sh", "link_git_modules"
    system "sh", "update_git_modules"
    system "make"
    cd "build" do
      system "make"
      bin.install "multimarkdown"
    end
  end
end
cask 'fontexplorer-x-pro' do version '6.0.2' sha256 '<SHA256-like>' url "http://fast.fontexplorerx.com/FontExplorerXPro#{version.no_dots}.dmg" name 'FontExplorer X Pro' homepage 'https: depends_on macos: '>= :mountain_lion' app 'FontExplorer X Pro.app' zap delete: [ '/Library/<API key>/com.linotype.FontExplorerX.securityhelper', '/Library/LaunchDaemons/com.linotype.FontExplorerX.securityhelper.plist', '~/Library/Application Support/Linotype/FontExplorer X', '~/Library/Application\ Support/com.apple.sharedfilelist/com.apple.LSSharedFileList.<API key>/com.linotype.fontexplorerx.sfl', '~/Library/Caches/com.linotype.FontExplorerX', '~/Library/Cookies/com.linotype.FontExplorerX.binarycookies', '~/Library/LaunchAgents/com.linotype.FontFolderProtector.plist', '~/Library/Preferences/com.linotype.FontExplorerX.plist', '~/Library/Saved\ Application\ State/com.linotype.FontExplorerX.savedState', ] end
#include "config.h" #include <math.h> #include "libgfortran.h" #if defined (HAVE_GFC_REAL_16) && defined (HAVE_SCALBNL) && defined (HAVE_FREXPL) extern GFC_REAL_16 set_exponent_r16 (GFC_REAL_16 s, GFC_INTEGER_4 i); export_proto(set_exponent_r16); GFC_REAL_16 set_exponent_r16 (GFC_REAL_16 s, GFC_INTEGER_4 i) { int dummy_exp; return scalbnl (frexpl (s, &dummy_exp), i); } #endif
-a *.foo,-a ns|a{a:b}
<?php /** * @see <API key> */ // require_once 'Zend/Service/DeveloperGarden/Request/RequestAbstract.php'; class Zend_Service_DeveloperGarden_Request_ConferenceCall_CreateConferenceTemplateRequest extends <API key> { /** * unique owner id * * @var string */ public $ownerId = null; /** * object with details for this conference * * @var <API key> */ public $detail = null; /** * array with <API key> elements * * @var array */ public $participants = null; /** * constructor * * @param integer $environment * @param string $ownerId * @param <API key> $conferenceDetails * @param array $<API key> */ public function __construct($environment, $ownerId, <API key> $conferenceDetails, array $<API key> = null ) { parent::__construct($environment); $this->setOwnerId($ownerId) ->setDetail($conferenceDetails) ->setParticipants($<API key>); } /** * sets $participants * * @param array $participants * @return Zend_Service_DeveloperGarden_Request_ConferenceCall_CreateConferenceTemplateRequest */ public function setParticipants(array $participants = null) { $this->participants = $participants; return $this; } /** * sets $detail * * @param <API key> $detail * @return Zend_Service_DeveloperGarden_Request_ConferenceCall_CreateConferenceTemplateRequest */ public function setDetail(<API key> $detail) { $this->detail = $detail; return $this; } /** * sets $ownerId * * @param string $ownerId * @return Zend_Service_DeveloperGarden_Request_ConferenceCall_CreateConferenceTemplateRequest */ public function setOwnerId($ownerId) { $this->ownerId = $ownerId; return $this; } }
/** * Implements custom element observation and attached/detached callbacks * @module observe */ window.CustomElements.addModule(function(scope){ // imports var flags = scope.flags; var forSubtree = scope.forSubtree; var forDocumentTree = scope.forDocumentTree; /* Manage nodes attached to document trees */ // manage lifecycle on added node and it's subtree; upgrade the node and // entire subtree if necessary and process attached for the node and entire // subtree function addedNode(node, isAttached) { return added(node, isAttached) || addedSubtree(node, isAttached); } // manage lifecycle on added node; upgrade if necessary and process attached function added(node, isAttached) { if (scope.upgrade(node, isAttached)) { // Return true to indicate return true; } if (isAttached) { attached(node); } } // manage lifecycle on added node's subtree only; allows the entire subtree // to upgrade if necessary and process attached function addedSubtree(node, isAttached) { forSubtree(node, function(e) { if (added(e, isAttached)) { return true; } }); } // On platforms without MutationObserver, mutations may not be // reliable and therefore attached/detached are not reliable. // To make these callbacks less likely to fail, we defer all inserts and removes // to give a chance for elements to be attached into dom. // This ensures attachedCallback fires for elements that are created and // immediately added to dom. var <API key> = (!window.MutationObserver || (window.MutationObserver === window.JsMutationObserver)); scope.<API key> = <API key>; var isPendingMutations = false; var pendingMutations = []; function deferMutation(fn) { pendingMutations.push(fn); if (!isPendingMutations) { isPendingMutations = true; setTimeout(takeMutations); } } function takeMutations() { isPendingMutations = false; var $p = pendingMutations; for (var i=0, l=$p.length, p; (i<l) && (p=$p[i]); i++) { p(); } pendingMutations = []; } function attached(element) { if (<API key>) { deferMutation(function() { _attached(element); }); } else { _attached(element); } } // NOTE: due to how MO works (see comments below), an element may be attached // multiple times so we protect against extra processing here. function _attached(element) { // track element for insertion if it's upgraded and cares about insertion // bail if the element is already marked as attached if (element.__upgraded__ && !element.__attached) { element.__attached = true; if (element.attachedCallback) { element.attachedCallback(); } } } /* Manage nodes detached from document trees */ // manage lifecycle on detached node and it's subtree; process detached // for the node and entire subtree function detachedNode(node) { detached(node); forSubtree(node, function(e) { detached(e); }); } function detached(element) { if (<API key>) { deferMutation(function() { _detached(element); }); } else { _detached(element); } } // NOTE: due to how MO works (see comments below), an element may be detached // multiple times so we protect against extra processing here. function _detached(element) { // track element for removal if it's upgraded and cares about removal // bail if the element is already marked as not attached if (element.__upgraded__ && element.__attached) { element.__attached = false; if (element.detachedCallback) { element.detachedCallback(); } } } // recurse up the tree to check if an element is actually in the main document. 
function inDocument(element) { var p = element; var doc = window.wrap(document); while (p) { if (p == doc) { return true; } p = p.parentNode || ((p.nodeType === Node.<API key>) && p.host); } } // Install an element observer on all shadowRoots owned by node. function watchShadow(node) { if (node.shadowRoot && !node.shadowRoot.__watched) { flags.dom && console.log('watching shadow-root for: ', node.localName); // watch all unwatched roots... var root = node.shadowRoot; while (root) { observe(root); root = root.olderShadowRoot; } } } /* NOTE: In order to process all mutations, it's necessary to recurse into any added nodes. However, it's not possible to determine a priori if a node will get its own mutation record. This means *nodes can be seen multiple times*. Here's an example: (1) In this case, recursion is required to see `child`: node.innerHTML = '<div><child></child></div>' (2) In this case, child will get its own mutation record: node.appendChild(div).appendChild(child); We cannot know ahead of time if we need to walk into the node in (1) so we do and see child; however, if it was added via case (2) then it will have its own record and therefore be seen 2x. */ function handler(root, mutations) { // for logging only if (flags.dom) { var mx = mutations[0]; if (mx && mx.type === 'childList' && mx.addedNodes) { if (mx.addedNodes) { var d = mx.addedNodes[0]; while (d && d !== document && !d.host) { d = d.parentNode; } var u = d && (d.URL || d._URL || (d.host && d.host.localName)) || ''; u = u.split('/?').shift().split('/').pop(); } } console.group('mutations (%d) [%s]', mutations.length, u || ''); } // handle mutations // NOTE: do an `inDocument` check dynamically here. It's possible that `root` // is a document in which case the answer here can never change; however // `root` may be an element like a shadowRoot that can be added/removed // from the main document. var isAttached = inDocument(root); mutations.forEach(function(mx) { if (mx.type === 'childList') { forEach(mx.addedNodes, function(n) { if (!n.localName) { return; } addedNode(n, isAttached); }); forEach(mx.removedNodes, function(n) { if (!n.localName) { return; } detachedNode(n); }); } }); flags.dom && console.groupEnd(); }; /* When elements are added to the dom, upgrade and attached/detached may be asynchronous. `CustomElements.takeRecords` can be called to process any pending upgrades and attached/detached callbacks synchronously. */ function takeRecords(node) { node = window.wrap(node); // If the optional node is not supplied, assume we mean the whole document. if (!node) { node = window.wrap(document); } // Find the root of the tree, which will be an Document or ShadowRoot. while (node.parentNode) { node = node.parentNode; } var observer = node.__observer; if (observer) { handler(node, observer.takeRecords()); takeMutations(); } } var forEach = Array.prototype.forEach.call.bind(Array.prototype.forEach); // observe a node tree; bail if it's already being observed. function observe(inRoot) { if (inRoot.__observer) { return; } // For each ShadowRoot, we create a new MutationObserver, so the root can be // garbage collected once all references to the `inRoot` node are gone. // Give the handler access to the root so that an 'in document' check can // be done. var observer = new MutationObserver(handler.bind(this, inRoot)); observer.observe(inRoot, {childList: true, subtree: true}); inRoot.__observer = observer; } // upgrade an entire document and observe it for elements changes. 
function upgradeDocument(doc) { doc = window.wrap(doc); flags.dom && console.group('upgradeDocument: ', (doc.baseURI).split('/').pop()); var isMainDocument = (doc === window.wrap(document)); addedNode(doc, isMainDocument); observe(doc); flags.dom && console.groupEnd(); } /* This method is intended to be called when the document tree (including imports) has pending custom elements to upgrade. It can be called multiple times and should do nothing if no elements are in need of upgrade. */ function upgradeDocumentTree(doc) { forDocumentTree(doc, upgradeDocument); } // Patch `createShadowRoot()` if Shadow DOM is available, otherwise leave // undefined to aid feature detection of Shadow DOM. var <API key> = Element.prototype.createShadowRoot; if (<API key>) { Element.prototype.createShadowRoot = function() { var root = <API key>.call(this); window.CustomElements.watchShadow(this); return root; }; } // exports scope.watchShadow = watchShadow; scope.upgradeDocumentTree = upgradeDocumentTree; scope.upgradeDocument = upgradeDocument; scope.upgradeSubtree = addedSubtree; scope.upgradeAll = addedNode; scope.attached = attached; scope.takeRecords = takeRecords; });
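On platforms without a native MutationObserver, the module above queues attached/detached work via `deferMutation` and flushes the whole queue asynchronously in `takeMutations`: the first enqueue after a flush schedules exactly one drain. The same defer-and-flush batching pattern, sketched in Go with a mutex-guarded queue and a single armed flush (all names here are illustrative, not an API of the library):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// batcher queues callbacks and runs them together on the next "tick",
// mirroring deferMutation/takeMutations: enqueuing while no flush is
// pending arms exactly one deferred drain of the whole queue.
type batcher struct {
	mu      sync.Mutex
	pending []func()
	armed   bool
}

func (b *batcher) enqueue(fn func()) {
	b.mu.Lock()
	b.pending = append(b.pending, fn)
	arm := !b.armed
	b.armed = true
	b.mu.Unlock()
	if arm {
		time.AfterFunc(0, b.flush) // analogous to setTimeout(takeMutations)
	}
}

func (b *batcher) flush() {
	b.mu.Lock()
	work := b.pending
	b.pending = nil
	b.armed = false
	b.mu.Unlock()
	for _, fn := range work {
		fn()
	}
}

func main() {
	var b batcher
	for i := 0; i < 3; i++ {
		i := i
		b.enqueue(func() { fmt.Println("attached", i) })
	}
	time.Sleep(10 * time.Millisecond) // let the single deferred flush run
}
```

Deferring gives newly created elements a chance to land in the DOM before their attached callbacks fire, which is the whole point of the JavaScript version.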
<?php class <API key> implements <API key> { /** * A list of tokenizers this sniff supports. * * @var array */ public $supportedTokenizers = array( 'PHP', 'JS', ); /** * Returns an array of tokens this test wants to listen for. * * @return array */ public function register() { return <API key>::$operators; }//end register() /** * Processes this test, when one of its tokens is encountered. * * @param <API key> $phpcsFile The file being scanned. * @param int $stackPtr The position of the current token in the * stack passed in $tokens. * * @return void */ public function process(<API key> $phpcsFile, $stackPtr) { $tokens = $phpcsFile->getTokens(); if ($phpcsFile->tokenizerType === 'JS' && $tokens[$stackPtr]['code'] === T_PLUS) { // JavaScript uses the plus operator for string concatenation as well // so we cannot accurately determine if it is a string concat or addition. // So just ignore it. return; } // If the & is a reference, then we don't want to check for brackets. if ($tokens[$stackPtr]['code'] === T_BITWISE_AND && $phpcsFile->isReference($stackPtr) === true) { return; } // There is one instance where brackets aren't needed, which involves // the minus sign being used to assign a negative number to a variable. if ($tokens[$stackPtr]['code'] === T_MINUS) { // Check to see if we are trying to return -n. $prev = $phpcsFile->findPrevious(<API key>::$emptyTokens, ($stackPtr - 1), null, true); if ($tokens[$prev]['code'] === T_RETURN) { return; } $number = $phpcsFile->findNext(T_WHITESPACE, ($stackPtr + 1), null, true); if ($tokens[$number]['code'] === T_LNUMBER || $tokens[$number]['code'] === T_DNUMBER) { $previous = $phpcsFile->findPrevious(T_WHITESPACE, ($stackPtr - 1), null, true); if ($previous !== false) { $isAssignment = in_array($tokens[$previous]['code'], <API key>::$assignmentTokens); $isEquality = in_array($tokens[$previous]['code'], <API key>::$equalityTokens); $isComparison = in_array($tokens[$previous]['code'], <API key>::$comparisonTokens); if ($isAssignment === true || $isEquality === true || $isComparison === true) { // This is a negative assignment or comparion. // We need to check that the minus and the number are // adjacent. if (($number - $stackPtr) !== 1) { $error = 'No space allowed between minus sign and number'; $phpcsFile->addError($error, $stackPtr, 'SpacingAfterMinus'); } return; } } } }//end if $lastBracket = false; if (isset($tokens[$stackPtr]['nested_parenthesis']) === true) { $parenthesis = array_reverse($tokens[$stackPtr]['nested_parenthesis'], true); foreach ($parenthesis as $bracket => $endBracket) { $prevToken = $phpcsFile->findPrevious(T_WHITESPACE, ($bracket - 1), null, true); $prevCode = $tokens[$prevToken]['code']; if ($prevCode === T_ISSET) { // This operation is inside an isset() call, but has // no bracket of it's own. break; } if ($prevCode === T_STRING || $prevCode === T_SWITCH) { // We allow very simple operations to not be bracketed. // For example, ceil($one / $two). 
                $allowed = array(
                             T_VARIABLE,
                             T_LNUMBER,
                             T_DNUMBER,
                             T_STRING,
                             T_WHITESPACE,
                             T_THIS,
                             T_OBJECT_OPERATOR,
                             <API key>,
                             <API key>,
                             T_MODULUS,
                            );

                for ($prev = ($stackPtr - 1); $prev > $bracket; $prev--) {
                    if (in_array($tokens[$prev]['code'], $allowed) === true) {
                        continue;
                    }

                    if ($tokens[$prev]['code'] === T_CLOSE_PARENTHESIS) {
                        $prev = $tokens[$prev]['parenthesis_opener'];
                    } else {
                        break;
                    }
                }

                if ($prev !== $bracket) {
                    break;
                }

                for ($next = ($stackPtr + 1); $next < $endBracket; $next++) {
                    if (in_array($tokens[$next]['code'], $allowed) === true) {
                        continue;
                    }

                    if ($tokens[$next]['code'] === T_OPEN_PARENTHESIS) {
                        $next = $tokens[$next]['parenthesis_closer'];
                    } else {
                        break;
                    }
                }

                if ($next !== $endBracket) {
                    break;
                }
            }//end if

            if (in_array($prevCode, <API key>::$scopeOpeners) === true) {
                // This operation is inside a control structure like FOREACH
                // or IF, but has no bracket of its own.
                // The only control structure allowed to do this is SWITCH.
                if ($prevCode !== T_SWITCH) {
                    break;
                }
            }

            if ($prevCode === T_OPEN_PARENTHESIS) {
                // These are two open parenthesis in a row. If the current
                // one doesn't enclose the operator, go to the previous one.
                if ($endBracket < $stackPtr) {
                    continue;
                }
            }

            $lastBracket = $bracket;
            break;
        }//end foreach
    }//end if

    if ($lastBracket === false) {
        // It is not in a bracketed statement at all.
        $previousToken = $phpcsFile->findPrevious(T_WHITESPACE, ($stackPtr - 1), null, true, null, true);
        if ($previousToken !== false) {
            // A list of tokens that indicate that the token is not
            // part of an arithmetic operation.
            $invalidTokens = array(
                              T_COMMA,
                              T_COLON,
                              T_OPEN_PARENTHESIS,
                              <API key>,
                              T_CASE,
                             );
            if (in_array($tokens[$previousToken]['code'], $invalidTokens) === false) {
                $error = 'Arithmetic operation must be bracketed';
                $phpcsFile->addError($error, $stackPtr, 'MissingBrackets');
            }

            return;
        }
    } else if ($tokens[$lastBracket]['parenthesis_closer'] < $stackPtr) {
        // There are a set of brackets in front of it that don't include it.
        $error = 'Arithmetic operation must be bracketed';
        $phpcsFile->addError($error, $stackPtr, 'MissingBrackets');
        return;
    } else {
        // We are enclosed in a set of brackets, so the last thing to
        // check is that we are not also enclosed in square brackets
        // like this: ($array[$index + 1]), which is invalid.
        $brackets = array(
                     <API key>,
                     <API key>,
                    );

        $squareBracket = $phpcsFile->findPrevious($brackets, ($stackPtr - 1), $lastBracket);
        if ($squareBracket !== false && $tokens[$squareBracket]['code'] === <API key>) {
            $closeSquareBracket = $phpcsFile->findNext($brackets, ($stackPtr + 1));
            if ($closeSquareBracket !== false && $tokens[$closeSquareBracket]['code'] === <API key>) {
                $error = 'Arithmetic operation must be bracketed';
                $phpcsFile->addError($error, $stackPtr, 'MissingBrackets');
            }
        }

        return;
    }//end if

    $lastAssignment = $phpcsFile->findPrevious(<API key>::$assignmentTokens, $stackPtr, null, false, null, true);
    if ($lastAssignment !== false && $lastAssignment > $lastBracket) {
        $error = 'Arithmetic operation must be bracketed';
        $phpcsFile->addError($error, $stackPtr, 'MissingBrackets');
    }

}//end process()

}//end class

?>
#include "config.h" #include "core/workers/WorkerEventQueue.h" #include "core/dom/ExecutionContext.h" #include "core/dom/<API key>.h" #include "core/events/Event.h" #include "core/inspector/<API key>.h" namespace blink { <API key><WorkerEventQueue> WorkerEventQueue::create(ExecutionContext* context) { return adoptPtrWillBeNoop(new WorkerEventQueue(context)); } WorkerEventQueue::WorkerEventQueue(ExecutionContext* context) : m_executionContext(context) , m_isClosed(false) { } WorkerEventQueue::~WorkerEventQueue() { ASSERT(m_eventTaskMap.isEmpty()); } void WorkerEventQueue::trace(Visitor* visitor) { #if ENABLE(OILPAN) visitor->trace(m_executionContext); visitor->trace(m_eventTaskMap); #endif EventQueue::trace(visitor); } class WorkerEventQueue::EventDispatcherTask : public <API key> { public: static PassOwnPtr<EventDispatcherTask> create(<API key><Event> event, WorkerEventQueue* eventQueue) { return adoptPtr(new EventDispatcherTask(event, eventQueue)); } virtual ~EventDispatcherTask() { if (m_event) m_eventQueue->removeEvent(m_event.get()); } void dispatchEvent(ExecutionContext*, <API key><Event> event) { event->target()->dispatchEvent(event); } virtual void performTask(ExecutionContext* context) { if (m_isCancelled) return; m_eventQueue->removeEvent(m_event.get()); dispatchEvent(context, m_event); m_event.clear(); } void cancel() { m_isCancelled = true; m_event.clear(); } private: EventDispatcherTask(<API key><Event> event, WorkerEventQueue* eventQueue) : m_event(event) , m_eventQueue(eventQueue) , m_isCancelled(false) { } <API key><Event> m_event; WorkerEventQueue* m_eventQueue; bool m_isCancelled; }; void WorkerEventQueue::removeEvent(Event* event) { <API key>::didRemoveEvent(event->target(), event); m_eventTaskMap.remove(event); } bool WorkerEventQueue::enqueueEvent(<API key><Event> prpEvent) { if (m_isClosed) return false; RefPtrWillBeRawPtr<Event> event = prpEvent; <API key>::didEnqueueEvent(event->target(), event.get()); OwnPtr<EventDispatcherTask> task = EventDispatcherTask::create(event, this); m_eventTaskMap.add(event.release(), task.get()); m_executionContext->postTask(task.release()); return true; } bool WorkerEventQueue::cancelEvent(Event* event) { EventDispatcherTask* task = m_eventTaskMap.get(event); if (!task) return false; task->cancel(); removeEvent(event); return true; } void WorkerEventQueue::close() { m_isClosed = true; for (EventTaskMap::iterator it = m_eventTaskMap.begin(); it != m_eventTaskMap.end(); ++it) { Event* event = it->key.get(); EventDispatcherTask* task = it->value; <API key>::didRemoveEvent(event->target(), event); task->cancel(); } m_eventTaskMap.clear(); } }
#!/usr/bin/env python """Generator for C++ structs from api json files. The purpose of this tool is to remove the need for hand-written code that converts to and from base::Value types when receiving javascript api calls. Originally written for generating code for extension apis. Reference schemas are in chrome/common/extensions/api. Usage example: compiler.py --root /home/Work/src --namespace extensions windows.json tabs.json compiler.py --destdir gen --root /home/Work/src --namespace extensions windows.json tabs.json """ import optparse import os import shlex import sys from <API key> import CppBundleGenerator from cpp_generator import CppGenerator from cpp_type_generator import CppTypeGenerator from <API key> import JsExternsGenerator from <API key> import <API key> import json_schema from <API key> import <API key> from model import Model from schema_loader import SchemaLoader # Names of supported code generators, as specified on the command-line. # First is default. GENERATORS = [ 'cpp', '<API key>', 'cpp-bundle-schema', 'externs', 'interface' ] def GenerateSchema(generator_name, file_paths, root, destdir, <API key>, bundle_name, impl_dir, include_rules): # Merge the source files into a single list of schemas. api_defs = [] for file_path in file_paths: schema = os.path.relpath(file_path, root) schema_loader = SchemaLoader( root, os.path.dirname(schema), include_rules, <API key>) api_def = schema_loader.LoadSchema(schema) # If compiling the C++ model code, delete 'nocompile' nodes. if generator_name == 'cpp': api_def = json_schema.DeleteNodes(api_def, 'nocompile') # Delete all 'nodefine' nodes. They are only for documentation. api_def = json_schema.DeleteNodes(api_def, 'nodefine') api_defs.extend(api_def) api_model = Model(allow_inline_enums=False) # For single-schema compilation make sure that the first (i.e. only) schema # is the default one. default_namespace = None # If we have files from multiple source paths, we'll use the common parent # path as the source directory. src_path = None # Load the actual namespaces into the model. for target_namespace, file_path in zip(api_defs, file_paths): relpath = os.path.relpath(os.path.normpath(file_path), root) namespace = api_model.AddNamespace(target_namespace, relpath, <API key>=True, environment=<API key>( <API key>)) if default_namespace is None: default_namespace = namespace if src_path is None: src_path = namespace.source_file_dir else: src_path = os.path.commonprefix((src_path, namespace.source_file_dir)) _, filename = os.path.split(file_path) filename_base, _ = os.path.splitext(filename) # Construct the type generator with all the namespaces in this model. 
type_generator = CppTypeGenerator(api_model, schema_loader, default_namespace) if generator_name in ('<API key>', 'cpp-bundle-schema'): <API key> = CppBundleGenerator(root, api_model, api_defs, type_generator, <API key>, bundle_name, src_path, impl_dir) if generator_name == '<API key>': generators = [ ('<API key>.cc', <API key>.api_cc_generator), ('<API key>.h', <API key>.api_h_generator), ] elif generator_name == 'cpp-bundle-schema': generators = [ ('generated_schemas.cc', <API key>.<API key>), ('generated_schemas.h', <API key>.schemas_h_generator) ] elif generator_name == 'cpp': cpp_generator = CppGenerator(type_generator) generators = [ ('%s.h' % filename_base, cpp_generator.h_generator), ('%s.cc' % filename_base, cpp_generator.cc_generator) ] elif generator_name == 'externs': generators = [ ('%s_externs.js' % namespace.unix_name, JsExternsGenerator()) ] elif generator_name == 'interface': generators = [ ('%s_interface.js' % namespace.unix_name, <API key>()) ] else: raise Exception('Unrecognised generator %s' % generator_name) output_code = [] for filename, generator in generators: code = generator.Generate(namespace).Render() if destdir: if generator_name == '<API key>': # Function registrations must be output to impl_dir, since they link in # API implementations. output_dir = os.path.join(destdir, impl_dir) else: output_dir = os.path.join(destdir, src_path) if not os.path.exists(output_dir): os.makedirs(output_dir) with open(os.path.join(output_dir, filename), 'w') as f: f.write(code) # If multiple files are being output, add the filename for each file. if len(generators) > 1: output_code += [filename, '', code, ''] else: output_code += [code] return '\n'.join(output_code) if __name__ == '__main__': parser = optparse.OptionParser( description='Generates a C++ model of an API from JSON schema', usage='usage: %prog [option]... schema') parser.add_option('-r', '--root', default='.', help='logical include root directory. Path to schema files from specified' ' dir will be the include path.') parser.add_option('-d', '--destdir', help='root directory to output generated files.') parser.add_option('-n', '--namespace', default='<API key>', help='C++ namespace for generated files. e.g extensions::api.') parser.add_option('-b', '--bundle-name', default='', help='A string to prepend to generated bundle class names, so that ' 'multiple bundle rules can be used without conflicting. ' 'Only used with one of the cpp-bundle generators.') parser.add_option('-g', '--generator', default=GENERATORS[0], choices=GENERATORS, help='The generator to use to build the output code. Supported values are' ' %s' % GENERATORS) parser.add_option('-i', '--impl-dir', dest='impl_dir', help='The root path of all API implementations') parser.add_option('-I', '--include-rules', help='A list of paths to include when searching for referenced objects,' ' with the namespace separated by a \':\'. Example: ' '/foo/bar:Foo::Bar::%(namespace)s') (opts, file_paths) = parser.parse_args() if not file_paths: sys.exit(0) # This is OK as a no-op # Unless in bundle mode, only one file should be specified. if (opts.generator not in ('<API key>', 'cpp-bundle-schema') and len(file_paths) > 1): # TODO(sashab): Could also just use file_paths[0] here and not complain. raise Exception( "Unless in bundle mode, only one file can be specified at a time.") def <API key>(path_and_namespace): if ':' not in path_and_namespace: raise ValueError('Invalid include rule "%s". 
Rules must be of ' 'the form path:namespace' % path_and_namespace) return path_and_namespace.split(':', 1) include_rules = [] if opts.include_rules: include_rules = map(<API key>, shlex.split(opts.include_rules)) result = GenerateSchema(opts.generator, file_paths, opts.root, opts.destdir, opts.namespace, opts.bundle_name, opts.impl_dir, include_rules) if not opts.destdir: print result
var BUGNUMBER = 565604; var summary = "Typed-array properties don't work when accessed from an object whose " + "prototype (or further-descended prototype) is a typed array"; print(BUGNUMBER + ": " + summary); var o = Object.create(new Uint8Array(1)); assertEq(o.length, 1); var o2 = Object.create(o); assertEq(o2.length, 1); var VARIABLE_OBJECT = {}; var props = [ { property: "length", value: 1 }, { property: "byteLength", value: 1 }, { property: "byteOffset", value: 0 }, { property: "buffer", value: VARIABLE_OBJECT }, ]; for (var i = 0, sz = props.length; i < sz; i++) { var p = props[i]; var o = Object.create(new Uint8Array(1)); var v = o[p.property]; if (p.value !== VARIABLE_OBJECT) assertEq(o[p.property], p.value, "bad " + p.property + " (proto)"); var o2 = Object.create(o); if (p.value !== VARIABLE_OBJECT) assertEq(o2[p.property], p.value, "bad " + p.property + " (grand-proto)"); assertEq(o2[p.property], v, p.property + " mismatch"); } reportCompare(true, true);
all: i3status.1 A2X?=a2x i3status.1: asciidoc.conf i3status.man ${A2X} -f manpage --asciidoc-opts="-f asciidoc.conf" i3status.man clean: rm -f i3status.xml i3status.1 i3status.html
#ifndef <API key> #define <API key> namespace extensions { // Icky RTTI used by a few systems to distinguish the host type of a given // WebContents. // TODO(aa): Remove this and teach those systems to keep track of their own // data. enum ViewType { VIEW_TYPE_INVALID, <API key>, <API key>, <API key>, <API key>, <API key>, <API key>, VIEW_TYPE_PANEL, <API key>, <API key>, VIEW_TYPE_LAST = <API key> }; // Constant strings corresponding to the Type enumeration values. Used // when converting JS arguments. extern const char kViewTypeAll[]; extern const char kViewTypeAppWindow[]; extern const char <API key>[]; extern const char <API key>[]; extern const char kViewTypeInfobar[]; extern const char kViewTypePanel[]; extern const char kViewTypePopup[]; extern const char <API key>[]; } // namespace extensions #endif // <API key>
#ifndef <API key>
#define <API key>

#include <GLES2/gl2.h>

#include <array>
#include <cstddef>
#include <string>
#include <vector>

typedef std::array<unsigned char, 4> Byte4;

struct TGAImage {
    size_t width;
    size_t height;
    std::vector<Byte4> data;

    TGAImage();
};

bool <API key>(const std::string &path, TGAImage *image);
GLuint <API key>(const TGAImage &image);

#endif // <API key>
#include "config.h" #include "core/rendering/<API key>.h" #include "core/CSSValueKeywords.h" #include "platform/fonts/FontDescription.h" #include "wtf/StdLibExtras.h" #include "wtf/text/WTFString.h" namespace blink { // static void <API key>::setDefaultFontSize(int fontSize) { s_defaultFontSize = static_cast<float>(fontSize); } // static void <API key>::systemFont(CSSValueID valueID, FontDescription& fontDescription) { float fontSize = s_defaultFontSize; switch (valueID) { case <API key>: case <API key>: case <API key>: // Why 2 points smaller? Because that's what Gecko does. Note that we // are assuming a 96dpi screen, which is the default that we use on // Windows. static const float pointsPerInch = 72.0f; static const float pixelsPerInch = 96.0f; fontSize -= (2.0f / pointsPerInch) * pixelsPerInch; break; default: break; } fontDescription.firstFamily().setFamily(defaultGUIFont()); fontDescription.setSpecifiedSize(fontSize); fontDescription.setIsAbsoluteSize(true); fontDescription.setGenericFamily(FontDescription::NoFamily); fontDescription.setWeight(FontWeightNormal); fontDescription.setStyle(FontStyleNormal); } } // namespace blink
#ifndef __fw_api_scan_h__
#define __fw_api_scan_h__

#include "fw-api.h"

/* Scan Commands, Responses, Notifications */

/* Masks for iwl_scan_channel.type flags */
#define <API key> BIT(0)
#define <API key> BIT(22)

/* Max number of IEs for direct SSID scans in a command */
#define PROBE_OPTION_MAX 20

struct iwl_scan_channel {
	__le32 type;
	__le16 channel;
	__le16 iteration_count;
	__le32 iteration_interval;
	__le16 active_dwell;
	__le16 passive_dwell;
} __packed; /* <API key> */

/**
 * struct iwl_ssid_ie - directed scan network information element
 *
 * Up to 20 of these may appear in REPLY_SCAN_CMD,
 * selected by "type" bit field in struct iwl_scan_channel;
 * each channel may select different ssids from among the 20 entries.
 * SSID IEs get transmitted in reverse order of entry.
 */
struct iwl_ssid_ie {
	u8 id;
	u8 len;
	u8 ssid[<API key>];
} __packed; /* <API key> */

/**
 * iwl_scan_flags - masks for scan command flags
 *@<API key>:
 *@<API key>:
 *@<API key>:
 *@<API key>:
 *@<API key>:
 *@<API key>: use active scan on channels that were active
 *	in the past hour, even if they are marked as passive.
 */
enum iwl_scan_flags {
	<API key> = BIT(0),
	<API key> = BIT(1),
	<API key> = BIT(2),
	<API key> = BIT(3),
	<API key> = BIT(4),
	<API key> = BIT(5),
};

/**
 * enum iwl_scan_type - Scan types for scan command
 * @SCAN_TYPE_FORCED:
 * @<API key>:
 * @SCAN_TYPE_OS:
 * @SCAN_TYPE_ROAMING:
 * @SCAN_TYPE_ACTION:
 * @SCAN_TYPE_DISCOVERY:
 * @<API key>:
 */
enum iwl_scan_type {
	SCAN_TYPE_FORCED = 0,
	<API key> = 1,
	SCAN_TYPE_OS = 2,
	SCAN_TYPE_ROAMING = 3,
	SCAN_TYPE_ACTION = 4,
	SCAN_TYPE_DISCOVERY = 5,
	<API key> = 6,
}; /* <API key> */

/* Maximal number of channels to scan */
#define <API key> 0x24

/**
 * struct iwl_scan_cmd - scan request command
 * ( SCAN_REQUEST_CMD = 0x80 )
 * @len: command length in bytes
 * @scan_flags: scan flags from SCAN_FLAGS_*
 * @channel_count: num of channels in channel list (1 - <API key>)
 * @quiet_time: in msecs, dwell this time for active scan on quiet channels
 * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
 *	this number of packets were received (typically 1))
 * @passive2active: is auto switching from passive to active during scan allowed
 * @rxchain_sel_flags: RXON_RX_CHAIN_*
 * @max_out_time: in usecs, max out of serving channel time
 * @suspend_time: how long to pause scan when returning to service channel:
 *	bits 0-19: beacon interval in usecs (suspend before executing)
 *	bits 20-23: reserved
 *	bits 24-31: number of beacons (suspend between channels)
 * @rxon_flags: RXON_FLG_*
 * @filter_flags: RXON_FILTER_*
 * @tx_cmd: for active scans (zero for passive), w/o payload,
 *	no RS so specify TX rate
 * @direct_scan: direct scan SSIDs
 * @type: one of SCAN_TYPE_*
 * @repeats: how many times to repeat the scan
 */
struct iwl_scan_cmd {
	__le16 len;
	u8 scan_flags;
	u8 channel_count;
	__le16 quiet_time;
	__le16 quiet_plcp_th;
	__le16 passive2active;
	__le16 rxchain_sel_flags;
	__le32 max_out_time;
	__le32 suspend_time; /* <API key> */
	__le32 rxon_flags;
	__le32 filter_flags;
	struct iwl_tx_cmd tx_cmd;
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
	__le32 type;
	__le32 repeats;

	/*
	 * Probe request frame, followed by channel list.
	 *
	 * Size of probe request frame is specified by byte count in tx_cmd.
	 * Channel list follows immediately after probe request frame.
	 * Number of channels in list is specified by channel_count.
	 * Each channel in list is of type:
	 *
	 * struct iwl_scan_channel channels[0];
	 *
	 * NOTE: Only one band of channels can be scanned per pass. You
	 * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
	 * for one scan to complete (i.e. receive <API key>)
	 * before requesting another scan.
	 */
	u8 data[0];
} __packed; /* <API key> */

/* Response to scan request contains only status with one of these values */
#define SCAN_RESPONSE_OK 0x1
#define SCAN_RESPONSE_ERROR 0x2

/*
 * SCAN_ABORT_CMD = 0x81
 * When scan abort is requested, the command has no fields except the common
 * header. The response contains only a status with one of these values.
 */
#define SCAN_ABORT_POSSIBLE 0x1
#define SCAN_ABORT_IGNORED 0x2 /* no pending scans */

/* TODO: complete documentation */
#define SCAN_OWNER_STATUS 0x1
#define <API key> 0x2

/**
 * struct <API key> - notifies start of scan in the device
 * ( <API key> = 0x82 )
 * @tsf_low: TSF timer (lower half) in usecs
 * @tsf_high: TSF timer (higher half) in usecs
 * @beacon_timer: structured as follows:
 *	bits 0:19 - beacon interval in usecs
 *	bits 20:23 - reserved (0)
 *	bits 24:31 - number of beacons
 * @channel: which channel is scanned
 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
 * @status: one of *_OWNER_STATUS
 */
struct <API key> {
	__le32 tsf_low;
	__le32 tsf_high;
	__le32 beacon_timer;
	u8 channel;
	u8 band;
	u8 reserved[2];
	__le32 status;
} __packed; /* <API key> */

/* scan results probe_status first bit indicates success */
#define <API key> 0
#define <API key> BIT(0)
/* error statuses combined with TX_FAILED */
#define <API key> BIT(1)
#define <API key> BIT(2)

/* How many statistics are gathered for each channel */
#define <API key> 1

/**
 * enum <API key> - status codes for scan complete notifications
 * @SCAN_COMP_STATUS_OK: scan completed successfully
 * @<API key>: scan was aborted by user
 * @<API key>: sending null sleep packet failed
 * @<API key>: timeout before channel is ready
 * @<API key>: sending probe request failed
 * @<API key>: sending null wakeup packet failed
 * @<API key>: invalid antennas chosen at scan command
 * @<API key>: internal error caused scan abort
 * @<API key>: medium was lost to WiMax
 * @<API key>: P2P public action frame TX was successful
 *	(not an error!)
 * @<API key>: indicates end of one repetition the driver
 *	asked for
 * @<API key>: scan could not allocate time events
 */
enum <API key> {
	SCAN_COMP_STATUS_OK = 0x1,
	<API key> = 0x2,
	<API key> = 0x3,
	<API key> = 0x4,
	<API key> = 0x5,
	<API key> = 0x6,
	<API key> = 0x7,
	<API key> = 0x8,
	<API key> = 0x9,
	<API key> = 0xA,
	<API key> = 0x0B,
	<API key> = 0x0C,
};

/**
 * struct <API key> - scan results for one channel
 * ( <API key> = 0x83 )
 * @channel: which channel the results are from
 * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
 * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
 * @num_probe_not_sent: # of request that weren't sent due to not enough time
 * @duration: duration spent in channel, in usecs
 * @statistics: statistics gathered for this channel
 */
struct <API key> {
	u8 channel;
	u8 band;
	u8 probe_status;
	u8 num_probe_not_sent;
	__le32 duration;
	__le32 statistics[<API key>];
} __packed; /* <API key> */

/**
 * struct <API key> - notifies end of scanning (all channels)
 * ( <API key> = 0x84 )
 * @scanned_channels: number of channels scanned (and number of valid results)
 * @status: one of SCAN_COMP_STATUS_*
 * @bt_status: BT on/off status
 * @last_channel: last channel that was scanned
 * @tsf_low: TSF timer (lower half) in usecs
 * @tsf_high: TSF timer (higher half) in usecs
 * @results: all scan results, only "scanned_channels" of them are valid
 */
struct <API key> {
	u8 scanned_channels;
	u8 status;
	u8 bt_status;
	u8 last_channel;
	__le32 tsf_low;
	__le32 tsf_high;
	struct <API key> results[<API key>];
} __packed; /* <API key> */

/* scan offload */
#define <API key> 40
#define <API key> 64
#define <API key> 16
#define <API key> 11
#define <API key> 512

/* Default watchdog (in MS) for scheduled scan iteration */
#define <API key> cpu_to_le16(15000)
#define <API key> cpu_to_le16(1)
#define CAN_ABORT_STATUS 1

#define <API key> 5
#define <API key> 3

enum <API key> {
	<API key> = BIT(0),
	<API key> = BIT(1),
	<API key> = BIT(2),
};

/**
 * struct <API key> - <API key>
 * @scan_flags: see enum iwl_scan_flags
 * @channel_count: channels in channel list
 * @quiet_time: dwell time, in milliseconds, on quiet channel
 * @quiet_plcp_th: quiet channel num of packets threshold
 * @good_CRC_th: passive to active promotion threshold
 * @rx_chain: RXON rx chain.
 * @max_out_time: max uSec to be out of associated channel
 * @suspend_time: pause scan this long when returning to service channel
 * @flags: RXON flags
 * @filter_flags: RXON filter
 * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz.
 * @direct_scan: list of SSIDs for directed active scan
 * @scan_type: see enum iwl_scan_type.
 * @rep_count: repetition count for each scheduled scan iteration.
 */
struct <API key> {
	__le16 len;
	u8 scan_flags;
	u8 channel_count;
	__le16 quiet_time;
	__le16 quiet_plcp_th;
	__le16 good_CRC_th;
	__le16 rx_chain;
	__le32 max_out_time;
	__le32 suspend_time; /* <API key> */
	__le32 flags;
	__le32 filter_flags;
	struct iwl_tx_cmd tx_cmd[2]; /* <API key> */
	struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
	__le32 scan_type;
	__le32 rep_count;
} __packed;

enum <API key> {
	<API key> = BIT(0),
	<API key> = BIT(22),
	<API key> = BIT(24),
	<API key> = BIT(25),
};

/**
 * <API key> - SCAN_CHANNEL_CFG_S
 * @type: bitmap - see enum <API key>.
 *	0: passive (0) or active (1) scan.
 *	1-20: directed scan to i'th ssid.
 *	22: channel width configuration - 1 for narrow.
 *	24: full scan.
 *	25: partial scan.
 * @channel_number: channel number 1-13 etc.
 * @iter_count: repetition count for the channel.
 * @iter_interval: interval between two iterations on one channel.
 * @dwell_time: entry 0 - active scan, entry 1 - passive scan.
 */
struct <API key> {
	__le32 type[<API key>];
	__le16 channel_number[<API key>];
	__le16 iter_count[<API key>];
	__le32 iter_interval[<API key>];
	u8 dwell_time[<API key>][2];
} __packed;

/**
 * <API key> - <API key>
 * @scan_cmd: scan command fixed part
 * @channel_cfg: scan channel configuration
 * @data: probe request frames (one per band)
 */
struct <API key> {
	struct <API key> scan_cmd;
	struct <API key> channel_cfg;
	u8 data[0];
} __packed;

/**
 * <API key> - <API key>
 * @ssid: MAC address to filter out
 * @reported_rssi: AP rssi reported to the host
 * @client_bitmap: clients ignore this entry - enum <API key>
 */
struct <API key> {
	u8 ssid[ETH_ALEN];
	u8 reported_rssi;
	u8 client_bitmap;
} __packed;

enum <API key> {
	<API key> = 1,
	<API key> = 2,
	<API key> = 3,
};

enum <API key> {
	<API key> = 0x4,
	<API key> = 0x8,
	<API key> = 0xc,
};

/**
 * <API key> - <API key>
 * @ssid_index: index to ssid list in fixed part
 * @unicast_cipher: encryption algorithm to match - bitmap
 * @auth_alg: authentication algorithm to match - bitmap
 * @network_type: enum <API key>
 * @band_selection: enum <API key>
 * @client_bitmap: clients waiting for match - enum <API key>
 */
struct <API key> {
	u8 ssid_index;
	u8 unicast_cipher;
	u8 auth_alg;
	u8 network_type;
	u8 band_selection;
	u8 client_bitmap;
	u8 reserved[2];
} __packed;

/**
 * <API key> - <API key>
 * @blacklist: AP list to filter off from scan results
 * @profiles: profiles to search for match
 * @blacklist_len: length of blacklist
 * @num_profiles: num of profiles in the list
 * @match_notify: clients waiting for match found notification
 * @pass_match: clients waiting for the results
 * @active_clients: active clients bitmap - enum <API key>
 * @any_beacon_notify: clients waiting for match notification without match
 */
struct <API key> {
	struct <API key> profiles[<API key>];
	u8 blacklist_len;
	u8 num_profiles;
	u8 match_notify;
	u8 pass_match;
	u8 active_clients;
	u8 any_beacon_notify;
	u8 reserved[2];
} __packed;

/**
 * <API key> - schedule of scan offload
 * @delay: delay between iterations, in seconds.
 * @iterations: num of scan iterations
 * @full_scan_mul: number of partial scans before each full scan
 */
struct <API key> {
	u16 delay;
	u8 iterations;
	u8 full_scan_mul;
} __packed;

/*
 * <API key>
 *
 * <API key>: pass all results - no filtering.
 * <API key>: add cached channels to partial scan.
 * <API key>: use energy based scan before partial scan
 *	on A band.
 */
enum <API key> {
	<API key> = BIT(0),
	<API key> = BIT(2),
	<API key> = BIT(3),
};

/**
 * <API key> - scan offload request command
 * @flags: bitmap - enum <API key>.
 * @watchdog: maximum scan duration in TU.
 * @delay: delay in seconds before first iteration.
 * @schedule_line: scan offload schedule, for fast and regular scan.
 */
struct <API key> {
	__le16 flags;
	__le16 watchdog;
	__le16 delay;
	__le16 reserved;
	struct <API key> schedule_line[2];
} __packed;

enum <API key> {
	<API key> = 1,
	<API key> = 2,
};

/**
 * <API key> - <API key>
 * @last_schedule_line: last schedule line executed (fast or regular)
 * @<API key>: last scan iteration executed before scan abort
 * @status: enum <API key>
 */
struct <API key> {
	u8 last_schedule_line;
	u8 <API key>;
	u8 status;
	u8 reserved;
} __packed;

/**
 * <API key> - <API key>
 * @ssid_bitmap: SSID indexes found in this iteration
 * @client_bitmap: clients that are active and wait for this notification
 */
struct <API key> {
	__le16 ssid_bitmap;
	u8 client_bitmap;
	u8 reserved;
};

#endif
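The bit-packed words documented above (for example @suspend_time in struct iwl_scan_cmd, and @beacon_timer in the scan-start notification: bits 0-19 carry the beacon interval in usecs, bits 20-23 are reserved, bits 24-31 carry the number of beacons) can be made concrete with a little packing arithmetic. This is an illustrative sketch only, not driver code; JavaScript is used purely to demonstrate the layout, and `packSuspendTime` is a hypothetical helper.

```javascript
// Illustrative only: pack a suspend_time/beacon_timer word per the layout above
// (bits 0-19 beacon interval in usecs, bits 20-23 reserved, bits 24-31 beacon count).
function packSuspendTime(beaconIntervalUsecs, numBeacons) {
  return (((numBeacons & 0xff) << 24) | (beaconIntervalUsecs & 0xfffff)) >>> 0;
}

// 3 beacons with a 100000 usec interval -> 0x030186a0
console.log(packSuspendTime(100000, 3).toString(16)); // "30186a0"
```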
import AuthenticatedRoute from 'ghost/routes/authenticated'; import CurrentUserSettings from 'ghost/mixins/<API key>'; import styleBody from 'ghost/mixins/style-body'; var AppsRoute = AuthenticatedRoute.extend(styleBody, CurrentUserSettings, { titleToken: 'Apps', classNames: ['settings-view-apps'], beforeModel: function () { if (!this.get('config.apps')) { return this.transitionTo('settings.general'); } return this.get('session.user') .then(this.transitionAuthor()) .then(this.transitionEditor()); }, model: function () { return this.store.find('app'); } }); export default AppsRoute;
<?php

/**
 * @file
 * Contains \Drupal\Core\Entity\Query\Sql\Query.
 */

namespace Drupal\Core\Entity\Query\Sql;

use Drupal\Core\Database\Connection;
use Drupal\Core\Database\Query\SelectInterface;
use Drupal\Core\Entity\EntityTypeInterface;
use Drupal\Core\Entity\Query\QueryBase;
use Drupal\Core\Entity\Query\QueryException;
use Drupal\Core\Entity\Query\QueryInterface;

/**
 * The SQL storage entity query class.
 */
class Query extends QueryBase implements QueryInterface {

  /**
   * The built SQL SELECT query.
   *
   * @var \Drupal\Core\Database\Query\SelectInterface
   */
  protected $sqlQuery;

  /**
   * An array of fields keyed by the field alias.
   *
   * Each entry correlates to the arguments of
   * \Drupal\Core\Database\Query\SelectInterface::addField(), so the first one
   * is the table alias, the second one the field, and the last one,
   * optionally, the field alias.
   *
   * @var array
   */
  protected $sqlFields = array();

  /**
   * An array of strings added to the GROUP BY, keyed by the string to avoid
   * duplicates.
   *
   * @var array
   */
  protected $sqlGroupBy = array();

  /**
   * @var \Drupal\Core\Database\Connection
   */
  protected $connection;

  /**
   * Stores the entity manager used by the query.
   *
   * @var \Drupal\Core\Entity\<API key>
   */
  protected $entityManager;

  /**
   * Constructs a query object.
   *
   * @param \Drupal\Core\Entity\EntityTypeInterface $entity_type
   *   The entity type definition.
   * @param string $conjunction
   *   - AND: all of the conditions on the query need to match.
   *   - OR: at least one of the conditions on the query needs to match.
   * @param \Drupal\Core\Database\Connection $connection
   *   The database connection to run the query against.
   * @param array $namespaces
   *   List of potential namespaces of the classes belonging to this query.
   */
  public function __construct(EntityTypeInterface $entity_type, $conjunction, Connection $connection, array $namespaces) {
    parent::__construct($entity_type, $conjunction, $namespaces);
    $this->connection = $connection;
  }

  /**
   * Implements \Drupal\Core\Entity\Query\QueryInterface::execute().
   */
  public function execute() {
    return $this
      ->prepare()
      ->compile()
      ->addSort()
      ->finish()
      ->result();
  }

  /**
   * Prepares the basic query with proper metadata/tags and base fields.
   *
   * @throws \Drupal\Core\Entity\Query\QueryException
   *   Thrown if the base table does not exist.
   *
   * @return \Drupal\Core\Entity\Query\Sql\Query
   *   Returns the called object.
   */
  protected function prepare() {
    if ($this->allRevisions) {
      if (!$base_table = $this->entityType->getRevisionTable()) {
        throw new QueryException("No revision table for " . $this->entityTypeId . ", invalid query.");
      }
    }
    else {
      if (!$base_table = $this->entityType->getBaseTable()) {
        throw new QueryException("No base table for " . $this->entityTypeId . ", invalid query.");
      }
    }
    $simple_query = TRUE;
    if ($this->entityType->getDataTable()) {
      $simple_query = FALSE;
    }
    $this->sqlQuery = $this->connection->select($base_table, 'base_table', array('conjunction' => $this->conjunction));
    $this->sqlQuery->addMetaData('entity_type', $this->entityTypeId);
    $id_field = $this->entityType->getKey('id');
    // Add the key field for fetchAllKeyed().
    if (!$revision_field = $this->entityType->getKey('revision')) {
      // When there is no revision support, the key field is the entity key.
      $this->sqlFields["base_table.$id_field"] = array('base_table', $id_field);
      // Now add the value column for fetchAllKeyed(). This is always the
      // entity id.
      $this->sqlFields["base_table.$id_field" . '_1'] = array('base_table', $id_field);
    }
    else {
      // When there is revision support, the key field is the revision key.
$this->sqlFields["base_table.$revision_field"] = array('base_table', $revision_field); // Now add the value column for fetchAllKeyed(). This is always the // entity id. $this->sqlFields["base_table.$id_field"] = array('base_table', $id_field); } if ($this->accessCheck) { $this->sqlQuery->addTag($this->entityTypeId . '_access'); } $this->sqlQuery->addTag('entity_query'); $this->sqlQuery->addTag('entity_query_' . $this->entityTypeId); // Add further tags added. if (isset($this->alterTags)) { foreach ($this->alterTags as $tag => $value) { $this->sqlQuery->addTag($tag); } } // Add further metadata added. if (isset($this->alterMetaData)) { foreach ($this->alterMetaData as $key => $value) { $this->sqlQuery->addMetaData($key, $value); } } // This now contains first the table containing entity properties and // last the entity base table. They might be the same. $this->sqlQuery->addMetaData('all_revisions', $this->allRevisions); $this->sqlQuery->addMetaData('simple_query', $simple_query); return $this; } /** * Compiles the conditions. * * @return \Drupal\Core\Entity\Query\Sql\Query * Returns the called object. */ protected function compile() { $this->condition->compile($this->sqlQuery); return $this; } /** * Adds the sort to the build query. * * @return \Drupal\Core\Entity\Query\Sql\Query * Returns the called object. */ protected function addSort() { if ($this->count) { $this->sort = array(); } // Gather the SQL field aliases first to make sure every field table // necessary is added. This might change whether the query is simple or // not. See below for more on simple queries. $sort = array(); if ($this->sort) { foreach ($this->sort as $key => $data) { $sort[$key] = $this->getSqlField($data['field'], $data['langcode']); } } $simple_query = $this->isSimpleQuery(); // If the query is set up for paging either via pager or by range or a // count is requested, then the correct amount of rows returned is // important. If the entity has a data table or multiple value fields are // involved then each revision might appear in several rows and this needs // a significantly more complex query. if (!$simple_query) { // First, GROUP BY revision id (if it has been added) and entity id. // Now each group contains a single revision of an entity. foreach ($this->sqlFields as $field) { $group_by = "$field[0].$field[1]"; $this->sqlGroupBy[$group_by] = $group_by; } } // Now we know whether this is a simple query or not, actually do the // sorting. foreach ($sort as $key => $sql_alias) { $direction = $this->sort[$key]['direction']; if ($simple_query || isset($this->sqlGroupBy[$sql_alias])) { // Simple queries, and the grouped columns of complicated queries // can be ordered normally, without the aggregation function. $this->sqlQuery->orderBy($sql_alias, $direction); if (!isset($this->sqlFields[$sql_alias])) { $this->sqlFields[$sql_alias] = explode('.', $sql_alias); } } else { // Order based on the smallest element of each group if the // direction is ascending, or on the largest element of each group // if the direction is descending. $function = $direction == 'ASC' ? 'min' : 'max'; $expression = "$function($sql_alias)"; $expression_alias = $this->sqlQuery->addExpression($expression); $this->sqlQuery->orderBy($expression_alias, $direction); } } return $this; } /** * Finish the query by adding fields, GROUP BY and range. * * @return \Drupal\Core\Entity\Query\Sql\Query * Returns the called object. 
*/ protected function finish() { $this->initializePager(); if ($this->range) { $this->sqlQuery->range($this->range['start'], $this->range['length']); } foreach ($this->sqlGroupBy as $field) { $this->sqlQuery->groupBy($field); } foreach ($this->sqlFields as $field) { $this->sqlQuery->addField($field[0], $field[1], isset($field[2]) ? $field[2] : NULL); } return $this; } /** * Executes the query and returns the result. * * @return int|array * Returns the query result as entity IDs. */ protected function result() { if ($this->count) { return $this->sqlQuery->countQuery()->execute()->fetchField(); } // Return a keyed array of results. The key is either the revision_id or // the entity_id depending on whether the entity type supports revisions. // The value is always the entity id. return $this->sqlQuery->execute()->fetchAllKeyed(); } /** * Constructs a select expression for a given field and language. * * @param string $field * The name of the field being queried. * @param string $langcode * The language code of the field. * * @return string * An expression that will select the given field for the given language in * a SELECT query, such as 'base_table.id'. */ protected function getSqlField($field, $langcode) { if (!isset($this->tables)) { $this->tables = $this->getTables($this->sqlQuery); } $base_property = "base_table.$field"; if (isset($this->sqlFields[$base_property])) { return $base_property; } else { return $this->tables->addField($field, 'LEFT', $langcode); } } /** * Returns whether the query requires GROUP BY and ORDER BY MIN/MAX. * * @return bool */ protected function isSimpleQuery() { return (!$this->pager && !$this->range && !$this->count) || $this->sqlQuery->getMetaData('simple_query'); } /** * Implements the magic __clone method. * * Reset fields and GROUP BY when cloning. */ public function __clone() { parent::__clone(); $this->sqlFields = array(); $this->sqlGroupBy = array(); } /** * Gets the Tables object for this query. * * @param \Drupal\Core\Database\Query\SelectInterface $sql_query * The SQL query object being built. * * @return \Drupal\Core\Entity\Query\Sql\TablesInterface * The object that adds tables and fields to the SQL query object. */ public function getTables(SelectInterface $sql_query) { $class = static::getClass($this->namespaces, 'Tables'); return new $class($sql_query); } }
import { Component } from '@angular/core';
import { IonicPage, NavController, NavParams } from 'ionic-angular';

@IonicPage()
@Component({
  selector: 'page-hours',
  templateUrl: 'hours.html',
})
export class HoursPage {
  started: boolean = false;
  stopped: boolean = true;

  constructor(public navCtrl: NavController, public navParams: NavParams) {
  }

  ionViewDidLoad() {
  }

  startstop() {
    this.started = !this.started;
    this.stopped = !this.stopped;

    var date = new Date();
    var month = date.getMonth() + 1;
    var year = date.getFullYear();
    // Use the local date consistently; getUTCDate() would mix UTC and local time.
    var day = date.getDate();
    var hour = date.getHours();
    // Zero-pad the minutes so e.g. 9:05 doesn't render as "9:5".
    var mins = ('0' + date.getMinutes()).slice(-2);
    var time = `${month}/${day}/${year} ${hour}:${mins}`;
    var msg = `Time ${this.started ? 'in' : 'out'} ${time}`;

    document.getElementById('startstops').innerHTML =
      "<div class='time'>" + msg + "</div>" + document.getElementById('startstops').innerHTML;
  }
}
namespace Microsoft.Test.Taupo.Query.Contracts
{
    using System;
    using System.Linq;
    using Microsoft.Test.Taupo.Common;

    /// <summary>
    /// Result of a query evaluation which is a reference to an entity.
    /// </summary>
    public class QueryReferenceValue : QueryValue
    {
        internal QueryReferenceValue(QueryReferenceType type, QueryError evaluationError, <API key> evaluationStrategy)
            : base(evaluationError, evaluationStrategy)
        {
            ExceptionUtilities.<API key>(type, "type");
            this.Type = type;
        }

        /// <summary>
        /// Gets the reference type.
        /// </summary>
        [System.Diagnostics.CodeAnalysis.SuppressMessage("Microsoft.Naming", "CA1721:<API key>", Justification = "Must be the same as the base class.")]
        public new QueryReferenceType Type { get; private set; }

        /// <summary>
        /// Gets a value indicating whether this instance is null.
        /// </summary>
        public override bool IsNull
        {
            get { return this.KeyValue == null; }
        }

        /// <summary>
        /// Gets the entity value (for dereference)
        /// </summary>
        /// <remarks>For dangling reference, this should be null value</remarks>
        public <API key> EntityValue { get; private set; }

        /// <summary>
        /// Gets the entity set full name
        /// </summary>
        public string EntitySetFullName { get; private set; }

        /// <summary>
        /// Gets the key value
        /// </summary>
        public QueryRecordValue KeyValue { get; private set; }

        /// <summary>
        /// Casts a <see cref="QueryValue"/> to a <see cref="QueryType"/>. The cast will return the value type cast to the new type.
        /// </summary>
        /// <param name="type">The type for the cast operation.</param>
        /// <returns><see cref="QueryValue"/> which is cast to the appropriate type</returns>
        public override QueryValue Cast(QueryType type)
        {
            return type.CreateErrorValue(new QueryError("Cannot perform Cast on a reference value"));
        }

        /// <summary>
        /// Checks if a <see cref="QueryValue"/> is of a particular <see cref="QueryType"/>. This operation will return true if the value is of the specified type.
        /// </summary>
        /// <param name="type">The type for the IsOf operation.</param>
        /// <param name="performExactMatch">Determines if an exact match needs to be performed.</param>
        /// <returns>A <see cref="QueryValue"/> containing true or false depending on whether the value is of the specified type or not.</returns>
        public override QueryValue IsOf(QueryType type, bool performExactMatch)
        {
            return type.CreateErrorValue(new QueryError("Cannot perform IsOf on a reference value"));
        }

        /// <summary>
        /// Converts the <see cref="QueryValue"/> to a particular <see cref="QueryType"/>.
        /// </summary>
        /// <param name="type">The type for the As operation.</param>
        /// <returns>The <see cref="QueryValue"/> converted to the specified type if successful. Returns null if this operation fails.</returns>
        public override QueryValue TreatAs(QueryType type)
        {
            return type.CreateErrorValue(new QueryError("Cannot perform TreatAs on a reference value"));
        }

        /// <summary>
        /// Returns a <see cref="System.String"/> that represents this instance.
        /// </summary>
        /// <returns>
        /// A <see cref="System.String"/> that represents this instance.
        /// </returns>
        public override string ToString()
        {
            if (this.EvaluationError != null)
            {
                return "Reference Value Error=" + this.EvaluationError + ", Type=" + this.Type.<API key>;
            }
            else if (this.IsNull)
            {
                return "Null Reference, Type=" + this.Type.<API key>;
            }
            else
            {
                return "Reference Value=" + this.EntitySetFullName + ", keyValue[" + this.KeyValue + "], Type=" + this.Type.<API key>;
            }
        }

        /// <summary>
        /// The Accept method used to support the double-dispatch visitor pattern with a visitor that returns a result.
        /// </summary>
        /// <typeparam name="TResult">The result type returned by the visitor.</typeparam>
        /// <param name="visitor">The visitor that is visiting this query value.</param>
        /// <returns>The result of visiting this query value.</returns>
        public override TResult Accept<TResult>(IQueryValueVisitor<TResult> visitor)
        {
            return visitor.Visit(this);
        }

        /// <summary>
        /// Gets a <see cref="QueryScalarValue"/> value indicating whether two values are equal.
        /// </summary>
        /// <param name="otherValue">The second value.</param>
        /// <returns>
        /// Instance of <see cref="QueryScalarValue"/> which represents the result of comparison.
        /// </returns>
        public QueryScalarValue EqualTo(QueryReferenceValue otherValue)
        {
            if ((this.IsNull && otherValue.IsNull) || object.ReferenceEquals(this.EntityValue, otherValue.EntityValue))
            {
                return new QueryScalarValue(EvaluationStrategy.BooleanType, true, this.EvaluationError, this.EvaluationStrategy);
            }
            else
            {
                return new QueryScalarValue(EvaluationStrategy.BooleanType, false, this.EvaluationError, this.EvaluationStrategy);
            }
        }

        /// <summary>
        /// Gets a <see cref="QueryScalarValue"/> value indicating whether two values are not equal.
        /// </summary>
        /// <param name="otherValue">The second value.</param>
        /// <returns>
        /// Instance of <see cref="QueryScalarValue"/> which represents the result of comparison.
        /// </returns>
        public QueryScalarValue NotEqualTo(QueryReferenceValue otherValue)
        {
            bool areEqual = (bool)this.EqualTo(otherValue).Value;
            return new QueryScalarValue(EvaluationStrategy.BooleanType, !areEqual, this.EvaluationError, this.EvaluationStrategy);
        }

        internal void SetReferenceValue(<API key> entityValue)
        {
            ExceptionUtilities.<API key>(entityValue, "entityValue");
            this.EntityValue = entityValue;

            // compute key value
            QueryEntityType entityType = this.Type.QueryEntityType;
            var keyType = new QueryRecordType(this.EvaluationStrategy);
            keyType.AddProperties(entityType.Properties.Where(m => m.IsPrimaryKey));
            this.KeyValue = keyType.CreateNewInstance();
            for (int i = 0; i < keyType.Properties.Count; i++)
            {
                this.KeyValue.SetMemberValue(i, entityValue.GetValue(keyType.Properties[i].Name));
            }

            var set = entityType.EntitySet;
            this.EntitySetFullName = set.Container.Name + "." + set.Name;
        }

        // This only happens when reading from the product or creating a dangling reference.
        internal void SetReferenceValue(string entitySetFullName, QueryRecordValue keyValue)
        {
            ExceptionUtilities.<API key>(entitySetFullName, "entitySetFullName");
            ExceptionUtilities.<API key>(keyValue, "keyValue");

            this.EntitySetFullName = entitySetFullName;
            this.KeyValue = keyValue;
            this.EntityValue = this.Type.QueryEntityType.NullValue;
        }

        /// <summary>
        /// Gets the type of the value.
        /// </summary>
        /// <returns>Type of the value.</returns>
        protected override QueryType GetTypeInternal()
        {
            return this.Type;
        }
    }
}
// CodeContracts // File System.IO.cs // Automatically generated contract file. using System.Collections.Generic; using System.IO; using System.Text; using System.Diagnostics.Contracts; using System; // Disable the "this variable is not used" warning as every field would imply it. #pragma warning disable 0414 // Disable the "this variable is never assigned to". #pragma warning disable 0067 // Disable the "this event is never assigned to". #pragma warning disable 0649 // Disable the "this variable is never used". #pragma warning disable 0169 // Disable the "new keyword not required" warning. #pragma warning disable 0109 // Disable the "extern without DllImport" warning. #pragma warning disable 0626 // Disable the "could hide other member" warning, can happen on certain properties. #pragma warning disable 0108 namespace System.IO { public enum <API key> { None = 0, Inheritable = 1, } }
{% extends "base.html" %} {% block preTitle %} {{ project.display_name }} (Build) - {% endblock %} {% block bodyclass %}job_view{% endblock %} {% block extra_head %} <link rel="stylesheet" href="/styles/<API key>.css"> {% endblock %} {% block bodyContent %} {% set page = "build" %} <div class="app ng-cloak" ng-app="job-status"> <script> var project = {{ project | scriptjson() | raw }}; var jobs = {{ jobs | scriptjson() | raw }}; var job = {{ job | scriptjson() | raw }}; var showStatus = {{ showStatus | scriptjson() | raw }}; var canAdminProject = {{ canAdminProject === true ? 'true' : 'false' }}; </script> <base href="/{{ page_base }}/"></base> <div id="build-page" class="main" ng-view></div> <script id="build-tpl.html" type="text/ng-template"> <div class="span8"> <div class="row-fluid job-page-intro"> <div class="job-title"> {% pluginblock JobPagePreTitle %}{% endpluginblock %} <h3 class="clearfix"> {% if currentUser %} <span ng-hide="job.running || job.project.access_level < 1" ng-click="startDeploy(job)" title="Retest &amp; Deploy" class="clickable <API key>"> <i class="fa fa-cloud-upload"></i> </span> <span ng-hide="job.running || job.project.access_level < 1" ng-click="startTest(job)" title="Retest" class="clickable test-only-action"> <i class="fa fa-refresh"></i> </span> {% endif %} <span class='job-repo'>{{ project.display_name }}</span> <a href="[[ project.display_url ]]" target="_blank"> <i class="fa fa-[[ project.provider.id ]]"></i> </a> {% if currentUser %} <a href="/[[ project.name ]]/config" ng-hide="job.project.access_level < 2" title="Configure" class="btn btn-default pull-right"> <i class="fa fa-wrench"></i> Configure </a> {% endif %} </h3> {% pluginblock JobPagePostTitle %}{% endpluginblock %} </div> </div> <div class='job-main'> <div class='row-fluid job-wrap'> {% pluginblock JobPagePreCols %} {% endpluginblock %} <div class='job-left-col'> <div class="row-fluid [[ job.status ]]" id="build-metadata"> {% include "partials/build_metadata.html" %} </div> <div class='row job-pre-console'> <div class='span12 <API key>'> {% pluginblock JobPagePreConsole %} {% endpluginblock %} </div> </div> {% for block in statusBlocks.runner %} <div class="status-{{ loop.key }} plugin-status runner-status {{ block.attrs.class }}" plugin-status="{{ loop.key }}"{% for val in block.attrs %}{% if loop.key != 'class' %} {{ loop.key }}="{{ val }}"{% endif %}{% endfor %}> {{ block.html | raw }} </div> {% endfor %} {% if statusBlocks.provider[project.provider.id] %} <div class="status-{{ loop.key }} plugin-status provider-status {{ block.attrs.class }}" plugin-status="{{ loop.key }}"{% for val in block.attrs %}{% if loop.key != 'class' %} {{ loop.key }}="{{ val }}"{% endif %}{% endfor %}> {{ block.html | raw }} </div> {% endif %} {% for block in statusBlocks.job %} <div class="status-{{ loop.key }} plugin-status job-plugin-status {{ block.attrs.class }}" plugin-status="{{ loop.key }}"{% for val in block.attrs %}{% if loop.key != 'class' %} {{ loop.key }}="{{ val }}"{% endif %}{% endfor %}> {{ block.html | raw }} </div> {% endfor %} <div class="build-error" ng-show="job.status === 'errored' && job.error"> <div class="alert alert-error"> <i class="fa <API key>"></i> [[ job.error.message ]] <a href="#" class="pull-right" ng-click="toggleErrorDetails()" ng-if="job.error.stack"> <i class="fa fa-ellipsis-h"></i> </a> <pre ng-if="showErrorDetails" ng-show="job.error.stack">[[ job.error.stack ]]</pre> </div> </div> <div class="console-output"> <i class="fa fa-gear fa-light fa-spin loading-icon" 
ng-show="loading"></i> {% include "build/console.html" %} </div> <div class="footer"> <a href="https://github.com/Strider-CD/strider">Strider-CD <i class="fa fa-github"></i></a> | <a href="https://github.com/Strider-CD/strider/issues?state=open">Get Help / Report a Bug</a> | <a href="http://strider.readthedocs.org/en/latest/intro.html">Docs</a> </div> </div> </div> </div> </div> <div class="span4"> <div class='job-detail-sidebar'> {% include "build/history.html" %} </div> </div> </script> </div> {% pluginblock AfterJobPage %}{% endpluginblock %} {% endblock %}
# Date Field Stores a `Date` in the model. Input is stripped to only store the Date part (no time). Internally uses [moment.js](http://momentjs.com/) to manage date parsing, formatting and comparison. If the `utc` option is set, `moment(value).utc()` is called in all methods to enable moment's utc mode. String parsing with moment will be done using the `inputFormat` option, which defaults to `"'YYYY-MM-DD'"`. ## Methods `format(formatString)` Formats the stored value using moment, with the provided format string. `formatString` defaults to the `format` option, which defaults to `"Do MMM YYYY"`. If no `formatString` is provided and the `format` option is false, the stored value will be returned. When the stored value is `undefined` an empty string is returned. `moment` Returns a moment instance initialised with the value stored in the item. `parse(value, formatString)` Returns a moment instance initialised with the provided value. `formatString` defaults to the `inputFormat` option. `updateItem` Updates with the provided value if it is different from the stored value. Uses `this.parse()` to interpret the input as a date. `null` and `""` can be used to clear the stored value. `validateInput` Ensures the value, if provided, is either a Date object, a number that can be interpreted as epoch time, or a string that can be parsed into a valid date by moment. Allows `null` and `""` to clear the field value. Inherits from [`Text`](../text) * `<API key>`
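For a quick illustration of the parsing and formatting behaviour described above, the same semantics can be reproduced with moment directly. This is a minimal sketch of the semantics only; the field's actual wiring is framework-specific.

```javascript
var moment = require('moment');

// parse(): the inputFormat option defaults to 'YYYY-MM-DD'
var parsed = moment('2014-02-28', 'YYYY-MM-DD');
console.log(parsed.isValid()); // true

// format(): the format option defaults to 'Do MMM YYYY'
console.log(parsed.format('Do MMM YYYY')); // "28th Feb 2014"

// With the utc option set, moment(value).utc() is used instead;
// the rendered date then depends on the local UTC offset.
console.log(moment('2014-02-28').utc().format('YYYY-MM-DD'));
```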
<http:/ <https:/ <mailto:foobarbaz> <http:/google <foo@
// CodeContracts // File System.Web.UI.WebControls.WebParts.<API key>.cs // Automatically generated contract file. using System.Collections.Generic; using System.IO; using System.Text; using System.Diagnostics.Contracts; using System; // Disable the "this variable is not used" warning as every field would imply it. #pragma warning disable 0414 // Disable the "this variable is never assigned to". #pragma warning disable 0067 // Disable the "this event is never assigned to". #pragma warning disable 0649 // Disable the "this variable is never used". #pragma warning disable 0169 // Disable the "new keyword not required" warning. #pragma warning disable 0109 // Disable the "extern without DllImport" warning. #pragma warning disable 0626 // Disable the "could hide other member" warning, can happen on certain properties. #pragma warning disable 0108 namespace System.Web.UI.WebControls.WebParts { sealed public partial class <API key> : System.Collections.<API key> { #region Methods and constructors public bool Contains(Type value) { return default(bool); } public void CopyTo(Type[] array, int index) { } public int IndexOf(Type value) { return default(int); } public <API key>() { } public <API key>(System.Web.UI.WebControls.WebParts.<API key> <API key>, System.Collections.ICollection transformerTypes) { } public <API key>(System.Collections.ICollection transformerTypes) { } #endregion #region Properties and indexers public Type this [int index] { get { return default(Type); } } #endregion #region Fields public readonly static System.Web.UI.WebControls.WebParts.<API key> Empty; #endregion } }
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;

namespace Umbraco.Core.Xml.XPath
{
    /// <summary>
    /// Represents the type of content that can be navigated via XPath.
    /// </summary>
    interface <API key>
    {
        /// <summary>
        /// Gets the name of the content type.
        /// </summary>
        string Name { get; }

        /// <summary>
        /// Gets the field types of the content type.
        /// </summary>
        /// <remarks>This includes the attributes and the properties.</remarks>
        INavigableFieldType[] FieldTypes { get; }
    }
}
package com.mozu.api.utils; public class Endpoints { public static final String AUTH_URL = "api/platform/applications/authtickets"; public static final String AUTH_REFRESH_URL = "api/platform/applications/authtickets/refresh-ticket/%s"; public static final String TENANT_END_POINT = "api/platform/tenants"; public static final String SITES_END_POINT = "api/platform/tenants/%s/sites"; public static final String ATTRIBUTE_END_POINT = "api/commerce/catalog/admin/attributedefinition/attributes"; public static final String <API key> = "api/commerce/catalog/admin/attributedefinition/attributes/%s/VocabularyValues"; public static final String <API key> = "api/commerce/catalog/admin/attributedefinition/producttypes"; public static final String ORDER_END_POINT = "api/commerce/orders"; public static final String <API key> = "api/commerce/settings/applicationstatus"; public static final String <API key> = "api/platform/appdata"; public static final String <API key> = "api/platform/sitedata"; public static final String <API key> = "api/platform/tenantdata"; }
#ifndef <API key> #if !defined(<API key>) #define <API key> #include <boost/preprocessor/iterate.hpp> #include <boost/preprocessor/repetition/enum_params.hpp> #include <boost/preprocessor/repetition/enum_binary_params.hpp> #include <boost/fusion/tuple/detail/tuple.hpp> #include <boost/fusion/support/detail/as_fusion_element.hpp> namespace boost { namespace fusion { <API key> inline tuple<> make_tuple() { return tuple<>(); } }} #if !defined(<API key>) #include <boost/fusion/tuple/detail/preprocessed/make_tuple.hpp> #else #if defined(__WAVE__) && defined(<API key>) #pragma wave option(preserve: 2, line: 0, output: "preprocessed/make_tuple" <API key> ".hpp") #endif #if defined(__WAVE__) && defined(<API key>) #pragma wave option(preserve: 1) #endif namespace boost { namespace fusion { #define <API key>(z, n, data) \ typename detail::as_fusion_element<BOOST_PP_CAT(T, n)>::type #define BOOST_PP_FILENAME_1 <boost/fusion/tuple/detail/make_tuple.hpp> #define <API key> (1, <API key>) #include BOOST_PP_ITERATE() #undef <API key> }} #if defined(__WAVE__) && defined(<API key>) #pragma wave option(output: null) #endif #endif // <API key> #endif #else // defined(<API key>) // Preprocessor vertical repetition code #define N BOOST_PP_ITERATION() template <<API key>(N, typename T)> <API key> inline tuple<BOOST_PP_ENUM(N, <API key>, _)> make_tuple(<API key>(N, T, const& arg)) { return tuple<BOOST_PP_ENUM(N, <API key>, _)>( <API key>(N, arg)); } #undef N #endif // defined(<API key>)
<?php namespace Bolt\Helpers; use Cocur\Slugify\Slugify; class Str { /** * Returns a "safe" version of the given string - basically only US-ASCII and * numbers. Needed because filenames and titles and such, can't use all characters. * * @param string $str * @param boolean $strict * @param string $extrachars * * @return string */ public static function makeSafe($str, $strict = false, $extrachars = '') { $str = str_replace('&amp;', '', $str); $delim = '/'; if ($extrachars != '') { $extrachars = preg_quote($extrachars, $delim); } if ($strict) { $slugify = Slugify::create('/[^a-z0-9_' . $extrachars . ' -]+/'); $str = $slugify->slugify($str, ''); $str = str_replace(' ', '-', $str); } else { // Allow Uppercase and don't convert spaces to dashes $slugify = Slugify::create('/[^a-zA-Z0-9_.,' . $extrachars . ' -]+/', ['lowercase' => false]); $str = $slugify->slugify($str, ''); } return $str; } public static function replaceFirst($search, $replace, $subject) { $pos = strpos($subject, $search); if ($pos !== false) { $subject = substr_replace($subject, $replace, $pos, strlen($search)); } return $subject; } /** * Add 'soft hyphens' &shy; to a string, so that it won't break layout in HTML when * using strings without spaces or dashes. Only breaks in long (> 19 chars) words. * * @param string $str * * @return string */ public static function shyphenate($str) { $res = preg_match_all('/([a-z0-9]{19,})/i', $str, $matches); if ($res) { foreach ($matches[1] as $key => $match) { $str = str_replace($match, wordwrap($match, 10, '&shy;', true), $str); } } return $str; } }
import Ember from 'ember-metal'; // Ember as namespace
import {
  A as emberA,
  typeOf,
  String as StringUtils,
  Namespace,
  Object as EmberObject
} from 'ember-runtime';

/**
@module ember
@submodule <API key>
*/

export default EmberObject.extend({
  /**
    The resolver instance of the application
    being debugged. This property will be injected
    on creation.

    @property resolver
    @default null
    @public
  */
  resolver: null,

  /**
    Returns true if it is possible to catalog a list of available
    classes in the resolver for a given type.

    @method <API key>
    @param {String} type The type. e.g. "model", "controller", "route".
    @return {boolean} whether a list is available for this type.
    @public
  */
  <API key>(type) {
    if (type === 'model' || type === 'template') {
      return false;
    }
    return true;
  },

  /**
    Returns the available classes for a given type.

    @method <API key>
    @param {String} type The type. e.g. "model", "controller", "route".
    @return {Array} An array of strings.
    @public
  */
  <API key>(type) {
    let namespaces = emberA(Namespace.NAMESPACES);
    let types = emberA();
    let typeSuffixRegex = new RegExp(`${StringUtils.classify(type)}$`);

    namespaces.forEach(namespace => {
      if (namespace !== Ember) {
        for (let key in namespace) {
          if (!namespace.hasOwnProperty(key)) {
            continue;
          }
          if (typeSuffixRegex.test(key)) {
            let klass = namespace[key];
            if (typeOf(klass) === 'class') {
              types.push(StringUtils.dasherize(key.replace(typeSuffixRegex, '')));
            }
          }
        }
      }
    });

    return types;
  }
});
* { box-sizing: border-box; -webkit-overflow-scrolling: touch; -webkit-tap-highlight-color: rgba(0, 0, 0, 0); -webkit-text-size-adjust: none; -webkit-touch-callout: none; -webkit-font-smoothing: antialiased; }
.progress { background-color: #000; height: 2px; left: 0px; position: fixed; right: 0px; top: 0px; -webkit-transition: width 0.2s, opacity 0.4s; transition: width 0.2s, opacity 0.4s; width: 0%; z-index: 999999; }
html, body { height: 100%; }
body { -moz-osx-font-smoothing: grayscale; -webkit-font-smoothing: antialiased; font-family: 'Source Sans Pro', 'Helvetica Neue', Arial, sans-serif; font-size: 15px; letter-spacing: 0; margin: 0; overflow-x: hidden; color: #000; }
img { max-width: 100%; }
kbd { display: inline-block; padding: 3px 5px; margin-bottom: 3px; font-size: 12px !important; line-height: 12px; vertical-align: middle; border: solid 1px #ccc; border-radius: 3px; }
/* navbar */
nav { position: absolute; right: 0; left: 0; z-index: 10; margin: 25px 60px 0 0; text-align: right; }
nav p { margin: 0; }
nav ul, nav li { list-style: none; display: inline-block; margin: 0; }
nav a { margin: 0 1em; padding: 5px 0; font-size: 16px; text-decoration: none; color: inherit; -webkit-transition: color .3s; transition: color .3s; }
nav a:hover { color: #000; }
nav a.active { color: #000; border-bottom: 2px solid #000; }
/* navbar dropdown */
nav li { position: relative; display: inline-block; }
nav li ul { background-color: rgba(255, 255, 255, 0.6); border: 1px solid #000; opacity: 0; overflow: hidden; padding: 0; position: absolute; right: 1em; top: 26px; -webkit-transform-origin: 100% 0%; transform-origin: 100% 0%; -webkit-transform: scale(1, 0); transform: scale(1, 0); -webkit-transition: opacity .4s ease-out, -webkit-transform .2s ease; transition: opacity .4s ease-out, -webkit-transform .2s ease; transition: opacity .4s ease-out, transform .2s ease; transition: opacity .4s ease-out, transform .2s ease, -webkit-transform .2s ease; -webkit-transition-delay: .3s; transition-delay: .3s; }
nav li ul li { display: block; font-size: 14px; margin: 0; padding: 4px 10px; white-space: nowrap; }
nav li ul a { display: block; margin: 0; padding: 0; }
nav li ul a.active { border-bottom: 0; }
nav li:hover ul { opacity: 1; -webkit-transform: scale(1, 1); transform: scale(1, 1); -webkit-transition: opacity .4s ease, -webkit-transform .2s ease-out; transition: opacity .4s ease, -webkit-transform .2s ease-out; transition: opacity .4s ease, transform .2s ease-out; transition: opacity .4s ease, transform .2s ease-out, -webkit-transform .2s ease-out; -webkit-transition-delay: 0s; transition-delay: 0s; }
nav.no-badge { margin-right: 25px; }
/* github corner */
.github-corner { position: fixed; top: 0; right: 0; z-index: 1; text-decoration: none; border-bottom: 0; }
.github-corner svg { color: #fff; height: 80px; width: 80px; fill: #000; }
.github-corner:hover .octo-arm { -webkit-animation: octocat-wave 560ms ease-in-out; animation: octocat-wave 560ms ease-in-out; }
/* main */
main { width: 100vw; height: 100%; position: relative; }
.anchor { text-decoration: none; -webkit-transition: all .3s; transition: all .3s; display: inline-block; }
.anchor span { color: #000; }
.anchor:hover { text-decoration: underline; }
/* sidebar */
.sidebar { border-right: 1px solid rgba(0, 0, 0, .07); overflow-y: auto; padding: 40px 0; top: 0; bottom: 0; left: 0; position: absolute; -webkit-transition: -webkit-transform 250ms ease-out; transition: -webkit-transform 250ms ease-out; transition: transform 250ms ease-out; transition: transform 250ms ease-out, -webkit-transform 250ms ease-out; width: 300px; z-index: 20; }
.sidebar ul { margin: 0; padding: 0; }
.sidebar ul, .sidebar ul li { list-style: none; }
.sidebar ul li a { display: block; border-bottom: none; }
.sidebar ul li ul { padding-left: 20px; }
/* sidebar toggle */
.sidebar-toggle { border: 0; outline: none; bottom: 0; left: 0; position: absolute; text-align: center; -webkit-transition: opacity .3s; transition: opacity .3s; z-index: 30; width: 284px; padding: 10px; background-color: rgba(255, 255, 255, 0.8); }
.sidebar-toggle .sidebar-toggle-button:hover { opacity: .4; }
.sidebar-toggle span { background-color: #000; display: block; width: 16px; height: 2px; margin-bottom: 4px; }
body.sticky .sidebar, body.sticky .sidebar-toggle { position: fixed; }
/* main content */
.content { top: 0; right: 0; bottom: 0; left: 300px; position: absolute; padding-top: 20px; -webkit-transition: left 250ms ease; transition: left 250ms ease; }
/* markdown content found on pages */
.markdown-section { position: relative; margin: 0 auto; max-width: 800px; padding: 20px 15px 40px 15px; }
.markdown-section > * { box-sizing: border-box; font-size: inherit; }
.markdown-section >:first-child { margin-top: 0!important; }
.markdown-section table { display: block; width: 100%; overflow: auto; border-spacing: 0; border-collapse: collapse; margin-bottom: 1em; }
.markdown-section th { font-weight: 700; padding: 6px 13px; border: 1px solid #ddd; }
.markdown-section td { padding: 6px 13px; border: 1px solid #ddd; }
.markdown-section tr { border-top: 1px solid #ccc; }
.markdown-section tr:nth-child(2n) { background-color: #f8f8f8; }
body.close .sidebar { -webkit-transform: translateX(-300px); transform: translateX(-300px); }
body.close .sidebar-toggle { width: auto; }
body.close .content { left: 0; }
@media (max-width: 600px) { .github-corner, .sidebar-toggle, .sidebar { position: fixed; } nav { margin-top: 16px; } nav li ul { top: 30px; } main { height: auto; overflow-x: hidden; } .sidebar { left: -300px; -webkit-transition: -webkit-transform 250ms ease-out; transition: -webkit-transform 250ms ease-out; transition: transform 250ms ease-out; transition: transform 250ms ease-out, -webkit-transform 250ms ease-out; } .content { left: 0; max-width: 100vw; position: static; -webkit-transition: -webkit-transform 250ms ease; transition: -webkit-transform 250ms ease; transition: transform 250ms ease; transition: transform 250ms ease, -webkit-transform 250ms ease; } nav, .github-corner { -webkit-transition: -webkit-transform 250ms ease-out; transition: -webkit-transform 250ms ease-out; transition: transform 250ms ease-out; transition: transform 250ms ease-out, -webkit-transform 250ms ease-out; } .sidebar-toggle { width: auto; background-color: transparent; } body.close .sidebar { -webkit-transform: translateX(300px); transform: translateX(300px); } body.close .sidebar-toggle { width: 284px; background-color: rgba(255, 255, 255, 0.8); -webkit-transition: 1s background-color; transition: 1s background-color; } body.close .content { -webkit-transform: translateX(300px); transform: translateX(300px); } body.close nav, body.close .github-corner { display: none; } .github-corner .octo-arm { -webkit-animation: octocat-wave 560ms ease-in-out; animation: octocat-wave 560ms ease-in-out; } .github-corner:hover .octo-arm { -webkit-animation: none; animation: none; } }
@-webkit-keyframes octocat-wave { 0%, 100% { -webkit-transform: rotate(0); transform: rotate(0); } 20%, 60% { -webkit-transform: rotate(-25deg); transform: rotate(-25deg); } 40%, 80% { -webkit-transform: rotate(10deg); transform:
rotate(10deg); } } @keyframes octocat-wave { 0%, 100% { -webkit-transform: rotate(0); transform: rotate(0); } 20%, 60% { -webkit-transform: rotate(-25deg); transform: rotate(-25deg); } 40%, 80% { -webkit-transform: rotate(10deg); transform: rotate(10deg); } }
<?php use yii\helpers\Html; use yii\widgets\ActiveForm; /* @var $this yii\web\View */ /* @var $model common\models\User */ /* @var $form yii\widgets\ActiveForm */ ?> <div class="user-form"> <?php $form = ActiveForm::begin(); ?> <?= $form->field($model, 'status')->textInput() ?> <div class="form-group"> <?= Html::submitButton($model->isNewRecord ? Yii::t('app', 'Create') : Yii::t('app', 'Update'), ['class' => $model->isNewRecord ? 'btn btn-success' : 'btn btn-primary']) ?> </div> <?php ActiveForm::end(); ?> </div>
<?php
/** * WPSEO plugin file. * * @package WPSEO\Internals\Options */

/** * Option: wpseo_taxonomy_meta. */
class WPSEO_Taxonomy_Meta extends WPSEO_Option {

	/** * @var string Option name. */
	public $option_name = 'wpseo_taxonomy_meta';

	/** * @var bool Whether to include the option in the return for WPSEO_Options::get_all(). */
	public $include_in_all = false;

	/** * @var array Array of defaults for the option. * Shouldn't be requested directly, use $this->get_defaults(); * * {@internal Important: in contrast to most defaults, the below array format is * very bare. The real option is in the format [taxonomy_name][term_id][...] * where [...] is any of the $defaults_per_term options shown below. * This is of course taken into account in the below methods.}} */
	protected $defaults = array();

	/** * @var string Option name - same as $option_name property, but now also available to static methods. * @static */
	public static $name;

	/** * @var array Array of defaults for individual taxonomy meta entries. * @static */
	public static $defaults_per_term = array( 'wpseo_title' => '', 'wpseo_desc' => '', 'wpseo_canonical' => '', 'wpseo_bctitle' => '', 'wpseo_noindex' => 'default', 'wpseo_focuskw' => '', 'wpseo_linkdex' => '', 'wpseo_content_score' => '',
		// Social fields.
		'wpseo_opengraph-title' => '', 'wpseo_opengraph-description' => '', 'wpseo_opengraph-image' => '', 'wpseo_twitter-title' => '', 'wpseo_twitter-description' => '', 'wpseo_twitter-image' => '', );

	/** * @var array Available index options. * Used for form generation and input validation. * * @static * * {@internal Labels (translation) added on admin_init via WPSEO_Taxonomy::translate_meta_boxes().}} */
	public static $no_index_options = array( 'default' => '', 'index' => '', 'noindex' => '', );

	/** * Add the actions and filters for the option. * * @todo [JRF => testers] Check if the extra actions below would run into problems if an option * is updated early on and if so, change the call to schedule these for a later action on add/update * instead of running them straight away. * * @return \WPSEO_Taxonomy_Meta */
	protected function __construct() { parent::__construct(); self::$name = $this->option_name;
		/* On successful update/add of the option, flush the W3TC cache. */
		add_action( 'add_option_' . $this->option_name, array( 'WPSEO_Utils', 'flush_w3tc_cache' ) ); add_action( 'update_option_' . $this->option_name, array( 'WPSEO_Utils', 'flush_w3tc_cache' ) ); }

	/** * Get the singleton instance of this class. * * @return object */
	public static function get_instance() { if ( ! ( self::$instance instanceof self ) ) { self::$instance = new self(); self::$name = self::$instance->option_name; } return self::$instance; }

	/** * Add extra default options received from a filter. */
	public function enrich_defaults() { $extra_defaults_per_term = apply_filters( 'wpseo_add_extra_taxmeta_term_defaults', array() ); if ( is_array( $extra_defaults_per_term ) ) { self::$defaults_per_term = array_merge( $extra_defaults_per_term, self::$defaults_per_term ); } }

	/** * Helper method - Combines a fixed array of default values with an options array * while filtering out any keys which are not in the defaults array. * * @static * * @param string $option_key Option name of the option we're doing the merge for. * @param array $options Optional. Current options. If not set, the option defaults for the $option_key will be returned. * * @return array Combined and filtered options array. */

	/** * Validate the option. * * @param array $dirty New value for the option. * @param array $clean Clean value for the option, normally the defaults. * @param array $old Old value of the option. * * @return array Validated clean value for the option to be saved to the database. */
	protected function validate_option( $dirty, $clean, $old ) {
		/* * Prevent complete validation (which can be expensive when there are lots of terms) * if only one item has changed and has already been validated. */
		if ( isset( $dirty['wpseo_already_validated'] ) && $dirty['wpseo_already_validated'] === true ) { unset( $dirty['wpseo_already_validated'] ); return $dirty; }
		foreach ( $dirty as $taxonomy => $terms ) {
			/* Don't validate taxonomy - may not be registered yet and we don't want to remove valid ones. */
			if ( is_array( $terms ) && $terms !== array() ) { foreach ( $terms as $term_id => $meta_data ) {
				/* Only validate term if the taxonomy exists. */
				if ( taxonomy_exists( $taxonomy ) && get_term_by( 'id', $term_id, $taxonomy ) === false ) {
					/* Is this term id a special case ? */
					if ( has_filter( 'wpseo_sanitize_tax_meta_' . $term_id ) !== false ) { $clean[ $taxonomy ][ $term_id ] = apply_filters( 'wpseo_sanitize_tax_meta_' . $term_id, $meta_data, $taxonomy, $term_id ); } continue; }
				if ( is_array( $meta_data ) && $meta_data !== array() ) {
					/* Validate meta data. */
					$old_meta = self::get_term_meta( $term_id, $taxonomy ); $meta_data = self::validate_term_meta_data( $meta_data, $old_meta ); if ( $meta_data !== array() ) { $clean[ $taxonomy ][ $term_id ] = $meta_data; } }
				// Deal with special cases (for when taxonomy doesn't exist yet).
				if ( ! isset( $clean[ $taxonomy ][ $term_id ] ) && has_filter( 'wpseo_sanitize_tax_meta_' . $term_id ) !== false ) { $clean[ $taxonomy ][ $term_id ] = apply_filters( 'wpseo_sanitize_tax_meta_' . $term_id, $meta_data, $taxonomy, $term_id ); } } } }
		return $clean; }

	/** * Validate the meta data for one individual term and removes default values (no need to save those). * * @static * * @param array $meta_data New values. * @param array $old_meta The original values. * * @return array Validated and filtered value. */
	public static function validate_term_meta_data( $meta_data, $old_meta ) {
		$clean = self::$defaults_per_term; $meta_data = array_map( array( 'WPSEO_Utils', 'trim_recursive' ), $meta_data );
		if ( ! is_array( $meta_data ) || $meta_data === array() ) { return $clean; }
		foreach ( $clean as $key => $value ) { switch ( $key ) {
			case 'wpseo_noindex': if ( isset( $meta_data[ $key ] ) ) { if ( isset( self::$no_index_options[ $meta_data[ $key ] ] ) ) { $clean[ $key ] = $meta_data[ $key ]; } } elseif ( isset( $old_meta[ $key ] ) ) {
				// Retain old value if field currently not in use.
				$clean[ $key ] = $old_meta[ $key ]; } break;
			case 'wpseo_canonical': if ( isset( $meta_data[ $key ] ) && $meta_data[ $key ] !== '' ) { $url = WPSEO_Utils::sanitize_url( $meta_data[ $key ] ); if ( $url !== '' ) { $clean[ $key ] = $url; } unset( $url ); } break;
			case 'wpseo_bctitle': if ( isset( $meta_data[ $key ] ) ) { $clean[ $key ] = WPSEO_Utils::sanitize_text_field( stripslashes( $meta_data[ $key ] ) ); } elseif ( isset( $old_meta[ $key ] ) ) {
				// Retain old value if field currently not in use.
				$clean[ $key ] = $old_meta[ $key ]; } break;
			case 'wpseo_focuskw': case 'wpseo_title': case 'wpseo_desc': case 'wpseo_linkdex': default: if ( isset( $meta_data[ $key ] ) && is_string( $meta_data[ $key ] ) ) { $clean[ $key ] = WPSEO_Utils::sanitize_text_field( stripslashes( $meta_data[ $key ] ) ); }
				if ( 'wpseo_focuskw' === $key ) { $clean[ $key ] = str_replace( array( '&lt;', '&gt;', '&quot', '&#96', '<', '>', '"', '`' ), '', $clean[ $key ] ); } break; }
			$clean[ $key ] = apply_filters( 'wpseo_sanitize_tax_meta_' . $key, $clean[ $key ], ( isset( $meta_data[ $key ] ) ? $meta_data[ $key ] : null ), ( isset( $old_meta[ $key ] ) ? $old_meta[ $key ] : null ) ); }
		// Only save the non-default values.
		return array_diff_assoc( $clean, self::$defaults_per_term ); }

	/** * Clean a given option value. * - Convert old option values to new * - Fixes strings which were escaped (should have been sanitized - escaping is for output) * * @param array $option_value Old (not merged with defaults or filtered) option value to * clean according to the rules for this option. * @param string $current_version Optional. Version from which to upgrade, if not set, * version specific upgrades will be disregarded. * @param array $all_old_option_values Optional. Only used when importing old options to have * access to the real old values, in contrast to the saved ones. * * @return array Cleaned option. */
	protected function clean_option( $option_value, $current_version = null, $all_old_option_values = null ) {
		/* Clean up old values and remove empty arrays. */
		if ( is_array( $option_value ) && $option_value !== array() ) { foreach ( $option_value as $taxonomy => $terms ) { if ( is_array( $terms ) && $terms !== array() ) { foreach ( $terms as $term_id => $meta_data ) { if ( ! is_array( $meta_data ) || $meta_data === array() ) {
			// Remove empty term arrays.
			unset( $option_value[ $taxonomy ][ $term_id ] ); } else { foreach ( $meta_data as $key => $value ) { switch ( $key ) {
			case 'noindex': if ( $value === 'on' ) {
				// Convert 'on' to 'noindex'.
				$option_value[ $taxonomy ][ $term_id ][ $key ] = 'noindex'; } break;
			case 'canonical': case 'wpseo_bctitle': case 'wpseo_title': case 'wpseo_desc': case 'wpseo_linkdex':
				// @todo [JRF => whomever] needs checking, I don't have example data [JRF].
				if ( $value !== '' ) {
					// Fix incorrectly saved (encoded) canonical urls and texts.
					$option_value[ $taxonomy ][ $term_id ][ $key ] = wp_specialchars_decode( stripslashes( $value ), ENT_QUOTES ); } break;
			default:
				// @todo [JRF => whomever] needs checking, I don't have example data [JRF].
				if ( $value !== '' ) {
					// Fix incorrectly saved (escaped) text strings.
					$option_value[ $taxonomy ][ $term_id ][ $key ] = wp_specialchars_decode( $value, ENT_QUOTES ); } break; } } } } } else {
			// Remove empty taxonomy arrays.
			unset( $option_value[ $taxonomy ] ); } } }
		return $option_value; }

	/** * Retrieve a taxonomy term's meta value(s). * * @static * * @param mixed $term Term to get the meta value for * either (string) term name, (int) term id or (object) term. * @param string $taxonomy Name of the taxonomy to which the term is attached. * @param string $meta Optional. Meta value to get (without prefix). * * @return mixed|bool Value for the $meta if one is given, might be the default. * If no meta is given, an array of all the meta data for the term. * False if the term does not exist or the $meta provided is invalid. */
	public static function get_term_meta( $term, $taxonomy, $meta = null ) {
		/* Figure out the term id. */
		if ( is_int( $term ) ) { $term = get_term_by( 'id', $term, $taxonomy ); } elseif ( is_string( $term ) ) { $term = get_term_by( 'slug', $term, $taxonomy ); }
		if ( is_object( $term ) && isset( $term->term_id ) ) { $term_id = $term->term_id; } else { return false; }
		$tax_meta = self::get_term_tax_meta( $term_id, $taxonomy );
		/* * Either return the complete array or a single value from it or false if the value does not exist * (shouldn't happen after merge with defaults, indicates typo in request). */
		if ( ! isset( $meta ) ) { return $tax_meta; }
		if ( isset( $tax_meta[ 'wpseo_' . $meta ] ) ) { return $tax_meta[ 'wpseo_' . $meta ]; }
		return false; }

	/** * Get the current queried object and return the meta value. * * @param string $meta The meta field that is needed. * * @return bool|mixed */
	public static function get_meta_without_term( $meta ) { $term = $GLOBALS['wp_query']->get_queried_object(); return self::get_term_meta( $term, $term->taxonomy, $meta ); }

	/** * Saving the values for the given term_id. * * @param int $term_id ID of the term to save data for. * @param string $taxonomy The taxonomy the term belongs to. * @param array $meta_values The values that will be saved. */
	public static function set_values( $term_id, $taxonomy, array $meta_values ) {
		/* Validate the post values */
		$old = self::get_term_meta( $term_id, $taxonomy ); $clean = self::validate_term_meta_data( $meta_values, $old ); self::save_clean_values( $term_id, $taxonomy, $clean ); }

	/** * Setting a single value to the term meta. * * @param int $term_id ID of the term to save data for. * @param string $taxonomy The taxonomy the term belongs to. * @param string $meta_key The target meta key to store the value in. * @param string $meta_value The value of the target meta key. */
	public static function set_value( $term_id, $taxonomy, $meta_key, $meta_value ) { if ( substr( strtolower( $meta_key ), 0, 6 ) !== 'wpseo_' ) { $meta_key = 'wpseo_' . $meta_key; } self::set_values( $term_id, $taxonomy, array( $meta_key => $meta_value ) ); }

	/** * Find the keyword usages in the metas for the taxonomies/terms. * * @param string $keyword The keyword to look for. * @param string $current_term_id The current term id. * @param string $current_taxonomy The current taxonomy name. * * @return array */
	public static function get_keyword_usage( $keyword, $current_term_id, $current_taxonomy ) {
		$tax_meta = self::get_tax_meta(); $found = array();
		// @todo Check for terms of all taxonomies, not only the current taxonomy.
		foreach ( $tax_meta as $taxonomy_name => $terms ) { foreach ( $terms as $term_id => $meta_values ) { $is_current = ( $current_taxonomy === $taxonomy_name && (string) $current_term_id === (string) $term_id ); if ( ! $is_current && ! empty( $meta_values['wpseo_focuskw'] ) && $meta_values['wpseo_focuskw'] === $keyword ) { $found[] = $term_id; } } }
		return array( $keyword => $found ); }

	/** * Saving the values for the given term_id. * * @param int $term_id ID of the term to save data for. * @param string $taxonomy The taxonomy the term belongs to. * @param array $clean Array with clean values. */
	private static function save_clean_values( $term_id, $taxonomy, array $clean ) {
		$tax_meta = self::get_tax_meta();
		/* Add/remove the result to/from the original option value. */
		if ( $clean !== array() ) { $tax_meta[ $taxonomy ][ $term_id ] = $clean; } else { unset( $tax_meta[ $taxonomy ][ $term_id ] ); if ( isset( $tax_meta[ $taxonomy ] ) && $tax_meta[ $taxonomy ] === array() ) { unset( $tax_meta[ $taxonomy ] ); } }
		// Prevent complete array validation.
		$tax_meta['wpseo_already_validated'] = true;
		self::save_tax_meta( $tax_meta ); }

	/** * Getting the meta from the options. * * @return void|array */
	private static function get_tax_meta() { return get_option( self::$name ); }

	/** * Saving the tax meta values to the database. * * @param array $tax_meta Array with the meta values for taxonomy. */
	private static function save_tax_meta( $tax_meta ) { update_option( self::$name, $tax_meta ); }

	/** * Getting the taxonomy meta for the given term_id and taxonomy. * * @param int $term_id The id of the term. * @param string $taxonomy Name of the taxonomy to which the term is attached.
* * @return array */ private static function get_term_tax_meta( $term_id, $taxonomy ) { $tax_meta = self::get_tax_meta(); /* If we have data for the term, merge with defaults for complete array, otherwise set defaults. */ if ( isset( $tax_meta[ $taxonomy ][ $term_id ] ) ) { return array_merge( self::$defaults_per_term, $tax_meta[ $taxonomy ][ $term_id ] ); } return self::$defaults_per_term; } }
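/*
 * Added illustration (not part of the class above): a minimal sketch of how the
 * static helpers are meant to be called. Term id 42 and the 'category' taxonomy
 * are placeholder assumptions.
 *
 *   WPSEO_Taxonomy_Meta::set_value( 42, 'category', 'noindex', 'noindex' ); // key is prefixed to 'wpseo_noindex', then validated and saved
 *   $title = WPSEO_Taxonomy_Meta::get_term_meta( 42, 'category', 'title' ); // returns the stored 'wpseo_title', merged with the defaults
 */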
<?php

namespace Sonata\AdminBundle\Tests\Fixtures\Admin;

use Sonata\AdminBundle\Admin\Admin;

class PostAdmin extends Admin
{
    protected $metadataClass = null;

    public function setParentAssociationMapping($associationMapping)
    {
        $this->parentAssociationMapping = $associationMapping;
    }

    public function setClassMetaData($classMetaData)
    {
        $this->classMetaData = $classMetaData;
    }

    public function getClassMetaData()
    {
        if ($this->classMetaData) {
            return $this->classMetaData;
        }

        return parent::getClassMetaData();
    }
}
// CodeContracts

using System;
using System.Collections;
using System.Reflection;
using System.Diagnostics.Contracts;
//using System.Runtime.CompilerServices;

namespace System.Collections.Generic
{
  // Summary:
  //     Represents a collection of objects that can be individually accessed by index.
  //
  // Type parameters:
  //   T:
  //     The type of elements in the list.
  [ContractClass(typeof(IListContract<>))]
  public interface IList<T> : ICollection<T>
  {
    // Summary:
    //     Gets or sets the element at the specified index.
    //
    // Parameters:
    //   index:
    //     The zero-based index of the element to get or set.
    //
    // Returns:
    //     The element at the specified index.
    //
    // Exceptions:
    //   System.ArgumentOutOfRangeException:
    //     index is not a valid index in the System.Collections.Generic.IList<T>.
    //
    //   System.NotSupportedException:
    //     The property is set and the System.Collections.Generic.IList<T> is read-only.
    T this[int index] { get; set; }

    // Summary:
    //     Determines the index of a specific item in the System.Collections.Generic.IList<T>.
    //
    // Parameters:
    //   item:
    //     The object to locate in the System.Collections.Generic.IList<T>.
    //
    // Returns:
    //     The index of item if found in the list; otherwise, -1.
    [Pure]
    int IndexOf(T item);

    // Summary:
    //     Inserts an item to the System.Collections.Generic.IList<T> at the specified
    //     index.
    //
    // Parameters:
    //   index:
    //     The zero-based index at which item should be inserted.
    //
    //   item:
    //     The object to insert into the System.Collections.Generic.IList<T>.
    //
    // Exceptions:
    //   System.ArgumentOutOfRangeException:
    //     index is not a valid index in the System.Collections.Generic.IList<T>.
    //
    //   System.NotSupportedException:
    //     The System.Collections.Generic.IList<T> is read-only.
    void Insert(int index, T item);

    // Summary:
    //     Removes the System.Collections.Generic.IList<T> item at the specified index.
    //
    // Parameters:
    //   index:
    //     The zero-based index of the item to remove.
    //
    // Exceptions:
    //   System.ArgumentOutOfRangeException:
    //     index is not a valid index in the System.Collections.Generic.IList<T>.
    //
    //   System.NotSupportedException:
    //     The System.Collections.Generic.IList<T> is read-only.
    void RemoveAt(int index);
  }

  [ContractClassFor(typeof(IList<>))]
  abstract class IListContract<T> : IList<T>
  {
    #region IList<T> Members

    T IList<T>.this[int index]
    {
      get
      {
        Contract.Requires(index >= 0);
        Contract.Requires(index < this.Count);
        return default(T);
      }
      set
      {
        Contract.Requires(index >= 0);
        Contract.Requires(index < this.Count);
      }
    }

    [Pure]
    int IList<T>.IndexOf(T item)
    {
      Contract.Ensures(Contract.Result<int>() >= -1);
      Contract.Ensures(Contract.Result<int>() < this.Count);
      throw new NotImplementedException();
    }

    void IList<T>.Insert(int index, T item)
    {
      Contract.Requires(index >= 0);
      Contract.Requires(index <= this.Count);
    }

    void IList<T>.RemoveAt(int index)
    {
      Contract.Requires(index >= 0);
      Contract.Requires(index < this.Count);
      Contract.Ensures(this.Count == Contract.OldValue(this.Count) - 1);
    }

    #endregion

    #region ICollection<T> Members

    public int Count
    {
      get { throw new NotImplementedException(); }
    }

    bool ICollection<T>.IsReadOnly
    {
      get { throw new NotImplementedException(); }
    }

    void ICollection<T>.Add(T item)
    {
      // Contract.Ensures(Count == Contract.OldValue(Count) + 1); // cannot be seen by our tools as there is no IList<T>.Add
      throw new NotImplementedException();
    }

    void ICollection<T>.Clear()
    {
      throw new NotImplementedException();
    }

    bool ICollection<T>.Contains(T item)
    {
      throw new NotImplementedException();
    }

    void ICollection<T>.CopyTo(T[] array, int arrayIndex)
    {
      throw new NotImplementedException();
    }

    bool ICollection<T>.Remove(T item)
    {
      throw new NotImplementedException();
    }

    #endregion

    #region IEnumerable<T> Members

    IEnumerator<T> IEnumerable<T>.GetEnumerator()
    {
      throw new NotImplementedException();
    }

    #endregion

    #region IEnumerable Members

    IEnumerator IEnumerable.GetEnumerator()
    {
      throw new NotImplementedException();
    }

    #endregion

    #region IEnumerable Members

    public object[] Model
    {
      get { throw new NotImplementedException(); }
    }

    #endregion
  }
}
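// Added illustration (not part of the contract class above): with the
// [ContractClass]/[ContractClassFor] pairing, builds instrumented by the Code
// Contracts rewriter check every IList<T> use against the conditions declared
// above. A minimal sketch:
//
//   IList<int> xs = new List<int> { 1, 2, 3 };
//   int i = xs.IndexOf(2);   // postcondition: -1 <= result < Count
//   xs.RemoveAt(1);          // precondition holds: 0 <= index < Count; Count shrinks by one
//   xs.RemoveAt(5);          // violates Contract.Requires(index < this.Count) in instrumented builds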
namespace Merchello.Web.Models.ContentEditing
{
    using System;
    using System.Collections.Generic;

    using Merchello.Core;
    using Merchello.Core.Models.TypeFields;

    using Newtonsoft.Json;
    using Newtonsoft.Json.Converters;

    /// <summary>
    /// The line item display base.
    /// </summary>
    public abstract class LineItemDisplayBase
    {
        /// <summary>
        /// Gets or sets the key.
        /// </summary>
        public Guid Key { get; set; }

        /// <summary>
        /// Gets or sets the container key.
        /// </summary>
        public Guid ContainerKey { get; set; }

        /// <summary>
        /// Gets or sets the line item type field key.
        /// </summary>
        public Guid LineItemTfKey { get; set; }

        /// <summary>
        /// Gets or sets the SKU.
        /// </summary>
        public string Sku { get; set; }

        /// <summary>
        /// Gets or sets the name.
        /// </summary>
        public string Name { get; set; }

        /// <summary>
        /// Gets or sets the quantity.
        /// </summary>
        public int Quantity { get; set; }

        /// <summary>
        /// Gets or sets the price.
        /// </summary>
        public decimal Price { get; set; }

        /// <summary>
        /// Gets or sets a value indicating whether exported.
        /// </summary>
        public bool Exported { get; set; }

        /// <summary>
        /// Gets or sets the line item type.
        /// </summary>
        [JsonConverter(typeof(StringEnumConverter))]
        public LineItemType LineItemType { get; set; }

        /// <summary>
        /// Gets or sets the line item type field.
        /// </summary>
        public TypeField LineItemTypeField { get; set; }

        /// <summary>
        /// Gets or sets the extended data.
        /// </summary>
        public IEnumerable<KeyValuePair<string, string>> ExtendedData { get; set; }
    }
}
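// Added illustration (hypothetical subclass, not from this file): concrete display
// models are expected to inherit the shared line-item fields above and add their own:
//
//   public class OrderLineItemDisplay : LineItemDisplayBase
//   {
//       public Guid ShipmentKey { get; set; } // hypothetical extra field
//   }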
(function ($) {
    'use strict';

    $.extend(true, $.trumbowyg, {
        langs: {
            // jshint camelcase:false
            en: { fontFamily: 'Font' },
            es: { fontFamily: 'Fuente' },
            da: { fontFamily: 'Skrifttype' },
            fr: { fontFamily: 'Police' },
            de: { fontFamily: 'Schriftart' },
            nl: { fontFamily: 'Lettertype' },
            tr: { fontFamily: 'Yazı Tipi' },
            zh_tw: { fontFamily: '字體' },
            pt_br: { fontFamily: 'Fonte' }
        }
    });
    // jshint camelcase:true

    var defaultOptions = {
        fontList: [
            {name: 'Arial', family: 'Arial, Helvetica, sans-serif'},
            {name: 'Arial Black', family: '\'Arial Black\', Gadget, sans-serif'},
            {name: 'Comic Sans', family: '\'Comic Sans MS\', Textile, cursive, sans-serif'},
            {name: 'Courier New', family: '\'Courier New\', Courier, monospace'},
            {name: 'Georgia', family: 'Georgia, serif'},
            {name: 'Impact', family: 'Impact, Charcoal, sans-serif'},
            {name: 'Lucida Console', family: '\'Lucida Console\', Monaco, monospace'},
            {name: 'Lucida Sans', family: '\'Lucida Sans Unicode\', \'Lucida Grande\', sans-serif'},
            {name: 'Palatino', family: '\'Palatino Linotype\', \'Book Antiqua\', Palatino, serif'},
            {name: 'Tahoma', family: 'Tahoma, Geneva, sans-serif'},
            {name: 'Times New Roman', family: '\'Times New Roman\', Times, serif'},
            {name: 'Trebuchet', family: '\'Trebuchet MS\', Helvetica, sans-serif'},
            {name: 'Verdana', family: 'Verdana, Geneva, sans-serif'}
        ]
    };

    // Add dropdown with web safe fonts
    $.extend(true, $.trumbowyg, {
        plugins: {
            fontfamily: {
                init: function (trumbowyg) {
                    trumbowyg.o.plugins.fontfamily = $.extend(true, {},
                        defaultOptions,
                        trumbowyg.o.plugins.fontfamily || {}
                    );
                    trumbowyg.addBtnDef('fontfamily', {
                        dropdown: buildDropdown(trumbowyg),
                        hasIcon: false,
                        text: trumbowyg.lang.fontFamily
                    });
                }
            }
        }
    });

    function buildDropdown(trumbowyg) {
        var dropdown = [];
        $.each(trumbowyg.o.plugins.fontfamily.fontList, function (index, font) {
            trumbowyg.addBtnDef('fontfamily_' + index, {
                title: '<span style="font-family: ' + font.family + ';">' + font.name + '</span>',
                hasIcon: false,
                fn: function () {
                    trumbowyg.execCmd('fontName', font.family, true);
                }
            });
            dropdown.push('fontfamily_' + index);
        });
        return dropdown;
    }
})(jQuery);
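// Added illustration (not part of the plugin above): enabling the registered
// 'fontfamily' button in an editor instance. '#editor' and the trimmed font list
// are placeholder assumptions.
//
//   $('#editor').trumbowyg({
//       btns: [['fontfamily']],
//       plugins: {
//           fontfamily: {
//               fontList: [
//                   {name: 'Georgia', family: 'Georgia, serif'}
//               ]
//           }
//       }
//   });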
CKEDITOR.plugins.setLang("uicolor","ro",{title:"Interfața cu utilizatorul a Selectorului de culoare",options:"Opțiuni culoare",highlight:"Evidențiere",selected:"Culoare selectată",predefined:"Seturi de culoare predefinite",config:"Copiază această expresie în fișierul tău config.js"});
<html> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> {% if dev %} {% style "css/site.css" %} {% else %} {% style "css/site.min.css" %} {% endif %} </head> <body> <div id="app"></div> <script src="js/app.js" type="text/javascript"></script> </body> </html>
<?php global $qode_options_proya; $blog_hide_comments = ""; if (isset($qode_options_proya['blog_hide_comments'])) { $blog_hide_comments = $qode_options_proya['blog_hide_comments']; } $blog_hide_author = ""; if (isset($qode_options_proya['blog_hide_author'])) { $blog_hide_author = $qode_options_proya['blog_hide_author']; } $qode_like = "on"; if (isset($qode_options_proya['qode_like'])) { $qode_like = $qode_options_proya['qode_like']; } ?>
<?php $_post_format = get_post_format(); ?>
<?php switch ($_post_format) { case "video": ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <div class="post_image"> <?php $_video_type = get_post_meta(get_the_ID(), "video_format_choose", true);?> <?php if($_video_type == "youtube") { ?> <iframe src="//www.youtube.com/embed/<?php echo get_post_meta(get_the_ID(), "video_format_link", true); ?>" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen></iframe> <?php } elseif ($_video_type == "vimeo"){ ?> <iframe src="//player.vimeo.com/video/<?php echo get_post_meta(get_the_ID(), "video_format_link", true); ?>?title=0&amp;byline=0&amp;portrait=0" frameborder="0" webkitAllowFullScreen mozallowfullscreen allowFullScreen></iframe> <?php } elseif ($_video_type == "self"){ ?> <div class="video"> <div class="mobile-video-image" style="background-image: url(<?php echo get_post_meta(get_the_ID(), "video_format_image", true); ?>);"></div> <div class="video-wrap"> <video class="video" poster="<?php echo get_post_meta(get_the_ID(), "video_format_image", true); ?>" preload="auto"> <?php if(get_post_meta(get_the_ID(), "video_format_webm", true) != "") { ?> <source type="video/webm" src="<?php echo get_post_meta(get_the_ID(), "video_format_webm", true); ?>"> <?php } ?> <?php if(get_post_meta(get_the_ID(), "video_format_mp4", true) != "") { ?> <source type="video/mp4" src="<?php echo get_post_meta(get_the_ID(), "video_format_mp4", true); ?>"> <?php } ?> <?php if(get_post_meta(get_the_ID(), "video_format_ogv", true) != "") { ?> <source type="video/ogg" src="<?php echo get_post_meta(get_the_ID(), "video_format_ogv", true); ?>"> <?php } ?> <object width="320" height="240" type="application/x-shockwave-flash" data="<?php echo get_template_directory_uri(); ?>/js/flashmediaelement.swf"> <param name="movie" value="<?php echo get_template_directory_uri(); ?>/js/flashmediaelement.swf" /> <param name="flashvars" value="controls=true&file=<?php echo get_post_meta(get_the_ID(), "video_format_mp4", true); ?>" /> <img src="<?php echo get_post_meta(get_the_ID(), "video_format_image", true); ?>" width="1920" height="800" title="No video playback capabilities" alt="Video thumb" /> </object> </video> </div></div> <?php } ?> </div> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <h2><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></h2> <div class="separator small center"></div> <?php qode_excerpt(); ?> </div> </div> </div> </article>
<?php break; case "audio": ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <div class="post_image"> <audio class="blog_audio" src="<?php echo get_post_meta(get_the_ID(), "audio_link", true) ?>" controls="controls"> <?php _e("Your browser doesn't support the audio player","qode"); ?> </audio> </div> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <h2><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></h2> <div class="separator small center"></div> <?php qode_excerpt(); ?> </div> </div> </div> </article>
<?php break; case "gallery": ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <div class="post_image"> <div class="flexslider"> <ul class="slides"> <?php $post_content = get_the_content(); preg_match('/\[gallery.*ids=.(.*).\]/', $post_content, $ids); $array_id = explode(",", $ids[1]); foreach($array_id as $img_id){ ?> <li><a href="<?php the_permalink(); ?>"><?php echo wp_get_attachment_image( $img_id, 'full' ); ?></a></li> <?php } ?> </ul> </div> </div> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <h2><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></h2> <div class="separator small center"></div> <?php qode_excerpt(); ?> </div> </div> </div> </article>
<?php break; case "link": ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <i class="link_mark fa fa-link pull-left"></i> <div class="post_title"> <p><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></p> </div> </div> </div> </div> </article>
<?php break; case "quote": ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <i class="qoute_mark fa fa-quote-right pull-left"></i> <div class="post_title"> <p><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php echo get_post_meta(get_the_ID(), "quote_format", true); ?></a></p> <span class="quote_author">&mdash; <?php the_title(); ?></span> </div> </div> </div> </div> </article>
<?php break; default: ?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>> <div class="post_content_holder"> <?php if ( has_post_thumbnail() ) { ?> <div class="post_image"> <a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"> <?php the_post_thumbnail('full'); ?> </a> </div> <?php } ?> <div class="post_text"> <div class="post_text_inner"> <div class="minimalist_date"><?php the_time(get_option('date_format')); ?></div> <h2><a href="<?php the_permalink(); ?>" title="<?php the_title_attribute(); ?>"><?php the_title(); ?></a></h2> <div class="separator small center"></div> <?php qode_excerpt(); ?> </div> </div> </div> </article>
<?php } ?>
#include "cache.h" #include "builtin.h" #include "refs.h" #include "tag.h" #include "run-command.h" #include "parse-options.h" #include "diff.h" #include "revision.h" #include "gpg-interface.h" #include "sha1-array.h" #include "column.h" #include "ref-filter.h" static const char * const git_tag_usage[] = { N_("git tag [-a | -s | -u <key-id>] [-f] [-m <msg> | -F <file>] <tagname> [<head>]"), N_("git tag -d <tagname>..."), N_("git tag -l [-n[<num>]] [--contains <commit>] [--points-at <object>]" "\n\t\t[--format=<format>] [--[no-]merged [<commit>]] [<pattern>...]"), N_("git tag -v <tagname>..."), NULL }; static unsigned int colopts; static int list_tags(struct ref_filter *filter, struct ref_sorting *sorting, const char *format) { struct ref_array array; char *to_free = NULL; int i; memset(&array, 0, sizeof(array)); if (filter->lines == -1) filter->lines = 0; if (!format) { if (filter->lines) { to_free = xstrfmt("%s %%(contents:lines=%d)", "%(align:15)%(refname:strip=2)%(end)", filter->lines); format = to_free; } else format = "%(refname:strip=2)"; } verify_ref_format(format); filter-><API key> = 1; filter_refs(&array, filter, FILTER_REFS_TAGS); ref_array_sort(sorting, &array); for (i = 0; i < array.nr; i++) show_ref_array_item(array.items[i], format, 0); ref_array_clear(&array); free(to_free); return 0; } typedef int (*each_tag_name_fn)(const char *name, const char *ref, const unsigned char *sha1); static int for_each_tag_name(const char **argv, each_tag_name_fn fn) { const char **p; char ref[PATH_MAX]; int had_error = 0; unsigned char sha1[20]; for (p = argv; *p; p++) { if (snprintf(ref, sizeof(ref), "refs/tags/%s", *p) >= sizeof(ref)) { error(_("tag name too long: %.*s..."), 50, *p); had_error = 1; continue; } if (read_ref(ref, sha1)) { error(_("tag '%s' not found."), *p); had_error = 1; continue; } if (fn(*p, ref, sha1)) had_error = 1; } return had_error; } static int delete_tag(const char *name, const char *ref, const unsigned char *sha1) { if (delete_ref(ref, sha1, 0)) return 1; printf(_("Deleted tag '%s' (was %s)\n"), name, find_unique_abbrev(sha1, DEFAULT_ABBREV)); return 0; } static int verify_tag(const char *name, const char *ref, const unsigned char *sha1) { const char *argv_verify_tag[] = {"verify-tag", "-v", "SHA1_HEX", NULL}; argv_verify_tag[2] = sha1_to_hex(sha1); if (run_command_v_opt(argv_verify_tag, RUN_GIT_CMD)) return error(_("could not verify the tag '%s'"), name); return 0; } static int do_sign(struct strbuf *buffer) { return sign_buffer(buffer, buffer, get_signing_key()); } static const char tag_template[] = N_("\nWrite a message for tag:\n %s\n" "Lines starting with '%c' will be ignored.\n"); static const char <API key>[] = N_("\nWrite a message for tag:\n %s\n" "Lines starting with '%c' will be kept; you may remove them" " yourself if you want to.\n"); /* Parse arg given and add it the ref_sorting array */ static int <API key>(const char *arg, struct ref_sorting **sorting_tail) { struct ref_sorting *s; int len; s = xcalloc(1, sizeof(*s)); s->next = *sorting_tail; *sorting_tail = s; if (*arg == '-') { s->reverse = 1; arg++; } if (skip_prefix(arg, "version:", &arg) || skip_prefix(arg, "v:", &arg)) s->version = 1; len = strlen(arg); s->atom = <API key>(arg, arg+len); return 0; } static int git_tag_config(const char *var, const char *value, void *cb) { int status; struct ref_sorting **sorting_tail = (struct ref_sorting **)cb; if (!strcmp(var, "tag.sort")) { if (!value) return <API key>(var); <API key>(value, sorting_tail); return 0; } status = git_gpg_config(var, value, 
cb); if (status) return status; if (starts_with(var, "column.")) return git_column_config(var, value, "tag", &colopts); return git_default_config(var, value, cb); } static void write_tag_body(int fd, const unsigned char *sha1) { unsigned long size; enum object_type type; char *buf, *sp; buf = read_sha1_file(sha1, &type, &size); if (!buf) return; /* skip header */ sp = strstr(buf, "\n\n"); if (!sp || !size || type != OBJ_TAG) { free(buf); return; } sp += 2; /* skip the 2 LFs */ write_or_die(fd, sp, parse_signature(sp, buf + size - sp)); free(buf); } static int build_tag_object(struct strbuf *buf, int sign, unsigned char *result) { if (sign && do_sign(buf) < 0) return error(_("unable to sign the tag")); if (write_sha1_file(buf->buf, buf->len, tag_type, result) < 0) return error(_("unable to write tag file")); return 0; } struct create_tag_options { unsigned int message_given:1; unsigned int sign; enum { CLEANUP_NONE, CLEANUP_SPACE, CLEANUP_ALL } cleanup_mode; }; static void create_tag(const unsigned char *object, const char *tag, struct strbuf *buf, struct create_tag_options *opt, unsigned char *prev, unsigned char *result) { enum object_type type; char header_buf[1024]; int header_len; char *path = NULL; type = sha1_object_info(object, NULL); if (type <= OBJ_NONE) die(_("bad object type.")); header_len = snprintf(header_buf, sizeof(header_buf), "object %s\n" "type %s\n" "tag %s\n" "tagger %s\n\n", sha1_to_hex(object), typename(type), tag, git_committer_info(IDENT_STRICT)); if (header_len > sizeof(header_buf) - 1) die(_("tag header too big.")); if (!opt->message_given) { int fd; /* write the template message before editing: */ path = git_pathdup("TAG_EDITMSG"); fd = open(path, O_CREAT | O_TRUNC | O_WRONLY, 0600); if (fd < 0) die_errno(_("could not create file '%s'"), path); if (!is_null_sha1(prev)) { write_tag_body(fd, prev); } else { struct strbuf buf = STRBUF_INIT; strbuf_addch(&buf, '\n'); if (opt->cleanup_mode == CLEANUP_ALL) <API key>(&buf, _(tag_template), tag, comment_line_char); else <API key>(&buf, _(<API key>), tag, comment_line_char); write_or_die(fd, buf.buf, buf.len); strbuf_release(&buf); } close(fd); if (launch_editor(path, buf, NULL)) { fprintf(stderr, _("Please supply the message using either -m or -F option.\n")); exit(1); } } if (opt->cleanup_mode != CLEANUP_NONE) strbuf_stripspace(buf, opt->cleanup_mode == CLEANUP_ALL); if (!opt->message_given && !buf->len) die(_("no tag message?")); strbuf_insert(buf, 0, header_buf, header_len); if (build_tag_object(buf, opt->sign, result) < 0) { if (path) fprintf(stderr, _("The tag message has been left in %s\n"), path); exit(128); } if (path) { unlink_or_warn(path); free(path); } } struct msg_arg { int given; struct strbuf buf; }; static int parse_msg_arg(const struct option *opt, const char *arg, int unset) { struct msg_arg *msg = opt->value; if (!arg) return -1; if (msg->buf.len) strbuf_addstr(&(msg->buf), "\n\n"); strbuf_addstr(&(msg->buf), arg); msg->given = 1; return 0; } static int <API key>(struct strbuf *sb, const char *name) { if (name[0] == '-') return -1; strbuf_reset(sb); strbuf_addf(sb, "refs/tags/%s", name); return <API key>(sb->buf, 0); } int cmd_tag(int argc, const char **argv, const char *prefix) { struct strbuf buf = STRBUF_INIT; struct strbuf ref = STRBUF_INIT; unsigned char object[20], prev[20]; const char *object_ref, *tag; struct create_tag_options opt; char *cleanup_arg = NULL; int create_reflog = 0; int annotate = 0, force = 0; int cmdmode = 0; const char *msgfile = NULL, *keyid = NULL; struct msg_arg msg = { 0, 
STRBUF_INIT }; struct ref_transaction *transaction; struct strbuf err = STRBUF_INIT; struct ref_filter filter; static struct ref_sorting *sorting = NULL, **sorting_tail = &sorting; const char *format = NULL; struct option options[] = { OPT_CMDMODE('l', "list", &cmdmode, N_("list tag names"), 'l'), { OPTION_INTEGER, 'n', NULL, &filter.lines, N_("n"), N_("print <n> lines of each tag message"), PARSE_OPT_OPTARG, NULL, 1 }, OPT_CMDMODE('d', "delete", &cmdmode, N_("delete tags"), 'd'), OPT_CMDMODE('v', "verify", &cmdmode, N_("verify tags"), 'v'), OPT_GROUP(N_("Tag creation options")), OPT_BOOL('a', "annotate", &annotate, N_("annotated tag, needs a message")), OPT_CALLBACK('m', "message", &msg, N_("message"), N_("tag message"), parse_msg_arg), OPT_FILENAME('F', "file", &msgfile, N_("read message from file")), OPT_BOOL('s', "sign", &opt.sign, N_("annotated and GPG-signed tag")), OPT_STRING(0, "cleanup", &cleanup_arg, N_("mode"), N_("how to strip spaces and #comments from message")), OPT_STRING('u', "local-user", &keyid, N_("key-id"), N_("use another key to sign the tag")), OPT__FORCE(&force, N_("replace the tag if exists")), OPT_BOOL(0, "create-reflog", &create_reflog, N_("create a reflog")), OPT_GROUP(N_("Tag listing options")), OPT_COLUMN(0, "column", &colopts, N_("show tag list in columns")), OPT_CONTAINS(&filter.with_commit, N_("print only tags that contain the commit")), OPT_WITH(&filter.with_commit, N_("print only tags that contain the commit")), OPT_MERGED(&filter, N_("print only tags that are merged")), OPT_NO_MERGED(&filter, N_("print only tags that are not merged")), OPT_CALLBACK(0 , "sort", sorting_tail, N_("key"), N_("field name to sort on"), &<API key>), { OPTION_CALLBACK, 0, "points-at", &filter.points_at, N_("object"), N_("print only tags of the object"), 0, <API key> }, OPT_STRING( 0 , "format", &format, N_("format"), N_("format to use for the output")), OPT_END() }; git_config(git_tag_config, sorting_tail); memset(&opt, 0, sizeof(opt)); memset(&filter, 0, sizeof(filter)); filter.lines = -1; argc = parse_options(argc, argv, prefix, options, git_tag_usage, 0); if (keyid) { opt.sign = 1; set_signing_key(keyid); } if (opt.sign) annotate = 1; if (argc == 0 && !cmdmode) cmdmode = 'l'; if ((annotate || msg.given || msgfile || force) && (cmdmode != 0)) usage_with_options(git_tag_usage, options); finalize_colopts(&colopts, -1); if (cmdmode == 'l' && filter.lines != -1) { if (<API key>(colopts)) die(_("--column and -n are incompatible")); colopts = 0; } if (!sorting) sorting = ref_default_sorting(); if (cmdmode == 'l') { int ret; if (column_active(colopts)) { struct column_options copts; memset(&copts, 0, sizeof(copts)); copts.padding = 2; run_column_filter(colopts, &copts); } filter.name_patterns = argv; ret = list_tags(&filter, sorting, format); if (column_active(colopts)) stop_column_filter(); return ret; } if (filter.lines != -1) die(_("-n option is only allowed with -l.")); if (filter.with_commit) die(_("--contains option is only allowed with -l.")); if (filter.points_at.nr) die(_("--points-at option is only allowed with -l.")); if (filter.merge_commit) die(_("--merged and --no-merged option are only allowed with -l")); if (cmdmode == 'd') return for_each_tag_name(argv, delete_tag); if (cmdmode == 'v') return for_each_tag_name(argv, verify_tag); if (msg.given || msgfile) { if (msg.given && msgfile) die(_("only one -F or -m option is allowed.")); annotate = 1; if (msg.given) strbuf_addbuf(&buf, &(msg.buf)); else { if (!strcmp(msgfile, "-")) { if (strbuf_read(&buf, 0, 1024) < 0) 
die_errno(_("cannot read '%s'"), msgfile); } else { if (strbuf_read_file(&buf, msgfile, 1024) < 0) die_errno(_("could not open or read '%s'"), msgfile); } } } tag = argv[0]; object_ref = argc == 2 ? argv[1] : "HEAD"; if (argc > 2) die(_("too many params")); if (get_sha1(object_ref, object)) die(_("Failed to resolve '%s' as a valid ref."), object_ref); if (<API key>(&ref, tag)) die(_("'%s' is not a valid tag name."), tag); if (read_ref(ref.buf, prev)) hashclr(prev); else if (!force) die(_("tag '%s' already exists"), tag); opt.message_given = msg.given || msgfile; if (!cleanup_arg || !strcmp(cleanup_arg, "strip")) opt.cleanup_mode = CLEANUP_ALL; else if (!strcmp(cleanup_arg, "verbatim")) opt.cleanup_mode = CLEANUP_NONE; else if (!strcmp(cleanup_arg, "whitespace")) opt.cleanup_mode = CLEANUP_SPACE; else die(_("Invalid cleanup mode %s"), cleanup_arg); if (annotate) create_tag(object, tag, &buf, &opt, prev, object); transaction = <API key>(&err); if (!transaction || <API key>(transaction, ref.buf, object, prev, create_reflog ? <API key> : 0, NULL, &err) || <API key>(transaction, &err)) die("%s", err.buf); <API key>(transaction); if (force && !is_null_sha1(prev) && hashcmp(prev, object)) printf(_("Updated tag '%s' (was %s)\n"), tag, find_unique_abbrev(prev, DEFAULT_ABBREV)); strbuf_release(&err); strbuf_release(&buf); strbuf_release(&ref); return 0; }
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/sock_diag.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/gfp.h>
#include <net/inet_common.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/netlink.h>
#include <linux/skbuff.h>
#include <linux/skmsg.h>
#include <net/sock.h>
#include <net/flow_dissector.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/cmpxchg.h>
#include <linux/filter.h>
#include <linux/ratelimit.h>
#include <linux/seccomp.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/cls_cgroup.h>
#include <net/dst_metadata.h>
#include <net/dst.h>
#include <net/sock_reuseport.h>
#include <net/busy_poll.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/udp.h>
#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <linux/inetdevice.h>
#include <net/inet_hashtables.h>
#include <net/inet6_hashtables.h>
#include <net/ip_fib.h>
#include <net/flow.h>
#include <net/arp.h>
#include <net/ipv6.h>
#include <net/net_namespace.h>
#include <linux/seg6_local.h>
#include <net/seg6.h>
#include <net/seg6_local.h>
#include <net/lwtunnel.h>
#include <net/ipv6_stubs.h>
#include <net/bpf_sk_storage.h>

/** * sk_filter_trim_cap - run a packet through a socket filter * @sk: sock associated with &sk_buff * @skb: buffer to filter * @cap: limit on how short the eBPF program may trim the packet * * Run the eBPF program and then cut skb->data to correct size returned by * the program. If pkt_len is 0 we toss packet. If skb->len is smaller * than pkt_len we keep whole skb->data. This is the socket level * wrapper to BPF_PROG_RUN. It returns 0 if the packet should * be accepted or -EPERM if the packet should be tossed. * */
int sk_filter_trim_cap(struct sock *sk, struct sk_buff *skb, unsigned int cap) { int err; struct sk_filter *filter;
	/* * If the skb was allocated from pfmemalloc reserves, only * allow SOCK_MEMALLOC sockets to use it as this socket is * helping free memory */
	if (skb_pfmemalloc(skb) && !sock_flag(sk, SOCK_MEMALLOC)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_PFMEMALLOCDROP); return -ENOMEM; }
	err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb); if (err) return err;
	err = security_sock_rcv_skb(sk, skb); if (err) return err;
	rcu_read_lock(); filter = rcu_dereference(sk->sk_filter); if (filter) { struct sock *save_sk = skb->sk; unsigned int pkt_len; skb->sk = sk; pkt_len = bpf_prog_run_save_cb(filter->prog, skb); skb->sk = save_sk; err = pkt_len ? pskb_trim(skb, max(cap, pkt_len)) : -EPERM; } rcu_read_unlock(); return err; }
EXPORT_SYMBOL(sk_filter_trim_cap);

BPF_CALL_1(bpf_skb_get_pay_offset, struct sk_buff *, skb) { return skb_get_poff(skb); }

BPF_CALL_3(bpf_skb_get_nlattr, struct sk_buff *, skb, u32, a, u32, x) { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (a > skb->len - sizeof(struct nlattr)) return 0; nla = nla_find((struct nlattr *) &skb->data[a], skb->len - a, x); if (nla) return (void *) nla - (void *) skb->data; return 0; }

BPF_CALL_3(bpf_skb_get_nlattr_nest, struct sk_buff *, skb, u32, a, u32, x) { struct nlattr *nla; if (skb_is_nonlinear(skb)) return 0; if (skb->len < sizeof(struct nlattr)) return 0; if (a > skb->len - sizeof(struct nlattr)) return 0; nla = (struct nlattr *) &skb->data[a]; if (nla->nla_len > skb->len - a) return 0; nla = nla_find_nested(nla, x); if (nla) return (void *) nla - (void *) skb->data; return 0; }

BPF_CALL_4(bpf_skb_load_helper_8, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u8 tmp, *ptr; const int len = sizeof(tmp); if (offset >= 0) { if (headlen - offset >= len) return *(u8 *)(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return tmp; } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return *(u8 *)ptr; } return -EFAULT; }

BPF_CALL_2(bpf_skb_load_helper_8_no_cache, const struct sk_buff *, skb, int, offset) { return bpf_skb_load_helper_8(skb, skb->data, skb->len - skb->data_len, offset); }

BPF_CALL_4(bpf_skb_load_helper_16, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u16 tmp, *ptr; const int len = sizeof(tmp); if (offset >= 0) { if (headlen - offset >= len) return get_unaligned_be16(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be16_to_cpu(tmp); } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return get_unaligned_be16(ptr); } return -EFAULT; }

BPF_CALL_2(bpf_skb_load_helper_16_no_cache, const struct sk_buff *, skb, int, offset) { return bpf_skb_load_helper_16(skb, skb->data, skb->len - skb->data_len, offset); }

BPF_CALL_4(bpf_skb_load_helper_32, const struct sk_buff *, skb, const void *, data, int, headlen, int, offset) { u32 tmp, *ptr; const int len = sizeof(tmp); if (likely(offset >= 0)) { if (headlen - offset >= len) return get_unaligned_be32(data + offset); if (!skb_copy_bits(skb, offset, &tmp, sizeof(tmp))) return be32_to_cpu(tmp); } else { ptr = bpf_internal_load_pointer_neg_helper(skb, offset, len); if (likely(ptr)) return get_unaligned_be32(ptr); } return -EFAULT; }

BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb, int, offset) { return bpf_skb_load_helper_32(skb, skb->data, skb->len - skb->data_len, offset); }

BPF_CALL_0(bpf_get_raw_cpu_id) { return raw_smp_processor_id(); }

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = { .func = bpf_get_raw_cpu_id, .gpl_only = false, .ret_type = RET_INTEGER, };

static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg, struct bpf_insn *insn_buf) { struct bpf_insn *insn = insn_buf;
	switch (skb_field) {
	case SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, offsetof(struct sk_buff, mark)); break;
	case SKF_AD_PKTTYPE: *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_TYPE_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, PKT_TYPE_MAX);
#ifdef __BIG_ENDIAN_BITFIELD
	*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, 5);
#endif
	break;
	case SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, queue_mapping)); break;
	case SKF_AD_VLAN_TAG: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
	/* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */
	*insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, offsetof(struct sk_buff, vlan_tci)); break;
	case SKF_AD_VLAN_TAG_PRESENT: *insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET()); if (PKT_VLAN_PRESENT_BIT) *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT); if (PKT_VLAN_PRESENT_BIT < 7) *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1); break;
	}
	return insn - insn_buf; }

static bool convert_bpf_extensions(struct sock_filter *fp, struct bpf_insn **insnp) { struct bpf_insn *insn = *insnp; u32 cnt;
	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
	/* A = *(u16 *) (CTX + offsetof(protocol)) */
	*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, protocol));
	/* A = ntohs(A) [emitting a nop or swap16] */
	*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break;
	case SKF_AD_OFF + SKF_AD_PKTTYPE: cnt = convert_skb_access(SKF_AD_PKTTYPE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break;
	case SKF_AD_OFF + SKF_AD_IFINDEX: case SKF_AD_OFF + SKF_AD_HATYPE: BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), BPF_REG_TMP, BPF_REG_CTX, offsetof(struct sk_buff, dev));
	/* if (tmp != 0) goto pc + 1 */
	*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_TMP, 0, 1); *insn++ = BPF_EXIT_INSN(); if (fp->k == SKF_AD_OFF + SKF_AD_IFINDEX) *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, ifindex)); else *insn = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_TMP, offsetof(struct net_device, type)); break;
	case SKF_AD_OFF + SKF_AD_MARK: cnt = convert_skb_access(SKF_AD_MARK, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break;
	case SKF_AD_OFF + SKF_AD_RXHASH: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, hash)); break;
	case SKF_AD_OFF + SKF_AD_QUEUE: cnt = convert_skb_access(SKF_AD_QUEUE, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break;
	case SKF_AD_OFF + SKF_AD_VLAN_TAG: cnt = convert_skb_access(SKF_AD_VLAN_TAG, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break;
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT: cnt = convert_skb_access(SKF_AD_VLAN_TAG_PRESENT, BPF_REG_A, BPF_REG_CTX, insn); insn += cnt - 1; break;
	case SKF_AD_OFF + SKF_AD_VLAN_TPID: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
	/* A = *(u16 *) (CTX + offsetof(vlan_proto)) */
	*insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, vlan_proto));
	/* A = ntohs(A) [emitting a nop or swap16] */
	*insn = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, 16); break;
	case SKF_AD_OFF + SKF_AD_PAY_OFFSET: case SKF_AD_OFF + SKF_AD_NLATTR: case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM:
	/* arg1 = CTX */
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX);
	/* arg2 = A */
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A);
	/* arg3 = X */
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X);
	/* Emit call(arg1=CTX, arg2=A, arg3=X) */
	switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: *insn = BPF_EMIT_CALL(bpf_skb_get_pay_offset); break; case SKF_AD_OFF + SKF_AD_NLATTR: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr); break; case SKF_AD_OFF + SKF_AD_NLATTR_NEST: *insn = BPF_EMIT_CALL(bpf_skb_get_nlattr_nest); break; case SKF_AD_OFF + SKF_AD_CPU: *insn = BPF_EMIT_CALL(bpf_get_raw_cpu_id); break; case SKF_AD_OFF + SKF_AD_RANDOM: *insn = BPF_EMIT_CALL(bpf_user_rnd_u32); bpf_user_rnd_init_once(); break; } break;
	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
	/* A ^= X */
	*insn = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_X); break;
	default:
	/* This is just a dummy call to avoid letting the compiler * evict __bpf_call_base() as an optimization. Placed here * where no-one bothers. */
	BUG_ON(__bpf_call_base(0, 0, 0, 0, 0) != 0); return false; }
	*insnp = insn; return true; }

static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp) { const bool unaligned_ok = IS_BUILTIN(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS); int size = bpf_size_to_bytes(BPF_SIZE(fp->code)); bool endian = BPF_SIZE(fp->code) == BPF_H || BPF_SIZE(fp->code) == BPF_W; bool indirect = BPF_MODE(fp->code) == BPF_IND; const int ip_align = NET_IP_ALIGN; struct bpf_insn *insn = *insnp; int offset = fp->k;
	if (!indirect && ((unaligned_ok && offset >= 0) || (!unaligned_ok && offset >= 0 && offset + ip_align >= 0 && offset + ip_align % size == 0))) { bool ldx_off_ok = offset <= S16_MAX; *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H); if (offset) *insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset); *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian + (!ldx_off_ok * 2)); if (ldx_off_ok) { *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D); *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset); *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_TMP, 0); } if (endian) *insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8); *insn++ = BPF_JMP_A(8); }
	*insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_D); *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_H); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_ARG4, offset); } else { *insn++ = BPF_MOV64_REG(BPF_REG_ARG4, BPF_REG_X); if (fp->k) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG4, offset); }
	switch (BPF_SIZE(fp->code)) { case BPF_B: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_8); break; case BPF_H: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_16); break; case BPF_W: *insn++ = BPF_EMIT_CALL(bpf_skb_load_helper_32); break; default: return false; }
	*insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_A, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A); *insn = BPF_EXIT_INSN(); *insnp = insn; return true; }

/** * bpf_convert_filter - convert filter program * @prog: the user passed filter program * @len: the length of the user passed filter program * @new_prog: allocated 'struct bpf_prog' or NULL * @new_len: pointer to store length of converted program * @seen_ld_abs: bool whether we've seen ld_abs/ind * * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn' * style extended BPF (eBPF).
/**
 *	bpf_convert_filter - convert filter program
 *	@prog: the user passed filter program
 *	@len: the length of the user passed filter program
 *	@new_prog: allocated 'struct bpf_prog' or NULL
 *	@new_len: pointer to store length of converted program
 *	@seen_ld_abs: bool whether we've seen ld_abs/ind
 *
 * Remap 'sock_filter' style classic BPF (cBPF) instruction set to 'bpf_insn'
 * style extended BPF (eBPF).
 * Conversion workflow:
 *
 * 1) First pass for calculating the new program length:
 *	bpf_convert_filter(old_prog, old_len, NULL, &new_len, &seen_ld_abs)
 *
 * 2) 2nd pass to remap in two passes: 1st pass finds new
 *    jump offsets, 2nd pass remapping:
 *	bpf_convert_filter(old_prog, old_len, new_prog, &new_len, &seen_ld_abs)
 */
static int bpf_convert_filter(struct sock_filter *prog, int len,
			      struct bpf_prog *new_prog, int *new_len,
			      bool *seen_ld_abs)
{
	int new_flen = 0, pass = 0, target, i, stack_off;
	struct bpf_insn *new_insn, *first_insn = NULL;
	struct sock_filter *fp;
	int *addrs = NULL;
	u8 bpf_src;

	BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
	BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);

	if (len <= 0 || len > BPF_MAXINSNS)
		return -EINVAL;

	if (new_prog) {
		first_insn = new_prog->insnsi;
		addrs = kcalloc(len, sizeof(*addrs),
				GFP_KERNEL | __GFP_NOWARN);
		if (!addrs)
			return -ENOMEM;
	}

do_pass:
	new_insn = first_insn;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_prog) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
		if (*seen_ld_abs) {
			/* For packet access in classic BPF, cache skb->data
			 * in callee-saved BPF R8 and skb->len - skb->data_len
			 * (headlen) in BPF R9. Since classic BPF is read-only
			 * on CTX, we only need to cache it once.
			 */
			*new_insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
						  BPF_REG_D, BPF_REG_CTX,
						  offsetof(struct sk_buff, data));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_H, BPF_REG_CTX,
						  offsetof(struct sk_buff, len));
			*new_insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_TMP, BPF_REG_CTX,
						  offsetof(struct sk_buff, data_len));
			*new_insn++ = BPF_ALU32_REG(BPF_SUB, BPF_REG_H, BPF_REG_TMP);
		}
	} else {
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct bpf_insn tmp_insns[32] = { };
		struct bpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - first_insn;

		switch (fp->code) {
		/* All arithmetic insns and skb loads map as-is. */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* Check for overloaded BPF extension and
			 * directly convert it if found, otherwise
			 * just move on with mapping.
			 */
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    BPF_MODE(fp->code) == BPF_ABS &&
			    convert_bpf_extensions(fp, &insn))
				break;
			if (BPF_CLASS(fp->code) == BPF_LD &&
			    convert_bpf_ld_abs(fp, &insn)) {
				*seen_ld_abs = true;
				break;
			}

			if (fp->code == (BPF_ALU | BPF_DIV | BPF_X) ||
			    fp->code == (BPF_ALU | BPF_MOD | BPF_X)) {
				*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
				/* Error with exception code on div/mod by 0.
				 * For cBPF programs, this was always return 0.
				 */
				*insn++ = BPF_JMP_IMM(BPF_JNE, BPF_REG_X, 0, 2);
				*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
				*insn++ = BPF_EXIT_INSN();
			}

			*insn = BPF_RAW_INSN(fp->code, BPF_REG_A, BPF_REG_X, 0, fp->k);
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		const s32 off_min = S16_MIN, off_max = S16_MAX;		\
		s32 off;						\
									\
		if (target >= len || target < 0)			\
			goto err;					\
		off = addrs ? addrs[target] - addrs[i] - 1 : 0;		\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		off -= insn - tmp_insns;				\
		/* Reject anything not fitting into insn->off. */	\
		if (off < off_min || off > off_max)			\
			goto err;					\
		insn->off = off;					\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert some jumps when 'jump_true' is next insn. */
			if (fp->jt == 0) {
				switch (BPF_OP(fp->code)) {
				case BPF_JEQ:
					insn->code = BPF_JMP | BPF_JNE | bpf_src;
					break;
				case BPF_JGT:
					insn->code = BPF_JMP | BPF_JLE | bpf_src;
					break;
				case BPF_JGE:
					insn->code = BPF_JMP | BPF_JLT | bpf_src;
					break;
				default:
					goto jmp_rest;
				}

				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}
jmp_rest:
			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B: {
			struct sock_filter tmp = {
				.code	= BPF_LD | BPF_ABS | BPF_B,
				.k	= fp->k,
			};

			*seen_ld_abs = true;

			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			convert_bpf_ld_abs(&tmp, &insn);
			insn++;
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* tmp = X */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_X);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;
		}
		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as BPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K)
				*insn++ = BPF_MOV32_RAW(BPF_K, BPF_REG_0,
							0, fp->k);
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -stack_off);
			/* check_load_and_stores() verifies that classic BPF can
			 * load from stack only after write, so tracking
			 * stack_depth for ST|STX insns is enough
			 */
			if (new_prog && new_prog->aux->stack_depth < stack_off)
				new_prog->aux->stack_depth = stack_off;
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			stack_off = fp->k * 4 + 4;
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -stack_off);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = skb->len or X = skb->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct sk_buff, len));
			break;

		/* Access seccomp_data fields. */
		case BPF_LDX | BPF_ABS | BPF_W:
			/* A = *(u32 *) (ctx + K) */
			*insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k);
			break;

		/* Unknown instruction. */
		default:
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - first_insn;
		if (*seen_ld_abs)
			*new_len += 4; /* Prologue bits. */
		return 0;
	}

	pass++;
	if (new_flen != new_insn - first_insn) {
		new_flen = new_insn - first_insn;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	kfree(addrs);
	BUG_ON(*new_len != new_flen);
	return 0;
err:
	kfree(addrs);
	return -EINVAL;
}
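/* Illustrative sketch (not compiled): the two-pass calling convention of
 * bpf_convert_filter(), as exercised by bpf_migrate_filter() further
 * below; old_prog/old_len/new_prog here stand in for the caller's locals.
 * The first call only sizes the eBPF result, the second call remaps into
 * the buffer the caller allocated in between.
 */
#if 0
	bool seen_ld_abs = false;
	int err, new_len = 0;

	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);		/* 1st pass: sizing */
	if (err)
		return err;
	new_prog = bpf_prog_alloc(bpf_prog_size(new_len), 0);
	if (!new_prog)
		return -ENOMEM;
	err = bpf_convert_filter(old_prog, old_len, new_prog, &new_len,
				 &seen_ld_abs);		/* 2nd pass: remap */
#endif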
/* Security:
 *
 * As we don't want to clear the mem[] array for each packet going through
 * __bpf_prog_run(), we check that a filter loaded by a user never tries to
 * read a cell that was not previously written, and we check all branches to
 * be sure a malicious user doesn't try to abuse us.
 */
static int check_load_and_stores(const struct sock_filter *filter, int flen)
{
	u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
	int pc, ret = 0;

	BUILD_BUG_ON(BPF_MEMWORDS > 16);

	masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return -ENOMEM;

	memset(masks, 0xff, flen * sizeof(*masks));

	for (pc = 0; pc < flen; pc++) {
		memvalid &= masks[pc];

		switch (filter[pc].code) {
		case BPF_ST:
		case BPF_STX:
			memvalid |= (1 << filter[pc].k);
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			if (!(memvalid & (1 << filter[pc].k))) {
				ret = -EINVAL;
				goto error;
			}
			break;
		case BPF_JMP | BPF_JA:
			/* A jump must set masks on target */
			masks[pc + 1 + filter[pc].k] &= memvalid;
			memvalid = ~0;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* A jump must set masks on targets */
			masks[pc + 1 + filter[pc].jt] &= memvalid;
			masks[pc + 1 + filter[pc].jf] &= memvalid;
			memvalid = ~0;
			break;
		}
	}
error:
	kfree(masks);
	return ret;
}

static bool chk_code_allowed(u16 code_to_probe)
{
	static const bool codes[] = {
		/* 32 bit ALU operations */
		[BPF_ALU | BPF_ADD | BPF_K] = true,
		[BPF_ALU | BPF_ADD | BPF_X] = true,
		[BPF_ALU | BPF_SUB | BPF_K] = true,
		[BPF_ALU | BPF_SUB | BPF_X] = true,
		[BPF_ALU | BPF_MUL | BPF_K] = true,
		[BPF_ALU | BPF_MUL | BPF_X] = true,
		[BPF_ALU | BPF_DIV | BPF_K] = true,
		[BPF_ALU | BPF_DIV | BPF_X] = true,
		[BPF_ALU | BPF_MOD | BPF_K] = true,
		[BPF_ALU | BPF_MOD | BPF_X] = true,
		[BPF_ALU | BPF_AND | BPF_K] = true,
		[BPF_ALU | BPF_AND | BPF_X] = true,
		[BPF_ALU | BPF_OR | BPF_K] = true,
		[BPF_ALU | BPF_OR | BPF_X] = true,
		[BPF_ALU | BPF_XOR | BPF_K] = true,
		[BPF_ALU | BPF_XOR | BPF_X] = true,
		[BPF_ALU | BPF_LSH | BPF_K] = true,
		[BPF_ALU | BPF_LSH | BPF_X] = true,
		[BPF_ALU | BPF_RSH | BPF_K] = true,
		[BPF_ALU | BPF_RSH | BPF_X] = true,
		[BPF_ALU | BPF_NEG] = true,
		/* Load instructions */
		[BPF_LD | BPF_W | BPF_ABS] = true,
		[BPF_LD | BPF_H | BPF_ABS] = true,
		[BPF_LD | BPF_B | BPF_ABS] = true,
		[BPF_LD | BPF_W | BPF_LEN] = true,
		[BPF_LD | BPF_W | BPF_IND] = true,
		[BPF_LD | BPF_H | BPF_IND] = true,
		[BPF_LD | BPF_B | BPF_IND] = true,
		[BPF_LD | BPF_IMM] = true,
		[BPF_LD | BPF_MEM] = true,
		[BPF_LDX | BPF_W | BPF_LEN] = true,
		[BPF_LDX | BPF_B | BPF_MSH] = true,
		[BPF_LDX | BPF_IMM] = true,
		[BPF_LDX | BPF_MEM] = true,
		/* Store instructions */
		[BPF_ST] = true,
		[BPF_STX] = true,
		/* Misc instructions */
		[BPF_MISC | BPF_TAX] = true,
		[BPF_MISC | BPF_TXA] = true,
		/* Return instructions */
		[BPF_RET | BPF_K] = true,
		[BPF_RET | BPF_A] = true,
		/* Jump instructions */
		[BPF_JMP | BPF_JA] = true,
		[BPF_JMP | BPF_JEQ | BPF_K] = true,
		[BPF_JMP | BPF_JEQ | BPF_X] = true,
		[BPF_JMP | BPF_JGE | BPF_K] = true,
		[BPF_JMP | BPF_JGE | BPF_X] = true,
		[BPF_JMP | BPF_JGT | BPF_K] = true,
		[BPF_JMP | BPF_JGT | BPF_X] = true,
		[BPF_JMP | BPF_JSET | BPF_K] = true,
		[BPF_JMP | BPF_JSET | BPF_X] = true,
	};

	if (code_to_probe >= ARRAY_SIZE(codes))
		return false;

	return codes[code_to_probe];
}

static bool bpf_check_basics_ok(const struct sock_filter *filter,
				unsigned int flen)
{
	if (filter == NULL)
		return false;
	if (flen == 0 || flen > BPF_MAXINSNS)
		return false;

	return true;
}
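/* Illustrative sketch (not compiled): a minimal filter that
 * check_load_and_stores() above rejects, since M[0] is read before any
 * BPF_ST/BPF_STX has written it; bpf_check_classic() below then fails
 * with -EINVAL.
 */
#if 0
static const struct sock_filter uninit_read[] = {
	BPF_STMT(BPF_LD | BPF_MEM, 0),	/* A = M[0], never written */
	BPF_STMT(BPF_RET | BPF_A, 0),
};
#endif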
static int bpf_check_classic(const struct sock_filter *filter,
			     unsigned int flen)
{
	bool anc_found;
	int pc;

	/* Check the filter code now */
	for (pc = 0; pc < flen; pc++) {
		const struct sock_filter *ftest = &filter[pc];

		/* May we actually operate on this code? */
		if (!chk_code_allowed(ftest->code))
			return -EINVAL;

		/* Some instructions need special checks */
		switch (ftest->code) {
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
			/* Check for division by zero */
			if (ftest->k == 0)
				return -EINVAL;
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
			if (ftest->k >= 32)
				return -EINVAL;
			break;
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
		case BPF_ST:
		case BPF_STX:
			/* Check for invalid memory addresses */
			if (ftest->k >= BPF_MEMWORDS)
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JA:
			/* Note, the large ftest->k might cause loops.
			 * Compare this with conditional jumps below,
			 * where offsets are limited. --ANK (981016)
			 */
			if (ftest->k >= (unsigned int)(flen - pc - 1))
				return -EINVAL;
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
			/* Both conditionals must be safe */
			if (pc + ftest->jt + 1 >= flen ||
			    pc + ftest->jf + 1 >= flen)
				return -EINVAL;
			break;
		case BPF_LD | BPF_W | BPF_ABS:
		case BPF_LD | BPF_H | BPF_ABS:
		case BPF_LD | BPF_B | BPF_ABS:
			anc_found = false;
			if (bpf_anc_helper(ftest) & BPF_ANC)
				anc_found = true;
			/* Ancillary operation unknown or unsupported */
			if (anc_found == false && ftest->k >= SKF_AD_OFF)
				return -EINVAL;
		}
	}

	/* Last instruction must be a RET code */
	switch (filter[flen - 1].code) {
	case BPF_RET | BPF_K:
	case BPF_RET | BPF_A:
		return check_load_and_stores(filter, flen);
	}

	return -EINVAL;
}

static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
				      const struct sock_fprog *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct sock_fprog_kern *fkprog;

	fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
	if (!fp->orig_prog)
		return -ENOMEM;

	fkprog = fp->orig_prog;
	fkprog->len = fprog->len;

	fkprog->filter = kmemdup(fp->insns, fsize,
				 GFP_KERNEL | __GFP_NOWARN);
	if (!fkprog->filter) {
		kfree(fp->orig_prog);
		return -ENOMEM;
	}

	return 0;
}

static void bpf_release_orig_filter(struct bpf_prog *fp)
{
	struct sock_fprog_kern *fprog = fp->orig_prog;

	if (fprog) {
		kfree(fprog->filter);
		kfree(fprog);
	}
}

static void __bpf_prog_release(struct bpf_prog *prog)
{
	if (prog->type == BPF_PROG_TYPE_SOCKET_FILTER) {
		bpf_prog_put(prog);
	} else {
		bpf_release_orig_filter(prog);
		bpf_prog_free(prog);
	}
}

static void __sk_filter_release(struct sk_filter *fp)
{
	__bpf_prog_release(fp->prog);
	kfree(fp);
}

/**
 * sk_filter_release_rcu - Release a socket filter by rcu_head
 * @rcu: rcu_head that contains the sk_filter to free
 */
static void sk_filter_release_rcu(struct rcu_head *rcu)
{
	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);

	__sk_filter_release(fp);
}
/**
 * sk_filter_release - release a socket filter
 * @fp: filter to remove
 *
 * Remove a filter from a socket and release its resources.
 */
static void sk_filter_release(struct sk_filter *fp)
{
	if (refcount_dec_and_test(&fp->refcnt))
		call_rcu(&fp->rcu, sk_filter_release_rcu);
}

void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	atomic_sub(filter_size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

/* try to charge the socket memory if there is space available
 * return true on success
 */
static bool __sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	u32 filter_size = bpf_prog_size(fp->prog->len);

	/* same check as in sock_kmalloc() */
	if (filter_size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
		atomic_add(filter_size, &sk->sk_omem_alloc);
		return true;
	}
	return false;
}

bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	if (!refcount_inc_not_zero(&fp->refcnt))
		return false;

	if (!__sk_filter_charge(sk, fp)) {
		sk_filter_release(fp);
		return false;
	}
	return true;
}

static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
{
	struct sock_filter *old_prog;
	struct bpf_prog *old_fp;
	int err, new_len, old_len = fp->len;
	bool seen_ld_abs = false;

	/* We are free to overwrite insns et al right here as it
	 * won't be used at this point in time anymore internally
	 * after the migration to the internal BPF instruction
	 * representation.
	 */
	BUILD_BUG_ON(sizeof(struct sock_filter) !=
		     sizeof(struct bpf_insn));

	/* Conversion cannot happen on overlapping memory areas,
	 * so we need to keep the user BPF around until the 2nd
	 * pass. At this time, the user BPF is stored in fp->insns.
	 */
	old_prog = kmemdup(fp->insns, old_len * sizeof(struct sock_filter),
			   GFP_KERNEL | __GFP_NOWARN);
	if (!old_prog) {
		err = -ENOMEM;
		goto out_err;
	}

	/* 1st pass: calculate the new program length. */
	err = bpf_convert_filter(old_prog, old_len, NULL, &new_len,
				 &seen_ld_abs);
	if (err)
		goto out_err_free;

	/* Expand fp for appending the new filter representation. */
	old_fp = fp;
	fp = bpf_prog_realloc(old_fp, bpf_prog_size(new_len), 0);
	if (!fp) {
		/* The old_fp is still around in case we couldn't
		 * allocate new memory, so uncharge on that one.
		 */
		fp = old_fp;
		err = -ENOMEM;
		goto out_err_free;
	}

	fp->len = new_len;

	/* 2nd pass: remap sock_filter insns into bpf_insn insns. */
	err = bpf_convert_filter(old_prog, old_len, fp, &new_len,
				 &seen_ld_abs);
	if (err)
		/* 2nd bpf_convert_filter() can fail only if it fails
		 * to allocate memory, remapping must succeed. Note,
		 * that at this time old_fp has already been released
		 * by krealloc().
		 */
		goto out_err_free;

	fp = bpf_prog_select_runtime(fp, &err);
	if (err)
		goto out_err_free;

	kfree(old_prog);
	return fp;

out_err_free:
	kfree(old_prog);
out_err:
	__bpf_prog_release(fp);
	return ERR_PTR(err);
}
static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp,
					   bpf_aux_classic_check_t trans)
{
	int err;

	fp->bpf_func = NULL;
	fp->jited = 0;

	err = bpf_check_classic(fp->insns, fp->len);
	if (err) {
		__bpf_prog_release(fp);
		return ERR_PTR(err);
	}

	/* There might be additional checks and transformations
	 * needed on classic filters, f.e. in case of seccomp.
	 */
	if (trans) {
		err = trans(fp->insns, fp->len);
		if (err) {
			__bpf_prog_release(fp);
			return ERR_PTR(err);
		}
	}

	/* Probe if we can JIT compile the filter and if so, do
	 * the compilation of the filter.
	 */
	bpf_jit_compile(fp);

	/* JIT compiler couldn't process this filter, so do the
	 * internal BPF translation for the optimized interpreter.
	 */
	if (!fp->jited)
		fp = bpf_migrate_filter(fp);

	return fp;
}

/**
 *	bpf_prog_create - create an unattached filter
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *
 * Create a filter independent of any socket. We first run some
 * sanity checks on it to make sure it does not explode on us later.
 * If an error occurs or there is insufficient memory for the filter
 * a negative errno code is returned. On success the return is zero.
 */
int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	memcpy(fp->insns, fprog->filter, fsize);

	fp->len = fprog->len;
	/* Since unattached filters are not copied back to user
	 * space through sk_get_filter(), we do not need to hold
	 * a copy here, and can spare us the work.
	 */
	fp->orig_prog = NULL;

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, NULL);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create);

/**
 *	bpf_prog_create_from_user - create an unattached filter from user buffer
 *	@pfp: the unattached filter that is created
 *	@fprog: the filter program
 *	@trans: post-classic verifier transformation handler
 *	@save_orig: save classic BPF program
 *
 * This function effectively does the same as bpf_prog_create(), only
 * that it builds up its insns buffer from user space provided buffer.
 * It also allows for passing a bpf_aux_classic_check_t handler.
 */
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
			      bpf_aux_classic_check_t trans, bool save_orig)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *fp;
	int err;

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return -EINVAL;

	fp = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!fp)
		return -ENOMEM;

	if (copy_from_user(fp->insns, fprog->filter, fsize)) {
		__bpf_prog_free(fp);
		return -EFAULT;
	}

	fp->len = fprog->len;
	fp->orig_prog = NULL;

	if (save_orig) {
		err = bpf_prog_store_orig_filter(fp, fprog);
		if (err) {
			__bpf_prog_free(fp);
			return -ENOMEM;
		}
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	fp = bpf_prepare_filter(fp, trans);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	*pfp = fp;
	return 0;
}
EXPORT_SYMBOL_GPL(bpf_prog_create_from_user);

void bpf_prog_destroy(struct bpf_prog *fp)
{
	__bpf_prog_release(fp);
}
EXPORT_SYMBOL_GPL(bpf_prog_destroy);

static int __sk_attach_prog(struct bpf_prog *prog, struct sock *sk)
{
	struct sk_filter *fp, *old_fp;

	fp = kmalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->prog = prog;

	if (!__sk_filter_charge(sk, fp)) {
		kfree(fp);
		return -ENOMEM;
	}
	refcount_set(&fp->refcnt, 1);

	old_fp = rcu_dereference_protected(sk->sk_filter,
					   lockdep_sock_is_held(sk));
	rcu_assign_pointer(sk->sk_filter, fp);

	if (old_fp)
		sk_filter_uncharge(sk, old_fp);

	return 0;
}
static struct bpf_prog *__get_filter(struct sock_fprog *fprog, struct sock *sk)
{
	unsigned int fsize = bpf_classic_proglen(fprog);
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	/* Make sure new filter is there and in the right amounts. */
	if (!bpf_check_basics_ok(fprog->filter, fprog->len))
		return ERR_PTR(-EINVAL);

	prog = bpf_prog_alloc(bpf_prog_size(fprog->len), 0);
	if (!prog)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(prog->insns, fprog->filter, fsize)) {
		__bpf_prog_free(prog);
		return ERR_PTR(-EFAULT);
	}

	prog->len = fprog->len;

	err = bpf_prog_store_orig_filter(prog, fprog);
	if (err) {
		__bpf_prog_free(prog);
		return ERR_PTR(-ENOMEM);
	}

	/* bpf_prepare_filter() already takes care of freeing
	 * memory in case something goes wrong.
	 */
	return bpf_prepare_filter(prog, NULL);
}

/**
 *	sk_attach_filter - attach a socket filter
 *	@fprog: the filter program
 *	@sk: the socket to use
 *
 * Attach the user's filter code. We first run some sanity checks on
 * it to make sure it does not explode on us later. If an error
 * occurs or there is insufficient memory for the filter a negative
 * errno code is returned. On success the return is zero.
 */
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		__bpf_prog_release(prog);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(sk_attach_filter);

int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk)
{
	struct bpf_prog *prog = __get_filter(fprog, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (bpf_prog_size(prog->len) > sysctl_optmem_max)
		err = -ENOMEM;
	else
		err = reuseport_attach_prog(sk, prog);

	if (err)
		__bpf_prog_release(prog);

	return err;
}

static struct bpf_prog *__get_bpf(u32 ufd, struct sock *sk)
{
	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return ERR_PTR(-EPERM);

	return bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
}

int sk_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog = __get_bpf(ufd, sk);
	int err;

	if (IS_ERR(prog))
		return PTR_ERR(prog);

	err = __sk_attach_prog(prog, sk);
	if (err < 0) {
		bpf_prog_put(prog);
		return err;
	}

	return 0;
}
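/* Illustrative sketch (not compiled; userspace side): the setsockopt()
 * call that ends up in sk_attach_filter() above; fd is an assumed open
 * socket. The one-insn filter accepts every packet; real users would of
 * course install something selective.
 */
#if 0
	struct sock_filter accept_all[] = {
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= accept_all,
	};

	if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &fprog, sizeof(fprog)) < 0)
		perror("SO_ATTACH_FILTER");
#endif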
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk)
{
	struct bpf_prog *prog;
	int err;

	if (sock_flag(sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(prog) && PTR_ERR(prog) == -EINVAL)
		prog = bpf_prog_get_type(ufd, BPF_PROG_TYPE_SK_REUSEPORT);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT) {
		/* Like other non BPF_PROG_TYPE_SOCKET_FILTER
		 * bpf prog (e.g. sockmap).  It depends on the
		 * limitation imposed by bpf_prog_load().
		 * Hence, sysctl_optmem_max is not checked.
		 */
		if ((sk->sk_type != SOCK_STREAM &&
		     sk->sk_type != SOCK_DGRAM) ||
		    (sk->sk_protocol != IPPROTO_UDP &&
		     sk->sk_protocol != IPPROTO_TCP) ||
		    (sk->sk_family != AF_INET &&
		     sk->sk_family != AF_INET6)) {
			err = -ENOTSUPP;
			goto err_prog_put;
		}
	} else {
		/* BPF_PROG_TYPE_SOCKET_FILTER */
		if (bpf_prog_size(prog->len) > sysctl_optmem_max) {
			err = -ENOMEM;
			goto err_prog_put;
		}
	}

	err = reuseport_attach_prog(sk, prog);
err_prog_put:
	if (err)
		bpf_prog_put(prog);

	return err;
}

void sk_reuseport_prog_free(struct bpf_prog *prog)
{
	if (!prog)
		return;

	if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
		bpf_prog_put(prog);
	else
		bpf_prog_destroy(prog);
}

struct bpf_scratchpad {
	union {
		__be32 diff[MAX_BPF_STACK / sizeof(__be32)];
		u8     buff[MAX_BPF_STACK];
	};
};

static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp);

static inline int __bpf_try_make_writable(struct sk_buff *skb,
					  unsigned int write_len)
{
	return skb_ensure_writable(skb, write_len);
}

static inline int bpf_try_make_writable(struct sk_buff *skb,
					unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_pointers(skb);
	return err;
}

static int bpf_try_make_head_writable(struct sk_buff *skb)
{
	return bpf_try_make_writable(skb, skb_headlen(skb));
}

static inline void bpf_push_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpush_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

static inline void bpf_pull_mac_rcsum(struct sk_buff *skb)
{
	if (skb_at_tc_ingress(skb))
		skb_postpull_rcsum(skb, skb_mac_header(skb), skb->mac_len);
}

BPF_CALL_5(bpf_skb_store_bytes, struct sk_buff *, skb, u32, offset,
	   const void *, from, u32, len, u64, flags)
{
	void *ptr;

	if (unlikely(flags & ~(BPF_F_RECOMPUTE_CSUM | BPF_F_INVALIDATE_HASH)))
		return -EINVAL;
	if (unlikely(offset > 0xffff))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + len)))
		return -EFAULT;

	ptr = skb->data + offset;
	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpull_rcsum(skb, ptr, len, offset);

	memcpy(ptr, from, len);

	if (flags & BPF_F_RECOMPUTE_CSUM)
		__skb_postpush_rcsum(skb, ptr, len, offset);
	if (flags & BPF_F_INVALIDATE_HASH)
		skb_clear_hash(skb);

	return 0;
}

static const struct bpf_func_proto bpf_skb_store_bytes_proto = {
	.func		= bpf_skb_store_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_4(bpf_skb_load_bytes, const struct sk_buff *, skb, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	ptr = skb_header_pointer(skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_proto = {
	.func		= bpf_skb_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};

BPF_CALL_4(bpf_flow_dissector_load_bytes,
	   const struct bpf_flow_dissector *, ctx, u32, offset,
	   void *, to, u32, len)
{
	void *ptr;

	if (unlikely(offset > 0xffff))
		goto err_clear;

	if (unlikely(!ctx->skb))
		goto err_clear;

	ptr = skb_header_pointer(ctx->skb, offset, len, to);
	if (unlikely(!ptr))
		goto err_clear;
	if (ptr != to)
		memcpy(to, ptr, len);

	return 0;
err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_flow_dissector_load_bytes_proto = {
	.func		= bpf_flow_dissector_load_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
};
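/* Illustrative sketch (not compiled; eBPF program side): typical use of
 * the bpf_skb_load_bytes() helper implemented above to read one field,
 * assuming plain Ethernet + IPv4 framing. On failure the helper zeroes
 * the destination buffer and returns -EFAULT.
 */
#if 0
	__u8 ip_proto;

	if (bpf_skb_load_bytes(skb, ETH_HLEN + offsetof(struct iphdr, protocol),
			       &ip_proto, sizeof(ip_proto)) < 0)
		return 0;
	if (ip_proto != IPPROTO_TCP)
		return 0;
#endif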
BPF_CALL_5(bpf_skb_load_bytes_relative, const struct sk_buff *, skb,
	   u32, offset, void *, to, u32, len, u32, start_header)
{
	u8 *end = skb_tail_pointer(skb);
	u8 *net = skb_network_header(skb);
	u8 *mac = skb_mac_header(skb);
	u8 *ptr;

	if (unlikely(offset > 0xffff || len > (end - mac)))
		goto err_clear;

	switch (start_header) {
	case BPF_HDR_START_MAC:
		ptr = mac + offset;
		break;
	case BPF_HDR_START_NET:
		ptr = net + offset;
		break;
	default:
		goto err_clear;
	}

	if (likely(ptr >= mac && ptr + len <= end)) {
		memcpy(to, ptr, len);
		return 0;
	}

err_clear:
	memset(to, 0, len);
	return -EFAULT;
}

static const struct bpf_func_proto bpf_skb_load_bytes_relative_proto = {
	.func		= bpf_skb_load_bytes_relative,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg4_type	= ARG_CONST_SIZE,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * the check, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return bpf_try_make_writable(skb, len ? : skb_headlen(skb));
}

static const struct bpf_func_proto bpf_skb_pull_data_proto = {
	.func		= bpf_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_sk_fullsock, struct sock *, sk)
{
	return sk_fullsock(sk) ? (unsigned long)sk : (unsigned long)NULL;
}

static const struct bpf_func_proto bpf_sk_fullsock_proto = {
	.func		= bpf_sk_fullsock,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_SOCKET_OR_NULL,
	.arg1_type	= ARG_PTR_TO_SOCK_COMMON,
};

static inline int sk_skb_try_make_writable(struct sk_buff *skb,
					   unsigned int write_len)
{
	int err = __bpf_try_make_writable(skb, write_len);

	bpf_compute_data_end_sk_skb(skb);
	return err;
}

BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
{
	/* Idea is the following: should the needed direct read/write
	 * test fail during runtime, we can pull in more data and redo
	 * the check, since implicitly, we invalidate previous checks here.
	 *
	 * Or, since we know how much we need to make read/writeable,
	 * this can be done once at the program beginning for direct
	 * access case. By this we overcome limitations of only current
	 * headroom being accessible.
	 */
	return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
}
static const struct bpf_func_proto sk_skb_pull_data_proto = {
	.func		= sk_skb_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		csum_replace_by_diff(ptr, to);
		break;
	case 2:
		csum_replace2(ptr, from, to);
		break;
	case 4:
		csum_replace4(ptr, from, to);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static const struct bpf_func_proto bpf_l3_csum_replace_proto = {
	.func		= bpf_l3_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
	   u64, from, u64, to, u64, flags)
{
	bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
	bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
	bool do_mforce = flags & BPF_F_MARK_ENFORCE;
	__sum16 *ptr;

	if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
			       BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
		return -EINVAL;
	if (unlikely(offset > 0xffff || offset & 1))
		return -EFAULT;
	if (unlikely(bpf_try_make_writable(skb, offset + sizeof(*ptr))))
		return -EFAULT;

	ptr = (__sum16 *)(skb->data + offset);
	if (is_mmzero && !do_mforce && !*ptr)
		return 0;

	switch (flags & BPF_F_HDR_FIELD_MASK) {
	case 0:
		if (unlikely(from != 0))
			return -EINVAL;

		inet_proto_csum_replace_by_diff(ptr, skb, to, is_pseudo);
		break;
	case 2:
		inet_proto_csum_replace2(ptr, skb, from, to, is_pseudo);
		break;
	case 4:
		inet_proto_csum_replace4(ptr, skb, from, to, is_pseudo);
		break;
	default:
		return -EINVAL;
	}

	if (is_mmzero && !*ptr)
		*ptr = CSUM_MANGLED_0;
	return 0;
}

static const struct bpf_func_proto bpf_l4_csum_replace_proto = {
	.func		= bpf_l4_csum_replace,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
	.arg5_type	= ARG_ANYTHING,
};

BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size,
	   __be32 *, to, u32, to_size, __wsum, seed)
{
	struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp);
	u32 diff_size = from_size + to_size;
	int i, j = 0;

	/* This is quite flexible, some examples:
	 *
	 * from_size == 0, to_size > 0,  seed := csum --> pushing data
	 * from_size > 0,  to_size == 0, seed := csum --> pulling data
	 * from_size > 0,  to_size > 0,  seed := 0    --> diffing data
	 *
	 * Even for diffing, from_size and to_size don't need to be equal.
	 */
	if (unlikely(((from_size | to_size) & (sizeof(__be32) - 1)) ||
		     diff_size > sizeof(sp->diff)))
		return -EINVAL;

	for (i = 0; i < from_size / sizeof(__be32); i++, j++)
		sp->diff[j] = ~from[i];
	for (i = 0; i <   to_size / sizeof(__be32); i++, j++)
		sp->diff[j] = to[i];

	return csum_partial(sp->diff, diff_size, seed);
}

static const struct bpf_func_proto bpf_csum_diff_proto = {
	.func		= bpf_csum_diff,
	.gpl_only	= false,
	.pkt_access	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg2_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg3_type	= ARG_PTR_TO_MEM_OR_NULL,
	.arg4_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg5_type	= ARG_ANYTHING,
};
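/* Illustrative sketch (not compiled; eBPF program side): the intended
 * combination of the checksum helpers above when rewriting an IPv4
 * destination address, assuming Ethernet + IPv4 + TCP framing; old_ip
 * and new_ip are assumed __be32 values. The low bits of the last
 * argument carry the field size (BPF_F_HDR_FIELD_MASK).
 */
#if 0
	const u32 ip_off   = ETH_HLEN;
	const u32 l3_check = ip_off + offsetof(struct iphdr, check);
	const u32 l4_check = ip_off + sizeof(struct iphdr) +
			     offsetof(struct tcphdr, check);
	const u32 daddr    = ip_off + offsetof(struct iphdr, daddr);

	bpf_skb_store_bytes(skb, daddr, &new_ip, sizeof(new_ip), 0);
	bpf_l3_csum_replace(skb, l3_check, old_ip, new_ip, sizeof(new_ip));
	/* daddr is part of the TCP pseudo header, hence BPF_F_PSEUDO_HDR */
	bpf_l4_csum_replace(skb, l4_check, old_ip, new_ip,
			    BPF_F_PSEUDO_HDR | sizeof(new_ip));
#endif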
BPF_CALL_2(bpf_csum_update, struct sk_buff *, skb, __wsum, csum)
{
	/* The interface is to be used in combination with bpf_csum_diff()
	 * for direct packet writes. csum rotation for alignment as well
	 * as emulating csum_sub() can be done from the eBPF program.
	 */
	if (skb->ip_summed == CHECKSUM_COMPLETE)
		return (skb->csum = csum_add(skb->csum, csum));

	return -ENOTSUPP;
}

static const struct bpf_func_proto bpf_csum_update_proto = {
	.func		= bpf_csum_update,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb)
{
	return dev_forward_skb(dev, skb);
}

static inline int __bpf_rx_skb_no_mac(struct net_device *dev,
				      struct sk_buff *skb)
{
	int ret = ____dev_forward_skb(dev, skb);

	if (likely(!ret)) {
		skb->dev = dev;
		ret = netif_rx(skb);
	}

	return ret;
}

static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
{
	int ret;

	if (dev_xmit_recursion()) {
		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
		kfree_skb(skb);
		return -ENETDOWN;
	}

	skb->dev = dev;

	dev_xmit_recursion_inc();
	ret = dev_queue_xmit(skb);
	dev_xmit_recursion_dec();

	return ret;
}

static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	unsigned int mlen = skb_network_offset(skb);

	if (mlen) {
		__skb_pull(skb, mlen);

		/* At ingress, the mac header has already been pulled once.
		 * At egress, skb_postpull_rcsum has to be done in case that
		 * the skb is originated from ingress (i.e. a forwarded skb)
		 * to ensure that rcsum starts at net header.
		 */
		if (!skb_at_tc_ingress(skb))
			skb_postpull_rcsum(skb, skb_mac_header(skb), mlen);
	}
	skb_pop_mac_header(skb);
	skb_reset_mac_len(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev,
				 u32 flags)
{
	/* Verify that a link layer header is carried */
	if (unlikely(skb->mac_header >= skb->network_header)) {
		kfree_skb(skb);
		return -ERANGE;
	}

	bpf_push_mac_rcsum(skb);
	return flags & BPF_F_INGRESS ?
	       __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb);
}

static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev,
			  u32 flags)
{
	if (dev_is_mac_header_xmit(dev))
		return __bpf_redirect_common(skb, dev, flags);
	else
		return __bpf_redirect_no_mac(skb, dev, flags);
}
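/* Illustrative sketch (not compiled; TC eBPF program side): the two
 * redirect flavors built on __bpf_redirect(). bpf_clone_redirect() acts
 * immediately on a clone and lets the program continue, whereas
 * bpf_redirect() only records ifindex/flags in bpf_redirect_info and is
 * acted upon via the TC_ACT_REDIRECT return code, making it cheaper.
 * TARGET_IFINDEX is an assumed device index.
 */
#if 0
	/* mirror a copy to the target's egress and keep going */
	bpf_clone_redirect(skb, TARGET_IFINDEX, 0);

	/* hand the original packet to the target's ingress path */
	return bpf_redirect(TARGET_IFINDEX, BPF_F_INGRESS);
#endif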
BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags)
{
	struct net_device *dev;
	struct sk_buff *clone;
	int ret;

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return -EINVAL;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ifindex);
	if (unlikely(!dev))
		return -EINVAL;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (unlikely(!clone))
		return -ENOMEM;

	/* For direct write, we need to keep the invariant that the skbs
	 * we're dealing with need to be uncloned. Should uncloning fail
	 * here, we need to free the just generated clone to unclone once
	 * again.
	 */
	ret = bpf_try_make_head_writable(skb);
	if (unlikely(ret)) {
		kfree_skb(clone);
		return -ENOMEM;
	}

	return __bpf_redirect(clone, dev, flags);
}

static const struct bpf_func_proto bpf_clone_redirect_proto = {
	.func		= bpf_clone_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info);

BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);

	if (unlikely(flags & ~(BPF_F_INGRESS)))
		return TC_ACT_SHOT;

	ri->ifindex = ifindex;
	ri->flags = flags;

	return TC_ACT_REDIRECT;
}

int skb_do_redirect(struct sk_buff *skb)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	struct net_device *dev;

	dev = dev_get_by_index_rcu(dev_net(skb->dev), ri->ifindex);
	ri->ifindex = 0;
	if (unlikely(!dev)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return __bpf_redirect(skb, dev, ri->flags);
}

static const struct bpf_func_proto bpf_redirect_proto = {
	.func		= bpf_redirect,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_ANYTHING,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_apply_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->apply_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_apply_bytes_proto = {
	.func		= bpf_msg_apply_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_msg_cork_bytes, struct sk_msg *, msg, u32, bytes)
{
	msg->cork_bytes = bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_cork_bytes_proto = {
	.func		= bpf_msg_cork_bytes,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};
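/* Illustrative sketch (not compiled; SK_MSG program side): how the two
 * helpers above shape verdict granularity, e.g. for a 512 byte record
 * protocol: cork until a full record is queued, then let one verdict
 * cover exactly that record.
 */
#if 0
	bpf_msg_cork_bytes(msg, 512);	/* wait for a full record */
	bpf_msg_apply_bytes(msg, 512);	/* verdict spans the record */
	return SK_PASS;
#endif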
BPF_CALL_4(bpf_msg_pull_data, struct sk_msg *, msg, u32, start,
	   u32, end, u64, flags)
{
	u32 len = 0, offset = 0, copy = 0, poffset = 0, bytes = end - start;
	u32 first_sge, last_sge, i, shift, bytes_sg_total;
	struct scatterlist *sge;
	u8 *raw, *to, *from;
	struct page *page;

	if (unlikely(flags || end <= start))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		len = sk_msg_elem(msg, i)->length;
		if (start < offset + len)
			break;
		offset += len;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	if (unlikely(start >= offset + len))
		return -EINVAL;

	first_sge = i;
	/* The start may point into the sg element so we need to also
	 * account for the headroom.
	 */
	bytes_sg_total = start - offset + bytes;
	if (!msg->sg.copy[i] && bytes_sg_total <= len)
		goto out;

	/* At this point we need to linearize multiple scatterlist
	 * elements or a single shared page. Either way we need to
	 * copy into a linear buffer exclusively owned by BPF. Then
	 * place the buffer in the scatterlist and fixup the original
	 * entries by removing the entries now in the linear buffer
	 * and shifting the remaining entries. For now we do not try
	 * to copy partial entries to avoid complexity of running out
	 * of sg_entry slots. The downside is reading a single byte
	 * will copy the entire sg entry.
	 */
	do {
		copy += sk_msg_elem(msg, i)->length;
		sk_msg_iter_var_next(i);
		if (bytes_sg_total <= copy)
			break;
	} while (i != msg->sg.end);
	last_sge = i;

	if (unlikely(bytes_sg_total > copy))
		return -EINVAL;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(copy));
	if (unlikely(!page))
		return -ENOMEM;

	raw = page_address(page);
	i = first_sge;
	do {
		sge = sk_msg_elem(msg, i);
		from = sg_virt(sge);
		len = sge->length;
		to = raw + poffset;

		memcpy(to, from, len);
		poffset += len;
		sge->length = 0;
		put_page(sg_page(sge));

		sk_msg_iter_var_next(i);
	} while (i != last_sge);

	sg_set_page(&msg->sg.data[first_sge], page, copy, 0);

	/* To repair sg ring we need to shift entries. If we only
	 * had a single entry though we can just replace it and
	 * be done. Otherwise walk the ring and shift the entries.
	 */
	WARN_ON_ONCE(last_sge == first_sge);
	shift = last_sge > first_sge ?
		last_sge - first_sge - 1 :
		MAX_SKB_FRAGS - first_sge + last_sge - 1;
	if (!shift)
		goto out;

	i = first_sge;
	sk_msg_iter_var_next(i);
	do {
		u32 move_from;

		if (i + shift >= MAX_MSG_FRAGS)
			move_from = i + shift - MAX_MSG_FRAGS;
		else
			move_from = i + shift;
		if (move_from == msg->sg.end)
			break;

		msg->sg.data[i] = msg->sg.data[move_from];
		msg->sg.data[move_from].length = 0;
		msg->sg.data[move_from].page_link = 0;
		msg->sg.data[move_from].offset = 0;
		sk_msg_iter_var_next(i);
	} while (1);

	msg->sg.end = msg->sg.end - shift > msg->sg.end ?
		      msg->sg.end - shift + MAX_MSG_FRAGS :
		      msg->sg.end - shift;
out:
	msg->data = sg_virt(&msg->sg.data[first_sge]) + start - offset;
	msg->data_end = msg->data + bytes;
	return 0;
}

static const struct bpf_func_proto bpf_msg_pull_data_proto = {
	.func		= bpf_msg_pull_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};
BPF_CALL_4(bpf_msg_push_data, struct sk_msg *, msg, u32, start,
	   u32, len, u64, flags)
{
	struct scatterlist sge, nsge, nnsge, rsge = {0}, *psge;
	u32 new, i = 0, l, space, copy = 0, offset = 0;
	u8 *raw, *to, *from;
	struct page *page;

	if (unlikely(flags))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		l = sk_msg_elem(msg, i)->length;

		if (start < offset + l)
			break;
		offset += l;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	if (start >= offset + l)
		return -EINVAL;

	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

	/* If no space is available we will fall back to a copy; we need at
	 * least one scatterlist elem available to push data into
	 * when start aligns to the beginning of an element or two
	 * when it falls inside an element. We handle the start equals
	 * offset case because it is the common case for inserting a
	 * header.
	 */
	if (!space || (space == 1 && start != offset))
		copy = msg->sg.data[i].length;

	page = alloc_pages(__GFP_NOWARN | GFP_ATOMIC | __GFP_COMP,
			   get_order(copy + len));
	if (unlikely(!page))
		return -ENOMEM;

	if (copy) {
		int front, back;

		raw = page_address(page);

		psge = sk_msg_elem(msg, i);
		front = start - offset;
		back = psge->length - front;
		from = sg_virt(psge);

		if (front)
			memcpy(raw, from, front);

		if (back) {
			from += front;
			to = raw + front + len;

			memcpy(to, from, back);
		}

		put_page(sg_page(psge));
	} else if (start - offset) {
		psge = sk_msg_elem(msg, i);
		rsge = sk_msg_elem_cpy(msg, i);

		psge->length = start - offset;
		rsge.length -= psge->length;
		rsge.offset += start;

		sk_msg_iter_var_next(i);
		sg_unmark_end(psge);
		sk_msg_iter_next(msg, end);
	}

	/* Slot(s) to place newly allocated data */
	new = i;

	/* Shift one or two slots as needed */
	if (!copy) {
		sge = sk_msg_elem_cpy(msg, i);

		sk_msg_iter_var_next(i);
		sg_unmark_end(&sge);
		sk_msg_iter_next(msg, end);

		nsge = sk_msg_elem_cpy(msg, i);
		if (rsge.length) {
			sk_msg_iter_var_next(i);
			nnsge = sk_msg_elem_cpy(msg, i);
		}

		while (i != msg->sg.end) {
			msg->sg.data[i] = sge;
			sge = nsge;
			sk_msg_iter_var_next(i);
			if (rsge.length) {
				nsge = nnsge;
				nnsge = sk_msg_elem_cpy(msg, i);
			} else {
				nsge = sk_msg_elem_cpy(msg, i);
			}
		}
	}

	/* Place newly allocated data buffer */
	sk_mem_charge(msg->sk, len);
	msg->sg.size += len;
	msg->sg.copy[new] = false;
	sg_set_page(&msg->sg.data[new], page, len + copy, 0);
	if (rsge.length) {
		get_page(sg_page(&rsge));
		sk_msg_iter_var_next(new);
		msg->sg.data[new] = rsge;
	}

	sk_msg_compute_data_pointers(msg);
	return 0;
}

static const struct bpf_func_proto bpf_msg_push_data_proto = {
	.func		= bpf_msg_push_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

static void sk_msg_shift_left(struct sk_msg *msg, int i)
{
	int prev;

	do {
		prev = i;
		sk_msg_iter_var_next(i);
		msg->sg.data[prev] = msg->sg.data[i];
	} while (i != msg->sg.end);

	sk_msg_iter_prev(msg, end);
}

static void sk_msg_shift_right(struct sk_msg *msg, int i)
{
	struct scatterlist tmp, sge;

	sk_msg_iter_next(msg, end);
	sge = sk_msg_elem_cpy(msg, i);
	sk_msg_iter_var_next(i);
	tmp = sk_msg_elem_cpy(msg, i);

	while (i != msg->sg.end) {
		msg->sg.data[i] = sge;
		sk_msg_iter_var_next(i);
		sge = tmp;
		tmp = sk_msg_elem_cpy(msg, i);
	}
}
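/* Illustrative sketch (not compiled; SK_MSG program side): pairing
 * bpf_msg_push_data() above with bpf_msg_pop_data() below to insert and
 * later strip a 4 byte application header at the front of the message
 * (start == 0 being the common header-insertion case).
 */
#if 0
	if (bpf_msg_push_data(msg, 0, 4, 0))	/* make 4 writable bytes */
		return SK_DROP;
	/* fill msg->data[0..3] after the usual data/data_end bounds check */

	if (bpf_msg_pop_data(msg, 0, 4, 0))	/* remove them again */
		return SK_DROP;
#endif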
BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
	   u32, len, u64, flags)
{
	u32 i = 0, l, space, offset = 0;
	u64 last = start + len;
	int pop;

	if (unlikely(flags))
		return -EINVAL;

	/* First find the starting scatterlist element */
	i = msg->sg.start;
	do {
		l = sk_msg_elem(msg, i)->length;

		if (start < offset + l)
			break;
		offset += l;
		sk_msg_iter_var_next(i);
	} while (i != msg->sg.end);

	/* Bounds checks: start and pop must be inside message */
	if (start >= offset + l || last >= msg->sg.size)
		return -EINVAL;

	space = MAX_MSG_FRAGS - sk_msg_elem_used(msg);

	pop = len;
	if (start != offset) {
		struct scatterlist *nsge, *sge = sk_msg_elem(msg, i);
		int a = start;
		int b = sge->length - pop - a;

		sk_msg_iter_var_next(i);

		if (pop < sge->length - a) {
			if (space) {
				sge->length = a;
				sk_msg_shift_right(msg, i);
				nsge = sk_msg_elem(msg, i);
				get_page(sg_page(sge));
				sg_set_page(nsge, sg_page(sge), b,
					    sge->offset + pop + a);
			} else {
				struct page *page, *orig;
				u8 *to, *from;

				page = alloc_pages(__GFP_NOWARN |
						   __GFP_COMP | GFP_ATOMIC,
						   get_order(a + b));
				if (unlikely(!page))
					return -ENOMEM;

				sge->length = a;
				orig = sg_page(sge);
				from = sg_virt(sge);
				to = page_address(page);
				memcpy(to, from, a);
				memcpy(to + a, from + a + pop, b);
				sg_set_page(sge, page, a + b, 0);
				put_page(orig);
			}
			pop = 0;
		} else if (pop >= sge->length - a) {
			sge->length = a;
			pop -= (sge->length - a);
		}
	}

	while (pop) {
		struct scatterlist *sge = sk_msg_elem(msg, i);

		if (pop < sge->length) {
			sge->length -= pop;
			sge->offset += pop;
			pop = 0;
		} else {
			pop -= sge->length;
			sk_msg_shift_left(msg, i);
		}
		sk_msg_iter_var_next(i);
	}

	sk_mem_uncharge(msg->sk, len - pop);
	msg->sg.size -= (len - pop);
	sk_msg_compute_data_pointers(msg);
	return 0;
}

static const struct bpf_func_proto bpf_msg_pop_data_proto = {
	.func		= bpf_msg_pop_data,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
	.arg4_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb)
{
	return task_get_classid(skb);
}

static const struct bpf_func_proto bpf_get_cgroup_classid_proto = {
	.func		= bpf_get_cgroup_classid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_route_realm, const struct sk_buff *, skb)
{
	return dst_tclassid(skb);
}

static const struct bpf_func_proto bpf_get_route_realm_proto = {
	.func		= bpf_get_route_realm,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_get_hash_recalc, struct sk_buff *, skb)
{
	/* If skb_clear_hash() was called due to mangling, we can
	 * trigger SW recalculation here. Later access to hash
	 * can then use the inline skb->hash via context directly
	 * instead of calling this helper again.
	 */
	return skb_get_hash(skb);
}

static const struct bpf_func_proto bpf_get_hash_recalc_proto = {
	.func		= bpf_get_hash_recalc,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_1(bpf_set_hash_invalid, struct sk_buff *, skb)
{
	/* After all direct packet write, this can be used once for
	 * triggering a lazy recalc on next skb_get_hash() invocation.
	 */
	skb_clear_hash(skb);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_invalid_proto = {
	.func		= bpf_set_hash_invalid,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};

BPF_CALL_2(bpf_set_hash, struct sk_buff *, skb, u32, hash)
{
	/* Set user specified hash as L4(+), so that it gets returned
	 * on skb_get_hash() call unless BPF prog later on triggers a
	 * skb_clear_hash().
	 */
	__skb_set_sw_hash(skb, hash, true);
	return 0;
}

static const struct bpf_func_proto bpf_set_hash_proto = {
	.func		= bpf_set_hash,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_3(bpf_skb_vlan_push, struct sk_buff *, skb, __be16, vlan_proto,
	   u16, vlan_tci)
{
	int ret;

	if (unlikely(vlan_proto != htons(ETH_P_8021Q) &&
		     vlan_proto != htons(ETH_P_8021AD)))
		vlan_proto = htons(ETH_P_8021Q);

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_push(skb, vlan_proto, vlan_tci);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_push_proto = {
	.func		= bpf_skb_vlan_push,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_1(bpf_skb_vlan_pop, struct sk_buff *, skb)
{
	int ret;

	bpf_push_mac_rcsum(skb);
	ret = skb_vlan_pop(skb);
	bpf_pull_mac_rcsum(skb);

	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_vlan_pop_proto = {
	.func		= bpf_skb_vlan_pop,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
};
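/* Illustrative sketch (not compiled; TC eBPF program side): re-tagging a
 * packet with the vlan helpers above. The protocol argument is __be16,
 * so the program passes it in network byte order.
 */
#if 0
	bpf_skb_vlan_pop(skb);				/* strip any tag */
	bpf_skb_vlan_push(skb, bpf_htons(ETH_P_8021Q), 42);	/* VID 42 */
#endif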
static int bpf_skb_generic_push(struct sk_buff *skb, u32 off, u32 len)
{
	/* Caller already did skb_cow() with len as headroom,
	 * so no need to do it here.
	 */
	skb_push(skb, len);
	memmove(skb->data, skb->data + len, off);
	memset(skb->data + off, 0, len);

	/* No skb_postpush_rcsum(skb, skb->data + off, len)
	 * needed here as it does not change the skb->csum
	 * result for checksum complete when summing over
	 * zeroed blocks.
	 */
	return 0;
}

static int bpf_skb_generic_pop(struct sk_buff *skb, u32 off, u32 len)
{
	/* skb_ensure_writable() is not needed here, as we're
	 * already working on an uncloned skb.
	 */
	if (unlikely(!pskb_may_pull(skb, off + len)))
		return -ENOMEM;

	skb_postpull_rcsum(skb, skb->data + off, len);
	memmove(skb->data + len, skb->data, off);
	__skb_pull(skb, len);

	return 0;
}

static int bpf_skb_net_hdr_push(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* There's no need for __skb_push()/__skb_pull() pair to
	 * get to the start of the mac header as we're guaranteed
	 * to always start from here under eBPF.
	 */
	ret = bpf_skb_generic_push(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header -= len;
		skb->network_header -= len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_net_hdr_pop(struct sk_buff *skb, u32 off, u32 len)
{
	bool trans_same = skb->transport_header == skb->network_header;
	int ret;

	/* Same here, __skb_push()/__skb_pull() pair not needed. */
	ret = bpf_skb_generic_pop(skb, off, len);
	if (likely(!ret)) {
		skb->mac_header += len;
		skb->network_header += len;
		if (trans_same)
			skb->transport_header = skb->network_header;
	}

	return ret;
}

static int bpf_skb_proto_4_to_6(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
		return -ENOTSUPP;

	ret = skb_cow(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV4 needs to be changed into SKB_GSO_TCPV6. */
		if (shinfo->gso_type & SKB_GSO_TCPV4) {
			shinfo->gso_type &= ~SKB_GSO_TCPV4;
			shinfo->gso_type |=  SKB_GSO_TCPV6;
		}

		/* Due to IPv6 header, MSS needs to be downgraded. */
		skb_decrease_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb_clear_hash(skb);

	return 0;
}
static int bpf_skb_proto_6_to_4(struct sk_buff *skb)
{
	const u32 len_diff = sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	u32 off = skb_mac_header_len(skb);
	int ret;

	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb))
		return -ENOTSUPP;

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* SKB_GSO_TCPV6 needs to be changed into SKB_GSO_TCPV4. */
		if (shinfo->gso_type & SKB_GSO_TCPV6) {
			shinfo->gso_type &= ~SKB_GSO_TCPV6;
			shinfo->gso_type |=  SKB_GSO_TCPV4;
		}

		/* Due to IPv4 header, MSS can be upgraded. */
		skb_increase_gso_size(shinfo, len_diff);
		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	skb->protocol = htons(ETH_P_IP);
	skb_clear_hash(skb);

	return 0;
}

static int bpf_skb_proto_xlat(struct sk_buff *skb, __be16 to_proto)
{
	__be16 from_proto = skb->protocol;

	if (from_proto == htons(ETH_P_IP) &&
	      to_proto == htons(ETH_P_IPV6))
		return bpf_skb_proto_4_to_6(skb);

	if (from_proto == htons(ETH_P_IPV6) &&
	      to_proto == htons(ETH_P_IP))
		return bpf_skb_proto_6_to_4(skb);

	return -ENOTSUPP;
}

BPF_CALL_3(bpf_skb_change_proto, struct sk_buff *, skb, __be16, proto,
	   u64, flags)
{
	int ret;

	if (unlikely(flags))
		return -EINVAL;

	/* General idea is that this helper does the basic groundwork
	 * needed for changing the protocol, and eBPF program fills the
	 * rest through bpf_skb_store_bytes(), bpf_lX_csum_replace()
	 * and other helpers, rather than passing a raw buffer here.
	 *
	 * The rationale is to keep this minimal and without a need to
	 * deal with raw packet data. F.e. even if we would pass buffers
	 * here, the program still needs to call the bpf_lX_csum_replace()
	 * helpers anyway. Plus, this way we keep also separation of
	 * concerns, since f.e. bpf_skb_store_bytes() should only take
	 * care of stores.
	 *
	 * Currently, additional options and extension header space are
	 * not supported, but flags register is reserved so we can adapt
	 * that. For offloads, we mark packet as dodgy, so that headers
	 * need to be verified first.
	 */
	ret = bpf_skb_proto_xlat(skb, proto);
	bpf_compute_data_pointers(skb);
	return ret;
}

static const struct bpf_func_proto bpf_skb_change_proto_proto = {
	.func		= bpf_skb_change_proto,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_skb_change_type, struct sk_buff *, skb, u32, pkt_type)
{
	/* We only allow a restricted subset to be changed for now. */
	if (unlikely(!skb_pkt_type_ok(skb->pkt_type) ||
		     !skb_pkt_type_ok(pkt_type)))
		return -EINVAL;

	skb->pkt_type = pkt_type;
	return 0;
}

static const struct bpf_func_proto bpf_skb_change_type_proto = {
	.func		= bpf_skb_change_type,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

static u32 bpf_skb_net_base_len(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return sizeof(struct iphdr);
	case htons(ETH_P_IPV6):
		return sizeof(struct ipv6hdr);
	default:
		return ~0U;
	}
}

#define BPF_F_ADJ_ROOM_ENCAP_L3_MASK	(BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 | \
					 BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)

#define BPF_F_ADJ_ROOM_MASK		(BPF_F_ADJ_ROOM_FIXED_GSO | \
					 BPF_F_ADJ_ROOM_ENCAP_L3_MASK | \
					 BPF_F_ADJ_ROOM_ENCAP_L4_GRE | \
					 BPF_F_ADJ_ROOM_ENCAP_L4_UDP | \
					 BPF_F_ADJ_ROOM_ENCAP_L2( \
					  BPF_ADJ_ROOM_ENCAP_L2_MASK))
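/* Illustrative sketch (not compiled; eBPF program side): how callers
 * encode the room flags validated against BPF_F_ADJ_ROOM_MASK above,
 * here for an outer IPv4 + GRE encap where the inner MAC header length
 * travels in the BPF_F_ADJ_ROOM_ENCAP_L2() bits.
 */
#if 0
	__u64 flags = BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
		      BPF_F_ADJ_ROOM_ENCAP_L4_GRE |
		      BPF_F_ADJ_ROOM_ENCAP_L2(ETH_HLEN);
#endif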
static int bpf_skb_net_grow(struct sk_buff *skb, u32 off, u32 len_diff,
			    u64 flags)
{
	u8 inner_mac_len = flags >> BPF_ADJ_ROOM_ENCAP_L2_SHIFT;
	bool encap = flags & BPF_F_ADJ_ROOM_ENCAP_L3_MASK;
	u16 mac_len = 0, inner_net = 0, inner_trans = 0;
	unsigned int gso_type = SKB_GSO_DODGY;
	int ret;

	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
		/* udp gso_size delineates datagrams, only allow if fixed */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
			return -ENOTSUPP;
	}

	ret = skb_cow_head(skb, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (encap) {
		if (skb->protocol != htons(ETH_P_IP) &&
		    skb->protocol != htons(ETH_P_IPV6))
			return -ENOTSUPP;

		if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 &&
		    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
			return -EINVAL;

		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE &&
		    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
			return -EINVAL;

		if (skb->encapsulation)
			return -EALREADY;

		mac_len = skb->network_header - skb->mac_header;
		inner_net = skb->network_header;
		if (inner_mac_len > len_diff)
			return -EINVAL;
		inner_trans = skb->transport_header;
	}

	ret = bpf_skb_net_hdr_push(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (encap) {
		skb->inner_mac_header = inner_net - inner_mac_len;
		skb->inner_network_header = inner_net;
		skb->inner_transport_header = inner_trans;
		skb_set_inner_protocol(skb, skb->protocol);

		skb->encapsulation = 1;
		skb_set_network_header(skb, mac_len);

		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP)
			gso_type |= SKB_GSO_UDP_TUNNEL;
		else if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE)
			gso_type |= SKB_GSO_GRE;
		else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
			gso_type |= SKB_GSO_IPXIP6;
		else if (flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
			gso_type |= SKB_GSO_IPXIP4;

		if (flags & BPF_F_ADJ_ROOM_ENCAP_L4_GRE ||
		    flags & BPF_F_ADJ_ROOM_ENCAP_L4_UDP) {
			int nh_len = flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 ?
				     sizeof(struct ipv6hdr) :
				     sizeof(struct iphdr);

			skb_set_transport_header(skb, mac_len + nh_len);
		}

		/* Match skb->protocol to new outer l3 protocol */
		if (skb->protocol == htons(ETH_P_IP) &&
		    flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV6)
			skb->protocol = htons(ETH_P_IPV6);
		else if (skb->protocol == htons(ETH_P_IPV6) &&
			 flags & BPF_F_ADJ_ROOM_ENCAP_L3_IPV4)
			skb->protocol = htons(ETH_P_IP);
	}

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header grow, MSS needs to be downgraded. */
		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
			skb_decrease_gso_size(shinfo, len_diff);

		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= gso_type;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static int bpf_skb_net_shrink(struct sk_buff *skb, u32 off, u32 len_diff,
			      u64 flags)
{
	int ret;

	if (flags & ~BPF_F_ADJ_ROOM_FIXED_GSO)
		return -EINVAL;

	if (skb_is_gso(skb) && !skb_is_gso_tcp(skb)) {
		/* udp gso_size delineates datagrams, only allow if fixed */
		if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ||
		    !(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
			return -ENOTSUPP;
	}

	ret = skb_unclone(skb, GFP_ATOMIC);
	if (unlikely(ret < 0))
		return ret;

	ret = bpf_skb_net_hdr_pop(skb, off, len_diff);
	if (unlikely(ret < 0))
		return ret;

	if (skb_is_gso(skb)) {
		struct skb_shared_info *shinfo = skb_shinfo(skb);

		/* Due to header shrink, MSS can be upgraded. */
		if (!(flags & BPF_F_ADJ_ROOM_FIXED_GSO))
			skb_increase_gso_size(shinfo, len_diff);

		/* Header must be checked, and gso_segs recomputed. */
		shinfo->gso_type |= SKB_GSO_DODGY;
		shinfo->gso_segs = 0;
	}

	return 0;
}

static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
	return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
			  SKB_MAX_ALLOC;
}
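/* Illustrative sketch (not compiled; TC eBPF program side): growing room
 * with bpf_skb_adjust_room() below for an IPv4 + GRE encap, in the style
 * of the kernel selftests; the program still has to write the new outer
 * headers itself afterwards.
 */
#if 0
	if (bpf_skb_adjust_room(skb, sizeof(struct iphdr) + 4 /* GRE base */,
				BPF_ADJ_ROOM_MAC,
				BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 |
				BPF_F_ADJ_ROOM_ENCAP_L4_GRE))
		return TC_ACT_SHOT;
#endif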
bpf_skb_net_shrink(skb, off, len_diff_abs, flags) : bpf_skb_net_grow(skb, off, len_diff_abs, flags); <API key>(skb); return ret; } static const struct bpf_func_proto <API key> = { .func = bpf_skb_adjust_room, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_ANYTHING, }; static u32 __bpf_skb_min_len(const struct sk_buff *skb) { u32 min_len = skb_network_offset(skb); if (<API key>(skb)) min_len = <API key>(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) min_len = <API key>(skb) + skb->csum_offset + sizeof(__sum16); return min_len; } static int bpf_skb_grow_rcsum(struct sk_buff *skb, unsigned int new_len) { unsigned int old_len = skb->len; int ret; ret = __skb_grow_rcsum(skb, new_len); if (!ret) memset(skb->data + old_len, 0, new_len - old_len); return ret; } static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len) { return __skb_trim_rcsum(skb, new_len); } static inline int <API key>(struct sk_buff *skb, u32 new_len, u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 min_len = __bpf_skb_min_len(skb); int ret; if (unlikely(flags || new_len > max_len || new_len < min_len)) return -EINVAL; if (skb->encapsulation) return -ENOTSUPP; /* The basic idea of this helper is that it's performing the * needed work to either grow or trim an skb, and eBPF program * rewrites the rest via helpers like bpf_skb_store_bytes(), * bpf_lX_csum_replace() and others rather than passing a raw * buffer here. This one is a slow path helper and intended * for replies with control messages. * * Like in <API key>(), we want to keep this rather * minimal and without protocol specifics so that we are able * to separate concerns as in bpf_skb_store_bytes() should only * be the one responsible for writing buffers. * * It's really expected to be a slow path operation here for * control message replies, so we're implicitly linearizing, * uncloning and drop offloads from the skb by this. */ ret = <API key>(skb, skb->len); if (!ret) { if (new_len > skb->len) ret = bpf_skb_grow_rcsum(skb, new_len); else if (new_len < skb->len) ret = bpf_skb_trim_rcsum(skb, new_len); if (!ret && skb_is_gso(skb)) skb_gso_reset(skb); } return ret; } BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { int ret = <API key>(skb, new_len, flags); <API key>(skb); return ret; } static const struct bpf_func_proto <API key> = { .func = bpf_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len, u64, flags) { int ret = <API key>(skb, new_len, flags); <API key>(skb); return ret; } static const struct bpf_func_proto <API key> = { .func = sk_skb_change_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static inline int <API key>(struct sk_buff *skb, u32 head_room, u64 flags) { u32 max_len = __bpf_skb_max_len(skb); u32 new_len = skb->len + head_room; int ret; if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) || new_len < skb->len)) return -EINVAL; ret = skb_cow(skb, head_room); if (likely(!ret)) { /* Idea for this helper is that we currently only * allow to expand on mac header. This means that * skb->protocol network header, etc, stay as is. * Compared to bpf_skb_change_tail(), we're more * flexible due to not needing to linearize or * reset GSO. 
	 * Intention for this helper is to be
		 * used by an L3 skb that needs to push mac header
		 * for redirection into L2 device.
		 */
		__skb_push(skb, head_room);
		memset(skb->data, 0, head_room);
		<API key>(skb);
	}

	return ret;
}

BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	int ret = <API key>(skb, head_room, flags);

	<API key>(skb);
	return ret;
}

static const struct bpf_func_proto <API key> = {
	.func		= bpf_skb_change_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
	   u64, flags)
{
	int ret = <API key>(skb, head_room, flags);

	<API key>(skb);
	return ret;
}

static const struct bpf_func_proto <API key> = {
	.func		= sk_skb_change_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
	.arg3_type	= ARG_ANYTHING,
};

static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
	return <API key>(xdp) ? 0 : xdp->data - xdp->data_meta;
}

BPF_CALL_2(bpf_xdp_adjust_head, struct xdp_buff *, xdp, int, offset)
{
	void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame);
	unsigned long metalen = xdp_get_metalen(xdp);
	void *data_start = xdp_frame_end + metalen;
	void *data = xdp->data + offset;

	if (unlikely(data < data_start ||
		     data > xdp->data_end - ETH_HLEN))
		return -EINVAL;

	if (metalen)
		memmove(xdp->data_meta + offset,
			xdp->data_meta, metalen);
	xdp->data_meta += offset;
	xdp->data = data;

	return 0;
}

static const struct bpf_func_proto <API key> = {
	.func		= bpf_xdp_adjust_head,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset)
{
	void *data_end = xdp->data_end + offset;

	/* only shrinking is allowed for now.
*/ if (unlikely(offset >= 0)) return -EINVAL; if (unlikely(data_end < xdp->data + ETH_HLEN)) return -EINVAL; xdp->data_end = data_end; return 0; } static const struct bpf_func_proto <API key> = { .func = bpf_xdp_adjust_tail, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; BPF_CALL_2(bpf_xdp_adjust_meta, struct xdp_buff *, xdp, int, offset) { void *xdp_frame_end = xdp->data_hard_start + sizeof(struct xdp_frame); void *meta = xdp->data_meta + offset; unsigned long metalen = xdp->data - meta; if (<API key>(xdp)) return -ENOTSUPP; if (unlikely(meta < xdp_frame_end || meta > xdp->data)) return -EINVAL; if (unlikely((metalen & (sizeof(__u32) - 1)) || (metalen > 32))) return -EACCES; xdp->data_meta = meta; return 0; } static const struct bpf_func_proto <API key> = { .func = bpf_xdp_adjust_meta, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; static int __bpf_tx_xdp(struct net_device *dev, struct bpf_map *map, struct xdp_buff *xdp, u32 index) { struct xdp_frame *xdpf; int err, sent; if (!dev->netdev_ops->ndo_xdp_xmit) { return -EOPNOTSUPP; } err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data); if (unlikely(err)) return err; xdpf = <API key>(xdp); if (unlikely(!xdpf)) return -EOVERFLOW; sent = dev->netdev_ops->ndo_xdp_xmit(dev, 1, &xdpf, XDP_XMIT_FLUSH); if (sent <= 0) return sent; return 0; } static noinline int <API key>(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_redirect_info *ri) { struct net_device *fwd; u32 index = ri->ifindex; int err; fwd = <API key>(dev_net(dev), index); ri->ifindex = 0; if (unlikely(!fwd)) { err = -EINVAL; goto err; } err = __bpf_tx_xdp(fwd, NULL, xdp, 0); if (unlikely(err)) goto err; _trace_xdp_redirect(dev, xdp_prog, index); return 0; err: <API key>(dev, xdp_prog, index, err); return err; } static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd, struct bpf_map *map, struct xdp_buff *xdp, u32 index) { int err; switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: { struct bpf_dtab_netdev *dst = fwd; err = dev_map_enqueue(dst, xdp, dev_rx); if (unlikely(err)) return err; <API key>(map, index); break; } case BPF_MAP_TYPE_CPUMAP: { struct bpf_cpu_map_entry *rcpu = fwd; err = cpu_map_enqueue(rcpu, xdp, dev_rx); if (unlikely(err)) return err; <API key>(map, index); break; } case BPF_MAP_TYPE_XSKMAP: { struct xdp_sock *xs = fwd; err = __xsk_map_redirect(map, xdp, xs); return err; } default: break; } return 0; } void xdp_do_flush_map(void) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = ri->map_to_flush; ri->map_to_flush = NULL; if (map) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: __dev_map_flush(map); break; case BPF_MAP_TYPE_CPUMAP: __cpu_map_flush(map); break; case BPF_MAP_TYPE_XSKMAP: __xsk_map_flush(map); break; default: break; } } } EXPORT_SYMBOL_GPL(xdp_do_flush_map); static inline void *<API key>(struct bpf_map *map, u32 index) { switch (map->map_type) { case BPF_MAP_TYPE_DEVMAP: return <API key>(map, index); case BPF_MAP_TYPE_CPUMAP: return <API key>(map, index); case BPF_MAP_TYPE_XSKMAP: return <API key>(map, index); default: return NULL; } } void <API key>(struct bpf_map *map) { struct bpf_redirect_info *ri; int cpu; <API key>(cpu) { ri = per_cpu_ptr(&bpf_redirect_info, cpu); /* Avoid polluting remote cacheline due to writes if * not needed. Once we pass this test, we need the * cmpxchg() to make sure it hasn't been changed in * the meantime by remote CPU. 
*/ if (unlikely(READ_ONCE(ri->map) == map)) cmpxchg(&ri->map, map, NULL); } } static int xdp_do_redirect_map(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_map *map, struct bpf_redirect_info *ri) { u32 index = ri->ifindex; void *fwd = NULL; int err; ri->ifindex = 0; WRITE_ONCE(ri->map, NULL); fwd = <API key>(map, index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } if (ri->map_to_flush && unlikely(ri->map_to_flush != map)) xdp_do_flush_map(); err = __bpf_tx_xdp_map(dev, fwd, map, xdp, index); if (unlikely(err)) goto err; ri->map_to_flush = map; <API key>(dev, xdp_prog, fwd, map, index); return 0; err: <API key>(dev, xdp_prog, fwd, map, index, err); return err; } int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); if (likely(map)) return xdp_do_redirect_map(dev, xdp, xdp_prog, map, ri); return <API key>(dev, xdp, xdp_prog, ri); } EXPORT_SYMBOL_GPL(xdp_do_redirect); static int <API key>(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog, struct bpf_map *map) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); u32 index = ri->ifindex; void *fwd = NULL; int err = 0; ri->ifindex = 0; WRITE_ONCE(ri->map, NULL); fwd = <API key>(map, index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } if (map->map_type == BPF_MAP_TYPE_DEVMAP) { struct bpf_dtab_netdev *dst = fwd; err = <API key>(dst, skb, xdp_prog); if (unlikely(err)) goto err; } else if (map->map_type == BPF_MAP_TYPE_XSKMAP) { struct xdp_sock *xs = fwd; err = xsk_generic_rcv(xs, xdp); if (err) goto err; consume_skb(skb); } else { /* TODO: Handle BPF_MAP_TYPE_CPUMAP */ err = -EBADRQC; goto err; } <API key>(dev, xdp_prog, fwd, map, index); return 0; err: <API key>(dev, xdp_prog, fwd, map, index, err); return err; } int <API key>(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); struct bpf_map *map = READ_ONCE(ri->map); u32 index = ri->ifindex; struct net_device *fwd; int err = 0; if (map) return <API key>(dev, skb, xdp, xdp_prog, map); ri->ifindex = 0; fwd = <API key>(dev_net(dev), index); if (unlikely(!fwd)) { err = -EINVAL; goto err; } err = xdp_ok_fwd_dev(fwd, skb->len); if (unlikely(err)) goto err; skb->dev = fwd; _trace_xdp_redirect(dev, xdp_prog, index); generic_xdp_tx(skb, xdp_prog); return 0; err: <API key>(dev, xdp_prog, index, err); return err; } EXPORT_SYMBOL_GPL(<API key>); BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; ri->ifindex = ifindex; ri->flags = flags; WRITE_ONCE(ri->map, NULL); return XDP_REDIRECT; } static const struct bpf_func_proto <API key> = { .func = bpf_xdp_redirect, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_ANYTHING, .arg2_type = ARG_ANYTHING, }; BPF_CALL_3(<API key>, struct bpf_map *, map, u32, ifindex, u64, flags) { struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); if (unlikely(flags)) return XDP_ABORTED; ri->ifindex = ifindex; ri->flags = flags; WRITE_ONCE(ri->map, map); return XDP_REDIRECT; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_CONST_MAP_PTR, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; static unsigned long 
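/* Example: the redirect machinery above (xdp_do_redirect() and friends) is
 * driven from a program via bpf_redirect_map(). A minimal devmap sketch,
 * assuming libbpf BTF-style map definitions; map name and slot contents
 * are illustrative:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_DEVMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, __u32);
 *	} tx_ports SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_fwd(struct xdp_md *ctx)
 *	{
 *		// slot 0 holds the egress ifindex; flags must be 0 here
 *		return bpf_redirect_map(&tx_ports, 0, 0);
 *	}
 *
 * On success the helper returns XDP_REDIRECT and the actual transmit is
 * batched through ri->map_to_flush / xdp_do_flush_map() as seen above.
 */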
bpf_skb_copy(void *dst_buff, const void *skb, unsigned long off, unsigned long len) { void *ptr = skb_header_pointer(skb, off, len, dst_buff); if (unlikely(!ptr)) return len; if (ptr != dst_buff) memcpy(dst_buff, ptr, len); return 0; } BPF_CALL_5(<API key>, struct sk_buff *, skb, struct bpf_map *, map, u64, flags, void *, meta, u64, meta_size) { u64 skb_size = (flags & BPF_F_CTXLEN_MASK) >> 32; if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; if (unlikely(skb_size > skb->len)) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, skb, skb_size, bpf_skb_copy); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = <API key>, }; static unsigned short bpf_tunnel_key_af(u64 flags) { return flags & BPF_F_TUNINFO_IPV6 ? AF_INET6 : AF_INET; } BPF_CALL_4(<API key>, struct sk_buff *, skb, struct bpf_tunnel_key *, to, u32, size, u64, flags) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); u8 compat[sizeof(struct bpf_tunnel_key)]; void *to_orig = to; int err; if (unlikely(!info || (flags & ~(BPF_F_TUNINFO_IPV6)))) { err = -EINVAL; goto err_clear; } if (ip_tunnel_info_af(info) != bpf_tunnel_key_af(flags)) { err = -EPROTO; goto err_clear; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) { err = -EINVAL; switch (size) { case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): goto set_compat; case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. */ if (ip_tunnel_info_af(info) != AF_INET) goto err_clear; set_compat: to = (struct bpf_tunnel_key *)compat; break; default: goto err_clear; } } to->tunnel_id = be64_to_cpu(info->key.tun_id); to->tunnel_tos = info->key.tos; to->tunnel_ttl = info->key.ttl; to->tunnel_ext = 0; if (flags & BPF_F_TUNINFO_IPV6) { memcpy(to->remote_ipv6, &info->key.u.ipv6.src, sizeof(to->remote_ipv6)); to->tunnel_label = be32_to_cpu(info->key.label); } else { to->remote_ipv4 = be32_to_cpu(info->key.u.ipv4.src); memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); to->tunnel_label = 0; } if (unlikely(size != sizeof(struct bpf_tunnel_key))) memcpy(to_orig, to, size); return 0; err_clear: memset(to_orig, 0, size); return err; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = <API key>, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_3(<API key>, struct sk_buff *, skb, u8 *, to, u32, size) { const struct ip_tunnel_info *info = skb_tunnel_info(skb); int err; if (unlikely(!info || !(info->key.tun_flags & <API key>))) { err = -ENOENT; goto err_clear; } if (unlikely(size < info->options_len)) { err = -ENOMEM; goto err_clear; } <API key>(to, info); if (size > info->options_len) memset(to + info->options_len, 0, size - info->options_len); return info->options_len; err_clear: memset(to, 0, size); return err; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = <API key>, .arg3_type = ARG_CONST_SIZE, }; static struct metadata_dst __percpu *md_dst; BPF_CALL_4(<API key>, struct sk_buff *, skb, const struct bpf_tunnel_key *, from, u32, size, u64, flags) { struct metadata_dst *md = this_cpu_ptr(md_dst); u8 
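/* Example: reading tunnel metadata with bpf_skb_get_tunnel_key() from a tc
 * program attached to a metadata-mode (collect_md) tunnel device. Sketch
 * only; the tunnel id compared against is arbitrary:
 *
 *	SEC("tc")
 *	int tunnel_rx(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {};
 *
 *		if (bpf_skb_get_tunnel_key(skb, &key, sizeof(key), 0))
 *			return TC_ACT_OK;
 *		if (key.tunnel_id == 42)
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */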
compat[sizeof(struct bpf_tunnel_key)]; struct ip_tunnel_info *info; if (unlikely(flags & ~(BPF_F_TUNINFO_IPV6 | BPF_F_ZERO_CSUM_TX | BPF_F_DONT_FRAGMENT | BPF_F_SEQ_NUMBER))) return -EINVAL; if (unlikely(size != sizeof(struct bpf_tunnel_key))) { switch (size) { case offsetof(struct bpf_tunnel_key, tunnel_label): case offsetof(struct bpf_tunnel_key, tunnel_ext): case offsetof(struct bpf_tunnel_key, remote_ipv6[1]): /* Fixup deprecated structure layouts here, so we have * a common path later on. */ memcpy(compat, from, size); memset(compat + size, 0, sizeof(compat) - size); from = (const struct bpf_tunnel_key *) compat; break; default: return -EINVAL; } } if (unlikely((!(flags & BPF_F_TUNINFO_IPV6) && from->tunnel_label) || from->tunnel_ext)) return -EINVAL; skb_dst_drop(skb); dst_hold((struct dst_entry *) md); skb_dst_set(skb, (struct dst_entry *) md); info = &md->u.tun_info; memset(info, 0, sizeof(*info)); info->mode = IP_TUNNEL_INFO_TX; info->key.tun_flags = TUNNEL_KEY | TUNNEL_CSUM | TUNNEL_NOCACHE; if (flags & BPF_F_DONT_FRAGMENT) info->key.tun_flags |= <API key>; if (flags & BPF_F_ZERO_CSUM_TX) info->key.tun_flags &= ~TUNNEL_CSUM; if (flags & BPF_F_SEQ_NUMBER) info->key.tun_flags |= TUNNEL_SEQ; info->key.tun_id = cpu_to_be64(from->tunnel_id); info->key.tos = from->tunnel_tos; info->key.ttl = from->tunnel_ttl; if (flags & BPF_F_TUNINFO_IPV6) { info->mode |= IP_TUNNEL_INFO_IPV6; memcpy(&info->key.u.ipv6.dst, from->remote_ipv6, sizeof(from->remote_ipv6)); info->key.label = cpu_to_be32(from->tunnel_label) & IPV6_FLOWLABEL_MASK; } else { info->key.u.ipv4.dst = cpu_to_be32(from->remote_ipv4); } return 0; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_3(<API key>, struct sk_buff *, skb, const u8 *, from, u32, size) { struct ip_tunnel_info *info = skb_tunnel_info(skb); const struct metadata_dst *md = this_cpu_ptr(md_dst); if (unlikely(info != &md->u.tun_info || (size & (sizeof(u32) - 1)))) return -EINVAL; if (unlikely(size > IP_TUNNEL_OPTS_MAX)) return -ENOMEM; <API key>(info, from, size, <API key>); return 0; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, }; static const struct bpf_func_proto * <API key>(enum bpf_func_id which) { if (!md_dst) { struct metadata_dst __percpu *tmp; tmp = <API key>(IP_TUNNEL_OPTS_MAX, METADATA_IP_TUNNEL, GFP_KERNEL); if (!tmp) return NULL; if (cmpxchg(&md_dst, NULL, tmp)) <API key>(tmp); } switch (which) { case <API key>: return &<API key>; case <API key>: return &<API key>; default: return NULL; } } BPF_CALL_3(<API key>, struct sk_buff *, skb, struct bpf_map *, map, u32, idx) { struct bpf_array *array = container_of(map, struct bpf_array, map); struct cgroup *cgrp; struct sock *sk; sk = skb_to_full_sk(skb); if (!sk || !sk_fullsock(sk)) return -ENOENT; if (unlikely(idx >= array->map.max_entries)) return -E2BIG; cgrp = READ_ONCE(array->ptrs[idx]); if (unlikely(!cgrp)) return -EAGAIN; return <API key>(sk, cgrp); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, }; #ifdef <API key> BPF_CALL_1(bpf_skb_cgroup_id, const struct sk_buff *, skb) { struct sock *sk = 
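/* Example: the transmit-side mirror image via bpf_skb_set_tunnel_key().
 * Note remote_ipv4 is taken in host byte order (the cpu_to_be32()
 * conversion happens in the helper above). Id, address and TTL values are
 * illustrative:
 *
 *	SEC("tc")
 *	int tunnel_tx(struct __sk_buff *skb)
 *	{
 *		struct bpf_tunnel_key key = {
 *			.tunnel_id   = 42,
 *			.remote_ipv4 = 0xc0a80001,	// 192.168.0.1
 *			.tunnel_ttl  = 64,
 *		};
 *
 *		if (bpf_skb_set_tunnel_key(skb, &key, sizeof(key),
 *					   BPF_F_ZERO_CSUM_TX))
 *			return TC_ACT_SHOT;
 *		return TC_ACT_OK;
 *	}
 */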
skb_to_full_sk(skb); struct cgroup *cgrp; if (!sk || !sk_fullsock(sk)) return 0; cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); return cgrp->kn->id.id; } static const struct bpf_func_proto <API key> = { .func = bpf_skb_cgroup_id, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_2(<API key>, const struct sk_buff *, skb, int, ancestor_level) { struct sock *sk = skb_to_full_sk(skb); struct cgroup *ancestor; struct cgroup *cgrp; if (!sk || !sk_fullsock(sk)) return 0; cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); ancestor = cgroup_ancestor(cgrp, ancestor_level); if (!ancestor) return 0; return ancestor->kn->id.id; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; #endif static unsigned long bpf_xdp_copy(void *dst_buff, const void *src_buff, unsigned long off, unsigned long len) { memcpy(dst_buff, src_buff + off, len); return 0; } BPF_CALL_5(<API key>, struct xdp_buff *, xdp, struct bpf_map *, map, u64, flags, void *, meta, u64, meta_size) { u64 xdp_size = (flags & BPF_F_CTXLEN_MASK) >> 32; if (unlikely(flags & ~(BPF_F_CTXLEN_MASK | BPF_F_INDEX_MASK))) return -EINVAL; if (unlikely(xdp_size > (unsigned long)(xdp->data_end - xdp->data))) return -EFAULT; return bpf_event_output(map, flags, meta, meta_size, xdp->data, xdp_size, bpf_xdp_copy); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = <API key>, }; BPF_CALL_1(<API key>, struct sk_buff *, skb) { return skb->sk ? sock_gen_cookie(skb->sk) : 0; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(<API key>, struct bpf_sock_addr_kern *, ctx) { return sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(<API key>, struct bpf_sock_ops_kern *, ctx) { return sock_gen_cookie(ctx->sk); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_1(bpf_get_socket_uid, struct sk_buff *, skb) { struct sock *sk = sk_to_full_sk(skb->sk); kuid_t kuid; if (!sk || !sk_fullsock(sk)) return overflowuid; kuid = sock_net_uid(sock_net(sk), sk); return from_kuid_munged(sock_net(sk)->user_ns, kuid); } static const struct bpf_func_proto <API key> = { .func = bpf_get_socket_uid, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_5(<API key>, struct bpf_sock_ops_kern *, bpf_sock, struct bpf_map *, map, u64, flags, void *, data, u64, size) { if (unlikely(flags & ~(BPF_F_INDEX_MASK))) return -EINVAL; return bpf_event_output(map, flags, data, size, NULL, 0, NULL); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = <API key>, }; BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { struct sock *sk = bpf_sock->sk; int ret = 0; int val; if (!sk_fullsock(sk)) return -EINVAL; if (level == SOL_SOCKET) { if (optlen != sizeof(int)) 
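/* Example: bpf_get_socket_cookie() and bpf_get_socket_uid() above are
 * typically combined for per-socket accounting from cgroup skb hooks. A
 * minimal sketch, assuming the bpf_printk() convenience macro from
 * bpf_helpers.h; returning 1 lets the packet pass:
 *
 *	SEC("cgroup_skb/egress")
 *	int account(struct __sk_buff *skb)
 *	{
 *		__u64 cookie = bpf_get_socket_cookie(skb);
 *		__u32 uid    = bpf_get_socket_uid(skb);
 *
 *		// in practice (cookie, uid) would key a byte-counter map
 *		bpf_printk("cookie %llu uid %u\n", cookie, uid);
 *		return 1;
 *	}
 */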
return -EINVAL; val = *((int *)optval); /* Only some socketops are supported */ switch (optname) { case SO_RCVBUF: val = min_t(u32, val, sysctl_rmem_max); sk->sk_userlocks |= SOCK_RCVBUF_LOCK; sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); break; case SO_SNDBUF: val = min_t(u32, val, sysctl_wmem_max); sk->sk_userlocks |= SOCK_SNDBUF_LOCK; sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF); break; case SO_MAX_PACING_RATE: /* 32bit version */ if (val != ~0U) cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); sk->sk_max_pacing_rate = (val == ~0U) ? ~0UL : val; sk->sk_pacing_rate = min(sk->sk_pacing_rate, sk->sk_max_pacing_rate); break; case SO_PRIORITY: sk->sk_priority = val; break; case SO_RCVLOWAT: if (val < 0) val = INT_MAX; sk->sk_rcvlowat = val ? : 1; break; case SO_MARK: if (sk->sk_mark != val) { sk->sk_mark = val; sk_dst_reset(sk); } break; default: ret = -EINVAL; } #ifdef CONFIG_INET } else if (level == SOL_IP) { if (optlen != sizeof(int) || sk->sk_family != AF_INET) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case IP_TOS: if (val < -1 || val > 0xff) { ret = -EINVAL; } else { struct inet_sock *inet = inet_sk(sk); if (val == -1) val = 0; inet->tos = val; } break; default: ret = -EINVAL; } #if IS_ENABLED(CONFIG_IPV6) } else if (level == SOL_IPV6) { if (optlen != sizeof(int) || sk->sk_family != AF_INET6) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case IPV6_TCLASS: if (val < -1 || val > 0xff) { ret = -EINVAL; } else { struct ipv6_pinfo *np = inet6_sk(sk); if (val == -1) val = 0; np->tclass = val; } break; default: ret = -EINVAL; } #endif } else if (level == SOL_TCP && sk->sk_prot->setsockopt == tcp_setsockopt) { if (optname == TCP_CONGESTION) { char name[TCP_CA_NAME_MAX]; bool reinit = bpf_sock->op > <API key>; strncpy(name, optval, min_t(long, optlen, TCP_CA_NAME_MAX-1)); name[TCP_CA_NAME_MAX-1] = 0; ret = <API key>(sk, name, false, reinit); } else { struct tcp_sock *tp = tcp_sk(sk); if (optlen != sizeof(int)) return -EINVAL; val = *((int *)optval); /* Only some options are supported */ switch (optname) { case TCP_BPF_IW: if (val <= 0 || tp->data_segs_out > tp->syn_data) ret = -EINVAL; else tp->snd_cwnd = val; break; case <API key>: if (val <= 0) { ret = -EINVAL; } else { tp->snd_cwnd_clamp = val; tp->snd_ssthresh = val; } break; case TCP_SAVE_SYN: if (val < 0 || val > 1) ret = -EINVAL; else tp->save_syn = val; break; default: ret = -EINVAL; } } #endif } else { ret = -EINVAL; } return ret; } static const struct bpf_func_proto <API key> = { .func = bpf_setsockopt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE, }; BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock, int, level, int, optname, char *, optval, int, optlen) { struct sock *sk = bpf_sock->sk; if (!sk_fullsock(sk)) goto err_clear; #ifdef CONFIG_INET if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) { struct <API key> *icsk; struct tcp_sock *tp; switch (optname) { case TCP_CONGESTION: icsk = inet_csk(sk); if (!icsk->icsk_ca_ops || optlen <= 1) goto err_clear; strncpy(optval, icsk->icsk_ca_ops->name, optlen); optval[optlen - 1] = 0; break; case TCP_SAVED_SYN: tp = tcp_sk(sk); if (optlen <= 0 || !tp->saved_syn || optlen > tp->saved_syn[0]) goto err_clear; memcpy(optval, tp->saved_syn + 1, optlen); break; default: goto err_clear; } } else if 
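/* Example: bpf_setsockopt() above from a sockops program, switching new
 * active connections to a different congestion control and bumping the
 * send buffer. A sketch assuming the usual SOL_ and TCP_ constants from
 * the socket API headers; values are illustrative:
 *
 *	SEC("sockops")
 *	int tune_tcp(struct bpf_sock_ops *skops)
 *	{
 *		char cc[] = "reno";
 *		int sndbuf = 1 << 20;
 *
 *		if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB) {
 *			bpf_setsockopt(skops, SOL_TCP, TCP_CONGESTION,
 *				       cc, sizeof(cc));
 *			bpf_setsockopt(skops, SOL_SOCKET, SO_SNDBUF,
 *				       &sndbuf, sizeof(sndbuf));
 *		}
 *		return 1;
 *	}
 *
 * Note the SO_SNDBUF branch above doubles the value and clamps it against
 * sysctl_wmem_max, mirroring what setsockopt(2) does.
 */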
(level == SOL_IP) { struct inet_sock *inet = inet_sk(sk); if (optlen != sizeof(int) || sk->sk_family != AF_INET) goto err_clear; /* Only some options are supported */ switch (optname) { case IP_TOS: *((int *)optval) = (int)inet->tos; break; default: goto err_clear; } #if IS_ENABLED(CONFIG_IPV6) } else if (level == SOL_IPV6) { struct ipv6_pinfo *np = inet6_sk(sk); if (optlen != sizeof(int) || sk->sk_family != AF_INET6) goto err_clear; /* Only some options are supported */ switch (optname) { case IPV6_TCLASS: *((int *)optval) = (int)np->tclass; break; default: goto err_clear; } #endif } else { goto err_clear; } return 0; #endif err_clear: memset(optval, 0, optlen); return -EINVAL; } static const struct bpf_func_proto <API key> = { .func = bpf_getsockopt, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, .arg4_type = <API key>, .arg5_type = ARG_CONST_SIZE, }; BPF_CALL_2(<API key>, struct bpf_sock_ops_kern *, bpf_sock, int, argval) { struct sock *sk = bpf_sock->sk; int val = argval & <API key>; if (!IS_ENABLED(CONFIG_INET) || !sk_fullsock(sk)) return -EINVAL; tcp_sk(sk)-><API key> = val; return argval & (~<API key>); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, }; const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly; EXPORT_SYMBOL_GPL(ipv6_bpf_stub); BPF_CALL_3(bpf_bind, struct bpf_sock_addr_kern *, ctx, struct sockaddr *, addr, int, addr_len) { #ifdef CONFIG_INET struct sock *sk = ctx->sk; int err; /* Binding to port can be expensive so it's prohibited in the helper. * Only binding to IP is supported. */ err = -EINVAL; if (addr_len < offsetofend(struct sockaddr, sa_family)) return err; if (addr->sa_family == AF_INET) { if (addr_len < sizeof(struct sockaddr_in)) return err; if (((struct sockaddr_in *)addr)->sin_port != htons(0)) return err; return __inet_bind(sk, addr, addr_len, true, false); #if IS_ENABLED(CONFIG_IPV6) } else if (addr->sa_family == AF_INET6) { if (addr_len < SIN6_LEN_RFC2133) return err; if (((struct sockaddr_in6 *)addr)->sin6_port != htons(0)) return err; /* ipv6_bpf_stub cannot be NULL, since it's called from * <API key> hook and ipv6 is already loaded */ return ipv6_bpf_stub->inet6_bind(sk, addr, addr_len, true, false); #endif /* CONFIG_IPV6 */ } #endif /* CONFIG_INET */ return -EAFNOSUPPORT; } static const struct bpf_func_proto bpf_bind_proto = { .func = bpf_bind, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, }; #ifdef CONFIG_XFRM BPF_CALL_5(<API key>, struct sk_buff *, skb, u32, index, struct bpf_xfrm_state *, to, u32, size, u64, flags) { const struct sec_path *sp = skb_sec_path(skb); const struct xfrm_state *x; if (!sp || unlikely(index >= sp->len || flags)) goto err_clear; x = sp->xvec[index]; if (unlikely(size != sizeof(struct bpf_xfrm_state))) goto err_clear; to->reqid = x->props.reqid; to->spi = x->id.spi; to->family = x->props.family; to->ext = 0; if (to->family == AF_INET6) { memcpy(to->remote_ipv6, x->props.saddr.a6, sizeof(to->remote_ipv6)); } else { to->remote_ipv4 = x->props.saddr.a4; memset(&to->remote_ipv6[1], 0, sizeof(__u32) * 3); } return 0; err_clear: memset(to, 0, size); return -EINVAL; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = 
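/* Example: bpf_bind() above, called from a cgroup/connect4 program to pin
 * the source address of outgoing connections. sin_port must stay 0, as
 * enforced by the helper; the address is illustrative:
 *
 *	SEC("cgroup/connect4")
 *	int pin_saddr(struct bpf_sock_addr *ctx)
 *	{
 *		struct sockaddr_in sa = {
 *			.sin_family = AF_INET,
 *			.sin_addr.s_addr = bpf_htonl(0x0a000001), // 10.0.0.1
 *		};
 *
 *		if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)))
 *			return 0;	// reject the connect(2)
 *		return 1;
 *	}
 */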
<API key>, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; #endif #if IS_ENABLED(CONFIG_INET) || IS_ENABLED(CONFIG_IPV6) static int <API key>(struct bpf_fib_lookup *params, const struct neighbour *neigh, const struct net_device *dev) { memcpy(params->dmac, neigh->ha, ETH_ALEN); memcpy(params->smac, dev->dev_addr, ETH_ALEN); params->h_vlan_TCI = 0; params->h_vlan_proto = 0; params->ifindex = dev->ifindex; return 0; } #endif #if IS_ENABLED(CONFIG_INET) static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 flags, bool check_mtu) { struct fib_nh_common *nhc; struct in_device *in_dev; struct neighbour *neigh; struct net_device *dev; struct fib_result res; struct flowi4 fl4; int err; u32 mtu; dev = <API key>(net, params->ifindex); if (unlikely(!dev)) return -ENODEV; /* verify forwarding is enabled on this interface */ in_dev = __in_dev_get_rcu(dev); if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev))) return <API key>; if (flags & <API key>) { fl4.flowi4_iif = 1; fl4.flowi4_oif = params->ifindex; } else { fl4.flowi4_iif = params->ifindex; fl4.flowi4_oif = 0; } fl4.flowi4_tos = params->tos & IPTOS_RT_MASK; fl4.flowi4_scope = RT_SCOPE_UNIVERSE; fl4.flowi4_flags = 0; fl4.flowi4_proto = params->l4_protocol; fl4.daddr = params->ipv4_dst; fl4.saddr = params->ipv4_src; fl4.fl4_sport = params->sport; fl4.fl4_dport = params->dport; if (flags & <API key>) { u32 tbid = <API key>(dev) ? : RT_TABLE_MAIN; struct fib_table *tb; tb = fib_get_table(net, tbid); if (unlikely(!tb)) return <API key>; err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF); } else { fl4.flowi4_mark = 0; fl4.flowi4_secid = 0; fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_uid = sock_net_uid(net, NULL); err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF); } if (err) { /* map fib lookup errors to RTN_ type */ if (err == -EINVAL) return <API key>; if (err == -EHOSTUNREACH) return <API key>; if (err == -EACCES) return <API key>; return <API key>; } if (res.type != RTN_UNICAST) return <API key>; if (res.fi->fib_nhs > 1) fib_select_path(net, &res, &fl4, NULL); if (check_mtu) { mtu = <API key>(&res, params->ipv4_dst); if (params->tot_len > mtu) return <API key>; } nhc = res.nhc; /* do not handle lwt encaps right now */ if (nhc->nhc_lwtstate) return <API key>; dev = nhc->nhc_dev; params->rt_metric = res.fi->fib_priority; /* xdp and cls_bpf programs are run in RCU-bh so * rcu_read_lock_bh is not needed here */ if (likely(nhc->nhc_gw_family != AF_INET6)) { if (nhc->nhc_gw_family) params->ipv4_dst = nhc->nhc_gw.ipv4; neigh = <API key>(dev, (__force u32)params->ipv4_dst); } else { struct in6_addr *dst = (struct in6_addr *)params->ipv6_dst; params->family = AF_INET6; *dst = nhc->nhc_gw.ipv6; neigh = <API key>(dev, dst); } if (!neigh) return <API key>; return <API key>(params, neigh, dev); } #endif #if IS_ENABLED(CONFIG_IPV6) static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params, u32 flags, bool check_mtu) { struct in6_addr *src = (struct in6_addr *) params->ipv6_src; struct in6_addr *dst = (struct in6_addr *) params->ipv6_dst; struct fib6_result res = {}; struct neighbour *neigh; struct net_device *dev; struct inet6_dev *idev; struct flowi6 fl6; int strict = 0; int oif, err; u32 mtu; /* link local addresses are never forwarded */ if (rt6_need_strict(dst) || rt6_need_strict(src)) return <API key>; dev = <API key>(net, params->ifindex); if (unlikely(!dev)) return -ENODEV; idev = <API key>(dev); if (unlikely(!idev || !net->ipv6.devconf_all->forwarding)) return <API key>; if (flags & <API key>) 
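/* Example: the IPv4 FIB lookup above (and its IPv6 twin below) back the
 * uapi bpf_fib_lookup() helper, commonly paired with bpf_redirect() for a
 * software-router fast path. Abridged sketch; header parsing and the MAC
 * rewrite are elided as marked:
 *
 *	SEC("xdp")
 *	int xdp_router(struct xdp_md *ctx)
 *	{
 *		struct bpf_fib_lookup fib = {};
 *
 *		// ... parse Ethernet/IPv4 headers, then fill fib.family,
 *		// fib.ipv4_src, fib.ipv4_dst, fib.tot_len and set
 *		// fib.ifindex = ctx->ingress_ifindex ...
 *		if (bpf_fib_lookup(ctx, &fib, sizeof(fib), 0) !=
 *		    BPF_FIB_LKUP_RET_SUCCESS)
 *			return XDP_PASS;
 *		// ... rewrite MACs from fib.smac and fib.dmac ...
 *		return bpf_redirect(fib.ifindex, 0);
 *	}
 */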
{ fl6.flowi6_iif = 1; oif = fl6.flowi6_oif = params->ifindex; } else { oif = fl6.flowi6_iif = params->ifindex; fl6.flowi6_oif = 0; strict = <API key>; } fl6.flowlabel = params->flowinfo; fl6.flowi6_scope = 0; fl6.flowi6_flags = 0; fl6.mp_hash = 0; fl6.flowi6_proto = params->l4_protocol; fl6.daddr = *dst; fl6.saddr = *src; fl6.fl6_sport = params->sport; fl6.fl6_dport = params->dport; if (flags & <API key>) { u32 tbid = <API key>(dev) ? : RT_TABLE_MAIN; struct fib6_table *tb; tb = ipv6_stub->fib6_get_table(net, tbid); if (unlikely(!tb)) return <API key>; err = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, &res, strict); } else { fl6.flowi6_mark = 0; fl6.flowi6_secid = 0; fl6.flowi6_tun_key.tun_id = 0; fl6.flowi6_uid = sock_net_uid(net, NULL); err = ipv6_stub->fib6_lookup(net, oif, &fl6, &res, strict); } if (unlikely(err || IS_ERR_OR_NULL(res.f6i) || res.f6i == net->ipv6.fib6_null_entry)) return <API key>; switch (res.fib6_type) { /* only unicast is forwarded */ case RTN_UNICAST: break; case RTN_BLACKHOLE: return <API key>; case RTN_UNREACHABLE: return <API key>; case RTN_PROHIBIT: return <API key>; default: return <API key>; } ipv6_stub->fib6_select_path(net, &res, &fl6, fl6.flowi6_oif, fl6.flowi6_oif != 0, NULL, strict); if (check_mtu) { mtu = ipv6_stub->ip6_mtu_from_fib6(&res, dst, src); if (params->tot_len > mtu) return <API key>; } if (res.nh->fib_nh_lws) return <API key>; if (res.nh->fib_nh_gw_family) *dst = res.nh->fib_nh_gw6; dev = res.nh->fib_nh_dev; params->rt_metric = res.f6i->fib6_metric; /* xdp and cls_bpf programs are run in RCU-bh so rcu_read_lock_bh is * not needed here. */ neigh = <API key>(dev, dst); if (!neigh) return <API key>; return <API key>(params, neigh, dev); } #endif BPF_CALL_4(bpf_xdp_fib_lookup, struct xdp_buff *, ctx, struct bpf_fib_lookup *, params, int, plen, u32, flags) { if (plen < sizeof(*params)) return -EINVAL; if (flags & ~(<API key> | <API key>)) return -EINVAL; switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: return bpf_ipv4_fib_lookup(dev_net(ctx->rxq->dev), params, flags, true); #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: return bpf_ipv6_fib_lookup(dev_net(ctx->rxq->dev), params, flags, true); #endif } return -EAFNOSUPPORT; } static const struct bpf_func_proto <API key> = { .func = bpf_xdp_fib_lookup, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb, struct bpf_fib_lookup *, params, int, plen, u32, flags) { struct net *net = dev_net(skb->dev); int rc = -EAFNOSUPPORT; if (plen < sizeof(*params)) return -EINVAL; if (flags & ~(<API key> | <API key>)) return -EINVAL; switch (params->family) { #if IS_ENABLED(CONFIG_INET) case AF_INET: rc = bpf_ipv4_fib_lookup(net, params, flags, false); break; #endif #if IS_ENABLED(CONFIG_IPV6) case AF_INET6: rc = bpf_ipv6_fib_lookup(net, params, flags, false); break; #endif } if (!rc) { struct net_device *dev; dev = <API key>(net, params->ifindex); if (!is_skb_forwardable(dev, skb)) rc = <API key>; } return rc; } static const struct bpf_func_proto <API key> = { .func = bpf_skb_fib_lookup, .gpl_only = true, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, }; #if IS_ENABLED(<API key>) static int bpf_push_seg6_encap(struct sk_buff *skb, u32 type, void *hdr, u32 len) { int err; struct ipv6_sr_hdr *srh = (struct ipv6_sr_hdr *)hdr; if 
(!seg6_validate_srh(srh, len)) return -EINVAL; switch (type) { case <API key>: if (skb->protocol != htons(ETH_P_IPV6)) return -EBADMSG; err = seg6_do_srh_inline(skb, srh); break; case BPF_LWT_ENCAP_SEG6: <API key>(skb); skb->encapsulation = 1; err = seg6_do_srh_encap(skb, srh, IPPROTO_IPV6); break; default: return -EINVAL; } <API key>(skb); if (err) return err; ipv6_hdr(skb)->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); <API key>(skb, sizeof(struct ipv6hdr)); return seg6_lookup_nexthop(skb, NULL, 0); } #endif /* <API key> */ #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) static int bpf_push_ip_encap(struct sk_buff *skb, void *hdr, u32 len, bool ingress) { return <API key>(skb, hdr, len, ingress); } #endif BPF_CALL_4(<API key>, struct sk_buff *, skb, u32, type, void *, hdr, u32, len) { switch (type) { #if IS_ENABLED(<API key>) case BPF_LWT_ENCAP_SEG6: case <API key>: return bpf_push_seg6_encap(skb, type, hdr, len); #endif #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) case BPF_LWT_ENCAP_IP: return bpf_push_ip_encap(skb, hdr, len, true /* ingress */); #endif default: return -EINVAL; } } BPF_CALL_4(<API key>, struct sk_buff *, skb, u32, type, void *, hdr, u32, len) { switch (type) { #if IS_ENABLED(CONFIG_LWTUNNEL_BPF) case BPF_LWT_ENCAP_IP: return bpf_push_ip_encap(skb, hdr, len, false /* egress */); #endif default: return -EINVAL; } } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; #if IS_ENABLED(<API key>) BPF_CALL_4(<API key>, struct sk_buff *, skb, u32, offset, const void *, from, u32, len) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh = srh_state->srh; void *srh_tlvs, *srh_end, *ptr; int srhoff = 0; if (srh == NULL) return -EINVAL; srh_tlvs = (void *)((char *)srh + ((srh->first_segment + 1) << 4)); srh_end = (void *)((char *)srh + sizeof(*srh) + srh_state->hdrlen); ptr = skb->data + offset; if (ptr >= srh_tlvs && ptr + len <= srh_end) srh_state->valid = false; else if (ptr < (void *)&srh->flags || ptr + len > (void *)&srh->segments) return -EFAULT; if (unlikely(<API key>(skb, offset + len))) return -EFAULT; if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) return -EINVAL; srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); memcpy(skb->data + offset, from, len); return 0; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; static void <API key>(struct sk_buff *skb) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); int srhoff = 0; if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) { srh_state->srh = NULL; } else { srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_state->hdrlen = srh_state->srh->hdrlen << 3; srh_state->valid = true; } } BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, u32, action, void *, param, u32, param_len) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); int hdroff = 0; int err; switch (action) { case <API key>: if (!<API key>(skb)) 
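/* Example: bpf_lwt_push_encap() above, attaching an SRv6 encapsulation
 * from an lwt_in program. The buffer must hold a valid struct ipv6_sr_hdr
 * plus its segment list (seg6_validate_srh() above checks this); the
 * one-segment buffer below is a zeroed stand-in the program would fill:
 *
 *	SEC("lwt_in")
 *	int srv6_encap(struct __sk_buff *skb)
 *	{
 *		char srh[24] = {};	// 8-byte SRH + one 16-byte segment
 *
 *		// ... fill srh with a prebuilt segment routing header ...
 *		if (bpf_lwt_push_encap(skb, BPF_LWT_ENCAP_SEG6,
 *				       srh, sizeof(srh)))
 *			return BPF_DROP;
 *		return BPF_OK;
 *	}
 */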
return -EBADMSG; if (param_len != sizeof(struct in6_addr)) return -EINVAL; return seg6_lookup_nexthop(skb, (struct in6_addr *)param, 0); case <API key>: if (!<API key>(skb)) return -EBADMSG; if (param_len != sizeof(int)) return -EINVAL; return seg6_lookup_nexthop(skb, NULL, *(int *)param); case <API key>: if (!<API key>(skb)) return -EBADMSG; if (param_len != sizeof(int)) return -EINVAL; if (ipv6_find_hdr(skb, &hdroff, IPPROTO_IPV6, NULL, NULL) < 0) return -EBADMSG; if (!pskb_pull(skb, hdroff)) return -EBADMSG; skb_postpull_rcsum(skb, skb_network_header(skb), hdroff); <API key>(skb); <API key>(skb); skb->encapsulation = 0; <API key>(skb); <API key>(skb); return seg6_lookup_nexthop(skb, NULL, *(int *)param); case <API key>: if (srh_state->srh && !<API key>(skb)) return -EBADMSG; err = bpf_push_seg6_encap(skb, <API key>, param, param_len); if (!err) <API key>(skb); return err; case <API key>: if (srh_state->srh && !<API key>(skb)) return -EBADMSG; err = bpf_push_seg6_encap(skb, BPF_LWT_ENCAP_SEG6, param, param_len); if (!err) <API key>(skb); return err; default: return -EINVAL; } } static const struct bpf_func_proto <API key> = { .func = bpf_lwt_seg6_action, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_PTR_TO_MEM, .arg4_type = ARG_CONST_SIZE }; BPF_CALL_3(<API key>, struct sk_buff *, skb, u32, offset, s32, len) { struct seg6_bpf_srh_state *srh_state = this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh = srh_state->srh; void *srh_end, *srh_tlvs, *ptr; struct ipv6hdr *hdr; int srhoff = 0; int ret; if (unlikely(srh == NULL)) return -EINVAL; srh_tlvs = (void *)((unsigned char *)srh + sizeof(*srh) + ((srh->first_segment + 1) << 4)); srh_end = (void *)((unsigned char *)srh + sizeof(*srh) + srh_state->hdrlen); ptr = skb->data + offset; if (unlikely(ptr < srh_tlvs || ptr > srh_end)) return -EFAULT; if (unlikely(len < 0 && (void *)((char *)ptr - len) > srh_end)) return -EFAULT; if (len > 0) { ret = skb_cow_head(skb, len); if (unlikely(ret < 0)) return ret; ret = <API key>(skb, offset, len); } else { ret = bpf_skb_net_hdr_pop(skb, offset, -1 * len); } <API key>(skb); if (unlikely(ret < 0)) return ret; hdr = (struct ipv6hdr *)skb->data; hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); if (ipv6_find_hdr(skb, &srhoff, IPPROTO_ROUTING, NULL, NULL) < 0) return -EINVAL; srh_state->srh = (struct ipv6_sr_hdr *)(skb->data + srhoff); srh_state->hdrlen += len; srh_state->valid = false; return 0; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = ARG_ANYTHING, }; #endif /* <API key> */ #define <API key>(md_type, CONVERT) \ do { \ switch (si->off) { \ case offsetof(md_type, snd_cwnd): \ CONVERT(snd_cwnd); break; \ case offsetof(md_type, srtt_us): \ CONVERT(srtt_us); break; \ case offsetof(md_type, snd_ssthresh): \ CONVERT(snd_ssthresh); break; \ case offsetof(md_type, rcv_nxt): \ CONVERT(rcv_nxt); break; \ case offsetof(md_type, snd_nxt): \ CONVERT(snd_nxt); break; \ case offsetof(md_type, snd_una): \ CONVERT(snd_una); break; \ case offsetof(md_type, mss_cache): \ CONVERT(mss_cache); break; \ case offsetof(md_type, ecn_flags): \ CONVERT(ecn_flags); break; \ case offsetof(md_type, rate_delivered): \ CONVERT(rate_delivered); break; \ case offsetof(md_type, rate_interval_us): \ CONVERT(rate_interval_us); break; \ case offsetof(md_type, packets_out): \ CONVERT(packets_out); break; \ case 
offsetof(md_type, retrans_out): \ CONVERT(retrans_out); break; \ case offsetof(md_type, total_retrans): \ CONVERT(total_retrans); break; \ case offsetof(md_type, segs_in): \ CONVERT(segs_in); break; \ case offsetof(md_type, data_segs_in): \ CONVERT(data_segs_in); break; \ case offsetof(md_type, segs_out): \ CONVERT(segs_out); break; \ case offsetof(md_type, data_segs_out): \ CONVERT(data_segs_out); break; \ case offsetof(md_type, lost_out): \ CONVERT(lost_out); break; \ case offsetof(md_type, sacked_out): \ CONVERT(sacked_out); break; \ case offsetof(md_type, bytes_received): \ CONVERT(bytes_received); break; \ case offsetof(md_type, bytes_acked): \ CONVERT(bytes_acked); break; \ } \ } while (0) #ifdef CONFIG_INET static struct sock *sk_lookup(struct net *net, struct bpf_sock_tuple *tuple, int dif, int sdif, u8 family, u8 proto) { bool refcounted = false; struct sock *sk = NULL; if (family == AF_INET) { __be32 src4 = tuple->ipv4.saddr; __be32 dst4 = tuple->ipv4.daddr; if (proto == IPPROTO_TCP) sk = __inet_lookup(net, &tcp_hashinfo, NULL, 0, src4, tuple->ipv4.sport, dst4, tuple->ipv4.dport, dif, sdif, &refcounted); else sk = __udp4_lib_lookup(net, src4, tuple->ipv4.sport, dst4, tuple->ipv4.dport, dif, sdif, &udp_table, NULL); #if IS_ENABLED(CONFIG_IPV6) } else { struct in6_addr *src6 = (struct in6_addr *)&tuple->ipv6.saddr; struct in6_addr *dst6 = (struct in6_addr *)&tuple->ipv6.daddr; if (proto == IPPROTO_TCP) sk = __inet6_lookup(net, &tcp_hashinfo, NULL, 0, src6, tuple->ipv6.sport, dst6, ntohs(tuple->ipv6.dport), dif, sdif, &refcounted); else if (likely(ipv6_bpf_stub)) sk = ipv6_bpf_stub->udp6_lib_lookup(net, src6, tuple->ipv6.sport, dst6, tuple->ipv6.dport, dif, sdif, &udp_table, NULL); #endif } if (unlikely(sk && !refcounted && !sock_flag(sk, SOCK_RCU_FREE))) { WARN_ONCE(1, "Found non-RCU, unreferenced socket!"); sk = NULL; } return sk; } /* bpf_skc_lookup performs the core lookup for different types of sockets, * taking a reference on the socket if it doesn't have the flag SOCK_RCU_FREE. * Returns the socket as an 'unsigned long' to simplify the casting in the * callers to satisfy BPF_CALL declarations. 
*/ static struct sock * __bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = NULL; u8 family = AF_UNSPEC; struct net *net; int sdif; if (len == sizeof(tuple->ipv4)) family = AF_INET; else if (len == sizeof(tuple->ipv6)) family = AF_INET6; else return NULL; if (unlikely(family == AF_UNSPEC || flags || !((s32)netns_id < 0 || netns_id <= S32_MAX))) goto out; if (family == AF_INET) sdif = inet_sdif(skb); else sdif = inet6_sdif(skb); if ((s32)netns_id < 0) { net = caller_net; sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); } else { net = get_net_ns_by_id(caller_net, netns_id); if (unlikely(!net)) goto out; sk = sk_lookup(net, tuple, ifindex, sdif, family, proto); put_net(net); } out: return sk; } static struct sock * __bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, struct net *caller_net, u32 ifindex, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, netns_id, flags); if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { if (!sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return NULL; } } return sk; } static struct sock * bpf_skc_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, u8 proto, u64 netns_id, u64 flags) { struct net *caller_net; int ifindex; if (skb->dev) { caller_net = dev_net(skb->dev); ifindex = skb->dev->ifindex; } else { caller_net = sock_net(skb->sk); ifindex = 0; } return __bpf_skc_lookup(skb, tuple, len, caller_net, ifindex, proto, netns_id, flags); } static struct sock * bpf_sk_lookup(struct sk_buff *skb, struct bpf_sock_tuple *tuple, u32 len, u8 proto, u64 netns_id, u64 flags) { struct sock *sk = bpf_skc_lookup(skb, tuple, len, proto, netns_id, flags); if (sk) { sk = sk_to_full_sk(sk); if (!sk_fullsock(sk)) { if (!sock_flag(sk, SOCK_RCU_FREE)) sock_gen_put(sk); return NULL; } } return sk; } BPF_CALL_5(bpf_skc_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_skc_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = bpf_skc_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sk_lookup_tcp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = bpf_sk_lookup_tcp, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(bpf_sk_lookup_udp, struct sk_buff *, skb, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)bpf_sk_lookup(skb, tuple, len, IPPROTO_UDP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = bpf_sk_lookup_udp, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_1(bpf_sk_release, struct sock *, sk) { if (!sock_flag(sk, SOCK_RCU_FREE)) 
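/* Example: the lookup family above, used from tc to check whether a local
 * TCP socket matches a flow. Passing BPF_F_CURRENT_NETNS selects the
 * caller's netns (the (s32)netns_id < 0 path above), and the verifier
 * enforces that a successful lookup is paired with bpf_sk_release().
 * Tuple values are illustrative:
 *
 *	SEC("tc")
 *	int has_local_sk(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock_tuple t = {
 *			.ipv4 = {
 *				.saddr = bpf_htonl(0x0a000002),
 *				.daddr = bpf_htonl(0x0a000001),
 *				.sport = bpf_htons(12345),
 *				.dport = bpf_htons(80),
 *			},
 *		};
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_sk_lookup_tcp(skb, &t, sizeof(t.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (!sk)
 *			return TC_ACT_SHOT;
 *		bpf_sk_release(sk);
 *		return TC_ACT_OK;
 *	}
 */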
sock_gen_put(sk); return 0; } static const struct bpf_func_proto <API key> = { .func = bpf_sk_release, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = <API key>, }; BPF_CALL_5(<API key>, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_UDP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(<API key>, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(<API key>, struct xdp_buff *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u32, netns_id, u64, flags) { struct net *caller_net = dev_net(ctx->rxq->dev); int ifindex = ctx->rxq->dev->ifindex; return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, caller_net, ifindex, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .pkt_access = true, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(<API key>, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)__bpf_skc_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(<API key>, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_TCP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; BPF_CALL_5(<API key>, struct bpf_sock_addr_kern *, ctx, struct bpf_sock_tuple *, tuple, u32, len, u64, netns_id, u64, flags) { return (unsigned long)__bpf_sk_lookup(NULL, tuple, len, sock_net(ctx->sk), 0, IPPROTO_UDP, netns_id, flags); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = <API key>, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; bool <API key>(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { if (off < 0 || off >= offsetofend(struct 
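/* Example: the sock_addr flavors above let e.g. a cgroup/connect4 program
 * probe for a local listener before deciding whether to rewrite the
 * connect destination. Sketch only; the rewrite itself is elided:
 *
 *	SEC("cgroup/connect4")
 *	int steer_if_local(struct bpf_sock_addr *ctx)
 *	{
 *		struct bpf_sock_tuple t = {
 *			.ipv4 = {
 *				.daddr = ctx->user_ip4,
 *				.dport = (__be16)ctx->user_port,
 *			},
 *		};
 *		struct bpf_sock *sk;
 *
 *		sk = bpf_sk_lookup_tcp(ctx, &t, sizeof(t.ipv4),
 *				       BPF_F_CURRENT_NETNS, 0);
 *		if (sk) {
 *			// a listener exists; e.g. rewrite ctx->user_ip4
 *			bpf_sk_release(sk);
 *		}
 *		return 1;
 *	}
 */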
bpf_tcp_sock, bytes_acked)) return false; if (off % size != 0) return false; switch (off) { case offsetof(struct bpf_tcp_sock, bytes_received): case offsetof(struct bpf_tcp_sock, bytes_acked): return size == sizeof(__u64); default: return size == sizeof(__u32); } } u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; #define <API key>(FIELD) \ do { \ BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \ FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ si->dst_reg, si->src_reg, \ offsetof(struct tcp_sock, FIELD)); \ } while (0) <API key>(struct bpf_tcp_sock, <API key>); if (insn > insn_buf) return insn - insn_buf; switch (si->off) { case offsetof(struct bpf_tcp_sock, rtt_min): BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != sizeof(struct minmax)); BUILD_BUG_ON(sizeof(struct minmax) < sizeof(struct minmax_sample)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct tcp_sock, rtt_min) + offsetof(struct minmax_sample, v)); break; } return insn - insn_buf; } BPF_CALL_1(bpf_tcp_sock, struct sock *, sk) { if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) return (unsigned long)sk; return (unsigned long)NULL; } static const struct bpf_func_proto bpf_tcp_sock_proto = { .func = bpf_tcp_sock, .gpl_only = false, .ret_type = <API key>, .arg1_type = <API key>, }; BPF_CALL_1(<API key>, struct sock *, sk) { sk = sk_to_full_sk(sk); if (sk->sk_state == TCP_LISTEN && sock_flag(sk, SOCK_RCU_FREE)) return (unsigned long)sk; return (unsigned long)NULL; } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = <API key>, .arg1_type = <API key>, }; BPF_CALL_1(bpf_skb_ecn_set_ce, struct sk_buff *, skb) { unsigned int iphdr_len; if (skb->protocol == cpu_to_be16(ETH_P_IP)) iphdr_len = sizeof(struct iphdr); else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) iphdr_len = sizeof(struct ipv6hdr); else return 0; if (skb_headlen(skb) < iphdr_len) return 0; if (skb_cloned(skb) && !skb_clone_writable(skb, iphdr_len)) return 0; return INET_ECN_set_ce(skb); } static const struct bpf_func_proto <API key> = { .func = bpf_skb_ecn_set_ce, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, }; BPF_CALL_5(<API key>, struct sock *, sk, void *, iph, u32, iph_len, struct tcphdr *, th, u32, th_len) { #ifdef CONFIG_SYN_COOKIES u32 cookie; int ret; if (unlikely(th_len < sizeof(*th))) return -EINVAL; /* sk_listener() allows TCP_NEW_SYN_RECV, which makes no sense here. 
*/ if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL; if (!sock_net(sk)->ipv4.<API key>) return -EINVAL; if (!th->ack || th->rst || th->syn) return -ENOENT; if (<API key>(sk)) return -ENOENT; cookie = ntohl(th->ack_seq) - 1; switch (sk->sk_family) { case AF_INET: if (unlikely(iph_len < sizeof(struct iphdr))) return -EINVAL; ret = __cookie_v4_check((struct iphdr *)iph, th, cookie); break; #if IS_BUILTIN(CONFIG_IPV6) case AF_INET6: if (unlikely(iph_len < sizeof(struct ipv6hdr))) return -EINVAL; ret = __cookie_v6_check((struct ipv6hdr *)iph, th, cookie); break; #endif /* CONFIG_IPV6 */ default: return -EPROTONOSUPPORT; } if (ret > 0) return 0; return -ENOENT; #else return -ENOTSUPP; #endif } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = true, .pkt_access = true, .ret_type = RET_INTEGER, .arg1_type = <API key>, .arg2_type = ARG_PTR_TO_MEM, .arg3_type = ARG_CONST_SIZE, .arg4_type = ARG_PTR_TO_MEM, .arg5_type = ARG_CONST_SIZE, }; #endif /* CONFIG_INET */ bool <API key>(void *func) { if (func == bpf_skb_vlan_push || func == bpf_skb_vlan_pop || func == bpf_skb_store_bytes || func == <API key> || func == bpf_skb_change_head || func == sk_skb_change_head || func == bpf_skb_change_tail || func == sk_skb_change_tail || func == bpf_skb_adjust_room || func == bpf_skb_pull_data || func == sk_skb_pull_data || func == bpf_clone_redirect || func == bpf_l3_csum_replace || func == bpf_l4_csum_replace || func == bpf_xdp_adjust_head || func == bpf_xdp_adjust_meta || func == bpf_msg_pull_data || func == bpf_msg_push_data || func == bpf_msg_pop_data || func == bpf_xdp_adjust_tail || #if IS_ENABLED(<API key>) func == <API key> || func == <API key> || func == bpf_lwt_seg6_action || #endif func == <API key> || func == <API key>) return true; return false; } static const struct bpf_func_proto * bpf_base_func_proto(enum bpf_func_id func_id) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_tail_call: return &bpf_tail_call_proto; case <API key>: return &<API key>; default: break; } if (!capable(CAP_SYS_ADMIN)) return NULL; switch (func_id) { case BPF_FUNC_spin_lock: return &bpf_spin_lock_proto; case <API key>: return &<API key>; case <API key>: return <API key>(); default: return NULL; } } static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { /* inet and inet6 sockets are created in a process * context so there is always a valid uid/gid */ case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { /* inet and inet6 sockets are created in a process * context so there is always a valid uid/gid */ case <API key>: return &<API key>; case BPF_FUNC_bind: switch (prog-><API key>) { case <API key>: case <API key>: return &bpf_bind_proto; default: return NULL; } case <API key>: return &<API key>; case <API key>: return &<API key>; #ifdef CONFIG_INET case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_sk_release: return &<API key>; case <API key>: return &<API key>; #endif /* CONFIG_INET */ 
default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto <API key> __weak; const struct bpf_func_proto <API key> __weak; static const struct bpf_func_proto * cg_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #ifdef CONFIG_INET case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; #endif default: return <API key>(func_id, prog); } } static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return <API key>(func_id); case <API key>: return &<API key>; case <API key>: return <API key>(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_set_hash: return &bpf_set_hash_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_fib_lookup: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #ifdef CONFIG_XFRM case <API key>: return &<API key>; #endif #ifdef <API key> case <API key>: return &<API key>; case <API key>: return &<API key>; #endif #ifdef CONFIG_INET case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_sk_release: return &<API key>; case BPF_FUNC_tcp_sock: return &bpf_tcp_sock_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #endif default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * xdp_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_redirect: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_fib_lookup: return &<API key>; #ifdef CONFIG_INET case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_sk_release: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #endif default: 
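/* XDP programs have no socket context; every other helper ID
 * resolves against the generic base helpers only.
 */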
return bpf_base_func_proto(func_id); } } const struct bpf_func_proto <API key> __weak; const struct bpf_func_proto <API key> __weak; static const struct bpf_func_proto * sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case BPF_FUNC_setsockopt: return &<API key>; case BPF_FUNC_getsockopt: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto <API key> __weak; const struct bpf_func_proto <API key> __weak; static const struct bpf_func_proto * sk_msg_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } const struct bpf_func_proto <API key> __weak; const struct bpf_func_proto <API key> __weak; static const struct bpf_func_proto * sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #ifdef CONFIG_INET case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_sk_release: return &<API key>; case <API key>: return &<API key>; #endif default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * lwt_out_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case BPF_FUNC_csum_diff: return &bpf_csum_diff_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } static const struct bpf_func_proto * lwt_in_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; default: return lwt_out_func_proto(func_id, prog); } } static const struct bpf_func_proto * lwt_xmit_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return <API key>(func_id); case <API key>: return &<API key>; case <API key>: return <API key>(func_id); case BPF_FUNC_redirect: return &bpf_redirect_proto; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return lwt_out_func_proto(func_id, prog); } } 
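/* Pattern note: each per-program-type lookup above resolves its own
 * helpers first and then chains to a more general resolver, e.g.
 * lwt_in_func_proto -> lwt_out_func_proto -> bpf_base_func_proto.
 * A minimal sketch of the same idiom, with a hypothetical program
 * type, would be:
 *
 *	static const struct bpf_func_proto *
 *	my_lwt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 *	{
 *		switch (func_id) {
 *		case BPF_FUNC_get_prandom_u32:
 *			return &bpf_get_prandom_u32_proto;
 *		default:
 *			return lwt_out_func_proto(func_id, prog);
 *		}
 *	}
 */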
static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { #if IS_ENABLED(<API key>) case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; #endif default: return lwt_out_func_proto(func_id, prog); } } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct __sk_buff)) return false; /* The verifier guarantees that size > 0. */ if (off % size != 0) return false; switch (off) { case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): if (off + size > offsetofend(struct __sk_buff, cb[4])) return false; break; case bpf_ctx_range_till(struct __sk_buff, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, local_ip6[0], local_ip6[3]): case bpf_ctx_range_till(struct __sk_buff, remote_ip4, remote_ip4): case bpf_ctx_range_till(struct __sk_buff, local_ip4, local_ip4): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; break; case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): return false; case bpf_ctx_range(struct __sk_buff, tstamp): if (size != sizeof(__u64)) return false; break; case offsetof(struct __sk_buff, sk): if (type == BPF_WRITE || size != sizeof(__u64)) return false; info->reg_type = <API key>; break; default: /* Only narrow read access allowed for now. */ if (type == BPF_WRITE) { if (size != size_default) return false; } else { <API key>(info, size_default); if (!<API key>(off, size, size_default)) return false; } } return true; } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, data_end): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; default: return false; } } return <API key>(off, size, type, prog, info); } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, wire_len): return false; case bpf_ctx_range(struct __sk_buff, data): case bpf_ctx_range(struct __sk_buff, data_end): if (!capable(CAP_SYS_ADMIN)) return false; break; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): case bpf_ctx_range(struct __sk_buff, priority): case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; case bpf_ctx_range(struct __sk_buff, tstamp): if (!capable(CAP_SYS_ADMIN)) return false; break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return <API key>(off, size, type, prog, info); } static bool lwt_is_valid_access(int off, int size, enum bpf_access_type type, 
const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, family, local_port): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): case bpf_ctx_range(struct __sk_buff, priority): case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return <API key>(off, size, type, prog, info); } /* Attach type specific accesses */ static bool <API key>(int off, enum bpf_access_type access_type, enum bpf_attach_type attach_type) { switch (off) { case offsetof(struct bpf_sock, bound_dev_if): case offsetof(struct bpf_sock, mark): case offsetof(struct bpf_sock, priority): switch (attach_type) { case <API key>: goto full_access; default: return false; } case bpf_ctx_range(struct bpf_sock, src_ip4): switch (attach_type) { case <API key>: goto read_only; default: return false; } case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): switch (attach_type) { case <API key>: goto read_only; default: return false; } case bpf_ctx_range(struct bpf_sock, src_port): switch (attach_type) { case <API key>: case <API key>: goto read_only; default: return false; } } read_only: return access_type == BPF_READ; full_access: return true; } bool <API key>(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range_till(struct bpf_sock, type, priority): return false; default: return <API key>(off, size, type, info); } } bool <API key>(int off, int size, enum bpf_access_type type, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock)) return false; if (off % size != 0) return false; switch (off) { case offsetof(struct bpf_sock, state): case offsetof(struct bpf_sock, family): case offsetof(struct bpf_sock, type): case offsetof(struct bpf_sock, protocol): case offsetof(struct bpf_sock, dst_port): case offsetof(struct bpf_sock, src_port): case bpf_ctx_range(struct bpf_sock, src_ip4): case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): case bpf_ctx_range(struct bpf_sock, dst_ip4): case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): <API key>(info, size_default); return <API key>(off, size, size_default); } return size == size_default; } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (!<API key>(off, size, type, info)) return false; return <API key>(off, type, prog-><API key>); } static int bpf_noop_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { /* Neither direct read nor direct write requires any preliminary * action. */ return 0; } static int <API key>(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog, int drop_verdict) { struct bpf_insn *insn = insn_buf; if (!direct_write) return 0; /* if (!skb->cloned) * goto start; * * (Fast-path, otherwise approximation that we might be * a clone, do the rest in helper.) 
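 *
 * The full emitted sequence behaves roughly like (a sketch):
 *
 *	if (!(skb->cloned & CLONED_MASK))
 *		goto start;
 *	if (bpf_skb_pull_data(skb, 0))
 *		return drop_verdict;
 *	r1 = skb;	context restored before falling through
 * start:
 *	<first original instruction>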
*/ *insn++ = BPF_LDX_MEM(BPF_B, BPF_REG_6, BPF_REG_1, CLONED_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_6, CLONED_MASK); *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_6, 0, 7); /* ret = bpf_skb_pull_data(skb, 0); */ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); *insn++ = BPF_ALU64_REG(BPF_XOR, BPF_REG_2, BPF_REG_2); *insn++ = BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, <API key>); /* if (!ret) * goto restore; * return TC_ACT_SHOT; */ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2); *insn++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, drop_verdict); *insn++ = BPF_EXIT_INSN(); /* restore: */ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6); /* start: */ *insn++ = prog->insnsi[0]; return insn - insn_buf; } static int bpf_gen_ld_abs(const struct bpf_insn *orig, struct bpf_insn *insn_buf) { bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; /* We're guaranteed here that CTX is in R6. */ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); } else { *insn++ = BPF_MOV64_REG(BPF_REG_2, orig->src_reg); if (orig->imm) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); } switch (BPF_SIZE(orig->code)) { case BPF_B: *insn++ = BPF_EMIT_CALL(<API key>); break; case BPF_H: *insn++ = BPF_EMIT_CALL(<API key>); break; case BPF_W: *insn++ = BPF_EMIT_CALL(<API key>); break; } *insn++ = BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 2); *insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_0); *insn++ = BPF_EXIT_INSN(); return insn - insn_buf; } static int tc_cls_act_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { return <API key>(insn_buf, direct_write, prog, TC_ACT_SHOT); } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, mark): case bpf_ctx_range(struct __sk_buff, tc_index): case bpf_ctx_range(struct __sk_buff, priority): case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range_till(struct __sk_buff, cb[0], cb[4]): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, queue_mapping): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_meta): info->reg_type = PTR_TO_PACKET_META; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; case bpf_ctx_range_till(struct __sk_buff, family, local_port): return false; } return <API key>(off, size, type, prog, info); } static bool <API key>(int off, int size) { if (off < 0 || off >= sizeof(struct xdp_md)) return false; if (off % size != 0) return false; if (size != sizeof(__u32)) return false; return true; } static bool xdp_is_valid_access(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (type == BPF_WRITE) { if (<API key>(prog->aux)) { switch (off) { case offsetof(struct xdp_md, rx_queue_index): return <API key>(off, size); } } return false; } switch (off) { case offsetof(struct xdp_md, data): info->reg_type = PTR_TO_PACKET; break; case offsetof(struct xdp_md, data_meta): info->reg_type = PTR_TO_PACKET_META; break; case offsetof(struct xdp_md, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return <API key>(off, size); } void <API key>(u32 act) { const u32 act_max = XDP_REDIRECT; WARN_ONCE(1, "%s XDP return value %u, expect 
packet loss!\n", act > act_max ? "Illegal" : "Driver unsupported", act); } EXPORT_SYMBOL_GPL(<API key>); static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock_addr)) return false; if (off % size != 0) return false; /* Disallow access to IPv6 fields from an IPv4 context and vice * versa. */ switch (off) { case bpf_ctx_range(struct bpf_sock_addr, user_ip4): switch (prog-><API key>) { case <API key>: case <API key>: case <API key>: case <API key>: break; default: return false; } break; case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): switch (prog-><API key>) { case <API key>: case <API key>: case <API key>: case <API key>: break; default: return false; } break; case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): switch (prog-><API key>) { case <API key>: break; default: return false; } break; case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): switch (prog-><API key>) { case <API key>: break; default: return false; } break; } switch (off) { case bpf_ctx_range(struct bpf_sock_addr, user_ip4): case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): case bpf_ctx_range(struct bpf_sock_addr, msg_src_ip4): case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): /* Only narrow read access allowed for now. */ if (type == BPF_READ) { <API key>(info, size_default); if (!<API key>(off, size, size_default)) return false; } else { if (size != size_default) return false; } break; case bpf_ctx_range(struct bpf_sock_addr, user_port): if (size != size_default) return false; break; default: if (type == BPF_READ) { if (size != size_default) return false; } else { return false; } } return true; } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct bpf_sock_ops)) return false; /* The verifier guarantees that size > 0. 
*/ if (off % size != 0) return false; if (type == BPF_WRITE) { switch (off) { case offsetof(struct bpf_sock_ops, reply): case offsetof(struct bpf_sock_ops, sk_txhash): if (size != size_default) return false; break; default: return false; } } else { switch (off) { case bpf_ctx_range_till(struct bpf_sock_ops, bytes_received, bytes_acked): if (size != sizeof(__u64)) return false; break; default: if (size != size_default) return false; break; } } return true; } static int sk_skb_prologue(struct bpf_insn *insn_buf, bool direct_write, const struct bpf_prog *prog) { return <API key>(insn_buf, direct_write, prog, SK_DROP); } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_classid): case bpf_ctx_range(struct __sk_buff, data_meta): case bpf_ctx_range(struct __sk_buff, tstamp): case bpf_ctx_range(struct __sk_buff, wire_len): return false; } if (type == BPF_WRITE) { switch (off) { case bpf_ctx_range(struct __sk_buff, tc_index): case bpf_ctx_range(struct __sk_buff, priority): break; default: return false; } } switch (off) { case bpf_ctx_range(struct __sk_buff, mark): return false; case bpf_ctx_range(struct __sk_buff, data): info->reg_type = PTR_TO_PACKET; break; case bpf_ctx_range(struct __sk_buff, data_end): info->reg_type = PTR_TO_PACKET_END; break; } return <API key>(off, size, type, prog, info); } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { if (type == BPF_WRITE) return false; if (off % size != 0) return false; switch (off) { case offsetof(struct sk_msg_md, data): info->reg_type = PTR_TO_PACKET; if (size != sizeof(__u64)) return false; break; case offsetof(struct sk_msg_md, data_end): info->reg_type = PTR_TO_PACKET_END; if (size != sizeof(__u64)) return false; break; case bpf_ctx_range(struct sk_msg_md, family): case bpf_ctx_range(struct sk_msg_md, remote_ip4): case bpf_ctx_range(struct sk_msg_md, local_ip4): case bpf_ctx_range_till(struct sk_msg_md, remote_ip6[0], remote_ip6[3]): case bpf_ctx_range_till(struct sk_msg_md, local_ip6[0], local_ip6[3]): case bpf_ctx_range(struct sk_msg_md, remote_port): case bpf_ctx_range(struct sk_msg_md, local_port): case bpf_ctx_range(struct sk_msg_md, size): if (size != sizeof(__u32)) return false; break; default: return false; } return true; } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const int size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct __sk_buff)) return false; if (type == BPF_WRITE) return false; switch (off) { case bpf_ctx_range(struct __sk_buff, data): if (size != size_default) return false; info->reg_type = PTR_TO_PACKET; return true; case bpf_ctx_range(struct __sk_buff, data_end): if (size != size_default) return false; info->reg_type = PTR_TO_PACKET_END; return true; case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): if (size != sizeof(__u64)) return false; info->reg_type = PTR_TO_FLOW_KEYS; return true; default: return false; } } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct __sk_buff, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, data)); break; case 
offsetof(struct __sk_buff, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, data_end), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, data_end)); break; case offsetof(struct __sk_buff, flow_keys): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_flow_dissector, flow_keys), si->dst_reg, si->src_reg, offsetof(struct bpf_flow_dissector, flow_keys)); break; } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct __sk_buff, len): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, len, 4, target_size)); break; case offsetof(struct __sk_buff, protocol): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, protocol, 2, target_size)); break; case offsetof(struct __sk_buff, vlan_proto): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, vlan_proto, 2, target_size)); break; case offsetof(struct __sk_buff, priority): if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, priority, 4, target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, priority, 4, target_size)); break; case offsetof(struct __sk_buff, ingress_ifindex): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, skb_iif, 4, target_size)); break; case offsetof(struct __sk_buff, ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct net_device, ifindex, 4, target_size)); break; case offsetof(struct __sk_buff, hash): *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, hash, 4, target_size)); break; case offsetof(struct __sk_buff, mark): if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, mark, 4, target_size)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, mark, 4, target_size)); break; case offsetof(struct __sk_buff, pkt_type): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, PKT_TYPE_OFFSET()); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, PKT_TYPE_MAX); #ifdef <API key> *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, 5); #endif break; case offsetof(struct __sk_buff, queue_mapping): if (type == BPF_WRITE) { *insn++ = BPF_JMP_IMM(BPF_JGE, si->src_reg, NO_QUEUE_MAPPING, 1); *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, queue_mapping, 2, target_size)); } else { *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, queue_mapping, 2, target_size)); } break; case offsetof(struct __sk_buff, vlan_present): *target_size = 1; *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg, <API key>()); if (<API key>) *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, <API key>); if (<API key> < 7) *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1); break; case offsetof(struct __sk_buff, vlan_tci): *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, vlan_tci, 2, target_size)); break; case offsetof(struct __sk_buff, cb[0]) ... 
offsetofend(struct __sk_buff, cb[4]) - 1: BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); BUILD_BUG_ON((offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data)) % sizeof(__u64)); prog->cb_access = 1; off = si->off; off -= offsetof(struct __sk_buff, cb[0]); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, data); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, tc_classid): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); off = si->off; off -= offsetof(struct __sk_buff, tc_classid); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, tc_classid); *target_size = 2; if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data), si->dst_reg, si->src_reg, offsetof(struct sk_buff, data)); break; case offsetof(struct __sk_buff, data_meta): off = si->off; off -= offsetof(struct __sk_buff, data_meta); off += offsetof(struct sk_buff, cb); off += offsetof(struct bpf_skb_data_end, data_meta); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, data_end): off = si->off; off -= offsetof(struct __sk_buff, data_end); off += offsetof(struct sk_buff, cb); off += offsetof(struct bpf_skb_data_end, data_end); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, tc_index): #ifdef CONFIG_NET_SCHED if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tc_index, 2, target_size)); else *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tc_index, 2, target_size)); #else *target_size = 2; if (type == BPF_WRITE) *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg); else *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, napi_id): #if defined(<API key>) *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, napi_id, 4, target_size)); *insn++ = BPF_JMP_IMM(BPF_JGE, si->dst_reg, MIN_NAPI_ID, 1); *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #else *target_size = 4; *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_family, 2, target_size)); break; case offsetof(struct __sk_buff, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_daddr, 4, target_size)); break; case offsetof(struct __sk_buff, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct 
sock_common, skc_rcv_saddr, 4, target_size)); break; case offsetof(struct __sk_buff, remote_ip6[0]) ... offsetof(struct __sk_buff, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct __sk_buff, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, local_ip6[0]) ... offsetof(struct __sk_buff, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct __sk_buff, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct __sk_buff, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_dport, 2, target_size)); #ifndef <API key> *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct __sk_buff, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, bpf_target_off(struct sock_common, skc_num, 2, target_size)); break; case offsetof(struct __sk_buff, tstamp): BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tstamp, 8, target_size)); else *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->src_reg, bpf_target_off(struct sk_buff, tstamp, 8, target_size)); break; case offsetof(struct __sk_buff, gso_segs): /* si->dst_reg = skb_shinfo(SKB); */ #ifdef <API key> *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, head), si->dst_reg, si->src_reg, offsetof(struct sk_buff, head)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), BPF_REG_AX, si->src_reg, offsetof(struct sk_buff, end)); *insn++ = BPF_ALU64_REG(BPF_ADD, si->dst_reg, BPF_REG_AX); #else *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, end), si->dst_reg, si->src_reg, offsetof(struct sk_buff, end)); #endif *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct skb_shared_info, gso_segs), si->dst_reg, si->dst_reg, bpf_target_off(struct skb_shared_info, gso_segs, 2, target_size)); break; case offsetof(struct __sk_buff, wire_len): BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); off = si->off; off -= offsetof(struct __sk_buff, wire_len); off += offsetof(struct sk_buff, cb); off += offsetof(struct qdisc_skb_cb, pkt_len); *target_size = 4; *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); break; case offsetof(struct __sk_buff, sk): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), si->dst_reg, si->src_reg, offsetof(struct sk_buff, sk)); break; } return insn - insn_buf; } u32 
<API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct bpf_sock, bound_dev_if): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_bound_dev_if)); break; case offsetof(struct bpf_sock, mark): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_mark)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_mark)); break; case offsetof(struct bpf_sock, priority): BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_priority)); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, sk_priority)); break; case offsetof(struct bpf_sock, family): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_family), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_family, FIELD_SIZEOF(struct sock_common, skc_family), target_size)); break; case offsetof(struct bpf_sock, type): BUILD_BUG_ON(HWEIGHT32(SK_FL_TYPE_MASK) != BITS_PER_BYTE * 2); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); *target_size = 2; break; case offsetof(struct bpf_sock, protocol): BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, offsetof(struct sock, __sk_flags_offset)); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); *target_size = 1; break; case offsetof(struct bpf_sock, src_ip4): *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_rcv_saddr, FIELD_SIZEOF(struct sock_common, skc_rcv_saddr), target_size)); break; case offsetof(struct bpf_sock, dst_ip4): *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_daddr, FIELD_SIZEOF(struct sock_common, skc_daddr), target_size)); break; case bpf_ctx_range_till(struct bpf_sock, src_ip6[0], src_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) off = si->off; off -= offsetof(struct bpf_sock, src_ip6[0]); *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off( struct sock_common, skc_v6_rcv_saddr.s6_addr32[0], FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]), target_size) + off); #else (void)off; *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case bpf_ctx_range_till(struct bpf_sock, dst_ip6[0], dst_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) off = si->off; off -= offsetof(struct bpf_sock, dst_ip6[0]); *insn++ = BPF_LDX_MEM( BPF_SIZE(si->code), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_v6_daddr.s6_addr32[0], FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]), target_size) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); *target_size = 4; #endif break; case offsetof(struct bpf_sock, src_port): *insn++ = BPF_LDX_MEM( 
BPF_FIELD_SIZEOF(struct sock_common, skc_num), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_num, FIELD_SIZEOF(struct sock_common, skc_num), target_size)); break; case offsetof(struct bpf_sock, dst_port): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_dport), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_dport, FIELD_SIZEOF(struct sock_common, skc_dport), target_size)); break; case offsetof(struct bpf_sock, state): *insn++ = BPF_LDX_MEM( BPF_FIELD_SIZEOF(struct sock_common, skc_state), si->dst_reg, si->src_reg, bpf_target_off(struct sock_common, skc_state, FIELD_SIZEOF(struct sock_common, skc_state), target_size)); break; } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct __sk_buff, ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), si->dst_reg, si->src_reg, offsetof(struct sk_buff, dev)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, bpf_target_off(struct net_device, ifindex, 4, target_size)); break; default: return <API key>(type, si, insn_buf, prog, target_size); } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct xdp_md, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data)); break; case offsetof(struct xdp_md, data_meta): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_meta), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_meta)); break; case offsetof(struct xdp_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, data_end)); break; case offsetof(struct xdp_md, ingress_ifindex): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, rxq)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_rxq_info, dev), si->dst_reg, si->dst_reg, offsetof(struct xdp_rxq_info, dev)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct net_device, ifindex)); break; case offsetof(struct xdp_md, rx_queue_index): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, rxq), si->dst_reg, si->src_reg, offsetof(struct xdp_buff, rxq)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct xdp_rxq_info, queue_index)); break; } return insn - insn_buf; } /* <API key>() loads Nested Field S.F.NF where S is type of * context Structure, F is Field in context structure that contains a pointer * to Nested Structure of type NS that has the field NF. * * SIZE encodes the load size (BPF_B, BPF_H, etc). It's up to caller to make * sure that SIZE is not greater than actual size of S.F.NF. * * If offset OFF is provided, the load happens from that offset relative to * offset of NF. 
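 *
 * Illustratively, for a 4-byte NF the expansion behaves like
 * (a sketch, not the literal instructions):
 *
 *	dst_reg = ((S *)src_reg)->F;	now a pointer to NS
 *	dst_reg = *(u32 *)((char *)dst_reg + offsetof(NS, NF) + OFF);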
*/ #define <API key>(S, NS, F, NF, SIZE, OFF) \ do { \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), si->dst_reg, \ si->src_reg, offsetof(S, F)); \ *insn++ = BPF_LDX_MEM( \ SIZE, si->dst_reg, si->dst_reg, \ bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ target_size) \ + OFF); \ } while (0) #define <API key>(S, NS, F, NF) \ <API key>(S, NS, F, NF, \ BPF_FIELD_SIZEOF(NS, NF), 0) /* <API key>() has semantics similar to * <API key>() but for a store operation. * * It doesn't support a SIZE argument, though, since narrow stores are not * supported for now. * * In addition it uses Temporary Field TF (member of struct S) as the 3rd * "register" since the two registers available in convert_ctx_access are not * enough: we can override neither SRC, since it contains the value to store, * nor DST, since it contains the pointer to the context that may be used by * later instructions. But we need a temporary place to save the pointer to * the nested structure whose field we want to store to. */ #define <API key>(S, NS, F, NF, OFF, TF) \ do { \ int tmp_reg = BPF_REG_9; \ if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ --tmp_reg; \ if (si->src_reg == tmp_reg || si->dst_reg == tmp_reg) \ --tmp_reg; \ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, tmp_reg, \ offsetof(S, TF)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ si->dst_reg, offsetof(S, F)); \ *insn++ = BPF_STX_MEM( \ BPF_FIELD_SIZEOF(NS, NF), tmp_reg, si->src_reg, \ bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ target_size) \ + OFF); \ *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ offsetof(S, TF)); \ } while (0) #define <API key>(S, NS, F, NF, SIZE, OFF, \ TF) \ do { \ if (type == BPF_WRITE) { \ <API key>(S, NS, F, NF, OFF, \ TF); \ } else { \ <API key>( \ S, NS, F, NF, SIZE, OFF); \ } \ } while (0) #define <API key>(S, NS, F, NF, TF) \ <API key>( \ S, NS, F, NF, BPF_FIELD_SIZEOF(NS, NF), 0, TF) static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct bpf_sock_addr, user_family): <API key>(struct bpf_sock_addr_kern, struct sockaddr, uaddr, sa_family); break; case offsetof(struct bpf_sock_addr, user_ip4): <API key>( struct bpf_sock_addr_kern, struct sockaddr_in, uaddr, sin_addr, BPF_SIZE(si->code), 0, tmp_reg); break; case bpf_ctx_range_till(struct bpf_sock_addr, user_ip6[0], user_ip6[3]): off = si->off; off -= offsetof(struct bpf_sock_addr, user_ip6[0]); <API key>( struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, sin6_addr.s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); break; case offsetof(struct bpf_sock_addr, user_port): /* To get the port we need to know sa_family first and then treat * the sockaddr as either sockaddr_in or sockaddr_in6. * We can simplify, though, since the port field has the same * offset and size in both structures. * Here we check this invariant and use just one of the * structures if it's true. 
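 *
 * (Both layouts place the 16-bit port immediately after the 16-bit
 * family field, so sin_port and sin6_port alias; the BUILD_BUG_ON()s
 * below encode exactly that assumption.)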
*/ BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != offsetof(struct sockaddr_in6, sin6_port)); BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) != FIELD_SIZEOF(struct sockaddr_in6, sin6_port)); <API key>(struct bpf_sock_addr_kern, struct sockaddr_in6, uaddr, sin6_port, tmp_reg); break; case offsetof(struct bpf_sock_addr, family): <API key>(struct bpf_sock_addr_kern, struct sock, sk, sk_family); break; case offsetof(struct bpf_sock_addr, type): <API key>( struct bpf_sock_addr_kern, struct sock, sk, __sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT); break; case offsetof(struct bpf_sock_addr, protocol): <API key>( struct bpf_sock_addr_kern, struct sock, sk, __sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); break; case offsetof(struct bpf_sock_addr, msg_src_ip4): /* Treat t_ctx as struct in_addr for msg_src_ip4. */ <API key>( struct bpf_sock_addr_kern, struct in_addr, t_ctx, s_addr, BPF_SIZE(si->code), 0, tmp_reg); break; case bpf_ctx_range_till(struct bpf_sock_addr, msg_src_ip6[0], msg_src_ip6[3]): off = si->off; off -= offsetof(struct bpf_sock_addr, msg_src_ip6[0]); /* Treat t_ctx as struct in6_addr for msg_src_ip6. */ <API key>( struct bpf_sock_addr_kern, struct in6_addr, t_ctx, s6_addr32[0], BPF_SIZE(si->code), off, tmp_reg); break; } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; /* Helper macro for adding read access to tcp_sock or sock fields. */ #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, \ is_fullsock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 2); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ si->dst_reg, si->src_reg, \ offsetof(struct bpf_sock_ops_kern, sk));\ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(OBJ, \ OBJ_FIELD), \ si->dst_reg, si->dst_reg, \ offsetof(OBJ, OBJ_FIELD)); \ } while (0) #define <API key>(FIELD) \ SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock) /* Helper macro for adding write access to tcp_sock or sock fields. * The macro is called with two registers: dst_reg, which contains a pointer * to ctx (the context), and src_reg, which contains the value that should be * stored. However, we need an additional register since we cannot overwrite * dst_reg because it may be used later in the program. * Instead we "borrow" one of the other registers. We first save its value * into a new (temp) field in bpf_sock_ops_kern, use it, and then restore * it at the end of the macro. 
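 *
 * Roughly, the emitted sequence is (a sketch):
 *
 *	ctx->temp = reg;		spill the borrowed register
 *	reg = ctx->is_fullsock;
 *	if (reg) {
 *		reg = ctx->sk;
 *		((OBJ *)reg)->OBJ_FIELD = src_reg;
 *	}
 *	reg = ctx->temp;		restore the borrowed register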
*/ #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ do { \ int reg = BPF_REG_9; \ BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ if (si->dst_reg == reg || si->src_reg == reg) \ reg--; \ if (si->dst_reg == reg || si->src_reg == reg) \ reg--; \ *insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, reg, \ offsetof(struct bpf_sock_ops_kern, \ temp)); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, \ is_fullsock), \ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ is_fullsock)); \ *insn++ = BPF_JMP_IMM(BPF_JEQ, reg, 0, 2); \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ struct bpf_sock_ops_kern, sk),\ reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, sk));\ *insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(OBJ, OBJ_FIELD), \ reg, si->src_reg, \ offsetof(OBJ, OBJ_FIELD)); \ *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->dst_reg, \ offsetof(struct bpf_sock_ops_kern, \ temp)); \ } while (0) #define <API key>(BPF_FIELD, OBJ_FIELD, OBJ, TYPE) \ do { \ if (TYPE == BPF_WRITE) \ SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ else \ SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ); \ } while (0) <API key>(struct bpf_sock_ops, <API key>); if (insn > insn_buf) return insn - insn_buf; switch (si->off) { case offsetof(struct bpf_sock_ops, op) ... offsetof(struct bpf_sock_ops, replylong[3]): BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) != FIELD_SIZEOF(struct bpf_sock_ops_kern, op)); BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) != FIELD_SIZEOF(struct bpf_sock_ops_kern, reply)); BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) != FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong)); off = si->off; off -= offsetof(struct bpf_sock_ops, op); off += offsetof(struct bpf_sock_ops_kern, op); if (type == BPF_WRITE) *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, off); else *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg, off); break; case offsetof(struct bpf_sock_ops, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_family)); break; case offsetof(struct bpf_sock_ops, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_daddr)); break; case offsetof(struct bpf_sock_ops, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_rcv_saddr)); break; case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 
offsetof(struct bpf_sock_ops, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct bpf_sock_ops, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct bpf_sock_ops, local_ip6[0]) ... offsetof(struct bpf_sock_ops, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct bpf_sock_ops, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct bpf_sock_ops, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_dport)); #ifndef <API key> *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct bpf_sock_ops, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_num)); break; case offsetof(struct bpf_sock_ops, is_fullsock): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, is_fullsock), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, is_fullsock)); break; case offsetof(struct bpf_sock_ops, state): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_state)); break; case offsetof(struct bpf_sock_ops, rtt_min): BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != sizeof(struct minmax)); BUILD_BUG_ON(sizeof(struct minmax) < sizeof(struct minmax_sample)); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct bpf_sock_ops_kern, sk), si->dst_reg, si->src_reg, offsetof(struct bpf_sock_ops_kern, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct tcp_sock, rtt_min) + FIELD_SIZEOF(struct minmax_sample, t)); break; case offsetof(struct bpf_sock_ops, <API key>): SOCK_OPS_GET_FIELD(<API key>, <API key>, struct tcp_sock); break; case offsetof(struct bpf_sock_ops, sk_txhash): <API key>(sk_txhash, sk_txhash, struct sock, type); break; } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; int off; switch (si->off) { case offsetof(struct __sk_buff, data_end): off = si->off; off -= offsetof(struct __sk_buff, data_end); off += offsetof(struct sk_buff, cb); off += 
offsetof(struct tcp_skb_cb, bpf.data_end); *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, si->src_reg, off); break; default: return <API key>(type, si, insn_buf, prog, target_size); } return insn - insn_buf; } static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; #if IS_ENABLED(CONFIG_IPV6) int off; #endif /* convert ctx uses the fact sg element is first in struct */ BUILD_BUG_ON(offsetof(struct sk_msg, sg) != 0); switch (si->off) { case offsetof(struct sk_msg_md, data): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data), si->dst_reg, si->src_reg, offsetof(struct sk_msg, data)); break; case offsetof(struct sk_msg_md, data_end): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg, data_end), si->dst_reg, si->src_reg, offsetof(struct sk_msg, data_end)); break; case offsetof(struct sk_msg_md, family): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_family)); break; case offsetof(struct sk_msg_md, remote_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_daddr)); break; case offsetof(struct sk_msg_md, local_ip4): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_rcv_saddr) != 4); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_rcv_saddr)); break; case offsetof(struct sk_msg_md, remote_ip6[0]) ... offsetof(struct sk_msg_md, remote_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_daddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct sk_msg_md, remote_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_daddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct sk_msg_md, local_ip6[0]) ... 
offsetof(struct sk_msg_md, local_ip6[3]): #if IS_ENABLED(CONFIG_IPV6) BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) != 4); off = si->off; off -= offsetof(struct sk_msg_md, local_ip6[0]); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_v6_rcv_saddr.s6_addr32[0]) + off); #else *insn++ = BPF_MOV32_IMM(si->dst_reg, 0); #endif break; case offsetof(struct sk_msg_md, remote_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_dport)); #ifndef <API key> *insn++ = BPF_ALU32_IMM(BPF_LSH, si->dst_reg, 16); #endif break; case offsetof(struct sk_msg_md, local_port): BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( struct sk_msg, sk), si->dst_reg, si->src_reg, offsetof(struct sk_msg, sk)); *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->dst_reg, offsetof(struct sock_common, skc_num)); break; case offsetof(struct sk_msg_md, size): *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_msg_sg, size), si->dst_reg, si->src_reg, offsetof(struct sk_msg_sg, size)); break; } return insn - insn_buf; } const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API key>, .gen_ld_abs = bpf_gen_ld_abs, }; const struct bpf_prog_ops sk_filter_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API key>, .gen_prologue = tc_cls_act_prologue, .gen_ld_abs = bpf_gen_ld_abs, }; const struct bpf_prog_ops tc_cls_act_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops xdp_verifier_ops = { .get_func_proto = xdp_func_proto, .is_valid_access = xdp_is_valid_access, .convert_ctx_access = <API key>, .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops xdp_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops cg_skb_verifier_ops = { .get_func_proto = cg_skb_func_proto, .is_valid_access = <API key>, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops cg_skb_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops lwt_in_verifier_ops = { .get_func_proto = lwt_in_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops lwt_in_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops <API key> = { .get_func_proto = lwt_out_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops lwt_out_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops <API key> = { .get_func_proto = lwt_xmit_func_proto, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = <API key>, .gen_prologue = tc_cls_act_prologue, }; const struct bpf_prog_ops lwt_xmit_prog_ops = { .test_run = <API key>, }; const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = lwt_is_valid_access, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops <API key> = { .test_run = <API key>, }; const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API 
key>, }; const struct bpf_prog_ops cg_sock_prog_ops = { }; const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops <API key> = { }; const struct bpf_verifier_ops <API key> = { .get_func_proto = sock_ops_func_proto, .is_valid_access = <API key>, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops sock_ops_prog_ops = { }; const struct bpf_verifier_ops sk_skb_verifier_ops = { .get_func_proto = sk_skb_func_proto, .is_valid_access = <API key>, .convert_ctx_access = <API key>, .gen_prologue = sk_skb_prologue, }; const struct bpf_prog_ops sk_skb_prog_ops = { }; const struct bpf_verifier_ops sk_msg_verifier_ops = { .get_func_proto = sk_msg_func_proto, .is_valid_access = <API key>, .convert_ctx_access = <API key>, .gen_prologue = bpf_noop_prologue, }; const struct bpf_prog_ops sk_msg_prog_ops = { }; const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops <API key> = { .test_run = <API key>, }; int sk_detach_filter(struct sock *sk) { int ret = -ENOENT; struct sk_filter *filter; if (sock_flag(sk, SOCK_FILTER_LOCKED)) return -EPERM; filter = <API key>(sk->sk_filter, <API key>(sk)); if (filter) { RCU_INIT_POINTER(sk->sk_filter, NULL); sk_filter_uncharge(sk, filter); ret = 0; } return ret; } EXPORT_SYMBOL_GPL(sk_detach_filter); int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) { struct sock_fprog_kern *fprog; struct sk_filter *filter; int ret = 0; lock_sock(sk); filter = <API key>(sk->sk_filter, <API key>(sk)); if (!filter) goto out; /* We're copying the filter that has been originally attached, * so no conversion/decode needed anymore. eBPF programs that * have no original program cannot be dumped through this. */ ret = -EACCES; fprog = filter->prog->orig_prog; if (!fprog) goto out; ret = fprog->len; if (!len) /* User space only enquires number of filter blocks. */ goto out; ret = -EINVAL; if (len < fprog->len) goto out; ret = -EFAULT; if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog))) goto out; /* Instead of bytes, the API requests to return the number * of filter blocks. 
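 * Callers size their buffer by first calling with len == 0 (handled
 * above) and then calling again with the reported block count.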
*/ ret = fprog->len; out: release_sock(sk); return ret; } #ifdef CONFIG_INET struct sk_reuseport_kern { struct sk_buff *skb; struct sock *sk; struct sock *selected_sk; void *data_end; u32 hash; u32 reuseport_id; bool bind_inany; }; static void <API key>(struct sk_reuseport_kern *reuse_kern, struct sock_reuseport *reuse, struct sock *sk, struct sk_buff *skb, u32 hash) { reuse_kern->skb = skb; reuse_kern->sk = sk; reuse_kern->selected_sk = NULL; reuse_kern->data_end = skb->data + skb_headlen(skb); reuse_kern->hash = hash; reuse_kern->reuseport_id = reuse->reuseport_id; reuse_kern->bind_inany = reuse->bind_inany; } struct sock *<API key>(struct sock_reuseport *reuse, struct sock *sk, struct bpf_prog *prog, struct sk_buff *skb, u32 hash) { struct sk_reuseport_kern reuse_kern; enum sk_action action; <API key>(&reuse_kern, reuse, sk, skb, hash); action = BPF_PROG_RUN(prog, &reuse_kern); if (action == SK_PASS) return reuse_kern.selected_sk; else return ERR_PTR(-ECONNREFUSED); } BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, struct bpf_map *, map, void *, key, u32, flags) { struct sock_reuseport *reuse; struct sock *selected_sk; selected_sk = map->ops->map_lookup_elem(map, key); if (!selected_sk) return -ENOENT; reuse = rcu_dereference(selected_sk->sk_reuseport_cb); if (!reuse) /* selected_sk is unhashed (e.g. by close()) after the * above map_lookup_elem(). Treat selected_sk has already * been removed from the map. */ return -ENOENT; if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { struct sock *sk; if (unlikely(!reuse_kern->reuseport_id)) /* There is a small race between adding the * sk to the map and setting the * reuse_kern->reuseport_id. * Treat it as the sk has not been added to * the bpf map yet. */ return -ENOENT; sk = reuse_kern->sk; if (sk->sk_protocol != selected_sk->sk_protocol) return -EPROTOTYPE; else if (sk->sk_family != selected_sk->sk_family) return -EAFNOSUPPORT; /* Catch all. Likely bound to a different sockaddr. 
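 * The protocol and family already matched above, so the mismatch is
 * most likely in the bound address or port.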
*/ return -EBADFD; } reuse_kern->selected_sk = selected_sk; return 0; } static const struct bpf_func_proto <API key> = { .func = sk_select_reuseport, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_CONST_MAP_PTR, .arg3_type = ARG_PTR_TO_MAP_KEY, .arg4_type = ARG_ANYTHING, }; BPF_CALL_4(<API key>, const struct sk_reuseport_kern *, reuse_kern, u32, offset, void *, to, u32, len) { return <API key>(reuse_kern->skb, offset, to, len); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = <API key>, .arg4_type = ARG_CONST_SIZE, }; BPF_CALL_5(<API key>, const struct sk_reuseport_kern *, reuse_kern, u32, offset, void *, to, u32, len, u32, start_header) { return <API key>(reuse_kern->skb, offset, to, len, start_header); } static const struct bpf_func_proto <API key> = { .func = <API key>, .gpl_only = false, .ret_type = RET_INTEGER, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_ANYTHING, .arg3_type = <API key>, .arg4_type = ARG_CONST_SIZE, .arg5_type = ARG_ANYTHING, }; static const struct bpf_func_proto * <API key>(enum bpf_func_id func_id, const struct bpf_prog *prog) { switch (func_id) { case <API key>: return &<API key>; case <API key>: return &<API key>; case <API key>: return &<API key>; default: return bpf_base_func_proto(func_id); } } static bool <API key>(int off, int size, enum bpf_access_type type, const struct bpf_prog *prog, struct bpf_insn_access_aux *info) { const u32 size_default = sizeof(__u32); if (off < 0 || off >= sizeof(struct sk_reuseport_md) || off % size || type != BPF_READ) return false; switch (off) { case offsetof(struct sk_reuseport_md, data): info->reg_type = PTR_TO_PACKET; return size == sizeof(__u64); case offsetof(struct sk_reuseport_md, data_end): info->reg_type = PTR_TO_PACKET_END; return size == sizeof(__u64); case offsetof(struct sk_reuseport_md, hash): return size == size_default; /* Fields that allow narrowing */ case offsetof(struct sk_reuseport_md, eth_protocol): if (size < FIELD_SIZEOF(struct sk_buff, protocol)) return false; /* fall through */ case offsetof(struct sk_reuseport_md, ip_protocol): case offsetof(struct sk_reuseport_md, bind_inany): case offsetof(struct sk_reuseport_md, len): <API key>(info, size_default); return <API key>(off, size, size_default); default: return false; } } #define <API key>(F) ({ \ *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ si->dst_reg, si->src_reg, \ bpf_target_off(struct sk_reuseport_kern, F, \ FIELD_SIZEOF(struct sk_reuseport_kern, F), \ target_size)); \ }) #define <API key>(SKB_FIELD) \ <API key>(struct sk_reuseport_kern, \ struct sk_buff, \ skb, \ SKB_FIELD) #define <API key>(SK_FIELD, BPF_SIZE, EXTRA_OFF) \ <API key>(struct sk_reuseport_kern, \ struct sock, \ sk, \ SK_FIELD, BPF_SIZE, EXTRA_OFF) static u32 <API key>(enum bpf_access_type type, const struct bpf_insn *si, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size) { struct bpf_insn *insn = insn_buf; switch (si->off) { case offsetof(struct sk_reuseport_md, data): <API key>(data); break; case offsetof(struct sk_reuseport_md, len): <API key>(len); break; case offsetof(struct sk_reuseport_md, eth_protocol): <API key>(protocol); break; case offsetof(struct sk_reuseport_md, ip_protocol): BUILD_BUG_ON(HWEIGHT32(SK_FL_PROTO_MASK) != BITS_PER_BYTE); <API key>(__sk_flags_offset, BPF_W, 0); *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK); *insn++ = 
BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT); /* SK_FL_PROTO_MASK and SK_FL_PROTO_SHIFT are endian * aware. No further narrowing or masking is needed. */ *target_size = 1; break; case offsetof(struct sk_reuseport_md, data_end): <API key>(data_end); break; case offsetof(struct sk_reuseport_md, hash): <API key>(hash); break; case offsetof(struct sk_reuseport_md, bind_inany): <API key>(bind_inany); break; } return insn - insn_buf; } const struct bpf_verifier_ops <API key> = { .get_func_proto = <API key>, .is_valid_access = <API key>, .convert_ctx_access = <API key>, }; const struct bpf_prog_ops <API key> = { }; #endif /* CONFIG_INET */
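/*
 * A minimal user-space sketch (not kernel code; the struct layouts and
 * the helper name read_family() are illustrative stand-ins) of what the
 * convert_ctx_access callbacks above emit.  A program's single load of
 * sk_msg_md->family is rewritten at verification time into two real
 * loads: fetch msg->sk, then read the 2-byte skc_family through it --
 * the same BPF_LDX_MEM pair generated in the switch above.
 */
#include <stdio.h>

struct sock_common { unsigned short skc_family; };
/* struct sock embeds sock_common as its first member, which is why the
 * converter can apply offsetof(struct sock_common, skc_family) directly
 * to the sock pointer. */
struct sock { struct sock_common sk_common; };
struct sk_msg { void *data; void *data_end; struct sock *sk; };

static unsigned int read_family(const struct sk_msg *msg)
{
	const struct sock *sk = msg->sk;	/* BPF_LDX_MEM(BPF_SIZEOF(void *)) */
	return sk->sk_common.skc_family;	/* BPF_LDX_MEM(BPF_H) */
}

int main(void)
{
	struct sock sk = { .sk_common = { .skc_family = 2 /* AF_INET */ } };
	struct sk_msg msg = { .sk = &sk };

	printf("family = %u\n", read_family(&msg));
	return 0;
}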
<?php

namespace Drupal\Tests\rest\Functional\EntityResource\Media;

@trigger_error('The ' . __NAMESPACE__ . '\<API key> is deprecated in Drupal 8.6.x and will be removed before Drupal 9.0.0. Instead, use Drupal\Tests\media\Functional\Rest\<API key>. See https:', E_USER_DEPRECATED);

use Drupal\Tests\media\Functional\Rest\<API key> as <API key>;

abstract class <API key> extends <API key> { }
#include "drmP.h" #include "savage_drm.h" #include "savage_drv.h" /* Need a long timeout for shadow status updates can take a while * and so can waiting for events when the queue is full. */ #define <API key> 1000000 #define <API key> 5000000 #define <API key> 0 static int <API key>(struct drm_device *dev); static int <API key>(<API key> * dev_priv, unsigned int n) { uint32_t mask = dev_priv->status_used_mask; uint32_t threshold = dev_priv->bci_threshold_hi; uint32_t status; int i; #if SAVAGE_BCI_DEBUG if (n > dev_priv->cob_size + <API key> - threshold) DRM_ERROR("Trying to emit %d words " "(more than guaranteed space in COB)\n", n); #endif for (i = 0; i < <API key>; i++) { DRM_MEMORYBARRIER(); status = dev_priv->status_ptr[0]; if ((status & mask) < threshold) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold); #endif return -EBUSY; } static int <API key>(<API key> * dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + <API key> - n; uint32_t status; int i; for (i = 0; i < <API key>; i++) { status = SAVAGE_READ(SAVAGE_STATUS_WORD0); if ((status & <API key>) <= maxUsed) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif return -EBUSY; } static int <API key>(<API key> * dev_priv, unsigned int n) { uint32_t maxUsed = dev_priv->cob_size + <API key> - n; uint32_t status; int i; for (i = 0; i < <API key>; i++) { status = SAVAGE_READ(<API key>); if ((status & <API key>) <= maxUsed) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x\n", status); #endif return -EBUSY; } /* * Waiting for events. * * The BIOSresets the event tag to 0 on mode changes. Therefore we * never emit 0 to the event tag. If we find a 0 event tag we know the * BIOS stomped on it and return success assuming that the BIOS waited * for engine idle. * * Note: if the Xserver uses the event tag it has to follow the same * rule. Otherwise there may be glitches every 2^16 events. */ static int <API key>(<API key> * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < <API key>; i++) { DRM_MEMORYBARRIER(); status = dev_priv->status_ptr[1]; if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } static int <API key>(<API key> * dev_priv, uint16_t e) { uint32_t status; int i; for (i = 0; i < <API key>; i++) { status = SAVAGE_READ(SAVAGE_STATUS_WORD1); if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff || (status & 0xffff) == 0) return 0; DRM_UDELAY(1); } #if SAVAGE_BCI_DEBUG DRM_ERROR("failed!\n"); DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e); #endif return -EBUSY; } uint16_t <API key>(<API key> * dev_priv, unsigned int flags) { uint16_t count; BCI_LOCALS; if (dev_priv->status_ptr) { /* coordinate with Xserver */ count = dev_priv->status_ptr[1023]; if (count < dev_priv->event_counter) dev_priv->event_wrap++; } else { count = dev_priv->event_counter; } count = (count + 1) & 0xffff; if (count == 0) { count++; /* See the comment above savage_wait_event_*. 
*/ dev_priv->event_wrap++; } dev_priv->event_counter = count; if (dev_priv->status_ptr) dev_priv->status_ptr[1023] = (uint32_t) count; if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) { unsigned int wait_cmd = BCI_CMD_WAIT; if ((flags & SAVAGE_WAIT_2D)) wait_cmd |= BCI_CMD_WAIT_2D; if ((flags & SAVAGE_WAIT_3D)) wait_cmd |= BCI_CMD_WAIT_3D; BEGIN_BCI(2); BCI_WRITE(wait_cmd); } else { BEGIN_BCI(1); } BCI_WRITE(<API key> | (uint32_t) count); return count; } /* * Freelist management */ static int <API key>(struct drm_device * dev) { <API key> *dev_priv = dev->dev_private; struct drm_device_dma *dma = dev->dma; struct drm_buf *buf; <API key> *entry; int i; DRM_DEBUG("count=%d\n", dma->buf_count); dev_priv->head.next = &dev_priv->tail; dev_priv->head.prev = NULL; dev_priv->head.buf = NULL; dev_priv->tail.next = NULL; dev_priv->tail.prev = &dev_priv->head; dev_priv->tail.buf = NULL; for (i = 0; i < dma->buf_count; i++) { buf = dma->buflist[i]; entry = buf->dev_private; SET_AGE(&entry->age, 0, 0); entry->buf = buf; entry->next = dev_priv->head.next; entry->prev = &dev_priv->head; dev_priv->head.next->prev = entry; dev_priv->head.next = entry; } return 0; } static struct drm_buf *savage_freelist_get(struct drm_device * dev) { <API key> *dev_priv = dev->dev_private; <API key> *tail = dev_priv->tail.prev; uint16_t event; unsigned int wrap; DRM_DEBUG("\n"); <API key>(); if (dev_priv->status_ptr) event = dev_priv->status_ptr[1] & 0xffff; else event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; wrap = dev_priv->event_wrap; if (event > dev_priv->event_counter) wrap--; /* hardware hasn't passed the last wrap yet */ DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); DRM_DEBUG(" head=0x%04x %d\n", event, wrap); if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { <API key> *next = tail->next; <API key> *prev = tail->prev; prev->next = next; next->prev = prev; tail->next = tail->prev = NULL; return tail->buf; } DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); return NULL; } void savage_freelist_put(struct drm_device * dev, struct drm_buf * buf) { <API key> *dev_priv = dev->dev_private; <API key> *entry = buf->dev_private, *prev, *next; DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap); if (entry->next != NULL || entry->prev != NULL) { DRM_ERROR("entry already on freelist.\n"); return; } prev = &dev_priv->head; next = prev->next; prev->next = entry; next->prev = entry; entry->prev = prev; entry->next = next; } /* * Command DMA */ static int savage_dma_init(<API key> * dev_priv) { unsigned int i; dev_priv->nr_dma_pages = dev_priv->cmd_dma->size / (<API key> * 4); dev_priv->dma_pages = kmalloc(sizeof(<API key>) * dev_priv->nr_dma_pages, GFP_KERNEL); if (dev_priv->dma_pages == NULL) return -ENOMEM; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, 0, 0); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, 0, 0); dev_priv->first_dma_page = 0; dev_priv->current_dma_page = 0; return 0; } void savage_dma_reset(<API key> * dev_priv) { uint16_t event; unsigned int wrap, i; event = <API key>(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = 0; i < dev_priv->nr_dma_pages; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } SET_AGE(&dev_priv->last_dma_age, event, wrap); dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } void savage_dma_wait(<API key> * dev_priv, unsigned int page) { uint16_t 
event;
	unsigned int wrap;

	/* Faked DMA buffer pages don't age. */
	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
		return;

	<API key>();
	if (dev_priv->status_ptr)
		event = dev_priv->status_ptr[1] & 0xffff;
	else
		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
	wrap = dev_priv->event_wrap;
	if (event > dev_priv->event_counter)
		wrap--;	/* hardware hasn't passed the last wrap yet */

	if (dev_priv->dma_pages[page].age.wrap > wrap ||
	    (dev_priv->dma_pages[page].age.wrap == wrap &&
	     dev_priv->dma_pages[page].age.event > event)) {
		if (dev_priv->wait_evnt(dev_priv,
					dev_priv->dma_pages[page].age.event) < 0)
			DRM_ERROR("wait_evnt failed!\n");
	}
}

uint32_t *savage_dma_alloc(<API key> * dev_priv, unsigned int n)
{
	unsigned int cur = dev_priv->current_dma_page;
	unsigned int rest = <API key> - dev_priv->dma_pages[cur].used;
	unsigned int nr_pages = (n - rest + <API key> - 1) / <API key>;
	uint32_t *dma_ptr;
	unsigned int i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);

	if (cur + nr_pages < dev_priv->nr_dma_pages) {
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * <API key> + dev_priv->dma_pages[cur].used;
		if (n < rest)
			rest = n;
		dev_priv->dma_pages[cur].used += rest;
		n -= rest;
		cur++;
	} else {
		dev_priv->dma_flush(dev_priv);
		nr_pages = (n + <API key> - 1) / <API key>;
		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
			dev_priv->dma_pages[i].used = 0;
			dev_priv->dma_pages[i].flushed = 0;
		}
		dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle;
		dev_priv->first_dma_page = cur = 0;
	}

	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
#if SAVAGE_DMA_DEBUG
		if (dev_priv->dma_pages[i].used) {
			DRM_ERROR("unflushed page %u: used=%u\n",
				  i, dev_priv->dma_pages[i].used);
		}
#endif
		if (n > <API key>)
			dev_priv->dma_pages[i].used = <API key>;
		else
			dev_priv->dma_pages[i].used = n;
		n -= <API key>;
	}
	dev_priv->current_dma_page = --i;

	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
		  i, dev_priv->dma_pages[i].used, n);

	savage_dma_wait(dev_priv, dev_priv->current_dma_page);

	return dma_ptr;
}

static void savage_dma_flush(<API key> * dev_priv)
{
	unsigned int first = dev_priv->first_dma_page;
	unsigned int cur = dev_priv->current_dma_page;
	uint16_t event;
	unsigned int wrap, pad, align, len, i;
	unsigned long phys_addr;
	BCI_LOCALS;

	if (first == cur &&
	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
		return;

	/* pad length to multiples of 2 entries
	 * align start of next DMA block to multiples of 8 entries */
	pad = -dev_priv->dma_pages[cur].used & 1;
	align = -(dev_priv->dma_pages[cur].used + pad) & 7;

	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
		  "pad=%u, align=%u\n",
		  first, cur, dev_priv->dma_pages[first].flushed,
		  dev_priv->dma_pages[cur].used, pad, align);

	/* pad with noops */
	if (pad) {
		uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle +
		    cur * <API key> + dev_priv->dma_pages[cur].used;
		dev_priv->dma_pages[cur].used += pad;
		while (pad != 0) {
			*dma_ptr++ = BCI_CMD_WAIT;
			pad--;
		}
	}

	DRM_MEMORYBARRIER();

	/* do flush ...
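	 * (set the BCI DMA buffer address, then issue a single BCI_DMA
	 * command covering the whole unflushed range)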
*/ phys_addr = dev_priv->cmd_dma->offset + (first * <API key> + dev_priv->dma_pages[first].flushed) * 4; len = (cur - first) * <API key> + dev_priv->dma_pages[cur].used - dev_priv->dma_pages[first].flushed; DRM_DEBUG("phys_addr=%lx, len=%u\n", phys_addr | dev_priv->dma_type, len); BEGIN_BCI(3); BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1); BCI_WRITE(phys_addr | dev_priv->dma_type); BCI_DMA(len); /* fix alignment of the start of the next block */ dev_priv->dma_pages[cur].used += align; /* age DMA pages */ event = <API key>(dev_priv, 0); wrap = dev_priv->event_wrap; for (i = first; i < cur; ++i) { SET_AGE(&dev_priv->dma_pages[i].age, event, wrap); dev_priv->dma_pages[i].used = 0; dev_priv->dma_pages[i].flushed = 0; } /* age the current page only when it's full */ if (dev_priv->dma_pages[cur].used == <API key>) { SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap); dev_priv->dma_pages[cur].used = 0; dev_priv->dma_pages[cur].flushed = 0; /* advance to next page */ cur++; if (cur == dev_priv->nr_dma_pages) cur = 0; dev_priv->first_dma_page = dev_priv->current_dma_page = cur; } else { dev_priv->first_dma_page = cur; dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used; } SET_AGE(&dev_priv->last_dma_age, event, wrap); DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur, dev_priv->dma_pages[cur].used, dev_priv->dma_pages[cur].flushed); } static void <API key>(<API key> * dev_priv) { unsigned int i, j; BCI_LOCALS; if (dev_priv->first_dma_page == dev_priv->current_dma_page && dev_priv->dma_pages[dev_priv->current_dma_page].used == 0) return; DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n", dev_priv->first_dma_page, dev_priv->current_dma_page, dev_priv->dma_pages[dev_priv->current_dma_page].used); for (i = dev_priv->first_dma_page; i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used; ++i) { uint32_t *dma_ptr = (uint32_t *) dev_priv->cmd_dma->handle + i * <API key>; #if SAVAGE_DMA_DEBUG /* Sanity check: all pages except the last one must be full. */ if (i < dev_priv->current_dma_page && dev_priv->dma_pages[i].used != <API key>) { DRM_ERROR("partial DMA page %u: used=%u", i, dev_priv->dma_pages[i].used); } #endif BEGIN_BCI(dev_priv->dma_pages[i].used); for (j = 0; j < dev_priv->dma_pages[i].used; ++j) { BCI_WRITE(dma_ptr[j]); } dev_priv->dma_pages[i].used = 0; } /* reset to first page */ dev_priv->first_dma_page = dev_priv->current_dma_page = 0; } int savage_driver_load(struct drm_device *dev, unsigned long chipset) { <API key> *dev_priv; dev_priv = kzalloc(sizeof(<API key>), GFP_KERNEL); if (dev_priv == NULL) return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = (enum savage_family)chipset; pci_set_master(dev->pdev); return 0; } /* * Initialize mappings. On Savage4 and SavageIX the alignment * and size of the aperture is not suitable for automatic MTRR setup * in drm_addmap. Therefore we add them manually before the maps are * initialized, and tear them down on last close. */ int <API key>(struct drm_device *dev) { <API key> *dev_priv = dev->dev_private; unsigned long mmio_base, fb_base, fb_size, aperture_base; /* fb_rsrc and aper_rsrc aren't really used currently, but still exist * in case we decide we need information on the BAR for BSD in the * future. 
*/ unsigned int fb_rsrc, aper_rsrc; int ret = 0; dev_priv->mtrr[0].handle = -1; dev_priv->mtrr[1].handle = -1; dev_priv->mtrr[2].handle = -1; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { fb_rsrc = 0; fb_base = pci_resource_start(dev->pdev, 0); fb_size = SAVAGE_FB_SIZE_S3; mmio_base = fb_base + SAVAGE_FB_SIZE_S3; aper_rsrc = 0; aperture_base = fb_base + <API key>; /* this should always be true */ if (pci_resource_len(dev->pdev, 0) == 0x08000000) { /* Don't make MMIO write-cobining! We need 3 * MTRRs. */ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x01000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); dev_priv->mtrr[1].base = fb_base + 0x02000000; dev_priv->mtrr[1].size = 0x02000000; dev_priv->mtrr[1].handle = drm_mtrr_add(dev_priv->mtrr[1].base, dev_priv->mtrr[1].size, DRM_MTRR_WC); dev_priv->mtrr[2].base = fb_base + 0x04000000; dev_priv->mtrr[2].size = 0x04000000; dev_priv->mtrr[2].handle = drm_mtrr_add(dev_priv->mtrr[2].base, dev_priv->mtrr[2].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 0)); } } else if (dev_priv->chipset != S3_SUPERSAVAGE && dev_priv->chipset != S3_SAVAGE2000) { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = SAVAGE_FB_SIZE_S4; aper_rsrc = 1; aperture_base = fb_base + <API key>; /* this should always be true */ if (pci_resource_len(dev->pdev, 1) == 0x08000000) { /* Can use one MTRR to cover both fb and * aperture. */ dev_priv->mtrr[0].base = fb_base; dev_priv->mtrr[0].size = 0x08000000; dev_priv->mtrr[0].handle = drm_mtrr_add(dev_priv->mtrr[0].base, dev_priv->mtrr[0].size, DRM_MTRR_WC); } else { DRM_ERROR("strange pci_resource_len %08llx\n", (unsigned long long) pci_resource_len(dev->pdev, 1)); } } else { mmio_base = pci_resource_start(dev->pdev, 0); fb_rsrc = 1; fb_base = pci_resource_start(dev->pdev, 1); fb_size = pci_resource_len(dev->pdev, 1); aper_rsrc = 2; aperture_base = pci_resource_start(dev->pdev, 2); /* Automatic MTRR setup will do the right thing. */ } ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); if (ret) return ret; ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER, <API key>, &dev_priv->fb); if (ret) return ret; ret = drm_addmap(dev, aperture_base, <API key>, _DRM_FRAME_BUFFER, <API key>, &dev_priv->aperture); return ret; } /* * Delete MTRRs and free device-private data. 
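 * (undoes the drm_mtrr_add() calls made when the maps were set up)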
*/ void <API key>(struct drm_device *dev) { <API key> *dev_priv = dev->dev_private; int i; for (i = 0; i < 3; ++i) if (dev_priv->mtrr[i].handle >= 0) drm_mtrr_del(dev_priv->mtrr[i].handle, dev_priv->mtrr[i].base, dev_priv->mtrr[i].size, DRM_MTRR_WC); } int <API key>(struct drm_device *dev) { <API key> *dev_priv = dev->dev_private; kfree(dev_priv); return 0; } static int savage_do_init_bci(struct drm_device * dev, drm_savage_init_t * init) { <API key> *dev_priv = dev->dev_private; if (init->fb_bpp != 16 && init->fb_bpp != 32) { DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->depth_bpp != 16 && init->depth_bpp != 32) { DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp); return -EINVAL; } if (init->dma_type != SAVAGE_DMA_AGP && init->dma_type != SAVAGE_DMA_PCI) { DRM_ERROR("invalid dma memory type %d!\n", init->dma_type); return -EINVAL; } dev_priv->cob_size = init->cob_size; dev_priv->bci_threshold_lo = init->bci_threshold_lo; dev_priv->bci_threshold_hi = init->bci_threshold_hi; dev_priv->dma_type = init->dma_type; dev_priv->fb_bpp = init->fb_bpp; dev_priv->front_offset = init->front_offset; dev_priv->front_pitch = init->front_pitch; dev_priv->back_offset = init->back_offset; dev_priv->back_pitch = init->back_pitch; dev_priv->depth_bpp = init->depth_bpp; dev_priv->depth_offset = init->depth_offset; dev_priv->depth_pitch = init->depth_pitch; dev_priv->texture_offset = init->texture_offset; dev_priv->texture_size = init->texture_size; dev_priv->sarea = drm_getsarea(dev); if (!dev_priv->sarea) { DRM_ERROR("could not find sarea!\n"); <API key>(dev); return -EINVAL; } if (init->status_offset != 0) { dev_priv->status = drm_core_findmap(dev, init->status_offset); if (!dev_priv->status) { DRM_ERROR("could not find shadow status region!\n"); <API key>(dev); return -EINVAL; } } else { dev_priv->status = NULL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) { dev->agp_buffer_token = init->buffers_offset; dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); if (!dev->agp_buffer_map) { DRM_ERROR("could not find DMA buffer region!\n"); <API key>(dev); return -EINVAL; } drm_core_ioremap(dev->agp_buffer_map, dev); if (!dev->agp_buffer_map->handle) { DRM_ERROR("failed to ioremap DMA buffer region!\n"); <API key>(dev); return -ENOMEM; } } if (init->agp_textures_offset) { dev_priv->agp_textures = drm_core_findmap(dev, init->agp_textures_offset); if (!dev_priv->agp_textures) { DRM_ERROR("could not find agp texture region!\n"); <API key>(dev); return -EINVAL; } } else { dev_priv->agp_textures = NULL; } if (init->cmd_dma_offset) { if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { DRM_ERROR("command DMA not supported on " "Savage3D/MX/IX.\n"); <API key>(dev); return -EINVAL; } if (dev->dma && dev->dma->buflist) { DRM_ERROR("command and vertex DMA not supported " "at the same time.\n"); <API key>(dev); return -EINVAL; } dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset); if (!dev_priv->cmd_dma) { DRM_ERROR("could not find command DMA region!\n"); <API key>(dev); return -EINVAL; } if (dev_priv->dma_type == SAVAGE_DMA_AGP) { if (dev_priv->cmd_dma->type != _DRM_AGP) { DRM_ERROR("AGP command DMA region is not a " "_DRM_AGP map!\n"); <API key>(dev); return -EINVAL; } drm_core_ioremap(dev_priv->cmd_dma, dev); if (!dev_priv->cmd_dma->handle) { DRM_ERROR("failed to ioremap command " "DMA region!\n"); <API key>(dev); return -ENOMEM; } } else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) { DRM_ERROR("PCI command DMA region is not a " 
"_DRM_CONSISTENT map!\n"); <API key>(dev); return -EINVAL; } } else { dev_priv->cmd_dma = NULL; } dev_priv->dma_flush = savage_dma_flush; if (!dev_priv->cmd_dma) { DRM_DEBUG("falling back to faked command DMA.\n"); dev_priv->fake_dma.offset = 0; dev_priv->fake_dma.size = <API key>; dev_priv->fake_dma.type = _DRM_SHM; dev_priv->fake_dma.handle = kmalloc(<API key>, GFP_KERNEL); if (!dev_priv->fake_dma.handle) { DRM_ERROR("could not allocate faked DMA buffer!\n"); <API key>(dev); return -ENOMEM; } dev_priv->cmd_dma = &dev_priv->fake_dma; dev_priv->dma_flush = <API key>; } dev_priv->sarea_priv = (drm_savage_sarea_t *) ((uint8_t *) dev_priv->sarea->handle + init->sarea_priv_offset); /* setup bitmap descriptors */ { unsigned int color_tile_format; unsigned int depth_tile_format; unsigned int front_stride, back_stride, depth_stride; if (dev_priv->chipset <= S3_SAVAGE4) { color_tile_format = dev_priv->fb_bpp == 16 ? <API key> : <API key>; depth_tile_format = dev_priv->depth_bpp == 16 ? <API key> : <API key>; } else { color_tile_format = SAVAGE_BD_TILE_DEST; depth_tile_format = SAVAGE_BD_TILE_DEST; } front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp / 8); dev_priv->front_bd = front_stride | <API key> | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << <API key>); dev_priv->back_bd = back_stride | <API key> | (dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) | (color_tile_format << <API key>); dev_priv->depth_bd = depth_stride | <API key> | (dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) | (depth_tile_format << <API key>); } /* setup status and bci ptr */ dev_priv->event_counter = 0; dev_priv->event_wrap = 0; dev_priv->bci_ptr = (volatile uint32_t *) ((uint8_t *) dev_priv->mmio->handle + SAVAGE_BCI_OFFSET); if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->status_used_mask = <API key>; } else { dev_priv->status_used_mask = <API key>; } if (dev_priv->status != NULL) { dev_priv->status_ptr = (volatile uint32_t *)dev_priv->status->handle; dev_priv->wait_fifo = <API key>; dev_priv->wait_evnt = <API key>; dev_priv->status_ptr[1023] = dev_priv->event_counter; } else { dev_priv->status_ptr = NULL; if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) { dev_priv->wait_fifo = <API key>; } else { dev_priv->wait_fifo = <API key>; } dev_priv->wait_evnt = <API key>; } /* cliprect functions */ if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) dev_priv->emit_clip_rect = <API key>; else dev_priv->emit_clip_rect = <API key>; if (<API key>(dev) < 0) { DRM_ERROR("could not initialize freelist\n"); <API key>(dev); return -ENOMEM; } if (savage_dma_init(dev_priv) < 0) { DRM_ERROR("could not initialize command DMA\n"); <API key>(dev); return -ENOMEM; } return 0; } static int <API key>(struct drm_device * dev) { <API key> *dev_priv = dev->dev_private; if (dev_priv->cmd_dma == &dev_priv->fake_dma) { kfree(dev_priv->fake_dma.handle); } else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle && dev_priv->cmd_dma->type == _DRM_AGP && dev_priv->dma_type == SAVAGE_DMA_AGP) <API key>(dev_priv->cmd_dma, dev); if (dev_priv->dma_type == SAVAGE_DMA_AGP && dev->agp_buffer_map && dev->agp_buffer_map->handle) { <API key>(dev->agp_buffer_map, dev); /* make sure the next instance (which may be running * in PCI mode) doesn't try to use an old * agp_buffer_map. 
*/ dev->agp_buffer_map = NULL; } kfree(dev_priv->dma_pages); return 0; } static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { drm_savage_init_t *init = data; <API key>(dev, file_priv); switch (init->func) { case SAVAGE_INIT_BCI: return savage_do_init_bci(dev, init); case SAVAGE_CLEANUP_BCI: return <API key>(dev); } return -EINVAL; } static int <API key>(struct drm_device *dev, void *data, struct drm_file *file_priv) { <API key> *dev_priv = dev->dev_private; <API key> *event = data; DRM_DEBUG("\n"); <API key>(dev, file_priv); event->count = <API key>(dev_priv, event->flags); event->count |= dev_priv->event_wrap << 16; return 0; } static int <API key>(struct drm_device *dev, void *data, struct drm_file *file_priv) { <API key> *dev_priv = dev->dev_private; <API key> *event = data; unsigned int event_e, hw_e; unsigned int event_w, hw_w; DRM_DEBUG("\n"); <API key>(); if (dev_priv->status_ptr) hw_e = dev_priv->status_ptr[1] & 0xffff; else hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff; hw_w = dev_priv->event_wrap; if (hw_e > dev_priv->event_counter) hw_w--; /* hardware hasn't passed the last wrap yet */ event_e = event->count & 0xffff; event_w = event->count >> 16; /* Don't need to wait if * - event counter wrapped since the event was emitted or * - the hardware has advanced up to or over the event to wait for. */ if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e)) return 0; else return dev_priv->wait_evnt(dev_priv, event_e); } /* * DMA buffer management */ static int <API key>(struct drm_device *dev, struct drm_file *file_priv, struct drm_dma *d) { struct drm_buf *buf; int i; for (i = d->granted_count; i < d->request_count; i++) { buf = savage_freelist_get(dev); if (!buf) return -EAGAIN; buf->file_priv = file_priv; if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx, sizeof(buf->idx))) return -EFAULT; if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total, sizeof(buf->total))) return -EFAULT; d->granted_count++; } return 0; } int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; struct drm_dma *d = data; int ret = 0; <API key>(dev, file_priv); /* Please don't send us buffers. */ if (d->send_count != 0) { DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n", DRM_CURRENTPID, d->send_count); return -EINVAL; } /* We'll send you buffers. 
*/ if (d->request_count < 0 || d->request_count > dma->buf_count) { DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", DRM_CURRENTPID, d->request_count, dma->buf_count); return -EINVAL; } d->granted_count = 0; if (d->request_count) { ret = <API key>(dev, file_priv, d); } return ret; } void <API key>(struct drm_device *dev, struct drm_file *file_priv) { struct drm_device_dma *dma = dev->dma; <API key> *dev_priv = dev->dev_private; int i; if (!dma) return; if (!dev_priv) return; if (!dma->buflist) return; /*i830_flush_queue(dev); */ for (i = 0; i < dma->buf_count; i++) { struct drm_buf *buf = dma->buflist[i]; <API key> *buf_priv = buf->dev_private; if (buf->file_priv == file_priv && buf_priv && buf_priv->next == NULL && buf_priv->prev == NULL) { uint16_t event; DRM_DEBUG("reclaimed from client\n"); event = <API key>(dev_priv, SAVAGE_WAIT_3D); SET_AGE(&buf_priv->age, event, dev_priv->event_wrap); savage_freelist_put(dev, buf); } } <API key>(dev, file_priv); } struct drm_ioctl_desc savage_ioctls[] = { DRM_IOCTL_DEF_DRV(SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), DRM_IOCTL_DEF_DRV(SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH), DRM_IOCTL_DEF_DRV(<API key>, <API key>, DRM_AUTH), DRM_IOCTL_DEF_DRV(<API key>, <API key>, DRM_AUTH), }; int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
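/*
 * A stand-alone sketch (not part of the driver; event_passed() is an
 * illustrative name) of the wrap-safe event test used by the BCI wait
 * helpers above.  The 16-bit tags are compared with serial-number
 * arithmetic, so the test stays correct across the 2^16 wrap, and a
 * tag of 0 -- which only the BIOS produces -- also counts as passed.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int event_passed(uint32_t status, uint16_t e)
{
	uint16_t cur = status & 0xffff;

	/* same predicate as the driver:
	 * (((status & 0xffff) - e) & 0xffff) <= 0x7fff || cur == 0 */
	return (uint16_t)(cur - e) <= 0x7fff || cur == 0;
}

int main(void)
{
	assert(event_passed(0x0005, 0x0003));	/* already completed */
	assert(!event_passed(0x0003, 0x0005));	/* still pending */
	assert(event_passed(0x0001, 0xfffe));	/* completed across a wrap */
	assert(event_passed(0x0000, 0x1234));	/* BIOS reset the tag */
	puts("ok");
	return 0;
}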
#include <linux/moduleparam.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> #include <linux/pm_runtime.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> #include <asm/uaccess.h> #include "queue.h" MODULE_ALIAS("mmc:block"); #ifdef MODULE_PARAM_PREFIX #undef MODULE_PARAM_PREFIX #endif #define MODULE_PARAM_PREFIX "mmcblk." #define <API key> 113 #define <API key> 0x00 #define <API key> 0x01 #define <API key> 0x80 #define <API key> 0x81 #define <API key> 0x88 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ #define <API key> 240000 #define <API key>(x) ((x & 0x00FF0000) >> 16) #define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ (req->cmd_flags & REQ_META)) && \ (rq_data_dir(req) == WRITE)) #define PACKED_CMD_VER 0x01 #define PACKED_CMD_WR 0x02 static DEFINE_MUTEX(block_mutex); /* * The defaults come from config options but can be overriden by module * or bootarg options. */ static int perdev_minors = <API key>; /* * We've only got one major, so number of mmcblk devices is * limited to (1 << 20) / number of minors per device. It is also * currently limited by the size of the static bitmaps below. */ static int max_devices; #define MAX_DEVICES 256 /* TODO: Replace these with struct ida */ static DECLARE_BITMAP(dev_use, MAX_DEVICES); static DECLARE_BITMAP(name_use, MAX_DEVICES); /* * There is one mmc_blk_data per slot. */ struct mmc_blk_data { spinlock_t lock; struct gendisk *disk; struct mmc_queue queue; struct list_head part; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ #define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ unsigned int usage; unsigned int read_only; unsigned int part_type; unsigned int name_idx; unsigned int reset_done; #define MMC_BLK_READ BIT(0) #define MMC_BLK_WRITE BIT(1) #define MMC_BLK_DISCARD BIT(2) #define MMC_BLK_SECDISCARD BIT(3) /* * Only set in main mmc_blk_data associated * with mmc_card with dev_set_drvdata, and keeps * track of the current selected device partition. 
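 * mmc_blk_part_switch() compares a request's partition against this
 * and issues an EXT_CSD_PART_CONFIG switch when they differ.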
 */
	unsigned int	part_curr;
	struct device_attribute force_ro;
	struct device_attribute power_ro_lock;
	int	area_type;
};

static DEFINE_MUTEX(open_lock);

enum {
	MMC_PACKED_NR_IDX = -1,
	MMC_PACKED_NR_ZERO,
	<API key>,
};

module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");

static inline int mmc_blk_part_switch(struct mmc_card *card,
				      struct mmc_blk_data *md);
static int get_card_status(struct mmc_card *card, u32 *status, int retries);

static inline void <API key>(struct mmc_queue_req *mqrq)
{
	struct mmc_packed *packed = mqrq->packed;

	BUG_ON(!packed);

	mqrq->cmd_type = MMC_PACKED_NONE;
	packed->nr_entries = MMC_PACKED_NR_ZERO;
	packed->idx_failure = MMC_PACKED_NR_IDX;
	packed->retries = 0;
	packed->blocks = 0;
}

static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
	struct mmc_blk_data *md;

	mutex_lock(&open_lock);
	md = disk->private_data;
	if (md && md->usage == 0)
		md = NULL;
	if (md)
		md->usage++;
	mutex_unlock(&open_lock);

	return md;
}

static inline int mmc_get_devidx(struct gendisk *disk)
{
	int devmaj = MAJOR(disk_devt(disk));
	int devidx = MINOR(disk_devt(disk)) / perdev_minors;

	if (!devmaj)
		devidx = disk->first_minor / perdev_minors;
	return devidx;
}

static void mmc_blk_put(struct mmc_blk_data *md)
{
	mutex_lock(&open_lock);
	md->usage--;
	if (md->usage == 0) {
		int devidx = mmc_get_devidx(md->disk);
		blk_cleanup_queue(md->queue.queue);

		__clear_bit(devidx, dev_use);

		put_disk(md->disk);
		kfree(md);
	}
	mutex_unlock(&open_lock);
}

static ssize_t power_ro_lock_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	struct mmc_card *card = md->queue.card;
	int locked = 0;

	if (card->ext_csd.boot_ro_lock & <API key>)
		locked = 2;
	else if (card->ext_csd.boot_ro_lock & <API key>)
		locked = 1;

	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);

	mmc_blk_put(md);

	return ret;
}

static ssize_t power_ro_lock_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	int ret;
	struct mmc_blk_data *md, *part_md;
	struct mmc_card *card;
	unsigned long set;

	if (kstrtoul(buf, 0, &set))
		return -EINVAL;

	if (set != 1)
		return count;

	md = mmc_blk_get(dev_to_disk(dev));
	card = md->queue.card;

	mmc_get_card(card);

	ret = mmc_switch(card, <API key>, EXT_CSD_BOOT_WP,
			 card->ext_csd.boot_ro_lock | <API key>,
			 card->ext_csd.part_time);
	if (ret)
		pr_err("%s: Locking boot partition ro until next power on failed: %d\n",
		       md->disk->disk_name, ret);
	else
		card->ext_csd.boot_ro_lock |= <API key>;

	mmc_put_card(card);

	if (!ret) {
		pr_info("%s: Locking boot partition ro until next power on\n",
			md->disk->disk_name);
		set_disk_ro(md->disk, 1);

		list_for_each_entry(part_md, &md->part, part)
			if (part_md->area_type == <API key>) {
				pr_info("%s: Locking boot partition ro until next power on\n",
					part_md->disk->disk_name);
				set_disk_ro(part_md->disk, 1);
			}
	}

	mmc_blk_put(md);
	return count;
}

static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	int ret;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));

	ret = snprintf(buf, PAGE_SIZE, "%d\n",
		       get_disk_ro(dev_to_disk(dev)) ^
		       md->read_only);
	mmc_blk_put(md);
	return ret;
}

static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
			      const char *buf, size_t count)
{
	int ret;
	char *end;
	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
	unsigned long set = simple_strtoul(buf, &end, 0);
	if (end == buf) {
		ret = -EINVAL;
		goto out;
	}

	set_disk_ro(dev_to_disk(dev), set || md->read_only);
	ret =
count; out: mmc_blk_put(md); return ret; } static int mmc_blk_open(struct block_device *bdev, fmode_t mode) { struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); int ret = -ENXIO; mutex_lock(&block_mutex); if (md) { if (md->usage == 2) check_disk_change(bdev); ret = 0; if ((mode & FMODE_WRITE) && md->read_only) { mmc_blk_put(md); ret = -EROFS; } } mutex_unlock(&block_mutex); return ret; } static void mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); } static int mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo) { geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16); geo->heads = 4; geo->sectors = 16; return 0; } struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; }; static struct mmc_blk_ioc_data *<API key>( struct mmc_ioc_cmd __user *user) { struct mmc_blk_ioc_data *idata; int err; idata = kzalloc(sizeof(*idata), GFP_KERNEL); if (!idata) { err = -ENOMEM; goto out; } if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) { err = -EFAULT; goto idata_err; } idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { err = -EOVERFLOW; goto idata_err; } if (!idata->buf_bytes) return idata; idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); if (!idata->buf) { err = -ENOMEM; goto idata_err; } if (copy_from_user(idata->buf, (void __user *)(unsigned long) idata->ic.data_ptr, idata->buf_bytes)) { err = -EFAULT; goto copy_err; } return idata; copy_err: kfree(idata->buf); idata_err: kfree(idata); out: return ERR_PTR(err); } static int <API key>(struct mmc_card *card, u32 *status, u32 retries_max) { int err; u32 retry_count = 0; if (!status || !retries_max) return -EINVAL; do { err = get_card_status(card, status, 5); if (err) break; if (!R1_STATUS(*status) && (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) break; /* RPMB programming operation complete */ /* * Rechedule to give the MMC device a chance to continue * processing the previous command without being polled too * frequently. */ usleep_range(1000, 5000); } while (++retry_count < retries_max); if (retry_count == retries_max) err = -EPERM; return err; } static int ioctl_do_sanitize(struct mmc_card *card) { int err; if (!mmc_can_sanitize(card)) { pr_warn("%s: %s - SANITIZE is not supported\n", mmc_hostname(card->host), __func__); err = -EOPNOTSUPP; goto out; } pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", mmc_hostname(card->host), __func__); err = mmc_switch(card, <API key>, <API key>, 1, <API key>); if (err) pr_err("%s: %s - <API key> failed. err=%d\n", mmc_hostname(card->host), __func__, err); pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), __func__); out: return err; } static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { struct mmc_blk_ioc_data *idata; struct mmc_blk_data *md; struct mmc_card *card; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct mmc_request mrq = {NULL}; struct scatterlist sg; int err; int is_rpmb = false; u32 status = 0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the * whole block device, not on a partition. This prevents overspray * between sibling partitions. 
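 * Requiring the whole device (bd_contains) forces commands through
 * the explicit partition switch below instead of whatever partition
 * happens to be selected.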
*/ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) return -EPERM; idata = <API key>(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; goto cmd_err; } if (md->area_type & <API key>) is_rpmb = true; card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); goto cmd_done; } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; cmd.flags = idata->ic.flags; if (idata->buf_bytes) { data.sg = &sg; data.sg_len = 1; data.blksz = idata->ic.blksz; data.blocks = idata->ic.blocks; sg_init_one(data.sg, idata->buf, idata->buf_bytes); if (idata->ic.write_flag) data.flags = MMC_DATA_WRITE; else data.flags = MMC_DATA_READ; /* data.flags must already be set before doing this. */ <API key>(&data, card); /* Allow overriding the timeout_ns for empirical tuning. */ if (idata->ic.data_timeout_ns) data.timeout_ns = idata->ic.data_timeout_ns; if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { /* * Pretend this is a data transfer and rely on the * host driver to compute timeout. When all host * drivers support cmd.cmd_timeout for R1B, this * can be changed to: * * mrq.data = NULL; * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; */ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; } mrq.data = &data; } mrq.cmd = &cmd; mmc_get_card(card); err = mmc_blk_part_switch(card, md); if (err) goto cmd_rel_host; if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); if (err) goto cmd_rel_host; } if (is_rpmb) { err = mmc_set_blockcount(card, data.blocks, idata->ic.write_flag & (1 << 31)); if (err) goto cmd_rel_host; } if ((<API key>(cmd.arg) == <API key>) && (cmd.opcode == MMC_SWITCH)) { err = ioctl_do_sanitize(card); if (err) pr_err("%s: ioctl_do_sanitize() failed. err = %d", __func__, err); goto cmd_rel_host; } mmc_wait_for_req(card->host, &mrq); if (cmd.error) { dev_err(mmc_dev(card->host), "%s: cmd error %d\n", __func__, cmd.error); err = cmd.error; goto cmd_rel_host; } if (data.error) { dev_err(mmc_dev(card->host), "%s: data error %d\n", __func__, data.error); err = data.error; goto cmd_rel_host; } /* * According to the SD specs, some commands require a delay after * issuing the command. */ if (idata->ic.postsleep_min_us) usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { err = -EFAULT; goto cmd_rel_host; } if (!idata->ic.write_flag) { if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, idata->buf, idata->buf_bytes)) { err = -EFAULT; goto cmd_rel_host; } } if (is_rpmb) { /* * Ensure RPMB command has completed by polling CMD13 * "Send Status". 
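 * (i.e. poll CMD13 until the card reports ready and has left the
 * programming state)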
*/ err = <API key>(card, &status, 5); if (err) dev_err(mmc_dev(card->host), "%s: Card Status=0x%08X, error %d\n", __func__, status, err); } cmd_rel_host: mmc_put_card(card); cmd_done: mmc_blk_put(md); cmd_err: kfree(idata->buf); kfree(idata); return err; } static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; if (cmd == MMC_IOC_CMD) ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); return ret; } #ifdef CONFIG_COMPAT static int <API key>(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); } #endif static const struct <API key> mmc_bdops = { .open = mmc_blk_open, .release = mmc_blk_release, .getgeo = mmc_blk_getgeo, .owner = THIS_MODULE, .ioctl = mmc_blk_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = <API key>, #endif }; static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md) { int ret; struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); if (main_md->part_curr == md->part_type) return 0; if (mmc_card_mmc(card)) { u8 part_config = card->ext_csd.part_config; part_config &= ~<API key>; part_config |= md->part_type; ret = mmc_switch(card, <API key>, EXT_CSD_PART_CONFIG, part_config, card->ext_csd.part_time); if (ret) return ret; card->ext_csd.part_config = part_config; } main_md->part_curr = md->part_type; return 0; } static u32 <API key>(struct mmc_card *card) { int err; u32 result; __be32 *blocks; struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; struct scatterlist sg; cmd.opcode = MMC_APP_CMD; cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) return (u32)-1; if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD)) return (u32)-1; memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = <API key>; cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; <API key>(&data, card); mrq.cmd = &cmd; mrq.data = &data; blocks = kmalloc(4, GFP_KERNEL); if (!blocks) return (u32)-1; sg_init_one(&sg, blocks, 4); mmc_wait_for_req(card->host, &mrq); result = ntohl(*blocks); kfree(blocks); if (cmd.error || data.error) result = (u32)-1; return result; } static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; int err; cmd.opcode = MMC_SEND_STATUS; if (!mmc_host_is_spi(card->host)) cmd.arg = card->rca << 16; cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; err = mmc_wait_for_cmd(card->host, &cmd, retries); if (err == 0) *status = cmd.resp[0]; return err; } static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, bool hw_busy_detect, struct request *req, int *gen_err) { unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); int err = 0; u32 status; do { err = get_card_status(card, &status, 5); if (err) { pr_err("%s: error %d requesting status\n", req->rq_disk->disk_name, err); return err; } if (status & R1_ERROR) { pr_err("%s: %s: error sending status cmd, status % req->rq_disk->disk_name, __func__, status); *gen_err = 1; } /* We may rely on the host hw to handle busy detection.*/ if ((card->host->caps & <API key>) && hw_busy_detect) break; /* * Timeout if the device never becomes ready for data and never * leaves the program state. 
		 */
		if (time_after(jiffies, timeout)) {
			pr_err("%s: Card stuck in programming state! %s %s\n",
				mmc_hostname(card->host),
				req->rq_disk->disk_name, __func__);
			return -ETIMEDOUT;
		}

		/*
		 * Some cards mishandle the status bits,
		 * so make sure to check both the busy
		 * indication and the card state.
		 */
	} while (!(status & R1_READY_FOR_DATA) ||
		 (R1_CURRENT_STATE(status) == R1_STATE_PRG));

	return err;
}

static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
		struct request *req, int *gen_err, u32 *stop_status)
{
	struct mmc_host *host = card->host;
	struct mmc_command cmd = {0};
	int err;
	bool use_r1b_resp = rq_data_dir(req) == WRITE;

	/*
	 * Normally we use R1B responses for WRITE, but in cases where the host
	 * has specified a max_busy_timeout we need to validate it. A failure
	 * means we need to prevent the host from doing hw busy detection, which
	 * is done by converting to a R1 response instead.
	 */
	if (host->max_busy_timeout &&
	    (timeout_ms > host->max_busy_timeout))
		use_r1b_resp = false;

	cmd.opcode = <API key>;
	if (use_r1b_resp) {
		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
		cmd.busy_timeout = timeout_ms;
	} else {
		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
	}

	err = mmc_wait_for_cmd(host, &cmd, 5);
	if (err)
		return err;

	*stop_status = cmd.resp[0];

	/* No need to check card status in case of READ. */
	if (rq_data_dir(req) == READ)
		return 0;

	if (!mmc_host_is_spi(host) &&
	    (*stop_status & R1_ERROR)) {
		pr_err("%s: %s: general error sending stop command, resp %#x\n",
			req->rq_disk->disk_name, __func__, *stop_status);
		*gen_err = 1;
	}

	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
}

#define ERR_NOMEDIUM	3
#define ERR_RETRY	2
#define ERR_ABORT	1
#define ERR_CONTINUE	0

static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
	bool status_valid, u32 status)
{
	switch (error) {
	case -EILSEQ:
		/* response crc error, retry the r/w cmd */
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "response CRC error",
			name, status);
		return ERR_RETRY;

	case -ETIMEDOUT:
		pr_err("%s: %s sending %s command, card status %#x\n",
			req->rq_disk->disk_name, "timed out", name, status);

		/* If the status cmd initially failed, retry the r/w cmd */
		if (!status_valid)
			return ERR_RETRY;

		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
			return ERR_RETRY;

		/* Otherwise abort the command */
		return ERR_ABORT;

	default:
		/* We don't understand the error code the driver gave us */
		pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
		       req->rq_disk->disk_name, error, status);
		return ERR_ABORT;
	}
}

static int <API key>(struct mmc_card *card, struct request *req,
	struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
	bool <API key> = true;
	u32 status, stop_status = 0;
	int err, retry;

	if (mmc_card_removed(card))
		return ERR_NOMEDIUM;

	/*
	 * Try to get card status which indicates both the card state
	 * and why there was no response. If the first attempt fails,
	 * we can't be sure the returned status is for the r/w command.
	 */
	for (retry = 2; retry >= 0; retry--) {
		err = get_card_status(card, &status, 0);
		if (!err)
			break;

		/* Re-tune if needed */
		mmc_retune_recheck(card->host);

		<API key> = false;
		pr_err("%s: error %d sending status command, %sing\n",
		       req->rq_disk->disk_name, err, retry ? "retry" : "abort");
	}

	/* We couldn't get a response from the card. Give up.
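	 * (unless it was simply removed, which is reported as ERR_NOMEDIUM
	 * rather than an abort)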
*/ if (err) { /* Check if the card is removed */ if (<API key>(card->host)) return ERR_NOMEDIUM; return ERR_ABORT; } /* Flag ECC errors */ if ((status & R1_CARD_ECC_FAILED) || (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; /* Flag General errors */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) if ((status & R1_ERROR) || (brq->stop.resp[0] & R1_ERROR)) { pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0], status); *gen_err = 1; } /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { err = send_stop(card, DIV_ROUND_UP(brq->data.timeout_ns, 1000000), req, gen_err, &stop_status); if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); /* * If the stop cmd also timed out, the card is probably * not present, so abort. Other errors are bad news too. */ return ERR_ABORT; } if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; } /* Check for set block count errors */ if (brq->sbc.error) return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, <API key>, status); /* Check for r/w command errors */ if (brq->cmd.error) return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, <API key>, status); /* Data errors */ if (!brq->stop.error) return ERR_CONTINUE; /* Now for stop errors. These aren't fatal to the transfer. */ pr_info("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->stop.error, brq->cmd.resp[0], status); /* * Subsitute in our own stop status as this will give the error * state which happened during the execution of the r/w command. */ if (stop_status) { brq->stop.resp[0] = stop_status; brq->stop.error = 0; } return ERR_CONTINUE; } static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { int err; if (md->reset_done & type) return -EEXIST; md->reset_done |= type; err = mmc_hw_reset(host); /* Ensure we switch back to the correct partition */ if (err != -EOPNOTSUPP) { struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); int part_err; main_md->part_curr = main_md->part_type; part_err = mmc_blk_part_switch(host->card, md); if (part_err) { /* * We have failed to get back into the correct * partition, so we need to abort the whole request. */ return -ENODEV; } } return err; } static inline void <API key>(struct mmc_blk_data *md, int type) { md->reset_done &= ~type; } int mmc_access_rpmb(struct mmc_queue *mq) { struct mmc_blk_data *md = mq->data; /* * If this is a RPMB partition access, return ture */ if (md && md->part_type == <API key>) return true; return false; } static int <API key>(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_DISCARD; if (!mmc_can_erase(card)) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_discard(card)) arg = MMC_DISCARD_ARG; else if (mmc_can_trim(card)) arg = MMC_TRIM_ARG; else arg = MMC_ERASE_ARG; retry: if (card->quirks & <API key>) { err = mmc_switch(card, <API key>, <API key>, arg == MMC_TRIM_ARG ? 
<API key> : <API key>, 0); if (err) goto out; } err = mmc_erase(card, from, nr, arg); out: if (err == -EIO && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) <API key>(md, type); blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int <API key>(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; if (!(<API key>(card))) { err = -EOPNOTSUPP; goto out; } from = blk_rq_pos(req); nr = blk_rq_sectors(req); if (mmc_can_trim(card) && !<API key>(card, from, nr)) arg = <API key>; else arg = <API key>; retry: if (card->quirks & <API key>) { err = mmc_switch(card, <API key>, <API key>, arg == <API key> ? <API key> : <API key>, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, arg); if (err == -EIO) goto out_retry; if (err) goto out; if (arg == <API key>) { if (card->quirks & <API key>) { err = mmc_switch(card, <API key>, <API key>, <API key>, 0); if (err) goto out_retry; } err = mmc_erase(card, from, nr, <API key>); if (err == -EIO) goto out_retry; if (err) goto out; } out_retry: if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) <API key>(md, type); out: blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; int ret = 0; ret = mmc_flush_cache(card); if (ret) ret = -EIO; blk_end_request_all(req, ret); return ret ? 0 : 1; } /* * Reformat current write as a reliable write, supporting * both legacy and the enhanced reliable write MMC cards. * In each transfer we'll handle only as much as a single * reliable write can handle, thus finish the request in * partial completions. */ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, struct mmc_card *card, struct request *req) { if (!(card->ext_csd.rel_param & <API key>)) { /* Legacy mode imposes restrictions on transfers. */ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) brq->data.blocks = 1; if (brq->data.blocks > card->ext_csd.rel_sectors) brq->data.blocks = card->ext_csd.rel_sectors; else if (brq->data.blocks < card->ext_csd.rel_sectors) brq->data.blocks = 1; } } #define CMD_ERRORS \ (R1_OUT_OF_RANGE | /* Command argument out of range */ \ R1_ADDRESS_ERROR | /* Misaligned address */ \ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\ R1_WP_VIOLATION | /* Tried to write to protected block */ \ R1_CC_ERROR | /* Card controller error */ \ R1_ERROR) /* General/unknown error */ static int mmc_blk_err_check(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; int need_retune = card->host->need_retune; int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count * command. No data will have been transferred. * * cmd.error indicates a problem with the r/w command. No * data will have been transferred. * * stop.error indicates a problem with the stop command. Data * may have been transferred, or may still be transferring. 
*/ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { switch (<API key>(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: return MMC_BLK_ABORT; case ERR_NOMEDIUM: return MMC_BLK_NOMEDIUM; case ERR_CONTINUE: break; } } /* * Check for errors relating to the execution of the * initial command - such as address errors. No data * has been transferred. */ if (brq->cmd.resp[0] & CMD_ERRORS) { pr_err("%s: r/w command failed, status = %#x\n", req->rq_disk->disk_name, brq->cmd.resp[0]); return MMC_BLK_ABORT; } /* * Everything else is either success, or a data error of some * kind. If it was a write, we may have transitioned to * program mode, and we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { int err; /* Check stop command response */ if (brq->stop.resp[0] & R1_ERROR) { pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", req->rq_disk->disk_name, __func__, brq->stop.resp[0]); gen_err = 1; } err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, &gen_err); if (err) return MMC_BLK_CMD_ERR; } /* if general error occurs, retry the write operation. */ if (gen_err) { pr_warn("%s: retrying write for general error\n", req->rq_disk->disk_name); return MMC_BLK_RETRY; } if (brq->data.error) { if (need_retune && !brq->retune_retry_done) { pr_info("%s: retrying because a re-tune was needed\n", req->rq_disk->disk_name); brq->retune_retry_done = 1; return MMC_BLK_RETRY; } pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", req->rq_disk->disk_name, brq->data.error, (unsigned)blk_rq_pos(req), (unsigned)blk_rq_sectors(req), brq->cmd.resp[0], brq->stop.resp[0]); if (rq_data_dir(req) == READ) { if (ecc_err) return MMC_BLK_ECC_ERR; return MMC_BLK_DATA_ERR; } else { return MMC_BLK_CMD_ERR; } } if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; if (mmc_packed_cmd(mq_mrq->cmd_type)) { if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) return MMC_BLK_PARTIAL; else return MMC_BLK_SUCCESS; } if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } static int <API key>(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); struct request *req = mq_rq->req; struct mmc_packed *packed = mq_rq->packed; int err, check, status; u8 *ext_csd; BUG_ON(!packed); packed->retries--; check = mmc_blk_err_check(card, areq); err = get_card_status(card, &status, 0); if (err) { pr_err("%s: error %d sending status command\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if (status & R1_EXCEPTION_EVENT) { err = mmc_get_ext_csd(card, &ext_csd); if (err) { pr_err("%s: error %d sending ext_csd\n", req->rq_disk->disk_name, err); return MMC_BLK_ABORT; } if ((ext_csd[<API key>] & <API key>) && (ext_csd[<API key>] & <API key>)) { if (ext_csd[<API key>] & <API key>) { packed->idx_failure = ext_csd[<API key>] - 1; check = MMC_BLK_PARTIAL; } pr_err("%s: packed cmd failed, nr %u, sectors %u, " "failure index: %d\n", req->rq_disk->disk_name, packed->nr_entries, packed->blocks, packed->idx_failure); } kfree(ext_csd); } return check; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, struct mmc_queue *mq) { u32 readcmd, writecmd; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; bool do_data_tag; /* * Reliable writes are 
used to implement Forced Unit Access and * REQ_META accesses, and are supported only on MMCs. * * XXX: this really needs a good explanation of why REQ_META * is treated special. */ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || (req->cmd_flags & REQ_META)) && (rq_data_dir(req) == WRITE) && (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->stop.opcode = <API key>; brq->stop.arg = 0; brq->data.blocks = blk_rq_sectors(req); /* * The block layer doesn't support all sector count * restrictions, so we need to be prepared for too big * requests. */ if (brq->data.blocks > card->host->max_blk_count) brq->data.blocks = card->host->max_blk_count; if (brq->data.blocks > 1) { /* * After a read error, we redo the request one sector * at a time in order to accurately determine which * sectors can be read successfully. */ if (disable_multi) brq->data.blocks = 1; /* * Some controllers have HW issues while operating * in multiple I/O mode */ if (card->host->ops->multi_io_quirk) brq->data.blocks = card->host->ops->multi_io_quirk(card, (rq_data_dir(req) == READ) ? MMC_DATA_READ : MMC_DATA_WRITE, brq->data.blocks); } if (brq->data.blocks > 1 || do_rel_wr) { /* SPI multiblock writes terminate using a special * token, not a STOP_TRANSMISSION request. */ if (!mmc_host_is_spi(card->host) || rq_data_dir(req) == READ) brq->mrq.stop = &brq->stop; readcmd = <API key>; writecmd = <API key>; } else { brq->mrq.stop = NULL; readcmd = <API key>; writecmd = MMC_WRITE_BLOCK; } if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; if (brq->mrq.stop) brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; if (brq->mrq.stop) brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ do_data_tag = (card->ext_csd.data_tag_unit_size) && (req->cmd_flags & REQ_META) && (rq_data_dir(req) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, * and avoid the final CMD12, as on an error condition * CMD12 (stop) needs to be sent anyway. This, coupled * with Auto-CMD23 enhancements provided by some * hosts, means that the complexity of dealing * with this is best left to the host. If CMD23 is * supported by card and host, we'll fill sbc in and let * the host deal with handling it correctly. This means * that for hosts that don't expose MMC_CAP_CMD23, no * change of behavior will be observed. * * N.B: Some MMC cards experience perf degradation. * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && (do_rel_wr || !(card->quirks & <API key>) || do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | (do_rel_wr ? (1 << 31) : 0) | (do_data_tag ? 
(1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } <API key>(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); /* * Adjust the sg list so it is the same size as the * request. */ if (brq->data.blocks != blk_rq_sectors(req)) { int i, data_size = brq->data.blocks << 9; struct scatterlist *sg; for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { data_size -= sg->length; if (data_size <= 0) { sg->length += data_size; i++; break; } } brq->data.sg_len = i; } mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.err_check = mmc_blk_err_check; <API key>(mqrq); } static inline u8 <API key>(struct request_queue *q, struct mmc_card *card) { unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512; unsigned int max_seg_sz = <API key>(q); unsigned int len, nr_segs = 0; do { len = min(hdr_sz, max_seg_sz); hdr_sz -= len; nr_segs++; } while (hdr_sz); return nr_segs; } static u8 <API key>(struct mmc_queue *mq, struct request *req) { struct request_queue *q = mq->queue; struct mmc_card *card = mq->card; struct request *cur = req, *next = NULL; struct mmc_blk_data *md = mq->data; struct mmc_queue_req *mqrq = mq->mqrq_cur; bool en_rel_wr = card->ext_csd.rel_param & <API key>; unsigned int req_sectors = 0, phys_segments = 0; unsigned int max_blk_count, max_phys_segs; bool put_back = true; u8 max_packed_rw = 0; u8 reqs = 0; if (!(md->flags & MMC_BLK_PACKED_CMD)) goto no_packed; if ((rq_data_dir(cur) == WRITE) && mmc_host_packed_wr(card->host)) max_packed_rw = card->ext_csd.max_packed_writes; if (max_packed_rw == 0) goto no_packed; if (mmc_req_rel_wr(cur) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) goto no_packed; if (mmc_large_sector(card) && !IS_ALIGNED(blk_rq_sectors(cur), 8)) goto no_packed; <API key>(mqrq); max_blk_count = min(card->host->max_blk_count, card->host->max_req_size >> 9); if (unlikely(max_blk_count > 0xffff)) max_blk_count = 0xffff; max_phys_segs = queue_max_segments(q); req_sectors += blk_rq_sectors(cur); phys_segments += cur->nr_phys_segments; if (rq_data_dir(cur) == WRITE) { req_sectors += mmc_large_sector(card) ? 
8 : 1; phys_segments += <API key>(q, card); } do { if (reqs >= max_packed_rw - 1) { put_back = false; break; } spin_lock_irq(q->queue_lock); next = blk_fetch_request(q); spin_unlock_irq(q->queue_lock); if (!next) { put_back = false; break; } if (mmc_large_sector(card) && !IS_ALIGNED(blk_rq_sectors(next), 8)) break; if (next->cmd_flags & REQ_DISCARD || next->cmd_flags & REQ_FLUSH) break; if (rq_data_dir(cur) != rq_data_dir(next)) break; if (mmc_req_rel_wr(next) && (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) break; req_sectors += blk_rq_sectors(next); if (req_sectors > max_blk_count) break; phys_segments += next->nr_phys_segments; if (phys_segments > max_phys_segs) break; list_add_tail(&next->queuelist, &mqrq->packed->list); cur = next; reqs++; } while (1); if (put_back) { spin_lock_irq(q->queue_lock); blk_requeue_request(q, next); spin_unlock_irq(q->queue_lock); } if (reqs > 0) { list_add(&req->queuelist, &mqrq->packed->list); mqrq->packed->nr_entries = ++reqs; mqrq->packed->retries = reqs; return reqs; } no_packed: mqrq->cmd_type = MMC_PACKED_NONE; return 0; } static void <API key>(struct mmc_queue_req *mqrq, struct mmc_card *card, struct mmc_queue *mq) { struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct request *prq; struct mmc_blk_data *md = mq->data; struct mmc_packed *packed = mqrq->packed; bool do_rel_wr, do_data_tag; u32 *packed_cmd_hdr; u8 hdr_blocks; u8 i = 1; BUG_ON(!packed); mqrq->cmd_type = MMC_PACKED_WRITE; packed->blocks = 0; packed->idx_failure = MMC_PACKED_NR_IDX; packed_cmd_hdr = packed->cmd_hdr; memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); packed_cmd_hdr[0] = (packed->nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER; hdr_blocks = mmc_large_sector(card) ? 8 : 1; /* * Argument for each entry of packed group */ list_for_each_entry(prq, &packed->list, queuelist) { do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); do_data_tag = (card->ext_csd.data_tag_unit_size) && (prq->cmd_flags & REQ_META) && (rq_data_dir(prq) == WRITE) && ((brq->data.blocks * brq->data.blksz) >= card->ext_csd.data_tag_unit_size); /* Argument of CMD23 */ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? <API key> : 0) | (do_data_tag ? <API key> : 0) | blk_rq_sectors(prq); /* Argument of CMD18 or CMD25 */ packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ? 
blk_rq_pos(prq) : blk_rq_pos(prq) << 9; packed->blocks += blk_rq_sectors(prq); i++; } memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.cmd = &brq->cmd; brq->mrq.data = &brq->data; brq->mrq.sbc = &brq->sbc; brq->mrq.stop = &brq->stop; brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = <API key> | (packed->blocks + hdr_blocks); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->cmd.opcode = <API key>; brq->cmd.arg = blk_rq_pos(req); if (!mmc_card_blockaddr(card)) brq->cmd.arg <<= 9; brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; brq->data.blksz = 512; brq->data.blocks = packed->blocks + hdr_blocks; brq->data.flags |= MMC_DATA_WRITE; brq->stop.opcode = <API key>; brq->stop.arg = 0; brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; <API key>(&brq->data, card); brq->data.sg = mqrq->sg; brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); mqrq->mmc_active.mrq = &brq->mrq; mqrq->mmc_active.err_check = <API key>; <API key>(mqrq); } static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { struct mmc_queue_req *mq_rq; mq_rq = container_of(brq, struct mmc_queue_req, brq); /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. * * If the card is not SD, we can still ok written sectors * as reported by the controller (which might be less than * the real number of written sectors, but never more). */ if (mmc_card_sd(card)) { u32 blocks; blocks = <API key>(card); if (blocks != (u32)-1) { ret = blk_end_request(req, 0, blocks << 9); } } else { if (!mmc_packed_cmd(mq_rq->cmd_type)) ret = blk_end_request(req, 0, brq->data.bytes_xfered); } return ret; } static int <API key>(struct mmc_queue_req *mq_rq) { struct request *prq; struct mmc_packed *packed = mq_rq->packed; int idx = packed->idx_failure, i = 0; int ret = 0; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.next); if (idx == i) { /* retry from error index */ packed->nr_entries -= idx; mq_rq->req = prq; ret = 1; if (packed->nr_entries == <API key>) { list_del_init(&prq->queuelist); <API key>(mq_rq); } return ret; } list_del_init(&prq->queuelist); blk_end_request(prq, 0, blk_rq_bytes(prq)); i++; } <API key>(mq_rq); return ret; } static void <API key>(struct mmc_queue_req *mq_rq) { struct request *prq; struct mmc_packed *packed = mq_rq->packed; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.next); list_del_init(&prq->queuelist); blk_end_request(prq, -EIO, blk_rq_bytes(prq)); } <API key>(mq_rq); } static void <API key>(struct mmc_queue *mq, struct mmc_queue_req *mq_rq) { struct request *prq; struct request_queue *q = mq->queue; struct mmc_packed *packed = mq_rq->packed; BUG_ON(!packed); while (!list_empty(&packed->list)) { prq = list_entry_rq(packed->list.prev); if (prq->queuelist.prev != &packed->list) { list_del_init(&prq->queuelist); spin_lock_irq(q->queue_lock); blk_requeue_request(mq->queue, prq); spin_unlock_irq(q->queue_lock); } else { list_del_init(&prq->queuelist); } } <API key>(mq_rq); } static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mq->mqrq_cur->brq; int ret = 1, disable_multi = 0, retry = 0, type, retune_retry_done = 0; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; struct request *req = rqc; struct mmc_async_req *areq; const u8 packed_nr = 2; u8 reqs = 0; if (!rqc && 
!mq->mqrq_prev->req) return 0; if (rqc) reqs = <API key>(mq, rqc); do { if (rqc) { /* * When 4KB native sector is enabled, only 8 blocks * multiple read or write is allowed */ if ((brq->data.blocks & 0x07) && (card->ext_csd.data_sector_size == 4096)) { pr_err("%s: Transfer size is not 4KB sector size aligned\n", req->rq_disk->disk_name); mq_rq = mq->mqrq_cur; goto cmd_abort; } if (reqs >= packed_nr) <API key>(mq->mqrq_cur, card, mq); else mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); if (!areq) { if (status == MMC_BLK_NEW_REQUEST) mq->flags |= <API key>; return 0; } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; req = mq_rq->req; type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; <API key>(mq_rq); switch (status) { case MMC_BLK_SUCCESS: case MMC_BLK_PARTIAL: /* * A block was successfully transferred. */ <API key>(md, type); if (mmc_packed_cmd(mq_rq->cmd_type)) { ret = <API key>(mq_rq); break; } else { ret = blk_end_request(req, 0, brq->data.bytes_xfered); } /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors * were returned by the host controller, it's a bug. */ if (status == MMC_BLK_SUCCESS && ret) { pr_err("%s BUG rq_tot %d d_xfer %d\n", __func__, blk_rq_bytes(req), brq->data.bytes_xfered); rqc = NULL; goto cmd_abort; } break; case MMC_BLK_CMD_ERR: ret = mmc_blk_cmd_err(md, card, brq, req, ret); if (mmc_blk_reset(md, card->host, type)) goto cmd_abort; if (!ret) goto start_new_req; break; case MMC_BLK_RETRY: retune_retry_done = brq->retune_retry_done; if (retry++ < 5) break; /* Fall through */ case MMC_BLK_ABORT: if (!mmc_blk_reset(md, card->host, type)) break; goto cmd_abort; case MMC_BLK_DATA_ERR: { int err; err = mmc_blk_reset(md, card->host, type); if (!err) break; if (err == -ENODEV || mmc_packed_cmd(mq_rq->cmd_type)) goto cmd_abort; /* Fall through */ } case MMC_BLK_ECC_ERR: if (brq->data.blocks > 1) { /* Redo read one sector at a time */ pr_warn("%s: retrying using single block read\n", req->rq_disk->disk_name); disable_multi = 1; break; } /* * After an error, we redo I/O one sector at a * time, so we only reach here after trying to * read a single sector. */ ret = blk_end_request(req, -EIO, brq->data.blksz); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; default: pr_err("%s: Unhandled return value (%d)\n", req->rq_disk->disk_name, status); goto cmd_abort; } if (ret) { if (mmc_packed_cmd(mq_rq->cmd_type)) { if (!mq_rq->packed->retries) goto cmd_abort; <API key>(mq_rq, card, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } else { /* * In case of an incomplete request, * prepare it again and resend. */ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); mmc_start_req(card->host, &mq_rq->mmc_active, NULL); } mq_rq->brq.retune_retry_done = retune_retry_done; } } while (ret); return 1; cmd_abort: if (mmc_packed_cmd(mq_rq->cmd_type)) { <API key>(mq_rq); } else { if (mmc_card_removed(card)) req->cmd_flags |= REQ_QUIET; while (ret) ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); } start_new_req: if (rqc) { if (mmc_card_removed(card)) { rqc->cmd_flags |= REQ_QUIET; blk_end_request_all(rqc, -EIO); } else { /* * If the current request is packed, it needs to be put back. 
*/ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) <API key>(mq, mq->mqrq_cur); mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); } } return 0; } static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) { int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; struct mmc_host *host = card->host; unsigned long flags; unsigned int cmd_flags = req ? req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ mmc_get_card(card); ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { blk_end_request_all(req, -EIO); } ret = 0; goto out; } mq->flags &= ~<API key>; if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); if (req->cmd_flags & REQ_SECURE) ret = <API key>(mq, req); else ret = <API key>(mq, req); } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { if (!req && host->areq) { spin_lock_irqsave(&host->context_info.lock, flags); host->context_info.is_waiting_last_req = true; <API key>(&host->context_info.lock, flags); } ret = mmc_blk_issue_rw_rq(mq, req); } out: if ((!req && !(mq->flags & <API key>)) || (cmd_flags & <API key>)) /* * Release host when there are no more requests * and after a special request (discard, flush) is done. * In case of a special request, there is no reentry to * 'mmc_blk_issue_rq' with 'mqrq_prev->req'. */ mmc_put_card(card); return ret; } static inline int mmc_blk_readonly(struct mmc_card *card) { return mmc_card_readonly(card) || !(card->csd.cmdclass & CCC_BLOCK_WRITE); } static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, struct device *parent, sector_t size, bool default_ro, const char *subname, int area_type) { struct mmc_blk_data *md; int devidx, ret; devidx = find_first_zero_bit(dev_use, max_devices); if (devidx >= max_devices) return ERR_PTR(-ENOSPC); __set_bit(devidx, dev_use); md = kzalloc(sizeof(struct mmc_blk_data), GFP_KERNEL); if (!md) { ret = -ENOMEM; goto out; } /* * !subname implies we are creating main mmc_blk_data that will be * associated with mmc_card with dev_set_drvdata. Due to device * partitions, devidx will not coincide with a per-physical card * index anymore so we keep track of a name index. */ if (!subname) { int idx; idx = <API key>(card->host); if (idx >= 0 && !test_bit(idx, name_use)) md->name_idx = idx; else md->name_idx = find_next_zero_bit(name_use, max_devices, <API key>()); __set_bit(md->name_idx, name_use); } else md->name_idx = ((struct mmc_blk_data *) dev_to_disk(parent)->private_data)->name_idx; md->area_type = area_type; /* * Set the read-only status based on the supported commands * and the write protect switch. 
*/ md->read_only = mmc_blk_readonly(card); md->disk = alloc_disk(perdev_minors); if (md->disk == NULL) { ret = -ENOMEM; goto err_kfree; } spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); if (ret) goto err_putdisk; md->queue.issue_fn = mmc_blk_issue_rq; md->queue.data = md; md->disk->major = MMC_BLOCK_MAJOR; md->disk->first_minor = devidx * perdev_minors; md->disk->fops = &mmc_bdops; md->disk->private_data = md; md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); if (area_type & (<API key> | <API key>)) md->disk->flags |= <API key>; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: * * - be set for removable media with permanent block devices * - be unset for removable block devices with permanent media * * Since MMC block devices clearly fall under the second * case, we do not set GENHD_FL_REMOVABLE. Userspace * should use the block device creation/destruction hotplug * messages to tell when the card is present. */ snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%u%s", md->name_idx, subname ? subname : ""); if (mmc_card_mmc(card)) <API key>(md->queue.queue, card->ext_csd.data_sector_size); else <API key>(md->queue.queue, 512); set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { if (mmc_card_mmc(card) || (mmc_card_sd(card) && card->scr.cmds & <API key>)) md->flags |= MMC_BLK_CMD23; } if (mmc_card_mmc(card) && md->flags & MMC_BLK_CMD23 && ((card->ext_csd.rel_param & <API key>) || card->ext_csd.rel_sectors)) { md->flags |= MMC_BLK_REL_WR; blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } if (mmc_card_mmc(card) && (area_type == <API key>) && (md->flags & MMC_BLK_CMD23) && card->ext_csd.packed_event_en) { if (!mmc_packed_init(&md->queue, card)) md->flags |= MMC_BLK_PACKED_CMD; } return md; err_putdisk: put_disk(md->disk); err_kfree: kfree(md); out: return ERR_PTR(ret); } static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) { sector_t size; if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { /* * The EXT_CSD sector count is in number of 512 byte * sectors. */ size = card->ext_csd.sectors; } else { /* * The CSD capacity field is in units of read_blkbits. * set_capacity takes units of 512 bytes. */ size = (typeof(sector_t))card->csd.capacity << (card->csd.read_blkbits - 9); } return mmc_blk_alloc_req(card, &card->dev, size, false, NULL, <API key>); } static int mmc_blk_alloc_part(struct mmc_card *card, struct mmc_blk_data *md, unsigned int part_type, sector_t size, bool default_ro, const char *subname, int area_type) { char cap_str[10]; struct mmc_blk_data *part_md; part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, subname, area_type); if (IS_ERR(part_md)) return PTR_ERR(part_md); part_md->part_type = part_type; list_add(&part_md->part, &md->part); string_get_size((u64)get_capacity(part_md->disk), 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s partition %u %s\n", part_md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), part_md->part_type, cap_str); return 0; } /* MMC Physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocated * to provide access to the partition. 
*/ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { int idx, ret = 0; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, card->part[idx].force_ro, card->part[idx].name, card->part[idx].area_type); if (ret) return ret; } } return ret; } static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { /* * Flush remaining requests and free queues. It * is freeing the queue that stops new requests * from being accepted. */ card = md->queue.card; mmc_cleanup_queue(&md->queue); if (md->flags & MMC_BLK_PACKED_CMD) mmc_packed_clean(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & <API key>) && card->ext_csd.boot_ro_lockable) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); del_gendisk(md->disk); } mmc_blk_put(md); } } static void <API key>(struct mmc_card *card, struct mmc_blk_data *md) { struct list_head *pos, *q; struct mmc_blk_data *part_md; __clear_bit(md->name_idx, name_use); list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); mmc_blk_remove_req(part_md); } } static int mmc_add_disk(struct mmc_blk_data *md) { int ret; struct mmc_card *card = md->queue.card; add_disk(md->disk); md->force_ro.show = force_ro_show; md->force_ro.store = force_ro_store; sysfs_attr_init(&md->force_ro.attr); md->force_ro.attr.name = "force_ro"; md->force_ro.attr.mode = S_IRUGO | S_IWUSR; ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); if (ret) goto force_ro_fail; if ((md->area_type & <API key>) && card->ext_csd.boot_ro_lockable) { umode_t mode; if (card->ext_csd.boot_ro_lock & <API key>) mode = S_IRUGO; else mode = S_IRUGO | S_IWUSR; md->power_ro_lock.show = power_ro_lock_show; md->power_ro_lock.store = power_ro_lock_store; sysfs_attr_init(&md->power_ro_lock.attr); md->power_ro_lock.attr.mode = mode; md->power_ro_lock.attr.name = "<API key>"; ret = device_create_file(disk_to_dev(md->disk), &md->power_ro_lock); if (ret) goto power_ro_lock_fail; } return ret; power_ro_lock_fail: device_remove_file(disk_to_dev(md->disk), &md->force_ro); force_ro_fail: del_gendisk(md->disk); return ret; } #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 #define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, <API key>), MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, <API key>), MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, <API key>), MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, <API key>), MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, <API key>), /* * Some MMC cards experience performance degradation with CMD23 * instead of CMD12-bounded multiblock transfers. For now we'll * black list what's bad... * - Certain Toshiba cards. * * N.B. This doesn't affect SD cards. 
*/ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, <API key>), /* * Some Micron MMC cards need longer data read timeout than * indicated in CSD. */ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, <API key>), /* * On these Samsung MoviNAND parts, performing secure erase or * secure trim can result in unrecoverable corruption due to a * firmware bug. */ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, <API key>), END_FIXUP }; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; char cap_str[10]; /* * Check that the card supports the command class(es) we need. */ if (!(card->csd.cmdclass & CCC_BLOCK_READ)) return -ENODEV; mmc_fixup_device(card, blk_fixups); md = mmc_blk_alloc(card); if (IS_ERR(md)) return PTR_ERR(md); string_get_size((u64)get_capacity(md->disk), 512, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", md->disk->disk_name, mmc_card_id(card), mmc_card_name(card), cap_str, md->read_only ? "(ro)" : ""); if (mmc_blk_alloc_parts(card, md)) goto out; dev_set_drvdata(&card->dev, md); if (mmc_add_disk(md)) goto out; list_for_each_entry(part_md, &md->part, part) { if (mmc_add_disk(part_md)) goto out; } <API key>(&card->dev, 3000); <API key>(&card->dev); /* * Don't enable runtime PM for SD-combo cards here. Leave that * decision to be taken during the SDIO init sequence instead. 
*/ if (card->type != MMC_TYPE_SD_COMBO) { <API key>(&card->dev); pm_runtime_enable(&card->dev); } return 0; out: <API key>(card, md); mmc_blk_remove_req(md); return 0; } static void mmc_blk_remove(struct mmc_card *card) { struct mmc_blk_data *md = dev_get_drvdata(&card->dev); <API key>(card, md); pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); if (card->type != MMC_TYPE_SD_COMBO) pm_runtime_disable(&card->dev); <API key>(&card->dev); mmc_blk_remove_req(md); dev_set_drvdata(&card->dev, NULL); } static int _mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = dev_get_drvdata(&card->dev); if (md) { mmc_queue_suspend(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_suspend(&part_md->queue); } } return 0; } static void mmc_blk_shutdown(struct mmc_card *card) { _mmc_blk_suspend(card); } #ifdef CONFIG_PM_SLEEP static int mmc_blk_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); return _mmc_blk_suspend(card); } static int mmc_blk_resume(struct device *dev) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = dev_get_drvdata(dev); if (md) { /* * Resume involves the card going into idle state, * so current partition is always the main one. */ md->part_curr = md->part_type; mmc_queue_resume(&md->queue); list_for_each_entry(part_md, &md->part, part) { mmc_queue_resume(&part_md->queue); } } return 0; } #endif static SIMPLE_DEV_PM_OPS(mmc_blk_pm_ops, mmc_blk_suspend, mmc_blk_resume); static struct mmc_driver mmc_driver = { .drv = { .name = "mmcblk", .pm = &mmc_blk_pm_ops, }, .probe = mmc_blk_probe, .remove = mmc_blk_remove, .shutdown = mmc_blk_shutdown, }; static int __init mmc_blk_init(void) { int res; if (perdev_minors != <API key>) pr_info("mmcblk: using %d minors per device\n", perdev_minors); max_devices = min(MAX_DEVICES, (1 << MINORBITS) / perdev_minors); res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) goto out; res = mmc_register_driver(&mmc_driver); if (res) goto out2; return 0; out2: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); out: return res; } static void __exit mmc_blk_exit(void) { <API key>(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); } module_init(mmc_blk_init); module_exit(mmc_blk_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
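/*
 * Illustrative sketch only, not part of the original driver: the shape of
 * one more entry for the table-driven quirk scheme used by blk_fixups[]
 * above. The CID name "XYZ42" and manfid 0x42 are made-up values, and the
 * table name is hypothetical; a real entry would simply be added inside
 * blk_fixups[]. mmc_fixup_device(card, blk_fixups) in mmc_blk_probe()
 * walks the table, matches the CID fields and calls the hook
 * (add_quirk_mmc) to set the quirk bit -- here MMC_QUIRK_BLK_NO_CMD23,
 * which makes the driver avoid CMD23-bounded multiblock transfers.
 */
static const struct mmc_fixup example_blk_fixups[] __maybe_unused = {
	/* hypothetical card: match by CID name + manfid, any OEM id */
	MMC_FIXUP("XYZ42", 0x42, CID_OEMID_ANY, add_quirk_mmc,
		  MMC_QUIRK_BLK_NO_CMD23),
	END_FIXUP
};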
/* ScriptData SDName: Boss_Moorabi SD%Complete: 20% SDComment: SDCategory: Gundrak EndScriptData */ #include "precompiled.h" #include "gundrak.h" enum { SAY_AGGRO = -1604011, SAY_QUAKE = -1604012, SAY_TRANSFORM = -1604013, SAY_SLAY_1 = -1604014, SAY_SLAY_2 = -1604015, SAY_SLAY_3 = -1604016, SAY_DEATH = -1604017, EMOTE_TRANSFORM = -1604018, EMOTE_TRANSFORMED = -1604029, // Troll form <API key> = 55104, SPELL_MOJO_FRENZY = 55163, SPELL_GROUND_TREMOR = 55142, SPELL_NUMBING_SHOUT = 55106, <API key> = 55098, // Mammoth <API key> = 55102, <API key> = 59444, SPELL_QUAKE = 55101, SPELL_NUMBING_ROAR = 55100, }; struct boss_moorabiAI : public ScriptedAI { boss_moorabiAI(Creature* pCreature) : ScriptedAI(pCreature) { m_pInstance = (instance_gundrak*)pCreature->GetInstanceData(); m_bIsRegularMode = pCreature->GetMap()->IsRegularDifficulty(); Reset(); } instance_gundrak* m_pInstance; bool m_bIsRegularMode; uint32 m_uiStabTimer; // used for stab and gore uint32 m_uiQuakeTimer; // used for quake and ground tremor uint32 m_uiRoarTimer; // both roars on it uint32 <API key>; uint32 m_uiPreviousTimer; bool m_bMammothPhase; void Reset() override { m_bMammothPhase = false; m_uiStabTimer = 8000; m_uiQuakeTimer = 1000; m_uiRoarTimer = 7000; <API key> = 10000; m_uiPreviousTimer = 10000; } void Aggro(Unit* /*pWho*/) override { DoScriptText(SAY_AGGRO, m_creature); DoCastSpellIfCan(m_creature, SPELL_MOJO_FRENZY); if (m_pInstance) m_pInstance->SetData(TYPE_MOORABI, IN_PROGRESS); } void KilledUnit(Unit* /*pVictim*/) override { switch (urand(0, 2)) { case 0: DoScriptText(SAY_SLAY_1, m_creature); break; case 1: DoScriptText(SAY_SLAY_2, m_creature); break; case 2: DoScriptText(SAY_SLAY_3, m_creature); break; } } void JustDied(Unit* /*pKiller*/) override { DoScriptText(SAY_DEATH, m_creature); if (m_pInstance) m_pInstance->SetData(TYPE_MOORABI, DONE); } void UpdateAI(const uint32 uiDiff) override { if (!m_creature->SelectHostileTarget() || !m_creature->getVictim()) return; if (m_creature->HasAura(<API key>) && !m_bMammothPhase) { DoScriptText(EMOTE_TRANSFORMED, m_creature); m_bMammothPhase = true; // Set the achievement to failed if (m_pInstance) m_pInstance-><API key>(false); } if (m_uiRoarTimer < uiDiff) { DoCastSpellIfCan(m_creature->getVictim(), m_bMammothPhase ? SPELL_NUMBING_ROAR : SPELL_NUMBING_SHOUT); m_uiRoarTimer = 20000; } else m_uiRoarTimer -= uiDiff; if (m_uiQuakeTimer < uiDiff) { DoScriptText(SAY_QUAKE, m_creature); DoCastSpellIfCan(m_creature->getVictim(), m_bMammothPhase ? SPELL_QUAKE : SPELL_GROUND_TREMOR); m_uiQuakeTimer = m_bMammothPhase ? 13000 : 18000; } else m_uiQuakeTimer -= uiDiff; if (m_uiStabTimer < uiDiff) { if (m_bMammothPhase) DoCastSpellIfCan(m_creature->getVictim(), m_bIsRegularMode ? <API key> : <API key>); else DoCastSpellIfCan(m_creature->getVictim(), <API key>); m_uiStabTimer = 7000; } else m_uiStabTimer -= uiDiff; // check only in troll phase if (!m_bMammothPhase) { if (<API key> < uiDiff) { DoScriptText(SAY_TRANSFORM, m_creature); DoScriptText(EMOTE_TRANSFORM, m_creature); DoCastSpellIfCan(m_creature, <API key>); m_uiPreviousTimer *= 0.8; <API key> = m_uiPreviousTimer; } else <API key> -= uiDiff; } <API key>(); } }; CreatureAI* GetAI_boss_moorabi(Creature* pCreature) { return new boss_moorabiAI(pCreature); } void AddSC_boss_moorabi() { Script* pNewScript; pNewScript = new Script; pNewScript->Name = "boss_moorabi"; pNewScript->GetAI = &GetAI_boss_moorabi; pNewScript->RegisterSelf(); }
#ifndef <API key> #define <API key> #include <linux/mm.h> #include <asm/glue-cache.h> #include <asm/shmparam.h> #include <asm/cachetype.h> #include <asm/outercache.h> #include <asm/rodata.h> #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT) /* * This flag is used to indicate that the page pointed to by a pte is clean * and does not require cleaning before returning it to the user. */ #define PG_dcache_clean PG_arch_1 struct cpu_cache_fns { void (*flush_icache_all)(void); void (*flush_kern_all)(void); void (*flush_user_all)(void); void (*flush_user_range)(unsigned long, unsigned long, unsigned int); void (*coherent_kern_range)(unsigned long, unsigned long); void (*coherent_user_range)(unsigned long, unsigned long); void (*<API key>)(void *, size_t); void (*dma_map_area)(const void *, size_t, int); void (*dma_unmap_area)(const void *, size_t, int); void (*dma_flush_range)(const void *, const void *); }; /* * Select the calling method */ #ifdef MULTI_CACHE extern struct cpu_cache_fns cpu_cache; #define <API key> cpu_cache.flush_icache_all #define <API key> cpu_cache.flush_kern_all #define <API key> cpu_cache.flush_user_all #define <API key> cpu_cache.flush_user_range #define <API key> cpu_cache.coherent_kern_range #define <API key> cpu_cache.coherent_user_range #define <API key> cpu_cache.<API key> /* * These are private to the dma-mapping API. Do not use directly. * Their sole purpose is to ensure that data held in the cache * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ #define dmac_map_area cpu_cache.dma_map_area #define dmac_unmap_area cpu_cache.dma_unmap_area #define dmac_flush_range cpu_cache.dma_flush_range #else extern void <API key>(void); extern void <API key>(void); extern void <API key>(void); extern void <API key>(unsigned long, unsigned long, unsigned int); extern void <API key>(unsigned long, unsigned long); extern void <API key>(unsigned long, unsigned long); extern void <API key>(void *, size_t); /* * These are private to the dma-mapping API. Do not use directly. * Their sole purpose is to ensure that data held in the cache * is visible to DMA, or data written by DMA to system memory is * visible to the CPU. */ extern void dmac_map_area(const void *, size_t, int); extern void dmac_unmap_area(const void *, size_t, int); extern void dmac_flush_range(const void *, const void *); #endif /* * Copy user data from/to a page which is mapped into a different * processes address space. Really, we want to allow our "user * space" model to handle this. */ extern void copy_to_user_page(struct vm_area_struct *, struct page *, unsigned long, void *, const void *, unsigned long); #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ } while (0) /* * Convert calls to our calling convention. */ /* Invalidate I-cache */ #define <API key>() \ asm("mcr p15, 0, %0, c7, c5, 0" \ : : "r" (0)); /* Invalidate I-cache inner shareable */ #define <API key>() \ asm("mcr p15, 0, %0, c7, c1, 0" \ : : "r" (0)); /* * Optimized __flush_icache_all for the common cases. Note that UP ARMv7 * will fall through to use <API key>. 
*/ #if (defined(CONFIG_CPU_V7) && \ (defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K))) || \ defined(CONFIG_SMP_ON_UP) #define <API key> <API key> #elif __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP) #define <API key> <API key> #elif __LINUX_ARM_ARCH__ == 6 && defined(<API key>) #define <API key> <API key> #else #define <API key> <API key> #endif static inline void __flush_icache_all(void) { <API key>(); } #define flush_cache_all() <API key>() #ifndef CONFIG_SMP #define <API key>() flush_cache_all() #else extern void <API key>(void); #endif static inline void vivt_flush_cache_mm(struct mm_struct *mm) { if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) <API key>(); } static inline void <API key>(struct vm_area_struct *vma, unsigned long start, unsigned long end) { struct mm_struct *mm = vma->vm_mm; if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) <API key>(start & PAGE_MASK, PAGE_ALIGN(end), vma->vm_flags); } static inline void <API key>(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn) { struct mm_struct *mm = vma->vm_mm; if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) { unsigned long addr = user_addr & PAGE_MASK; <API key>(addr, addr + PAGE_SIZE, vma->vm_flags); } } #ifndef <API key> #define flush_cache_mm(mm) \ vivt_flush_cache_mm(mm) #define flush_cache_range(vma,start,end) \ <API key>(vma,start,end) #define flush_cache_page(vma,addr,pfn) \ <API key>(vma,addr,pfn) #else extern void flush_cache_mm(struct mm_struct *mm); extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn); #endif #define flush_cache_dup_mm(mm) flush_cache_mm(mm) /* * <API key> is used when we want to ensure that the * Harvard caches are synchronised for the user space address range. * This is used for the ARM private sys_cacheflush system call. */ #define <API key>(start,end) \ <API key>((start) & PAGE_MASK, PAGE_ALIGN(end)) /* * Perform necessary cache operations to ensure that data previously * stored within this range of addresses can be executed by the CPU. */ #define flush_icache_range(s,e) <API key>(s,e) /* * Perform necessary cache operations to ensure that the TLB will * see data written in the specified area. */ #define clean_dcache_area(start,size) <API key>(start, size) /* * flush_dcache_page is used when the kernel has written to the page * cache page at virtual address page->virtual. * * If this page isn't mapped (ie, page_mapping == NULL), or it might * have userspace mappings, then we _must_ always clean + invalidate * the dcache entries associated with the kernel mapping. * * Otherwise we can defer the operation, and clean the cache when we are * about to change to user space. This is the same method as used on SPARC64. * See update_mmu_cache for the user space part. 
*/ #define <API key> 1 extern void flush_dcache_page(struct page *); static inline void <API key>(void *addr, int size) { if ((cache_is_vivt() || <API key>())) <API key>(addr, (size_t)size); } static inline void <API key>(void *addr, int size) { if ((cache_is_vivt() || <API key>())) <API key>(addr, (size_t)size); } #define <API key> static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr) { extern void __flush_anon_page(struct vm_area_struct *vma, struct page *, unsigned long); if (PageAnon(page)) __flush_anon_page(vma, page, vmaddr); } #define <API key> static inline void <API key>(struct page *page) { } #define <API key>(mapping) \ spin_lock_irq(&(mapping)->tree_lock) #define <API key>(mapping) \ spin_unlock_irq(&(mapping)->tree_lock) #define <API key>(vma,page,addr,len) \ flush_dcache_page(page) /* * We don't appear to need to do anything here. In fact, if we did, we'd * duplicate cache flushing elsewhere performed by flush_dcache_page(). */ #define flush_icache_page(vma,page) do { } while (0) /* * flush_cache_vmap() is used when creating mappings (eg, via vmap, * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT * caches, since the direct-mappings of these pages may contain cached * data, we need to do a full cache flush to ensure that writebacks * don't corrupt data placed into these pages via the new mappings. */ static inline void flush_cache_vmap(unsigned long start, unsigned long end) { if (!<API key>()) flush_cache_all(); else /* * set_pte_at() called from vmap_pte_range() does not * have a DSB after cleaning the cache line. */ dsb(); } static inline void flush_cache_vunmap(unsigned long start, unsigned long end) { if (!<API key>()) flush_cache_all(); } #endif
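/*
 * Illustrative sketch only, not part of the original header: the usual
 * consumer of the icache API defined above is code that writes
 * instructions into memory (module loaders, code patchers) and must make
 * them visible to instruction fetch. The helper name below is
 * hypothetical and just shows the call pattern, assuming <linux/string.h>
 * is available for memcpy(): store the new instructions (they land in the
 * D-cache), then flush_icache_range() cleans the D-cache and invalidates
 * the I-cache over exactly that span.
 */
static inline void example_patch_text(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);		/* new code is only in the D-cache here */
	flush_icache_range((unsigned long)dst,
			   (unsigned long)dst + len);	/* make it visible to ifetch */
}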
#include <linux/device.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/anon_inodes.h> #include <linux/ion.h> #include <linux/mtk_ion.h> #include <linux/kthread.h> #include <linux/list.h> #include <linux/memblock.h> #include <linux/miscdevice.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/mm_types.h> #include <linux/rbtree.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/debugfs.h> #include <linux/dma-buf.h> #include <linux/idr.h> #include "ion_priv.h" #include "ion_profile.h" #define DEBUG_HEAP_SHRINKER #if 0 //we move it to ion_priv.h. so we can dump every buffer info in ion_mm_heap.c /** * struct ion_device - the metadata of the ion device node * @dev: the actual misc device * @buffers: an rb tree of all the existing buffers * @buffer_lock: lock protecting the tree of buffers * @lock: rwsem protecting the tree of heaps and clients * @heaps: list of all the heaps in the system * @user_clients: list of all the clients created from userspace */ struct ion_device { struct miscdevice dev; struct rb_root buffers; struct mutex buffer_lock; struct rw_semaphore lock; struct plist_head heaps; long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg); struct rb_root clients; struct dentry *debug_root; }; /** * struct ion_client - a process/hw block local address space * @node: node in the tree of all clients * @dev: backpointer to ion device * @handles: an rb tree of all the handles in this client * @idr: an idr space for allocating handle ids * @lock: lock protecting the tree of handles * @name: used for debugging * @task: used for debugging * * A client represents a list of buffers this client may access. * The mutex stored here is used to protect both handles tree * as well as the handles themselves, and should be held while modifying either. */ struct ion_client { struct rb_node node; struct ion_device *dev; struct rb_root handles; struct idr idr; struct mutex lock; const char *name; struct task_struct *task; pid_t pid; struct dentry *debug_root; }; struct ion_handle_debug { pid_t pid; pid_t tgid; unsigned int backtrace[BACKTRACE_SIZE]; unsigned int backtrace_num; }; /** * ion_handle - a client local reference to a buffer * @ref: reference count * @client: back pointer to the client the buffer resides in * @buffer: pointer to the buffer * @node: node in the client's handle rbtree * @kmap_cnt: count of times this client has mapped to kernel * @id: client-unique id allocated by client->idr * * Modifications to node, map_cnt or mapping should be protected by the * lock in the client. Other fields are never changed after initialization. 
*/ struct ion_handle { struct kref ref; struct ion_client *client; struct ion_buffer *buffer; struct rb_node node; unsigned int kmap_cnt; int id; #if <API key> struct ion_handle_debug dbg; #endif }; #endif bool <API key>(struct ion_buffer *buffer) { return ((buffer->flags & ION_FLAG_CACHED) && !(buffer->flags & <API key>)); } bool ion_buffer_cached(struct ion_buffer *buffer) { return !!(buffer->flags & ION_FLAG_CACHED); } static inline struct page *ion_buffer_page(struct page *page) { return (struct page *)((unsigned long)page & ~(1UL)); } static inline bool <API key>(struct page *page) { return !!((unsigned long)page & 1UL); } static inline void <API key>(struct page **page) { *page = (struct page *)((unsigned long)(*page) | 1UL); } static inline void <API key>(struct page **page) { *page = (struct page *)((unsigned long)(*page) & ~(1UL)); } /* this function should only be called while dev->lock is held */ static void ion_buffer_add(struct ion_device *dev, struct ion_buffer *buffer) { struct rb_node **p = &dev->buffers.rb_node; struct rb_node *parent = NULL; struct ion_buffer *entry; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_buffer, node); if (buffer < entry) { p = &(*p)->rb_left; } else if (buffer > entry) { p = &(*p)->rb_right; } else { pr_err("%s: buffer already found.", __func__); BUG(); } } rb_link_node(&buffer->node, parent, p); rb_insert_color(&buffer->node, &dev->buffers); } /* this function should only be called while dev->lock is held */ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap, struct ion_device *dev, unsigned long len, unsigned long align, unsigned long flags) { struct ion_buffer *buffer; struct sg_table *table; struct scatterlist *sg; int i, ret; buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); buffer->heap = heap; buffer->flags = flags; kref_init(&buffer->ref); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) { if (!(heap->flags & <API key>)) goto err2; <API key>(heap, 0); ret = heap->ops->allocate(heap, buffer, len, align, flags); if (ret) goto err2; } buffer->dev = dev; buffer->size = len; table = heap->ops->map_dma(heap, buffer); if (WARN_ONCE(table == NULL, "heap->ops->map_dma should return ERR_PTR on error")) table = ERR_PTR(-EINVAL); if (IS_ERR(table)) { heap->ops->free(buffer); kfree(buffer); return ERR_PTR(PTR_ERR(table)); } buffer->sg_table = table; if (<API key>(buffer)) { int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; struct scatterlist *sg; int i, j, k = 0; buffer->pages = vmalloc(sizeof(struct page *) * num_pages); if (!buffer->pages) { ret = -ENOMEM; goto err1; } for_each_sg(table->sgl, sg, table->nents, i) { struct page *page = sg_page(sg); for (j = 0; j < sg_dma_len(sg) / PAGE_SIZE; j++) buffer->pages[k++] = page++; } if (ret) goto err; } buffer->dev = dev; buffer->size = len; INIT_LIST_HEAD(&buffer->vmas); //log task pid for debug +by k.zhang { struct task_struct *task; task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); } mutex_init(&buffer->lock); /* this will set up dma addresses for the sglist -- it is not technically correct as per the dma api -- a specific device isn't really taking ownership here. However, in practice on our systems the only dma_address space is physical addresses. Additionally, we can't afford the overhead of invalidating every allocation via dma_map_sg. 
The implicit contract here is that memory coming from the heaps is ready for dma, i.e. if it has a cached mapping that mapping has been invalidated */ for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) sg_dma_address(sg) = sg_phys(sg); mutex_lock(&dev->buffer_lock); ion_buffer_add(dev, buffer); mutex_unlock(&dev->buffer_lock); return buffer; err: heap->ops->unmap_dma(heap, buffer); heap->ops->free(buffer); err1: if (buffer->pages) vfree(buffer->pages); err2: kfree(buffer); return ERR_PTR(ret); } void ion_buffer_destroy(struct ion_buffer *buffer) { if (WARN_ON(buffer->kmap_cnt > 0)) buffer->heap->ops->unmap_kernel(buffer->heap, buffer); buffer->heap->ops->unmap_dma(buffer->heap, buffer); buffer->heap->ops->free(buffer); if (buffer->pages) vfree(buffer->pages); kfree(buffer); } static void _ion_buffer_destroy(struct kref *kref) { struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref); struct ion_heap *heap = buffer->heap; struct ion_device *dev = buffer->dev; mutex_lock(&dev->buffer_lock); rb_erase(&buffer->node, &dev->buffers); mutex_unlock(&dev->buffer_lock); if (heap->flags & <API key>) <API key>(heap, buffer); else ion_buffer_destroy(buffer); } static void ion_buffer_get(struct ion_buffer *buffer) { kref_get(&buffer->ref); } static int ion_buffer_put(struct ion_buffer *buffer) { return kref_put(&buffer->ref, _ion_buffer_destroy); } static void <API key>(struct ion_buffer *buffer) { mutex_lock(&buffer->lock); buffer->handle_count++; mutex_unlock(&buffer->lock); } static void <API key>(struct ion_buffer *buffer) { /* * when a buffer is removed from a handle, if it is not in * any other handles, copy the taskcomm and the pid of the * process it's being removed from into the buffer. At this * point there will be no way to track what processes this buffer is * being used by, it only exists as a dma_buf file descriptor. 
* The taskcomm and pid can provide a debug hint as to where this fd * is in the system */ mutex_lock(&buffer->lock); buffer->handle_count--; BUG_ON(buffer->handle_count < 0); if (!buffer->handle_count) { struct task_struct *task; task = current->group_leader; get_task_comm(buffer->task_comm, task); buffer->pid = task_pid_nr(task); } mutex_unlock(&buffer->lock); } static struct ion_handle *ion_handle_create(struct ion_client *client, struct ion_buffer *buffer) { struct ion_handle *handle; handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL); if (!handle) return ERR_PTR(-ENOMEM); kref_init(&handle->ref); rb_init_node(&handle->node); handle->client = client; ion_buffer_get(buffer); <API key>(buffer); handle->buffer = buffer; return handle; } static void ion_handle_kmap_put(struct ion_handle *); static void ion_handle_destroy(struct kref *kref) { struct ion_handle *handle = container_of(kref, struct ion_handle, ref); struct ion_client *client = handle->client; struct ion_buffer *buffer = handle->buffer; mutex_lock(&buffer->lock); while (handle->kmap_cnt) ion_handle_kmap_put(handle); mutex_unlock(&buffer->lock); idr_remove(&client->idr, handle->id); if (!RB_EMPTY_NODE(&handle->node)) rb_erase(&handle->node, &client->handles); <API key>(buffer); ion_buffer_put(buffer); handle->buffer = NULL; handle->client = NULL; kfree(handle); } struct ion_buffer *ion_handle_buffer(struct ion_handle *handle) { return handle->buffer; } static void ion_handle_get(struct ion_handle *handle) { kref_get(&handle->ref); } static int ion_handle_put(struct ion_handle *handle) { return kref_put(&handle->ref, ion_handle_destroy); } static struct ion_handle *ion_handle_lookup(struct ion_client *client, struct ion_buffer *buffer) { struct rb_node *n = client->handles.rb_node; while (n) { struct ion_handle *entry = rb_entry(n, struct ion_handle, node); if (buffer < entry->buffer) n = n->rb_left; else if (buffer > entry->buffer) n = n->rb_right; else return entry; } return ERR_PTR(-EINVAL); } struct ion_handle *ion_uhandle_get(struct ion_client *client, int id) { return idr_find(&client->idr, id); } bool ion_handle_validate(struct ion_client *client, struct ion_handle *handle) { return (ion_uhandle_get(client, handle->id) == handle); } static int ion_handle_add(struct ion_client *client, struct ion_handle *handle) { int rc; struct rb_node **p = &client->handles.rb_node; struct rb_node *parent = NULL; struct ion_handle *entry; do { int id; rc = idr_pre_get(&client->idr, GFP_KERNEL); if (!rc) return -ENOMEM; rc = idr_get_new_above(&client->idr, handle, 1, &id); handle->id = id; } while (rc == -EAGAIN); if (rc < 0) return rc; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_handle, node); if (handle->buffer < entry->buffer) p = &(*p)->rb_left; else if (handle->buffer > entry->buffer) p = &(*p)->rb_right; else WARN(1, "%s: buffer already found.", __func__); } rb_link_node(&handle->node, parent, p); rb_insert_color(&handle->node, &client->handles); return 0; } struct ion_handle *ion_alloc(struct ion_client *client, size_t len, size_t align, unsigned int heap_id_mask, unsigned int flags) { struct ion_handle *handle; struct ion_device *dev = client->dev; struct ion_buffer *buffer = NULL; struct ion_heap *heap; int ret; pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__, len, align, heap_id_mask, flags); /* * traverse the list of heaps available in this system in priority * order. If the heap type is supported by the client, and matches the * request of the caller allocate from it. 
Repeat until an allocation has * succeeded or all heaps have been tried */ if (WARN_ON(!len)) return ERR_PTR(-EINVAL); //added by k.zhang if (len > 1024*1024*1024) { IONMSG("%s error: size (%zu) is more than 1G!!\n", __func__, len); return ERR_PTR(-EINVAL); } MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagStart, len, 0); len = PAGE_ALIGN(len); down_read(&dev->lock); <API key>(heap, &dev->heaps, node) { /* if the caller didn't specify this heap id */ if (!((1 << heap->id) & heap_id_mask)) continue; buffer = ion_buffer_create(heap, dev, len, align, flags); if (!IS_ERR(buffer)) break; } up_read(&dev->lock); if (buffer == NULL) return ERR_PTR(-ENODEV); if (IS_ERR(buffer)) return ERR_PTR(PTR_ERR(buffer)); handle = ion_handle_create(client, buffer); /* * ion_buffer_create will create a buffer with a ref_cnt of 1, * and ion_handle_create will take a second reference, drop one here */ ion_buffer_put(buffer); if (IS_ERR(handle)) return handle; mutex_lock(&client->lock); ret = ion_handle_add(client, handle); if (ret) { ion_handle_put(handle); handle = ERR_PTR(ret); } mutex_unlock(&client->lock); MMProfileLogEx(ION_MMP_Events[PROFILE_ALLOC], MMProfileFlagEnd, len, 0); /* log len: the buffer may already be freed here if ion_handle_add() failed */ return handle; } EXPORT_SYMBOL(ion_alloc); void ion_free(struct ion_client *client, struct ion_handle *handle) { bool valid_handle; BUG_ON(client != handle->client); mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); if (!valid_handle) { WARN(1, "%s: invalid handle passed to free.\n", __func__); mutex_unlock(&client->lock); return; } ion_handle_put(handle); mutex_unlock(&client->lock); } EXPORT_SYMBOL(ion_free); int ion_phys(struct ion_client *client, struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len) { struct ion_buffer *buffer; int ret; MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagStart, (unsigned int)client, (unsigned int)handle); mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { mutex_unlock(&client->lock); return -EINVAL; } buffer = handle->buffer; if (!buffer->heap->ops->phys) { pr_err("%s: ion_phys is not implemented by this heap.\n", __func__); mutex_unlock(&client->lock); return -ENODEV; } mutex_unlock(&client->lock); ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len); MMProfileLogEx(ION_MMP_Events[PROFILE_GET_PHYS], MMProfileFlagEnd, buffer->size, *addr); return ret; } EXPORT_SYMBOL(ion_phys); static void *ion_buffer_kmap_get(struct ion_buffer *buffer) { void *vaddr; if (buffer->kmap_cnt) { buffer->kmap_cnt++; return buffer->vaddr; } vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer); if (WARN_ONCE(vaddr == NULL, "heap->ops->map_kernel should return ERR_PTR on error")) return ERR_PTR(-EINVAL); if (IS_ERR(vaddr)) return vaddr; buffer->vaddr = vaddr; buffer->kmap_cnt++; return vaddr; } static void *ion_handle_kmap_get(struct ion_handle *handle) { struct ion_buffer *buffer = handle->buffer; void *vaddr; if (handle->kmap_cnt) { handle->kmap_cnt++; return buffer->vaddr; } vaddr = ion_buffer_kmap_get(buffer); if (IS_ERR(vaddr)) return vaddr; handle->kmap_cnt++; return vaddr; } static void ion_buffer_kmap_put(struct ion_buffer *buffer) { buffer->kmap_cnt--; if (!buffer->kmap_cnt) { MMProfileLogEx(ION_MMP_Events[<API key>], MMProfileFlagStart, buffer->size, 0); buffer->heap->ops->unmap_kernel(buffer->heap, buffer); MMProfileLogEx(ION_MMP_Events[<API key>], MMProfileFlagEnd, buffer->size, 0); buffer->vaddr = NULL; } } static void ion_handle_kmap_put(struct ion_handle *handle) { struct ion_buffer *buffer = handle->buffer;
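/* Descriptive note: handle and buffer keep separate kmap counts. Each handle contributes exactly one reference to the buffer's count, taken on the handle's first map; the decrement below releases that buffer-level reference only when this handle's own count reaches zero, and the heap's unmap_kernel runs only when the buffer-level count in turn drops to zero. */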
handle->kmap_cnt--; if (!handle->kmap_cnt) ion_buffer_kmap_put(buffer); } void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; void *vaddr; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { pr_err("%s: invalid handle passed to map_kernel.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; if (!handle->buffer->heap->ops->map_kernel) { pr_err("%s: map_kernel is not implemented by this heap.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-ENODEV); } mutex_lock(&buffer->lock); vaddr = ion_handle_kmap_get(handle); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); return vaddr; } EXPORT_SYMBOL(ion_map_kernel); void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; mutex_lock(&client->lock); buffer = handle->buffer; mutex_lock(&buffer->lock); ion_handle_kmap_put(handle); mutex_unlock(&buffer->lock); mutex_unlock(&client->lock); } EXPORT_SYMBOL(ion_unmap_kernel); static int <API key>(struct seq_file *s, void *unused) { struct ion_client *client = s->private; struct rb_node *n; size_t sizes[ION_NUM_HEAP_IDS] = {0}; const char *names[ION_NUM_HEAP_IDS] = {0}; int i; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); unsigned int id = handle->buffer->heap->id; if (!names[id]) names[id] = handle->buffer->heap->name; sizes[id] += handle->buffer->size; } mutex_unlock(&client->lock); seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes"); for (i = 0; i < ION_NUM_HEAP_IDS; i++) { if (!names[i]) continue; seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]); } return 0; } static int <API key>(struct inode *inode, struct file *file) { return single_open(file, <API key>, inode->i_private); } static const struct file_operations debug_client_fops = { .open = <API key>, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; struct ion_client *ion_client_create(struct ion_device *dev, const char *name) { struct ion_client *client; struct task_struct *task; struct rb_node **p; struct rb_node *parent = NULL; struct ion_client *entry; char debug_name[64]; pid_t pid; get_task_struct(current->group_leader); task_lock(current->group_leader); pid = task_pid_nr(current->group_leader); /* don't bother to store task struct for kernel threads, they can't be killed anyway */ if (current->group_leader->flags & PF_KTHREAD) { put_task_struct(current->group_leader); task = NULL; } else { task = current->group_leader; } task_unlock(current->group_leader); client = kzalloc(sizeof(struct ion_client), GFP_KERNEL); if (!client) { if (task) put_task_struct(current->group_leader); return ERR_PTR(-ENOMEM); } client->dev = dev; client->handles = RB_ROOT; idr_init(&client->idr); mutex_init(&client->lock); client->name = name; client->task = task; client->pid = pid; down_write(&dev->lock); p = &dev->clients.rb_node; while (*p) { parent = *p; entry = rb_entry(parent, struct ion_client, node); if (client < entry) p = &(*p)->rb_left; else if (client > entry) p = &(*p)->rb_right; } rb_link_node(&client->node, parent, p); rb_insert_color(&client->node, &dev->clients); snprintf(debug_name, 64, "%u", client->pid); client->debug_root = debugfs_create_file(debug_name, 0664, dev->debug_root, client, &debug_client_fops); up_write(&dev->lock); return client; } EXPORT_SYMBOL(ion_client_create); void ion_client_destroy(struct ion_client *client) {
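/* Descriptive note: final teardown of a client. Every remaining handle is destroyed (dropping the underlying buffer references), then the client is unlinked from the device rb-tree and its idr, debugfs entry and task reference are released. */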
struct ion_device *dev = client->dev; struct rb_node *n; pr_debug("%s: %d\n", __func__, __LINE__); while ((n = rb_first(&client->handles))) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); mutex_lock(&client->lock); ion_handle_destroy(&handle->ref); mutex_unlock(&client->lock); } idr_remove_all(&client->idr); idr_destroy(&client->idr); down_write(&dev->lock); if (client->task) put_task_struct(client->task); rb_erase(&client->node, &dev->clients); <API key>(client->debug_root); up_write(&dev->lock); kfree(client); } EXPORT_SYMBOL(ion_client_destroy); struct sg_table *ion_sg_table(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; struct sg_table *table; mutex_lock(&client->lock); if (!ion_handle_validate(client, handle)) { pr_err("%s: invalid handle passed to map_dma.\n", __func__); mutex_unlock(&client->lock); return ERR_PTR(-EINVAL); } buffer = handle->buffer; table = buffer->sg_table; mutex_unlock(&client->lock); return table; } EXPORT_SYMBOL(ion_sg_table); static void <API key>(struct ion_buffer *buffer, struct device *dev, enum dma_data_direction direction); static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment, enum dma_data_direction direction) { struct dma_buf *dmabuf = attachment->dmabuf; struct ion_buffer *buffer = dmabuf->priv; <API key>(buffer, attachment->dev, direction); return buffer->sg_table; } static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment, struct sg_table *table, enum dma_data_direction direction) { } struct ion_vma_list { struct list_head list; struct vm_area_struct *vma; }; static void <API key>(struct ion_buffer *buffer, struct device *dev, enum dma_data_direction dir) { struct ion_vma_list *vma_list; int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE; int i; pr_debug("%s: syncing for device %s\n", __func__, dev ? 
dev_name(dev) : "null"); if (!<API key>(buffer)) return; mutex_lock(&buffer->lock); for (i = 0; i < pages; i++) { struct page *page = buffer->pages[i]; if (<API key>(page)) <API key>(page, 0, PAGE_SIZE, dir); <API key>(buffer->pages + i); } list_for_each_entry(vma_list, &buffer->vmas, list) { struct vm_area_struct *vma = vma_list->vma; zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, NULL); } mutex_unlock(&buffer->lock); } int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct ion_buffer *buffer = vma->vm_private_data; int ret; mutex_lock(&buffer->lock); <API key>(buffer->pages + vmf->pgoff); BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]); ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, ion_buffer_page(buffer->pages[vmf->pgoff])); mutex_unlock(&buffer->lock); if (ret) return VM_FAULT_ERROR; return VM_FAULT_NOPAGE; } static void ion_vm_open(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_private_data; struct ion_vma_list *vma_list; vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL); if (!vma_list) return; vma_list->vma = vma; mutex_lock(&buffer->lock); list_add(&vma_list->list, &buffer->vmas); mutex_unlock(&buffer->lock); pr_debug("%s: adding %p\n", __func__, vma); } static void ion_vm_close(struct vm_area_struct *vma) { struct ion_buffer *buffer = vma->vm_private_data; struct ion_vma_list *vma_list, *tmp; pr_debug("%s\n", __func__); mutex_lock(&buffer->lock); <API key>(vma_list, tmp, &buffer->vmas, list) { if (vma_list->vma != vma) continue; list_del(&vma_list->list); kfree(vma_list); pr_debug("%s: deleting %p\n", __func__, vma); break; } mutex_unlock(&buffer->lock); } struct <API key> ion_vma_ops = { .open = ion_vm_open, .close = ion_vm_close, .fault = ion_vm_fault, }; static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma) { struct ion_buffer *buffer = dmabuf->priv; int ret = 0; MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagStart, buffer->size, vma->vm_start); if (!buffer->heap->ops->map_user) { pr_err("%s: this heap does not define a method for mapping " "to userspace\n", __func__); return -EINVAL; } if (<API key>(buffer)) { vma->vm_private_data = buffer; vma->vm_ops = &ion_vma_ops; ion_vm_open(vma); return 0; } //if (!(buffer->flags & ION_FLAG_CACHED)) //vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); mutex_lock(&buffer->lock); /* now map it to userspace */ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma); mutex_unlock(&buffer->lock); if (ret) pr_err("%s: failure mapping buffer to userspace\n", __func__); MMProfileLogEx(ION_MMP_Events[PROFILE_MAP_USER], MMProfileFlagEnd, buffer->size, vma->vm_start); return ret; } static void ion_dma_buf_release(struct dma_buf *dmabuf) { struct ion_buffer *buffer = dmabuf->priv; ion_buffer_put(buffer); } static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset) { struct ion_buffer *buffer = dmabuf->priv; return buffer->vaddr + offset * PAGE_SIZE; } static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset, void *ptr) { return; } static int <API key>(struct dma_buf *dmabuf, size_t start, size_t len, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; void *vaddr; if (!buffer->heap->ops->map_kernel) { pr_err("%s: map kernel is not implemented by this heap.\n", __func__); return -ENODEV; } mutex_lock(&buffer->lock); vaddr = ion_buffer_kmap_get(buffer); mutex_unlock(&buffer->lock); if (IS_ERR(vaddr)) return PTR_ERR(vaddr); return 0; } static void <API 
key>(struct dma_buf *dmabuf, size_t start, size_t len, enum dma_data_direction direction) { struct ion_buffer *buffer = dmabuf->priv; mutex_lock(&buffer->lock); ion_buffer_kmap_put(buffer); mutex_unlock(&buffer->lock); } struct dma_buf_ops dma_buf_ops = { .map_dma_buf = ion_map_dma_buf, .unmap_dma_buf = ion_unmap_dma_buf, .mmap = ion_mmap, .release = ion_dma_buf_release, .begin_cpu_access = <API key>, .end_cpu_access = <API key>, .kmap_atomic = ion_dma_buf_kmap, .kunmap_atomic = ion_dma_buf_kunmap, .kmap = ion_dma_buf_kmap, .kunmap = ion_dma_buf_kunmap, }; struct dma_buf *ion_share_dma_buf(struct ion_client *client, struct ion_handle *handle) { struct ion_buffer *buffer; struct dma_buf *dmabuf; bool valid_handle; mutex_lock(&client->lock); valid_handle = ion_handle_validate(client, handle); mutex_unlock(&client->lock); if (!valid_handle) { WARN(1, "%s: invalid handle passed to share.\n", __func__); return ERR_PTR(-EINVAL); } buffer = handle->buffer; ion_buffer_get(buffer); dmabuf = dma_buf_export(buffer, &dma_buf_ops, buffer->size, O_RDWR); if (IS_ERR(dmabuf)) { ion_buffer_put(buffer); return dmabuf; } return dmabuf; } EXPORT_SYMBOL(ion_share_dma_buf); int <API key>(struct ion_client *client, struct ion_handle *handle) { struct dma_buf *dmabuf; int fd; dmabuf = ion_share_dma_buf(client, handle); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) dma_buf_put(dmabuf); return fd; } EXPORT_SYMBOL(<API key>); struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd) { struct dma_buf *dmabuf; struct ion_buffer *buffer; struct ion_handle *handle; int ret; MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagStart, 1, 1); dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) return ERR_PTR(PTR_ERR(dmabuf)); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not import dmabuf from another exporter\n", __func__); dma_buf_put(dmabuf); return ERR_PTR(-EINVAL); } buffer = dmabuf->priv; mutex_lock(&client->lock); /* if a handle exists for this buffer just take a reference to it */ handle = ion_handle_lookup(client, buffer); if (!IS_ERR(handle)) { ion_handle_get(handle); goto end; } handle = ion_handle_create(client, buffer); if (IS_ERR(handle)) goto end; ret = ion_handle_add(client, handle); if (ret) { ion_handle_put(handle); handle = ERR_PTR(ret); } end: mutex_unlock(&client->lock); dma_buf_put(dmabuf); MMProfileLogEx(ION_MMP_Events[PROFILE_IMPORT], MMProfileFlagEnd, 1, 1); return handle; } EXPORT_SYMBOL(ion_import_dma_buf); static int ion_sync_for_device(struct ion_client *client, int fd) { struct dma_buf *dmabuf; struct ion_buffer *buffer; dmabuf = dma_buf_get(fd); if (IS_ERR(dmabuf)) return PTR_ERR(dmabuf); /* if this memory came from ion */ if (dmabuf->ops != &dma_buf_ops) { pr_err("%s: can not sync dmabuf from another exporter\n", __func__); dma_buf_put(dmabuf); return -EINVAL; } buffer = dmabuf->priv; <API key>(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_BIDIRECTIONAL); dma_buf_put(dmabuf); return 0; } static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct ion_client *client = filp->private_data; switch (cmd) { case ION_IOC_ALLOC: { struct ion_allocation_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; handle = ion_alloc(client, data.len, data.align, data.heap_id_mask, data.flags); if (IS_ERR(handle)) return PTR_ERR(handle); data.handle = (struct ion_handle *)handle->id; if (copy_to_user((void 
__user *)arg, &data, sizeof(data))) { ion_free(client, handle); return -EFAULT; } break; } case ION_IOC_FREE: { struct ion_handle_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_handle_data))) return -EFAULT; mutex_lock(&client->lock); handle = ion_uhandle_get(client, (int)data.handle); mutex_unlock(&client->lock); if (IS_ERR_OR_NULL(handle)) { pr_err("%s: handle invalid, handle_id=%d.\n", __func__, (int)data.handle); return -EINVAL; } ion_free(client, handle); break; } case ION_IOC_SHARE: case ION_IOC_MAP: { struct ion_fd_data data; struct ion_handle *handle; if (copy_from_user(&data, (void __user *)arg, sizeof(data))) return -EFAULT; handle = ion_uhandle_get(client, (int)data.handle); if (IS_ERR_OR_NULL(handle)) { pr_err("%s: handle invalid, handle_id=%d\n", __func__, (int)data.handle); return -EINVAL; } data.fd = <API key>(client, handle); if (copy_to_user((void __user *)arg, &data, sizeof(data))) return -EFAULT; if (data.fd < 0) return data.fd; break; } case ION_IOC_IMPORT: { struct ion_fd_data data; struct ion_handle *handle; int ret = 0; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; handle = ion_import_dma_buf(client, data.fd); if (IS_ERR(handle)) ret = PTR_ERR(handle); else data.handle = (struct ion_handle *)handle->id; if (copy_to_user((void __user *)arg, &data, sizeof(struct ion_fd_data))) return -EFAULT; if (ret < 0) return ret; break; } case ION_IOC_SYNC: { struct ion_fd_data data; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_fd_data))) return -EFAULT; ion_sync_for_device(client, data.fd); break; } case ION_IOC_CUSTOM: { struct ion_device *dev = client->dev; struct ion_custom_data data; if (!dev->custom_ioctl) return -ENOTTY; if (copy_from_user(&data, (void __user *)arg, sizeof(struct ion_custom_data))) return -EFAULT; return dev->custom_ioctl(client, data.cmd, data.arg); } default: return -ENOTTY; } return 0; } static int ion_release(struct inode *inode, struct file *file) { struct ion_client *client = file->private_data; pr_debug("%s: %d\n", __func__, __LINE__); ion_client_destroy(client); return 0; } static int ion_open(struct inode *inode, struct file *file) { struct miscdevice *miscdev = file->private_data; struct ion_device *dev = container_of(miscdev, struct ion_device, dev); struct ion_client *client; pr_debug("%s: %d\n", __func__, __LINE__); client = ion_client_create(dev, "user"); if (IS_ERR(client)) return PTR_ERR(client); file->private_data = client; return 0; } static const struct file_operations ion_fops = { .owner = THIS_MODULE, .open = ion_open, .release = ion_release, .unlocked_ioctl = ion_ioctl, }; static size_t <API key>(struct ion_client *client, unsigned int id) { size_t size = 0; struct rb_node *n; mutex_lock(&client->lock); for (n = rb_first(&client->handles); n; n = rb_next(n)) { struct ion_handle *handle = rb_entry(n, struct ion_handle, node); if (handle->buffer->heap->id == id) size += handle->buffer->size; } mutex_unlock(&client->lock); return size; } static int ion_debug_heap_show(struct seq_file *s, void *unused) { struct ion_heap *heap = s->private; struct ion_device *dev = heap->dev; struct rb_node *n; size_t total_size = 0; size_t total_orphaned_size = 0; seq_printf(s, "%16.s %16.s %16.s\n", "client", "pid", "size"); seq_printf(s, "----------------------------------------------------\n"); down_read(&dev->lock); for (n = rb_first(&dev->clients); n; n = rb_next(n)) { struct ion_client *client = rb_entry(n, struct ion_client, node); size_t size = <API key>(client, heap->id); if
(!size) continue; if (client->task) { char task_comm[TASK_COMM_LEN]; get_task_comm(task_comm, client->task); seq_printf(s, "%16.s %16u %16zu\n", task_comm, client->pid, size); } else { seq_printf(s, "%16.s %16u %16zu\n", client->name, client->pid, size); } } up_read(&dev->lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "orphaned allocations (info is from last known client):" "\n"); mutex_lock(&dev->buffer_lock); for (n = rb_first(&dev->buffers); n; n = rb_next(n)) { struct ion_buffer *buffer = rb_entry(n, struct ion_buffer, node); if (buffer->heap->id != heap->id) continue; total_size += buffer->size; if (!buffer->handle_count) { seq_printf(s, "%16.s %16u %16zu %d %d\n", buffer->task_comm, buffer->pid, buffer->size, buffer->kmap_cnt, atomic_read(&buffer->ref.refcount)); total_orphaned_size += buffer->size; } } mutex_unlock(&dev->buffer_lock); seq_printf(s, "----------------------------------------------------\n"); seq_printf(s, "%16.s %16zu\n", "total orphaned", total_orphaned_size); seq_printf(s, "%16.s %16zu\n", "total ", total_size); if (heap->flags & <API key>) seq_printf(s, "%16.s %16zu\n", "deferred free", heap->free_list_size); seq_printf(s, "----------------------------------------------------\n"); if (heap->debug_show) heap->debug_show(heap, s, unused); return 0; } static int ion_debug_heap_open(struct inode *inode, struct file *file) { return single_open(file, ion_debug_heap_show, inode->i_private); } static const struct file_operations debug_heap_fops = { .open = ion_debug_heap_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #ifdef DEBUG_HEAP_SHRINKER static int debug_shrink_set(void *data, u64 val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; if (!val) return 0; objs = heap->shrinker.shrink(&heap->shrinker, &sc); sc.nr_to_scan = objs; heap->shrinker.shrink(&heap->shrinker, &sc); return 0; } static int debug_shrink_get(void *data, u64 *val) { struct ion_heap *heap = data; struct shrink_control sc; int objs; sc.gfp_mask = -1; sc.nr_to_scan = 0; objs = heap->shrinker.shrink(&heap->shrinker, &sc); *val = objs; return 0; } <API key>(debug_shrink_fops, debug_shrink_get, debug_shrink_set, "%llu\n"); #endif void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap) { if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma || !heap->ops->unmap_dma) pr_err("%s: can not add heap with invalid ops struct.\n", __func__); if (heap->flags & <API key>) <API key>(heap); heap->dev = dev; down_write(&dev->lock); /* use negative heap->id to reverse the priority -- when traversing the list later attempt higher id numbers first */ plist_node_init(&heap->node, -heap->id); plist_add(&heap->node, &dev->heaps); debugfs_create_file(heap->name, 0664, dev->debug_root, heap, &debug_heap_fops); #ifdef DEBUG_HEAP_SHRINKER if (heap->shrinker.shrink) { char debug_name[64]; snprintf(debug_name, 64, "%s_shrink", heap->name); debugfs_create_file(debug_name, 0644, dev->debug_root, heap, &debug_shrink_fops); } #endif up_write(&dev->lock); } struct ion_device *ion_device_create(long (*custom_ioctl) (struct ion_client *client, unsigned int cmd, unsigned long arg)) { struct ion_device *idev; int ret; idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL); if (!idev) return ERR_PTR(-ENOMEM); idev->dev.minor = MISC_DYNAMIC_MINOR; idev->dev.name = "ion"; idev->dev.fops = &ion_fops; idev->dev.parent = NULL; ret = misc_register(&idev->dev); if (ret) { pr_err("ion: failed to register misc device.\n"); kfree(idev); return ERR_PTR(ret); } idev->debug_root = debugfs_create_dir("ion", NULL); if (!idev->debug_root) pr_err("ion: failed to create debug files.\n");
idev->custom_ioctl = custom_ioctl; idev->buffers = RB_ROOT; mutex_init(&idev->buffer_lock); init_rwsem(&idev->lock); plist_head_init(&idev->heaps); idev->clients = RB_ROOT; return idev; } void ion_device_destroy(struct ion_device *dev) { misc_deregister(&dev->dev); /* XXX need to free the heaps and clients ? */ kfree(dev); } void __init ion_reserve(struct ion_platform_data *data) { int i; for (i = 0; i < data->nr; i++) { if (data->heaps[i].size == 0) continue; IONMSG("reserve memory: base=0x%lx, size=0x%zx\n", data->heaps[i].base, data->heaps[i].size); if (data->heaps[i].base == 0) { phys_addr_t paddr; paddr = memblock_alloc_base(data->heaps[i].size, data->heaps[i].align, <API key>); if (!paddr) { pr_err("%s: error allocating memblock for " "heap %d\n", __func__, i); continue; } data->heaps[i].base = paddr; } else { int ret = memblock_reserve(data->heaps[i].base, data->heaps[i].size); if (ret) pr_err("memblock reserve of %zx@%lx failed\n", data->heaps[i].size, data->heaps[i].base); } pr_info("%s: %s reserved base %lx size %zu\n", __func__, data->heaps[i].name, data->heaps[i].base, data->heaps[i].size); } }
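/*
 * Illustrative sketch, not part of the driver: how an in-kernel user of the
 * ION API above would typically allocate, map and release a buffer. The
 * client name and the heap id (bit 0) are assumptions made for the example.
 */
static int ion_usage_example(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;
	int ret = 0;

	client = ion_client_create(idev, "example");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* One page from whichever heap advertises id 0 (assumed). */
	handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out_client;
	}

	vaddr = ion_map_kernel(client, handle);	/* kmap counts go 0 -> 1 */
	if (!IS_ERR(vaddr)) {
		memset(vaddr, 0, PAGE_SIZE);
		ion_unmap_kernel(client, handle); /* and back to 0 */
	}

	ion_free(client, handle);	/* drops the handle's buffer reference */
out_client:
	ion_client_destroy(client);
	return ret;
}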
#include <drm/drmP.h> #ifdef CONFIG_DRM_VXD_BYT #include "vxd_drv.h" #else #include "psb_drv.h" #include "psb_reg.h" #endif #ifdef SUPPORT_VSP #include "vsp.h" #endif /* * Code for the MSVDX/TOPAZ MMU: */ /* * clflush on one processor only: * clflush should apparently flush the cache line on all processors in an * SMP system. */ /* * kmap atomic: * The usage of the slots must be completely encapsulated within a spinlock, and * no other functions that may be using the locks for other purposes may be * called from within the locked region. * Since the slots are per processor, this will guarantee that we are the only * user. */ /* * TODO: Inserting ptes from an interrupt handler: * This may be desirable for some SGX functionality where the GPU can fault in * needed pages. For that, we need to make an atomic insert_pages function, that * may fail. * If it fails, the caller needs to insert the page using a workqueue function, * but on average it should be fast. */ struct psb_mmu_driver { /* protects driver- and pd structures. Always take in read mode * before taking the page table spinlock. */ struct rw_semaphore sem; /* protects page tables, directory tables and pt tables. * and pt structures. */ spinlock_t lock; atomic_t needs_tlbflush; uint8_t __iomem *register_map; struct psb_mmu_pd *default_pd; /*uint32_t bif_ctrl;*/ int has_clflush; int clflush_add; unsigned long clflush_mask; struct drm_psb_private *dev_priv; enum mmu_type_t mmu_type; }; struct psb_mmu_pd; struct psb_mmu_pt { struct psb_mmu_pd *pd; uint32_t index; uint32_t count; struct page *p; uint32_t *v; }; struct psb_mmu_pd { struct psb_mmu_driver *driver; int hw_context; struct psb_mmu_pt **tables; struct page *p; struct page *dummy_pt; struct page *dummy_page; uint32_t pd_mask; uint32_t invalid_pde; uint32_t invalid_pte; }; static inline uint32_t psb_mmu_pt_index(uint32_t offset) { return (offset >> PSB_PTE_SHIFT) & 0x3FF; } static inline uint32_t psb_mmu_pd_index(uint32_t offset) { return offset >> PSB_PDE_SHIFT; } #if defined(CONFIG_X86) static inline void psb_clflush(volatile void *addr) { __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory"); } static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) { if (!driver->has_clflush) return; mb(); psb_clflush(addr); mb(); } static void psb_page_clflush(struct psb_mmu_driver *driver, struct page *page) { uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; int i; uint8_t *clf; uint8_t *clf_base; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) clf = kmap_atomic(page, KM_USER0); #else clf = kmap_atomic(page); #endif clf_base = clf; /* kunmap_atomic() must get the original mapping, not the advanced pointer */ mb(); for (i = 0; i < clflush_count; ++i) { psb_clflush(clf); clf += clflush_add; } mb(); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(clf_base, KM_USER0); #else kunmap_atomic(clf_base); #endif } static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages) { int i; if (!driver->has_clflush) return; for (i = 0; i < num_pages; i++) psb_page_clflush(driver, *page++); } #else static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr) { ; } static void psb_pages_clflush(struct psb_mmu_driver *driver, struct page *page[], unsigned long num_pages) { printk("Dummy psb_pages_clflush\n"); } #endif static void <API key>(struct psb_mmu_driver *driver, int force) { if (atomic_read(&driver->needs_tlbflush) || force) { if (!driver->dev_priv) goto out; if (driver->mmu_type == IMG_MMU) { atomic_set( &driver->dev_priv->msvdx_mmu_invaldc, 1); #ifndef
CONFIG_DRM_VXD_BYT atomic_set( &driver->dev_priv->topaz_mmu_invaldc, 1); #endif } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); } } out: atomic_set(&driver->needs_tlbflush, 0); } #if 0 static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force) { down_write(&driver->sem); <API key>(driver, force); up_write(&driver->sem); } #endif static void <API key>(struct psb_mmu_driver *driver, void *vaddr, uint32_t num_pages) { int i, j; uint8_t *clf = (uint8_t*)vaddr; uint32_t clflush_add = (driver->clflush_add * sizeof(uint32_t)) >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; DRM_INFO("clflush pages %d\n", num_pages); mb(); for (i = 0; i < num_pages; ++i) { for (j = 0; j < clflush_count; ++j) { psb_clflush(clf); clf += clflush_add; } } mb(); } void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot) { if (rc_prot) down_write(&driver->sem); if (!driver->dev_priv) goto out; if (driver->mmu_type == IMG_MMU) { atomic_set(&driver->dev_priv->msvdx_mmu_invaldc, 1); #ifndef CONFIG_DRM_VXD_BYT atomic_set(&driver->dev_priv->topaz_mmu_invaldc, 1); #endif } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP atomic_set(&driver->dev_priv->vsp_mmu_invaldc, 1); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); } out: if (rc_prot) up_write(&driver->sem); } void <API key>(struct psb_mmu_pd *pd, int hw_context) { /*ttm_tt_cache_flush(&pd->p, 1);*/ psb_pages_clflush(pd->driver, &pd->p, 1); down_write(&pd->driver->sem); wmb(); <API key>(pd->driver, 1); pd->hw_context = hw_context; up_write(&pd->driver->sem); } static inline unsigned long psb_pd_addr_end(unsigned long addr, unsigned long end) { addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK; return (addr < end) ? 
addr : end; } static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type) { uint32_t mask = PSB_PTE_VALID; if (type & <API key>) mask |= PSB_PTE_CACHED; if (type & PSB_MMU_RO_MEMORY) mask |= PSB_PTE_RO; if (type & PSB_MMU_WO_MEMORY) mask |= PSB_PTE_WO; return (pfn << PAGE_SHIFT) | mask; } #ifdef SUPPORT_VSP static inline uint32_t vsp_mmu_mask_pte(uint32_t pfn, int type) { return (pfn & VSP_PDE_MASK) | VSP_PTE_VALID; } #endif struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver, int trap_pagefaults, int invalid_type) { struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL); uint32_t *v; int i; if (!pd) return NULL; pd->p = alloc_page(GFP_DMA32); if (!pd->p) goto out_err1; pd->dummy_pt = alloc_page(GFP_DMA32); if (!pd->dummy_pt) goto out_err2; pd->dummy_page = alloc_page(GFP_DMA32); if (!pd->dummy_page) goto out_err3; if (!trap_pagefaults) { if (driver->mmu_type == IMG_MMU) { pd->invalid_pde = psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt), invalid_type); pd->invalid_pte = psb_mmu_mask_pte(page_to_pfn(pd->dummy_page), invalid_type); } else if (driver->mmu_type == VSP_MMU) { #ifdef SUPPORT_VSP pd->invalid_pde = vsp_mmu_mask_pte(page_to_pfn(pd->dummy_pt), invalid_type); pd->invalid_pte = vsp_mmu_mask_pte(page_to_pfn(pd->dummy_page), invalid_type); #endif } else { DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); goto out_err4; } } else { pd->invalid_pde = 0; pd->invalid_pte = 0; } v = kmap(pd->dummy_pt); if (!v) goto out_err4; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pte; kunmap(pd->dummy_pt); v = kmap(pd->p); if (!v) goto out_err4; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) v[i] = pd->invalid_pde; kunmap(pd->p); v = kmap(pd->dummy_page); if (!v) goto out_err4; clear_page(v); kunmap(pd->dummy_page); pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024); if (!pd->tables) goto out_err4; pd->hw_context = -1; pd->pd_mask = PSB_PTE_VALID; pd->driver = driver; return pd; out_err4: __free_page(pd->dummy_page); out_err3: __free_page(pd->dummy_pt); out_err2: __free_page(pd->p); out_err1: kfree(pd); return NULL; } void psb_mmu_free_pt(struct psb_mmu_pt *pt) { __free_page(pt->p); kfree(pt); } void <API key>(struct psb_mmu_pd *pd) { struct psb_mmu_driver *driver = pd->driver; struct psb_mmu_pt *pt; int i; down_write(&driver->sem); if (pd->hw_context != -1) <API key>(driver, 1); /* Should take the spinlock here, but we don't need to do that since we have the semaphore in write mode. 
*/ for (i = 0; i < 1024; ++i) { pt = pd->tables[i]; if (pt) psb_mmu_free_pt(pt); } vfree(pd->tables); __free_page(pd->dummy_page); __free_page(pd->dummy_pt); __free_page(pd->p); kfree(pd); up_write(&driver->sem); } static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) { struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL); void *v; uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT; uint32_t clflush_count = PAGE_SIZE / clflush_add; spinlock_t *lock = &pd->driver->lock; uint8_t *clf; uint32_t *ptes; int i; if (!pt) return NULL; pt->p = alloc_page(GFP_DMA32); if (!pt->p) { kfree(pt); return NULL; } spin_lock(lock); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pt->p, KM_USER0); #else v = kmap_atomic(pt->p); #endif clf = (uint8_t *) v; ptes = (uint32_t *) v; for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i) *ptes++ = pd->invalid_pte; #if defined(CONFIG_X86) if (pd->driver->has_clflush && pd->hw_context != -1) { mb(); for (i = 0; i < clflush_count; ++i) { psb_clflush(clf); clf += clflush_add; } mb(); } #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(v, KM_USER0); #else kunmap_atomic(v); #endif spin_unlock(lock); pt->count = 0; pt->pd = pd; pt->index = 0; return pt; } struct psb_mmu_pt *<API key>(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; uint32_t *v; spinlock_t *lock = &pd->driver->lock; struct psb_mmu_driver *driver = pd->driver; spin_lock(lock); pt = pd->tables[index]; while (!pt) { spin_unlock(lock); pt = psb_mmu_alloc_pt(pd); if (!pt) return NULL; spin_lock(lock); if (pd->tables[index]) { spin_unlock(lock); psb_mmu_free_pt(pt); spin_lock(lock); pt = pd->tables[index]; continue; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pd->p, KM_USER0); #else v = kmap_atomic(pd->p); #endif pd->tables[index] = pt; if (driver->mmu_type == IMG_MMU) v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask; #ifdef SUPPORT_VSP else if (driver->mmu_type == VSP_MMU) v[index] = (page_to_pfn(pt->p)); #endif else DRM_ERROR("MMU: invalid MMU type %d\n", driver->mmu_type); pt->index = index; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic((void *) v, KM_USER0); #else kunmap_atomic((void *) v); #endif if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[index]); atomic_set(&pd->driver->needs_tlbflush, 1); } } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) pt->v = kmap_atomic(pt->p, KM_USER0); #else pt->v = kmap_atomic(pt->p); #endif return pt; } static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd, unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; spinlock_t *lock = &pd->driver->lock; spin_lock(lock); pt = pd->tables[index]; if (!pt) { spin_unlock(lock); return NULL; } #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) pt->v = kmap_atomic(pt->p, KM_USER0); #else pt->v = kmap_atomic(pt->p); #endif return pt; } static void <API key>(struct psb_mmu_pt *pt) { struct psb_mmu_pd *pd = pt->pd; uint32_t *v; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) kunmap_atomic(pt->v, KM_USER0); #else kunmap_atomic(pt->v); #endif if (pt->count == 0) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) v = kmap_atomic(pd->p, KM_USER0); #else v = kmap_atomic(pd->p); #endif v[pt->index] = pd->invalid_pde; pd->tables[pt->index] = NULL; if (pd->hw_context != -1) { psb_mmu_clflush(pd->driver, (void *) &v[pt->index]); atomic_set(&pd->driver->needs_tlbflush, 1); } #if (LINUX_VERSION_CODE < 
KERNEL_VERSION(3, 8, 0)) kunmap_atomic(v, KM_USER0); /* release the pd->p mapping (v); pt->v was already unmapped above */ #else kunmap_atomic(v); #endif spin_unlock(&pd->driver->lock); psb_mmu_free_pt(pt); return; } spin_unlock(&pd->driver->lock); } static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt, unsigned long addr, uint32_t pte) { pt->v[psb_mmu_pt_index(addr)] = pte; } static inline void <API key>(struct psb_mmu_pt *pt, unsigned long addr) { pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte; } #if 0 static uint32_t <API key>(struct psb_mmu_pd *pd, uint32_t mmu_offset) { uint32_t *v; uint32_t pfn; v = kmap_atomic(pd->p, KM_USER0); if (!v) { printk(KERN_INFO "Could not kmap pde page.\n"); return 0; } pfn = v[psb_mmu_pd_index(mmu_offset)]; /* printk(KERN_INFO "pde is 0x%08x\n",pfn); */ kunmap_atomic(v, KM_USER0); if (((pfn & 0x0F) != PSB_PTE_VALID)) { printk(KERN_INFO "Strange pde at 0x%08x: 0x%08x.\n", mmu_offset, pfn); } v = ioremap(pfn & 0xFFFFF000, 4096); if (!v) { printk(KERN_INFO "Could not kmap pte page.\n"); return 0; } pfn = v[psb_mmu_pt_index(mmu_offset)]; /* printk(KERN_INFO "pte is 0x%08x\n",pfn); */ iounmap(v); if (((pfn & 0x0F) != PSB_PTE_VALID)) { printk(KERN_INFO "Strange pte at 0x%08x: 0x%08x.\n", mmu_offset, pfn); } return pfn >> PAGE_SHIFT; } static void <API key>(struct psb_mmu_pd *pd, uint32_t mmu_offset, uint32_t gtt_pages) { uint32_t start; uint32_t next; printk(KERN_INFO "Checking mirrored gtt 0x%08x %d\n", mmu_offset, gtt_pages); down_read(&pd->driver->sem); start = <API key>(pd, mmu_offset); mmu_offset += PAGE_SIZE; gtt_pages -= 1; while (gtt_pages--) { next = <API key>(pd, mmu_offset); if (next != start + 1) { printk(KERN_INFO "Ptes out of order: 0x%08x, 0x%08x.\n", start, next); } start = next; mmu_offset += PAGE_SIZE; } up_read(&pd->driver->sem); } void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd, uint32_t mmu_offset, uint32_t gtt_start, uint32_t gtt_pages) { uint32_t *v; uint32_t start = psb_mmu_pd_index(mmu_offset); struct psb_mmu_driver *driver = pd->driver; int num_pages = gtt_pages; down_read(&driver->sem); spin_lock(&driver->lock); v = kmap_atomic(pd->p, KM_USER0); v += start; while (gtt_pages--) { *v++ = gtt_start | pd->pd_mask; gtt_start += PAGE_SIZE; } /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); kunmap_atomic(v, KM_USER0); spin_unlock(&driver->lock); if (pd->hw_context != -1) atomic_set(&pd->driver->needs_tlbflush, 1); up_read(&pd->driver->sem); psb_mmu_flush_pd(pd->driver, 0); } #endif struct psb_mmu_pd *<API key>(struct psb_mmu_driver *driver) { struct psb_mmu_pd *pd; /* down_read(&driver->sem); */ pd = driver->default_pd; /* up_read(&driver->sem); */ return pd; } /* Returns the physical address of the PD shared by sgx/msvdx */ uint32_t <API key>(struct psb_mmu_driver *driver) { struct psb_mmu_pd *pd; pd = <API key>(driver); return page_to_pfn(pd->p) << PAGE_SHIFT; } void <API key>(struct psb_mmu_driver *driver) { <API key>(driver->default_pd); kfree(driver); } struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem *registers, int trap_pagefaults, int invalid_type, struct drm_psb_private *dev_priv, enum mmu_type_t mmu_type) { struct psb_mmu_driver *driver; driver = kmalloc(sizeof(*driver), GFP_KERNEL); if (!driver) return NULL; driver->dev_priv = dev_priv; driver->mmu_type = mmu_type; driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults, invalid_type); if (!driver->default_pd) goto out_err1; spin_lock_init(&driver->lock); init_rwsem(&driver->sem); down_write(&driver->sem); driver->register_map = registers; atomic_set(&driver->needs_tlbflush, 1);
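/* Descriptive note on the probe below: CPUID leaf 1 reports the CLFLUSH line size in EBX bits 15:8, in units of 8 bytes, so a reported value of 8 means 64-byte cache lines. clflush_add then expresses how much GPU virtual address space one cache line's worth of 32-bit PTEs covers: with 4 KiB pages, 64 / 4 PTEs * 4 KiB = 64 KiB, which is the stride psb_mmu_flush_ptes() walks per clflush. */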
driver->has_clflush = 0; #if defined(CONFIG_X86) if (boot_cpu_has(X86_FEATURE_CLFLSH)) { uint32_t tfms, misc, cap0, cap4, clflush_size; /* * clflush size is determined at kernel setup for x86_64 * but not for i386. We have to do it here. */ cpuid(0x00000001, &tfms, &misc, &cap0, &cap4); clflush_size = ((misc >> 8) & 0xff) * 8; driver->has_clflush = 1; driver->clflush_add = PAGE_SIZE * clflush_size / sizeof(uint32_t); driver->clflush_mask = driver->clflush_add - 1; driver->clflush_mask = ~driver->clflush_mask; } #endif up_write(&driver->sem); return driver; out_err1: kfree(driver); return NULL; } #if defined(CONFIG_X86) static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long clflush_add = pd->driver->clflush_add; unsigned long clflush_mask = pd->driver->clflush_mask; if (!pd->driver->has_clflush) { /*ttm_tt_cache_flush(&pd->p, num_pages);*/ psb_pages_clflush(pd->driver, &pd->p, num_pages); return; } if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; mb(); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { psb_clflush(&pt->v [psb_mmu_pt_index(addr)]); } while (addr += clflush_add, (addr & clflush_mask) < next); <API key>(pt); } while (addr = next, next != end); address += row_add; } mb(); } #else static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { drm_ttm_cache_flush(&pd->p, num_pages); } #endif void <API key>(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages) { struct psb_mmu_pt *pt; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = <API key>(pd, addr); if (!pt) goto out; do { <API key>(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); <API key>(pt); } while (addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); return; } void <API key>(struct psb_mmu_pd *pd, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride) { struct psb_mmu_pt *pt; uint32_t rows = 1; uint32_t i; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; if (hw_tile_stride) rows = num_pages / desired_tile_stride; else desired_tile_stride = num_pages; add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; /* down_read(&pd->driver->sem); */ /* Make sure we only need to flush this processor's cache */ for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = psb_mmu_pt_map_lock(pd, addr); if (!pt) continue; do { <API key>(pt, addr); --pt->count; } while (addr += PAGE_SIZE, addr < next); <API key>(pt); } while (addr = next, next != end); address += row_add; } if (pd->hw_context != -1) 
psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); /* up_read(&pd->driver->sem); */ if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 0); } int <API key>(struct psb_mmu_pd *pd, uint32_t start_pfn, unsigned long address, uint32_t num_pages, int type) { struct psb_mmu_pt *pt; struct psb_mmu_driver *driver = pd->driver; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long f_address = address; int ret = 0; down_read(&pd->driver->sem); addr = address; end = addr + (num_pages << PAGE_SHIFT); do { next = psb_pd_addr_end(addr, end); pt = <API key>(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { if (driver->mmu_type == IMG_MMU) { pte = psb_mmu_mask_pte(start_pfn++, type); #ifdef SUPPORT_VSP } else if (driver->mmu_type == VSP_MMU) { pte = vsp_mmu_mask_pte(start_pfn++, type); #endif } else { DRM_ERROR("MMU: mmu type invalid %d\n", driver->mmu_type); ret = -EINVAL; goto out; } psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); <API key>(pt); } while (addr = next, next != end); out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } int <API key>(struct psb_mmu_pd *pd, struct page **pages, unsigned long address, uint32_t num_pages, uint32_t desired_tile_stride, uint32_t hw_tile_stride, int type) { struct psb_mmu_pt *pt; struct psb_mmu_driver *driver = pd->driver; uint32_t rows = 1; uint32_t i; uint32_t pte; unsigned long addr; unsigned long end; unsigned long next; unsigned long add; unsigned long row_add; unsigned long f_address = address; int ret = 0; if (hw_tile_stride) { if (num_pages % desired_tile_stride != 0) return -EINVAL; rows = num_pages / desired_tile_stride; } else { desired_tile_stride = num_pages; } add = desired_tile_stride << PAGE_SHIFT; row_add = hw_tile_stride << PAGE_SHIFT; down_read(&pd->driver->sem); for (i = 0; i < rows; ++i) { addr = address; end = addr + add; do { next = psb_pd_addr_end(addr, end); pt = <API key>(pd, addr); if (!pt) { ret = -ENOMEM; goto out; } do { if (driver->mmu_type == IMG_MMU) { pte = psb_mmu_mask_pte( page_to_pfn(*pages++), type); #ifdef SUPPORT_VSP } else if (driver->mmu_type == VSP_MMU) { pte = vsp_mmu_mask_pte( page_to_pfn(*pages++), type); #endif } else { DRM_ERROR("MMU: mmu type invalid %d\n", driver->mmu_type); ret = -EINVAL; goto out; } psb_mmu_set_pte(pt, addr, pte); pt->count++; } while (addr += PAGE_SIZE, addr < next); <API key>(pt); } while (addr = next, next != end); address += row_add; } out: if (pd->hw_context != -1) psb_mmu_flush_ptes(pd, f_address, num_pages, desired_tile_stride, hw_tile_stride); up_read(&pd->driver->sem); if (pd->hw_context != -1) psb_mmu_flush(pd->driver, 1); return ret; } #if 0 /*comented out, only used in mmu test now*/ void <API key>(struct psb_mmu_driver *driver, uint32_t mask) { mask &= _PSB_MMU_ER_MASK; psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) & ~mask, PSB_CR_BIF_CTRL); (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); } void <API key>(struct psb_mmu_driver *driver, uint32_t mask) { mask &= _PSB_MMU_ER_MASK; psb_iowrite32(driver, psb_ioread32(driver, PSB_CR_BIF_CTRL) | mask, PSB_CR_BIF_CTRL); (void) psb_ioread32(driver, PSB_CR_BIF_CTRL); } int <API key>(struct psb_mmu_pd *pd, uint32_t virtual, unsigned long *pfn) { int ret; struct psb_mmu_pt *pt; uint32_t tmp; spinlock_t *lock = &pd->driver->lock; down_read(&pd->driver->sem); pt = psb_mmu_pt_map_lock(pd, virtual); if 
(!pt) { uint32_t *v; spin_lock(lock); v = kmap_atomic(pd->p, KM_USER0); tmp = v[psb_mmu_pd_index(virtual)]; kunmap_atomic(v, KM_USER0); spin_unlock(lock); if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) || !(pd->invalid_pte & PSB_PTE_VALID)) { ret = -EINVAL; goto out; } ret = 0; *pfn = pd->invalid_pte >> PAGE_SHIFT; goto out; } tmp = pt->v[psb_mmu_pt_index(virtual)]; if (!(tmp & PSB_PTE_VALID)) { ret = -EINVAL; } else { ret = 0; *pfn = tmp >> PAGE_SHIFT; } <API key>(pt); out: up_read(&pd->driver->sem); return ret; } void psb_mmu_test(struct psb_mmu_driver *driver, uint32_t offset) { struct page *p; unsigned long pfn; int ret = 0; struct psb_mmu_pd *pd; uint32_t *v; uint32_t *vmmu; pd = driver->default_pd; if (!pd) printk(KERN_WARNING "Could not get default pd\n"); p = alloc_page(GFP_DMA32); if (!p) { printk(KERN_WARNING "Failed allocating page\n"); return; } v = kmap(p); memset(v, 0x67, PAGE_SIZE); pfn = (offset >> PAGE_SHIFT); ret = <API key>(pd, &p, pfn << PAGE_SHIFT, 1, 0, 0, 0); if (ret) { printk(KERN_WARNING "Failed inserting mmu page\n"); goto out_err1; } /* Ioremap the page through the GART aperture */ vmmu = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); if (!vmmu) { printk(KERN_WARNING "Failed ioremapping page\n"); goto out_err2; } /* Read from the page with mmu disabled. */ printk(KERN_INFO "Page first dword is 0x%08x\n", ioread32(vmmu)); /* Enable the mmu for host accesses and read again. */ <API key>(driver, _PSB_MMU_ER_HOST); printk(KERN_INFO "MMU Page first dword is (0x67676767) 0x%08x\n", ioread32(vmmu)); *v = 0x15243705; printk(KERN_INFO "MMU Page new dword is (0x15243705) 0x%08x\n", ioread32(vmmu)); iowrite32(0x16243355, vmmu); (void) ioread32(vmmu); printk(KERN_INFO "Page new dword is (0x16243355) 0x%08x\n", *v); printk(KERN_INFO "Int stat is 0x%08x\n", psb_ioread32(driver, PSB_CR_BIF_INT_STAT)); printk(KERN_INFO "Fault is 0x%08x\n", psb_ioread32(driver, PSB_CR_BIF_FAULT)); /* Disable MMU for host accesses and clear page fault register */ <API key>(driver, _PSB_MMU_ER_HOST); iounmap(vmmu); out_err2: <API key>(pd, pfn << PAGE_SHIFT, 1, 0, 0); out_err1: kunmap(p); __free_page(p); } #endif /* void <API key>(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_mmu_pd *pd = <API key>(dev_priv->mmu); struct psb_mmu_pt *pt; int i, j; uint32_t flags; uint32_t *v; spinlock_t *lock = &pd->driver->lock; down_read(&pd->driver->sem); spin_lock_irqsave(lock, flags); v = kmap_atomic(pd->p, KM_USER0); if (!v) { printk(KERN_INFO "%s: Kmap pg fail, abort\n", __func__); return; } printk(KERN_INFO "%s: start dump mmu page table\n", __func__); for (i = 0; i < 1024; i++) { pt = pd->tables[i]; if (!pt) { printk(KERN_INFO "pt[%d] is NULL, 0x%08x\n", i, v[i]); continue; } printk(KERN_INFO "pt[%d] is 0x%08x\n", i, v[i]); pt->v = kmap_atomic(pt->p, KM_USER0); if (!(pt->v)) { printk(KERN_INFO "%s: Kmap fail, abort\n", __func__); break; } for (j = 0; j < 1024; j++) { if (!(j%16)) printk(KERN_INFO "pte%d:", j); uint32_t pte = pt->v[j]; printk("%08xh ", pte); //if ((j%16) == 15) //printk(KERN_INFO "\n"); } kunmap_atomic(pt->v, KM_USER0); } <API key>(lock, flags); up_read(&pd->driver->sem); kunmap_atomic((void *) v, KM_USER0); printk(KERN_INFO "%s: finish dump mmu page table\n", __func__); } */ int psb_ttm_bo_clflush(struct psb_mmu_driver *mmu, struct ttm_buffer_object *bo) { int ret = 0; bool is_iomem; void *addr; struct ttm_bo_kmap_obj bo_kmap; if (unlikely(!mmu || !bo)) { DRM_ERROR("NULL pointer, mmu:%p bo:%p\n", mmu, bo); return 1; } /*map surface parameters*/ ret = 
ttm_bo_kmap(bo, 0, bo->num_pages, &bo_kmap); if (ret) { DRM_ERROR("ttm_bo_kmap failed: %d.\n", ret); return ret; } addr = (void *)<API key>(&bo_kmap, &is_iomem); if (unlikely(!addr)) { DRM_ERROR("failed to <API key>\n"); ret = 1; goto out; /* never clflush a NULL mapping */ } <API key>(mmu, addr, bo->num_pages); out: ttm_bo_kunmap(&bo_kmap); return ret; }
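/*
 * Illustrative sketch, not part of the driver: how a 32-bit GPU virtual
 * address decomposes under the two-level directory/table layout used above.
 * The shift values assume the usual 4 KiB pages with 1024-entry tables
 * (PSB_PDE_SHIFT == 22, PSB_PTE_SHIFT == 12); treat them as an assumption.
 */
static void psb_mmu_addr_example(void)
{
	uint32_t addr = 0x12345678;
	uint32_t pd_idx = addr >> 22;		/* 0x048: slot in the page directory, cf. psb_mmu_pd_index() */
	uint32_t pt_idx = (addr >> 12) & 0x3FF;	/* 0x345: slot in that page table, cf. psb_mmu_pt_index() */
	uint32_t off = addr & 0xFFF;		/* 0x678: byte offset within the page */

	(void)pd_idx;
	(void)pt_idx;
	(void)off;
}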
#include <accdet_hal.h> #include <mach/mt_boot.h> #include <cust_eint.h> #include <cust_gpio_usage.h> #include <mach/mt_gpio.h> //#include "accdet_drv.h" static struct platform_driver accdet_driver; static int debug_enable_drv = 1; #define ACCDET_DEBUG_DRV(format, args...) do { \ if (debug_enable_drv) { \ printk(KERN_WARNING format, ##args); \ } \ } while (0) static long <API key>(struct file *file, unsigned int cmd, unsigned long arg) { return <API key>(cmd, arg); } static int accdet_open(struct inode *inode, struct file *file) { return 0; } static int accdet_release(struct inode *inode, struct file *file) { return 0; } static struct file_operations accdet_fops = { .owner = THIS_MODULE, .unlocked_ioctl = <API key>, .open = accdet_open, .release = accdet_release, }; struct file_operations *accdet_get_fops(void) { return &accdet_fops; } static int accdet_probe(struct platform_device *dev) { mt_accdet_probe(); return 0; } static int accdet_remove(struct platform_device *dev) { mt_accdet_remove(); return 0; } static int accdet_suspend(struct device *device) // system sleep { mt_accdet_suspend(); return 0; } static int accdet_resume(struct device *device) // wake up { mt_accdet_resume(); return 0; } #ifdef CONFIG_PM static int <API key>(struct device *device) { <API key>(); return 0; } static struct dev_pm_ops accdet_pm_ops = { .suspend = accdet_suspend, .resume = accdet_resume, .restore_noirq = <API key>, }; #endif static struct platform_driver accdet_driver = { .probe = accdet_probe, //.suspend = accdet_suspend, //.resume = accdet_resume, .remove = accdet_remove, .driver = { .name = "Accdet_Driver", #ifdef CONFIG_PM .pm = &accdet_pm_ops, #endif }, }; struct platform_driver accdet_driver_func(void) { return accdet_driver; } static int accdet_mod_init(void) { int ret = 0; ACCDET_DEBUG_DRV("[Accdet]accdet_mod_init begin!\n"); // Accdet PM ret = <API key>(&accdet_driver); if (ret) { ACCDET_DEBUG_DRV("[Accdet]<API key> error:(%d)\n", ret); return ret; } else { ACCDET_DEBUG_DRV("[Accdet]<API key> done!\n"); } ACCDET_DEBUG_DRV("[Accdet]accdet_mod_init done!\n"); return 0; } static void accdet_mod_exit(void) { ACCDET_DEBUG_DRV("[Accdet]accdet_mod_exit\n"); <API key>(&accdet_driver); ACCDET_DEBUG_DRV("[Accdet]accdet_mod_exit Done!\n"); } /* Patch for CR ALPS00804150 & ALPS00804802: PMIC temperature not correct issue */ int <API key>(void) { //ACCDET_DEBUG("[ACCDET] <API key>=%d\n",<API key>()); return <API key>(); } EXPORT_SYMBOL(<API key>); /* Patch for CR ALPS00804150 & ALPS00804802: PMIC temperature not correct issue */ module_init(accdet_mod_init); module_exit(accdet_mod_exit); module_param(debug_enable_drv, int, 0644); MODULE_DESCRIPTION("MTK MT6588 ACCDET driver"); MODULE_AUTHOR("Anny <Anny.Hu@mediatek.com>"); MODULE_LICENSE("GPL");
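/*
 * Illustrative sketch, not in this file: accdet_get_fops() is exported so the
 * platform code that owns the device node can register these fops itself.
 * Something along these lines, where the "accdet" node name and the dynamic
 * major number are assumptions made for the example.
 */
static int example_register_accdet_chrdev(void)
{
	/* major 0 asks the kernel to pick a free major number */
	int major = register_chrdev(0, "accdet", accdet_get_fops());

	return (major < 0) ? major : 0;
}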
#include <linux/atomic.h> #include <linux/audit.h> #include <linux/compat.h> #include <linux/sched.h> #include <linux/seccomp.h> #include <linux/slab.h> #include <linux/syscalls.h> /* #define SECCOMP_DEBUG 1 */ #ifdef <API key> #include <asm/syscall.h> #endif #ifdef <API key> #include <linux/filter.h> #include <linux/pid.h> #include <linux/ptrace.h> #include <linux/security.h> #include <linux/tracehook.h> #include <linux/uaccess.h> /** * struct seccomp_filter - container for seccomp BPF programs * * @usage: reference count to manage the object lifetime. * get/put helpers should be used when accessing an instance * outside of a lifetime-guarded section. In general, this * is only needed for handling filters shared across tasks. * @prev: points to a previously installed, or inherited, filter * @len: the number of instructions in the program * @insns: the BPF program instructions to evaluate * * seccomp_filter objects are organized in a tree linked via the @prev * pointer. For any task, it appears to be a singly-linked list starting * with current->seccomp.filter, the most recently attached or inherited filter. * However, multiple filters may share a @prev node, by way of fork(), which * results in a unidirectional tree existing in memory. This is similar to * how namespaces work. * * seccomp_filter objects should never be modified after being attached * to a task_struct (other than @usage). */ struct seccomp_filter { atomic_t usage; struct seccomp_filter *prev; unsigned short len; /* Instruction count */ struct sock_filter insns[]; }; /* Limit any path through the tree to 256KB worth of instructions. */ #define MAX_INSNS_PER_PATH ((1 << 18) / sizeof(struct sock_filter)) /** * get_u32 - returns a u32 offset into data * @data: an unsigned 64-bit value * @index: 0 or 1 to return the first or second 32-bits * * This inline exists to hide the length of unsigned long. If a 32-bit * unsigned long is passed in, it will be extended and the top 32-bits will be * 0. If it is a 64-bit unsigned long, then whatever data is resident will be * properly returned. * * Endianness is explicitly ignored and left for BPF program authors to manage * as per the specific architecture. */ static inline u32 get_u32(u64 data, int index) { return ((u32 *)&data)[index]; } /* Helper for bpf_load below. */ #define BPF_DATA(_name) offsetof(struct seccomp_data, _name) /** * bpf_load: checks and returns the data at the requested offset * @off: offset into struct seccomp_data to load from * * Returns the requested 32-bits of data. * <API key>() should assure that @off is 32-bit aligned * and not out of bounds. Failure to do so is a BUG. */ u32 seccomp_bpf_load(int off) { struct pt_regs *regs = task_pt_regs(current); if (off == BPF_DATA(nr)) return syscall_get_nr(current, regs); if (off == BPF_DATA(arch)) return syscall_get_arch(); if (off >= BPF_DATA(args[0]) && off < BPF_DATA(args[6])) { unsigned long value; int arg = (off - BPF_DATA(args[0])) / sizeof(u64); int index = !!(off % sizeof(u64)); <API key>(current, regs, arg, 1, &value); return get_u32(value, index); } if (off == BPF_DATA(instruction_pointer)) return get_u32(KSTK_EIP(current), 0); if (off == BPF_DATA(instruction_pointer) + sizeof(u32)) return get_u32(KSTK_EIP(current), 1); /* <API key> should make this impossible.
*/ BUG(); } static int <API key>(struct sock_filter *filter, unsigned int flen) { int pc; for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; u16 code = ftest->code; u32 k = ftest->k; switch (code) { case BPF_S_LD_W_ABS: ftest->code = <API key>; /* 32-bit aligned and not out of bounds. */ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; case BPF_S_LD_W_LEN: ftest->code = BPF_S_LD_IMM; ftest->k = sizeof(struct seccomp_data); continue; case BPF_S_LDX_W_LEN: ftest->code = BPF_S_LDX_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. */ case BPF_S_RET_K: case BPF_S_RET_A: case BPF_S_ALU_ADD_K: case BPF_S_ALU_ADD_X: case BPF_S_ALU_SUB_K: case BPF_S_ALU_SUB_X: case BPF_S_ALU_MUL_K: case BPF_S_ALU_MUL_X: case BPF_S_ALU_DIV_X: case BPF_S_ALU_AND_K: case BPF_S_ALU_AND_X: case BPF_S_ALU_OR_K: case BPF_S_ALU_OR_X: case BPF_S_ALU_XOR_K: case BPF_S_ALU_XOR_X: case BPF_S_ALU_LSH_K: case BPF_S_ALU_LSH_X: case BPF_S_ALU_RSH_K: case BPF_S_ALU_RSH_X: case BPF_S_ALU_NEG: case BPF_S_LD_IMM: case BPF_S_LDX_IMM: case BPF_S_MISC_TAX: case BPF_S_MISC_TXA: case BPF_S_ALU_DIV_K: case BPF_S_LD_MEM: case BPF_S_LDX_MEM: case BPF_S_ST: case BPF_S_STX: case BPF_S_JMP_JA: case BPF_S_JMP_JEQ_K: case BPF_S_JMP_JEQ_X: case BPF_S_JMP_JGE_K: case BPF_S_JMP_JGE_X: case BPF_S_JMP_JGT_K: case BPF_S_JMP_JGT_X: case BPF_S_JMP_JSET_K: case BPF_S_JMP_JSET_X: continue; default: return -EINVAL; } } return 0; } /** * seccomp_run_filters - evaluates all seccomp filters against @syscall * @syscall: number of the current system call * * Returns valid seccomp BPF response codes. */ static u32 seccomp_run_filters(void) { struct seccomp_filter *f = ACCESS_ONCE(current->seccomp.filter); u32 ret = SECCOMP_RET_ALLOW; /* Ensure unexpected behavior doesn't result in failing open. */ if (unlikely(WARN_ON(f == NULL))) return SECCOMP_RET_KILL; /* Make sure cross-thread synced filter points somewhere sane. */ <API key>(); /* * All filters in the list are evaluated and the lowest BPF return * value always takes priority (ignoring the DATA). */ for (; f; f = f->prev) { u32 cur_ret = sk_run_filter(NULL, f->insns); if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION)) ret = cur_ret; } return ret; } #endif /* <API key> */ static inline bool <API key>(unsigned long seccomp_mode) { assert_spin_locked(&current->sighand->siglock); if (current->seccomp.mode && current->seccomp.mode != seccomp_mode) return false; return true; } static inline void seccomp_assign_mode(struct task_struct *task, unsigned long seccomp_mode) { assert_spin_locked(&task->sighand->siglock); task->seccomp.mode = seccomp_mode; /* * Make sure TIF_SECCOMP cannot be set before the mode (and * filter) is set. */ smp_mb(); set_tsk_thread_flag(task, TIF_SECCOMP); } #ifdef <API key> /* Returns 1 if the parent is an ancestor of the child. */ static int is_ancestor(struct seccomp_filter *parent, struct seccomp_filter *child) { /* NULL is the root ancestor. */ if (parent == NULL) return 1; for (; child; child = child->prev) if (child == parent) return 1; return 0; } /** * <API key>: checks if all threads can be synchronized * * Expects sighand and cred_guard_mutex locks to be held. * * Returns 0 on success, -ve on error, or the pid of a thread which was * either not in the correct seccomp mode or it did not have an ancestral * seccomp filter. 
/**
 * seccomp_can_sync_threads: checks if all threads can be synchronized
 *
 * Expects sighand and cred_guard_mutex locks to be held.
 *
 * Returns 0 on success, -ve on error, or the pid of a thread which was
 * either not in the correct seccomp mode or did not have an ancestral
 * seccomp filter.
 */
static inline pid_t seccomp_can_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Validate all threads being eligible for synchronization. */
        caller = current;
        for_each_thread(caller, thread) {
                pid_t failed;

                /* Skip current, since it is initiating the sync. */
                if (thread == caller)
                        continue;

                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED ||
                    (thread->seccomp.mode == SECCOMP_MODE_FILTER &&
                     is_ancestor(thread->seccomp.filter,
                                 caller->seccomp.filter)))
                        continue;

                /* Return the first thread that cannot be synchronized. */
                failed = task_pid_vnr(thread);
                /* If the pid cannot be resolved, then return -ESRCH */
                if (unlikely(WARN_ON(failed == 0)))
                        failed = -ESRCH;
                return failed;
        }

        return 0;
}

/**
 * seccomp_sync_threads: sets all threads to use current's filter
 *
 * Expects sighand and cred_guard_mutex locks to be held, and for
 * seccomp_can_sync_threads() to have returned success already
 * without dropping the locks.
 */
static inline void seccomp_sync_threads(void)
{
        struct task_struct *thread, *caller;

        BUG_ON(!mutex_is_locked(&current->signal->cred_guard_mutex));
        assert_spin_locked(&current->sighand->siglock);

        /* Synchronize all threads. */
        caller = current;
        for_each_thread(caller, thread) {
                /* Skip current, since it needs no changes. */
                if (thread == caller)
                        continue;

                /* Get a task reference for the new leaf node. */
                get_seccomp_filter(caller);
                /*
                 * Drop the task reference to the shared ancestor since
                 * current's path will hold a reference.  (This also
                 * allows a put before the assignment.)
                 */
                put_seccomp_filter(thread);
                smp_store_release(&thread->seccomp.filter,
                                  caller->seccomp.filter);

                /*
                 * Opt the other thread into seccomp if needed.
                 * As threads are considered to be trust-realm
                 * equivalent (see ptrace_may_access), it is safe to
                 * allow one thread to transition the other.
                 */
                if (thread->seccomp.mode == SECCOMP_MODE_DISABLED) {
                        /*
                         * Don't let an unprivileged task work around
                         * the no_new_privs restriction by creating
                         * a thread that sets it up, enters seccomp,
                         * then dies.
                         */
                        if (task_no_new_privs(caller))
                                task_set_no_new_privs(thread);

                        seccomp_assign_mode(thread, SECCOMP_MODE_FILTER);
                }
        }
}
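/*
 * Editorial sketch (not part of the original file): the userspace side of
 * TSYNC.  Names come from the UAPI headers; prog and the error handler are
 * hypothetical, and error checking is elided.
 *
 *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
 *	ret = syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER,
 *		      SECCOMP_FILTER_FLAG_TSYNC, &prog);
 *	if (ret > 0)
 *		// ret is the TID reported by seccomp_can_sync_threads():
 *		// a sibling thread in an incompatible seccomp state.
 *		handle_unsynced_thread(ret);
 */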
/**
 * seccomp_prepare_filter: Prepares a seccomp filter for use.
 * @fprog: BPF program to install
 *
 * Returns filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *seccomp_prepare_filter(struct sock_fprog *fprog)
{
        struct seccomp_filter *filter;
        unsigned long fp_size = fprog->len * sizeof(struct sock_filter);
        unsigned long total_insns = fprog->len;
        long ret;

        if (fprog->len == 0 || fprog->len > BPF_MAXINSNS)
                return ERR_PTR(-EINVAL);
        BUG_ON(INT_MAX / fprog->len < sizeof(struct sock_filter));

        for (filter = current->seccomp.filter; filter; filter = filter->prev)
                total_insns += filter->len + 4;  /* include a 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return ERR_PTR(-ENOMEM);

        /*
         * Installing a seccomp filter requires that the task have
         * CAP_SYS_ADMIN in its namespace or be running with no_new_privs.
         * This avoids scenarios where unprivileged tasks can affect the
         * behavior of privileged children.
         */
        if (!task_no_new_privs(current) &&
            security_capable_noaudit(current_cred(), current_user_ns(),
                                     CAP_SYS_ADMIN) != 0)
                return ERR_PTR(-EACCES);

        /* Allocate a new seccomp_filter */
        filter = kzalloc(sizeof(struct seccomp_filter) + fp_size,
                         GFP_KERNEL|__GFP_NOWARN);
        if (!filter)
                return ERR_PTR(-ENOMEM);
        atomic_set(&filter->usage, 1);
        filter->len = fprog->len;

        /* Copy the instructions from fprog. */
        ret = -EFAULT;
        if (copy_from_user(filter->insns, fprog->filter, fp_size))
                goto fail;

        /* Check and rewrite the fprog via the skb checker */
        ret = sk_chk_filter(filter->insns, filter->len);
        if (ret)
                goto fail;

        /* Check and rewrite the fprog for seccomp use */
        ret = seccomp_check_filter(filter->insns, filter->len);
        if (ret)
                goto fail;

        return filter;

fail:
        kfree(filter);
        return ERR_PTR(ret);
}

/**
 * seccomp_prepare_user_filter - prepares a user-supplied sock_fprog
 * @user_filter: pointer to the user data containing a sock_fprog.
 *
 * Returns the prepared filter on success or an ERR_PTR on failure.
 */
static struct seccomp_filter *
seccomp_prepare_user_filter(const char __user *user_filter)
{
        struct sock_fprog fprog;
        struct seccomp_filter *filter = ERR_PTR(-EFAULT);

#ifdef CONFIG_COMPAT
        if (is_compat_task()) {
                struct compat_sock_fprog fprog32;

                if (copy_from_user(&fprog32, user_filter, sizeof(fprog32)))
                        goto out;
                fprog.len = fprog32.len;
                fprog.filter = compat_ptr(fprog32.filter);
        } else /* falls through to the if below. */
#endif
        if (copy_from_user(&fprog, user_filter, sizeof(fprog)))
                goto out;
        filter = seccomp_prepare_filter(&fprog);
out:
        return filter;
}

/**
 * seccomp_attach_filter: validate and attach filter
 * @flags: flags to change filter behavior
 * @filter: seccomp filter to add to the current process
 *
 * Caller must be holding current->sighand->siglock lock.
 *
 * Returns 0 on success, -ve on error.
 */
static long seccomp_attach_filter(unsigned int flags,
                                  struct seccomp_filter *filter)
{
        unsigned long total_insns;
        struct seccomp_filter *walker;

        assert_spin_locked(&current->sighand->siglock);

        /* Validate resulting filter length. */
        total_insns = filter->len;
        for (walker = current->seccomp.filter; walker; walker = walker->prev)
                total_insns += walker->len + 4;  /* 4 instr penalty */
        if (total_insns > MAX_INSNS_PER_PATH)
                return -ENOMEM;

        /* If thread sync has been requested, check that it is possible. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC) {
                int ret;

                ret = seccomp_can_sync_threads();
                if (ret)
                        return ret;
        }

        /*
         * If there is an existing filter, make it the prev and don't drop its
         * task reference.
         */
        filter->prev = current->seccomp.filter;
        current->seccomp.filter = filter;

        /* Now that the new filter is in place, synchronize to all threads. */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                seccomp_sync_threads();

        return 0;
}

/* get_seccomp_filter - increments the reference count of the filter on @tsk */
void get_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;

        if (!orig)
                return;
        /* Reference count is bounded by the number of total processes. */
        atomic_inc(&orig->usage);
}

static inline void seccomp_filter_free(struct seccomp_filter *filter)
{
        if (filter)
                kfree(filter);
}

/* put_seccomp_filter - decrements the ref count of tsk->seccomp.filter */
void put_seccomp_filter(struct task_struct *tsk)
{
        struct seccomp_filter *orig = tsk->seccomp.filter;

        /* Clean up single-reference branches iteratively. */
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;

                orig = orig->prev;
                seccomp_filter_free(freeme);
        }
}
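/*
 * Editorial note (not part of the original file): filter lifetime across
 * fork().  Only the list head is reference-counted per task; each node's
 * @usage also stands in for the reference its children hold via @prev.
 * After a task with filters B -> A forks:
 *
 *	parent->seccomp.filter -> B (usage == 2) -> A (usage == 1)
 *	child->seccomp.filter  ---^
 *
 * put_seccomp_filter() from either task drops B to 1 and stops; the walk
 * only reaches A once the last reference to B is gone.
 */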
/**
 * seccomp_send_sigsys - signals the task to allow in-process syscall emulation
 * @syscall: syscall number to send to userland
 * @reason: filter-supplied reason code to send to userland (via si_errno)
 *
 * Forces a SIGSYS with a code of SYS_SECCOMP and related sigsys info.
 */
static void seccomp_send_sigsys(int syscall, int reason)
{
        struct siginfo info;

        memset(&info, 0, sizeof(info));
        info.si_signo = SIGSYS;
        info.si_code = SYS_SECCOMP;
        info.si_call_addr = (void __user *)KSTK_EIP(current);
        info.si_errno = reason;
        info.si_arch = syscall_get_arch();
        info.si_syscall = syscall;
        force_sig_info(SIGSYS, &info, current);
}
#endif /* CONFIG_SECCOMP_FILTER */

/*
 * Secure computing mode 1 allows only read/write/exit/sigreturn.
 * To be fully secure this must be combined with rlimit
 * to limit the stack allocations too.
 */
static int mode1_syscalls[] = {
        __NR_seccomp_read, __NR_seccomp_write, __NR_seccomp_exit,
        __NR_seccomp_sigreturn,
        0, /* null terminated */
};

#ifdef CONFIG_COMPAT
static int mode1_syscalls_32[] = {
        __NR_seccomp_read_32, __NR_seccomp_write_32, __NR_seccomp_exit_32,
        __NR_seccomp_sigreturn_32,
        0, /* null terminated */
};
#endif

static void __secure_computing_strict(int this_syscall)
{
        int *syscall_whitelist = mode1_syscalls;

#ifdef CONFIG_COMPAT
        if (is_compat_task())
                syscall_whitelist = mode1_syscalls_32;
#endif
        do {
                if (*syscall_whitelist == this_syscall)
                        return;
        } while (*++syscall_whitelist);

#ifdef SECCOMP_DEBUG
        dump_stack();
#endif
        audit_seccomp(this_syscall, SIGKILL, SECCOMP_RET_KILL);
        do_exit(SIGKILL);
}

#ifndef CONFIG_HAVE_ARCH_SECCOMP_FILTER
void secure_computing_strict(int this_syscall)
{
        int mode = current->seccomp.mode;

        if (mode == 0)
                return;
        else if (mode == SECCOMP_MODE_STRICT)
                __secure_computing_strict(this_syscall);
        else
                BUG();
}
#else
int __secure_computing(void)
{
        struct pt_regs *regs = task_pt_regs(current);
        int this_syscall = syscall_get_nr(current, regs);
        int exit_sig = 0;
        u32 ret;

        /*
         * Make sure that any changes to mode from another thread have
         * been seen after TIF_SECCOMP was seen.
         */
        rmb();

        switch (current->seccomp.mode) {
        case SECCOMP_MODE_STRICT:
                __secure_computing_strict(this_syscall);
                return 0;
#ifdef CONFIG_SECCOMP_FILTER
        case SECCOMP_MODE_FILTER: {
                int data;

                ret = seccomp_run_filters();
                data = ret & SECCOMP_RET_DATA;
                ret &= SECCOMP_RET_ACTION;
                switch (ret) {
                case SECCOMP_RET_ERRNO:
                        /* Set low-order bits as an errno, capped at MAX_ERRNO. */
                        if (data > MAX_ERRNO)
                                data = MAX_ERRNO;
                        syscall_set_return_value(current, regs, -data, 0);
                        goto skip;
                case SECCOMP_RET_TRAP:
                        /* Show the handler the original registers. */
                        syscall_rollback(current, regs);
                        /* Let the filter pass back 16 bits of data. */
                        seccomp_send_sigsys(this_syscall, data);
                        goto skip;
                case SECCOMP_RET_TRACE:
                        /* Skip these calls if there is no tracer. */
                        if (!ptrace_event_enabled(current, PTRACE_EVENT_SECCOMP)) {
                                syscall_set_return_value(current, regs,
                                                         -ENOSYS, 0);
                                goto skip;
                        }
                        /* Allow the BPF to provide the event message */
                        ptrace_event(PTRACE_EVENT_SECCOMP, data);
                        /*
                         * The delivery of a fatal signal during event
                         * notification may silently skip tracer notification.
                         * Terminating the task now avoids executing a system
                         * call that may not be intended.
                         */
                        if (fatal_signal_pending(current))
                                break;
                        if (syscall_get_nr(current, regs) < 0)
                                goto skip;  /* Explicit request to skip. */

                        return 0;
                case SECCOMP_RET_ALLOW:
                        return 0;
                case SECCOMP_RET_KILL:
                default:
                        break;
                }
                exit_sig = SIGSYS;
                break;
        }
#endif
        default:
                BUG();
        }

#ifdef SECCOMP_DEBUG
        dump_stack();
#endif
        audit_seccomp(this_syscall, exit_sig, ret);
        do_exit(exit_sig);
#ifdef CONFIG_SECCOMP_FILTER
skip:
        audit_seccomp(this_syscall, exit_sig, ret);
        return -1;
#endif
}
#endif /* CONFIG_HAVE_ARCH_SECCOMP_FILTER */

long prctl_get_seccomp(void)
{
        return current->seccomp.mode;
}
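/*
 * Editorial sketch (not part of the original file): the receiving end of
 * SECCOMP_RET_TRAP.  The siginfo fields match those filled in by
 * seccomp_send_sigsys() above; the handler itself is hypothetical.
 *
 *	static void sigsys_handler(int sig, siginfo_t *info, void *ctx)
 *	{
 *		if (info->si_code != SYS_SECCOMP)
 *			return;
 *		// info->si_syscall: the trapped syscall number
 *		// info->si_errno:   the 16 bits of SECCOMP_RET_DATA
 *		emulate_syscall(info->si_syscall, info->si_errno);
 *	}
 */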
/**
 * seccomp_set_mode_strict: internal function for setting strict seccomp
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_strict(void)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_STRICT;
        long ret = -EINVAL;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

#ifdef TIF_NOTSC
        disable_TSC();
#endif
        seccomp_assign_mode(current, seccomp_mode);
        ret = 0;

out:
        spin_unlock_irq(&current->sighand->siglock);

        return ret;
}

#ifdef CONFIG_SECCOMP_FILTER
/**
 * seccomp_set_mode_filter: internal function for setting seccomp filter
 * @flags: flags to change filter behavior
 * @filter: struct sock_fprog containing filter
 *
 * This function may be called repeatedly to install additional filters.
 * Every filter successfully installed will be evaluated (in reverse order)
 * for each system call the task makes.
 *
 * Once current->seccomp.mode is non-zero, it may not be changed.
 *
 * Returns 0 on success or -EINVAL on failure.
 */
static long seccomp_set_mode_filter(unsigned int flags,
                                    const char __user *filter)
{
        const unsigned long seccomp_mode = SECCOMP_MODE_FILTER;
        struct seccomp_filter *prepared = NULL;
        long ret = -EINVAL;

        /* Validate flags. */
        if (flags & ~SECCOMP_FILTER_FLAG_MASK)
                return -EINVAL;

        /* Prepare the new filter before holding any locks. */
        prepared = seccomp_prepare_user_filter(filter);
        if (IS_ERR(prepared))
                return PTR_ERR(prepared);

        /*
         * Make sure we cannot change seccomp or nnp state via TSYNC
         * while another thread is in the middle of calling exec.
         */
        if (flags & SECCOMP_FILTER_FLAG_TSYNC &&
            mutex_lock_killable(&current->signal->cred_guard_mutex))
                goto out_free;

        spin_lock_irq(&current->sighand->siglock);

        if (!seccomp_may_assign_mode(seccomp_mode))
                goto out;

        ret = seccomp_attach_filter(flags, prepared);
        if (ret)
                goto out;
        /* Do not free the successfully attached filter. */
        prepared = NULL;

        seccomp_assign_mode(current, seccomp_mode);
out:
        spin_unlock_irq(&current->sighand->siglock);
        if (flags & SECCOMP_FILTER_FLAG_TSYNC)
                mutex_unlock(&current->signal->cred_guard_mutex);
out_free:
        seccomp_filter_free(prepared);
        return ret;
}
#else
static inline long seccomp_set_mode_filter(unsigned int flags,
                                           const char __user *filter)
{
        return -EINVAL;
}
#endif

/* Common entry point for both prctl and syscall. */
static long do_seccomp(unsigned int op, unsigned int flags,
                       const char __user *uargs)
{
        switch (op) {
        case SECCOMP_SET_MODE_STRICT:
                if (flags != 0 || uargs != NULL)
                        return -EINVAL;
                return seccomp_set_mode_strict();
        case SECCOMP_SET_MODE_FILTER:
                return seccomp_set_mode_filter(flags, uargs);
        default:
                return -EINVAL;
        }
}

SYSCALL_DEFINE3(seccomp, unsigned int, op, unsigned int, flags,
                const char __user *, uargs)
{
        return do_seccomp(op, flags, uargs);
}

/**
 * prctl_set_seccomp: configures current->seccomp.mode
 * @seccomp_mode: requested mode to use
 * @filter: optional struct sock_fprog for use with SECCOMP_MODE_FILTER
 *
 * Returns 0 on success or -EINVAL on failure.
 */
long prctl_set_seccomp(unsigned long seccomp_mode, char __user *filter)
{
        unsigned int op;
        char __user *uargs;

        switch (seccomp_mode) {
        case SECCOMP_MODE_STRICT:
                op = SECCOMP_SET_MODE_STRICT;
                /*
                 * Setting strict mode through prctl always ignored filter,
                 * so make sure it is always NULL here to pass the internal
                 * check in do_seccomp().
                 */
                uargs = NULL;
                break;
        case SECCOMP_MODE_FILTER:
                op = SECCOMP_SET_MODE_FILTER;
                uargs = filter;
                break;
        default:
                return -EINVAL;
        }

        /* prctl interface doesn't have flags, so they are always zero. */
        return do_seccomp(op, 0, uargs);
}
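/*
 * Editorial note (not part of the original file): the two entry points
 * converge in do_seccomp(), so these userspace calls are equivalent:
 *
 *	prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
 *	syscall(__NR_seccomp, SECCOMP_SET_MODE_FILTER, 0, &prog);
 *
 * Only the seccomp(2) form can pass flags such as
 * SECCOMP_FILTER_FLAG_TSYNC, since the prctl path hardcodes flags == 0.
 */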