text
stringlengths
29
850k
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Implementation of gcloud bigquery jobs show-rows.
"""

from googlecloudsdk.api_lib.bigquery import bigquery
from googlecloudsdk.calliope import base
from googlecloudsdk.core.console import console_io


class JobsShowRows(base.Command):
  """Displays selected rows in the result of a query job.
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    parser.add_argument(
        '--limit',
        type=int,
        default=bigquery.DEFAULT_RESULTS_LIMIT,
        help='The maximum number of rows to display.')
    parser.add_argument(
        '--start-row',
        type=int,
        default=0,
        help='The number of rows to skip before showing table data.')
    parser.add_argument(
        'job_id', help='The job ID of the asynchronous query.')

  def Run(self, args):
    """Fetch the selected slice of rows from the query job's results.

    Args:
      args: an argparse namespace, All the arguments that were provided to this
        command invocation.

    Returns:
      A bigquery.QueryResults object.
    """
    query_job = bigquery.Job.ResolveFromId(args.job_id)
    return query_job.GetQueryResults(
        start_row=args.start_row, max_rows=args.limit)

  def Display(self, args, query_results):
    """Print the result of the Run() method.

    Args:
      args: The arguments that the command was run with.
      query_results: A bigquery.QueryResults object.
    """
    column_fetchers = query_results.GetColumnFetchers()
    console_io.PrintExtendedList(query_results, column_fetchers)
The first, an annual ACIP Workshop in South Africa, provides interdisciplinary, cross-institutional contexts to address issues central to such debate and to develop comparative, critical frameworks that can yield fresh insights, innovative and informed practice, and lively interchange for those working in and on institutions of public culture in Africa. The second component, the Ivan Karp Doctoral Research Awards, fosters work by the next generation of scholar-practitioners. In these ways, ACIP strengthens public scholarship and institutions of public culture in South Africa and enhances our understanding of these vital sites of cultural production and social action. Details about the workshops and research awards will be posted as they become available. ACIP is supported by contributions to the Ivan Karp and Corinne Kratz Fund. The Fund was created to honor the late National Endowment for the Humanities Professor Ivan Karp whose work was monumental in the fields of anthropology, museum studies, African studies, social theory, and public scholarship. The Fund helps continue his collaborative work with universities, museums and other cultural institutions in South Africa through activities such as lectures, programs and student research support. Please donate here to support this work. You can learn more about Dr. Ivan Karp and his work by visiting the Ivan Karp Archive. This online archive of his publications is intended to make his work widely available. The links provide lists of Karp’s books and downloads of his articles, as well as lists of the works published in the two book series for which he served as editor. The archive also includes several videos of Karp’s presentations and In Memoriam tributes to him. Click here to visit the Ivan Karp Archive. Make an online contribution to the Ivan Karp and Corinne Kratz Fund. From February 2010, Dr. Ivan Karp discusses grants and proposal writing for students in the humanities and humanistic social sciences.
# encoding=utf8
# wfpy internal implementation
# snowdreamist@live.cn

from contextlib import contextmanager
from threading import Thread, Lock, Semaphore

# Python 2/3 compatibility: on Python 2, text (unicode) must be encoded to
# utf8 bytes before being handed to the stdlib logger; on Python 3, str is
# already text and no encoding step is needed.
try:
    _text_type = unicode  # noqa: F821 -- exists on Python 2 only
except NameError:
    _text_type = None


@contextmanager
def lock(lock):
    """Context manager: acquire *lock* (blocking) and always release it."""
    try:
        lock.acquire(True)
        yield
    finally:
        lock.release()


class InternalLogger(object):
    """Named logger that appends its name to the path and delegates to a parent logger."""

    def __init__(self, name, logger):
        self.__name = name
        self.__logger = logger

    @property
    def name(self):
        return self.__name

    def error(self, message, error=None, paths=None):
        """Log error
        Parameters:
            message     error message
            error       exception object
        """
        if paths is None:
            paths = [self.name]
        else:
            paths.append(self.name)
        self.__logger.error(message, error, paths)

    def info(self, message, error=None, paths=None):
        """Log info"""
        if paths is None:
            paths = [self.name]
        else:
            paths.append(self.name)
        self.__logger.info(message, error, paths)

    def warn(self, message, error=None, paths=None):
        """Log warn"""
        if paths is None:
            paths = [self.name]
        else:
            paths.append(self.name)
        self.__logger.warn(message, error, paths)

    def debug(self, message, error=None, paths=None):
        """Log debug"""
        if paths is None:
            paths = [self.name]
        else:
            paths.append(self.name)
        self.__logger.debug(message, error, paths)


class InternalRootLogger(InternalLogger):
    """Root internal logger: joins the path chain and writes to a stdlib logger."""

    def __init__(self, sysLogger, logCallback=None):
        """
        Parameters:
            sysLogger       python built-in logger
            logCallback     callback on log event
                            callback function signature:
                                callbackHandler(level, path, message, error)
        """
        self.__sysLogger = sysLogger
        self.__logCallback = logCallback
        super(InternalRootLogger, self).__init__('root', None)

    def __formatexception__(self, error):
        """Format exception for log output.
        BUG FIX: the original was an empty stub (returned None), so every
        'Related exception' line logged the string 'None'.
        """
        if error is None:
            return ''
        return '%s: %s' % (type(error).__name__, error)

    def __log__(self, level, paths, message, error):
        """Do log: fire the callback (if any), then write to the stdlib logger."""
        if paths is None:
            path = ''
        else:
            # paths is accumulated child-first; reverse to get root->leaf order.
            path = '.'.join(reversed(paths))
        # Callback -- a failing callback must not prevent the log write,
        # so the error is captured and reported afterwards.
        cbError = None
        if self.__logCallback is not None:
            try:
                self.__logCallback(level, path, message, error)
            except Exception as callbackError:
                cbError = callbackError
        # Encode text to utf8 (Python 2 only; no-op on Python 3).
        if _text_type is not None and isinstance(path, _text_type):
            path = path.encode('utf8')
        if _text_type is not None and isinstance(message, _text_type):
            message = message.encode('utf8')
        # Get method
        if level == 'error':
            logfunc = self.__sysLogger.error
        elif level == 'warn':
            logfunc = self.__sysLogger.warn
        elif level == 'debug':
            logfunc = self.__sysLogger.debug
        else:
            logfunc = self.__sysLogger.info
        logfunc('[%s]:[%s]', path, message)
        if error is not None:
            logfunc('[%s]:[Related exception]:[%s]', path,
                    self.__formatexception__(error))
        if cbError is not None:
            self.__sysLogger.warn('[Logger][%s]:[Callback failed]', path)
            self.__sysLogger.warn('[Logger][%s]:[Related exception]:[%s]', path,
                                  self.__formatexception__(cbError))

    def error(self, message, error=None, paths=None):
        """Log error"""
        self.__log__('error', paths, message, error)

    def info(self, message, error=None, paths=None):
        """Log info
        BUG FIX: *error* now defaults to None, consistent with error/warn/debug
        (the original required it positionally on this one method only).
        """
        self.__log__('info', paths, message, error)

    def warn(self, message, error=None, paths=None):
        """Log warn"""
        self.__log__('warn', paths, message, error)

    def debug(self, message, error=None, paths=None):
        """Log debug"""
        self.__log__('debug', paths, message, error)


class Dispatcher(object):
    """The worker dispatcher: executes queued invocations on one worker thread."""

    class Instance(object):
        """Dispatcher instance: a single queued method invocation."""

        def __init__(self, method, args, kwargs):
            self.method = method
            self.args = args
            self.kwargs = kwargs

        def __call__(self):
            """Invoke the stored method with its stored arguments."""
            args = self.args if self.args is not None else []
            kwargs = self.kwargs if self.kwargs is not None else {}
            return self.method(*args, **kwargs)

    def __init__(self, name, logger):
        self.__thread = Thread(name=name, target=self.__worker__)
        self.__lock = Lock()
        self.__sema = Semaphore(0)  # counts pending queue entries
        self.__isStopping = False
        self.__isRunning = False
        self.__queue = []
        self.__logger = InternalLogger('dispatcher', logger)

    def run(self):
        """Start running the worker thread. Raises RuntimeError on reuse."""
        if self.__isStopping:
            raise RuntimeError('Cannot run when stopping or stopped')
        if self.__isRunning:
            raise RuntimeError('Cannot run when running')
        self.__isRunning = True
        self.__thread.start()
        self.__logger.info('Started')

    def stop(self):
        """Stop running. Pending invocations are abandoned, not drained."""
        self.__isStopping = True
        self.__sema.release()  # Notify worker thread

    def invoke(self, method, args=None, kwargs=None):
        """Queue *method* for execution on the worker thread."""
        if self.__isStopping:
            raise RuntimeError('Cannot invoke when stopping')
        if method is None:
            raise ValueError('Argument method cannot be None')
        instance = Dispatcher.Instance(method, args, kwargs)
        with lock(self.__lock):
            self.__queue.append(instance)
        self.__sema.release()

    def __worker__(self):
        """The dispatcher worker loop."""
        while True:
            # Wait until new request in
            self.__sema.acquire()
            # Check if the dispatcher is stopping
            # If so ignore all existing invokings and exit
            if self.__isStopping:
                break
            with lock(self.__lock):
                # Test empty
                if len(self.__queue) == 0:
                    continue
                # Pop an instance
                instance = self.__queue.pop(0)
            # Execute outside the lock so a slow invocation cannot block invoke().
            try:
                instance()
            except Exception as error:
                # Something went wrong, do error report
                # BUG FIX: was `self.logger`, a nonexistent attribute that made
                # any failing invocation raise AttributeError in the worker.
                self.__logger.error('Invoke failed', error)
        # Exit
        if len(self.__queue) > 0:
            # BUG FIX: `self.logger` -> `self.__logger` here and below.
            self.__logger.warn('Abandon %d invoking instance on exiting'
                               % len(self.__queue))
        self.__logger.info('Exit')


class Event(object):
    """Event object: a simple multicast handler list."""

    def __init__(self):
        self.__events = []

    def add(self, handler):
        """Add an event handler"""
        self.__events.append(handler)

    def remove(self, handler):
        """Remove an event handler"""
        self.__events.remove(handler)

    def clear(self):
        """Clear all events"""
        self.__events = []

    def __call__(self, sender, eventArgs):
        """Raise this event.
        BUG FIX: the original iterated `this.__events` -- `this` does not
        exist in Python, so raising any event was a NameError.
        """
        for handler in self.__events:
            handler(sender, eventArgs)


class WorkerInstance(object):
    """A worker instance: thin wrapper holding a worker object."""

    def __init__(self, worker):
        self.__worker = worker

    @property
    def worker(self):
        return self.__worker


class Workerpool(object):
    """The worker pool: maps worker guid -> WorkerQueue, created on demand."""

    def __init__(self):
        self.__queues = {}
        self.__lock = Lock()

    def add(self, instance):
        """Add a worker instance to the queue for its worker's guid."""
        self[instance.worker.guid].append(instance)

    def __getitem__(self, guid):
        """Get a queue or create one by guid (thread-safe)."""
        with lock(self.__lock):
            if guid not in self.__queues:
                queue = WorkerQueue()
                self.__queues[guid] = queue
            else:
                queue = self.__queues[guid]
            return queue


class WorkerQueue(object):
    """The worker queue: a thread-safe FIFO of worker instances."""

    def __init__(self):
        self.__lst = []
        self.__lock = Lock()

    def append(self, instance):
        """Add a worker instance"""
        with lock(self.__lock):
            self.__lst.append(instance)

    def pop(self):
        """Pop a worker instance
        None if the queue is empty
        """
        with lock(self.__lock):
            if len(self.__lst) == 0:
                return None
            return self.__lst.pop(0)


class Components(object):
    """Component container
    NOTE: Why not use PluginRepo as Components directly?
    Since there're some components are application wide and some are
    workflow wide. The scopes are not consistent.
    """

    def __init__(self, pluginRepo):
        self.pluginRepo = pluginRepo
        self.__comsdct = {}

    def __contains__(self, name):
        """Check component"""
        return name in self.__comsdct

    def __getitem__(self, name):
        """Get component"""
        return self.__comsdct[name]

    def __setitem__(self, name, value):
        """Set component"""
        self.__comsdct[name] = value


class CallbackInvoker(object):
    """Support all callback invoking"""

    def __init__(self, components):
        self.__dispatcher = components['dispatcher']

    def invoke(self, callbackHandler, *args, **kwargs):
        """Invoke callbackHandler
        NOTE: All callback should run in dispatcher in order to avoid
        multi-thread problems
        """
        self.__dispatcher.invoke(callbackHandler, args, kwargs)
One of the greatest compliments I received from a friend was “you’ve changed”. Life is always changing and if we are doing our inner work, we are going to change. We become more present and stop tolerating guilt and shame tactics from others. Most importantly, we shed the things that are holding us back in life. We become more welcoming to rapid change as we embrace our inner guidance. In many cases this will defy logic and often trigger others. So it’s important to remain centered and grounded within the self. When someone says “you’ve changed” what they are really expressing are their fears and insecurities from having to relearn you. More often than not it’s a manipulation tactic to get you to do something they want. They might call you selfish or use global communication tactics with words such as “always” or “never”, etc. These projections stem from their own insecurities and are used to trigger guilt, shame or fear as an attempt to manipulate you. If we alter our choices due to projections from others we end up enabling them. Enabling is not an act of self-love, let alone unconditional love, and continues to feed the same cycles. Some friends may not agree with your choices in life but this has nothing to do with you and everything to do with them. Release the concern and focus on living your truth. True friends will remain supportive. They may not understand and no explanation is necessary as that will only provide them more details for manipulation. If we do not remain mindful of our thoughts we find ourselves projecting from our past. There is a vast difference between coming from a place of love and coming from a place of fear. The vibration is easily picked up here as you can feel into the energy behind the words for the underlying intent. Fear is often the result of projecting from our past, and when we do this towards others we are also doing this towards our self. It’s an attempt to hold others back but ultimately has a greater impact to the self. 
This ties to the mirror aspect of life, which is really life reflecting back to us our most dominant inner state. The beauty here is that it reveals our self-imposed limitations as we reflect. When we come from love we might share some wisdom but it comes from a different place. We become more supportive in offering unconditional guidance. A few years ago, I connected with one of the greatest hypnotherapists and healers I have ever met (Melina Johnson CHt). In one of our discussions she conveyed how she listens to the energy and only moves when it tells her to. If something is off, she changes course taking the path of least resistance. You know when things are aligned as everything will flow. I would practice this at times, but once we had this discussion I decided to be more consistent in living and testing this for myself. To really test a belief we must embody it, live it and breathe it. This is how we move from belief to experience to knowing. I started living this more consistently and was amazed with the difference it made in my choices and experiences. It’s basically retraining ourselves to live from a deeply present mind with an open heart. It’s impossible to be manipulated when practicing this, as presence sees through everything. This also allows for us to utilize intuition as our guide. The inner guidance always knows, we only need to get silent enough to understand its inner voice. The more we listen to the flow of life, the easier it becomes to embrace change. An open heart and present mind is crucial for this. With these two things, the struggles of life have no power over the self. Change can trigger us causing us to be more reactive and resistant to life. If you find yourself triggered, embrace it. If you are triggered and you think “I shouldn’t be triggered” then you have resistance which keeps you in that pain. Resistance keeps us stuck; the more we release, the more welcoming we are to life and THIS is where the beauty unfolds.
We become more fluid and find ourselves moving beyond the emotional roller coasters of life. Without resistance we pass through the emotions far more quickly, in many instances it’s so fast that we don’t even realize it. We experience it, but we experience it with the depth of our being, as we remain calm and present through it all.
#! /usr/bin/python3
# -*- coding:Utf-8 -*-
"""
MyNotes - Sticky notes/post-it
Copyright 2016-2019 Juliette Monsel <j_4321@protonmail.com>

MyNotes is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

MyNotes is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.


Text class with improved undo/redo
"""
import os
import re
from tkinter import Text, TclError
from tkinter.font import Font
from tkinter.ttk import Checkbutton
from mynoteslib.constants import CONFIG, TEXT_COLORS, PATH_LATEX, AUTOCORRECT, \
    text_ranges, sorting


class Checkbox(Checkbutton):
    """Checkbutton that reports its state changes to the enclosing text widget."""

    def __init__(self, master, **kw):
        Checkbutton.__init__(self, master, command=self.command, **kw)

    def command(self):
        # Notify the text widget so the toggle is recorded on its undo stack.
        self.master.cb_state_change(self, 'selected' in self.state())


class MyText(Text):
    """Text widget with custom undo/redo stacks covering text, tags, images,
    latex images and embedded checkboxes (Tk's built-in undo is disabled)."""

    def __init__(self, master=None, mode='note', cb_style="TCheckbutton", **kw):
        Text.__init__(self, master, wrap='word', undo=False,
                      autoseparator=False, tabs=(10, 'right', 21, 'left'),
                      relief="flat", borderwidth=0, highlightthickness=0, **kw)
        self.mode = mode              # 'note', 'list', 'todolist' or 'enum'
        self.cb_style = cb_style      # ttk style applied to embedded checkboxes
        self.links = {}               # link number -> url/path
        self.latex = {}               # image name -> latex source string
        self._current_word = ""       # word being typed, for autocorrect
        # --- undo/redo
        # Stacks of lists of items; each inner list is one undoable step and
        # each item is a tuple whose first element names the action.
        self._undo_stack = [[]]
        self._redo_stack = []
        size = CONFIG.get("Font", "text_size")
        font_text = "%s %s" % (CONFIG.get("Font", "text_family").replace(" ", "\ "), size)
        mono = "%s %s" % (CONFIG.get("Font", "mono").replace(" ", "\ "), size)
        self.configure(font=font_text)
        # --- tags
        self.tag_configure("mono", font=mono)
        self.tag_configure("bold", font="%s bold" % font_text)
        self.tag_configure("italic", font="%s italic" % font_text)
        self.tag_configure("bold-italic", font="%s bold italic" % font_text)
        try:
            # only >= tk8.6.6 support selectforeground
            self.tag_configure("underline", underline=True,
                               selectforeground="white")
            self.tag_configure("overstrike", overstrike=True,
                               selectforeground="white")
            self.tag_configure("link", foreground="blue", underline=True,
                               selectforeground="white")
            self.tag_configure("file", foreground="blue", underline=True,
                               selectforeground="white")
            for coul in TEXT_COLORS.values():
                self.tag_configure(coul, foreground=coul,
                                   selectforeground="white")
                self.tag_configure(coul + "-underline", foreground=coul,
                                   selectforeground="white", underline=True)
                self.tag_configure(coul + "-overstrike", foreground=coul,
                                   overstrike=True, selectforeground="white")
        except TclError:
            # older tk: same tags without selectforeground
            self.tag_configure("underline", underline=True)
            self.tag_configure("overstrike", overstrike=True)
            self.tag_configure("link", foreground="blue", underline=True)
            self.tag_configure("file", foreground="blue", underline=True)
            for coul in TEXT_COLORS.values():
                self.tag_configure(coul, foreground=coul)
                self.tag_configure(coul + "-underline", foreground=coul,
                                   underline=True)
                self.tag_configure(coul + "-overstrike", foreground=coul,
                                   overstrike=True)
        self.tag_configure("center", justify="center")
        self.tag_configure("left", justify="left")
        self.tag_configure("right", justify="right")
        self.tag_configure("list", lmargin1=0, lmargin2=21,
                           tabs=(10, 'right', 21, 'left'))
        self.tag_configure("todolist", lmargin1=0, lmargin2=21,
                           tabs=(10, 'right', 21, 'left'))
        margin = 2 * Font(self, font=font_text).measure("m")
        self.tag_configure("enum", lmargin1=0, lmargin2=margin + 5,
                           tabs=(margin, 'right', margin + 5, 'left'))
        # --- bindings
        self.bind('<Key>', self._on_keypress)
        self.bind('<Control-Key>', self._on_ctrl_keypress)
        self.bind('<Control-z>', self.undo)
        self.bind('<Control-y>', self.redo)
        # neutralize the Text class binding of Control-y so only redo runs
        self.bind_class('Text', '<Control-y>', lambda e: None)
        self.tag_bind("link", "<Enter>",
                      lambda event: self.configure(cursor="hand1"))
        self.tag_bind("link", "<Leave>",
                      lambda event: self.configure(cursor=""))

    def update_font(self):
        """Update font after configuration change."""
        size = CONFIG.get("Font", "text_size")
        font = "%s %s" % (CONFIG.get("Font", "text_family").replace(" ", "\ "), size)
        mono = "%s %s" % (CONFIG.get("Font", "mono").replace(" ", "\ "), size)
        self.configure(font=font)
        self.tag_configure("mono", font=mono)
        self.tag_configure("bold", font="%s bold" % font)
        self.tag_configure("italic", font="%s italic" % font)
        self.tag_configure("bold-italic", font="%s bold italic" % font)
        margin = 2 * Font(self, font=font).measure("m")
        self.tag_configure("enum", lmargin1=0, lmargin2=margin + 5,
                           tabs=(margin, 'right', margin + 5, 'left'))

    def mode_change(self, new_mode):
        """Switch note mode, recording the change on the undo stack."""
        self._undo_stack[-1].append(('mode', self.mode, new_mode))
        self.mode = new_mode

    def cb_state_change(self, cb, new_state):
        """Record a checkbox toggle (called by Checkbox.command) as its own undo step."""
        self.add_undo_sep()
        self._undo_stack[-1].append(('checkbox_state', self.index(cb), new_state))
        self.add_undo_sep()

    def undo(self, event=None):
        """Undo the latest step; no-op when the widget is disabled."""
        if self.cget("state") != "disabled":
            try:
                items = []
                # skip empty sets
                while not items:
                    items = self._undo_stack.pop()
            except IndexError:
                # empty stack
                self._undo_stack.append([])
            else:
                self._redo_stack.append(items)
                # undo items in reverse order of how they were recorded
                for item in reversed(items):
                    self._undo_single(item)
                if not self._undo_stack:
                    self._undo_stack.append([])
        return "break"

    def redo(self, event=None):
        """Redo the latest undone step; no-op when the widget is disabled."""
        if self.cget("state") != "disabled":
            try:
                items = self._redo_stack.pop()
            except IndexError:
                # empty stack
                pass
            else:
                self._undo_stack.append(items)
                for item in items:
                    self._redo_single(item)
        return "break"

    def add_undo_sep(self):
        """Close the current undo step (if non-empty) and invalidate redo history."""
        if self._undo_stack[-1]:
            self._undo_stack.append([])
        self._redo_stack.clear()

    def _undo_single(self, item):
        """Revert one recorded action (item[0] is the action name)."""
        if 'insert_' in item[0]:
            # covers insert_char / insert_image / insert_latex / insert_checkbox
            self.delete(item[1])
        elif item[0] == 'insert':
            self.delete(item[1], item[2])
        elif item[0] == 'link':
            self.links[item[1]] = item[2]
        elif item[0] == 'delete':
            self._restore_text_with_prop(item[1], item[3])
        elif item[0] == 'paste':
            self.delete(item[1], item[2])
        elif item[0] == 'tag_remove':
            self.tag_add(*item[1:])
        elif item[0] == 'tag_add':
            self.tag_remove(item[1], item[2], *item[3])
        elif item[0] == 'mode':
            self.mode = item[1]
            self.master.mode.set(item[1])
        elif item[0] == 'checkbox_state':
            win = self.window_cget(item[1], 'window')
            if item[2]:
                self.nametowidget(win).state(('!selected', '!alternate'))
            else:
                self.nametowidget(win).state(('selected', '!alternate'))

    def _redo_single(self, item):
        """Re-apply one recorded action (inverse of _undo_single)."""
        if item[0] == 'insert_char':
            self.insert(item[1], item[2])
        elif item[0] == 'insert_image':
            self.image_create(item[1], item[2])
        elif item[0] == 'insert_latex':
            index, kw, img_name, latex = item[1:]
            self.latex[img_name] = latex
            self.image_create(index, **kw)
        elif item[0] == 'insert_checkbox':
            self.checkbox_create(item[1], item[2])
        elif item[0] == 'insert':
            self.insert(item[1], item[3], *item[4])
            if self.mode != "note":
                self.tag_add(self.mode, "1.0", "end")
        elif item[0] == 'link':
            self.links[item[1]] = item[3]
        elif item[0] == 'delete':
            self.delete(item[1], item[2])
        elif item[0] == 'paste':
            self._restore_text_with_prop(item[1], item[3])
        elif item[0] == 'tag_remove':
            self.tag_remove(*item[1:])
        elif item[0] == 'tag_add':
            self.tag_add(item[1], item[2], *item[3])
        elif item[0] == 'mode':
            self.mode = item[2]
            self.master.mode.set(item[2])
        elif item[0] == 'checkbox_state':
            win = self.window_cget(item[1], 'window')
            if item[2]:
                self.nametowidget(win).state(('selected', '!alternate'))
            else:
                self.nametowidget(win).state(('!selected', '!alternate'))

    def checkbox_create(self, index, state=('!alternate',), **kw):
        """Embed a Checkbox at *index* (not recorded on the undo stack)."""
        kw2 = kw.copy()
        kw2['takefocus'] = False
        kw2['style'] = self.cb_style
        ch = Checkbox(self, **kw2)
        ch.state(state)
        self.window_create(index, window=ch)

    def checkbox_create_undoable(self, index, state=('!alternate',)):
        """Embed a Checkbox at *index* and record it for undo."""
        self._undo_stack[-1].append(('insert_checkbox', self.index(index), state))
        self._redo_stack.clear()
        ch = Checkbox(self, takefocus=False, style=self.cb_style)
        ch.state(state)
        self.window_create(index, window=ch)

    def image_create_undoable(self, index, **kw):
        """Insert an image at *index* and record it for undo."""
        self._undo_stack[-1].append(('insert_image', self.index(index), kw))
        self._redo_stack.clear()
        self.image_create(index, **kw)

    def link_create_undoable(self, link_nb, link):
        """Register/replace link number *link_nb* and record the old value for undo."""
        self._undo_stack[-1].append(('link', link_nb, self.links.get(link_nb), link))
        self._redo_stack.clear()
        self.links[link_nb] = link

    def latex_create_undoable(self, index, img_name, image, latex):
        """Insert image generated from latex expression given in the entry."""
        im = os.path.join(PATH_LATEX, img_name)
        kw = dict(align='bottom', image=image, name=im)
        self._undo_stack[-1].append(('insert_latex', self.index(index), kw,
                                     img_name, latex))
        self._redo_stack.clear()
        self.latex[img_name] = latex
        self.image_create(index, **kw)

    def tag_remove_undoable(self, tagName, index1, index2=None):
        """tag_remove that records the removal for undo."""
        # NOTE(review): self.index(index2) would fail if index2 is None --
        # callers appear to always pass both indexes; confirm before relying on
        # the default.
        self._undo_stack[-1].append(('tag_remove', tagName, self.index(index1),
                                     self.index(index2)))
        self.tag_remove(tagName, index1, index2)

    def tag_add_undoable(self, tagName, index1, *args):
        """tag_add that records the addition for undo."""
        self._undo_stack[-1].append(('tag_add', tagName, self.index(index1),
                                     [self.index(i) for i in args]))
        self.tag_add(tagName, index1, *args)

    def _on_ctrl_keypress(self, event):
        """Swallow <Control-Key> so control shortcuts do not reach _on_keypress."""
        pass

    def delete_undoable(self, index1, index2=None):
        """delete that records the removed content (with formatting) for undo."""
        index1 = self.index(index1)
        if index2 is None:
            # single character deletion
            index2 = self.index('{}+1c'.format(index1))
        else:
            index2 = self.index(index2)
        self._undo_stack[-1].append(('delete', index1, index2,
                                     self._copy_text(index1, index2)))
        self.delete(index1, index2)

    def insert_undoable(self, index, chars, *args):
        """insert that records the inserted range (and tags in *args) for undo."""
        index1 = self.index(index)
        self.insert(index, chars, *args)
        index2 = self.index('{}+{}c'.format(index1, len(chars)))
        self._undo_stack[-1].append(('insert', index1, index2, chars, args))

    def _auto_word_replacement(self):
        """Replace the word just typed with its AUTOCORRECT entry, if any."""
        if self._current_word == self.get('insert-%ic' % len(self._current_word), 'insert'):
            replacement = AUTOCORRECT.get(self._current_word)
            if replacement is not None:
                self.add_undo_sep()
                self.delete_undoable('insert-%ic' % len(self._current_word), 'insert')
                self.insert_undoable('insert', replacement)
                self.add_undo_sep()
        self._current_word = ""

    def _on_keypress(self, event):
        """Record key-driven edits on the undo stack; returning 'break' replaces
        the default Text behavior for that key."""
        # --- deletion
        if event.keysym == 'BackSpace':
            self._redo_stack.clear()
            self._current_word = ""
            self.add_undo_sep()
            deb_line = self.get("insert linestart", "insert")
            tags = self.tag_names("insert")
            if self.tag_ranges("sel"):
                # deleting a selection; renumber afterwards if it crossed an enum
                if self.tag_nextrange("enum", "sel.first", "sel.last"):
                    update = True
                else:
                    update = False
                self.delete_undoable("sel.first", "sel.last")
                if update:
                    self.update_enum()
            elif self.index("insert") != "1.0":
                if re.match('^\t[0-9]+\.\t$', deb_line) and 'enum' in tags:
                    # at the start of an enum item: drop the "\t<n>.\t" prefix
                    self.delete_undoable("insert linestart", "insert")
                    self.insert_undoable("insert", "\t\t")
                    self.update_enum()
                elif deb_line == "\t•\t" and 'list' in tags:
                    # at the start of a bullet item: drop the bullet prefix
                    self.delete_undoable("insert linestart", "insert")
                    self.insert_undoable("insert", "\t\t")
                elif deb_line == "\t\t":
                    self.delete_undoable("insert linestart", "insert")
                elif "todolist" in tags and self.index("insert") == self.index("insert linestart+1c"):
                    # just after a todolist checkbox: remove the embedded widget
                    try:
                        ch = self.window_cget("insert-1c", "window")
                        self.delete_undoable("insert-1c")
                        self.children[ch.split('.')[-1]].destroy()
                        self.insert_undoable("insert", "\t\t")
                    except TclError:
                        self.delete_undoable("insert-1c")
                else:
                    self.delete_undoable("insert-1c")
            self.add_undo_sep()
            return 'break'
        elif event.keysym == 'Delete':
            self._redo_stack.clear()
            self._current_word = ""
            sel = self.tag_ranges('sel')
            if sel:
                # record the selection content; the actual deletion is left to
                # the default Text handling (no 'break' returned)
                self.add_undo_sep()
                self._undo_stack[-1].append(('delete', sel[0], sel[1],
                                             self._copy_text(*sel)))
                self.add_undo_sep()
        # --- newline
        elif event.keysym == 'Return':
            self._redo_stack.clear()
            self._auto_word_replacement()
            if self.mode == "list":
                self.add_undo_sep()
                self.insert_undoable("insert", "\n\t•\t")
                self.tag_add("list", "1.0", "end")
                self.add_undo_sep()
            elif self.mode == "todolist":
                self.add_undo_sep()
                self.insert_undoable("insert", "\n")
                self.checkbox_create_undoable("insert", ('!alternate',))
                self.tag_add("todolist", "1.0", "end")
                self.add_undo_sep()
            elif self.mode == "enum":
                self.add_undo_sep()
                # placeholder number; update_enum renumbers the whole list
                self.insert_undoable("insert", "\n\t0.\t")
                self.update_enum()
                self.add_undo_sep()
            else:
                self.insert_undoable("insert", "\n")
                self.add_undo_sep()
            return 'break'
        # --- normal char
        elif event.char != '':
            self._redo_stack.clear()
            char = event.char
            self._current_word += char
            sel = self.tag_ranges('sel')
            if sel:
                # typing over a selection: record its deletion, then the char
                self.add_undo_sep()
                self._undo_stack[-1].append(('delete', sel[0], sel[1],
                                             self._copy_text(*sel)))
                self.add_undo_sep()
                self._undo_stack[-1].append(('insert_char', sel[0], char))
            else:
                self._undo_stack[-1].append(('insert_char', self.index('insert'), char))
            if event.keysym in ['space', 'Tab']:
                # word boundary: the separator itself is not part of the word
                self._current_word = self._current_word[:-1]
                self._auto_word_replacement()
                self.add_undo_sep()

    def _copy_text(self, index1, index2):
        """Copy text, images, checkboxes with the formatting between index1 and index2."""
        content = []
        deb = sorting(str(index1))
        fin = sorting(str(index2))
        for l in range(deb[0], fin[0] + 1):
            if l == deb[0]:
                dc = deb[1]
            else:
                dc = 0
            if l == fin[0]:
                nc = fin[1]
            else:
                nc = sorting(str(self.index('%i.end' % l)))[1]
            for c in range(dc, nc):
                index = '%i.%i' % (l, c)
                try:
                    # image_cget raises TclError if no image is embedded here
                    keys = ['name', 'image', 'align', 'padx', 'pady']
                    kw = {k: self.image_cget(index, k) for k in keys}
                    tags = self.tag_names(index)
                    # a tag named like '<n>.png' marks a latex-generated image
                    i = 0
                    while i < len(tags) and not re.match(r'[0-9]+\.png', tags[i]):
                        i += 1
                    if i < len(tags):
                        latex = self.latex[tags[i]]
                        content.append(('latex', kw, tags, tags[i], latex))
                    else:
                        content.append(('image', kw, tags))
                except TclError:
                    try:
                        # not an image: maybe an embedded checkbox window
                        win = self.nametowidget(self.window_cget(index, 'window'))
                        state = win.state()
                        tags = self.tag_names(index)
                        content.append(('checkbox', state, tags))
                    except TclError:
                        # plain character
                        tags = self.tag_names(index)
                        content.append(('char', self.get(index), tags))
            if l < fin[0]:
                content.append(('char', '\n', []))
        return content

    def _restore_text_with_prop(self, index1, content):
        """Restore text, images, checkboxes and formatting at index1."""
        self.mark_set('insert', index1)
        for c in content:
            index = self.index('insert')
            if c[0] == 'image':
                self.image_create(index, **c[1])
            elif c[0] == 'latex':
                self.image_create(index, **c[1])
                self.latex[c[3]] = c[4]
            elif c[0] == 'checkbox':
                self.checkbox_create(index, c[1])
                self.update_idletasks()
            else:
                self.insert('insert', c[1])
            for tag in c[2]:
                self.tag_add(tag, index)
        self.tag_remove('sel', '1.0', 'end')

    # --- Text style
    def toggle_text_style(self, style):
        """Toggle the style of the selected text."""
        if self.tag_ranges("sel"):
            current_tags = self.tag_names("sel.first")
            self.add_undo_sep()
            # remove tag
            if style in current_tags:
                # first char is in style so 'unstyle' the range
                tag_ranges = text_ranges(self, style, "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable(style, d, f)
                # downgrade bold-italic to the other style
                tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
                style2 = "bold" if style == "italic" else "italic"
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("bold-italic", d, f)
                    self.tag_add_undoable(style2, d, f)
            elif style == "bold" and "bold-italic" in current_tags:
                tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("bold-italic", d, f)
                    self.tag_add_undoable("italic", d, f)
                tag_ranges = text_ranges(self, "bold", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("bold", d, f)
            elif style == "italic" and "bold-italic" in current_tags:
                tag_ranges = text_ranges(self, "bold-italic", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("bold-italic", d, f)
                    self.tag_add_undoable("bold", d, f)
                tag_ranges = text_ranges(self, "italic", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("italic", d, f)
            # add tag
            elif style == "bold":
                self.tag_add_undoable("bold", "sel.first", "sel.last")
                # merge with existing italic into bold-italic
                tag_ranges = text_ranges(self, "italic", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_add_undoable("bold-italic", d, f)
                    self.tag_remove_undoable("italic", d, f)
                    self.tag_remove_undoable("bold", d, f)
            elif style == "italic":
                self.tag_add_undoable("italic", "sel.first", "sel.last")
                # merge with existing bold into bold-italic
                tag_ranges = text_ranges(self, "bold", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_add_undoable("bold-italic", d, f)
                    self.tag_remove_undoable("italic", d, f)
                    self.tag_remove_undoable("bold", d, f)
            else:
                self.tag_add_undoable(style, "sel.first", "sel.last")
            self.add_undo_sep()

    def toggle_underline(self):
        """Toggle underline property of the selected text."""
        if self.tag_ranges("sel"):
            current_tags = self.tag_names("sel.first")
            self.add_undo_sep()
            if "underline" in current_tags:
                # first char is in style so 'unstyle' the range
                tag_ranges = text_ranges(self, "underline", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("underline", d, f)
                for coul in TEXT_COLORS.values():
                    tag_ranges = text_ranges(self, coul + "-underline",
                                             "sel.first", "sel.last")
                    for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                        self.tag_remove_undoable(coul + "-underline", d, f)
            else:
                self.tag_add_undoable("underline", "sel.first", "sel.last")
                # colored text needs the combined color-underline tag
                for coul in TEXT_COLORS.values():
                    r = text_ranges(self, coul, "sel.first", "sel.last")
                    if r:
                        for deb, fin in zip(r[::2], r[1::2]):
                            self.tag_add_undoable(coul + "-underline", deb, fin)
            self.add_undo_sep()

    def toggle_overstrike(self):
        """Toggle overstrike property of the selected text."""
        if self.tag_ranges("sel"):
            current_tags = self.tag_names("sel.first")
            self.add_undo_sep()
            if "overstrike" in current_tags:
                # first char is in style so 'unstyle' the range
                tag_ranges = text_ranges(self, "overstrike", "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable("overstrike", d, f)
                for coul in TEXT_COLORS.values():
                    tag_ranges = text_ranges(self, coul + "-overstrike",
                                             "sel.first", "sel.last")
                    for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                        self.tag_remove_undoable(coul + "-overstrike", d, f)
            else:
                self.tag_add_undoable("overstrike", "sel.first", "sel.last")
                # colored text needs the combined color-overstrike tag
                for coul in TEXT_COLORS.values():
                    r = text_ranges(self, coul, "sel.first", "sel.last")
                    if r:
                        for deb, fin in zip(r[::2], r[1::2]):
                            self.tag_add_undoable(coul + "-overstrike", deb, fin)
            self.add_undo_sep()

    def change_sel_color(self, color):
        """Change the color of the selection."""
        if self.tag_ranges("sel"):
            self.add_undo_sep()
            # strip every color tag (plain and combined) from the selection
            for coul in TEXT_COLORS.values():
                tag_ranges = text_ranges(self, coul, "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable(coul, d, f)
                tag_ranges = text_ranges(self, coul + "-overstrike",
                                         "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable(coul + "-overstrike", d, f)
                tag_ranges = text_ranges(self, coul + "-underline",
                                         "sel.first", "sel.last")
                for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                    self.tag_remove_undoable(coul + "-underline", d, f)
            if not color == "black":
                # black is the default: no tag needed
                self.tag_add_undoable(color, "sel.first", "sel.last")
                underline = text_ranges(self, "underline", "sel.first", "sel.last")
                overstrike = text_ranges(self, "overstrike", "sel.first", "sel.last")
                for deb, fin in zip(underline[::2], underline[1::2]):
                    self.tag_add_undoable(color + "-underline", deb, fin)
                for deb, fin in zip(overstrike[::2], overstrike[1::2]):
                    self.tag_add_undoable(color + "-overstrike", deb, fin)
            self.add_undo_sep()

    def set_align(self, alignment):
        """Align the text according to alignment (left, right, center)."""
        if self.tag_ranges("sel"):
            deb = self.index("sel.first linestart")
            fin = self.index("sel.last lineend")
        else:
            deb = self.index("insert linestart")
            fin = self.index("insert lineend")
        if "\t" not in self.get(deb, fin):
            self.add_undo_sep()
            # tabulations don't support right/center alignment
            # remove old alignment tag
            for align in ['left', 'right', 'center']:
                if align != alignment:
                    tag_ranges = text_ranges(self, align, deb, fin)
                    for d, f in zip(tag_ranges[::2], tag_ranges[1::2]):
                        self.tag_remove_undoable(align, d, f)
            # set new alignment tag
            self.tag_add_undoable(alignment, deb, fin)
            self.add_undo_sep()

    def update_enum(self):
        """Update enumeration numbers."""
        lines = self.get("1.0", "end").splitlines()
        indexes = []
        for i, l in enumerate(lines):
            res = re.match('^\t[0-9]+\.\t', l)
            res2 = re.match('^\t[0-9]+\.', l)
            if res:
                indexes.append((i, res.end()))
            elif res2:
                indexes.append((i, res2.end()))
        for j, (i, end) in enumerate(indexes):
            # replace each "\t<old>.\t" prefix with the new sequential number
            self.delete_undoable("%i.0" % (i + 1), "%i.%i" % (i + 1, end))
            self.insert_undoable("%i.0" % (i + 1), "\t%i.\t" % (j + 1))
        self.tag_add("enum", "1.0", "end")
        self.add_undo_sep()
Hi Nikki, Thankyou Thankyou Thankyou for your awesomeness and uplifting presentation today. I really got a lot out of what you said, especially planning the week. Hi Nikki, Your presentation was WOW. I enjoyed your presentation and took a lot away from that for myself and my family. Thanks again for your inspiration. This podcast was amazing. I have a few of my favourites saved so I can revisit them regularly and this will be added to that list. Nikki is committed to improving wellness in the workplace through education, enabling clients to make informed decisions and choices to sustain their individual well being. I can’t recommend Nikki highly enough. Nikki is the picture of health and oozes vitality herself. Nikki has embodied her approach to life through her passion for working with clients, corporate and individuals encouraging them to work to their optimal level – with lasting results. Great Blog Nikki, reading your blog was similar to having my morning coffee, it made my heart quicken and armed me for the rest of the day with conviction! Thanks for the wisdom as always.
# coding=utf-8
import traceback
import etcd
import logging
import json

from octopus import err
from octopus import constant
from octopus.util import tools

log = logging.getLogger(constant.LOGGER_NAME)


def register(ec, service_name, service_info):
    """Register a service instance under the service's etcd directory.

    :param ec: etcd client object
    :param service_name: name of the service cluster
    :param service_info: service metadata; must contain an 'addr' dict with
        at least the keys 'host' and 'port'
    :type ec: etcd.Client
    :type service_name: str
    :type service_info: dict
    :return: the etcd key created for this instance (the "service token")
    :rtype: str
    :raises err.OctpParamError: if 'addr' is missing or incomplete
    """
    addr_info = service_info.get('addr')
    if not addr_info \
            or not {'host', 'port'}.issubset(addr_info.keys()):
        raise err.OctpParamError('service_addr must contain "host" and "port".')

    # append=True makes etcd generate a unique, ordered key under the
    # service directory; the TTL must be kept fresh via alive().
    result = ec.write(
        tools.service_dir_name(service_name),
        json.dumps(service_info),
        append=True,
        ttl=constant.SERVICE_TTL
    )
    log.debug('new key: %s', result.key)

    return result.key


def unregister(ec, service_token):
    """Delete a previously registered service instance.

    :param ec: etcd client object
    :param service_token: key returned by register()
    :type ec: etcd.Client
    :type service_token: str
    :return: True on success, False otherwise
    :rtype: bool
    """
    try:
        ec.delete(service_token)
    except Exception:
        # TODO: refine exception handling (distinguish a missing key from
        # a connection failure instead of swallowing everything).
        # NOTE: logging.warn is a deprecated alias; use warning().
        log.warning('Unregister service failed. err: %s', traceback.format_exc())
        return False
    else:
        return True


def watch(ec, service_name, timeout=None):
    """Watch a service cluster (its etcd directory) for any change.

    :param ec: etcd client object
    :param service_name: name of the service cluster
    :param timeout: max seconds to block waiting for a change
    :type ec: etcd.Client
    :type service_name: str
    :type timeout: float
    :return: the etcd result describing the change
    """
    return ec.watch(tools.service_dir_name(service_name),
                    timeout=timeout, recursive=True)


def watch_locker(ec, service_locker_key, timeout=None):
    """Watch a locker key for changes.

    :param ec: etcd client object
    :param service_locker_key: full key of the lock to watch
    :param timeout: max seconds to block waiting for a change
    :type ec: etcd.Client
    :type service_locker_key: str
    :type timeout: float
    :return: the etcd result describing the change
    """
    return ec.watch(service_locker_key, timeout=timeout, recursive=True)


def get(ec, service_name):
    """Fetch the directory of all instances registered for a service.

    :param ec: etcd client object
    :param service_name: name of the service cluster
    :type ec: etcd.Client
    :return: the etcd result for the service directory
    :raises err.OctpServiceNotFoundError: if the directory does not exist
    """
    try:
        result = ec.get(tools.service_dir_name(service_name))
    except etcd.EtcdKeyNotFound:
        # FIX: the original passed service_name as a second constructor
        # argument, so '%s' was never interpolated into the message.
        raise err.OctpServiceNotFoundError(
            'Can NOT find service(%s) from etcd' % service_name)
    else:
        return result


def locker(ec, service_name):
    """Create (but do not acquire) the distributed lock for a service.

    :param ec: etcd client object
    :param service_name: name of the service cluster
    :type ec: etcd.Client
    :return: an etcd.Lock bound to the service's locker key
    """
    return etcd.Lock(ec, tools.locker_name(service_name))


def alive(ec, service_name, service_token):
    """Refresh the TTL of a registered instance (heartbeat).

    Uses the raw v2 keys API because python-etcd's write() cannot send
    'refresh' without a value; refresh + prevExist updates only the TTL
    without touching the stored value or firing a value-change watch.

    :param ec: etcd client object
    :param service_name: unused; kept for interface compatibility
    :param service_token: key returned by register()
    :type ec: etcd.Client
    :return: the raw API response
    """
    # NOTE: relies on the private ec._MPUT constant of python-etcd.
    return ec.api_execute(
        '/v2/keys/' + service_token,
        ec._MPUT,
        {
            'refresh': True,
            'prevExist': True,
            'ttl': constant.SERVICE_TTL,
        }
    )
In-stock items usually ship by the next business day. Allow 14 days for us to fill your order for out-of-stock or special-order items. Safariland's Model 87 Sam Browne duty belt features SafariLaminate™ construction with suede lining. This four-stitch design has the Sam Browne-style double tongued buckle and center belt stud to fasten it firmly around the waist. 2.25" (58mm) width. Available in Plain, Basketweave, Hi Gloss or Nylon-Look finish. Choose brass or chrome buckle.
#http://doc.aldebaran.com/2-5/naoqi/audio/alspeechrecognition-api.html import signal import qi import argparse import sys from os.path import expanduser import os import time from conditions import set_condition class SpeechRecognition(object): USE_GOOGLE = True CHANNELS = [0, 0, 1, 0] audio_recorder = None recording = False def __init__(self, vocab, app): super(SpeechRecognition, self).__init__() app.start() self.session = app.session self.__shutdown_requested = False signal.signal(signal.SIGINT, self.signal_handler) #Starting services self.asr_service = self.session.service("ALSpeechRecognition") self.asr_service.setLanguage("English") self.audio_recorder = self.session.service("ALAudioRecorder") self.memory_service = self.session.service("ALMemory") #establishing test vocabulary #vocabulary = ["yes", "no", "please", "hello", "goodbye", "hi, there", "go to the kitchen"] with open(vocab) as f: content = f.readlines() # you may also want to remove whitespace characters like `\n` at the end of each line vocabulary = [x.strip() for x in content] print "Vocabulary read", vocabulary self.asr_service.pause(True) #self.asr_service.removeAllContext() try: self.asr_service.setVocabulary(vocabulary, False) #self.asr_service.setParameter("Sensitivity", 0.1) self.asr_service.setParameter("NbHypotheses", 3) self.asr_service.setAudioExpression(False) except: print "error setting vocabulary" self.asr_service.pause(False) def start(self): # Start the speech recognition engine with user Test_ASR self.subscribe_name = "Test_ASR" + str(time.time()) self.asr_service.subscribe(self.subscribe_name) #print 'Speech recognition engine started' #subscribe to event WordRecognized self.subWordRecognized = self.memory_service.subscriber("WordRecognized") #self.idSubWordRecognized = self.subWordRecognized.signal.connect(self.onWordRecognized) # speech detected self.subSpeechDet = self.memory_service.subscriber("SpeechDetected") #self.idSubSpeechDet = 
self.subSpeechDet.signal.connect(self.onSpeechDetected) # enable self.subEnable = self.memory_service.subscriber("ASR_enable") self.idSubEnable = self.subEnable.signal.connect(self.onEnable) #subscribe to google asr transcription #if self.USE_GOOGLE: #self.audio_recorder.stopMicrophonesRecording() #self.googleAsrRecognized = self.memory_service.subscriber("GoogleAsrRecognized") #self.idGoogleAsrRecognized = self.googleAsrRecognized.signal.connect(self.onGoogleASR) #self.audio_recorder.startMicrophonesRecording("utterance" + ".wav", "wav", 44100, [1, 1, 1, 1]) #print 'Audio recorder engine started' self.is_enabled = False def quit(self): #Disconnecting callbacks and subscribers self.asr_service.unsubscribe(self.subscribe_name) if self.idSubWordRecognized is not None: self.subWordRecognized.signal.disconnect(self.idSubWordRecognized) if self.idSubSpeechDet is not None: self.subSpeechDet.signal.disconnect(self.idSubSpeechDet) if self.idSubEnable is not None: self.subEnable.signal.disconnect(self.idSubEnable) #if self.USE_GOOGLE: # self.googleAsrRecognized.signal.disconnect(self.idGoogleAsrRecognized) def signal_handler(self, signal, frame): print "[" + self.__class__.__name__ + "] Caught Ctrl+C, stopping." 
self.__shutdown_requested = True print "[" + self.__class__.__name__ + "] Good-bye" def onSpeechDetected(self, value): print "speechdetected=", value if value == 1: if self.USE_GOOGLE: if not self.recording: #try: # self.AUDIO_FILE_DIR = self.memory_proxy.getData("NAOqibag/CurrentLogFolder") + "/asr_logs/" #except: self.AUDIO_FILE_DIR = expanduser('~') + '/bags/no_data/asr_logs/' if not os.path.exists(self.AUDIO_FILE_DIR): os.makedirs(self.AUDIO_FILE_DIR) self.AUDIO_FILE_PATH = self.AUDIO_FILE_DIR + 'SPQReL_mic_' #self.audio_recorder.stopMicrophonesRecording() self.AUDIO_FILE = self.AUDIO_FILE_PATH + str(time.time()) self.audio_recorder.startMicrophonesRecording(self.AUDIO_FILE + ".wav", "wav", 44100, self.CHANNELS) self.recording = True print "Audio recorder started recording" def onWordRecognized(self, value): print "value=",value if self.USE_GOOGLE: if self.recording: self.audio_recorder.stopMicrophonesRecording() self.recording = False print "Audio recorder stopped recording" self.memory_service.raiseEvent("GoogleRequest", self.AUDIO_FILE) for i, val in enumerate(len(value)): if val in ["stop", "stop following", "don't follow", "stop following me"]: if value[i+1] > "0.4": set_condition(self.memory, "stopfollowing", "true") #self.audio_recorder.stopMicrophonesRecording() #print "Audio recorder stopped recording" #if self.USE_GOOGLE: # self.memory_service.raiseEvent("GoogleRequest", self.AUDIO_FILE) #def onGoogleASR(self, value): # print "googleasr=", value def onEnable(self, value): print "enable=", value if value == "0": if self.is_enabled: self.is_enabled = False if self.USE_GOOGLE: self.audio_recorder.stopMicrophonesRecording() if self.subWordRecognized is not None: self.subWordRecognized.signal.disconnect(self.idSubWordRecognized) if self.subSpeechDet is not None: self.subSpeechDet.signal.disconnect(self.idSubSpeechDet) print "ASR disabled" else: print "ASR already disabled" else: if not self.is_enabled: self.is_enabled = True self.idSubWordRecognized = 
self.subWordRecognized.signal.connect(self.onWordRecognized) self.idSubSpeechDet = self.subSpeechDet.signal.connect(self.onSpeechDetected) # TODO move it here!! #self.subscribe( # event=SpeechRecognition.WR_EVENT, # callback=self.word_recognized_callback #) print "ASR enabled" else: print "ASR already enabled" def main(): parser = argparse.ArgumentParser() parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'], help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.") parser.add_argument("--pport", type=int, default=9559, help="Naoqi port number") parser.add_argument("--vocab", type=str, default="resources/nuance_dictionary.txt", help="The nuance vocabulary") args = parser.parse_args() pip = args.pip pport = args.pport vocab = args.vocab #Starting application try: connection_url = "tcp://" + pip + ":" + str(pport) app = qi.Application(["asr", "--qi-url=" + connection_url ]) except RuntimeError: print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n" "Please check your script arguments. Run with -h option for help.") sys.exit(1) sr = SpeechRecognition( vocab=vocab, app=app ) sr.start() #let it run app.run() sr.quit() if __name__ == "__main__": main()
← V Is For Mystery II is here…..and Stewie Holmes and Scottish Brute Peter are NOT timed!!! did anyone notice the Logo change when you start the game (after the latest update?) what is JamCity ?? Jam City bought TinyCo a while back. That’s so weird as I’ve never heard any Londoners, apart from tour guides understandably, refer to it as anything but Big Ben, maybe they only correct tourists. Next time I’m down there I’m going to test this out for fun. You say tomato, I say potato. It is a cartoon game, not a documentary, bro. So far I seem to be doing better at achieving Scottish Brute Peter rather than Stewie Holmes. Although, we need Holmes to earn the Brute. Unfortunately, Stewie Holmes has only 3 days to earn 20 clams and Scottish Brute Peter has 8 days to earn 50 clams! That’s interesting. Requirements for Scottish Peter are much more stringent. Slowly grinding for Stewie, as I really like Stewie. Should be possible to get its bonus clams. What makes it even better is that it is a new character, not just a costume.
# -*- coding: utf-8 -*-
"""Small demo exercising the ToggleTable widget on top of Wpm/curses."""
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'src/elements'))

from curses import wrapper  # Use my own wrapper
from wpm import Wpm
from toggle_table import ToggleTable

# Configuration: geometry of the demo table
toggletable_width = 30
toggletable_height = 10
toggletable_x0 = 1
toggletable_y0 = 2

# Module-level handles shared by the draw functions
wpm = None
background = None


def initialize():
    """Create the Wpm instance and grab the main screen to draw on."""
    global wpm, background
    wpm = Wpm(True)
    wpm.logger.info("Starting %s" % os.path.basename(__file__))
    background = wpm.get_screen()  # main window used for all printing


def draw_toggletable():
    """Draw a toggle table with a few hand-picked cells switched on."""
    background.print_message("This is a toggle table:")
    table = ToggleTable(toggletable_width, toggletable_height,
                        toggletable_x0, toggletable_y0)
    for col, row in ((0, 0), (2, 3), (2, 4)):
        table.set(col, row)
    table.draw()
    background.waitforkey()


def draw_random_toggletable():
    """Clear the screen and draw a randomly generated toggle table."""
    background.clear()
    background.print_message("This is a toggle table generated randomly")
    table = ToggleTable(toggletable_width, toggletable_height,
                        toggletable_x0, toggletable_y0)
    table.generate_random_table()
    table.draw()
    background.waitforkey()


def run_toggletable_widget():
    """Placeholder for an interactive widget demo (not implemented)."""
    pass


def main(stdscr):
    """Run the demo steps in order inside the curses wrapper."""
    initialize()
    draw_toggletable()
    draw_random_toggletable()
    run_toggletable_widget()


if __name__ == "__main__":
    wrapper(main)
    print("Thanks for use %s" % os.path.basename(__file__))
We are here to help you organize and re-prioritize!! We have everything you need to help make your house a home. Get great tips & tricks on how to organize the areas of your house and how to "upcycle" some of those unused items. What are you doing to prepare?? We here at Clutter Me Not are excited to dive into the Spring Cleaning! But, before we do that, we are PLANTING!! We are grabbing those gardening gloves, organic potting soil and newspapers and getting busy. Newspapers? What for? Well, I'm glad you asked. If you are like me, saving time is something I'm always looking for. Especially when the days are getting nicer and I just want to be relaxing outside with a cold drink. So, to prepare myself for that time, I decided I would save a step in our process of growing fresh and organic food for my family. Take a look below! These were super easy to make and cost me $0 and not much time. On Friday mornings we receive the "throw paper" (the local freebie one) which I used for this project. Where I will be saving time is in the transplanting process. The newspaper pots can be planted directly in the ground!! How awesome is that? To keep in line with the organic growing process, my friend told me a great secret. Right before you are ready to plant your seeds, boil some eggs. Use the same water you just boiled the eggs in (after it cools!!) to water the seeds. After boiling your eggs, the water has collected some of the nutrients from the shells and it happens to be a great fertilizer! I don't think you can get much more organic than that! Next up, Garlic!!! I'm using just a regular bulb of organic garlic I bought at the grocery store. I had it in my pantry long enough that it already started to grow. Oops! Time to plant it! Tune in tomorrow to see the delicious pots we are going to use to grow them inside!
from itertools import chain

from django import forms
from django.forms.util import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe

from drawquest.apps.palettes.models import Color, ColorPack


# Widget rendering code forked from django.forms.widgets

class ColorCheckboxInput(forms.CheckboxInput):
    """Checkbox for a single selectable color (one per Color instance)."""

    def render(self, name, value, attrs=None):
        # Render a bare <input type="checkbox"> with no surrounding markup;
        # the enclosing ColorsSelect supplies the <label>.
        final_attrs = self.build_attrs(attrs, type='checkbox', name=name)
        if self.check_test(value):
            final_attrs['checked'] = 'checked'
        if not (value is True or value is False or value is None or value == ''):
            # Only add the 'value' attribute if a value is non-empty.
            final_attrs['value'] = force_text(value)
        return format_html('<input{0} />', flatatt(final_attrs))


class ColorsSelect(forms.CheckboxSelectMultiple):
    """Multi-select rendered as a <ul> of color-swatch checkboxes."""

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        output = ['<ul>']
        # Normalize to strings
        str_values = set([force_text(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = format_html(' for="{0}"', final_attrs['id'])
            else:
                label_for = ''

            # check_test closes over str_values to mark pre-selected colors
            cb = ColorCheckboxInput(final_attrs,
                                    check_test=lambda value: value in str_values)
            option_value = force_text(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = force_text(option_label)
            output.append(format_html('<li><label{0}>{1} {2}</label></li>',
                                      label_for, rendered_cb, option_label))
        output.append('</ul>')
        return mark_safe('\n'.join(output))


def label_from_color_instance(obj):
    # Swatch div colored via inline RGB style; the title attribute carries
    # the human-readable label.
    return mark_safe('<div class="color_option_swatch" style="background-color:rgb({red}, {green}, {blue})" title="{label}"><div class="color_option_swatch_inner"></div></div>'.format(red=obj.red, green=obj.green, blue=obj.blue, label=obj.label))


class ColorPackForm(forms.ModelForm):
    """Model form for a ColorPack; colors rendered as swatch checkboxes."""

    def __init__(self, *args, **kwargs):
        super(ColorPackForm, self).__init__(*args, **kwargs)
        # Show swatches instead of plain labels, and restrict choices to
        # colors eligible for inclusion in a pack.
        self.fields['colors'].label_from_instance = label_from_color_instance
        self.fields['colors'].queryset = Color.includable_in_color_pack()

    class Meta(object):
        model = ColorPack
        exclude = ['id', 'owners', 'legacy_palette_name']
        widgets = {
            'ordinal': forms.TextInput(attrs={'class': 'ordinal'}),
            'colors': ColorsSelect(),
        }


class ColorForm(forms.ModelForm):
    """Model form for a single Color."""

    def __init__(self, *args, **kwargs):
        super(ColorForm, self).__init__(*args, **kwargs)

    class Meta(object):
        model = Color
        exclude = ['id', 'owners']
        widgets = {
            'ordinal': forms.TextInput(attrs={'class': 'ordinal'}),
        }
After EC ban on Mayawati, Akhilesh wonders if it will stop Modi from invoking the Army. The Samajwadi Party chief questioned the poll panel’s integrity after it banned Mayawati from campaigning for 48 hours for violating the Model Code of Conduct. Samajwadi Party chief Akhilesh Yadav on Monday questioned the Election Commission’s integrity after the poll body banned Bahujan Samaj Party chief Mayawati from campaigning for 48 hours for violating the Model Code of Conduct. The Samajwadi Party and Bahujan Samaj Party have formed an alliance in Uttar Pradesh for the Lok Sabha elections. Yadav asked the Election Commission if it could stop Prime Minister Narendra Modi from asking for votes in the name of the Army. “EC directive against Mayawati ji begs the question: do they have integrity to stop PM from asking for votes in name of the army?” Yadav tweeted. The Samajwadi Party chief quoted Modi saying: “I want to ask first time voters if they could cast their first ever vote for the soldiers who lost their lives in Pulwama [attack].” Modi made the remark during a rally in Maharashtra’s Latur city on April 9. On Monday, the poll panel said Mayawati has been penalised for her speech earlier this month urging the Muslim community to vote for the alliance in the state and to not divide votes by choosing the Congress. The Election Commission also banned Uttar Pradesh Chief Minister Adityanath from campaigning for 72 hours. The poll body cited Adityanath’s speech where he compared the upcoming elections to a contest between Ali, a revered figure in Islam, and Bajrang Bali, the Hindu god Hanuman, to impose the penalty. The Bharatiya Janata Party leader had also suggested that the party will win the elections as it has faith in Bajrang Bali. The authority had issued show cause notices to Adityanath and Mayawati for their controversial comments on Thursday. The 2019 General Elections are being held in seven phases from April 11 to May 19, and votes will be counted on May 23.
# -*- coding: utf-8 -*-
# Copyright 2012 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from gi.repository import Gst, Gtk, GObject

from quodlibet import _
from quodlibet.plugins import PluginImportException
from quodlibet.plugins.gstelement import GStreamerPlugin
from quodlibet import qltk
from quodlibet import config
from quodlibet.qltk import Icons

_PLUGIN_ID = "karaoke"

# option name -> [label, tooltip, default value]
_SETTINGS = {
    "band": [_("Filter _band:"),
             _("The Frequency band of the filter"), 220.0],
    "width": [_("Filter _width:"),
              _("The Frequency width of the filter"), 100.0],
    "level": [_("_Level:"), _("Level of the effect"), 1.0],
}


def get_cfg(option):
    """Read a plugin option from the config, falling back to its default."""
    cfg_option = "%s_%s" % (_PLUGIN_ID, option)
    default = _SETTINGS[option][2]
    return config.getfloat("plugins", cfg_option, default)


def set_cfg(option, value):
    """Persist a plugin option; only writes when the value changed."""
    cfg_option = "%s_%s" % (_PLUGIN_ID, option)
    if get_cfg(option) != value:
        config.set("plugins", cfg_option, value)


class Preferences(Gtk.VBox):
    """Preferences pane with one slider per setting; emits 'changed'."""

    __gsignals__ = {
        'changed': (GObject.SignalFlags.RUN_LAST, None, tuple()),
    }

    def __init__(self):
        super(Preferences, self).__init__(spacing=12)

        table = Gtk.Table(n_rows=3, n_columns=2)
        table.set_col_spacings(6)
        table.set_row_spacings(6)

        # one label per option, left column of the table
        labels = {}
        for idx, key in enumerate(["level", "band", "width"]):
            label = Gtk.Label(label=_SETTINGS[key][0])
            labels[key] = label
            label.set_alignment(0.0, 0.5)
            label.set_padding(0, 6)
            label.set_tooltip_text(_SETTINGS[key][1])
            label.set_use_underline(True)
            table.attach(label, 0, 1, idx, idx + 1,
                         xoptions=Gtk.AttachOptions.FILL |
                         Gtk.AttachOptions.SHRINK)

        def scale_changed(scale, option):
            # persist the new value and notify the plugin to update
            value = scale.get_value()
            set_cfg(option, value)
            self.emit("changed")

        # slider ranges/steps, in the same order as the loop below
        max_values = [1.0, 441, 100]
        steps = [0.01, 10, 10]
        pages = [0.1, 50, 25]

        # one slider per option, right column of the table
        scales = {}
        for idx, key in enumerate(["level", "band", "width"]):
            max_value = max_values[idx]
            step = steps[idx]
            page = pages[idx]
            scale = Gtk.HScale(
                adjustment=Gtk.Adjustment.new(0, 0, max_value, step, page, 0))
            scales[key] = scale
            if step < 0.1:
                scale.set_digits(2)
            # mark the default value on the slider
            scale.add_mark(_SETTINGS[key][2], Gtk.PositionType.BOTTOM, None)
            labels[key].set_mnemonic_widget(scale)
            scale.set_value_pos(Gtk.PositionType.RIGHT)
            table.attach(scale, 1, 2, idx, idx + 1)
            scale.connect('value-changed', scale_changed, key)
            scale.set_value(get_cfg(key))

        def format_perc(scale, value):
            return _("%d %%") % (value * 100)

        scales["level"].connect('format-value', format_perc)

        def format_hertz(scale, value):
            return _("%d Hz") % value

        scales["band"].connect('format-value', format_hertz)
        scales["width"].connect('format-value', format_hertz)

        self.pack_start(qltk.Frame(_("Preferences"), child=table),
                        True, True, 0)


class Karaoke(GStreamerPlugin):
    """Pipeline plugin wrapping GStreamer's 'audiokaraoke' element."""

    PLUGIN_ID = _PLUGIN_ID
    PLUGIN_NAME = _("Karaoke")
    PLUGIN_DESC = _("Removes main vocals from audio.")
    PLUGIN_ICON = Icons.AUDIO_INPUT_MICROPHONE

    @classmethod
    def setup_element(cls):
        # Returns None when gst-plugins-good is missing (see check below).
        return Gst.ElementFactory.make('audiokaraoke', cls.PLUGIN_ID)

    @classmethod
    def update_element(cls, element):
        # Push the persisted settings into the live pipeline element.
        element.set_property("level", get_cfg("level"))
        element.set_property("filter-band", get_cfg("band"))
        element.set_property("filter-width", get_cfg("width"))

    @classmethod
    def PluginPreferences(cls, window):
        prefs = Preferences()
        prefs.connect("changed", lambda *x: cls.queue_update())
        return prefs


if not Karaoke.setup_element():
    raise PluginImportException(
        "GStreamer element 'audiokaraoke' missing (gst-plugins-good)")
It’s in a mother’s nature to want people to like, enjoy and accept her children. I’ve never kissed my kids goodbye in the morning and sweetly said, “Bye honey, I hope you get rejected today, love ya!” Maybe I should. Even as adults we seek acceptance, we want to fit in and be liked by our peers but we have to be cautious that those offering acceptance are also those who will lead us closer to Christ. What would our earthly life be like if we lived each day realizing the acceptance of our Heavenly Father is the only nod of approval necessary? Jesus’ message in this parable is crystal clear, live a life acceptable to God and you will bear great fruit; get tangled up in approval of man and you will most certainly lose your way. Living as a light of Christ is not for sissies and it may cause us to be rejected by those who aren’t as committed to holiness but in that rejection, we are in good company and at that very moment we may be completely open to the work of Christ and become the cornerstone of something he’s building around us. In quiet prayer ask God to reveal to your heart all those people you are seeking approval from. What makes their approval important and will it bring you closer to Christ and allow you to bear great fruit?
import collections


class Solution:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Return True if any permutation of ``s1`` occurs as a substring of ``s2``.

        Slides a fixed-width window of ``len(s1)`` characters across ``s2``
        and reports a match as soon as the window's character multiset
        equals that of ``s1``.
        """
        need = len(s1)
        width = len(s2)
        # A window longer than s2 can never fit; an empty s2 matches nothing.
        if width < need or width == 0:
            return False
        target = collections.Counter(s1)
        window = collections.Counter(s2[:need])
        if window == target:
            return True
        for right in range(need, width):
            window[s2[right]] += 1
            dropped = s2[right - need]
            window[dropped] -= 1
            if window[dropped] == 0:
                # Drop exhausted entries so Counter equality stays exact.
                del window[dropped]
            if window == target:
                return True
        return False


class Solution2:
    def checkInclusion(self, s1: str, s2: str) -> bool:
        """Return True if any permutation of ``s1`` occurs as a substring of ``s2``.

        Variant bookkeeping: ``deficit`` counts how many of each character
        the current window still needs; ``remaining`` is the total number of
        characters still missing.  When ``remaining`` hits zero the window
        is a permutation of ``s1``.
        """
        win = len(s1)
        if len(s2) < win:
            return False
        deficit = collections.Counter(s1)
        remaining = win
        left = 0
        for right, ch in enumerate(s2):
            deficit[ch] -= 1
            if deficit[ch] >= 0:
                # This character was still needed by the window.
                remaining -= 1
            if remaining == 0:
                return True
            if right - left + 1 == win:
                # Window is full but not a match: slide the left edge.
                out = s2[left]
                deficit[out] += 1
                if deficit[out] > 0:
                    remaining += 1
                left += 1
        return False
Adipose-derived stem cells (ASCs) represent a promising cell source in the field of tissue engineering and regenerative medicine. Due to the wide availability and multipotent ability of ASCs to differentiate into tissues such as bone, cartilage, muscle, and adipose, ASCs may serve a wide variety of regenerative medicine applications. Accordingly, ASCs have been utilized in studies addressing osteoarthritis, diabetes mellitus, heart disease, and soft tissue regeneration and reconstruction after mastectomy and facial trauma. Traditional, static, two-dimensional cell culture of ASCs does not allow for mature adipocyte differentiation or long-term maintenance of adipocytes in vitro. In order to study metabolic diseases, such as type II diabetes mellitus, a three-dimensional scaffold for in vitro adipocyte maintenance is necessary. In collaboration with the Bioreactor Laboratory at the McGowan Institute for Regenerative Medicine, our laboratory has developed the use of a hollow fiber-based bioreactor for three-dimensional, dynamic perfusion of ASCs and adipose tissue formation ex vivo, creating a stable system in which long-term culture of adipocytes is possible, providing a model useful for potential drug discovery and tissue engineering applications, specifically those addressing type II diabetes mellitus. The studies presented in this dissertation aim to assess metabolic activity and differentiation of ASCs from patients with or without type II diabetes in the bioreactor system; engineer a long-term culture environment relevant to physiological type II diabetic and non-diabetic conditions ex vivo; optimize tissue growth homogeneity; and enhance adipogenesis within the bioreactor culture with the use of a decellularized adipose extracellular matrix (ECM) hydrogel. 
ASCs derived from patients with type II diabetes at time of isolation were found to behave metabolically similar and appear architecturally comparable to those derived from patients without type II diabetes mellitus when differentiated and maintained as adipocytes in the bioreactor system. When cultured at a physiologically relevant glucose level matching that of healthy patients or patients with type II diabetes, ASCs were able to proliferate, differentiate into adipocytes, and be maintained within the bioreactor system for at least one week. A decellularized adipose ECM hydrogel was established and applied to the bioreactor cultures; however, due to technical challenges, no firm conclusions can be made. The microenvironment by which ASCs are surrounded is critical for cell differentiation and growth. Engineering and control of such microenvironment is possible within the hollow fiber-based, three-dimensional, dynamic perfusion bioreactor culture system, proving to be a promising model for potential drug discovery and therapeutics. Future directions include further evaluation of ASC differentiation and adipocyte metabolism within type II diabetic environments, application of established decellularized adipose ECM hydrogels to wound healing treatments and adipose graft volume retention.
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

"""Implementation of the ``spack debug`` command group."""

from __future__ import print_function

import os
import platform
import re
from datetime import datetime
from glob import glob

import llnl.util.tty as tty
from llnl.util.filesystem import working_dir

import spack.architecture as architecture
import spack.paths
from spack.main import get_version
from spack.util.executable import which

# Metadata consumed by Spack's command framework.
description = "debugging commands for troubleshooting Spack"
section = "developer"
level = "long"


def setup_parser(subparser):
    """Register the ``create-db-tarball`` and ``report`` subcommands."""
    sp = subparser.add_subparsers(metavar='SUBCOMMAND', dest='debug_command')
    sp.add_parser('create-db-tarball',
                  help="create a tarball of Spack's installation metadata")
    sp.add_parser('report', help='print information useful for bug reports')


def _debug_tarball_suffix():
    """Build a ``<branch>.<commit>.<timestamp>`` suffix for tarball names.

    Falls back to ``nobranch``/``nogit`` markers when git is not available
    or Spack is not running from a git checkout.
    """
    now = datetime.now()
    suffix = now.strftime('%Y-%m-%d-%H%M%S')

    git = which('git')
    if not git:
        return 'nobranch-nogit-%s' % suffix

    with working_dir(spack.paths.prefix):
        if not os.path.isdir('.git'):
            return 'nobranch.nogit.%s' % suffix

        # Get symbolic branch name and strip any special chars (mainly '/')
        symbolic = git(
            'rev-parse', '--abbrev-ref', '--short', 'HEAD', output=str).strip()
        symbolic = re.sub(r'[^\w.-]', '-', symbolic)

        # Get the commit hash too.
        commit = git(
            'rev-parse', '--short', 'HEAD', output=str).strip()

        # On a detached HEAD the "branch" name is just the commit hash;
        # avoid repeating it in the suffix.
        if symbolic == commit:
            return "nobranch.%s.%s" % (commit, suffix)
        else:
            return "%s.%s.%s" % (symbolic, commit, suffix)


def create_db_tarball(args):
    """Tar up the install database index and per-spec ``spec.yaml`` files."""
    tar = which('tar')
    tarball_name = "spack-db.%s.tar.gz" % _debug_tarball_suffix()
    tarball_path = os.path.abspath(tarball_name)

    base = os.path.basename(str(spack.store.root))
    transform_args = []
    # GNU tar and BSD tar spell the path-rewriting option differently.
    if 'GNU' in tar('--version', output=str):
        transform_args = ['--transform', 's/^%s/%s/' % (base, tarball_name)]
    else:
        transform_args = ['-s', '/^%s/%s/' % (base, tarball_name)]

    wd = os.path.dirname(str(spack.store.root))
    with working_dir(wd):
        files = [spack.store.db._index_path]
        files += glob('%s/*/*/*/.spack/spec.yaml' % base)
        files = [os.path.relpath(f) for f in files]

        # NOTE(review): this local list shadows the ``args`` namespace
        # parameter; harmless here, but easy to misread.
        args = ['-czf', tarball_path]
        args += transform_args
        args += files
        tar(*args)

    tty.msg('Created %s' % tarball_name)


def report(args):
    """Print version/platform details in Markdown list form for bug reports."""
    print('* **Spack:**', get_version())
    print('* **Python:**', platform.python_version())
    print('* **Platform:**', architecture.Arch(
        architecture.platform(), 'frontend', 'frontend'))


def debug(parser, args):
    """Dispatch to the chosen ``spack debug`` subcommand."""
    action = {
        'create-db-tarball': create_db_tarball,
        'report': report,
    }
    action[args.debug_command](args)
I am making a mod that adds a Bauble that enables a Keybinding to fire a rocket in use with an Elytra to avoid having Fireworks in the hot bar. The item works as expected, but the rocket never stops propelling you forwards. My best guess going on Mojang's code is that it automatically applies a "LifeTime" to the Entity separate from the spawning code. And due to my code not adding it doesn't kill it. I tried to add NBT to the Rocket, but to no avail. I could really need some help with this. You can't do any of this. All entities must be spawned on a server, not the client, and inventory manipulations must also be done on the server, not the client. You need to send a packet to the server and do all this on the server. How would I go about making a packet to send an ItemStack and EntityPlayer to the server? And listen to them. I just read through the Forge Doc about networking and SimpleImpl. Or can I only send Int? How would I go about making a packet to send an ItemStack and EntityPlayer to the server? You don't need to send those to the server, it makes no sense. You need to send just enough information so the server can validate it, find the player, find the itemstack in their inventory and do whatever it is supposed to.
# coding:utf-8 __author__ = 'cupen@foxmail.com' import sys from django.utils import six from django.utils.encoding import smart_str, force_str PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: smart_unicode = smart_str force_unicode = force_str if PY2: from django.utils.encoding import smart_unicode, force_unicode def filte_dict(_dict, callback): """ >>> from django.conf import settings >>> settings.configure() >>> d = {"a":1, "b":2, "c":3} >>> filte_dict(d, lambda k,v: k != "b") :param _dict: :param callback: :return: """ if not isinstance(_dict, dict): raise TypeError("Invalid dict:%s"%(_dict)) rs = {} for k, v in _dict.items(): if callback(k, v): rs[k] = v return rs _buindin_filter = filter def oh_my_filter(callback, iterable): if PY2: return _buindin_filter(callback, iterable) return type(iterable)(_buindin_filter(callback, iterable)) filter = oh_my_filter # used as py3 urllib. # @see https://pythonhosted.org/six/#module-six.moves urllib = six.moves.urllib def urlopen(*args, **kwargs): return urllib.request.urlopen(*args, **kwargs) def http_get(url, encoding="utf-8"): """ >>> None != http_get("https://www.google.com") True """ return urlopen(url).read().decode(encoding)
FIFA President Gianni Infantino gestures from inside a television studio where he was giving an interview at the annual meeting of the World Economic Forum in Davos, Switzerland. Microsoft's CEO Satya Nadella addresses the audience of a session at the annual meeting of the World Economic Forum in Davos, Switzerland. Jack Ma, CEO of Alibaba group, gestures during a session at the annual meeting of the World Economic Forum in Davos, Switzerland. Shinzo Abe, Prime Minister of Japan, speaks during a plenary session in the Congress Hall at the 49th annual meeting of the World Economic Forum, WEF, in Davos, Switzerland. Bill Gates, chairman of the Bill & Melinda Gates Foundation, gestures during a session at the annual meeting of the World Economic Forum in Davos, Switzerland. Al Gore, former Vice President of the United States, speaks during the "Safeguarding the planet" session at the annual meeting of the World Economic Forum in Davos, Switzerland.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Reads the X/Y position and push-button state of a thumb joystick.

The analog axes are sampled through an MCP3004 ADC on the SPI bus; the
push button is read on a GPIO pin.  Requires spidev installed and SPI
enabled:
http://www.raspberrypi-spy.co.uk/2014/08/enabling-the-spi-interface-on-the-raspberry-pi/
"""
import RPi.GPIO as GPIO
import time
import spidev

xPin = 0    # joystick x connected to A0
yPin = 1    # joystick y connected to A1
swPin = 27  # sw connected to D27

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
# The switch shorts to ground when pressed, so pull the input high.
GPIO.setup(swPin, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Readings within +/- tolerancevalue of the rest value count as "centered".
tolerancevalue = 10
# 10-bit ADC mid-scale readings when the stick is at rest.
xZero = 512
yZero = 512

spi = spidev.SpiDev()
spi.open(0, 0)


def readadc(adcnum):
    """Read one channel of the MCP3004 ADC.

    :param adcnum: channel number, 0 through 3.
    :return: 10-bit reading (0-1023), or -1 for an invalid channel.
    """
    # read SPI data from MCP3004 chip, 4 possible adc's (0 thru 3)
    if ((adcnum > 3) or (adcnum < 0)):
        return -1
    # Command: start bit, then (single-ended flag | channel) in the top
    # nibble of the second byte, then a padding byte for the response.
    # BUG FIX: was `8 + adcnum << 4`, which parses as `8 + (adcnum << 4)`
    # and selects differential mode on the wrong channel.
    r = spi.xfer2([1, (8 + adcnum) << 4, 0])
    # 10-bit result: low 2 bits of byte 1, plus all 8 bits of byte 2.
    adcout = ((r[1] & 3) << 8) + r[2]
    return adcout


def position(adcnum, zerovalue):
    """Return the axis deflection relative to its rest value."""
    return readadc(adcnum) - zerovalue


def eventSWButton(e):
    """Callback fired on the falling edge of the joystick push button."""
    print("SW from joystick pressed")
    print(e)


# using the callback is optional
GPIO.add_event_detect(swPin, GPIO.FALLING, bouncetime=200,
                      callback=eventSWButton)

while True:
    xPos = position(xPin, xZero)
    yPos = position(yPin, yZero)
    # in case you don't want to use the callback
    #if (GPIO.input(swPin) == 0):
    #    print("Button pressed!")
    if (abs(xPos) < tolerancevalue):
        print("Not moving in X.")
    elif (xPos > 0):
        print("Moving ahead.")
        print("X intensity: %5d" % abs(xPos))
    else:
        print("Moving backwards.")
        print("X intensity: %5d" % abs(xPos))
    if (abs(yPos) < tolerancevalue):
        print("Not moving in Y.")
    elif (yPos > 0):
        print("Moving left.")
        print("Y intensity: %5d" % abs(yPos))
    else:
        print("Moving right.")
        print("Y intensity: %5d" % abs(yPos))
    print("")
    time.sleep(0.5)

# NOTE: unreachable — the loop above never exits; kept for parity.
print('done.')
The Leader must recognize that you communicate standards by your example and by what behaviors you ignore, reward, and punish. Effective communication implies that your soldiers listen and understand you, the leader. Soldiers want to be led by leaders who provide strength, inspiration, and guidance and will help them become winners. Whether or not they are willing to trust their lives to a leader depends on their assessment of that leader’s courage, competence, and commitment. Interpret the situation. What is the ethical dilemma? Officers must give NCOs the guidance, resources, assistance, and supervision necessary to do their duties. NCO support channel parallels and reinforces it. Commands, establishes policy and manages the Army. Focuses on collective training leading to mission accomplishment. Is primarily involved with units and unit operations. Concentrates on unit effectiveness and readiness. Concentrates on the standards of performance, training and professional development of officers and NCOs. Conduct the daily business of the Army within established policy. Focuses on individual training that leads to mission capability. Is primarily involved with individual soldiers and team leading. Ensures subordinate NCOs and soldiers, with their personal equipment, are prepared to operate as effective unit members. Concentrates on the standards of performance, training and professional development of subordinate NCOs and soldiers. Legitimate power of leaders to direct subordinates or to take action within the scope of their responsibility. Leaders have command authority when they fill positions requiring the direction and control of other members of the Army. It should be a positive, useful experience that does not confuse, intimidate, or negatively impact on leaders. 
I (state your name), do solemnly swear (or affirm) that I will support and defend the Constitution of the United States against all enemies, foreign and domestic; that I will bear true faith and allegiance to the same; and that I will obey the orders of the President of the United States and the orders of the officers appointed over me, according to regulations and the Uniform Code of Military Justice. So help me God.
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Pipeline configuration objects for Shaka Streamer."""

import base64
import enum
import os
import platform
import shlex

from . import bitrate_configuration
from . import configuration

from typing import List

# A randomly-chosen content ID in hex.
RANDOM_CONTENT_ID = base64.b16encode(os.urandom(16)).decode('UTF-8')

# The Widevine UAT server URL.
UAT_SERVER = 'https://license.uat.widevine.com/cenc/getcontentkey/widevine_test'

# Credentials for the Widevine test account.
WIDEVINE_TEST_ACCOUNT = 'widevine_test'
WIDEVINE_TEST_SIGNING_KEY = '1ae8ccd0e7985cc0b6203a55855a1034afc252980e970ca90e5202689f947ab9'
WIDEVINE_TEST_SIGNING_IV = 'd58ce954203b7c9a9a9d467f59839249'

# The default hardware acceleration API to use, per platform.
if platform.system() == 'Linux':
  DEFAULT_HWACCEL_API = 'vaapi'
elif platform.system() == 'Darwin':  # AKA macOS
  DEFAULT_HWACCEL_API = 'videotoolbox'
else:
  DEFAULT_HWACCEL_API = ''


class StreamingMode(enum.Enum):
  LIVE = 'live'
  """Indicates a live stream, which has no end."""

  VOD = 'vod'
  """Indicates a video-on-demand (VOD) stream, which is finite."""


class ManifestFormat(enum.Enum):
  # Output manifest flavors supported by the packager.
  DASH = 'dash'
  HLS = 'hls'


class ProtectionScheme(enum.Enum):
  CENC = 'cenc'
  """AES-128-CTR mode."""

  CBCS = 'cbcs'
  """AES-128-CBC mode with pattern encryption."""


class ProtectionSystem(enum.Enum):
  # DRM systems for which signaling can be generated.
  WIDEVINE = 'Widevine'
  FAIRPLAY = 'FairPlay'
  PLAYREADY = 'PlayReady'
  MARLIN = 'Marlin'
  COMMON = 'CommonSystem'


class EncryptionMode(enum.Enum):
  WIDEVINE = 'widevine'
  """Widevine key server mode"""

  RAW = 'raw'
  """Raw key mode"""


class RawKeyConfig(configuration.Base):
  """An object representing a list of keys for Raw key encryption"""

  label = configuration.Field(str).cast()
  """An arbitrary string or a predefined DRM label like AUDIO, SD, HD, etc.
  If not specified, indicates the default key and key_id."""

  key_id = configuration.Field(configuration.HexString, required=True).cast()
  """A key identifier as a 32-digit hex string"""

  key = configuration.Field(configuration.HexString, required=True).cast()
  """The encryption key to use as a 32-digit hex string"""


class EncryptionConfig(configuration.Base):
  """An object representing the encryption config for Shaka Streamer."""

  enable = configuration.Field(bool, default=False).cast()
  """If true, encryption is enabled.

  Otherwise, all other encryption settings are ignored.
  """

  encryption_mode = configuration.Field(
      EncryptionMode, default=EncryptionMode.WIDEVINE).cast()
  """Encryption mode to use. By default it is widevine but can be changed
  to raw."""

  protection_systems = configuration.Field(List[ProtectionSystem]).cast()
  """Protection Systems to be generated. Supported protection systems include
  Widevine, PlayReady, FairPlay, Marlin and CommonSystem.
  """

  pssh = configuration.Field(configuration.HexString).cast()
  """One or more concatenated PSSH boxes in hex string format. If this and
  `protection_systems` is not specified, a v1 common PSSH box will be
  generated.

  Applies to 'raw' encryption_mode only.
  """

  iv = configuration.Field(configuration.HexString).cast()
  """IV in hex string format. If not specified, a random IV will be
  generated.

  Applies to 'raw' encryption_mode only.
  """

  keys = configuration.Field(List[RawKeyConfig]).cast()
  """A list of encryption keys to use.

  Applies to 'raw' encryption_mode only."""

  content_id = configuration.Field(
      configuration.HexString, default=RANDOM_CONTENT_ID).cast()
  """The content ID, in hex.

  If omitted, a random content ID will be chosen for you.

  Applies to 'widevine' encryption_mode only.
  """

  key_server_url = configuration.Field(str, default=UAT_SERVER).cast()
  """The URL of your key server.

  This is used to generate an encryption key. By default, it is Widevine's
  UAT server.

  Applies to 'widevine' encryption_mode only.
  """

  signer = configuration.Field(str, default=WIDEVINE_TEST_ACCOUNT).cast()
  """The name of the signer when authenticating to the key server.

  Applies to 'widevine' encryption_mode only.

  Defaults to the Widevine test account.
  """

  signing_key = configuration.Field(
      configuration.HexString, default=WIDEVINE_TEST_SIGNING_KEY).cast()
  """The signing key, in hex, when authenticating to the key server.

  Applies to 'widevine' encryption_mode only.

  Defaults to the Widevine test account's key.
  """

  signing_iv = configuration.Field(
      configuration.HexString, default=WIDEVINE_TEST_SIGNING_IV).cast()
  """The signing IV, in hex, when authenticating to the key server.

  Applies to 'widevine' encryption_mode only.

  Defaults to the Widevine test account's IV.
  """

  protection_scheme = configuration.Field(ProtectionScheme,
                                          default=ProtectionScheme.CENC).cast()
  """The protection scheme (cenc or cbcs) to use when encrypting."""

  clear_lead = configuration.Field(int, default=10).cast()
  """The seconds of unencrypted media at the beginning of the stream."""

  def __init__(self, *args) -> None:
    """Validate mode-specific field combinations after base parsing.

    Raises ``configuration.MalformedField`` if raw-mode-only fields are set
    in widevine mode, or if raw mode is selected without any keys.
    """
    super().__init__(*args)

    # Don't do any further checks if encryption is disabled
    if not self.enable:
      return

    if self.encryption_mode == EncryptionMode.WIDEVINE:
      # These fields are meaningful for raw keys only.
      field_names = ['keys', 'pssh', 'iv']
      for field_name in field_names:
        if getattr(self, field_name):
          field = getattr(self.__class__, field_name)
          reason = 'cannot be set when encryption_mode is "%s"' % \
                   self.encryption_mode
          raise configuration.MalformedField(
              self.__class__, field_name, field, reason)
    elif self.encryption_mode == EncryptionMode.RAW:
      # Check at least one key has been specified
      if not self.keys:
        field = self.__class__.keys
        reason = 'at least one key must be specified'
        raise configuration.MalformedField(
            self.__class__, 'keys', field, reason)


class PipelineConfig(configuration.Base):
  """An object representing the entire pipeline config for Shaka Streamer."""

  streaming_mode = configuration.Field(StreamingMode, required=True).cast()
  """The streaming mode, which can be either 'vod' or 'live'."""

  quiet = configuration.Field(bool, default=False).cast()
  """If true, reduce the level of output.

  Only errors will be shown in quiet mode.
  """

  debug_logs = configuration.Field(bool, default=False).cast()
  """If true, output simple log files from each node.

  No control is given over log filenames. Logs are written to the current
  working directory. We do not yet support log rotation. This is meant only
  for debugging.
  """

  hwaccel_api = configuration.Field(str, default=DEFAULT_HWACCEL_API).cast()
  """The FFmpeg hardware acceleration API to use with hardware codecs.

  A per-platform default will be chosen if this field is omitted.

  See documentation here: https://trac.ffmpeg.org/wiki/HWAccelIntro
  """

  resolutions = configuration.Field(
      List[bitrate_configuration.VideoResolutionName], required=True).cast()
  """A list of resolution names to encode.

  Any resolution greater than the input resolution will be ignored, to avoid
  upscaling the content. This also allows you to reuse a pipeline config for
  multiple inputs.
  """

  # TODO(joeyparrish): Default to whatever is in the input.
  channels = configuration.Field(int, default=2).cast()
  """The number of audio channels to encode."""

  audio_codecs = configuration.Field(
      List[bitrate_configuration.AudioCodec],
      default=[bitrate_configuration.AudioCodec.AAC]).cast()
  """The audio codecs to encode with."""

  video_codecs = configuration.Field(
      List[bitrate_configuration.VideoCodec],
      default=[bitrate_configuration.VideoCodec.H264]).cast()
  """The video codecs to encode with.

  Note that the prefix "hw:" indicates that a hardware encoder should be
  used.
  """

  manifest_format = configuration.Field(List[ManifestFormat], default=[
      ManifestFormat.DASH,
      ManifestFormat.HLS,
  ]).cast()
  """A list of manifest formats (dash or hls) to create.

  By default, this will create both.
  """

  dash_output = configuration.Field(str, default='dash.mpd').cast()
  """Output filename for the DASH manifest, if created."""

  hls_output = configuration.Field(str, default='hls.m3u8').cast()
  """Output filename for the HLS master playlist, if created."""

  segment_folder = configuration.Field(str, default='').cast()
  """Sub-folder for segment output (or blank for none)."""

  segment_size = configuration.Field(float, default=4).cast()
  """The length of each segment in seconds."""

  segment_per_file = configuration.Field(bool, default=True).cast()
  """If true, force each segment to be in a separate file.

  Must be true for live content.
  """

  availability_window = configuration.Field(int, default=300).cast()
  """The number of seconds a segment remains available."""

  presentation_delay = configuration.Field(int, default=30).cast()
  """How far back from the live edge the player should be, in seconds."""

  update_period = configuration.Field(int, default=8).cast()
  """How often the player should fetch a new manifest, in seconds."""

  encryption = configuration.Field(EncryptionConfig,
                                   default=EncryptionConfig({})).cast()
  """Encryption settings."""

  def __init__(self, *args) -> None:
    """Validate cross-field constraints after base-class parsing.

    Raises ``configuration.MalformedField`` if live mode is combined with
    ``segment_per_file: false``.
    """
    super().__init__(*args)

    if self.streaming_mode == StreamingMode.LIVE and not self.segment_per_file:
      field = self.__class__.segment_per_file
      reason = 'must be true when streaming_mode is "live"'
      raise configuration.MalformedField(
          self.__class__, 'segment_per_file', field, reason)

  def get_resolutions(self) -> List[bitrate_configuration.VideoResolution]:
    """Resolve the configured resolution names into resolution objects."""
    VideoResolution = bitrate_configuration.VideoResolution  # alias
    return [VideoResolution.get_value(name) for name in self.resolutions]
Ark Encounter, the proposed creationist theme park in northern Kentucky, continues to attract comment. According to the Louisville Courier-Journal (December 1, 2010), "Ark Encounter, which will feature a 500-foot-long wooden replica of Noah’s Ark containing live animals such as juvenile giraffes, is projected to cost $150 million and create 900 jobs ... The park, to be located on 800 acres in Grant County off Interstate 75, also will include a Walled City, live animal shows, a replica of the Tower of Babel, a 500-seat special-effects theater, an aviary and a first-century Middle Eastern village." Collaborating on the project are Ark Encounters LLC and the young-earth creationist ministry Answers in Genesis, which already operates a Creation "Museum" in northern Kentucky. Whether the project will be able to benefit from the state tourism development incentives for which its organizers have applied is still disputed. Erwin Chemerinsky of the University of California, Irvine, School of Law told The New York Times (December 5, 2010), "If this is about bringing the Bible to life, and it’s the Bible’s account of history that they’re presenting, then the government is paying for the advancement of religion." Bill Sharp of the American Civil Liberties Union of Kentucky, however, was not so dismissive, telling USA Today (December 5, 2010), "Courts have found that giving such tax exemptions on a nondiscriminatory basis does not violate the establishment clause, even when the tax exemption goes to a religious purpose." 
A different potential constitutional barrier was identified by Joseph Gerth, who argued in his column for the Louisville Courier-Journal (December 6, 2010), "If there is a constitutional problem with the incentives, the problem may be more with the Kentucky Constitution, which says no one should be 'compelled to attend any place of worship, to contribute to the erection or maintenance of any such place, or to the salary or support of any minister of religion.'" As the Courier-Journal (December 1, 2010) previously noted, there are also legal concerns about whether Ark Encounter could discriminate on the basis of religion in hiring; Answers in Genesis already requires its employees to endorse its statement of faith. Broader concerns about the state's entanglement with the project persist, too. Writing in the Louisville Courier-Journal (December 5, 2010), Pam Platt regretted "the inevitable jokes." But after reviewing various challenges and obstacles to the integrity of education in the United States, she concluded, "So let us not consider Kentucky, and its real and perceived backwardness, apart and separate from our 49 fellow states and from the whole of the country. Yes, the proposed creationism park reinforces unfortunate stereotypes about Kentucky and Kentuckians, some of them true, but the points I assembled about the United States ought to be provoking a lot of questions about who Americans are and where, exactly, we're heading."
# -*- coding: utf-8 -*- """All color options in Pyslvs.""" __author__ = "Yuan Chang" __copyright__ = "Copyright (C) 2016-2019" __license__ = "AGPL" __email__ = "pyslvs@gmail.com" from typing import ( Tuple, List, Sequence, Set, Dict, Iterator, Any, Union, Optional, ClassVar, ) from abc import abstractmethod from dataclasses import dataclass from enum import auto, unique, IntEnum from math import radians, sin, cos, atan2, hypot, isnan from functools import reduce from qtpy.QtCore import Slot, Qt, QPointF, QRectF, QSizeF from qtpy.QtWidgets import QWidget, QSizePolicy from qtpy.QtGui import ( QPolygonF, QPainter, QBrush, QPen, QColor, QFont, QPainterPath, QImage, QPaintEvent, ) from pyslvs import VPoint, Graph, edges_view, parse_pos from pyslvs_ui.qt_patch import QABCMeta from .color import color_num, color_qt, target_path_style LINK_COLOR = QColor(226, 219, 190) _Coord = Tuple[float, float] def convex_hull( points: List[_Coord], *, as_qpoint: bool = False ) -> Union[List[_Coord], List[QPointF]]: """Returns points on convex hull in counterclockwise order according to Graham's scan algorithm. """ def cmp(a: float, b: float) -> int: return (a > b) - (a < b) def turn(p: _Coord, q: _Coord, r: _Coord) -> int: px, py = p qx, qy = q rx, ry = r return cmp((qx - px) * (ry - py) - (rx - px) * (qy - py), 0) def keep_left(hull: List[_Coord], r: _Coord) -> List[_Coord]: while len(hull) > 1 and turn(hull[-2], hull[-1], r) != 1: hull.pop() if not hull or hull[-1] != r: hull.append(r) return hull points.sort() lower: List[Tuple[float, float]] = reduce(keep_left, points, []) upper: List[Tuple[float, float]] = reduce(keep_left, reversed(points), []) lower.extend(upper[i] for i in range(1, len(upper) - 1)) result = [] for x, y in lower: if as_qpoint: result.append(QPointF(x, y)) else: result.append((x, y)) return result @dataclass(repr=False, eq=False) class _PathOption: """Path option class. Attributes: + Path data (-1: Hide, 0: Preview path data) + Show mode parameter. 
+ The path will be the curve, otherwise using the points. """ path: Sequence[Sequence[_Coord]] = () show: int = -1 curve: bool = True @unique class _TickMark(IntEnum): """The status of tick mark.""" hide = auto() show = auto() show_num = auto() class BaseCanvas(QWidget, metaclass=QABCMeta): """The subclass can draw a blank canvas more easier.""" @abstractmethod def __init__(self, parent: QWidget) -> None: """Set the parameters for drawing.""" super(BaseCanvas, self).__init__(parent) self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)) self.setFocusPolicy(Qt.StrongFocus) self.painter = QPainter() # Origin coordinate self.ox = self.width() / 2 self.oy = self.height() / 2 # Canvas zoom rate self.zoom = 1. # Joint size self.joint_size = 5 # Canvas line width self.link_width = 3 self.path_width = 3 # Font size self.font_size = 15 # Show point mark or dimension self.show_ticks = _TickMark.show self.show_point_mark = True self.show_dimension = True # Path track self.path = _PathOption() # Path solving self.ranges: Dict[str, QRectF] = {} self.target_path: Dict[str, Sequence[_Coord]] = {} self.show_target_path = False # Background self.background = QImage() self.background_opacity = 1. self.background_scale = 1. self.background_offset = QPointF(0, 0) # Monochrome mode self.monochrome = False # Grab mode self.__grab_mode = False def switch_grab(self) -> None: """Start grab mode.""" self.__grab_mode = not self.__grab_mode @staticmethod def zoom_factor( width: int, height: int, x_right: float, x_left: float, y_top: float, y_bottom: float ) -> float: """Calculate the zoom factor.""" x_diff = x_left - x_right y_diff = y_top - y_bottom x_diff = x_diff if x_diff else 1. y_diff = y_diff if y_diff else 1. if width / x_diff < height / y_diff: return width / x_diff else: return height / y_diff @abstractmethod def paintEvent(self, event: QPaintEvent) -> None: """Using a QPainter under 'self', so just change QPen or QBrush before painting. 
""" if not self.__grab_mode: self.painter.begin(self) self.painter.fillRect(event.rect(), QBrush(Qt.white)) # Translation self.painter.translate(self.ox, self.oy) # Background if not self.background.isNull(): rect = self.background.rect() self.painter.setOpacity(self.background_opacity) self.painter.drawImage(QRectF( self.background_offset * self.zoom, QSizeF(rect.width(), rect.height()) * self.background_scale * self.zoom ), self.background, QRectF(rect)) self.painter.setOpacity(1) # Show frame pen = QPen(Qt.blue) pen.setWidth(1) self.painter.setPen(pen) self.painter.setFont(QFont("Arial", self.font_size)) # Draw origin lines if self.show_ticks not in {_TickMark.show, _TickMark.show_num}: return pen.setColor(Qt.gray) self.painter.setPen(pen) x_l = -self.ox x_r = self.width() - self.ox self.painter.drawLine(QPointF(x_l, 0), QPointF(x_r, 0)) y_t = self.height() - self.oy y_b = -self.oy self.painter.drawLine(QPointF(0, y_b), QPointF(0, y_t)) def indexing(v: float) -> int: """Draw tick.""" return int(v / self.zoom - v / self.zoom % 5) # Draw tick for x in range(indexing(x_l), indexing(x_r) + 1, 5): if x == 0: continue is_ten = x % 10 == 0 end = QPointF(x * self.zoom, -10 if is_ten else -5) self.painter.drawLine(QPointF(x, 0) * self.zoom, end) if self.show_ticks == _TickMark.show_num and is_ten: self.painter.drawText(end + QPointF(0, 3), f"{x}") for y in range(indexing(y_b), indexing(y_t) + 1, 5): if y == 0: continue is_ten = y % 10 == 0 end = QPointF(10 if is_ten else 5, y * self.zoom) self.painter.drawLine(QPointF(0, y) * self.zoom, end) if self.show_ticks == _TickMark.show_num and is_ten: self.painter.drawText(end + QPointF(3, 0), f"{-y}") # Please to call the "end" method when ending paint event. 
    def draw_circle(self, p: QPointF, r: float) -> None:
        """Draw a circle of radius r centered at p with the current pen/brush."""
        self.painter.drawEllipse(p, r, r)

    def draw_point(
        self,
        i: int,
        cx: float,
        cy: float,
        fixed: bool,
        color: Optional[Tuple[int, int, int]],
        mul: int = 1
    ) -> None:
        """Draw a joint.

        i: point index used in the label; (cx, cy): canvas coordinates;
        fixed: draw a grounded (triangle) marker; color: RGB tuple or None;
        mul: number of concentric circles (multiple joint).
        """
        if self.monochrome or color is None:
            color = Qt.black
        else:
            color = QColor(*color)
        pen = QPen(color)
        pen.setWidth(2)
        self.painter.setPen(pen)
        x = cx * self.zoom
        # Screen y is inverted relative to canvas y.
        y = cy * -self.zoom
        if fixed:
            # Draw a triangle below
            self.painter.drawPolygon(
                QPointF(x, y),
                QPointF(x - self.joint_size, y + 2 * self.joint_size),
                QPointF(x + self.joint_size, y + 2 * self.joint_size)
            )
        r = self.joint_size
        # At least one circle; more for multiple joints.
        for _ in range(1 if mul < 1 else mul):
            self.draw_circle(QPointF(x, y), r)
            r += 5
        if not self.show_point_mark:
            return
        pen.setColor(Qt.darkGray)
        pen.setWidth(2)
        self.painter.setPen(pen)
        text = f"[Point{i}]"
        if self.show_dimension:
            text += f":({cx:.02f}, {cy:.02f})"
        self.painter.drawText(QPointF(x, y) + QPointF(6, -6), text)

    def draw_slvs_ranges(self) -> None:
        """Draw the solving range rectangles (or a dot for zero-size ranges)."""
        pen = QPen()
        pen.setWidth(5)
        for i, (tag, rect) in enumerate(self.ranges.items()):
            range_color = QColor(color_num(i + 1))
            # Translucent fill, opaque outline.
            range_color.setAlpha(30)
            self.painter.setBrush(range_color)
            range_color.setAlpha(255)
            pen.setColor(range_color)
            self.painter.setPen(pen)
            cx = rect.x() * self.zoom
            cy = rect.y() * -self.zoom
            if rect.width():
                self.painter.drawRect(QRectF(
                    QPointF(cx, cy),
                    QSizeF(rect.width(), rect.height()) * self.zoom
                ))
            else:
                self.draw_circle(QPointF(cx, cy), 3)
            range_color.setAlpha(255)
            pen.setColor(range_color)
            self.painter.setPen(pen)
            self.painter.drawText(QPointF(cx, cy) + QPointF(6, -6), tag)
            self.painter.setBrush(Qt.NoBrush)

    def draw_target_path(self) -> None:
        """Draw the solving target paths with labels, dots and arrows."""
        pen = QPen()
        pen.setWidth(self.path_width)
        for i, name in enumerate(sorted(self.target_path)):
            path = self.target_path[name]
            road, dot, brush = target_path_style(i)
            pen.setColor(road)
            self.painter.setPen(pen)
            self.painter.setBrush(brush)
            if len(path) == 1:
                # Single coordinate: just a labeled dot.
                x, y = path[0]
                p = QPointF(x, -y) * self.zoom
                self.painter.drawText(p + QPointF(6, -6), name)
                pen.setColor(dot)
                self.painter.setPen(pen)
                self.draw_circle(p, self.joint_size)
            else:
                painter_path = QPainterPath()
                for j, (x, y) in enumerate(path):
                    p = QPointF(x, -y) * self.zoom
                    self.draw_circle(p, self.joint_size)
                    if j == 0:
                        self.painter.drawText(p + QPointF(6, -6), name)
                        painter_path.moveTo(p)
                    else:
                        # Arrow from the previous coordinate toward this one.
                        x2, y2 = path[j - 1]
                        self.__draw_arrow(x, -y, x2, -y2, zoom=True)
                        painter_path.lineTo(p)
                pen.setColor(road)
                self.painter.setPen(pen)
                self.painter.drawPath(painter_path)
                for x, y in path:
                    pen.setColor(dot)
                    self.painter.setPen(pen)
                    self.draw_circle(QPointF(x, -y) * self.zoom, self.joint_size)
        self.painter.setBrush(Qt.NoBrush)

    def __draw_arrow(
        self,
        x1: float,
        y1: float,
        x2: float,
        y2: float,
        *,
        zoom: bool = False,
        text: str = ''
    ) -> None:
        """Draw an arrow head at the midpoint of (x1, y1) -> (x2, y2).

        Front point -> Back point. If zoom is True the coordinates are
        first scaled by the canvas zoom; text is an optional bold label.
        """
        if zoom:
            x1 *= self.zoom
            y1 *= self.zoom
            x2 *= self.zoom
            y2 *= self.zoom
        a = atan2(y2 - y1, x2 - x1)
        # Move to the segment midpoint, backed off along the direction.
        x1 = (x1 + x2) / 2 - 7.5 * cos(a)
        y1 = (y1 + y2) / 2 - 7.5 * sin(a)
        first_point = QPointF(x1, y1)
        # Two 20-degree wings of the arrow head.
        self.painter.drawLine(first_point, QPointF(
            x1 + 15 * cos(a + radians(20)),
            y1 + 15 * sin(a + radians(20))
        ))
        self.painter.drawLine(first_point, QPointF(
            x1 + 15 * cos(a - radians(20)),
            y1 + 15 * sin(a - radians(20))
        ))
        if not text:
            return
        # Font: temporarily enlarge and embolden for the label.
        font = self.painter.font()
        font_copy = QFont(font)
        font.setBold(True)
        font.setPointSize(font.pointSize() + 8)
        self.painter.setFont(font)
        # Color: darker shade of the current pen color.
        pen = self.painter.pen()
        color = pen.color()
        pen.setColor(color.darker())
        self.painter.setPen(pen)
        self.painter.drawText(first_point, text)
        # Restore pen color and font.
        pen.setColor(color)
        self.painter.setPen(pen)
        self.painter.setFont(font_copy)

    def draw_curve(self, path: Sequence[_Coord]) -> None:
        """Draw path as a continuous curve; NaN x values split the curve."""
        if len(set(path)) < 2:
            return
        painter_path = QPainterPath()
        error = False
        for i, (x, y) in enumerate(path):
            if isnan(x):
                # Break the curve here and flush what we have so far.
                error = True
                self.painter.drawPath(painter_path)
                painter_path = QPainterPath()
            else:
                p = QPointF(x, -y) * self.zoom
                if i == 0:
                    painter_path.moveTo(p)
                    self.draw_circle(p, 2)
                    continue
                if error:
                    # Restart after a NaN gap instead of connecting across it.
                    painter_path.moveTo(p)
                    error = False
                else:
                    painter_path.lineTo(p)
        self.painter.drawPath(painter_path)

    def draw_dot(self, path: Sequence[_Coord]) -> None:
        """Draw path as discrete dots; NaN coordinates are skipped."""
        if len(set(path)) < 2:
            return
        for i, (x, y) in enumerate(path):
            if isnan(x):
                continue
            p = QPointF(x, -y) * self.zoom
            if i == 0:
                self.draw_circle(p, 2)
            else:
                self.painter.drawPoint(p)

    def solution_polygon(
        self,
        func: str,
        args: Sequence[str],
        target: str,
        pos: Sequence[VPoint]
    ) -> Tuple[List[QPointF], QColor]:
        """Get the polygon vertices and color for a solution expression.

        func is one of 'PLLP', 'PLAP', 'PLPP', 'PXY'; args are the point
        names ('P0', ...); target is the solved point name; pos maps point
        index to VPoint.
        """
        if func == 'PLLP':
            color = QColor(121, 171, 252)
            params = [args[0], args[-1]]
        elif func == 'PLAP':
            color = QColor(249, 84, 216)
            params = [args[0]]
        else:
            if func == 'PLPP':
                color = QColor(94, 255, 185)
            else:
                # PXY
                color = QColor(249, 175, 27)
            params = [args[0]]
        params.append(target)
        tmp_list = []
        for name in params:
            try:
                # 'P12' -> 12; non point-like names are skipped.
                index = int(name.replace('P', ''))
            except ValueError:
                continue
            else:
                vpoint = pos[index]
                tmp_list.append(QPointF(vpoint.cx, -vpoint.cy) * self.zoom)
        return tmp_list, color

    def draw_solution(
        self,
        func: str,
        args: Sequence[str],
        target: str,
        pos: Sequence[VPoint]
    ) -> None:
        """Draw the solution triangle with labeled arrows."""
        points, color = self.solution_polygon(func, args, target, pos)
        color.setAlpha(150)
        pen = QPen(color)
        pen.setWidth(self.joint_size)
        self.painter.setPen(pen)

        def draw_arrow(index: int, text: str) -> None:
            """Draw an arrow from the target vertex to points[index]."""
            self.__draw_arrow(
                points[-1].x(),
                points[-1].y(),
                points[index].x(),
                points[index].y(),
                text=text
            )

        draw_arrow(0, args[1])
        if func == 'PLLP':
            draw_arrow(1, args[2])
        color.setAlpha(30)
        self.painter.setBrush(QBrush(color))
        self.painter.drawPolygon(QPolygonF(points))
        self.painter.setBrush(Qt.NoBrush)

    @Slot(int)
    def set_show_ticks(self, show: int):
        """Set the appearance of tick mark.

        show is a 0-based UI index; _TickMark values start at 1, hence +1.
        """
        self.show_ticks = _TickMark(show + 1)
        self.update()

    @Slot(bool)
    def set_monochrome_mode(self, monochrome: bool) -> None:
        """Set monochrome mode and repaint."""
        self.monochrome = monochrome
        self.update()


class PreviewCanvas(BaseCanvas):

    """A preview canvas use to show structure diagram."""

    # Reference viewport size (canvas units) used when there is no position data.
    view_size: ClassVar[int] = 240

    def __init__(self, parent: QWidget) -> None:
        """Input parameters and attributes.

        + Origin graph
        + Customize points: Dict[str, int]
        + Multiple joints: Dict[int, int]
        + Positions: Dict[int, Tuple[float, float]]
        + Joint status: Dict[int, bool]
        + Name dict: Dict['P0', 'A']
        """
        super(PreviewCanvas, self).__init__(parent)
        self.graph = Graph([])
        self.cus: Dict[int, int] = {}
        self.same: Dict[int, int] = {}
        self.pos: Dict[int, _Coord] = {}
        self.status: Dict[int, bool] = {}
        # Additional attributes.
        self.grounded = -1
        self.driver: Set[int] = set()
        self.target: Set[int] = set()
        self.clear()

    def clear(self) -> None:
        """Clear the attributes and repaint a blank canvas."""
        self.graph = Graph([])
        self.cus.clear()
        self.same.clear()
        self.pos.clear()
        self.status.clear()
        self.grounded = -1
        self.driver.clear()
        self.target.clear()
        self.update()

    def paintEvent(self, event: QPaintEvent) -> None:
        """Draw the structure."""
        width = self.width()
        height = self.height()
        if self.pos:
            # Fit the whole structure into the viewport with a 25% margin.
            x_right, x_left, y_top, y_bottom = self.__zoom_to_fit_limit()
            self.zoom = self.zoom_factor(
                width, height,
                x_right, x_left,
                y_top, y_bottom
            ) * 0.75
            self.ox = width / 2 - (x_left + x_right) / 2 * self.zoom
            self.oy = height / 2 + (y_top + y_bottom) / 2 * self.zoom
        else:
            # No data: scale relative to the reference view size.
            if width <= height:
                self.zoom = width / PreviewCanvas.view_size
            else:
                self.zoom = height / PreviewCanvas.view_size
            self.ox = width / 2
            self.oy = height / 2
        super(PreviewCanvas, self).paintEvent(event)
        pen = QPen()
        pen.setWidth(self.joint_size)
        self.painter.setPen(pen)
        if self.monochrome:
            color = QColor(Qt.darkGray)
        else:
            color = LINK_COLOR
        color.setAlpha(150)
        self.painter.setBrush(QBrush(color))
        # Links (the grounded link is not filled).
        for link in self.graph.vertices:
            if link == self.grounded:
                continue
            points = []
            # Points that belong to the link.
            for num, edge in edges_view(self.graph):
                if link in edge:
                    if num in self.same:
                        # Collapse multiple joints onto their base point.
                        num = self.same[num]
                    x, y = self.pos[num]
                    points.append((x * self.zoom, y * -self.zoom))
            # Customized points.
            for name, link_ in self.cus.items():
                if link == link_:
                    x, y = self.pos[name]
                    points.append((x * self.zoom, y * -self.zoom))
            self.painter.drawPolygon(*convex_hull(points, as_qpoint=True))
        # Nodes, colored by role (driver / target / solved / unsolved).
        for node, (x, y) in self.pos.items():
            if node in self.same:
                continue
            x *= self.zoom
            y *= -self.zoom
            if self.monochrome:
                color = Qt.black
            elif node in self.driver:
                color = color_qt('Red')
            elif node in self.target:
                color = color_qt('Orange')
            elif self.get_status(node):
                color = color_qt('Green')
            else:
                color = color_qt('Blue')
            pen.setColor(color)
            self.painter.setPen(pen)
            self.painter.setBrush(QBrush(color))
            self.draw_circle(QPointF(x, y), self.joint_size)
            pen.setColor(Qt.black)
            self.painter.setPen(pen)
        # Text of node.
        pen.setColor(Qt.black)
        self.painter.setPen(pen)
        for node, (x, y) in self.pos.items():
            if node in self.same:
                continue
            x *= self.zoom
            x += 2 * self.joint_size
            y *= -self.zoom
            y -= 2 * self.joint_size
            self.painter.drawText(QPointF(x, y), f'P{node}')
        self.painter.end()

    def __zoom_to_fit_limit(self) -> Tuple[float, float, float, float]:
        """Return the bounding box of all positions as
        (x_right, x_left, y_top, y_bottom).
        """
        inf = float('inf')
        x_right = inf
        x_left = -inf
        y_top = -inf
        y_bottom = inf
        for x, y in self.pos.values():
            if x < x_right:
                x_right = x
            if x > x_left:
                x_left = x
            if y < y_bottom:
                y_bottom = y
            if y > y_top:
                y_top = y
        return x_right, x_left, y_top, y_bottom

    def set_graph(self, graph: Graph, pos: Dict[int, _Coord]) -> None:
        """Set the graph from NetworkX graph type; all statuses reset to False."""
        self.graph = graph
        self.pos = pos
        self.status = {k: False for k in pos}
        self.update()

    def set_grounded(self, link: int) -> None:
        """Set the grounded link number and mark its joints as solved."""
        self.grounded = link
        for n, edge in edges_view(self.graph):
            self.status[n] = self.grounded in edge
        # NOTE: the loop variable deliberately shadows the 'link' parameter
        # here; the parameter is no longer needed past this point.
        for n, link in self.cus.items():
            self.status[n] = self.grounded == link
        self.update()

    def set_driver(self, input_list: List[Tuple[int, int]]) -> None:
        """Set driver nodes from (base, follower) pairs."""
        self.driver.clear()
        self.driver.update(pair[0] for pair in input_list)
        self.update()

    def set_target(self, points: Sequence[int]) -> None:
        """Set target nodes."""
        self.target.clear()
        self.target.update(points)
        self.update()

    def set_status(self, point: str, status: bool) -> None:
        """Set the status of one node; point is a name like 'P3'."""
        self.status[int(point.replace('P', ''))] = status
        self.update()

    def get_status(self, point: int) -> bool:
        """Get status. If the point is a multiple joint, return True."""
        return self.status[point] or (point in self.same)

    @staticmethod
    def grounded_detect(
        placement: Set[int],
        g: Graph,
        same: Dict[int, int]
    ) -> Iterator[int]:
        """Find the grounded link: yield the first link whose joint set
        (minus multiple joints) equals the placement set.
        """
        links: List[Set[int]] = [set() for _ in range(len(g.vertices))]
        for joint, link in edges_view(g):
            for node in link:
                links[node].add(joint)
        for row, link in enumerate(links):
            if placement == link - set(same):
                # Return once
                yield row
                return

    def from_profile(self, params: Dict[str, Any]) -> None:
        """Simple load by dict object."""
        # Customized points and multiple joints
        g = Graph(params['graph'])
        expression: str = params['expression']
        pos_list = parse_pos(expression)
        cus: Dict[int, int] = params['cus']
        same: Dict[int, int] = params['same']
        self.cus = cus
        self.same = same
        # Duplicate positions for multiple joints at their reference index.
        for node, ref in sorted(self.same.items()):
            pos_list.insert(node, pos_list[ref])
        self.set_graph(g, {i: (x, y) for i, (x, y) in enumerate(pos_list)})
        # Grounded setting
        for row in self.grounded_detect(set(params['placement']), g, self.same):
            self.set_grounded(row)
        # Driver setting
        input_list: List[Tuple[Tuple[int, int], Tuple[float, float]]] = params['input']
        self.driver.clear()
        self.driver.update(b for (b, _), _ in input_list)
        # Target setting (only the keys of the target dict are node indices).
        target: Dict[int, Sequence[_Coord]] = params['target']
        self.target.clear()
        self.target.update(target)
        self.update()

    def is_all_lock(self) -> bool:
        """Return True if every joint has a solution."""
        for node, status in self.status.items():
            if not status and node not in self.same:
                return False
        return True

    def distance(self, n1: int, n2: int) -> float:
        """Return the distance between two points."""
        x1, y1 = self.pos[n1]
        x2, y2 = self.pos[n2]
        return hypot(x1 - x2, y1 - y2)
After languishing in jail for more than a year, multi-crore Saradha Group chit fund scam accused Trinamool Congress leader Madan Mitra claims to have turned ‘Bajrangi Bhaijaan’ now. Over the past one year as he remained behind the bars, the influential Trinamool leader, who was West Bengal’s sports and transport minister, underwent a series of role changes. “Agey ami chilam prabhabshali, tar por holam bahubali and ekhon ami Bajrangbali (earlier I was an influential leader, then I became a muscle man and now I am Bajrang bali),” said Mitra, while he was being produced at a city court on Thursday. The leader, who was known to throw his weight and power around, claimed to have become a humble person like Pavan, aka Bajrangi Bhaijaan, a role played by superstar Salman Khan in the movie of the same name. Mitra’s statement came in the wake of the city court extending his judicial custody by another 14-days to January 28. Additional Chief Judicial Magistrate Sougata Roy Chowdhury passed the order after Mitra’s counsel did not press for bail. Mitra’s counsel also requested the court to treat him as a division-I prisoner and provide him with all necessary medical assistance as he is suffering from various ailments. Mitra was arrested by CBI officials in the Saradha scam on December 12, 2014, on charges of criminal conspiracy, cheating and misappropriation of funds. The influential West Bengal minister was also charged with deriving undue financial advantages from the Saradha Group. The CBI had been opposing Mitra’s bail plea repeatedly citing that he may influence the probe if he gets bail. When questioned about his son Swarup Mitra’s nomination from the Kamarhati Assembly constituency, he said that there was no question of his son contesting the polls this year.
#!/usr/bin/env python
# encoding: utf-8

""" Common ofdb provider stuff for parsing and extracting values. """

# stdlib
import json
import re
from collections import deque


class OFDBCommon:

    """Shared helpers for ofdb gateway providers: mirror rotation, URL
    building, and response validation/sanitizing.
    """

    # Raw-string pattern for locating the json payload line in a broken
    # response (non-raw '\s' would raise an invalid-escape warning).
    _OFDBGW_PATTERN = re.compile(r'{\s*"ofdbgw"')

    def __init__(self):
        self._urls = [
            # seems to be currently the only correct working mirror, other
            # mirrors won't find e.g. plot metadata, even if it exists
            'http://ofdbgw.geeksphere.de/{path}/{query}',
            #'http://ofdbgw.home-of-root.de/{path}/{query}'

            # those mirrors seem to be broken
            #'http://ofdbgw.scheeper.de/{path}/{query}'
            #'http://ofdbgw.johann-scharl.de/{path}/{query}'
            #'http://ofdbgw.org/{path}/{query}'
            #'http://ofdbgw.h1915283.stratoserver.net/{path}/{query}'
            #'http://ofdbgw.metawave.ch/{path}/{query}'
        ]
        # Infinite iterator cycling through the mirror list.
        self.base_url = self._get_url_iter(self._urls)

    def _get_url_iter(self, urls):
        """Yield mirror URL templates forever, rotating after each yield."""
        url_list = deque(urls)
        while True:
            yield url_list[0]
            url_list.rotate(1)

    def get_base_url(self):
        """Return the next mirror URL template."""
        return next(self.base_url)

    def _try_sanitize(self, response):
        """Try to sanitize a broken response containing a valid json doc.

        Scans the response line by line for the ofdbgw payload and parses
        it. Returns the payload dict, or False if nothing parseable is
        found (also for None / non-string input).
        """
        try:
            splited = response.splitlines()
            response = ''
            for item in splited:
                if self._OFDBGW_PATTERN.search(item):
                    response = item
                    break
            return json.loads(response).get('ofdbgw')
        except (TypeError, ValueError, AttributeError):
            return False

    def _build_urllist_from_idlist(self, ids, path):
        """Build a list of single-URL lists out of the given ids."""
        url_list = []
        for ofdbid in ids:
            url = self.get_base_url().format(path=path, query=ofdbid)
            url_list.append([url])
        return url_list

    def validate_url_response(self, url_response):
        """Validate a (url, response) tuple and load the json response.

        :param url_response: list whose last item is a (url, body) tuple.
        :returns: (flag, retvalue, url, payload) where flag is 'ok' or
                  'critical'.
        """
        # Pre-bind so the except path never hits unbound locals if the
        # unpack itself fails (e.g. a malformed tuple).
        url, response = None, None
        try:
            url, response = url_response.pop()
            return 'ok', (None, None), url, json.loads(response).get('ofdbgw')
        except (ValueError, AttributeError, TypeError):
            response = self._try_sanitize(response)
            if response is False:
                return 'critical', (None, True), url, response
            else:
                return 'ok', (None, None), url, response

    def personids_to_urllist(self, ids):
        """Build person provider urllist from person ids."""
        return self._build_urllist_from_idlist(ids, 'person_json')

    def movieids_to_urllist(self, ids):
        """Build movie provider urllist from movie ids."""
        return self._build_urllist_from_idlist(ids, 'movie_json')

    def check_response_status(self, response):
        """Validate the http response object status.

        Possible error codes that may appear in the valid json http
        response::

            0 = Keine Fehler
            1 = Unbekannter Fehler
            2 = Fehler oder Timeout bei Anfrage an IMDB bzw. OFDB
            3 = Keine oder falsche ID angebene
            4 = Keine Daten zu angegebener ID oder Query gefunden
            5 = Fehler bei der Datenverarbeitung
            9 = Wartungsmodus, OFDBGW derzeit nicht verfügbar.

        Returns a state flag and a return value specific to its error
        code. For possible flag/return-value tuples see the code below.

        :param response: A json http response object.
        :returns: A tuple containing a status flag and a return value.
        """
        status = response['status']['rcode']
        return_code = {
            'unknown_error': [1, 2, 5],
            'no_data_found': [4, 9],
            'critical_error': [3],
            'valid_response': [0]
        }
        if status in return_code['critical_error']:
            return 'critical', (None, True)
        if status in return_code['unknown_error']:
            return 'unknown', (None, False)
        if status in return_code['no_data_found']:
            return 'no_data', ([], True)
        if status in return_code['valid_response']:
            return 'valid', ()
        # Unrecognized rcode: previously fell through returning None, which
        # crashed callers unpacking the result. Treat it as unknown error.
        return 'unknown', (None, False)
Atelerix is a new company based at the Babraham Research Campus in Cambridge and spun out from the research of BBSRC researcher Che Connon at the University of Newcastle. Atelerix is commercialising a patented, simple, low-cost system capable of preserving the viability and functionality of cells at hypothermic temperatures (4°C to 21°C). Used as a method of cell storage and transport, it overcomes the acknowledged problems associated with cryo-shipping. Cells are encapsulated by in situ formation of the gel for shipping in plates or vials, and can be rapidly released from the gel by the addition of a simple buffer.
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 Canonical Corp.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from nova.network import model as network_model
from nova import test
from nova.tests import matchers
from nova.virt.vmwareapi import network_util
from nova.virt.vmwareapi import vif


class VMwareVifTestCase(test.NoDBTestCase):
    """Tests for the VMware VIF plugging helpers.

    All tests use mox record/replay: the network_util calls are recorded
    as expectations (with .AndReturn for return values), then ReplayAll()
    switches to replay mode and the code under test must make exactly
    those calls in that order.
    """

    def setUp(self):
        """Build a fake single-VIF network info and test fixtures."""
        super(VMwareVifTestCase, self).setUp()
        self.flags(vlan_interface='vmnet0', group='vmware')
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True)
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        self.session = "fake"
        self.cluster = None

    def tearDown(self):
        super(VMwareVifTestCase, self).tearDown()

    def test_ensure_vlan_bridge(self):
        """VLAN mode: port group is created with the network's vlan (3)."""
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
            'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
            'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        network_util.get_network_with_the_name(self.session, 'fa0',
            self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
            self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
            self.cluster).AndReturn(True)
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 3,
            self.cluster)
        # Expected re-lookup after the port group has been created.
        network_util.get_network_with_the_name('fake', 'fa0', None)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=True)

    # FlatDHCP network mode without vlan - network doesn't exist with the host
    def test_ensure_vlan_bridge_without_vlan(self):
        """Flat mode: port group is created with vlan id 0."""
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
            'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
            'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')

        network_util.get_network_with_the_name(self.session, 'fa0',
            self.cluster).AndReturn(None)
        network_util.get_vswitch_for_vlan_interface(self.session, 'vmnet0',
            self.cluster).AndReturn('vmnet0')
        network_util.check_if_vlan_interface_exists(self.session, 'vmnet0',
        self.cluster).AndReturn(True)
        network_util.create_port_group(self.session, 'fa0', 'vmnet0', 0,
            self.cluster)
        network_util.get_network_with_the_name('fake', 'fa0', None)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # FlatDHCP network mode without vlan - network exists with the host
    # Get vswitch and check vlan interface should not be called
    def test_ensure_vlan_bridge_with_network(self):
        """Network already present: no vswitch lookup, no port group created."""
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
            'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
            'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')
        vm_network = {'name': 'VM Network', 'type': 'Network'}
        network_util.get_network_with_the_name(self.session, 'fa0',
            self.cluster).AndReturn(vm_network)
        self.mox.ReplayAll()
        vif.ensure_vlan_bridge(self.session, self.vif, create_vlan=False)

    # Flat network mode with DVS
    def test_ensure_vlan_bridge_with_existing_dvs(self):
        """Existing distributed vswitch port group is returned unchanged."""
        network_ref = {'dvpg': 'dvportgroup-2062',
                       'type': 'DistributedVirtualPortgroup'}
        self.mox.StubOutWithMock(network_util, 'get_network_with_the_name')
        self.mox.StubOutWithMock(network_util,
            'get_vswitch_for_vlan_interface')
        self.mox.StubOutWithMock(network_util,
            'check_if_vlan_interface_exists')
        self.mox.StubOutWithMock(network_util, 'create_port_group')

        network_util.get_network_with_the_name(self.session, 'fa0',
            self.cluster).AndReturn(network_ref)
        self.mox.ReplayAll()
        ref = vif.ensure_vlan_bridge(self.session,
                                     self.vif,
                                     create_vlan=False)
        self.assertThat(ref, matchers.DictMatches(network_ref))

    def test_get_network_ref_neutron(self):
        """is_neutron=True delegates to get_neutron_network."""
        self.mox.StubOutWithMock(vif, 'get_neutron_network')
        vif.get_neutron_network(self.session, 'fa0', self.cluster, self.vif)
        self.mox.ReplayAll()
        vif.get_network_ref(self.session, self.cluster, self.vif, True)

    def test_get_network_ref_flat_dhcp(self):
        """Nova-network without should_create_vlan: create_vlan=False."""
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=False)
        self.mox.ReplayAll()
        vif.get_network_ref(self.session, self.cluster, self.vif, False)

    def test_get_network_ref_bridge(self):
        """Nova-network with should_create_vlan=True: create_vlan=True."""
        self.mox.StubOutWithMock(vif, 'ensure_vlan_bridge')
        vif.ensure_vlan_bridge(self.session, self.vif, cluster=self.cluster,
                               create_vlan=True)
        self.mox.ReplayAll()
        network = network_model.Network(id=0,
                                        bridge='fa0',
                                        label='fake',
                                        vlan=3,
                                        bridge_interface='eth0',
                                        injected=True,
                                        should_create_vlan=True)
        self.vif = network_model.NetworkInfo([
            network_model.VIF(id=None,
                              address='DE:AD:BE:EF:00:00',
                              network=network,
                              type=None,
                              devname=None,
                              ovs_interfaceid=None,
                              rxtx_cap=3)
        ])[0]
        vif.get_network_ref(self.session, self.cluster, self.vif, False)
This 1994 Fountaine Pajot Tobago, CAT ALEE, was in the process of being entirely refitted for a retiring live-aboard. Sadly, extreme health issues ended that, and the boat is for immediate sale. This boat is priced well. It has brand new Beta diesels, a new mast and standing rigging, and a new main. The interior of the boat is 1994, and in need of some upgrades. The exterior is clean, and there was a very good survey done several years back which I'll make available to qualified buyers. This is a great opportunity to get a decent catamaran at an excellent price in sailaway condition, needing only some minor upgrades to be Bristol. Don't wait!!
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Funnels are a collection of events within a bucket.
"""

from itertools import chain
from twisted.internet.defer import inlineCallbacks, returnValue, DeferredList
from telephus.cassandra.c08.ttypes import NotFoundException
from ..models import bucket_check, user_authorize
from ..models import FunnelModel, EventModel, PropertyModel
from ..lib.authentication import authenticate
from ..exceptions import MissingParameterException
from ..lib.b64encode import uri_b64decode, uri_b64encode, \
    b64encode_nested_keys, b64encode_double_nested_keys, b64encode_keys
from ..lib.hash import pack_hash
from ..lib.parameters import require
from ..lib.profiler import profile


def _parse(data, event_ids):
    """Zip responses by data[offset::step].

    data is a DeferredList result: 4 consecutive (success, value) entries
    per event id (total, unique total, path, unique path). Returns the
    four per-event dicts in that order.
    """
    data = [x[1] for x in data]
    return (
        dict(zip(event_ids, data[0::4])),
        dict(zip(event_ids, data[1::4])),
        dict(zip(event_ids, data[2::4])),
        dict(zip(event_ids, data[3::4])))


def encode_nested_lists(dictionary):
    """Base64 encode the keys and the first element of each nested pair."""
    return dict([(uri_b64encode(k), [(uri_b64encode(x[0]), x[1]) for x in v]) \
        for k, v in dictionary.items()])


class Funnel(object):

    """ Funnel.
    """

    def __init__(self, dispatcher):
        # Route registration: create / preview / fetch / delete.
        dispatcher.connect(
            name='funnel',
            route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
            controller=self,
            action='post',
            conditions={"method": "POST"})
        dispatcher.connect(
            name='funnel',
            route='/{user_name}/{bucket_name}/funnel',
            controller=self,
            action='preview_funnel',
            conditions={"method": "GET"})
        dispatcher.connect(
            name='funnel',
            route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
            controller=self,
            action='get_saved_funnel',
            conditions={"method": "GET"})
        dispatcher.connect(
            name='funnel',
            route='/{user_name}/{bucket_name}/funnel/{funnel_name}',
            controller=self,
            action='delete',
            conditions={"method": "DELETE"})

    @authenticate
    @user_authorize
    @bucket_check
    @require("event_id", "description")
    @profile
    @inlineCallbacks
    def post(self, request, user_name, bucket_name, funnel_name):
        """Create a new funnel.

        Requires at least two 'event_id' values; 'property' is optional.
        Responds 201 on success, 403 on bad parameters.
        """
        if len(request.args["event_id"]) < 2:
            request.setResponseCode(403)
            raise MissingParameterException("Parameter 'event_id' requires "
                "at least two values.")
        event_ids = [uri_b64decode(x) for x in request.args["event_id"]]
        description = request.args["description"][0]
        if "property" in request.args:
            property_name = request.args["property"][0]
        else:
            property_name = None
        funnel = FunnelModel(user_name, bucket_name, funnel_name)
        yield funnel.create(description, event_ids, property_name)
        request.setResponseCode(201)

    @authenticate
    @user_authorize
    @bucket_check
    @profile
    @inlineCallbacks
    def preview_funnel(self, request, user_name, bucket_name):
        """Information about an unsaved funnel.

        Accepts either base64 'event_id' values or raw 'event' names
        (hashed into ids); at least two are required either way.
        """
        if "event_id" in request.args:
            if len(request.args["event_id"]) < 2:
                request.setResponseCode(403)
                raise MissingParameterException("Parameter 'event_id' requires"
                    " at least two values.")
            event_ids = [uri_b64decode(x) for x in request.args["event_id"]]
        elif "event" in request.args:
            if len(request.args["event"]) < 2:
                request.setResponseCode(403)
                raise MissingParameterException("Parameter 'event' requires"
                    " at least two values.")
            event_ids = [pack_hash((x,)) for x in request.args["event"]]
        else:
            request.setResponseCode(403)
            raise MissingParameterException("Parameters 'event' or 'event_id'"
                " required.")
        if "property" in request.args:
            _property = PropertyModel(
                user_name,
                bucket_name,
                property_name=request.args["property"][0])
        else:
            _property = None
        data = yield _get(user_name, bucket_name, event_ids, _property)
        returnValue(data)

    @authenticate
    @user_authorize
    @bucket_check
    @profile
    @inlineCallbacks
    def get_saved_funnel(self, request, user_name, bucket_name, funnel_name):
        """Information about a saved funnel; 404 if it does not exist."""
        funnel = FunnelModel(user_name, bucket_name, funnel_name)
        try:
            yield funnel.get()
        except NotFoundException:
            request.setResponseCode(404)
            raise
        data = yield _get(
            user_name,
            bucket_name,
            funnel.event_ids,
            funnel.property)
        data["description"] = funnel.description
        returnValue(data)

    @authenticate
    @user_authorize
    @bucket_check
    @profile
    @inlineCallbacks
    def delete(self, request, user_name, bucket_name, funnel_name):
        """Delete funnel."""
        funnel = FunnelModel(user_name, bucket_name, funnel_name)
        yield funnel.delete()


@inlineCallbacks
def _get(user_name, bucket_name, event_ids, _property):
    """Information about a funnel.

    Fires all per-event queries (4 per event, plus the optional property
    values query) in parallel, then dispatches to the with/without
    property formatter.
    """
    # Combine requests for event data.
    deferreds = []
    for event_id in event_ids:
        event = EventModel(user_name, bucket_name, event_id=event_id)
        deferreds.extend([
            event.get_total(_property),
            event.get_unique_total(_property),
            event.get_path(_property),
            event.get_unique_path(_property)])
    if _property:
        deferreds.append(_property.get_values())
    data = yield DeferredList(deferreds)
    response = {"event_ids": [uri_b64encode(x) for x in event_ids]}
    if _property:
        # Pop the property values result so data is back to 4 per event.
        property_values = data.pop()[1]
        response.update({
            "property":{
                "name":_property.property_name,
                "id": uri_b64encode(_property.id),
                "values": b64encode_keys(property_values)}})
        _get_with_property(data, event_ids, response)
    else:
        _get_without_property(data, event_ids, response)
    returnValue(response)


def _get_with_property(data, event_ids, response):
    """Information about a funnel on a property.

    Builds a funnel (and a unique-visitor funnel) per property value,
    chaining each step through the path counts of the previous event.
    Mutates response in place.
    """
    totals, unique_totals, paths, unique_paths = _parse(data, event_ids)
    property_ids = set(chain(*[x.keys() for x in totals.values()]))
    funnels = {}
    unique_funnels = {}
    for property_id in property_ids - set(event_ids):
        event_id = event_ids[0]
        _funnel = [(event_id, totals[event_id][property_id])]
        unique_funnel = [(event_id, unique_totals[event_id][property_id])]
        for i in range(1, len(event_ids)):
            event_id = event_ids[i - 1]
            new_event_id = event_ids[i]
            if event_id not in paths[new_event_id][property_id]:
                # Broken chain: no users went from event_id to new_event_id.
                continue
            _funnel.append((
                new_event_id,
                paths[new_event_id][property_id][event_id]))
            unique_funnel.append((
                new_event_id,
                unique_paths[new_event_id][property_id][event_id]))
        funnels[property_id] = _funnel
        unique_funnels[property_id] = unique_funnel
    response.update({
        "totals": b64encode_nested_keys(totals),
        "unique_totals": b64encode_nested_keys(unique_totals),
        "paths": b64encode_double_nested_keys(paths),
        "unique_paths": b64encode_double_nested_keys(unique_paths),
        "funnels": encode_nested_lists(funnels),
        "unique_funnels": encode_nested_lists(unique_funnels)})


def _get_without_property(data, event_ids, response):
    """Information about a funnel without a property.

    Same chaining as _get_with_property but each event's counts are keyed
    by the event itself. Mutates response in place.
    """
    totals, unique_totals, paths, unique_paths = _parse(data, event_ids)
    # Full funnel, no properties.
    event_id = event_ids[0]
    totals = dict([(x, totals[x][x]) for x in totals])
    unique_totals = dict([(x, unique_totals[x][x]) for x in unique_totals])
    _funnel = [(event_id, totals[event_id])]
    unique_funnel = [(event_id, unique_totals[event_id])]
    paths = dict([(x, paths[x][x]) for x in paths])
    unique_paths = dict([(x, unique_paths[x][x]) for x in unique_paths])
    for i in range(1, len(event_ids)):
        event_id = event_ids[i - 1]
        new_event_id = event_ids[i]
        _funnel.append((new_event_id, paths[new_event_id][event_id]))
        unique_funnel.append((
            new_event_id,
            unique_paths[new_event_id][event_id]))
    response.update({
        "total": b64encode_keys(totals),
        "unique_total": b64encode_keys(unique_totals),
        "path": b64encode_nested_keys(paths),
        "unique_path": b64encode_nested_keys(unique_paths),
        "funnel": [(uri_b64encode(x[0]), x[1]) for x in _funnel],
        "unique_funnel": [(uri_b64encode(x[0]), x[1]) \
            for x in unique_funnel]})
I am blessed to live in an area that is rich in diverse culture. This was my first time celebrating and going to Dia de Los Muertos events, so it was a special treat. Southern California has a large Latino community, and it was wonderful traveling across counties to see multiple events dedicated to celebrating the memories of loved ones. The color caught my attention. It was one of my favorites of the night. This piece was at Marcas Gallery in Santa Ana. This photograph stood out from other black and white mates because of the red border, and I love the composition, and feeling of peace and protection it conveys. I was honored with Pan de Muertos by a grandma from Michoacan, Mexico. The altars reminded me of Buddhist shrines where family honor their elders and other loved ones with flowers, food, and candles. It seemed very familiar to me, so much that I felt like a kid. I thought of my grandparents and how much love they shared with me. I thought of their hopes and joys for me, and was filled with warmth that their memories live on through me. I can't wait to go again next year, and perhaps participate.
#!/usr/bin/python2
"""
npy2xyz

Converts a point array stored in NumPy .npy format to plain-text xyz
format, one point per line.  (The original header said "xyz2npy", but
the code -- and its own usage message -- convert npy -> xyz.)

Author: Patrick A. O'Neil

License:
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""

import sys

import numpy as np


def format_point(point, dims):
    """Return one xyz output line for *point*.

    Points with exactly 3 components (x y z) are padded with a zero
    rotation ("0 0 0") so every row has six columns.  The returned line
    always ends with a newline -- the original code only terminated the
    non-padded case, so 3-column input produced one single huge line.
    """
    coords = " ".join(str(c) for c in point)
    if dims == 3:  # Compensate for missing rotation coordinates
        coords += " 0 0 0"
    return coords + "\n"


def main():
    """Command-line entry point: ./npy2xyz.py input.npy output.xyz"""
    if len(sys.argv) != 3:
        print("""
    Converts from npy array to xyz format.
    Usage:
        ./npy2xyz.py input/path.npy output/path.xyz
    """)
        # The original fell through here and crashed on sys.argv[1].
        sys.exit(1)

    # --- Read Inputs --- #
    in_file = sys.argv[1]   # Input File (npy)
    out_file = sys.argv[2]  # Output File (xyz)

    # --- Load Npy --- #
    try:
        D = np.load(in_file)
    except IOError:
        print("""
    Failed to load Npy file! Exiting.
    """)
        # The original continued and hit a NameError on D.
        sys.exit(1)

    d = len(D[0])  # components per point (3 = position only)

    # --- Write xyz --- #
    with open(out_file, 'w') as ofile:
        for p_ in D:
            ofile.write(format_point(p_, d))


if __name__ == "__main__":
    main()
My wife has a personal page, which she uses mostly, but not exclusively, for her public persona as an author. She's been doing this for years, but it seems like now she needs a professional page. So I created one, and one for the book she has coming out. But now I'm totally confused about how to manage this. When you convert your personal account to a Facebook Page, we'll transfer your current profile picture and add all your friends and subscribers as people who like your Page. We'll also make your account's username the username for your Page. It sounds like she would actually lose her personal account, and I think it would offend people to be automatically converted to "fans" of an author, when they thought they were friends with a person. What I want is a way to make her author page her public persona on Facebook AND let her keep her private account, but I can't see a way to do this? Is there one? Or even to let her keep her personal account and just use that, except that I've heard that you can't really do this, because of the 5000 friend limit, and the rules about using a Facebook account for professional purposes. Can I do this? I was just researching this exact same question for a client. I work primarily with authors and speakers, and this question comes up a lot. Here are the key things to note. you do lose the old profile. You have to create a new profile for your personal use. But if the idea is to clean up the profile, starting with a clean slate isn't a bad thing! only your profile pictures and friends/subscribers are transferred over to the page. So, backup your content if you have any interest in saving it. It won't be accessible later. other than the new likes and a profile picture, your new page is blank. Once again, not a bad issue. Just start creating content. At least you'll be off to a great start with the following. if your profile is the admin for any groups or apps, assign new admins before you migrate. 
if you have a username specified for your profile (otherwise known as a "vanity URL"), it will be applied to the new page instead. FB does not allow you to change the username of a page with more than 200 "likes," so depending on how many people are friends, you may not be able to update the username of the page. from what I can piece together, the name of the new page (the page title) is based on the profile name. Therefore, Joe Schmoe's profile will be displayed as "Joe Schmoe" for the page title. At least that's my guess. So, if you want a different title, change your profile name before you update the account. The first name could be "Joe Schmoe's" and the last name "Awesome New Facebook Page." You get the idea. I have to admit, I have not tried this out yet, but my guess is that it would work. Otherwise, to change a page title, you have to put in a special request with Facebook, and they're not very responsive about it. the newsfeed of your new page will be blank because you haven't "liked" any other pages yet. some people may be put off by becoming a fan when they thought they were your friend (think old high school classmates, family members, etc.). So, anticipate a drop-off in "likes" soon after the conversion. Know that making a page is definitely the right answer. You need to be in conformance with Facebook's terms of agreement, otherwise, you risk them shutting your account down. But there are other reasons for it too. For instance, Google indexes pages, but not profiles. In addition, pages allow much more functionality than profiles do, including adding apps for a mailing list, creating events, etc. I hope this helps! Nothing prevents you from having a public persona page and a personal Facebook page. I'm a visual/musical artist. I had a personal page, and when I started the pro page, I started a new friend/subscriber list from scratch (only those who liked the pro page.) 
For a while, I used the personal page to help promote the pro page to personal friends who may not have seen the pro page or its posts. After enough likes (many were folks I never met and were not personal FB friends), I flew solo and stopped promoting on the personal page so much. But I still do once in a while. Now, if someone looking for my art finds my personal page first, it can lead them to the pro page eventually. Now the pro page is growing on its own. I didn't have to make my friends fans but I'm glad that now most of my fans are friends, and just the ones who want to be. As personal friends are added, I invite them to like the pro page as well. The nice thing about this arrangement is that when a daily content (say, a cute, humorous picture about art) is posted on the pro page then shared to the personal page, it can reach further by way of personal friends' pages, comments, shares, etc. How can I get all questions asked by a Facebook page? Facebook account deleted but shows up on google? Can a user with added role of admin invite friends to that fan page? Is there a way to backup one's Facebook Page to protect it from hacking?
""" Overrides of standard python classes and functions to provide customized behavior and windows/mac compatibility. """ __all__ = [ "Template", "sCall", "sPopen" ] ##### Template ################################################################# from string import Template from re import compile class TemplateWrapper: def __init__(self, cls): PYTHON_LL = 80 HTML_LL = 80 self.cls = cls self.headers = [ ( # Primary python file header template compile(r'\$ph{(.*?)}'), lambda x: "\n\n{1}\n##### {0} {2}\n{1}\n".format( x.upper(), '#'*PYTHON_LL, '#'*(PYTHON_LL-len(x)-7) ) ), ( # Secondary python file header template compile(r'\$sh{(.*?)}'), lambda x: "\n### {0} {1}".format( x, '#'*(PYTHON_LL-len(x)-5) ) ), ( # HTML file header template compile(r'\$wh{(.*?)}'), lambda x: "<!-- ***** {0} {1} -->".format( x, '*'*(HTML_LL-len(x)-16) ) ) ] def __call__(self, template): for header in self.headers: ptn, tpl = header for match in ptn.finditer(template): replacements = ( match.group(0), tpl(match.group(1)) ) template = template.replace(*replacements) template_obj = self.cls(template) template_obj.populate = self.populate return template_obj @staticmethod def populate(template, filepath, **kwargs): for key, value in kwargs.items(): if isinstance(value, list): kwargs[key] = "\n".join( [ t[0].safe_substitute(**t[1]) for t in value ] ) try: with open(filepath, 'w') as f: f.write(template.safe_substitute(**kwargs)) except Exception as exception: raise exception Template = TemplateWrapper(Template) ##### System Calls ############################################################# from subprocess import Popen, call, DEVNULL, STDOUT, PIPE from sys import executable import os def sPopen(*args): command, shell = list(args), True if command[0] == 'python': command[0] = executable shell = False if os.name == 'nt': from subprocess import CREATE_NEW_CONSOLE return Popen( command, shell=shell, creationflags=CREATE_NEW_CONSOLE ) else: return Popen( command, shell=shell ) def sCall(*args): command, 
shell = list(args), True if command[0] == 'python': command[0] = executable shell = False if os.name != 'nt': shell = False call( command, shell=shell, stdout=DEVNULL, stderr=STDOUT )
Voted the best packaway mac in its class by Trail magazine Mac in a Sac is known worldwide for its .. Waterproof100% durable rubberOuter Material: SyntheticLined with 60% polyester / 40% cottonCushioned.. When the weather turns protect your young explorers and puddle stompers from sudden showers and heav.. 80g duck down fill600 fill powerR.D.S. certified down90% duck down / 10% featherWater repellent oute..
# -*- coding: utf-8 -*-
"""
/***************************************************************************
 VetEpiGIS-Group
 A QGIS plugin
 Spatial functions for vet epidemiology
                             -------------------
        begin                : 2016-05-06
        git sha              : $Format:%H$
        copyright            : (C) 2016 by Norbert Solymosi
        email                : solymosi.norbert@gmail.com
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
"""

import os, shutil
from PyQt5.QtGui import *
from PyQt5.QtCore import pyqtSignal, Qt, QSettings, QCoreApplication, QFile, QFileInfo, QDate, QVariant, \
    pyqtSignal, QRegExp, QDateTime, QTranslator, QFile, QDir, QIODevice, QTextStream
from PyQt5.QtWidgets import *

from qgis.core import QgsDataSourceUri

from PyQt5.QtSql import *

import psycopg2
import psycopg2.extensions
# use unicode!
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)

from .dbconnection_dialog import Ui_Dialog


class Dialog(QDialog, Ui_Dialog):
    # Database-connection chooser dialog: lets the user select either a
    # local SpatiaLite file or a PostGIS connection as the group backend.

    def __init__(self):
        """Constructor for the dialog.

        Sets up the Qt Designer UI and wires the widgets:
        - the tool button opens a file chooser for the SpatiaLite path;
        - the two radio buttons toggle between the SpatiaLite and the
          PostGIS settings group boxes (see seltype()).
        """
        QDialog.__init__(self)
        self.setupUi(self)

        # Presumably filled in by the plugin code after construction
        # (plugin install dir and a settings object) -- TODO confirm
        # against the caller that instantiates this dialog.
        self.plugin_dir = ''
        self.settings = ''

        #self.comboBox_pg_db.currentIndexChanged.connect(self.seltype)
        #self.commandLinkButton.clicked.connect(self.createNewSLdb)
        self.toolButton_sl_db.clicked.connect(self.dbSource)
        self.radioButton_spatialite.clicked.connect(self.seltype)
        self.radioButton_postgis.clicked.connect(self.seltype)
        #self.commandLinkButton_2.clicked.connect(self.createPGtables)
        # self.lineEdit.setText('/home/sn/dev/QGISplugins/VetEpiGIS/groupdata/c.sqlite')

    def dbSource(self):
        """Slot: let the user pick a SpatiaLite database file.

        Writes the chosen path into the SpatiaLite line edit, or clears
        the line edit when the chooser is cancelled.
        """
        # PyQt5's getOpenFileName returns (path, selected_filter);
        # path is '' when the dialog is cancelled.
        dbpath = QFileDialog.getOpenFileName(self, 'Select file', QDir.currentPath(), 'SpatiaLite file (*.sqlite *.*)')
        dbpath = dbpath[0]
        if not dbpath or dbpath =='':
            self.lineEdit_spatialite.setText('')
        else:
            self.lineEdit_spatialite.setText(dbpath)

    def seltype(self):
        """Slot: keep the two backend choices mutually exclusive.

        Enables the settings group box matching the selected radio
        button and disables/unchecks the other backend.
        """
        if self.radioButton_spatialite.isChecked():
            self.groupBox_postgis.setEnabled(False)
            self.groupBox_spatialite.setEnabled(True)
            self.radioButton_postgis.setChecked(False)

        if self.radioButton_postgis.isChecked():
            self.groupBox_spatialite.setEnabled(False)
            self.groupBox_postgis.setEnabled(True)
            self.radioButton_spatialite.setChecked(False)
French roast coffee is a delight for many coffee drinkers because of its sweetness and great taste. It is a dark roasted coffee that became popular in Europe in the 19th century and until today continues to win the hearts of coffee drinkers all over the world. Many things make it stand out among other types of roasted coffee, while its natural flavor will leave an exceptional taste in the mouth. There are many other things you need to know about French roast coffee and why it is the favorite of many. In this guide, we will cover points that clearly distinguish French roast from others. Coffee drinkers have testified to the excellent taste of French roast. It’s dark-brown in color and oily surface with natural flavor, although French roast is the most popular and top-selling coffee in Europe – most drinkers are not familiar with its history and how it came about its name. In an article “The French Roast Explained“, the writer pointed out some facts about the history of French roast which is said to be in existence since the 19th century. The name “French Roast” refers to the dark-brown color after the coffee beans have gone through roasting processes. It is also very important to note that French roast is basically the way of processing (roasting) the beans, and has nothing to do with the bean. So, beans used to make French roast come from various countries like Kenya or Brazil, and can also be a mix of a variety of beans of different origins. French roast coffee is easy to prepare, but you have to be very good in pressing the coffee to give fine blend. Experts believe the secret to make excellent French roast is in the bean grinds. Choose grinds that are not very coarse or too fine, but the medium sizes that will not clog nor pass through the filter. The items you’ll need are French press, water, chopstick or spoon, kitchen timer and plunger. 
With precise steps such as what is explained in our guide “How To Brew French Press Coffee” you’re going to produce a delicious taste of coffee. You’ll also discover steps to avoid a bitter taste that incorrectly brewed roast could leave in your mouth such as over-extraction. It is obvious that French roast coffee is unique in several ways and has a very long and interesting history. It remains the delight of coffee drinkers, particularly in Europe and easy to make. What Does French Roast Coffee Taste Like? French roast coffee is undoubtedly the most popular coffee in Europe and the rest of the world, obviously due to its finest color and irresistible sweet-flavor taste. Its dark-brown color is quite attractive while you can feel a unique thin watery drink in your mouth. This coffee is far less acidic when compared to other roasts out there, and in my own view, it is the optimum choice for coffee drinkers who want to cut their consumption of caffeine. However, I also found it to be the best choice if you crave for a creamy coffee that gives a sweet taste that suits your mood. Though, how you make your coffee can affect its taste, but you can’t deny that a perfectly roasted French coffee will come out lighter and sweeter with an excellent flavor that stands out among other roasts. While everyone has a choice of coffee probably because of the taste or color, or even the smoothness, it has been discovered that most coffee drinkers love dark roasted coffee. French and Italian roasts are both dark in color with an oily surface after the completion of the process. Italian roast is darker, has a strong burnt flavor, and produces much oil as well, while its popularity is greater in Italy than any other place. Coffee is a complex substance that consists of hundreds of coffee flavors and aromas. We often look for the best coffee before stepping out every morning, and it is common for some people to wake up to the mouth-watering aroma of coffee coming from the kitchen.
However, every household has its unique style of brewing their coffee which may either be French or Italian roast. You may also wonder why there are so many comparisons between the two roasts when they both smell good and taste great. Is there any difference between French and Italian roast coffee? Well, I think the difference is very little – or not at all. What many people consider as a difference between the two roasts is the style in which each is roasted and the person doing it. Green coffee beans of different origins, could be roasted and the taste of the coffee wouldn’t be different. French and Italian roast both release sweet flavor that no die-hard coffee drinker can resist. The process of roasting them is similar – only that Italian coffee is darker and produces a strong flavor. French roast coffee has become a morning delight in many homes but there are things to look out for when buying it. The top 2 things you should consider when buying French roast is its oily surface and rich dark color. It has faded acidity, which may leave a sweet taste in your mouth. While it is very common for coffee buyers to look for smoothness and, of course, the flavor of the products, there are other features on the package label to consider. So, check for caffeine level in a coffee you are buying because French roast contains no caffeine. With the increasing popularity of coffee spreading all over the world, every drinker has a choice, and I think French roast coffee is among the best available in the market. It produces an inviting aroma when roasted, and the product gives an irresistible sweet flavor. French roast coffee can be made using a variety of coffee beans. It is dark-brown in color and has an oil surface after roasted. It’s quite similar to Italian roast coffee, but the latter is darker and popular among Italian drinkers. 
You should consider buying a quality French roast coffee by checking for its features which can be seen written on the package label and you should always remember that its taste makes a difference.
'''
reads wav file
http://soundfile.sapp.org/doc/WaveFormat/
https://blogs.msdn.microsoft.com/dawate/2009/06/23/intro-to-audio-programming-part-2-demystifying-the-wav-format/
'''
import numpy as np  # for 2d (channel x frame) array


def read_data(name):
    """Read a PCM wav file and return its samples.

    Parameters:
        name -- path of the .wav file to read.

    Returns:
        numpy array of shape (channels, frames).  Samples wider than
        8 bits are decoded as signed little-endian integers (8-bit wav
        data is unsigned by convention).

    Fixes over the original implementation:
      * the output array was sized dwChunkSize//wChannels (bytes, not
        samples), over-allocating for anything wider than 8 bits;
      * the read loop sliced data[i:i+width] with i counting *samples*,
        so for 16-bit audio it read overlapping byte pairs;
      * multi-byte samples were decoded unsigned, mangling negative
        PCM values.
    """
    with open(name, "rb") as f:
        sGroupID1 = f.read(4).decode('ascii')  # always "RIFF"
        dwFileLength = int.from_bytes(f.read(4), byteorder='little')  # file size minus 8 bytes
        sRiffType = f.read(4).decode('ascii')  # always "WAVE"
        sGroupID2 = f.read(4).decode('ascii')  # always "fmt "
        dwHeaderChunkSize = int.from_bytes(f.read(4), byteorder='little')  # fmt chunk size in bytes
        subchunk = f.read(dwHeaderChunkSize)   # the whole fmt chunk

        wFormatTag = int.from_bytes(subchunk[:2], byteorder='little')       # 1 for PCM files
        wChannels = int.from_bytes(subchunk[2:4], byteorder='little')       # 1 = mono, 2 = stereo, ...
        dwSamplesPerSec = int.from_bytes(subchunk[4:8], byteorder='little')   # sample rate [Hz]
        dwAvgBytesPerSec = int.from_bytes(subchunk[8:12], byteorder='little')  # bytes/sec, useful for mem allocation
        wBlockAlign = int.from_bytes(subchunk[12:14], byteorder='little')   # bytes per frame (sample size * channels)
        dwBitsPerSample = int.from_bytes(subchunk[14:16], byteorder='little')  # 8 = 8-bit, 16 = 2-byte, ...
        extraParams = subchunk[16:]            # extra params (non-PCM formats)

        sGroupID3 = f.read(4).decode('ascii')  # always "data"
        dwChunkSize = int.from_bytes(f.read(4), byteorder='little')  # size of data field in bytes
        data = f.read(dwChunkSize)             # raw interleaved sample bytes

    bytes_per_sample = dwBitsPerSample // 8
    n_frames = dwChunkSize // (bytes_per_sample * wChannels)
    signed = dwBitsPerSample > 8  # 8-bit wav is unsigned, wider is signed

    sound = np.zeros((wChannels, n_frames))
    for frame in range(n_frames):
        for ch in range(wChannels):
            # Step through data in whole samples: byte offset of sample
            # number (frame*channels + ch).
            off = (frame * wChannels + ch) * bytes_per_sample
            sound[ch][frame] = int.from_bytes(
                data[off:off + bytes_per_sample],
                byteorder='little', signed=signed)

    return sound
Achieving a dimensional vibrant color requires starting on the perfect canvas—that’s why BTC Team Member Michelle Stevenson (@michellehair) balayages her clients before applying fashion colors. A balayaged canvas adds dimension and offers a better grow-out process for the client. Catch her formulas and techniques below for this rich, painted pink using the new SOCOLOR Cult color line from Matrix. Curious why Michelle starts with a balayaged canvas? Here are the details behind her prepping process. If the starting canvas is a blended balayage, the fashion colors (especially when working with pinks and purples) will fade back to that starting point without a harsh line of demarcation. 1. Balayage with clay lightener, concentrating on the client’s regrowth. 2. Then, apply Formula A at the roots, but not all the way to the scalp. Leaving a small amount of the client’s natural root shade ensures a more seamless grow-out process. 3. Alternate applying all four formulas through the midlengths and the ends. 4. To create the most vibrant outcome, use Formulas A and B (the darker colors) on the warmer areas, and Formulas C and D (the lighter pink colors) on the lighter, more porous ends. 5. Process for 20 minutes at room temperature. Then rinse and seal with the Biolage Acidic Milk Rinse, leaving on for 10 minutes before rinsing and conditioning.
# Copyright 2014, Jeff Buttars, A10 Networks.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import acos_client.errors as acos_errors
import acos_client.v30.base as base

from member import Member


class ServiceGroup(base.BaseV30):
    """CRUD wrapper for ACOS v30 slb service-group objects."""

    url_prefix = '/slb/service-group/'

    @property
    def member(self):
        """Accessor for member operations bound to the same client."""
        return Member(self.client)

    # Valid LB methods
    ROUND_ROBIN = 'round-robin'
    WEIGHTED_ROUND_ROBIN = 'weighted-rr'
    LEAST_CONNECTION = 'least-connection'
    WEIGHTED_LEAST_CONNECTION = 'weighted-least-connection'
    LEAST_CONNECTION_ON_SERVICE_PORT = 'service-least-connection'
    WEIGHTED_LEAST_CONNECTION_ON_SERVICE_PORT = \
        'service-weighted-least-connection'
    FAST_RESPONSE_TIME = 'fastest-response'
    LEAST_REQUEST = 'least-request'
    STRICT_ROUND_ROBIN = 'round-robin-strict'
    STATELESS_SOURCE_IP_HASH = 'stateless-src-ip-hash'
    STATELESS_SOURCE_IP_HASH_ONLY = 'stateless-src-ip-only-hash'
    STATELESS_DESTINATION_IP_HASH = 'stateless-dst-ip-hash'
    STATELESS_SOURCE_DESTINATION_IP_HASH = 'stateless-src-dst-ip-hash'
    STATELESS_PER_PACKET_ROUND_ROBIN = 'stateless-per-pkt-round-robin'

    # Valid protocols
    TCP = 'tcp'
    UDP = 'udp'

    def get(self, name, **kwargs):
        """Fetch the service group called *name*."""
        return self._get(self.url_prefix + name, **kwargs)

    def _set(self, name, protocol=None, lb_method=None, hm_name=None,
             update=False, **kwargs):
        """Shared implementation behind create() and update().

        Builds the JSON payload for the service group and POSTs it.  An
        empty health-monitor name is normalized to None so it is left
        out of the payload.
        """
        hm_name = hm_name or None

        # v30 needs unit tests badly...
        payload = {
            "service-group": self.minimal_dict({
                "name": name,
                "protocol": protocol,
            })
        }
        group = payload["service-group"]

        # health-check-disable and health-check are mutually exclusive in
        # the ACOS API, so exactly one of the two keys is sent.  With no
        # monitor named, health checks are implicitly disabled unless the
        # caller explicitly asked otherwise.
        if hm_name is None:
            group["health-check-disable"] = (
                1 if kwargs.get("health_check_disable", False) else 0)
        else:
            group["health-check"] = hm_name

        # The API splits LB methods across three distinct fields.
        if lb_method is not None:
            if lb_method.endswith('least-connection'):
                group['lc-method'] = lb_method
            elif lb_method.startswith('stateless'):
                group['stateless-lb-method'] = lb_method
            else:
                group['lb-method'] = lb_method

        # Creation posts to the collection URL; updates post to the item.
        target = name if update else ''
        self._post(self.url_prefix + target, payload, **kwargs)

    def create(self, name, protocol=TCP, lb_method=ROUND_ROBIN, **kwargs):
        """Create a new service group; raise Exists if one is already there."""
        try:
            self.get(name)
        except acos_errors.NotFound:
            pass
        else:
            raise acos_errors.Exists

        self._set(name, protocol, lb_method, **kwargs)

    def update(self, name, protocol=None, lb_method=None,
               health_monitor=None, **kwargs):
        """Modify an existing service group in place."""
        self._set(name, protocol, lb_method, health_monitor, update=True,
                  **kwargs)

    def delete(self, name):
        """Remove the named service group."""
        self._delete(self.url_prefix + name)

    def stats(self, name, *args, **kwargs):
        """Return traffic statistics for the named service group."""
        return self._get(self.url_prefix + name + "/stats", **kwargs)
I was having lunch with a friend yesterday and he was telling about the water shortage they’ve been facing at his building for weeks now. Obviously this is a problem we’re all facing nowadays and every summer, except those who illegally dug wells next to their homes, water shortage is getting worse every year and we might be facing a serious crisis by 2035 because of poor rainfall, droughts and government mismanagement. So this friend naively decided to go to the water company to file a complaint. He headed to the reception to ask for directions and the woman there pointed at an office with no one inside. He went there just to make sure and the complaints office was indeed empty. He went back and told her, she was like call the number so he did and the operator was as helpful as the complaints office. As he was desperately leaving their offices, a guy grabs him and tells him **wait for it** to bring in four angry ladies from the building and ask them to come and yell at the employees here. He was like “trust me it worked before”. That’s where he left and promised himself never to go back.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Packaging script for legit; also a publish / py2exe build helper."""

import os
import sys

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

APP_NAME = 'legit'
APP_SCRIPT = './legit_r'
VERSION = '0.1.0'


# Grab requirements.
with open('reqs.txt') as f:
    required = f.readlines()

settings = dict()


# Publish Helper.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()


# Build Helper.
if sys.argv[-1] == 'build':
    try:
        import py2exe  # noqa -- only needed for the Windows build
    except ImportError:
        # print() as a function call so the script also parses on
        # Python 3 (the original used the Python-2-only statement form).
        print('py2exe is required to continue.')
        sys.exit(1)

    sys.argv.append('py2exe')

    settings.update(
        console=[{'script': APP_SCRIPT}],
        zipfile=None,
        options={
            'py2exe': {
                'compressed': 1,
                'optimize': 0,
                'bundle_files': 1}})

# Read the long description with a context manager so the file handle is
# closed (the original leaked it via a bare open(...).read()).
with open('README.rst') as f:
    long_description = f.read()

settings.update(
    name=APP_NAME,
    version=VERSION,
    description='Sexy Git CLI, Inspired by GitHub for Mac.',
    long_description=long_description,
    author='Kenneth Reitz',
    author_email='me@kennethreitz.com',
    url='https://github.com/kennethreitz/legit',
    packages=['legit', ],
    install_requires=required,
    license='BSD',
    classifiers=(
        # 'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        # 'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ),
    entry_points={
        'console_scripts': [
            'legit = legit.cli:main',
        ],
    }
)

setup(**settings)
To tell you the truth, Dad, I'm convinced that there's nobody in the world who gets me quite like you do. I'm so grateful for our regular breakfast dates and larger-than-life conversations, the way you're constantly teaching me new things and helping me grow as a person. Thank you for all of the years you spent videotaping my dance recitals, entertaining my wildest hopes and dreams, and guiding me in my journey of faith. You've always been my biggest supporter and voice of reason, the one person who can instantly melt away my troubles with a single hug. I adore your boyish nature, adventurous spirit, and positive outlook on life. I like when we make fun of each other, and how we can't argue without laughing. I love that you chose to call me Emmy before I was even born, despite Mom's insisting that Emily be my "real" name. I get a kick out of the way you try to cheat at Scrabble but let me win when we Indian wrestle. I think it's funny that this blog post will embarrass the heck out of you. And I love that every good memory we share—even those from the early Cinderella stages of my life—seems like ten minutes ago. Thank you, Daddy, for everything.
from lib.common import helpers


class Module:
    """Empire module wrapper for PowerSploit's Get-ComputerDetails.ps1."""

    def __init__(self, mainMenu, params=None):
        # Metadata consumed by the Empire menu system.
        self.info = {
            'Name': 'Get-ComputerDetails',

            'Author': ['@JosephBialek'],

            'Description': ('Enumerates useful information on the system. By default, all checks are run.'),

            'Background' : True,

            'OutputExtension' : None,

            'NeedsAdmin' : True,

            'OpsecSafe' : True,

            'MinPSVersion' : '2',

            'Comments': [
                'https://github.com/mattifestation/PowerSploit/blob/master/Recon/Get-ComputerDetails.ps1'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            '4648' : {
                'Description'   :   'Switch. Only return 4648 logon information (RDP to another machine).',
                'Required'      :   False,
                'Value'         :   ''
            },
            '4624' : {
                'Description'   :   'Switch. Only return 4624 logon information (logons to this machine).',
                'Required'      :   False,
                'Value'         :   ''
            },
            'AppLocker' : {
                'Description'   :   'Switch. Only return AppLocker logs.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'PSScripts' : {
                'Description'   :   'Switch. Only return PowerShell scripts run from operational log.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'SavedRDP' : {
                'Description'   :   'Switch. Only return saved RDP connections.',
                'Required'      :   False,
                'Value'         :   ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Bug fix: the default was a shared mutable list (params=[]).
        if params is None:
            params = []
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Build the PowerShell script to task the agent with.

        Reads the Get-ComputerDetails source and appends the invocation
        matching the first switch that is set; with no switches set, all
        checks are run via Get-ComputerDetails itself.

        Bug fixes: the 4648 and 4624 switches previously had their
        payloads swapped (4648 appended Find-4624Logons and vice versa),
        and the 'PSScripts' switch was checked under the nonexistent
        option name 'PSLogs', so it could never fire.
        """

        # read in the common module source code
        moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/host/Get-ComputerDetails.ps1"

        try:
            f = open(moduleSource, 'r')
        except (IOError, OSError):
            print(helpers.color("[!] Could not read module source path at: " + str(moduleSource)))
            return ""

        moduleCode = f.read()
        f.close()

        script = moduleCode

        for option, values in self.options.iteritems():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if option == "4648":
                        # Explicit-credential logons (outbound RDP) -> 4648
                        script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4648 = Find-4648Logons $SecurityLog;Write-Output $Filtered4648.Values | Format-List"
                        return script
                    if option == "4624":
                        # Logons to this machine -> 4624
                        script += "$SecurityLog = Get-EventLog -LogName Security;$Filtered4624 = Find-4624Logons $SecurityLog;Write-Output $Filtered4624.Values | Format-List"
                        return script
                    if option == "AppLocker":
                        script += "$AppLockerLogs = Find-AppLockerLogs;Write-Output $AppLockerLogs.Values | Format-List"
                        return script
                    if option == "PSScripts":
                        script += "$PSLogs = Find-PSScriptsInPSAppLog;Write-Output $PSLogs.Values | Format-List"
                        return script
                    if option == "SavedRDP":
                        script += "$RdpClientData = Find-RDPClientConnections;Write-Output $RdpClientData.Values | Format-List"
                        return script

        # if we get to this point, no switches were specified
        return script + "Get-ComputerDetails -ToString"
Ovation event sponsors enjoy high visibility in front of a dynamic, engaged audience of music lovers. Visibility includes your company name and logo on printed materials, event signage, website, emails, and social media, depending on sponsorship level. For more information, please contact Scott Moore at scott@ethos.org. Founded in 1998, Ethos provides group classes, private lessons, summer camps and music outreach programs to thousands of youth across Oregon. Ethos has been named one of the top 50 after school arts programs by the President's Commission for the Arts and Humanities on 8 separate occasions. Please contact Ethos' Executive Director, Scott Moore, for more information!
import numpy import pytest import chainerx import chainerx.testing from chainerx_tests import array_utils @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_negative(xp, device, shape, dtype, is_module): if dtype == 'bool_': # Checked in test_invalid_bool_neg return chainerx.testing.ignore() x = array_utils.create_dummy_ndarray(xp, shape, dtype) if is_module: return xp.negative(x) else: return -x @chainerx.testing.numpy_chainerx_array_equal( accept_error=(chainerx.DtypeError, TypeError)) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_negative_invalid_bool(xp, device, is_module): x = xp.array([True, False], dtype='bool_') if is_module: xp.negative(x) else: -x @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_add(xp, device, shape, dtype, is_module): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2) if is_module: return xp.add(lhs, rhs) else: return lhs + rhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_iadd(xp, device, shape, dtype): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2) lhs += rhs return lhs @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_add_scalar(scalar, device, shape, dtype): x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype) # Implicit casting in NumPy's multiply depends on the 'casting' argument, # which is not yet supported (ChainerX always casts). # Therefore, we explicitly cast the scalar to the dtype of the ndarray # before the multiplication for NumPy. 
expected = x_np + numpy.dtype(dtype).type(scalar) x = chainerx.array(x_np) scalar_chx = chainerx.Scalar(scalar, dtype) chainerx.testing.assert_array_equal_ex(x + scalar, expected) chainerx.testing.assert_array_equal_ex(x + scalar_chx, expected) chainerx.testing.assert_array_equal_ex(scalar + x, expected) chainerx.testing.assert_array_equal_ex(scalar_chx + x, expected) chainerx.testing.assert_array_equal_ex(chainerx.add(x, scalar), expected) chainerx.testing.assert_array_equal_ex( chainerx.add(x, scalar_chx), expected) chainerx.testing.assert_array_equal_ex(chainerx.add(scalar, x), expected) chainerx.testing.assert_array_equal_ex( chainerx.add(scalar_chx, x), expected) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_iadd_scalar(xp, scalar, device, shape, dtype): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype) rhs = scalar if xp is numpy: rhs = numpy.dtype(dtype).type(rhs) lhs += rhs return lhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_sub(xp, device, shape, numeric_dtype, is_module): lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2) if is_module: return xp.subtract(lhs, rhs) else: return lhs - rhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_isub(xp, device, shape, numeric_dtype): lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype, pattern=2) lhs -= rhs return lhs @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_sub_scalar(scalar, device, shape, dtype): if dtype == 'bool_': # Boolean subtract is deprecated. 
return chainerx.testing.ignore() x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype) # Implicit casting in NumPy's multiply depends on the 'casting' argument, # which is not yet supported (ChainerX always casts). # Therefore, we explicitly cast the scalar to the dtype of the ndarray # before the multiplication for NumPy. expected = x_np - numpy.dtype(dtype).type(scalar) expected_rev = numpy.dtype(dtype).type(scalar) - x_np x = chainerx.array(x_np) scalar_chx = chainerx.Scalar(scalar, dtype) chainerx.testing.assert_array_equal_ex(x - scalar, expected) chainerx.testing.assert_array_equal_ex(x - scalar_chx, expected) chainerx.testing.assert_array_equal_ex(scalar - x, expected_rev) chainerx.testing.assert_array_equal_ex(scalar_chx - x, expected_rev) chainerx.testing.assert_array_equal_ex( chainerx.subtract(x, scalar), expected) chainerx.testing.assert_array_equal_ex( chainerx.subtract(x, scalar_chx), expected) chainerx.testing.assert_array_equal_ex( chainerx.subtract(scalar, x), expected_rev) chainerx.testing.assert_array_equal_ex( chainerx.subtract(scalar_chx, x), expected_rev) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_isub_scalar(xp, scalar, device, shape, dtype): if dtype == 'bool_': # Boolean subtract is deprecated. 
return chainerx.testing.ignore() lhs = array_utils.create_dummy_ndarray(xp, shape, dtype) rhs = scalar if xp is numpy: rhs = numpy.dtype(dtype).type(rhs) lhs -= rhs return lhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_mul(xp, device, shape, dtype, is_module): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2) if is_module: return xp.multiply(lhs, rhs) else: return lhs * rhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_imul(xp, device, shape, dtype): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=1) rhs = array_utils.create_dummy_ndarray(xp, shape, dtype, pattern=2) lhs *= rhs return lhs @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_mul_scalar(scalar, device, shape, dtype): x_np = array_utils.create_dummy_ndarray(numpy, shape, dtype) # Implicit casting in NumPy's multiply depends on the 'casting' argument, # which is not yet supported (ChainerX always casts). # Therefore, we explicitly cast the scalar to the dtype of the ndarray # before the multiplication for NumPy. 
expected = x_np * numpy.dtype(dtype).type(scalar) x = chainerx.array(x_np) scalar_chx = chainerx.Scalar(scalar, dtype) chainerx.testing.assert_array_equal_ex(x * scalar, expected) chainerx.testing.assert_array_equal_ex(x * scalar_chx, expected) chainerx.testing.assert_array_equal_ex(scalar * x, expected) chainerx.testing.assert_array_equal_ex(scalar_chx * x, expected) chainerx.testing.assert_array_equal_ex( chainerx.multiply(x, scalar), expected) chainerx.testing.assert_array_equal_ex( chainerx.multiply(x, scalar_chx), expected) chainerx.testing.assert_array_equal_ex( chainerx.multiply(scalar, x), expected) chainerx.testing.assert_array_equal_ex( chainerx.multiply(scalar_chx, x), expected) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize('scalar', [0, -1, 1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_imul_scalar(xp, scalar, device, shape, dtype): lhs = array_utils.create_dummy_ndarray(xp, shape, dtype) rhs = scalar if xp is numpy: rhs = numpy.dtype(dtype).type(rhs) lhs *= rhs return lhs @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_truediv(xp, device, shape, numeric_dtype, is_module): lhs = array_utils.create_dummy_ndarray(xp, shape, numeric_dtype) rhs = xp.arange(1, lhs.size + 1, dtype=numeric_dtype).reshape(shape) # TODO(beam2d): Remove astype after supporting correct dtype promotion. if is_module: return xp.divide(lhs, rhs).astype(numeric_dtype) else: return (lhs / rhs).astype(numeric_dtype) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_itruediv(xp, device, shape, numeric_dtype): # TODO(niboshi): Remove padding=False lhs = array_utils.create_dummy_ndarray( xp, shape, numeric_dtype, padding=False) rhs = xp.arange(1, lhs.size + 1, dtype=numeric_dtype).reshape(shape) # TODO(beam2d): Fix after supporting correct dtype promotion. 
if xp is numpy and 'int' in numeric_dtype: # NumPy does not support itruediv to integer arrays. lhs = (lhs / rhs).astype(numeric_dtype) else: lhs /= rhs return lhs # TODO(hvy): Support and test zero division and mixed dtypes (dtype kinds). # TODO(hvy): Support and test chainerx.Scalar / chainerx.ndarray. @pytest.mark.parametrize('scalar', [1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_truediv_scalar(scalar, device, shape, numeric_dtype): x_np = array_utils.create_dummy_ndarray(numpy, shape, numeric_dtype) if 'int' in numeric_dtype: # NumPy does not support itruediv to integer arrays. expected = (x_np / scalar).astype(numeric_dtype) else: expected = x_np / scalar x = chainerx.array(x_np) scalar_chx = chainerx.Scalar(scalar, numeric_dtype) chainerx.testing.assert_array_equal_ex(x / scalar, expected) chainerx.testing.assert_array_equal_ex(x / scalar_chx, expected) chainerx.testing.assert_array_equal_ex( chainerx.divide(x, scalar), expected) chainerx.testing.assert_array_equal_ex( chainerx.divide(x, scalar_chx), expected) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize('scalar', [1, 2]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_itruediv_scalar(xp, scalar, device, shape, numeric_dtype): # TODO(niboshi): Remove padding=False lhs = array_utils.create_dummy_ndarray( xp, shape, numeric_dtype, padding=False) rhs = scalar # TODO(hvy): Fix after supporting correct dtype promotion. if xp is numpy and 'int' in numeric_dtype: # NumPy does not support itruediv to integer arrays. 
lhs = (lhs / rhs).astype(numeric_dtype) else: lhs /= rhs return lhs # TODO(niboshi): Remove strides_check=False @chainerx.testing.numpy_chainerx_array_equal(strides_check=False) @pytest.mark.parametrize('keepdims', [False, True]) @pytest.mark.parametrize('shape,axis', [ ((), None), ((), ()), ((2,), None), ((2,), ()), ((2,), 0), ((2,), (0,)), ((2,), (-1,)), ((2, 3), None), ((2, 3), ()), ((2, 3), 0), ((2, 3), (0,)), ((2, 3), (1,)), ((2, 3), (-1,)), ((2, 3), (-2,)), ((2, 3), (0, 1)), ((2, 3), (-2, -1)), ((1, 3), None), # sum over 1-dim axis ((0, 3), None), # sum over 0-dim axis # Sum over axes that are in the middle or apart ((2, 3, 4), (1,)), ((2, 3, 4), (0, 2)), # Sum over axes that are apart and/or unsorted ((2, 3), (1, 0)), ((2, 3, 4), (2, 0)), ((2, 3, 4), (2, 0, 1)), ((2, 3, 4), (-2, 2, 0)), ]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_sum(is_module, xp, device, shape, axis, keepdims, dtype): a = array_utils.create_dummy_ndarray(xp, shape, dtype) if is_module: out = xp.sum(a, axis=axis, keepdims=keepdims) else: out = a.sum(axis=axis, keepdims=keepdims) # TODO(niboshi): Unsigned integer dtypes should result in uint64. # Currently chainerx returns int64. 
if xp is numpy and numpy.dtype(dtype).kind == 'u': out = out.astype(numpy.int64) return out @chainerx.testing.numpy_chainerx_array_equal( accept_error=(chainerx.DimensionError, ValueError)) @pytest.mark.parametrize('keepdims', [False, True]) @pytest.mark.parametrize('shape,axis', [ # ((), 0), # TODO(sonots): Fix compatibility ((), 1), ((), (1,)), ((2,), 2), ((2,), (2,)), ((2,), (-2,)), ((2, 3,), (-3,)), ((2, 3,), (-3, -4)), ((2, 3,), (0, 0)), ((2, 3,), (-1, -1)), ((2, 3,), (0, 1, 1)), ((2, 3,), (0, -2)), ]) def test_sum_invalid(is_module, xp, shape, axis, keepdims, dtype): a = array_utils.create_dummy_ndarray(xp, shape, dtype) if is_module: xp.sum(a, axis=axis, keepdims=keepdims) else: a.sum(axis=axis, keepdims=keepdims) # TODO(sonots): Fix type compatibility for when shape is () @chainerx.testing.numpy_chainerx_array_equal(dtype_check=False) @pytest.mark.parametrize("shape,value", [ ((), -1), ((), 1), ((1,), -1), ((1,), 1), ((2,), 1), ((2, 3), 3), ]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) def test_maximum_with_scalar(xp, device, shape, value, signed_dtype): a = array_utils.create_dummy_ndarray(xp, shape, signed_dtype) return xp.maximum(a, value) def _create_dummy_array_for_dot(xp, shape, dtype): x = numpy.arange(numpy.prod(shape)).reshape(shape) if dtype == 'bool_': x = numpy.asarray(x % 2 == 0) else: x = x.astype(dtype) return xp.array(x) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-4), numpy.asarray(4), numpy.asarray(-float('inf')), numpy.asarray(float('inf') ), numpy.asarray(float('nan')), numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) # TODO(niboshi): Dtype promotion is not supported yet. 
def test_exp(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.exp(a) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-1), numpy.asarray(1), numpy.asarray( 10), numpy.asarray(float('inf')), numpy.asarray(float('nan')), numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) # TODO(niboshi): Dtype promotion is not supported yet. def test_log(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.log(a) _logsumexp_params = [ ((2,), 0), ((2,), -1), ((2, 3), None), ((2, 3), 0), ((2, 3), 1), ((2, 3), -2), ((2, 3), (0, 1)), ((2, 3), (-2, 1)), ((1, 2, 3), None), ((1, 2, 3), (1)), ((1, 2, 3), (1, 0)), ((1, 2, 3), (0, 1, 2)), ] _invalid_logsumexp_params = [ # Axis out of bounds ((2,), 1), ((2,), -2), ((2,), (0, 1)), ((2, 3), (0, 1, 2)), # Duplicate axes ((2,), (0, 0)), ((2, 3), (0, 0)), ] @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('a_shape,axis', _logsumexp_params) @pytest.mark.parametrize('keepdims', [True, False]) @chainerx.testing.numpy_chainerx_allclose(rtol=1e-7, atol=0, dtype_check=False) # TODO(hvy): Dtype promotion is not supported yet. def test_logsumexp(xp, device, a_shape, axis, float_dtype, keepdims): a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype) if xp is numpy: return xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=keepdims)) return xp.logsumexp(a, axis=axis, keepdims=keepdims) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params) @pytest.mark.parametrize('keepdims', [True, False]) # TODO(hvy): Dtype promotion is not supported yet. 
# TODO(hvy): Should not overflow for large numbers, add tests def test_logsumexp_invalid(device, a_shape, axis, float_dtype, keepdims): a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype) with pytest.raises(chainerx.DimensionError): chainerx.logsumexp(a, axis=axis, keepdims=keepdims) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('a_shape,axis', _logsumexp_params) @chainerx.testing.numpy_chainerx_allclose( rtol=1e-7, atol=1e-5, dtype_check=False) # TODO(hvy): Dtype promotion is not supported yet. def test_log_softmax(xp, device, a_shape, axis, float_dtype): a = array_utils.create_dummy_ndarray(xp, a_shape, float_dtype) if xp is numpy: # Default is the second axis axis = axis if axis is not None else 1 return a - xp.log(xp.sum(xp.exp(a), axis=axis, keepdims=True)) return xp.log_softmax(a, axis=axis) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('a_shape,axis', _invalid_logsumexp_params) # TODO(hvy): Dtype promotion is not supported yet. def test_log_softmax_invalid(device, a_shape, axis, float_dtype): a = array_utils.create_dummy_ndarray(chainerx, a_shape, float_dtype) with pytest.raises(chainerx.DimensionError): return chainerx.log_softmax(a, axis=axis) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-4), numpy.asarray(4), numpy.asarray(-float('inf')), numpy.asarray(float('inf') ), numpy.asarray(float('nan')), numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) # TODO(hamaji): Dtype promotion is not supported yet. 
def test_sqrt(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.sqrt(a) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-1), numpy.asarray(1), numpy.asarray( 10), numpy.asarray(float('inf')), numpy.asarray(float('nan')), numpy.full((), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) # TODO(hamaji): Dtype promotion is not supported yet. def test_tanh(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.tanh(a) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-1), numpy.asarray( 10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')), numpy.asarray(float('nan')), numpy.full( (), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) def test_isnan(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.isnan(a) @chainerx.testing.numpy_chainerx_array_equal() @pytest.mark.parametrize_device(['native:0', 'cuda:0']) @pytest.mark.parametrize('input', [ numpy.asarray(0), numpy.asarray(-1), numpy.asarray( 10), numpy.asarray(float('inf')), numpy.asarray(-float('inf')), numpy.asarray(float('nan')), numpy.full( (), 2), numpy.full((0,), 2), numpy.full((2, 3), 2) ]) def test_isinf(xp, device, input, float_dtype): dtype = float_dtype a = xp.array(input.astype(dtype)) return xp.isinf(a) def test_max_amax(): assert chainerx.amax is chainerx.max @chainerx.testing.numpy_chainerx_array_equal( accept_error=(ValueError, chainerx.DimensionError), strides_check=False) @pytest.mark.parametrize('input,axis', [ # --- single axis # input, axis # valid params (numpy.asarray(0), None), (numpy.asarray(-1), None), (numpy.asarray(float('inf')), None), (numpy.asarray(float('nan')), None), (numpy.asarray(-float('inf')), 
None), (numpy.asarray([4, 1, 4, 1]), None), (numpy.asarray([4, 1, 4, 1]), 0), (numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]), 0), (numpy.asarray([[4, 4, 1, 1], [4, 1, 4, 1]]).T, 1), (numpy.asarray([-0.0, +0.0, +0.0, -0.0]), None), (numpy.asarray([[True, True, False, False], [True, False, True, False]]), 0), (numpy.ones((2, 0, 3)), 2), (numpy.ones((2, 3)), 1), (numpy.ones((2, 3)), -2), # invalid params (numpy.ones((0,)), None), (numpy.ones((2, 0, 3)), 1), (numpy.ones((2, 0, 3)), None), (numpy.ones((2, 3)), 2), (numpy.ones((2, 3)), -3), # --- multiple axes # input, axis # valid params (numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (0, 1)), (numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-2, -1)), # invalid params (numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 1)), (numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (-3, 1)), (numpy.asarray([[1, 4, 3, 1], [4, 6, 3, 2], [2, 3, 6, 1]]), (1, 2)), ]) @pytest.mark.parametrize_device(['native:0', 'cuda:0']) # TODO(niboshi): Remove strides_check=False def test_max(is_module, xp, device, input, axis, dtype): try: a_np = input.astype(dtype) except (ValueError, OverflowError): return xp.zeros(()) # invalid combination of data and dtype a = xp.array(a_np) if is_module: return xp.max(a, axis) else: return a.max(axis)
Recent court rulings on FERC Order No. 1000 uphold FERC authority over regional planning, placing federal regulators at the helm of modernizing the electric grid, particularly in states operating in deregulated electric markets. The court decision vacating FERC’s ROE orders casts uncertainty for utilities that rely on FERC-approved rates for transmission services. The effectiveness of transmission planning processes will continue to be under scrutiny, as the U.S. power system undergoes transformational changes driven primarily by changes in the generation resource mix toward renewables and natural gas. Oklahoma has enacted legislation ending the state’s wind energy tax credits on July 1, 2017, more than three years ahead of the original schedule to help close a budget deficit. The early sunset of tax credits is unlikely to impact the budget outlook in the near term, but creates uncertainty for wind projects under construction. Addressing the state’s recurring budget deficits requires a comprehensive economic plan and a long-term strategy to draw new investment and align revenues. The Maryland legislature has passed a bill (SB 758) establishing the nation’s first tax credits for energy storage systems. The legislation is well-timed to support Maryland’s new renewable portfolio standard goals, reflecting the growing role of energy storage technologies in supporting the growth of renewables. Maryland could serve as a model for states exploring ways to bolster their nascent storage markets, as tax credits present a simpler alternative to mandates and incentives.
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from openstack import resource from openstack import utils from rackspace.monitoring import monitoring_service class Notification(resource.Resource): base_path = 'notifications' resources_key = 'values' service = monitoring_service.MonitoringService() # capabilities allow_create = True allow_delete = True allow_list = True allow_retrieve = True allow_update = True # Properties #: Details specific to the notification. *Type: dict* details = resource.prop('details', type=dict) #: A friendly label for the notification type name = resource.prop('label') #: The type of notification to send type = resource.prop('type') def test(self, session): """Test an existing notification The notification comes from the same server that the alert messages come from. One use for this test is to verify that your firewall is configured properly. :param session: The session to use for making this request. :type session: :class:`~openstack.session.Session` :returns: ``dict`` """ url = utils.urljoin(self.base_path, self.id, 'test') return session.post(url, endpoint_filter=self.service).body
P0455 EVAP Large leak detected HELP updated. P0455 EVAP Large leak detected. 2012 SRX TPMS programming procedure? reset 2010 SRX transmission Control Module? Possible transmission issue for first gen SRX V8?? add fog lights to my 2012 SRX? Help with Check engine light! Does anyone recognize these rims? Long term quality of interior of SRX? long term maintenance costs? How do I deactivate the horn alerts? Windshield cracked after 9 days and 500 miles?!?! 25% less Power until ?? Got an iPhone. Can I record songs to my HDD on my SRX? Tire chains allowed on 2013 SRX? What are holes in headliner for? FOR SALE 22" LEXANI SRX RIMS AND TIRES LIKE NEW. Just Came Home with a 2012!!! Any experience with E85 Fuel? Anyone use the Tire Sealant/Compressor yet? 2011 SRX Performance auto dimming driver's mirror. SRX Poor Performance Reviews - is it that bad? Has anyone traded a first-gen SRX for the new one? What do you think? Equinox / Terrain vs. SRX: what about the Cadillac makes it worth the extra $$? How much is shared between the SRX and the Terrain/Equinox??
# -*- Mode: Python -*- # vi:si:et:sw=4:sts=4:ts=4 # Copyright (c) 2001-2002, MetaSlash Inc. All rights reserved. """ Object to hold information about functions. Also contain a pseudo Python function object """ import string _ARGS_ARGS_FLAG = 4 _KW_ARGS_FLAG = 8 _CO_FLAGS_MASK = _ARGS_ARGS_FLAG + _KW_ARGS_FLAG class _ReturnValues: """ I am a base class that can track return values. @ivar returnValues: tuple of (line number, stack item, index to next instruction) @type returnValues: tuple of (int, L{pychecker.Stack.Item}, int) """ def __init__(self): self.returnValues = None def returnsNoValue(self): returnValues = self.returnValues # if unset, we don't know if returnValues is None: return 0 # it's an empty list, that means no values if not returnValues: return 1 # make sure each value is not None for rv in returnValues: if not rv[1].isNone(): return 0 return returnValues[-1][1].isImplicitNone() class FakeCode : "This is a holder class for code objects (so we can modify them)" def __init__(self, code, varnames = None) : """ @type code: L{types.CodeType} """ for attr in dir(code): try: setattr(self, attr, getattr(code, attr)) except: pass if varnames is not None: self.co_varnames = varnames class FakeFunction(_ReturnValues): """ This is a holder class for turning non-scoped code (for example at module-global level, or generator expressions) into a function. 
Pretends to be a normal callable and can be used as constructor argument to L{Function} """ def __init__(self, name, code, func_globals = {}, varnames = None) : _ReturnValues.__init__(self) self.func_name = self.__name__ = name self.func_doc = self.__doc__ = "ignore" self.func_code = FakeCode(code, varnames) self.func_defaults = None self.func_globals = func_globals def __str__(self): return self.func_name def __repr__(self): return '%s from %r' % (self.func_name, self.func_code.co_filename) class Function(_ReturnValues): """ Class to hold all information about a function @ivar function: the function to wrap @type function: callable @ivar isMethod: whether the callable is a method @type isMethod: int (used as bool) @ivar minArgs: the minimum number of arguments that should be passed to this function @type minArgs: int @ivar minArgs: the maximum number of arguments that should be passed to this function, or None in case of *args/unlimited @type maxArgs: int or None @ivar supportsKW: whether the function supports keyword arguments. @type supportsKW: int (used as bool) """ def __init__(self, function, isMethod=0): """ @param function: the function to wrap @type function: callable or L{FakeFunction} @param isMethod: whether the callable is a method @type isMethod: int (used as bool) """ _ReturnValues.__init__(self) self.function = function self.isMethod = isMethod # co_argcount is the number of positional arguments (including # arguments with default values) self.minArgs = self.maxArgs = function.func_code.co_argcount # func_defaults is a tuple containing default argument values for those # arguments that have defaults, or None if no arguments have a default # value if function.func_defaults is not None: self.minArgs = self.minArgs - len(function.func_defaults) # if function uses *args, there is no max # args try: # co_flags is an integer encoding a number of flags for the # interpreter. 
if function.func_code.co_flags & _ARGS_ARGS_FLAG != 0: self.maxArgs = None self.supportsKW = function.func_code.co_flags & _KW_ARGS_FLAG except AttributeError: # this happens w/Zope self.supportsKW = 0 def __str__(self): return self.function.func_name def __repr__(self): # co_filename is the filename from which the code was compiled # co_firstlineno is the first line number of the function return '<%s from %r:%d>' % (self.function.func_name, self.function.func_code.co_filename, self.function.func_code.co_firstlineno) def arguments(self): """ @returns: a list of argument names to this function @rtype: list of str """ # see http://docs.python.org/reference/datamodel.html#types # for more info on func_code # co_argcount is the number of positional arguments (including # arguments with default values) numArgs = self.function.func_code.co_argcount if self.maxArgs is None: # co_varnames has the name of the *args variable after the # positional arguments numArgs = numArgs + 1 if self.supportsKW: # co_varnames has the name of the **kwargs variable after the # positional arguments and *args variable numArgs = numArgs + 1 # co_varnames is a tuple containing the names of the local variables # (starting with the argument names) # FIXME: a generator seems to have .0 as the first member here, # and then the generator variable as the second. # should we special-case that here ? return self.function.func_code.co_varnames[:numArgs] def isParam(self, name): """ @type name: str @returns: Whether the given name is the name of an argument to the function @rtype: bool """ return name in self.arguments() def isStaticMethod(self): return self.isMethod and isinstance(self.function, type(create_fake)) def isClassMethod(self): try: return self.isMethod and self.function.im_self is not None except AttributeError: return 0 def defaultValue(self, name): """ @type name: str @returns: the default value for the function parameter with the given name. 
""" func_code = self.function.func_code arg_names = list(func_code.co_varnames[:func_code.co_argcount]) i = arg_names.index(name) if i < self.minArgs: raise ValueError return self.function.func_defaults[i - self.minArgs] def varArgName(self): """ @returns: the name of the *args parameter of the function. @rtype: str """ if self.maxArgs is not None: return None func_code = self.function.func_code return func_code.co_varnames[func_code.co_argcount] def create_fake(name, code, func_globals = {}, varnames = None) : return Function(FakeFunction(name, code, func_globals, varnames)) def create_from_file(file, filename, module): """ @type filename: str @returns: a function that represents the __main__ entry point, if there was a file @rtype: L{Function} """ if file is None: return create_fake(filename, compile('', filename, 'exec')) # Make sure the file is at the beginning # if python compiled the file, it will be at the end file.seek(0) # Read in the source file, see py_compile.compile() for games w/src str codestr = file.read() codestr = string.replace(codestr, "\r\n", "\n") codestr = string.replace(codestr, "\r", "\n") if codestr and codestr[-1] != '\n': codestr = codestr + '\n' code = compile(codestr, filename, 'exec') return Function(FakeFunction('__main__', code, module.__dict__)) def _co_flags_equal(o1, o2) : return (o1.co_flags & _CO_FLAGS_MASK) == (o2.co_flags & _CO_FLAGS_MASK) def same_signature(func, object) : '''Return a boolean value if the <func> has the same signature as a function with the same name in <object> (ie, an overriden method)''' try : baseMethod = getattr(object, func.func_name) base_func_code = baseMethod.im_func.func_code except AttributeError : return 1 return _co_flags_equal(base_func_code, func.func_code) and \ base_func_code.co_argcount == func.func_code.co_argcount
Y560 is unable to connect to internet. Ideapad Z570 - Failing to boot - Lenovo recover disk query. ReadyComm - What is it good for? How to get 4.5+ hours on battery with Y580? IdeaPad Y460 - Bloatware/Removable Software? Weird lines on screen on z570. Y530 caught in the rain. How can I check what needs to be replaced? What wifi card else does my Z580 Support? Anyone know? Y480 temps increased dramatically this past month, could it be dust? Lenovo y560p graphics distortion, where to get replaceemt video connector? Lenovo Y580 won't start games? IdeaPad Y580 Laptop on spain? How do I find out what and how to change the Intel(R) HD Graphics Family dedicated memory?
from __future__ import print_function
from os.path import lexists, realpath
from os import unlink, rename, symlink, stat
from optparse import OptionParser
from hashlib import md5
from PIL import Image, PngImagePlugin
from wellpapp import Client, make_pdirs


def main(arg0, argv):
    """Replace the file behind an existing wellpapp post with new-file.

    Re-keys the post to the new file's md5 and either regenerates the
    thumbnails (-t) or carries the old ones over under the new key.
    Returns a shell-style exit code (non-zero on failure).
    """
    p = OptionParser(usage="Usage: %prog [-t] post-spec new-file", prog=arg0)
    p.add_option("-t", "--regenerate-thumbs",
                 action="store_true",
                 help="Regenerate thumbnails from new-file"
                 )
    opts, args = p.parse_args(argv)
    if len(args) != 2:
        p.print_help()
        return 1
    client = Client()
    oldfile, newfile = args
    # Resolve the post-spec to the old file's md5 (the post's current key).
    m = client.postspec2md5(oldfile)
    post = client.get_post(m, wanted=(["ext", "rotate"]))
    if not post:
        print("Post not found")
        return 1
    data = open(newfile, "rb").read()
    newm = md5(data).hexdigest()
    mtime = stat(newfile).st_mtime
    # Refuse to clobber a post already keyed by the new content.
    if client.get_post(newm, wanted=()):
        print("New file already has post")
        return 1
    # Link the new content into the image store under its md5.
    path = client.image_path(newm)
    if lexists(path):
        unlink(path)
    make_pdirs(path)
    symlink(realpath(newfile), path)
    if opts.regenerate_thumbs:
        # @@ assumes same ext
        client.save_thumbs(newm, None, post.ext, post.rotate, True)
    else:
        # Reuse old thumbnails: rewrite the freedesktop thumbnail keys
        # (Thumb::URI / Thumb::MTime) so they match the new file.
        meta = PngImagePlugin.PngInfo()
        meta.add_text("Thumb::URI", str(newm + "." + post.ext), 0)
        meta.add_text("Thumb::MTime", str(int(mtime)), 0)
    # Integer entries are plain thumb sizes; "normal"/"large" are PNG thumbs.
    sizes = list(map(int, client.cfg.thumb_sizes.split())) + ["normal", "large"]
    for z in sizes:
        if isinstance(z, int):
            oldpath = client.thumb_path(m, z)
            if opts.regenerate_thumbs:
                unlink(oldpath)
            else:
                newpath = client.thumb_path(newm, z)
                make_pdirs(newpath)
                rename(oldpath, newpath)
        else:
            oldpath = client.pngthumb_path(m, post.ext, z)
            if opts.regenerate_thumbs:
                unlink(oldpath)
            else:
                t = Image.open(oldpath)
                t.load()
                newpath = client.pngthumb_path(newm, post.ext, z)
                make_pdirs(newpath)
                t.save(newpath, format="PNG", pnginfo=meta)
    # Point the post at the new md5, then drop the old image link.
    client.modify_post(m, MD5=newm)
    path = client.image_path(m)
    if lexists(path):
        unlink(path)
There’s a key lesson to be learnt by Americans from the disastrous Brexit process. Theresa May wanted to bypass parliament and dictate her own path, but the judiciary said otherwise. The right-wing press tried to shoot the judges down, but the fact remains that an individual does not govern alone in our countries. Governing alone is exactly what Trump appears to be doing. No wonder he admires Putin. With his executive orders, he bypasses Congress and all the checks and balances of a democratic system. Whether or not they prove to be yet more bluster remains to be seen. It is difficult to know, from information available in the press, what legal value his executive orders have (1). In the meantime, why do we feel a sneaking sense of relief at Trump’s impetuous pronouncements? Because, even if we disagree with him entirely, he appears to free up a system that, with its sterile two-party confrontation, was blocking much real constructive progress. At the same time, Trump’s headstrong, impetuous decision-making in the absence of any awareness of the long-term consequences (not just for Americans but for everybody) clearly requires checks and balances. Trump is not the answer. But the fact that he was elected and that he acts as he does points to an urgent need for a carefully thought-out reorganisation of the democratic system. (1) The recent order banning Muslims from certain countries from entering the US seems to show that such executive orders do have a tangible effect.
#!/usr/bin/python ''' Copyright (c) 2005 Hewlett-Packard Company All rights reserved Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Hewlett-Packard Company nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Common tokenizing and tree stuff
February 2005 Andrew Christian
'''

#####################################################

class Token:
    """A terminal produced by a line scanner: a type tag, an optional
    attribute (usually the matched text) and the source line number."""

    def __init__(self, type, attr=None, lineno=-1):
        self.type = type
        self.attr = attr
        self.lineno = lineno

    def __cmp__(self, o):
        # Tokens compare by type only, so a Token can be matched directly
        # against a bare type value.
        return cmp(self.type, o)

    def __repr__(self):
        if self.attr is not None:
            return "%s token on line %d" % (self.attr, self.lineno)
        return "%s token on line %d" % (self.type, self.lineno)

    # __getitem__ only if you have heterogeneous ASTs
    #def __getitem__(self, i):
    #    raise IndexError


class AST:
    """Abstract-syntax-tree node: a type tag plus a list of children,
    exposed through the sequence protocol."""

    def __init__(self, type):
        self.type = type
        self._kids = []
        self.lineno = -1

    def __getitem__(self, i):
        return self._kids[i]

    def __len__(self):
        return len(self._kids)

    def __setslice__(self, low, high, seq):
        # Allows the parser to splice child lists in place.
        self._kids[low:high] = seq

    def __cmp__(self, o):
        # Like Token: nodes compare by type only.
        return cmp(self.type, o)

    def __repr__(self):
        if hasattr(self, 'attr') and self.attr is not None:
            return "%s token on line %d" % (self.attr, self.lineno)
        return "%s token on line %d" % (self.type, self.lineno)


#####################################################

def tokenize_by_line(scanner, filename):
    '''Parse a data file with a line-by-line scanner.
       Pass the class of the scanner and the filename.
       Returns a token list.
    '''
    fd = open(filename)
    tlist = []
    input = fd.readline()
    lineno = 1
    while input:
        # A fresh scanner instance is built per line so it can carry the
        # current line number into the tokens it emits.
        bs = scanner(lineno)
        tlist += bs.tokenize(input)
        lineno += 1
        input = fd.readline()
    fd.close()
    return tlist


def parse_tokens(parser, tlist):
    # Instantiate the parser with our AST node factory and run it.
    p = parser(AST)
    atree = p.parse(tlist)
    return atree


#####################################################

import re

def dump_token_list(tlist):
    # Debug helper: one token per line, embedded newlines shown as '.'.
    foo = re.compile('\n')
    for t in tlist:
        if t.attr:
            print t.lineno, "TOKEN %s '%s'" % (t.type, foo.sub('.', t.attr))
        else:
            print t.lineno, "TOKEN", t.type


def dump_ast(atree, depth=0):
    # Debug helper: print the tree indented by depth, newlines as '.'.
    foo = re.compile('\n')
    if hasattr(atree, 'attr') and atree.attr is not None:
        a = atree.attr
        if type(a) is str:
            a = foo.sub('.', a)
        print " " * depth, atree.type, a
    else:
        print " " * depth, atree.type
    try:
        # Leaf tokens are not iterable; stop descending quietly there.
        for k in atree:
            dump_ast(k, depth+1)
    except:
        pass
The Mercedes-Benz S-Class embodies everything you’ve ever dreamt of in a luxury sedan. Plush and lavish interiors that transport you to another dimension of comfort, charming energy and performance that makes every drive an entirely unique experience, and its handsomely crafted exterior that steals the show on the streets. If you’re ready, this sleek and speedy rear-wheel drive is calling your name.
from enum import Enum
from typing import List
from random import randint


class Suit(Enum):
    """One of the four French playing-card suits."""

    SPADE = 0
    CLUB = 1
    HEART = 2
    DIAMOND = 3

    def __str__(self) -> str:
        # "SPADE" -> "Spade"
        return str(self.name).capitalize()


class Rank(Enum):
    """Card rank, ACE low (1) through KING (13)."""

    ACE = 1
    TWO = 2
    THREE = 3
    FOUR = 4
    FIVE = 5
    SIX = 6
    SEVEN = 7
    EIGHT = 8
    NINE = 9
    TEN = 10
    JACK = 11
    QUEEN = 12
    KING = 13

    def __str__(self) -> str:
        # Number cards render as their value, court cards and ACE by name.
        if 2 <= self.value <= 10:
            return str(self.value)
        return str(self.name).capitalize()


class Card:
    """A single playing card: a (suit, rank) pair."""

    def __init__(self, suit: Suit, rank: Rank) -> None:
        self.suit = suit
        self.rank = rank

    def __str__(self) -> str:
        return "{} of {}".format(self.rank, self.suit)


class Deck:
    """A standard 52-card deck, in suit-major order until shuffled."""

    def __init__(self) -> None:
        self.deck: List[Card] = []
        for s in list(Suit):
            for v in list(Rank):
                self.deck.append(Card(s, v))

    def shuffle(self, iterations: int = 1000) -> None:
        """Shuffle the deck in place using `iterations` random swaps.

        Each step swaps position i % 52 with a different random position.
        Bug fix: the loop previously ran a hard-coded 1000 times,
        silently ignoring the `iterations` argument.
        """
        for i in range(iterations):
            a = i % 52
            while True:
                b = randint(0, 51)
                if a != b:
                    break
            self.deck[a], self.deck[b] = self.deck[b], self.deck[a]

    def __str__(self) -> str:
        # Four cards per row: tab-separated within a row, newline after
        # every fourth card.
        s = ""
        for i, card in enumerate(self.deck):
            s += str(card)
            separator = '\t'
            if i % 4 == 3:
                separator = '\n'
            s += separator
        return s


if __name__ == '__main__':
    d = Deck()
    d.shuffle()
    print(d)
We work to improve the lives of underprivileged children and young adults in southern Malawi by initiating or supporting educational, medical and youth employability projects in rural and urban areas. We believe the only way to improve the general welfare of children and young adults in Malawi is to provide them with the chance to go to school, to eat at least one meal a day, and to teach them tangible vocational skills. Our aim is to provide the next Malawian generation with a self-reliant future, so that they, in years to come, can help themselves. Advocate and foster the education, health and well-being of children and young adults in Malawi. Provide educational and medical assistance for children and take all other measures to improve their welfare. Establish, maintain, support and manage educational and medical institutions, provided that these are created for the aims and objectives of the Children’s Fund, on a non-profit basis. Transfer ‘ownership’ and responsibility for CFM projects to local Malawian management as soon as these projects are sustainable. The building of an Accident and Emergency ward and 24 hour Short Stay Ward for children at the Queen Elizabeth Central Hospital in Blantyre. The building of Moyo House, an extension to the Nutritional Rehabilitation Centre at Queen Elizabeth Central Hospital in Blantyre. The distribution of medical equipment to paediatric wards of hospitals in the southern region of Malawi. Salary support to paediatric nurses at Queen Elizabeth Central Hospital in Blantyre. Feeding vulnerable children (2-5 yrs old) and the elderly during the famine of 2003/2004 in the Thyolo and Chikwawa region. Renovation of Chiuta and Bvumbwe orphanages. The building of teachers homes and classrooms at Chigonjetsu School. The building of over 70 gowelos: traditional, individual homes for orphans. The building of Green Malata: an entrepreneurial training village for children and young adults.
#!/usr/bin/python -u
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org>

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Test command running."""

import datetime
from subunit import iso8601
import os
import subprocess
import subunit
import sys
import tempfile
import warnings


# expand strings from %ENV
def expand_environment_strings(s, vars):
    """Replace each "$NAME" in s with its value from the vars mapping."""
    # we use a reverse sort so we do the longer ones first
    for k in sorted(vars.keys(), reverse=True):
        v = vars[k]
        s = s.replace("$%s" % k, v)
    return s


def expand_command_list(cmd):
    """Turn a command template into its list-tests form.

    Returns None when the command has no $LISTOPT placeholder (i.e. it
    does not support listing).
    """
    if not "$LISTOPT" in cmd:
        return None
    return cmd.replace("$LISTOPT", "--list")


def expand_command_run(cmd, supports_loadfile, supports_idlist, subtests=None):
    """Expand a test command.

    :param cmd: Command to expand
    :param supports_loadfile: Whether command supports loadfile
    :param supports_idlist: Whether the command supports running specific
        subtests
    :param subtests: List of subtests to run - None for all subtests
    :return: Tuple with command to run and temporary file to remove after
        running (or None)
    """
    # Generate a file with the individual tests to run, if the
    # test runner for this test suite supports it.
    if subtests is None:
        # Run everything: just strip the placeholder.
        return (cmd.replace("$LOADLIST", ""), None)
    if supports_loadfile:
        # Write the subtest names to a temp file; the caller is
        # responsible for unlinking it after the run.
        (fd, listid_file) = tempfile.mkstemp()
        f = os.fdopen(fd, 'w')
        try:
            for test in subtests:
                f.write(test+"\n")
        finally:
            f.close()
        return (
            cmd.replace("$LOADLIST", "--load-list=%s" % listid_file),
            listid_file)
    elif supports_idlist:
        # Pass the subtest ids directly on the command line.
        cmd += " " + " ".join(subtests)
        return (cmd, None)
    else:
        # Neither mechanism available: warn and run the full suite.
        warnings.warn(
            "Running subtests requested, but command does not support "
            "this.")
        return (cmd, None)


def exported_envvars_str(vars, names):
    """Format the named variables present in vars as "NAME=value" lines."""
    out = ""
    for n in names:
        if not n in vars:
            continue
        out += "%s=%s\n" % (n, vars[n])
    return out


def now():
    """Return datetime instance for current time in UTC. """
    return datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())


def run_testsuite_command(name, cmd, subunit_ops, env=None, outf=None):
    """Run a testsuite command.

    :param name: Name of the testsuite
    :param cmd: Command to run
    :param subunit_ops: Subunit ops to use for reporting results
    :param env: Environment the test is run in
    :param outf: File-like object to write standard out to (defaults to
        sys.stdout)
    :return: Exit code or None if the test failed to run completely
    """
    if outf is None:
        outf = sys.stdout
    subunit_ops.start_testsuite(name)
    subunit_ops.progress(None, subunit.PROGRESS_PUSH)
    subunit_ops.time(now())
    try:
        exitcode = subprocess.call(cmd, shell=True, stdout=outf)
    except Exception, e:
        # Report the failure to launch through subunit, then bail out.
        subunit_ops.time(now())
        subunit_ops.progress(None, subunit.PROGRESS_POP)
        subunit_ops.end_testsuite(name, "error",
            "Unable to run %r: %s" % (cmd, e))
        return None
    subunit_ops.time(now())
    subunit_ops.progress(None, subunit.PROGRESS_POP)
    if env is not None:
        envlog = env.get_log()
        if envlog != "":
            outf.write("envlog: %s\n" % envlog)
    # Record both the template command and its environment-expanded form.
    outf.write("command: %s\n" % cmd)
    outf.write("expanded command: %s\n" % expand_environment_strings(cmd, os.environ))
    if exitcode == 0:
        subunit_ops.end_testsuite(name, "success")
    else:
        subunit_ops.end_testsuite(name, "failure",
            "Exit code was %d" % exitcode)
    return exitcode
n. 1. (Zool.) The European woodpecker, or yaffle; - called also nicker pecker.
# -----------------------------------------------------------------------------
# From Numpy to Python
# Copyright (2017) Nicolas P. Rougier - BSD license
# More information at https://github.com/rougier/numpy-book
# -----------------------------------------------------------------------------
# Animated Gray-Scott reaction-diffusion simulation: two chemical fields
# U and V evolve under diffusion (Du, Dv), a feed rate F and a kill rate k.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation

# Parameters from http://www.aliensaint.com/uo/java/rd/
# -----------------------------------------------------
n = 256  # interior grid size; arrays carry a 1-cell border for the Laplacian
# Du, Dv, F, k = 0.16, 0.08, 0.035, 0.065  # Bacteria 1
# Du, Dv, F, k = 0.14, 0.06, 0.035, 0.065  # Bacteria 2
# Du, Dv, F, k = 0.16, 0.08, 0.060, 0.062  # Coral
# Du, Dv, F, k = 0.19, 0.05, 0.060, 0.062  # Fingerprint
Du, Dv, F, k = 0.10, 0.10, 0.018, 0.050  # Spirals
# Du, Dv, F, k = 0.12, 0.08, 0.020, 0.050  # Spirals Dense
# Du, Dv, F, k = 0.10, 0.16, 0.020, 0.050  # Spirals Fast
# Du, Dv, F, k = 0.16, 0.08, 0.020, 0.055  # Unstable
# Du, Dv, F, k = 0.16, 0.08, 0.050, 0.065  # Worms 1
# Du, Dv, F, k = 0.16, 0.08, 0.054, 0.063  # Worms 2
# Du, Dv, F, k = 0.16, 0.08, 0.035, 0.060  # Zebrafish

# One structured array holds both fields; U/V are full (n+2)**2 views,
# u/v are views of the interior (no border), so writes propagate through.
Z = np.zeros((n+2, n+2), [('U', np.double), ('V', np.double)])
U, V = Z['U'], Z['V']
u, v = U[1:-1, 1:-1], V[1:-1, 1:-1]

# Initial state: U=1 everywhere, a perturbed square seed in the middle,
# plus a little uniform noise to break symmetry.
r = 20
u[...] = 1.0
U[n//2-r:n//2+r, n//2-r:n//2+r] = 0.50
V[n//2-r:n//2+r, n//2-r:n//2+r] = 0.25
u += 0.05*np.random.uniform(-1, +1, (n, n))
v += 0.05*np.random.uniform(-1, +1, (n, n))


def update(frame):
    # Advance 10 simulation steps per animation frame, then redraw V.
    global U, V, u, v, im
    for i in range(10):
        # 5-point Laplacians of U and V over the interior cells.
        Lu = (                 U[0:-2, 1:-1] +
              U[1:-1, 0:-2] - 4*U[1:-1, 1:-1] + U[1:-1, 2:] +
                               U[2:  , 1:-1])
        Lv = (                 V[0:-2, 1:-1] +
              V[1:-1, 0:-2] - 4*V[1:-1, 1:-1] + V[1:-1, 2:] +
                               V[2:  , 1:-1])
        # Gray-Scott reaction terms; u/v are views, so += updates U/V too.
        uvv = u*v*v
        u += (Du*Lu - uvv + F*(1-u))
        v += (Dv*Lv + uvv - (F+k)*v)

    im.set_data(V)
    # Rescale the colormap each frame so the pattern stays visible.
    im.set_clim(vmin=V.min(), vmax=V.max())


fig = plt.figure(figsize=(4, 4))
fig.add_axes([0.0, 0.0, 1.0, 1.0], frameon=False)
im = plt.imshow(V, interpolation='bicubic', cmap=plt.cm.viridis)
plt.xticks([]), plt.yticks([])
animation = FuncAnimation(fig, update, interval=10, frames=2000)
# animation.save('gray-scott-1.mp4', fps=40, dpi=80, bitrate=-1, codec="libx264",
#                extra_args=['-pix_fmt', 'yuv420p'],
#                metadata={'artist':'Nicolas P. Rougier'})
plt.show()
The Louisiana Department of Health is warning residents about the dangers of the improper use of neti pots. The warning follows the state's second death this year caused by Naegleria fowleri, the so-called brain-eating ameba. A 51-year-old DeSoto Parish woman died recently after using tap water in a neti pot to irrigate her sinuses and becoming infected with the deadly ameba. In June, a 20-year-old St. Bernard Parish man died under the same circumstances. Naegleria fowleri infects people by entering the body through the nose. A neti pot is commonly used to irrigate sinuses, and looks like a genie's lamp. "If you are irrigating, flushing, or rinsing your sinuses, for example, by using a neti pot, use distilled, sterile or previously boiled water to make up the irrigation solution," said Louisiana State Epidemiologist, Dr. Raoult Ratard. "Tap water is safe for drinking, but not for irrigating your nose." It's also important to rinse the irrigation device after each use and leave open to air dry. Naegleria fowleri infection typically occurs when people go swimming or diving in warm freshwater lakes and rivers. In very rare instances, Naegleria fowleri infections may also occur when contaminated water from other sources (such as inadequately chlorinated swimming pool water or heated tap water less than 116.6 degrees Fahrenheit) enters the nose when people submerge their heads or when people irrigate their sinuses with devices such as a neti pot. You cannot be infected with Naegleria fowleri by drinking water. Naegleria fowleri causes the disease primary amebic meningoencephalitis (PAM), a brain infection that leads to the destruction of brain tissue. In its early stages, symptoms of PAM may be similar to symptoms of bacterial meningitis. Initial symptoms of PAM start one to seven days after infection. The initial symptoms include headache, fever, nausea, vomiting, and stiff neck. 
Later symptoms include confusion, lack of attention to people and surroundings, loss of balance, seizures, and hallucinations. After the start of symptoms, the disease progresses rapidly and usually causes death within one to 12 days. Naegleria fowleri infections are very rare. In the 10 years from 2001 to 2010, 32 infections were reported in the U.S. Of those cases, 30 people were infected by contaminated recreational water and two people were infected by water from a geothermal drinking water supply. The Louisiana Department of Health strives to protect and promote health statewide and to ensure access to medical, preventive and rehabilitative services for all state citizens. To learn more about LDH, visit http://www.dhh.louisiana.gov. For up-to-date health information, news and emergency updates, follow LDH's blog, Twitter account and Facebook.
# coding=utf8
import logging

from handlers.base import BaseHandler
from models.playlist import PlaylistManager
from replay import get_provider, get_provider_list

logger = logging.getLogger('listenone.' + __name__)


class ShowPlaylistHandler(BaseHandler):
    """List the featured playlists of one music provider.

    Query args: source -- index into the provider list (default '0').
    """

    def get(self):
        source = self.get_argument('source', '0')
        provider_list = get_provider_list()
        index = int(source)
        if index >= 0 and index < len(provider_list):
            provider = provider_list[index]
            playlist = provider.list_playlist()
        else:
            # Out-of-range source index falls back to an empty listing.
            playlist = []
        result = dict(result=playlist)
        self.write(result)


class PlaylistHandler(BaseHandler):
    """Return one playlist with its tracks.

    list_ids prefixed with 'my_' are local user playlists; anything else
    is resolved through the matching remote provider.
    """

    def get(self):
        list_id = self.get_argument('list_id', '')
        if list_id.startswith('my_'):
            playlist = PlaylistManager.shared_instance().get_playlist(list_id)
            info = dict(
                cover_img_url=playlist['cover_img_url'],
                title=playlist['title'],
                id=playlist['id'])
            result = dict(
                status='1',
                tracks=playlist['tracks'],
                info=info,
                is_mine='1')
        else:
            # Remote ids carry the item id after the first underscore.
            provider = get_provider(list_id)
            item_id = list_id.split('_')[1]
            result = provider.get_playlist(item_id)
            result.update(dict(is_mine='0'))
        self.write(result)


class AddMyPlaylistHandler(BaseHandler):
    """Append one track (passed field by field) to a local playlist."""

    def post(self):
        list_id = self.get_argument('list_id', '')
        track_id = self.get_argument('id', '')
        title = self.get_argument('title', '')
        artist = self.get_argument('artist', '')
        url = self.get_argument('url', '')
        artist_id = self.get_argument('artist_id', '')
        album = self.get_argument('album', '')
        album_id = self.get_argument('album_id', '')
        source = self.get_argument('source', '')
        source_url = self.get_argument('source_url', '')

        track = {
            'id': track_id,
            'title': title,
            'artist': artist,
            'url': url,
            'artist_id': artist_id,
            'album': album,
            'album_id': album_id,
            'source': source,
            'source_url': source_url,
        }
        PlaylistManager.shared_instance().add_track_in_playlist(track, list_id)

        result = dict(result='success')
        self.write(result)


class CreateMyPlaylistHandler(BaseHandler):
    """Create a new local playlist titled list_title, seeded with one track."""

    def post(self):
        list_title = self.get_argument('list_title', '')
        track_id = self.get_argument('id', '')
        title = self.get_argument('title', '')
        artist = self.get_argument('artist', '')
        url = self.get_argument('url', '')
        artist_id = self.get_argument('artist_id', '')
        album = self.get_argument('album', '')
        album_id = self.get_argument('album_id', '')
        source = self.get_argument('source', '')
        source_url = self.get_argument('source_url', '')

        track = {
            'id': track_id,
            'title': title,
            'artist': artist,
            'url': url,
            'artist_id': artist_id,
            'album': album,
            'album_id': album_id,
            'source': source,
            'source_url': source_url,
        }
        newlist_id = PlaylistManager.shared_instance()\
            .create_playlist(list_title)
        PlaylistManager.shared_instance()\
            .add_track_in_playlist(track, newlist_id)

        result = dict(result='success')
        self.write(result)


class ShowMyPlaylistHandler(BaseHandler):
    """List all local ('my') playlists."""

    def get(self):
        resultlist = PlaylistManager.shared_instance().\
            list_playlist()
        result = dict(result=resultlist)
        self.write(result)


class ClonePlaylistHandler(BaseHandler):
    """Copy a provider album, artist or playlist into a new local playlist."""

    def post(self):
        list_id = self.get_argument('list_id', '')
        provider = get_provider(list_id)
        # list_id layout appears to be '<2-char source><kind>_<id>';
        # the kind after the first two chars selects the provider API.
        # TODO(review): confirm against get_provider's parsing.
        if list_id[2:].startswith('album'):
            album_id = list_id.split('_')[1]
            album = provider.get_album(album_id)
            tracks = album['tracks']
            info = album['info']
        elif list_id[2:].startswith('artist'):
            artist_id = list_id.split('_')[1]
            artist = provider.get_artist(artist_id)
            tracks = artist['tracks']
            info = artist['info']
        elif list_id[2:].startswith('playlist'):
            playlist_id = list_id.split('_')[1]
            playlist = provider.get_playlist(playlist_id)
            tracks = playlist['tracks']
            info = playlist['info']

        list_title = info['title']
        cover_img_url = info['cover_img_url']
        newlist_id = PlaylistManager.shared_instance()\
            .create_playlist(list_title, cover_img_url)
        # Copy the tracks one by one into the freshly created playlist.
        for track in tracks:
            PlaylistManager.shared_instance()\
                .add_track_in_playlist(track, newlist_id)

        result = dict(result='success')
        self.write(result)


class RemoveTrackHandler(BaseHandler):
    """Remove one track from a local playlist."""

    def post(self):
        track_id = self.get_argument('track_id', '')
        list_id = self.get_argument('list_id', '')
        PlaylistManager.shared_instance().remove_track_in_playlist(
            track_id, list_id)
        result = dict(result='success')
        # NOTE(review): this bare shared_instance() call has no visible
        # effect -- looks like a leftover; confirm before removing.
        PlaylistManager.shared_instance()
        self.write(result)


class RemoveMyPlaylistHandler(BaseHandler):
    """Delete a whole local playlist."""

    def post(self):
        list_id = self.get_argument('list_id', '')
        PlaylistManager.shared_instance().remove_playlist(list_id)
        result = dict(result='success')
        self.write(result)
You can easily save GIFs in your iPhone and iPad but you will find that it is not as simple to play GIFs in your iPad or iPhone. There are many jailbreak tweaks that let you play GIFs in your iOS devices such as iPhone and iPad. But, in this article we will discuss with you a method that will allow you to play GIFs in your iPhone/iPad without jailbreak. While you can save GIFs in Apple’s iPhone or iPad without any issues, you cannot play them by default in these devices. The format of GIF is different from simple image files. The Graphics Interchange Format (GIF) is an animated image without any sound. It seems like a short video that loads very fast. An iPhone/iPad user can save GIFs by following this simple method. Step 1: Choose a GIF to save it in your iPhone/iPad. You can also use an app called “Giphy” that lets you find and save GIFs. However, if you try to play these GIFs, they won’t play. On the contrary, you will only see a still from the GIF. Now that you have saved the GIFs in your iOS device, you would naturally want to play these GIFs as well. For that you need to download an app called GIFs Viewer. GIFs Viewer is easily available on App Store. Step 4: Import your saved GIFs to the GIFs Viewer app and it will play the GIFs without any problem. So, you see the method to play GIFs in your iPhone or iPad is quite simple. First save the GIFs and then import them to the app to play them. To upgrade, modify and bring useful and attractive changes in everything is part of the evolutionary process in the domain of technology. Everyone likes to save GIFs in their devices and show them to their friends later. There is another tool called Giffy that you can use to play GIFs on your iOS device. Giffy is, however, a jailbreak app and can only be used only by jailbreak users. It has many exciting features such as you can preview GIFs in your mail and messages. No need to get a third-party app from the app store. 
Similarly, with Giffy you don’t need to import the GIFs to any software. You can play GIFs right in the app you’re using at that time. GIF Viewer doesn’t require jailbreak. This means that you can play GIFs on your iOS 10 devices as well. This is one advantage of GIF Viewer that gives it an edge over Giffy. If you have jailbreak, go for Giffy by all means. But for those who want to play Gif without jailbreak, GIF Viewer is your tool.
import cv2
import numpy as np
import os


def read_ini(fpath):
    """Parse a simple key=value ini-style file into a dict.

    Values that look like integers are converted to int; everything else
    stays a (rstripped) string. Lines without '=' are ignored.
    """
    ret = {}
    with open(fpath, 'r') as f:
        for line in f:
            if '=' in line:
                k, v = line.split('=')
                v = v.rstrip()
                try:
                    v = int(v)
                except:
                    pass
                ret[k] = v
    return ret


def get_data(mot_pathes, part='train'):
    """Load MOT-style tracking datasets into per-video frame annotations.

    :param mot_pathes: list of dataset root directories, each containing a
        '<part>' subdirectory of sequence folders.
    :param part: 'train' (reads gt/gt.txt) or anything else (det/det.txt).
    :returns: (all_videos, classes_count, class_mapping) where all_videos is
        a list of {'video': [frame dicts], 'sampleprob': float/int}.
    """
    found_bg = False
    all_videos = []
    classes_count = {}
    class_mapping = {}
    visualise = False

    # Collect every sequence directory under each root's <part> folder,
    # skipping hidden entries.
    datasets = []
    for mot_path in mot_pathes:
        path = os.path.join(mot_path, part)
        datasets += [x for x in map(lambda x: os.path.join(path, x),
                                    os.listdir(path)) if not ('/.' in x)]

    print('Parsing annotation files')

    for dataset in datasets:
        # Per-sequence settings from seqinfo.ini, with fallbacks:
        # image extension (imExt includes a leading dot, hence [1:]),
        # sampling probability, and bbox coordinate format.
        try:
            form = read_ini(os.path.join(dataset, 'seqinfo.ini'))['imExt'][1:]
        except:
            form = 'jpg'
        try:
            sprob = read_ini(os.path.join(dataset, 'seqinfo.ini'))['sampleprob']
        except:
            sprob = 1
        coord_form = 'xywh'
        try:
            coord_form = read_ini(os.path.join(dataset, 'seqinfo.ini'))['coordform']
        except:
            pass

        # Frames are stored as img1/000001.<ext>, 1-based, zero-padded to 6.
        frame_path = lambda x: os.path.join(dataset, 'img1',
                                            str(x).zfill(6) + '.' + form)
        #print(frame_path)
        frames = {}
        last_frame = -1
        first_frame = 1e8
        if part == 'train':
            bfile = 'gt/gt.txt'
        else:
            bfile = 'det/det.txt'
        with open(os.path.join(dataset, bfile), 'r') as f:
            for line in f:
                line_split = line.strip().split(',')
                if part == 'train':
                    # Ground truth carries a class column; keep only
                    # classes 1, 2 and 7 (MOT person-like classes --
                    # TODO confirm against the dataset's class table).
                    try:
                        cls = int(line_split[6])
                    except:
                        print(line)
                        print(dataset)
                        raise
                    if cls not in [1, 2, 7]:
                        continue
                # Columns: frame index, (id skipped), x, y, w, h.
                try:
                    frameix, x1, y1, w, h = map(lambda x: int(float(x)),
                                                line_split[0:1] + line_split[2:6])
                except:
                    print(dataset, line)
                    raise
                if coord_form == 'xywh':
                    x2 = x1 + w
                    y2 = y1 + h
                else:
                    # Non-xywh files already store the far corner.
                    x2 = w
                    y2 = h
                class_name = 'bbox'
                last_frame = max(frameix, last_frame)
                first_frame = min(first_frame, frameix)
                if class_name not in classes_count:
                    classes_count[class_name] = 1
                else:
                    classes_count[class_name] += 1
                if class_name not in class_mapping:
                    class_mapping[class_name] = len(class_mapping)
                if not frameix in frames:
                    # First annotation for this frame: read the image once
                    # to record its dimensions.
                    frames[frameix] = {}
                    #print(frame_path.format(frameix))
                    img = cv2.imread(frame_path(frameix))
                    try:
                        (rows, cols) = img.shape[:2]
                    except:
                        print(frame_path(frameix), frameix)
                    frames[frameix]['filepath'] = frame_path(frameix)
                    frames[frameix]['width'] = cols
                    frames[frameix]['height'] = rows
                    frames[frameix]['bboxes'] = []
                frames[frameix]['bboxes'].append({'class': class_name,
                                                  'x1': int(x1),
                                                  'x2': int(x2),
                                                  'y1': int(y1),
                                                  'y2': int(y2)})

        # Assemble the frame dicts into a contiguous video; any gap in the
        # frame range invalidates the whole sequence.
        video = []
        break_flag = False
        for frameix in range(first_frame, last_frame+1):
            try:
                video.append(frames[frameix])
            except:
                print('Unable to fetch frames in {}, passing'.format(dataset))
                break_flag = True
                break
        if break_flag:
            continue
        all_videos.append({'video': video, 'sampleprob': sprob})

    return all_videos, classes_count, class_mapping
The Antelope of A. V. Reflections, 8 Volumes, 20 pgs. ea $2 ea. ALL PRICES INCLUDE SALES TAX AND ARE TAX DEDUCTIBLE. SHIPPING COSTS WILL BE ADDED.
import numpy as np
from numpy.testing import assert_array_equal
import os

from capitalization_train.evaluate import (eval_rule_based,
                                           is_consistent_prediction,
                                           eval_stat)
from nose.tools import (assert_equal,
                        assert_false,
                        assert_true)

# Directory of this test module; fixture paths are resolved against it.
CURDIR = os.path.dirname(os.path.realpath(__file__))


def test_eval_rule_based():
    """Rule-based output scored against the ok-formed reference docs."""
    result = eval_rule_based(
        CURDIR + '/data/rule_based_output.txt',
        okform_dir=CURDIR + '/data/docs_okformed/',
        accepted_labels=['AL', 'IC'])
    assert_array_equal(result, np.asarray([[10, 11, 10],
                                           [5, 5, 6]]))


def test_eval_stat():
    """Per-token statistics for one predicted/reference headline pair."""
    predicted = ["SuperGroup", "sales", "rebound", "over", "Christmas",
                 "to", "defy", "city", "EXPECTATIONS"]
    reference = ('SuperGroup sales rebound over Christmas '
                 'to defy City EXPECTATIONS').split()
    result = eval_stat(predicted, reference, accepted_labels=['AL', 'IC'])
    assert_array_equal(result, np.asarray([[5, 6, 5],
                                           [1, 1, 2]]))


def test_is_consistent_prediction():
    """Token lists must agree element-wise and in length."""
    assert_false(is_consistent_prediction(['A'], ['A', 'extra token']))
    assert_false(is_consistent_prediction(['A', 'B'], ['A', 'different']))
    assert_true(is_consistent_prediction(['A', 'B'], ['A', 'B']))
Win one of 10 copies of All You mag! Have you bought the latest issue of All You magazine, available at Walmart? If not, win one of 10 copies donated by All You that I’m giving away TODAY on BargainBabe.com. The current special edition issue (cover at right) is devoted to smart shopping. It is 116 pages, sells for $4.97, and has one coupon for $1 off Southern Living mag. (Earlier, I mistakenly said that it included many coupons). Inside you’ll find recipes for the ingredients you’ve been stocking up on, like fennel-apple salad and creamy ginger-parsnip soup. To win, comment with your best shopping secret to save money. Comment by 11:59 p.m. EST today, Sept. 30, 2011 to be eligible to win. Winners will be announced Monday. If you don’t win, print this to get All You for $1 off. You can also subscribe to All You for $20 a year through Amazon. I love to stack deals by using coupons on clearance items in nice department stores. It’s also great to scour thrift stores in nice neighborhoods. You can get great designer clothes for a fraction of the cost. My best saving idea is to stop getting the paper and reading the sale flyers. There is so much there that calls out to you that you really don’t need. Make a set of menus for 30 meals and a prepared grocery list of all items I use. Then when I go to the store for my weekly shopping trip – STICK TO THE LIST. Best shopping tip: go to the store with coupons in hand and look for those unadvertised deals! My tip is to shop grocery stores that double or triple coupons if you have them near you. Stockpile (with using coupons). I use credit cards for everything and then get cash back. My best strategy is really just to compare prices everywhere. As a student, I especially love to google ISBNs when it’s time to buy textbooks. Search results show the best comparison websites with the cheapest prices. I always click on at least 3 comparison websites because one always has a different bookseller that the other one doesn’t.
Make a list and plan your route before leaving home. Saves gas, money and time. One of my best grocery shopping tips is when whole chickens go on sale for cheap, grab a whole bunch and have the butcher cut them into 8ths for you. That way, you have cut up chicken and you don’t have to do it yourself! I buy a lot of stuff, even food, at dollar stores. I’m not sure if I trust some of the dairy products there but $1 for a loaf of bread or a box of crackers sure beats grocery store prices. Find out when the store does markdowns and go shopping on, or around that day. I shop using a combination of coupons and gift cards. I usually don’t spend a dime of my own money. Take advantage of late sale 2-for-1 books, still usually with two to three months worth of offers. I wait for double or triple coupon days @ my grocery stores, combining with valued customer cards. Resale stores and Internet sales help me with bigger ticket purchases. Don’t discount recycling for cash, especially of large appliances, cash for pounds. My best shopping “secret” now that I am an “empty nester” is to use up my ‘stash’ that I have been accumulating using store coupon, manufactures coupon on a clearanor sale item. Use it up and give my grocery budget a rest and save up for the next ’round’ of super saving when my stash is almost used up. The easiest way for me to save is simply don’t go to the store until I have a list of things that I really need.Stick to your shopping list while there and use coupons on things you were going to buy anyway. I collect & use coupons-specially on Sale events, try to stockpile, compare prices, go around, and digging through, on trash days to see what is usable. Meaning; for my family, for sale or donate to charity. NO, not ashamed for doing this! I spend more when I use a credit card. I am trying to use cash only when I buy something. Dollar earring stores! 
We have them here, necklaces, bracelets, earrings for $1, You can fix any of your own jewelry, update it, or buy and swap pieces together. Great if you want one-of-a-kind look and on the cheap. I bought a pendant at a bead shop; but used a necklace from $1 Earring store to put it together. My best shopping secret to save money is to wait for things to go on sale (if I really need it, a get a coupon and buy it) but otherwise I don’t buy things that are not on sale. I go to the store and I see what is on sale and I buy with manufacturer’s coupons, in-store coupons, and a if there’s any a mail-rebate. That’s how I get the most out of a deal and the cheaper way!!!! When I buy books, I take of the dust jacket until I’m finished reading the book. Then I re-sell the book on Ebay as “good” condition. I always get great feedback because the books ends up being almost like new; and I can read best sellers for little or no money. I rarely, if ever, shop without coupons or things being on sale. Watch the ads diligently. In doing so you can start to see price trends and know when it’s time to buy. Use coupons, sale papers, and discount stores! To save money, I buy discounted gift cards at plasticjungle.com and save before I even walk in the door! Use social deal aggregator sites like YipIt, Dealery and Deal Surf. Set your filters up so they only show you the kinds of deals you want and in the geographic areas you want. Saves you time sorting through the hundreds of deal sites out there and makes everything relevant for YOU. Find what you love in a store, and google to find it cheaper. I can almost always find cheaper prices for the same products online, then I google for promocodes to that website and almost always find a way to sweeten the already sweeter deal! I always ask for coupons at Walgreens. My Walgreens keeps a stock of coupons in the HBA section. Also Walgreens coupons may have an expire date, but often they work till the end of the month they expired in. 
I try to buy say from CVS on sale, while on sale get a extra bucks deal from CVS and then add a manufacturers coupon too. Very good savings! I make a list before I go and make sure I have coupons with me. I often do research online before bigger purchases to see where I can get it the cheapest. Like someone else said, I also sign up for many mailing lists, e-clubs, etc. of shops and restaurants that I frequent so I get coupons sent to me often. I also use either my rewards credit card or debit card so I’m earning cash back while spending. I am careful to “know” what is a good deal or only a so-so deal. So important, whether you are at a warehouse store, staring at a clearance shelf at a discount store, or cruising the grocery store ads for the week. I track the sales cycles on items, so I’m not tempted to stock up when I’m not getting a truly great deal. My best shopping secret to save money is to stay home!! YIKES!!! i pair coupons with sales to max savings as much as possible. also, i try to teach my husband that just because something is “on sale” doesn’t mean that it is the best deal possible. I always review all of the flyers for the week to check out the best deals. I also match up my list w/ the coupons/ matching coupons & extra bucks to save time in store. I love deals, so I’m never tempted by “quantity” offers that are frequently advertised, they rarely if ever are the best value. Also, know what you want and need, I only buy extras if in fact they are a deal and I can contribute to the local food pantry.. I DON’T look at the fliers because then I’m tempted to buy things I don’t really need. And I stick to my list. Most money is wasted not in paying regular price, but by impulse purchases. I look at all the blogs that found the deals. Stacking coupons when the item is on sale. Wherever we go, we bring a bag of all kinds of coupons (groceries, restaurants, bookstores, entertainment, etc) and membership cards with us. 
So whenever my family wanats to go shopping, watch movies, buy books, eat outside we have it all. We make sure that we search for coupons first before any penny comes out of our wallet. I stock up on the “on sale” staple products when I go to the store and usually the next week everything else I need is then on sale. Treats are only if I can get a two-fer. The only weekly items are produce, which I buy from a farmer’s market at a discount. My grocery bill is then cut in half. My shopping secret are not really secrets, because I’d happily share with anyone! Plan simple meals, that don’t require fancy ingredients, and cook bigger than you need for one meal, so you can freeze it for later, use the main dish again in another recipe, or give some away to a friend who trades you her meal as well. I have a friend who is a wonderful cook, and we trade half of our dinner for half of hers, a couple nights a month, and it saves an evening of cooking, and we each get a different meal. My shopping secret is never leave home without your coupons. I even have a holder for restuarant, and store coupons that I keep in my purse. Anytime I have a last minute errand, or a quick stop I always check my coupon holder. Last week, I saved $5 at Ace hardware, and for lunch B1g1 free lunch coupon… Always be prepared to save!!! Sometimes I even ive out coupons while Im shopping. Coupons are like children.. NEVER LEAVE HOME WITHOUT THEM!!!! One of my best shopping secrets is to not “go shopping” just to look around. If I don’t know it exists I can’t want to buy it. Saves a lot of money. I use coupons when the item is on sale, and I bring a calculator and figure out the price per oz in items and go with the cheapest. If I’m shopping online, I always search for coupon codes ahead of time, and look for free shipping offers (like on freeshipping.org). I also use comparison shopping websites if I know exactly what item I’m looking for, to get the best price. Never buy retail! It’s a sin! 
No kidding, if you knew how much markup goes on in grocery stores and department stores you would choke. I know that they still make money even when things go on sale. I buy on sale, and use coupons whenever possible. It’s a wonderful feeling! I always go thru the “repackaged” items in Target…usually socks, boxers and girls underwear….the package has been opened and oftentimes 1 item will be missing (like a pair of socks), but the other items in the multipack have been untouched–sometimes, people open packages to check for size. The price of these “repackaged” new items is really marked down, making them quite a bargain!! To save the very most money – window shop? My best bargain shopping tip is even if it’s a bargain, if you don’t need it don’t buy it!!! I use coupons all the time, stock up on the things we use that are on sale, and if I dont need it I dont buy it. I shop for meat at my local grocery stores early in the morning because I find a lot of the meat has been marked down. I freeze it to use at a later time. While I do most of the suggestions, my best strategy is to share with 2 other friends for bulk items. Recent example, I use maybe 20 packets of Equal a year. My 2 friends use waaay more. We buy 1 bulk box & split. Another benefit of this, we get together about once a month, catch up, share idea from recipes to cleaning. It is a fun and savings day. Shop the sales racks and plan ahead. Shop for summer clothes in the fall and winter clothes in the spring when they are on clearance. Take note of items you need to replace so you don’t purchase something you already have hanging in your closet. Use coupons on sales items to get great prices on nonperishables. Check out clearance items at the drug stores. I bring my coupon binder EVERYWHERE. I love to shop the reduced bins that are in the far corners of the stores and pull out my coupon matchups for rock bottom prices. One step further than comparing prices is to compare Unit Prices, when they’re shown. 
Sometimes the smaller box or can is cheaper! I keep coupons and organize them in a binder. But I carry it with me and only use the coupns that make an item almost free. If a generic item is less than a namebrand with one, buy the cheapest. I also hold onto my coupons until close yo expiration because kroger usually has their big sales then. I keep all of my Buy One Get one Free coupons separate, and look for the sales in the stores that are BOGO, and then get BOTH Items free!! The Store gives you one, and the manufacturer does as well. I save the most by making a grocery list and not deviating from it. Best shopping tip is to take all my coupons to the store and look for deals on unadvertised deals and then use them with doubles or items that are clearanced! I usually only stock up on stuff when its on sale and I have a coupon both (double the savings). Instead of just using a coupon w/o a sale or buying something on sale w/o a coupon. my best tip is to check online and compare store ads with the coupons you have, THEN make out your list. Sometimes you can save more than you think and get more items for the monthly menu plan! Of course number one is shop around. I shop at the military commissary and find a lot of good deals but sales at places like Walmart, Walgreens, or CVS really give me great savings. Sometimes it’s hard to hang onto a coupon in hopes of finding a better deal but it can be worth it. Barnes & Noble only accepts buy one get one free coupon if you are a B&N member. It’s a toss up between stacking coupons with a sale OR really reducing my time “window shopping” on line which results in far fewer purchases!!
'''
Take an Amazon order-history CSV and visualize your spending.
'''
import csv
from datetime import datetime

import numpy as np
import matplotlib.pyplot as plt

# --- Import the csv file --------------------------------------------------
dates = []
totals = []
with open('sampleHistory.csv') as csvfile:
    for row in csv.DictReader(csvfile, delimiter=','):
        dates.append(datetime.strptime(row['Order Date'], '%m/%d/%y'))
        # 'Item Total' is formatted like '$12.34'; drop the leading '$'.
        totals.append(float(row['Item Total'][1:]))

# --- Quantities of interest -----------------------------------------------
cumtotal = np.cumsum(totals)

# --- Visualize the data ---------------------------------------------------
fig, ax = plt.subplots(2)

# Distribution of purchase sizes.
ax[0].hist(totals, 5, facecolor='green', alpha=0.75)
ax[0].set_ylabel("# of Purchases")
ax[0].set_xlabel("Dollar Amount")

# Cumulative total over time.
ax[1].plot(dates, cumtotal)
ax[1].set_ylabel("Cumulative Spend ($)")

# Rotate the date tick labels so they do not overlap.
plt.setp(plt.xticks()[1], rotation=30, ha='right')
plt.tight_layout()  # moves axis edges so labels are not clipped
plt.show()
This page displays upcoming concerts scheduled in Tampa and the surrounding area. This guide includes national and regional acts playing at local venues. Use the drop-down to view concerts scheduled in other cities and states. These concert listings are updated regularly so that you don't miss any of the shows happening in the Tampa area! You can even find tickets to most of these concerts by clicking on the "View Tickets" link (note: you will be taken to a ticket reseller site where prices may be higher or lower than face value). Check out the schedule and see if your favorite artist is coming to a location near you!!
"""Youku Open API V2 Python Client doc: http://open.youku.com/docs/tech_doc.html """ import requests from .util import check_error, remove_none_value class YoukuVideos(object): """Youku Videos API. doc:http://open.youku.com/docs/api_videos.html """ def __init__(self, client_id): super(YoukuVideos, self).__init__() self.client_id = client_id def find_video_by_id(self, video_id): """doc: http://open.youku.com/docs/doc?id=44 """ url = 'https://openapi.youku.com/v2/videos/show_basic.json' params = { 'client_id': self.client_id, 'video_id': video_id } r = requests.get(url, params=params) check_error(r) return r.json() def find_video_by_url(self, video_url): """doc: http://open.youku.com/docs/doc?id=44 """ url = 'https://openapi.youku.com/v2/videos/show_basic.json' params = { 'client_id': self.client_id, 'video_url': video_url } r = requests.get(url, params=params) check_error(r) return r.json() def find_videos_by_ids(self, video_ids): """doc: http://open.youku.com/docs/doc?id=45 """ url = 'https://openapi.youku.com/v2/videos/show_basic_batch.json' params = { 'client_id': self.client_id, 'video_ids': video_ids } r = requests.get(url, params=params) check_error(r) return r.json() def find_video_detail_by_id(self, video_id, ext=None): """doc: http://cloud.youku.com/docs?id=46 """ url = 'https://api.youku.com/videos/show.json' params = { 'client_id': self.client_id, 'video_id': video_id } if ext: params['ext'] = ext r = requests.get(url, params=params) check_error(r) return r.json() def find_video_details_by_ids(self, video_ids, ext=None): """doc: http://open.youku.com/docs/doc?id=47 """ url = 'https://openapi.youku.com/v2/videos/show_batch.json' params = { 'client_id': self.client_id, 'video_ids': video_ids } if ext: params['ext'] = ext r = requests.get(url, params=params) check_error(r) return r.json() def find_videos_by_me(self, access_token, orderby='published', page=1, count=20): """doc: http://cloud.youku.com/docs?id=48 """ url = 
'https://api.youku.com/videos/by_me.json' params = { 'client_id': self.client_id, 'access_token': access_token, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def find_videos_by_userid(self, user_id, orderby='published', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=49 """ url = 'https://openapi.youku.com/v2/videos/by_user.json' params = { 'client_id': self.client_id, 'user_id': user_id, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def find_videos_by_username(self, user_name, orderby='published', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=49 """ url = 'https://openapi.youku.com/v2/videos/by_user.json' params = { 'client_id': self.client_id, 'user_name': user_name, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def update_video(self, access_token, video_id, title=None, tags=None, category=None, copyright_type=None, public_type=None, watch_password=None, description=None, thumbnail_seq=None): """doc: http://open.youku.com/docs/doc?id=50 """ url = 'https://openapi.youku.com/v2/videos/update.json' data = { 'client_id': self.client_id, 'access_token': access_token, 'video_id': video_id, 'title': title, 'tags': tags, 'category': category, 'copyright_type': copyright_type, 'public_type': public_type, 'watch_password': watch_password, 'description': description, 'thumbnail_seq': thumbnail_seq } data = remove_none_value(data) r = requests.post(url, data=data) check_error(r) return r.json()['id'] def destroy_video(self, access_token, video_id): """doc: http://open.youku.com/docs/doc?id=51 """ url = 'https://openapi.youku.com/v2/videos/destroy.json' data = { 'client_id': self.client_id, 'access_token': access_token, 'video_id': video_id } r = requests.post(url, data=data) check_error(r) return r.json()['id'] def 
find_videos_by_related(self, video_id, count=20): """doc: http://open.youku.com/docs/doc?id=52 """ url = 'https://openapi.youku.com/v2/videos/by_related.json' params = { 'client_id': self.client_id, 'video_id': video_id, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def find_favorite_videos_by_me(self, access_token, orderby='favorite-time', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=53 """ url = 'https://openapi.youku.com/v2/videos/favorite/by_me.json' params = { 'client_id': self.client_id, 'access_token': access_token, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def find_favorite_videos_by_userid(self, user_id, orderby='favorite-time', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=54 """ url = 'https://openapi.youku.com/v2/videos/favorite/by_user.json' params = { 'client_id': self.client_id, 'user_id': user_id, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def find_favorite_videos_by_username(self, user_name, orderby='favorite-time', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=54 """ url = 'https://openapi.youku.com/v2/videos/favorite/by_user.json' params = { 'client_id': self.client_id, 'user_name': user_name, 'orderby': orderby, 'page': page, 'count': count } r = requests.get(url, params=params) check_error(r) return r.json() def create_favorite_video(self, access_token, video_id): """doc: http://open.youku.com/docs/doc?id=55 """ url = 'https://openapi.youku.com/v2/videos/favorite/create.json' data = { 'client_id': self.client_id, 'access_token': access_token, 'video_id': video_id } r = requests.post(url, data=data) check_error(r) return r.json()['id'] def destroy_favorite_video(self, access_token, video_id): """doc: http://open.youku.com/docs/doc?id=56 """ url = 'https://openapi.youku.com/v2/videos/favorite/destroy.json' data = 
{ 'client_id': self.client_id, 'access_token': access_token, 'video_id': video_id } r = requests.post(url, data=data) check_error(r) return r.json()['id'] def find_videos_by_category(self, category, genre=None, period='today', orderby='view-count', page=1, count=20): """doc: http://open.youku.com/docs/doc?id=57 """ url = 'https://openapi.youku.com/v2/videos/by_category.json' params = { 'client_id': self.client_id, 'category': category, 'period': period, 'orderby': orderby, 'page': page, 'count': count } if genre: params['genre'] = genre r = requests.get(url, params=params) check_error(r) return r.json()
SBMC engages in a variety of initiatives to increase awareness of patent law and to promote principles of diversity and inclusion. Diversity in Technology and IP Law Scholarship and Internship – Powered by eBay and SBMC | SBMC is collaborating with eBay and Gonzaga Law on an award that combines scholarship support from the law school with a stipend from SBMC for a combined summer internship at SBMC and eBay. The award is established to create diversity and inclusion-focused opportunities in technology and intellectual property law for students at Gonzaga Law by providing these students with a unique blend of tuition assistance and practical, real-world experience through partners committed to improving efforts in diversity and inclusion both at the law school and also in the legal community. Link to apply here. Diversity and Inclusion Internships | We partner with Georgia Tech to support alternating semesters between university study at the school and our office to learn the nuances of the profession. Expanding Awareness of the Profession | Attorneys at SBMC host events and go out into the community to deliver presentations to undergraduate and high school students to increase awareness about careers in patent law.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from migrate.changeset import UniqueConstraint
from sqlalchemy import Table, Column, Index, ForeignKey, MetaData
from sqlalchemy import DateTime, Integer, String, Text

from ironic.openstack.common import log as logging

LOG = logging.getLogger(__name__)

# MySQL-specific table options; ignored by other backends.
ENGINE = 'InnoDB'
CHARSET = 'utf8'


def upgrade(migrate_engine):
    """Create the initial ``nodes`` and ``ifaces`` tables.

    Also creates the lookup indexes and uniqueness constraints on
    backends that support creating them separately (MySQL, PostgreSQL).
    """
    meta = MetaData()
    meta.bind = migrate_engine

    nodes = Table('nodes', meta,
                  Column('id', Integer, primary_key=True, nullable=False),
                  Column('uuid', String(length=36)),
                  Column('power_info', Text),
                  Column('cpu_arch', String(length=10)),
                  Column('cpu_num', Integer),
                  Column('memory', Integer),
                  Column('local_storage_max', Integer),
                  Column('task_state', String(length=255)),
                  Column('image_path', String(length=255), nullable=True),
                  Column('instance_uuid', String(length=36), nullable=True),
                  Column('instance_name', String(length=255), nullable=True),
                  Column('extra', Text),
                  Column('created_at', DateTime),
                  Column('updated_at', DateTime),
                  mysql_engine=ENGINE,
                  mysql_charset=CHARSET,
                  )

    ifaces = Table('ifaces', meta,
                   Column('id', Integer, primary_key=True, nullable=False),
                   Column('address', String(length=18)),
                   Column('node_id', Integer, ForeignKey('nodes.id'),
                          nullable=True),
                   Column('extra', Text),
                   Column('created_at', DateTime),
                   Column('updated_at', DateTime),
                   mysql_engine=ENGINE,
                   mysql_charset=CHARSET,
                   )

    tables = [nodes, ifaces]

    for table in tables:
        try:
            table.create()
        except Exception:
            LOG.info(repr(table))
            # Bug fix: Logger has no 'Exception' attribute; the old
            # LOG.Exception(...) raised AttributeError instead of
            # logging the traceback before re-raising.
            # NOTE(review): assumes the gettext '_' builtin is installed
            # by openstack common at startup -- confirm.
            LOG.exception(_('Exception while creating table.'))
            raise

    indexes = [
        Index('node_cpu_mem_disk', nodes.c.cpu_num,
              nodes.c.memory, nodes.c.local_storage_max),
        Index('node_instance_uuid', nodes.c.instance_uuid),
    ]

    uniques = [
        UniqueConstraint('uuid', table=nodes,
                         name='node_uuid_ux'),
        UniqueConstraint('address', table=ifaces,
                         name='iface_address_ux'),
    ]

    if migrate_engine.name in ('mysql', 'postgresql'):
        for index in indexes:
            index.create(migrate_engine)
        for constraint in uniques:
            constraint.create(migrate_engine)


def downgrade(migrate_engine):
    """Downgrading this initial migration is not supported."""
    raise NotImplementedError('Downgrade from Folsom is unsupported.')
We're Always Open For Worship. Weekend worship services will always be held as scheduled, regardless of weather conditions. Your safety is very important to us - if you do not feel comfortable driving, please stay home and join us for our online livestream at 11:00 am. Call 515-422-9101 for pre-recorded information. Or check Facebook for your campus for latest updates.
# Generated by Django 2.2.10 on 2020-04-04 13:36

from django.db import migrations
from itertools import chain
from utils.data_migrations import stream_field_filter_map


def imageicon_to_columns(block):
    """Convert a legacy image-icon block into an equivalent two-column block.

    The image goes in one 6-wide column and the heading plus icon list in
    the other; ``image_alignment`` decides which side the image lands on.
    """
    image_column = {
        'width': 6,
        'content': [{
            'type': 'image',
            'value': {
                'image': block['value']['image'],
                'height': 400,
            },
        }],
    }
    text_column = {
        'width': 6,
        'content': [
            {
                'type': 'heading',
                'value': {'title': block['value']['title']},
            },
            {
                'type': 'icons',
                'value': {
                    'icons': [
                        {
                            'title': icon['title'],
                            'subtitle': icon['description'],
                            'icon': icon['icon'],
                        }
                        for icon in block['value']['icons']
                    ]
                },
            },
        ],
    }

    if block['value']['image_alignment'] == "left":
        columns = [image_column, text_column]
    else:
        columns = [text_column, image_column]

    return {
        'type': 'columns',
        'value': {'columns': columns},
    }


def apply_to_all_pages(apps, mapper):
    """Run *mapper* over the matching stream-field blocks of every page."""
    HomePage = apps.get_model('home', 'HomePage')
    WebPage = apps.get_model('home', 'WebPage')
    hps = HomePage.objects.all()
    wps = WebPage.objects.all()
    for obj in chain(hps, wps):
        # There is a long-standing mistake that image-icons and
        # image-description have swapped tags in the database.
        obj.body_en = stream_field_filter_map(obj.body_en,
                                              "image_description", mapper)
        obj.body_sv = stream_field_filter_map(obj.body_sv,
                                              "image_description", mapper)
        obj.save()


def forwards(apps, schema_editor):
    apply_to_all_pages(apps, imageicon_to_columns)


class Migration(migrations.Migration):

    dependencies = [
        ('home', '0039_auto_20200404_1529'),
    ]

    operations = [
        migrations.RunPython(forwards)
    ]
Pirated Software Hurts Software Developers. Using Flash Slideshow Maker Avi Free Download crack, warez, password, serial numbers, torrent, keygen, registration codes, key generators is illegal and your business could subject you to lawsuits and leave your operating systems without patches. We do not host any torrent files or links of Flash Slideshow Maker Avi from depositfiles.com, rapidshare.com, any file sharing sites. All download links are direct full download from publisher sites or their selected mirrors. Avoid: oem software, old version, warez, serial, torrent, keygen, crack of Flash Slideshow Maker Avi. Consider: Flash Slideshow Maker Avi full version, full download, premium download, licensed copy.
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tests for swift.common.storage_policies """ import contextlib import six import logging import unittest import os import mock from functools import partial from six.moves.configparser import ConfigParser from tempfile import NamedTemporaryFile from test.unit import patch_policies, FakeRing, temptree, DEFAULT_TEST_EC_TYPE import swift.common.storage_policy from swift.common.storage_policy import ( StoragePolicyCollection, POLICIES, PolicyError, parse_storage_policies, reload_storage_policies, get_policy_string, split_policy_string, BaseStoragePolicy, StoragePolicy, ECStoragePolicy, REPL_POLICY, EC_POLICY, VALID_EC_TYPES, DEFAULT_EC_OBJECT_SEGMENT_SIZE, BindPortsCache) from swift.common.ring import RingData from swift.common.exceptions import RingLoadError from pyeclib.ec_iface import ECDriver class CapturingHandler(logging.Handler): def __init__(self): super(CapturingHandler, self).__init__() self._records = [] def emit(self, record): self._records.append(record) @contextlib.contextmanager def capture_logging(log_name): captured = CapturingHandler() logger = logging.getLogger(log_name) logger.addHandler(captured) try: yield captured._records finally: logger.removeHandler(captured) @BaseStoragePolicy.register('fake') class FakeStoragePolicy(BaseStoragePolicy): """ Test StoragePolicy class - the only user at the moment is test_validate_policies_type_invalid() """ def __init__(self, idx, name='', is_default=False, 
is_deprecated=False, object_ring=None): super(FakeStoragePolicy, self).__init__( idx, name, is_default, is_deprecated, object_ring) class TestStoragePolicies(unittest.TestCase): def _conf(self, conf_str): conf_str = "\n".join(line.strip() for line in conf_str.split("\n")) if six.PY2: conf = ConfigParser() else: conf = ConfigParser(strict=False) conf.readfp(six.StringIO(conf_str)) return conf def assertRaisesWithMessage(self, exc_class, message, f, *args, **kwargs): try: f(*args, **kwargs) except exc_class as err: err_msg = str(err) self.assertTrue(message in err_msg, 'Error message %r did not ' 'have expected substring %r' % (err_msg, message)) else: self.fail('%r did not raise %s' % (message, exc_class.__name__)) def test_policy_baseclass_instantiate(self): self.assertRaisesWithMessage(TypeError, "Can't instantiate BaseStoragePolicy", BaseStoragePolicy, 1, 'one') @patch_policies([ StoragePolicy(0, 'zero', is_default=True), StoragePolicy(1, 'one'), StoragePolicy(2, 'two'), StoragePolicy(3, 'three', is_deprecated=True), ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=4), ]) def test_swift_info(self): # the deprecated 'three' should not exist in expect expect = [{'aliases': 'zero', 'default': True, 'name': 'zero', }, {'aliases': 'two', 'name': 'two'}, {'aliases': 'one', 'name': 'one'}, {'aliases': 'ten', 'name': 'ten'}] swift_info = POLICIES.get_policy_info() self.assertEqual(sorted(expect, key=lambda k: k['name']), sorted(swift_info, key=lambda k: k['name'])) @patch_policies def test_get_policy_string(self): self.assertEqual(get_policy_string('something', 0), 'something') self.assertEqual(get_policy_string('something', None), 'something') self.assertEqual(get_policy_string('something', ''), 'something') self.assertEqual(get_policy_string('something', 1), 'something' + '-1') self.assertRaises(PolicyError, get_policy_string, 'something', 99) @patch_policies def test_split_policy_string(self): expectations = { 'something': ('something', 
POLICIES[0]), 'something-1': ('something', POLICIES[1]), 'tmp': ('tmp', POLICIES[0]), 'objects': ('objects', POLICIES[0]), 'tmp-1': ('tmp', POLICIES[1]), 'objects-1': ('objects', POLICIES[1]), 'objects-': PolicyError, 'objects-0': PolicyError, 'objects--1': ('objects-', POLICIES[1]), 'objects-+1': PolicyError, 'objects--': PolicyError, 'objects-foo': PolicyError, 'objects--bar': PolicyError, 'objects-+bar': PolicyError, # questionable, demonstrated as inverse of get_policy_string 'objects+0': ('objects+0', POLICIES[0]), '': ('', POLICIES[0]), '0': ('0', POLICIES[0]), '-1': ('', POLICIES[1]), } for policy_string, expected in expectations.items(): if expected == PolicyError: try: invalid = split_policy_string(policy_string) except PolicyError: continue # good else: self.fail('The string %r returned %r ' 'instead of raising a PolicyError' % (policy_string, invalid)) self.assertEqual(expected, split_policy_string(policy_string)) # should be inverse of get_policy_string self.assertEqual(policy_string, get_policy_string(*expected)) def test_defaults(self): self.assertGreater(len(POLICIES), 0) # test class functions default_policy = POLICIES.default self.assertTrue(default_policy.is_default) zero_policy = POLICIES.get_by_index(0) self.assertTrue(zero_policy.idx == 0) zero_policy_by_name = POLICIES.get_by_name(zero_policy.name) self.assertTrue(zero_policy_by_name.idx == 0) def test_storage_policy_repr(self): test_policies = [StoragePolicy(0, 'aay', True), StoragePolicy(1, 'bee', False), StoragePolicy(2, 'cee', False), ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3), ECStoragePolicy(11, 'eleven', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3, ec_duplication_factor=2)] policies = StoragePolicyCollection(test_policies) for policy in policies: policy_repr = repr(policy) self.assertTrue(policy.__class__.__name__ in policy_repr) self.assertTrue('is_default=%s' % policy.is_default in policy_repr) self.assertTrue('is_deprecated=%s' % 
policy.is_deprecated in policy_repr) self.assertTrue(policy.name in policy_repr) if policy.policy_type == EC_POLICY: self.assertTrue('ec_type=%s' % policy.ec_type in policy_repr) self.assertTrue('ec_ndata=%s' % policy.ec_ndata in policy_repr) self.assertTrue('ec_nparity=%s' % policy.ec_nparity in policy_repr) self.assertTrue('ec_segment_size=%s' % policy.ec_segment_size in policy_repr) if policy.ec_duplication_factor > 1: self.assertTrue('ec_duplication_factor=%s' % policy.ec_duplication_factor in policy_repr) collection_repr = repr(policies) collection_repr_lines = collection_repr.splitlines() self.assertTrue( policies.__class__.__name__ in collection_repr_lines[0]) self.assertEqual(len(policies), len(collection_repr_lines[1:-1])) for policy, line in zip(policies, collection_repr_lines[1:-1]): self.assertTrue(repr(policy) in line) with patch_policies(policies): self.assertEqual(repr(POLICIES), collection_repr) def test_validate_policies_defaults(self): # 0 explicit default test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False), StoragePolicy(2, 'two', False)] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.default, test_policies[0]) self.assertEqual(policies.default.name, 'zero') # non-zero explicit default test_policies = [StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', False), StoragePolicy(2, 'two', True)] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.default, test_policies[2]) self.assertEqual(policies.default.name, 'two') # multiple defaults test_policies = [StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', True), StoragePolicy(2, 'two', True)] self.assertRaisesWithMessage( PolicyError, 'Duplicate default', StoragePolicyCollection, test_policies) # nothing specified test_policies = [] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.default, policies[0]) self.assertEqual(policies.default.name, 'Policy-0') # no default specified with 
only policy index 0 test_policies = [StoragePolicy(0, 'zero')] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.default, policies[0]) # no default specified with multiple policies test_policies = [StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', False), StoragePolicy(2, 'two', False)] self.assertRaisesWithMessage( PolicyError, 'Unable to find default policy', StoragePolicyCollection, test_policies) def test_deprecate_policies(self): # deprecation specified test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False), StoragePolicy(2, 'two', False, is_deprecated=True)] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.default, test_policies[0]) self.assertEqual(policies.default.name, 'zero') self.assertEqual(len(policies), 3) # multiple policies requires default test_policies = [StoragePolicy(0, 'zero', False), StoragePolicy(1, 'one', False, is_deprecated=True), StoragePolicy(2, 'two', False)] self.assertRaisesWithMessage( PolicyError, 'Unable to find default policy', StoragePolicyCollection, test_policies) def test_validate_policies_indexes(self): # duplicate indexes test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False), StoragePolicy(1, 'two', False)] self.assertRaises(PolicyError, StoragePolicyCollection, test_policies) def test_validate_policy_params(self): StoragePolicy(0, 'name') # sanity # bogus indexes self.assertRaises(PolicyError, FakeStoragePolicy, 'x', 'name') self.assertRaises(PolicyError, FakeStoragePolicy, -1, 'name') # non-zero Policy-0 self.assertRaisesWithMessage(PolicyError, 'reserved', FakeStoragePolicy, 1, 'policy-0') # deprecate default self.assertRaisesWithMessage( PolicyError, 'Deprecated policy can not be default', FakeStoragePolicy, 1, 'Policy-1', is_default=True, is_deprecated=True) # weird names names = ( '', 'name_foo', 'name\nfoo', 'name foo', u'name \u062a', 'name \xd8\xaa', ) for name in names: 
self.assertRaisesWithMessage(PolicyError, 'Invalid name', FakeStoragePolicy, 1, name) def test_validate_policies_names(self): # duplicate names test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'zero', False), StoragePolicy(2, 'two', False)] self.assertRaises(PolicyError, StoragePolicyCollection, test_policies) def test_validate_policies_type_default(self): # no type specified - make sure the policy is initialized to # DEFAULT_POLICY_TYPE test_policy = FakeStoragePolicy(0, 'zero', True) self.assertEqual(test_policy.policy_type, 'fake') def test_validate_policies_type_invalid(self): class BogusStoragePolicy(FakeStoragePolicy): policy_type = 'bogus' # unsupported policy type - initialization with FakeStoragePolicy self.assertRaisesWithMessage(PolicyError, 'Invalid type', BogusStoragePolicy, 1, 'one') def test_policies_type_attribute(self): test_policies = [ StoragePolicy(0, 'zero', is_default=True), StoragePolicy(1, 'one'), StoragePolicy(2, 'two'), StoragePolicy(3, 'three', is_deprecated=True), ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3), ] policies = StoragePolicyCollection(test_policies) self.assertEqual(policies.get_by_index(0).policy_type, REPL_POLICY) self.assertEqual(policies.get_by_index(1).policy_type, REPL_POLICY) self.assertEqual(policies.get_by_index(2).policy_type, REPL_POLICY) self.assertEqual(policies.get_by_index(3).policy_type, REPL_POLICY) self.assertEqual(policies.get_by_index(10).policy_type, EC_POLICY) def test_names_are_normalized(self): test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'ZERO', False)] self.assertRaises(PolicyError, StoragePolicyCollection, test_policies) policies = StoragePolicyCollection([StoragePolicy(0, 'zEro', True), StoragePolicy(1, 'One', False)]) pol0 = policies[0] pol1 = policies[1] for name in ('zero', 'ZERO', 'zErO', 'ZeRo'): self.assertEqual(pol0, policies.get_by_name(name)) self.assertEqual(policies.get_by_name(name).name, 'zEro') for name in 
('one', 'ONE', 'oNe', 'OnE'): self.assertEqual(pol1, policies.get_by_name(name)) self.assertEqual(policies.get_by_name(name).name, 'One') def test_multiple_names(self): # checking duplicate on insert test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False, aliases='zero')] self.assertRaises(PolicyError, StoragePolicyCollection, test_policies) # checking correct retrival using other names test_policies = [StoragePolicy(0, 'zero', True, aliases='cero, kore'), StoragePolicy(1, 'one', False, aliases='uno, tahi'), StoragePolicy(2, 'two', False, aliases='dos, rua')] policies = StoragePolicyCollection(test_policies) for name in ('zero', 'cero', 'kore'): self.assertEqual(policies.get_by_name(name), test_policies[0]) for name in ('two', 'dos', 'rua'): self.assertEqual(policies.get_by_name(name), test_policies[2]) # Testing parsing of conf files/text good_conf = self._conf(""" [storage-policy:0] name = one aliases = uno, tahi default = yes """) policies = parse_storage_policies(good_conf) self.assertEqual(policies.get_by_name('one'), policies[0]) self.assertEqual(policies.get_by_name('one'), policies.get_by_name('tahi')) name_repeat_conf = self._conf(""" [storage-policy:0] name = one aliases = one default = yes """) # Test on line below should not generate errors. Repeat of main # name under aliases is permitted during construction # but only because automated testing requires it. 
policies = parse_storage_policies(name_repeat_conf) extra_commas_conf = self._conf(""" [storage-policy:0] name = one aliases = ,,one, , default = yes """) # Extra blank entries should be silently dropped policies = parse_storage_policies(extra_commas_conf) bad_conf = self._conf(""" [storage-policy:0] name = one aliases = uno, uno default = yes """) self.assertRaisesWithMessage(PolicyError, 'is already assigned to this policy', parse_storage_policies, bad_conf) def test_multiple_names_EC(self): # checking duplicate names on insert test_policies_ec = [ ECStoragePolicy( 0, 'ec8-2', aliases='zeus, jupiter', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=8, ec_nparity=2, object_ring=FakeRing(replicas=8), is_default=True), ECStoragePolicy( 1, 'ec10-4', aliases='ec8-2', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=4, object_ring=FakeRing(replicas=10))] self.assertRaises(PolicyError, StoragePolicyCollection, test_policies_ec) # checking correct retrival using other names good_test_policies_EC = [ ECStoragePolicy(0, 'ec8-2', aliases='zeus, jupiter', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=8, ec_nparity=2, object_ring=FakeRing(replicas=10), is_default=True), ECStoragePolicy(1, 'ec10-4', aliases='athena, minerva', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=4, object_ring=FakeRing(replicas=14)), ECStoragePolicy(2, 'ec4-2', aliases='poseidon, neptune', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4, ec_nparity=2, object_ring=FakeRing(replicas=6)), ECStoragePolicy(3, 'ec4-2-dup', aliases='uzuki, rin', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4, ec_nparity=2, ec_duplication_factor=2, object_ring=FakeRing(replicas=12)), ] ec_policies = StoragePolicyCollection(good_test_policies_EC) for name in ('ec8-2', 'zeus', 'jupiter'): self.assertEqual(ec_policies.get_by_name(name), ec_policies[0]) for name in ('ec10-4', 'athena', 'minerva'): self.assertEqual(ec_policies.get_by_name(name), ec_policies[1]) for name in ('ec4-2', 'poseidon', 'neptune'): 
self.assertEqual(ec_policies.get_by_name(name), ec_policies[2]) for name in ('ec4-2-dup', 'uzuki', 'rin'): self.assertEqual(ec_policies.get_by_name(name), ec_policies[3]) # Testing parsing of conf files/text good_ec_conf = self._conf(""" [storage-policy:0] name = ec8-2 aliases = zeus, jupiter policy_type = erasure_coding ec_type = %(ec_type)s default = yes ec_num_data_fragments = 8 ec_num_parity_fragments = 2 [storage-policy:1] name = ec10-4 aliases = poseidon, neptune policy_type = erasure_coding ec_type = %(ec_type)s ec_num_data_fragments = 10 ec_num_parity_fragments = 4 [storage-policy:2] name = ec4-2-dup aliases = uzuki, rin policy_type = erasure_coding ec_type = %(ec_type)s ec_num_data_fragments = 4 ec_num_parity_fragments = 2 ec_duplication_factor = 2 """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) ec_policies = parse_storage_policies(good_ec_conf) self.assertEqual(ec_policies.get_by_name('ec8-2'), ec_policies[0]) self.assertEqual(ec_policies.get_by_name('ec10-4'), ec_policies.get_by_name('poseidon')) self.assertEqual(ec_policies.get_by_name('ec4-2-dup'), ec_policies.get_by_name('uzuki')) name_repeat_ec_conf = self._conf(""" [storage-policy:0] name = ec8-2 aliases = ec8-2 policy_type = erasure_coding ec_type = %(ec_type)s default = yes ec_num_data_fragments = 8 ec_num_parity_fragments = 2 """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) # Test on line below should not generate errors. Repeat of main # name under aliases is permitted during construction # but only because automated testing requires it. 
ec_policies = parse_storage_policies(name_repeat_ec_conf) bad_ec_conf = self._conf(""" [storage-policy:0] name = ec8-2 aliases = zeus, zeus policy_type = erasure_coding ec_type = %(ec_type)s default = yes ec_num_data_fragments = 8 ec_num_parity_fragments = 2 """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) self.assertRaisesWithMessage(PolicyError, 'is already assigned to this policy', parse_storage_policies, bad_ec_conf) def test_add_remove_names(self): test_policies = [StoragePolicy(0, 'zero', True), StoragePolicy(1, 'one', False), StoragePolicy(2, 'two', False)] policies = StoragePolicyCollection(test_policies) # add names policies.add_policy_alias(1, 'tahi') self.assertEqual(policies.get_by_name('tahi'), test_policies[1]) policies.add_policy_alias(2, 'rua', 'dos') self.assertEqual(policies.get_by_name('rua'), test_policies[2]) self.assertEqual(policies.get_by_name('dos'), test_policies[2]) self.assertRaisesWithMessage(PolicyError, 'Invalid name', policies.add_policy_alias, 2, 'double\n') self.assertRaisesWithMessage(PolicyError, 'Invalid name', policies.add_policy_alias, 2, '') # try to add existing name self.assertRaisesWithMessage(PolicyError, 'Duplicate name', policies.add_policy_alias, 2, 'two') self.assertRaisesWithMessage(PolicyError, 'Duplicate name', policies.add_policy_alias, 1, 'two') # remove name policies.remove_policy_alias('tahi') self.assertIsNone(policies.get_by_name('tahi')) # remove only name self.assertRaisesWithMessage(PolicyError, 'Policies must have at least one name.', policies.remove_policy_alias, 'zero') # remove non-existent name self.assertRaisesWithMessage(PolicyError, 'No policy with name', policies.remove_policy_alias, 'three') # remove default name policies.remove_policy_alias('two') self.assertIsNone(policies.get_by_name('two')) self.assertEqual(policies.get_by_index(2).name, 'rua') # change default name to a new name policies.change_policy_primary_name(2, 'two') self.assertEqual(policies.get_by_name('two'), test_policies[2]) 
self.assertEqual(policies.get_by_index(2).name, 'two') # change default name to an existing alias policies.change_policy_primary_name(2, 'dos') self.assertEqual(policies.get_by_index(2).name, 'dos') # change default name to a bad new name self.assertRaisesWithMessage(PolicyError, 'Invalid name', policies.change_policy_primary_name, 2, 'bad\nname') # change default name to a name belonging to another policy self.assertRaisesWithMessage(PolicyError, 'Other policy', policies.change_policy_primary_name, 1, 'dos') def test_deprecated_default(self): bad_conf = self._conf(""" [storage-policy:1] name = one deprecated = yes default = yes """) self.assertRaisesWithMessage( PolicyError, "Deprecated policy can not be default", parse_storage_policies, bad_conf) def test_multiple_policies_with_no_policy_index_zero(self): bad_conf = self._conf(""" [storage-policy:1] name = one default = yes """) # Policy-0 will not be implicitly added if other policies are defined self.assertRaisesWithMessage( PolicyError, "must specify a storage policy section " "for policy index 0", parse_storage_policies, bad_conf) @mock.patch.object(swift.common.storage_policy, 'VALID_EC_TYPES', ['isa_l_rs_vand', 'isa_l_rs_cauchy']) @mock.patch('swift.common.storage_policy.ECDriver') def test_known_bad_ec_config(self, mock_driver): good_conf = self._conf(""" [storage-policy:0] name = bad-policy policy_type = erasure_coding ec_type = isa_l_rs_cauchy ec_num_data_fragments = 10 ec_num_parity_fragments = 5 """) with capture_logging('swift.common.storage_policy') as records: parse_storage_policies(good_conf) mock_driver.assert_called_once() mock_driver.reset_mock() self.assertFalse([(r.levelname, r.msg) for r in records]) good_conf = self._conf(""" [storage-policy:0] name = bad-policy policy_type = erasure_coding ec_type = isa_l_rs_vand ec_num_data_fragments = 10 ec_num_parity_fragments = 4 """) with capture_logging('swift.common.storage_policy') as records: parse_storage_policies(good_conf) 
mock_driver.assert_called_once() mock_driver.reset_mock() self.assertFalse([(r.levelname, r.msg) for r in records]) bad_conf = self._conf(""" [storage-policy:0] name = bad-policy policy_type = erasure_coding ec_type = isa_l_rs_vand ec_num_data_fragments = 10 ec_num_parity_fragments = 5 """) with capture_logging('swift.common.storage_policy') as records, \ self.assertRaises(PolicyError) as exc_mgr: parse_storage_policies(bad_conf) self.assertEqual(exc_mgr.exception.args[0], 'Storage policy bad-policy uses an EC ' 'configuration known to harm data durability. This ' 'policy MUST be deprecated.') mock_driver.assert_not_called() mock_driver.reset_mock() self.assertEqual([r.levelname for r in records], ['WARNING']) for msg in ('known to harm data durability', 'Any data in this policy should be migrated', 'https://bugs.launchpad.net/swift/+bug/1639691'): self.assertIn(msg, records[0].msg) slightly_less_bad_conf = self._conf(""" [storage-policy:0] name = bad-policy policy_type = erasure_coding ec_type = isa_l_rs_vand ec_num_data_fragments = 10 ec_num_parity_fragments = 5 deprecated = true [storage-policy:1] name = good-policy policy_type = erasure_coding ec_type = isa_l_rs_cauchy ec_num_data_fragments = 10 ec_num_parity_fragments = 5 default = true """) with capture_logging('swift.common.storage_policy') as records: parse_storage_policies(slightly_less_bad_conf) self.assertEqual(2, mock_driver.call_count) mock_driver.reset_mock() self.assertEqual([r.levelname for r in records], ['WARNING']) for msg in ('known to harm data durability', 'Any data in this policy should be migrated', 'https://bugs.launchpad.net/swift/+bug/1639691'): self.assertIn(msg, records[0].msg) def test_no_default(self): orig_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = one default = yes """) policies = parse_storage_policies(orig_conf) self.assertEqual(policies.default, policies[1]) self.assertTrue(policies[0].name, 'Policy-0') bad_conf = self._conf(""" 
[storage-policy:0] name = zero [storage-policy:1] name = one deprecated = yes """) # multiple polices and no explicit default self.assertRaisesWithMessage( PolicyError, "Unable to find default", parse_storage_policies, bad_conf) good_conf = self._conf(""" [storage-policy:0] name = Policy-0 default = yes [storage-policy:1] name = one deprecated = yes """) policies = parse_storage_policies(good_conf) self.assertEqual(policies.default, policies[0]) self.assertTrue(policies[1].is_deprecated, True) def test_parse_storage_policies(self): # ValueError when deprecating policy 0 bad_conf = self._conf(""" [storage-policy:0] name = zero deprecated = yes [storage-policy:1] name = one deprecated = yes """) self.assertRaisesWithMessage( PolicyError, "Unable to find policy that's not deprecated", parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:-1] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:x] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:x-1] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:x] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:x:1] name = zero """) self.assertRaisesWithMessage(PolicyError, 'Invalid index', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:1] name = zero boo = berries """) self.assertRaisesWithMessage(PolicyError, 'Invalid option', parse_storage_policies, bad_conf) bad_conf = self._conf(""" [storage-policy:0] name = """) 
        # (the conf built just above defines a policy with an empty name)
        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        # the reserved Policy-0 name is invalid on a non-zero index
        bad_conf = self._conf("""
        [storage-policy:3]
        name = Policy-0
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        # reserved-name rejection is case-insensitive
        bad_conf = self._conf("""
        [storage-policy:1]
        name = policY-0
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        # names that differ only in case count as duplicates
        bad_conf = self._conf("""
        [storage-policy:0]
        name = one
        [storage-policy:1]
        name = ONE
        """)

        self.assertRaisesWithMessage(PolicyError, 'Duplicate name',
                                     parse_storage_policies, bad_conf)

        # 'good_stuff' is rejected — presumably the underscore is the
        # offending character; TODO confirm allowed charset in validator
        bad_conf = self._conf("""
        [storage-policy:0]
        name = good_stuff
        """)

        self.assertRaisesWithMessage(PolicyError, 'Invalid name',
                                     parse_storage_policies, bad_conf)

        # policy_type = erasure_coding
        # missing ec_type, ec_num_data_fragments and ec_num_parity_fragments
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        """)

        self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                     parse_storage_policies, bad_conf)

        # missing ec_type, but other options valid...
        bad_conf = self._conf("""
        [storage-policy:0]
        name = zero
        [storage-policy:1]
        name = ec10-4
        policy_type = erasure_coding
        ec_num_data_fragments = 10
        ec_num_parity_fragments = 4
        """)

        self.assertRaisesWithMessage(PolicyError, 'Missing ec_type',
                                     parse_storage_policies, bad_conf)

        # ec_type specified, but invalid...
bad_conf = self._conf(""" [storage-policy:0] name = zero default = yes [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_type = garbage_alg ec_num_data_fragments = 10 ec_num_parity_fragments = 4 """) self.assertRaisesWithMessage(PolicyError, 'Wrong ec_type garbage_alg for policy ' 'ec10-4, should be one of "%s"' % (', '.join(VALID_EC_TYPES)), parse_storage_policies, bad_conf) # missing and invalid ec_num_parity_fragments bad_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_type = %(ec_type)s ec_num_data_fragments = 10 """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) self.assertRaisesWithMessage(PolicyError, 'Invalid ec_num_parity_fragments', parse_storage_policies, bad_conf) for num_parity in ('-4', '0', 'x'): bad_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_type = %(ec_type)s ec_num_data_fragments = 10 ec_num_parity_fragments = %(num_parity)s """ % {'ec_type': DEFAULT_TEST_EC_TYPE, 'num_parity': num_parity}) self.assertRaisesWithMessage(PolicyError, 'Invalid ec_num_parity_fragments', parse_storage_policies, bad_conf) # missing and invalid ec_num_data_fragments bad_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_type = %(ec_type)s ec_num_parity_fragments = 4 """ % {'ec_type': DEFAULT_TEST_EC_TYPE}) self.assertRaisesWithMessage(PolicyError, 'Invalid ec_num_data_fragments', parse_storage_policies, bad_conf) for num_data in ('-10', '0', 'x'): bad_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_type = %(ec_type)s ec_num_data_fragments = %(num_data)s ec_num_parity_fragments = 4 """ % {'num_data': num_data, 'ec_type': DEFAULT_TEST_EC_TYPE}) self.assertRaisesWithMessage(PolicyError, 'Invalid ec_num_data_fragments', parse_storage_policies, bad_conf) # invalid ec_object_segment_size for 
segment_size in ('-4', '0', 'x'): bad_conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:1] name = ec10-4 policy_type = erasure_coding ec_object_segment_size = %(segment_size)s ec_type = %(ec_type)s ec_num_data_fragments = 10 ec_num_parity_fragments = 4 """ % {'segment_size': segment_size, 'ec_type': DEFAULT_TEST_EC_TYPE}) self.assertRaisesWithMessage(PolicyError, 'Invalid ec_object_segment_size', parse_storage_policies, bad_conf) # Additional section added to ensure parser ignores other sections conf = self._conf(""" [some-other-section] foo = bar [storage-policy:0] name = zero [storage-policy:5] name = one default = yes [storage-policy:6] name = duplicate-sections-are-ignored [storage-policy:6] name = apple """) policies = parse_storage_policies(conf) self.assertEqual(True, policies.get_by_index(5).is_default) self.assertEqual(False, policies.get_by_index(0).is_default) self.assertEqual(False, policies.get_by_index(6).is_default) self.assertEqual("object", policies.get_by_name("zero").ring_name) self.assertEqual("object-5", policies.get_by_name("one").ring_name) self.assertEqual("object-6", policies.get_by_name("apple").ring_name) self.assertEqual(0, int(policies.get_by_name('zero'))) self.assertEqual(5, int(policies.get_by_name('one'))) self.assertEqual(6, int(policies.get_by_name('apple'))) self.assertEqual("zero", policies.get_by_index(0).name) self.assertEqual("zero", policies.get_by_index("0").name) self.assertEqual("one", policies.get_by_index(5).name) self.assertEqual("apple", policies.get_by_index(6).name) self.assertEqual("zero", policies.get_by_index(None).name) self.assertEqual("zero", policies.get_by_index('').name) self.assertEqual(policies.get_by_index(0), policies.legacy) def test_reload_invalid_storage_policies(self): conf = self._conf(""" [storage-policy:0] name = zero [storage-policy:00] name = double-zero """) with NamedTemporaryFile(mode='w+t') as f: conf.write(f) f.flush() with 
mock.patch('swift.common.utils.SWIFT_CONF_FILE', new=f.name): try: reload_storage_policies() except SystemExit as e: err_msg = str(e) else: self.fail('SystemExit not raised') parts = [ 'Invalid Storage Policy Configuration', 'Duplicate index', ] for expected in parts: self.assertTrue( expected in err_msg, '%s was not in %s' % (expected, err_msg)) def test_storage_policy_ordering(self): test_policies = StoragePolicyCollection([ StoragePolicy(0, 'zero', is_default=True), StoragePolicy(503, 'error'), StoragePolicy(204, 'empty'), StoragePolicy(404, 'missing'), ]) self.assertEqual([0, 204, 404, 503], [int(p) for p in sorted(list(test_policies))]) p503 = test_policies[503] self.assertTrue(501 < p503 < 507) def test_get_object_ring(self): test_policies = [StoragePolicy(0, 'aay', True), StoragePolicy(1, 'bee', False), StoragePolicy(2, 'cee', False)] policies = StoragePolicyCollection(test_policies) class NamedFakeRing(FakeRing): def __init__(self, swift_dir, ring_name=None): self.ring_name = ring_name super(NamedFakeRing, self).__init__() with mock.patch('swift.common.storage_policy.Ring', new=NamedFakeRing): for policy in policies: self.assertFalse(policy.object_ring) ring = policies.get_object_ring(int(policy), '/path/not/used') self.assertEqual(ring.ring_name, policy.ring_name) self.assertTrue(policy.object_ring) self.assertTrue(isinstance(policy.object_ring, NamedFakeRing)) def blow_up(*args, **kwargs): raise Exception('kaboom!') with mock.patch('swift.common.storage_policy.Ring', new=blow_up): for policy in policies: policy.load_ring('/path/not/used') expected = policies.get_object_ring(int(policy), '/path/not/used') self.assertEqual(policy.object_ring, expected) # bad policy index self.assertRaises(PolicyError, policies.get_object_ring, 99, '/path/not/used') def test_bind_ports_cache(self): test_policies = [StoragePolicy(0, 'aay', True), StoragePolicy(1, 'bee', False), StoragePolicy(2, 'cee', False)] my_ips = ['1.2.3.4', '2.3.4.5'] other_ips = ['3.4.5.6', '4.5.6.7'] 
bind_ip = my_ips[1] devs_by_ring_name1 = { 'object': [ # 'aay' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], 'port': 6006}, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], 'port': 6007}, {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], 'port': 6008}, None, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6009}], 'object-1': [ # 'bee' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], 'port': 6006}, # dupe {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], 'port': 6010}, {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], 'port': 6011}, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6012}], 'object-2': [ # 'cee' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], 'port': 6010}, # on our IP and a not-us IP {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[0], 'port': 6013}, None, {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], 'port': 6014}, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6015}], } devs_by_ring_name2 = { 'object': [ # 'aay' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], 'port': 6016}, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6019}], 'object-1': [ # 'bee' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[1], 'port': 6016}, # dupe {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6022}], 'object-2': [ # 'cee' {'id': 0, 'zone': 0, 'region': 1, 'ip': my_ips[0], 'port': 6020}, {'id': 0, 'zone': 0, 'region': 1, 'ip': other_ips[1], 'port': 6025}], } ring_files = [ring_name + '.ring.gz' for ring_name in sorted(devs_by_ring_name1)] def _fake_load(gz_path, stub_objs, metadata_only=False): return RingData( devs=stub_objs[os.path.basename(gz_path)[:-8]], replica2part2dev_id=[], part_shift=24) with mock.patch( 'swift.common.storage_policy.RingData.load' ) as mock_ld, \ patch_policies(test_policies), \ mock.patch('swift.common.storage_policy.whataremyips') \ as mock_whataremyips, \ temptree(ring_files) as tempdir: mock_whataremyips.return_value = my_ips cache = 
BindPortsCache(tempdir, bind_ip) self.assertEqual([ mock.call(bind_ip), ], mock_whataremyips.mock_calls) mock_whataremyips.reset_mock() mock_ld.side_effect = partial(_fake_load, stub_objs=devs_by_ring_name1) self.assertEqual(set([ 6006, 6008, 6011, 6010, 6014, ]), cache.all_bind_ports_for_node()) self.assertEqual([ mock.call(os.path.join(tempdir, ring_files[0]), metadata_only=True), mock.call(os.path.join(tempdir, ring_files[1]), metadata_only=True), mock.call(os.path.join(tempdir, ring_files[2]), metadata_only=True), ], mock_ld.mock_calls) mock_ld.reset_mock() mock_ld.side_effect = partial(_fake_load, stub_objs=devs_by_ring_name2) self.assertEqual(set([ 6006, 6008, 6011, 6010, 6014, ]), cache.all_bind_ports_for_node()) self.assertEqual([], mock_ld.mock_calls) # but when all the file mtimes are made different, it'll # reload for gz_file in [os.path.join(tempdir, n) for n in ring_files]: os.utime(gz_file, (88, 88)) self.assertEqual(set([ 6016, 6020, ]), cache.all_bind_ports_for_node()) self.assertEqual([ mock.call(os.path.join(tempdir, ring_files[0]), metadata_only=True), mock.call(os.path.join(tempdir, ring_files[1]), metadata_only=True), mock.call(os.path.join(tempdir, ring_files[2]), metadata_only=True), ], mock_ld.mock_calls) mock_ld.reset_mock() # Don't do something stupid like crash if a ring file is missing. 
os.unlink(os.path.join(tempdir, 'object-2.ring.gz')) self.assertEqual(set([ 6016, 6020, ]), cache.all_bind_ports_for_node()) self.assertEqual([], mock_ld.mock_calls) # whataremyips() is only called in the constructor self.assertEqual([], mock_whataremyips.mock_calls) def test_singleton_passthrough(self): test_policies = [StoragePolicy(0, 'aay', True), StoragePolicy(1, 'bee', False), StoragePolicy(2, 'cee', False)] with patch_policies(test_policies): for policy in POLICIES: self.assertEqual(POLICIES[int(policy)], policy) def test_quorum_size_replication(self): expected_sizes = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3} for n, expected in expected_sizes.items(): policy = StoragePolicy(0, 'zero', object_ring=FakeRing(replicas=n)) self.assertEqual(policy.quorum, expected) def test_quorum_size_erasure_coding(self): test_ec_policies = [ ECStoragePolicy(10, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=8, ec_nparity=2), ECStoragePolicy(11, 'df10-6', ec_type='flat_xor_hd_4', ec_ndata=10, ec_nparity=6), ECStoragePolicy(12, 'ec4-2-dup', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4, ec_nparity=2, ec_duplication_factor=2), ] for ec_policy in test_ec_policies: k = ec_policy.ec_ndata expected_size = ( (k + ec_policy.pyeclib_driver.min_parity_fragments_needed()) * ec_policy.ec_duplication_factor ) self.assertEqual(expected_size, ec_policy.quorum) def test_validate_ring(self): test_policies = [ ECStoragePolicy(0, 'ec8-2', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=8, ec_nparity=2, is_default=True), ECStoragePolicy(1, 'ec10-4', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=4), ECStoragePolicy(2, 'ec4-2', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4, ec_nparity=2), ECStoragePolicy(3, 'ec4-2-2dup', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=4, ec_nparity=2, ec_duplication_factor=2) ] policies = StoragePolicyCollection(test_policies) class MockRingData(object): def __init__(self, num_replica): self.replica_count = num_replica def do_test(actual_load_ring_replicas): for policy, ring_replicas in 
zip(policies, actual_load_ring_replicas): with mock.patch('swift.common.ring.ring.RingData.load', return_value=MockRingData(ring_replicas)): necessary_replica_num = (policy.ec_n_unique_fragments * policy.ec_duplication_factor) with mock.patch( 'swift.common.ring.ring.validate_configuration'): msg = 'EC ring for policy %s needs to be configured ' \ 'with exactly %d replicas.' % \ (policy.name, necessary_replica_num) self.assertRaisesWithMessage(RingLoadError, msg, policy.load_ring, 'mock') # first, do somethign completely different do_test([8, 10, 7, 11]) # then again, closer to true, but fractional do_test([9.9, 14.1, 5.99999, 12.000000001]) def test_storage_policy_get_info(self): test_policies = [ StoragePolicy(0, 'zero', is_default=True), StoragePolicy(1, 'one', is_deprecated=True, aliases='tahi, uno'), ECStoragePolicy(10, 'ten', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3), ECStoragePolicy(11, 'done', is_deprecated=True, ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=10, ec_nparity=3), ] policies = StoragePolicyCollection(test_policies) expected = { # default replication (0, True): { 'name': 'zero', 'aliases': 'zero', 'default': True, 'deprecated': False, 'policy_type': REPL_POLICY }, (0, False): { 'name': 'zero', 'aliases': 'zero', 'default': True, }, # deprecated replication (1, True): { 'name': 'one', 'aliases': 'one, tahi, uno', 'default': False, 'deprecated': True, 'policy_type': REPL_POLICY }, (1, False): { 'name': 'one', 'aliases': 'one, tahi, uno', 'deprecated': True, }, # enabled ec (10, True): { 'name': 'ten', 'aliases': 'ten', 'default': False, 'deprecated': False, 'policy_type': EC_POLICY, 'ec_type': DEFAULT_TEST_EC_TYPE, 'ec_num_data_fragments': 10, 'ec_num_parity_fragments': 3, 'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE, 'ec_duplication_factor': 1, }, (10, False): { 'name': 'ten', 'aliases': 'ten', }, # deprecated ec (11, True): { 'name': 'done', 'aliases': 'done', 'default': False, 'deprecated': True, 'policy_type': EC_POLICY, 
'ec_type': DEFAULT_TEST_EC_TYPE, 'ec_num_data_fragments': 10, 'ec_num_parity_fragments': 3, 'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE, 'ec_duplication_factor': 1, }, (11, False): { 'name': 'done', 'aliases': 'done', 'deprecated': True, }, # enabled ec with ec_duplication (12, True): { 'name': 'twelve', 'aliases': 'twelve', 'default': False, 'deprecated': False, 'policy_type': EC_POLICY, 'ec_type': DEFAULT_TEST_EC_TYPE, 'ec_num_data_fragments': 10, 'ec_num_parity_fragments': 3, 'ec_object_segment_size': DEFAULT_EC_OBJECT_SEGMENT_SIZE, 'ec_duplication_factor': 2, }, (12, False): { 'name': 'twelve', 'aliases': 'twelve', }, } self.maxDiff = None for policy in policies: expected_info = expected[(int(policy), True)] self.assertEqual(policy.get_info(config=True), expected_info) expected_info = expected[(int(policy), False)] self.assertEqual(policy.get_info(config=False), expected_info) def test_ec_fragment_size_cached(self): policy = ECStoragePolicy( 0, 'ec2-1', ec_type=DEFAULT_TEST_EC_TYPE, ec_ndata=2, ec_nparity=1, object_ring=FakeRing(replicas=3), ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, is_default=True) ec_driver = ECDriver(ec_type=DEFAULT_TEST_EC_TYPE, k=2, m=1) expected_fragment_size = ec_driver.get_segment_info( DEFAULT_EC_OBJECT_SEGMENT_SIZE, DEFAULT_EC_OBJECT_SEGMENT_SIZE)['fragment_size'] with mock.patch.object( policy.pyeclib_driver, 'get_segment_info') as fake: fake.return_value = { 'fragment_size': expected_fragment_size} for x in range(10): self.assertEqual(expected_fragment_size, policy.fragment_size) # pyeclib_driver.get_segment_info is called only once self.assertEqual(1, fake.call_count) if __name__ == '__main__': unittest.main()
30 technical questions - 30 mins. - mostly on DBMS, Oracle (e.g., what would be the output), C, C++, Java (2-3 questions on classes and constructors) and data structures. The technical test would be easy and the general test would be a little difficult. In the interview, they asked me the following questions. A macro in MS Word is used to group a sequence of actions and make it available as a button or keyboard shortcut. For example, make it a button and put it in the taskbar, so that when you press it, you will be provided with a fixed table. ALTER TABLE table1 DROP COLUMN column1; In Oracle 8, you can't. 3. Can you store an image in Oracle, and with which datatype? Yes, you can; it can be achieved by using a BLOB (binary large object) type, which can store up to 4 GB in a single column. 4. Have you used reports and forms in Oracle? 5. Have you written applications to retrieve images stored in a table? 6. Some DOS and UNIX commands. 8. Some situational questions, like what you would do if your company were burning, etc. And questions on the material in the attached file. All the best.
"""Unit tests for the ReviewReplyResource list and item web APIs."""

from __future__ import unicode_literals

from django.core import mail
from django.utils import six

from reviewboard.reviews.models import Review
from reviewboard.webapi.resources import resources
from reviewboard.webapi.tests.base import BaseWebAPITestCase
from reviewboard.webapi.tests.mimetypes import (review_reply_item_mimetype,
                                                review_reply_list_mimetype)
from reviewboard.webapi.tests.mixins import (BasicTestsMetaclass,
                                             ReviewRequestChildItemMixin,
                                             ReviewRequestChildListMixin)
from reviewboard.webapi.tests.mixins_review import (ReviewItemMixin,
                                                    ReviewListMixin)
from reviewboard.webapi.tests.urls import (get_review_reply_item_url,
                                           get_review_reply_list_url)


class BaseResourceTestCase(BaseWebAPITestCase):
    """Shared helpers for the reply resource test cases below."""

    def _create_test_review(self, with_local_site=False):
        """Create and publish a review request with one published review.

        The review contains a file attachment comment, so replies created
        against it exercise the comment-reply code paths as well.
        """
        review_request = self.create_review_request(
            submitter=self.user,
            with_local_site=with_local_site)
        file_attachment = self.create_file_attachment(review_request)
        review_request.publish(review_request.submitter)

        review = self.create_review(review_request, publish=True)
        self.create_file_attachment_comment(review, file_attachment)

        return review


@six.add_metaclass(BasicTestsMetaclass)
class ResourceListTests(ReviewListMixin, ReviewRequestChildListMixin,
                        BaseResourceTestCase):
    """Testing the ReviewReplyResource list APIs."""

    # NOTE: BasicTestsMetaclass generates the standard GET/POST tests from
    # the setup_basic_* hooks defined below.
    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/reviews/<id>/replies/'
    resource = resources.review_reply

    def setup_review_request_child_test(self, review_request):
        """Hook for ReviewRequestChildListMixin: return (url, mimetype)."""
        review = self.create_review(review_request, publish=True)

        return (get_review_reply_list_url(review),
                review_reply_list_mimetype)

    def compare_item(self, item_rsp, reply):
        """Assert that a serialized reply payload matches the model."""
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], reply.body_top)
        self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)

        # The *_text_type fields are derived from the rich-text flags.
        if reply.body_top_rich_text:
            self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        if reply.body_bottom_rich_text:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name,
                             populate_items):
        """Hook for the generated GET list tests.

        Returns (url, expected mimetype, items expected in the list).
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, publish=True)

        if populate_items:
            items = [self.create_reply(review, publish=True)]
        else:
            items = []

        return (get_review_reply_list_url(review, local_site_name),
                review_reply_list_mimetype,
                items)

    def test_get_with_counts_only(self):
        """Testing the GET
        review-requests/<id>/reviews/<id>/replies/?counts-only=1 API
        """
        review = self._create_test_review()
        self.create_reply(review, user=self.user, publish=True)

        rsp = self.api_get(
            '%s?counts-only=1' % get_review_reply_list_url(review),
            expected_mimetype=review_reply_list_mimetype)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertEqual(rsp['count'], 1)

    #
    # HTTP POST tests
    #

    def setup_basic_post_test(self, user, with_local_site, local_site_name,
                              post_valid_data):
        """Hook for the generated POST tests.

        Returns (url, expected mimetype, POST payload, check_post_result
        args).
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, publish=True)

        return (get_review_reply_list_url(review, local_site_name),
                review_reply_item_mimetype,
                {},
                [review])

    def check_post_result(self, user, rsp, review):
        """Verify the reply created by the generated POST test."""
        reply = Review.objects.get(pk=rsp['reply']['id'])
        # New replies default to plain-text body_top.
        self.assertFalse(reply.body_top_rich_text)
        self.compare_item(rsp['reply'], reply)

    def test_post_with_body_top(self):
        """Testing the POST review-requests/<id>/reviews/<id>/replies/ API
        with body_top
        """
        body_top = 'My Body Top'

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        rsp = self.api_post(
            get_review_reply_list_url(review),
            {'body_top': body_top},
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.body_top, body_top)

    def test_post_with_body_bottom(self):
        """Testing the POST review-requests/<id>/reviews/<id>/replies/ API
        with body_bottom
        """
        body_bottom = 'My Body Bottom'

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        rsp = self.api_post(
            get_review_reply_list_url(review),
            {'body_bottom': body_bottom},
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.body_bottom, body_bottom)


@six.add_metaclass(BasicTestsMetaclass)
class ResourceItemTests(ReviewItemMixin, ReviewRequestChildItemMixin,
                        BaseResourceTestCase):
    """Testing the ReviewReplyResource item APIs."""

    fixtures = ['test_users']
    sample_api_url = 'review-requests/<id>/reviews/<id>/replies/<id>/'
    resource = resources.review_reply

    def setup_review_request_child_test(self, review_request):
        """Hook for ReviewRequestChildItemMixin: return (url, mimetype)."""
        review = self.create_review(review_request, publish=True)
        reply = self.create_reply(review, publish=True)

        return (get_review_reply_item_url(review, reply.pk),
                review_reply_item_mimetype)

    def compare_item(self, item_rsp, reply):
        """Assert that a serialized reply payload matches the model."""
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], reply.body_top)
        self.assertEqual(item_rsp['body_bottom'], reply.body_bottom)

        if reply.body_top_rich_text:
            self.assertEqual(item_rsp['body_top_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        if reply.body_bottom_rich_text:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'markdown')
        else:
            self.assertEqual(item_rsp['body_bottom_text_type'], 'plain')

    #
    # HTTP DELETE tests
    #

    def setup_basic_delete_test(self, user, with_local_site, local_site_name):
        """Hook for the generated DELETE tests.

        Returns (url, check_delete_result args).
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                [reply, review])

    def check_delete_result(self, user, reply, review):
        """Verify the reply was detached from the review on DELETE."""
        self.assertNotIn(reply, review.replies.all())

    #
    # HTTP GET tests
    #

    def setup_basic_get_test(self, user, with_local_site, local_site_name):
        """Hook for the generated GET item tests.

        Returns (url, expected mimetype, expected item).
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                review_reply_item_mimetype,
                reply)

    def test_get_not_modified(self):
        """Testing the GET review-requests/<id>/reviews/<id>/replies/<id>/
        API with Not Modified response
        """
        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)
        reply = self.create_reply(review, publish=True)

        self._testHttpCaching(
            get_review_reply_item_url(reply.base_reply_to, reply.id),
            check_etags=True)

    #
    # HTTP PUT tests
    #

    def setup_basic_put_test(self, user, with_local_site, local_site_name,
                             put_valid_data):
        """Hook for the generated PUT tests.

        Returns (url, expected mimetype, PUT payload, item,
        check_put_result args).
        """
        review_request = self.create_review_request(
            with_local_site=with_local_site,
            submitter=user,
            publish=True)
        review = self.create_review(review_request, user=user, publish=True)
        reply = self.create_reply(review, user=user)

        return (get_review_reply_item_url(review, reply.pk, local_site_name),
                review_reply_item_mimetype,
                {
                    'body_top': 'New body top',
                },
                reply,
                [])

    def check_put_result(self, user, item_rsp, reply, *args):
        """Verify the reply state after the generated PUT test."""
        self.assertEqual(item_rsp['id'], reply.pk)
        self.assertEqual(item_rsp['body_top'], 'New body top')
        self.assertEqual(item_rsp['body_top_text_type'], 'plain')

        # Re-fetch so we compare against the stored state, not a stale copy.
        reply = Review.objects.get(pk=reply.pk)
        self.compare_item(item_rsp, reply)

    def test_put_with_publish(self):
        """Testing the PUT
        review-requests/<id>/reviews/<id>/replies/<id>/?public=1 API
        """
        self.siteconfig.set('mail_send_review_mail', True)
        self.siteconfig.save()

        review_request = self.create_review_request(publish=True)
        review = self.create_review(review_request, publish=True)

        mail.outbox = []

        rsp, response = self.api_post_with_response(
            get_review_reply_list_url(review),
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('Location', response)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')

        rsp = self.api_put(
            response['Location'],
            {
                'body_top': 'Test',
                'public': True,
            },
            expected_mimetype=review_reply_item_mimetype)

        self.assertEqual(rsp['stat'], 'ok')

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertEqual(reply.public, True)

        # Publishing a (non-trivial) reply sends one notification e-mail.
        self.assertEqual(len(mail.outbox), 1)

    def test_put_with_publish_and_trivial(self):
        """Testing the PUT review-requests/<id>/reviews/<id>/replies/<id>/
        API with public=1 and trivial=1
        """
        self.siteconfig.set('mail_send_review_mail', True)
        self.siteconfig.save()

        review_request = self.create_review_request(submitter=self.user,
                                                    publish=True)
        review = self.create_review(review_request, publish=True)

        mail.outbox = []

        rsp, response = self.api_post_with_response(
            get_review_reply_list_url(review),
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('Location', response)
        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')

        rsp = self.api_put(
            response['Location'],
            {
                'body_top': 'Test',
                'public': True,
                'trivial': True
            },
            expected_mimetype=review_reply_item_mimetype)

        self.assertIn('stat', rsp)
        self.assertEqual(rsp['stat'], 'ok')
        self.assertIn('reply', rsp)
        self.assertIn('id', rsp['reply'])

        reply = Review.objects.get(pk=rsp['reply']['id'])
        self.assertTrue(reply.public)

        # A trivial publish must suppress the notification e-mail.
        self.assertEqual(len(mail.outbox), 0)
Welcome to this gorgeous 2 bedroom, 2 bath, 1674 SF Condominium ideally situated in the prestigious Reserva Conchal, in the 62 Spanish Colonial-Style condominiums called Bougainvillea. These incredible ocean-view residences face the award-winning Robert Trent Jones II championship golf course. Bougainvillea 1101 is fully furnished, professionally decorated and ready to move in. As you access the front door, you can enjoy the open concept of the kitchen as well as the dining room and sitting area facing the incredible views of the exuberant vegetation, manicured golf course and the majestic Pacific Ocean. The fully equipped kitchen has granite counter tops as well as a nice breakfast bar. The large master bedroom is elegantly decorated with a large sliding door that faces the covered patio with the incredible vistas of the surroundings. It also has spacious walk in closet with lots of storage space. The large dining area is tastefully furnished and the comfortable sunken den steps out onto the large patio with very stylish furniture facing the golf course as well as the blue ocean. Some of the finishes in this condo include ceramic floors throughout the unit, central air conditioning, wrought-iron detailing and arched-shaped windows that frame the spectacular panorama of the golf course and ocean.
# Copyright (C) 2019 Open Source Integrators
# <https://www.opensourceintegrators.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models

# Selectable columns of the CSV contact list uploaded to Voicent.
VOICENT_CONTACT_COLUMNS = [('Assigned To', 'Assigned To'),
                           ('Business', 'Business'),
                           ('Category', 'Category'),
                           ('Contact Status', 'Contact Status'),
                           ('Email', 'Email'),
                           ('First Name', 'First Name (Required)'),
                           ('Last Name', 'Last Name'),
                           ('Lead Source', 'Lead Source'),
                           ('Other', 'Other'),
                           ('Phone', 'Phone (Required)')]

# Fields that can appear in a Voicent call-status reply.
VOICENT_REPLY = [('availableagents', 'Available Agents'),
                 ('callback', 'Callback'),
                 ('campid', 'Campaign ID'),
                 ('campname', 'Campaign Name'),
                 ('campsize', 'Campaign Size'),
                 ('connected', 'Connected'),
                 ('dnc', 'Contact DNC'),
                 ('nophone', 'Contact No Phone'),
                 ('disc', 'Disc. Number'),
                 ('dropped', 'Dropped'),
                 ('failed', 'Failed'),
                 ('fax', 'Fax'),
                 ('info', 'Info'),
                 ('in', 'Interested'),
                 ('lines', 'Lines'),
                 ('linebusy', 'Line Busy'),
                 ('live', 'Live Answer'),
                 ('machine', 'Machine Answer'),
                 ('made', 'Made'),
                 ('maxlines', 'Max Lines'),
                 ('noact', 'No Activity'),
                 ('noanswer', 'No Answer'),
                 ('notin', 'Not Interested'),
                 ('notes', 'Notes'),
                 ('optout', 'Opt Out'),
                 ('serverr', 'Service Error'),
                 ('status', 'Status'),
                 ('totalagents', 'Total Agents'),
                 ('wit', 'Wit')]

# Supported Voicent outbound message types.
MSGTYPE = [('audio', 'Audio'),
           ('ivr', 'IVR'),
           ('survey', 'Survey'),
           ('template', 'Template'),
           ('tts', 'Text-To-Speech')]


class BackendVoicentCallLine(models.Model):
    """One call-campaign line configured on a Voicent backend."""

    _name = 'backend.voicent.call.line'
    _description = 'Voicent Backend Call Line'

    name = fields.Char(string='Name', required=True)
    # Ordering of lines within the backend; lower runs first.
    sequence = fields.Integer(string='Sequence', default=0)
    # Selection values are contributed by extending modules; the base
    # module deliberately declares an empty list.
    applies_on = fields.Selection(string='Applies on', selection=[])
    msgtype = fields.Selection(MSGTYPE, string='Message Type', required=True)
    msginfo = fields.Char(string='Message Info')
    backend_id = fields.Many2one(
        string='Backend',
        comodel_name='backend.voicent',
        ondelete='set null')
    reply_ids = fields.One2many('backend.voicent.call.line.reply', 'line_id',
                                string="Replies")
    contact_ids = fields.One2many('backend.voicent.call.line.contact',
                                  'line_id',
                                  string="Contact Info")


class BackendVoicentCallLineContact(models.Model):
    """Mapping of one CSV column to an Odoo field for the contact list."""

    _name = 'backend.voicent.call.line.contact'
    _description = 'Columns of the CSV file to provide the contact list'
    _order = 'sequence'

    name = fields.Selection(VOICENT_CONTACT_COLUMNS, string='Voicent Field',
                            required=True)
    # Free-form column label used when name == 'Other'.
    other = fields.Char(string='Other')
    sequence = fields.Integer(string='Sequence', default=0)
    # Dotted path to the Odoo field whose value fills this column.
    field_domain = fields.Char(string='Odoo Field', required=True)
    # Value used when the Odoo field is empty.
    default_value = fields.Char(string='Default Value', required=True)
    line_id = fields.Many2one(
        string='Call Line',
        comodel_name='backend.voicent.call.line',
        ondelete='set null')


class BackendVoicentCallLineReply(models.Model):
    """Action to run when a Voicent reply field matches a given value."""

    _name = 'backend.voicent.call.line.reply'
    _description = 'Reply to a Voicent Call'

    name = fields.Char(string='Name', required=True)
    line_id = fields.Many2one(
        string='Call Line',
        comodel_name='backend.voicent.call.line',
        ondelete='set null')
    reply_field = fields.Selection(VOICENT_REPLY,
                                   string="Voicent Reply Field",
                                   required=True)
    reply_value = fields.Char(string="Voicent Reply Value", required=True)
    action_id = fields.Many2one('ir.actions.server',
                                string="Server Action",
                                required=True,
                                help="""If the Voicent reply field is equal to
                                the Voicent reply value, the server action is
                                executed.""")
Companions in East Albury 2640 NSW are definitely the most costly method to experience paid friendship. These companies have a tendency to be split in to high and also normal course companion solutions. These firms can bill prices of around EUR650 for 2 hrs with a companion. Listed below you will certainly discover info concerning 2 of the ideal high course solution companies in East Albury 2640 NSW. There are a multitude of various East Albury 2640 NSW companion firms running throughout the city, as well as it can be tough to understand which to rely on with such a fragile issue. While the majority of companies are credible as well as entirely expert, it is constantly beneficial to understand for certain which are excellent to handle.
# Check difference of APIs of two commits
# Output is number of symbols added and removed.
# You can list of those symbols as well
# Projects that change exported symbols with each commit should not be used
# as a built or install time dependency until they stabilize.
#
# NOTE: this is Python 2 code (uses `print` statements).
import logging
import os

from gofedlib.utils import YELLOW, RED, BLUE, ENDC
from gofedinfra.system.core.factory.actfactory import ActFactory
from infra.system.core.factory.fakeactfactory import FakeActFactory
from gofedlib.projectsignature.parser import ProjectSignatureParser
from infra.system.artefacts.artefacts import ARTEFACT_GOLANG_PROJECTS_API_DIFF
from cmdsignature.parser import CmdSignatureParser
from gofedlib.utils import getScriptDir


def checkOptions(options):
    # Reject a trailing '/' in --prefix; it is joined with package names
    # using "%s/%s", so a trailing slash would produce a double slash.
    if options.prefix != "" and options.prefix[-1] == '/':
        logging.error("--prefix can not end with '/'")
        exit(1)


def displayApiDifference(data, options):
    """Print the API diff artefact in a +/-/~ human-readable form.

    Symbols are colored when options.color is set, prefixed with
    options.prefix when non-empty, and optionally sorted per category.
    """
    color = options.color
    # NOTE(review): this assignment is immediately dead — `prefix` is
    # re-assigned to the symbol-type label below and options.prefix is
    # read directly everywhere else.
    prefix = options.prefix

    data = data["data"]

    def print_removed(item):
        if color:
            return "%s-%s%s" % (RED, item, ENDC)
        else:
            return "-%s" % item

    def print_new(item):
        if color:
            return "%s+%s%s" % (BLUE, item, ENDC)
        else:
            return "+%s" % item

    def print_updated(item):
        if color:
            return "%s~%s%s" % (YELLOW, item, ENDC)
        else:
            return "~%s" % item

    # if no option set, print removed symbols
    if not options.all and not options.removed and not options.new \
            and not options.updated:
        options.removed = True

    new = []
    removed = []
    updated = []

    # print removed packages
    # NOTE(review): in unsorted mode these lines are only collected, never
    # printed (only the per-symbol loop below prints eagerly) — confirm
    # whether that is intended.
    if (options.removed or options.all) and "removedpackages" in data:
        for package in data["removedpackages"]:
            if options.prefix == "":
                line = print_removed(package)
            else:
                line = print_removed("%s/%s" % (options.prefix, package))
            if line:
                removed.append(line)

    # print new packages
    if (options.new or options.all) and "newpackages" in data:
        for package in data["newpackages"]:
            if options.prefix == "":
                line = print_new(package)
            else:
                line = print_new("%s/%s" % (options.prefix, package))
            if line:
                new.append(line)

    # print updated packages
    if "updatedpackages" in data:
        for package in data["updatedpackages"]:
            if options.prefix == "":
                package_name = package["package"]
            else:
                package_name = "%s/%s" % (options.prefix, package["package"])

            for symbol_type in package:
                if symbol_type == "package":
                    continue

                # Map the artefact key to the singular label used in output.
                if symbol_type == "functions":
                    prefix = "function"
                elif symbol_type == "types":
                    prefix = "type"
                elif symbol_type == "variables":
                    prefix = "variable"
                else:
                    raise ValueError(
                        "Unsupported symbol type: %s" % symbol_type)

                for state in package[symbol_type]:
                    for symbol in package[symbol_type][state]:
                        if state.startswith("new"):
                            line = print_new("%s: new %s: %s"
                                             % (package_name, prefix, symbol))
                            if line and (options.new or options.all):
                                new.append(line)
                                if not options.sorted:
                                    print line
                        if state.startswith("removed"):
                            line = print_removed(
                                "%s: %s removed: %s"
                                % (package_name, prefix, symbol))
                            if line and (options.removed or options.all):
                                removed.append(line)
                                if not options.sorted:
                                    print line
                        if state.startswith("updated"):
                            line = print_updated(
                                "%s: %s updated: %s"
                                % (package_name, prefix, symbol))
                            if line and (options.updated or options.all):
                                updated.append(line)
                                if not options.sorted:
                                    print line

    # Sorted mode prints everything at the end, grouped by category.
    if options.sorted:
        for line in sorted(new):
            print line
        for line in sorted(removed):
            print line
        for line in sorted(updated):
            print line


if __name__ == "__main__":
    cur_dir = getScriptDir(__file__)
    # Command signature YAML lives next to this script, same basename.
    gen_flags = "%s/%s.yml" % (cur_dir,
                               os.path.basename(__file__).split(".")[0])

    parser = CmdSignatureParser([gen_flags]).generate().parse()
    if not parser.check():
        exit(1)

    options = parser.options()
    args = parser.args()

    checkOptions(options)

    # Parse both project signatures up front so bad input fails fast.
    try:
        reference_project_signature = \
            ProjectSignatureParser().parse(options.reference)
    except ValueError as e:
        logging.error(e)
        exit(1)

    try:
        compare_with_project_signature = \
            ProjectSignatureParser().parse(options.comparewith)
    except ValueError as e:
        logging.error(e)
        exit(1)

    data = {"reference": {}, "compared_with": {}}

    # A signature is either an upstream repository (repo + commit) or a
    # local directory on disk.
    if reference_project_signature["provider_type"] == "upstream_repository":
        data["reference"] = {
            "type": "upstream_source_code",
            "repository": reference_project_signature["provider"],
            "commit": reference_project_signature["commit"]
        }
    else:
        data["reference"] = {
            "type": "user_directory",
            "resource": reference_project_signature["provider"]["location"]
        }

    if compare_with_project_signature["provider_type"] == \
            "upstream_repository":
        data["compared_with"] = {
            "type": "upstream_source_code",
            "repository": compare_with_project_signature["provider"],
            "commit": compare_with_project_signature["commit"]
        }
    else:
        data["compared_with"] = {
            "type": "user_directory",
            "resource": compare_with_project_signature["provider"]["location"]
        }

    # --dryrun swaps in a fake act factory so nothing is actually executed.
    if options.dryrun:
        act_factory = FakeActFactory()
    else:
        act_factory = ActFactory()

    try:
        data = act_factory.bake("go-exported-api-diff").call(data)
    except Exception as e:
        logging.error(e)
        exit(1)

    displayApiDifference(data[ARTEFACT_GOLANG_PROJECTS_API_DIFF], options)
Born in Srinagar. Known as Santosh in Indian art circles. Died in New Delhi on 10 March 1997. He won the Artist of the Year award in New Delhi in 1984. Since 1953, Santosh held over 30 one-man shows. In 1973, he received the Lalit Kala Akademi award and, in 1977, the Padma Shree. 1947-53: self-taught craftsman, painter, and weaver.
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

"""Floating IP action implementations"""

from openstackclient.common import command
from openstackclient.common import utils


class AddFloatingIP(command.Command):
    """Add floating IP address to server"""

    def get_parser(self, prog_name):
        # Extend the base parser with the two positional arguments this
        # command needs: the IP to attach and the target server.
        parser = super(AddFloatingIP, self).get_parser(prog_name)
        parser.add_argument(
            "ip_address",
            metavar="<ip-address>",
            help="IP address to add to server (name only)",
        )
        parser.add_argument(
            "server",
            metavar="<server>",
            help="Server to receive the IP address (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        # Resolve the server (by name or ID) via the compute API, then
        # attach the requested floating IP to it.
        servers_api = self.app.client_manager.compute.servers
        target = utils.find_resource(servers_api, parsed_args.server)
        target.add_floating_ip(parsed_args.ip_address)


class RemoveFloatingIP(command.Command):
    """Remove floating IP address from server"""

    def get_parser(self, prog_name):
        # Same positional arguments as AddFloatingIP, but the help text
        # describes detaching rather than attaching.
        parser = super(RemoveFloatingIP, self).get_parser(prog_name)
        parser.add_argument(
            "ip_address",
            metavar="<ip-address>",
            help="IP address to remove from server (name only)",
        )
        parser.add_argument(
            "server",
            metavar="<server>",
            help="Server to remove the IP address from (name or ID)",
        )
        return parser

    def take_action(self, parsed_args):
        # Resolve the server (by name or ID) via the compute API, then
        # detach the floating IP from it.
        servers_api = self.app.client_manager.compute.servers
        target = utils.find_resource(servers_api, parsed_args.server)
        target.remove_floating_ip(parsed_args.ip_address)
IT IS MY STRONGEST PRAYER THAT AS PEOPLE IN THE DAYS OF THE BIBLE ENCOUNTERED THE ANOINTING OF GOD AND BECAME COMPLETELY CHANGED, YOU WILL ALSO EXPERIENCE THE ANOINTING OF GOD THROUGH THIS WEBSITE. YOU ARE NOT ON THIS EARTH BY ACCIDENT BUT BY THE DIVINE PLAN OF GOD FOR DESTINY TO BE FULFILLED. I BELIEVE I CARRY THAT ANOINTING WHICH WILL MAKE YOU TOTALLY CHANGED AND FULFILL YOUR DESTINY. THE MISSION OF GLORYHOUSE BIBLE CHURCH IS TO BRING GLORY TO GOD BY DEMONSTRATING HIS POWER AND LOVE IN ADVANCING HIS KINGDOM TOWARDS THE TRANSFORMATION OF NATIONS, COMMUNITIES, FAMILIES AND INDIVIDUAL LIVES. 2. TO STAND IN PRAYER AND INTERCESSION FOR INDIVIDUALS, COMMUNITIES AND NATIONS TO ENABLE THEM TO EXPERIENCE THE MERCIES AND THE BLESSINGS OF GOD.
from django.core.management.base import BaseCommand, CommandError
from legislators.models import LegislatorsProject, LegislatorsItem, BillsProject, BillsItem
import core.models as cm
import core.tasks as ct
from django.db import transaction
from django.utils import timezone
import argparse
import sys
import pyopenstates
import zipfile
import os
import json


class Command(BaseCommand):
    help = """
           Creates job status objects for each of the states in openstates.
           A utility to update all legislators using the openstates API
           usage: python manage.py legislators_update
           """

    def handle(self, *args, **options):
        """Ensure one LongJobState row exists per openstates state.

        Creates any missing per-state job rows, and refreshes the stored
        job_timeout on existing rows when the configured value changes.
        """
        metadata = pyopenstates.get_metadata()
        num_jobs_created = 0
        num_jobs_modified = 0
        job_timeout = 60 * 60 * 2  # 2 hours, in seconds

        for state in metadata:
            obj, created = cm.LongJobState.objects.get_or_create(
                app_name="legislators",
                name="update_state|{}|{}".format(
                    state["name"], state["abbreviation"]),
                defaults={
                    "job_period": 60 * 60 * 24,  # run daily (seconds)
                    "job_timeout": job_timeout,
                    # Backdate by one day so the first update runs
                    # immediately.  NOTE: timedelta's first positional
                    # argument is *days*, so the value must be passed as
                    # seconds=... (the old code created a job 86400 days in
                    # the past).
                    "most_recent_update":
                        timezone.now() - timezone.timedelta(seconds=24 * 60 * 60),
                },
            )
            if created:
                sys.stdout.write(
                    "Created job for state: {}\n".format(state["name"]))
                sys.stdout.flush()
                num_jobs_created += 1
            elif obj.job_timeout != job_timeout:
                # Keep previously-created rows in sync with the configured
                # timeout above.
                obj.job_timeout = job_timeout
                obj.save()
                sys.stdout.write(
                    "Modified timeout for state: {}\n".format(state["name"]))
                sys.stdout.flush()
                num_jobs_modified += 1

        sys.stdout.write("Created {} jobs\n".format(num_jobs_created))
        sys.stdout.write("Modified {} jobs\n".format(num_jobs_modified))
        sys.stdout.write("DONE\n")
        sys.stdout.flush()
Cultural changes on a global and local scale have been fueled in one way or another by TV shows. Locally, variety shows are consistently influencing the way most Filipinos talk. And social scientists believe that teleseryes, like FPJ’s Ang Probinsyano, effectively preserve and promote Filipino family values and patriotism in a society that’s bombarded by various influences from abroad. These examples are just the tip of the iceberg. In an interview with The Hollywood Reporter, director Christopher Nolan said, “Sound is as important as picture. I don’t agree with the idea that you can only achieve clarity through dialogue. Clarity of story, clarity of emotions – I try to achieve that in a very layered way using all the different things at my disposal – picture and sound.” For more information about LG’s TVs and sound bars, please visit www.lg.com/ph.
# -*- coding: utf-8 -*-
"""API Serializers"""
from collections import OrderedDict
from itertools import chain
from json import dumps

from django.conf import settings
from django.core.paginator import Paginator
from drf_cached_instances.models import CachedQueryset
from rest_framework.reverse import reverse
from rest_framework.serializers import (
    ModelSerializer, PrimaryKeyRelatedField, SerializerMethodField,
    ValidationError)
from rest_framework.utils.serializer_helpers import ReturnDict

from tools.resources import Collection, CollectionChangeset
from .cache import Cache
from .models import (
    Browser, Feature, Maturity, Section, Specification, Support, Version)
from .serializers import (
    BrowserSerializer, FeatureSerializer, MaturitySerializer,
    SectionSerializer, SpecificationSerializer, SupportSerializer,
    VersionSerializer, omit_some)


class ViewBrowserSerializer(BrowserSerializer):
    """Browser representation without the reverse 'versions' link."""
    class Meta(BrowserSerializer.Meta):
        fields = omit_some(BrowserSerializer.Meta.fields, 'versions')


class ViewMaturitySerializer(MaturitySerializer):
    """Maturity representation without the reverse 'specifications' link."""
    class Meta(MaturitySerializer.Meta):
        fields = omit_some(MaturitySerializer.Meta.fields, 'specifications')


class ViewSectionSerializer(SectionSerializer):
    """Section representation without the reverse 'features' link."""
    class Meta(SectionSerializer.Meta):
        fields = omit_some(SectionSerializer.Meta.fields, 'features')


class ViewSpecificationSerializer(SpecificationSerializer):
    """Specification representation without the reverse 'sections' link."""
    class Meta(SpecificationSerializer.Meta):
        fields = omit_some(SpecificationSerializer.Meta.fields, 'sections')


class ViewVersionSerializer(VersionSerializer):
    """Version representation without the reverse 'supports' link."""
    class Meta(VersionSerializer.Meta):
        fields = omit_some(VersionSerializer.Meta.fields, 'supports')
        read_only_fields = omit_some(
            VersionSerializer.Meta.read_only_fields, 'supports')


# Map resource names to model, view serializer classes
view_cls_by_name = {
    'features': (Feature, FeatureSerializer),
    'supports': (Support, SupportSerializer),
    'maturities': (Maturity, ViewMaturitySerializer),
    'specifications': (Specification, ViewSpecificationSerializer),
    'sections': (Section, ViewSectionSerializer),
    'browsers': (Browser, ViewBrowserSerializer),
    'versions': (Version, ViewVersionSerializer),
}


class ViewFeatureListSerializer(ModelSerializer):
    """Get list of features"""
    url = SerializerMethodField()

    def get_url(self, obj):
        """Return the absolute URL of the viewfeatures detail endpoint."""
        return reverse(
            'viewfeatures-detail', kwargs={'pk': obj.id},
            request=self.context['request'])

    class Meta:
        model = Feature
        fields = (
            'url', 'id', 'slug', 'mdn_uri', 'experimental', 'standardized',
            'stable', 'obsolete', 'name')


class DjangoResourceClient(object):
    """Implement tools.client.Client using Django native functions"""

    def url(self, resource_type, resource_id=None):
        """Use Django reverse to determine URL."""
        if resource_type == 'maturities':
            # Irregular plural: the route name is 'maturity-...', not
            # 'maturitie-...'.
            singular = 'maturity'
        else:
            singular = resource_type[:-1]
        if resource_id:
            return reverse(
                singular + '-detail', kwargs={'pk': resource_id})
        else:
            return reverse(singular + '-list')

    def open_changeset(self):
        """Skip opening changesets (opened at the request/view level)."""
        pass

    def close_changeset(self):
        """Skip closing changesets (closed at the request/view level)."""
        pass

    def update(self, resource_type, resource_id, resource):
        """Update an existing instance through its DRF serializer.

        The wire-format 'links' sub-dict is flattened into the payload
        before validation, matching what the serializers expect.
        """
        model_cls, serializer_cls = view_cls_by_name[resource_type]
        instance = model_cls.objects.get(id=resource_id)
        data = resource.copy()
        links = data.pop('links', {})
        data.update(links)
        serializer = serializer_cls(instance=instance, data=data)
        # Data was already validated in FeatureExtra.is_valid(); failure
        # here is a programming error, not user error.
        assert serializer.is_valid(), serializer.errors
        serializer.save()

    def create(self, resource_type, resource):
        """Create a new instance through its DRF serializer.

        Returns a dict with the new instance's database 'id'.
        """
        model_cls, serializer_cls = view_cls_by_name[resource_type]
        data = resource.copy()
        links = data.pop('links', {})
        data.update(links)
        serializer = serializer_cls(data=data)
        assert serializer.is_valid(), serializer.errors
        obj = serializer.save()
        return {'id': obj.id}

    def delete(self, resource_type, resource_id):
        """Refuse deletion; view-feature updates never delete resources."""
        raise NotImplementedError("delete not implemented for safety")


class FeatureExtra(object):
    """Handle new and updated data in a view_feature update"""

    def __init__(self, data, feature, context):
        # data: deserialized '_view_extra' payload from the PUT body
        # feature: the target Feature instance being updated
        # context: DRF serializer context (includes the request)
        self.data = data
        self.feature = feature
        self.context = context

    def is_valid(self):
        """Validate the linked data"""
        self.errors = {}
        self._process_data()
        self._validate_changes()
        return not self.errors

    def load_resource(self, resource_cls, data):
        """Load a resource, converting data to look like wire data

        Conversions:
        - Stringify IDs (5 -> "5")
        - Convert Date to ISO 8601 (2015-02-17)
        """
        rdata = {}
        # Link fields (plus 'id') get their values stringified; everything
        # else passes through untouched.
        wlinks = getattr(resource_cls, '_writeable_link_fields', {})
        rlinks = getattr(resource_cls, '_readonly_link_fields', {})
        link_names = set(['id'] + list(wlinks.keys()) + list(rlinks.keys()))
        for key, value in data.items():
            if key in link_names:
                if isinstance(value, list):
                    raw_ids = value
                    unlist = False
                else:
                    # Single ID: wrap for uniform handling, unwrap at the end.
                    raw_ids = [value]
                    unlist = True
                ids = []
                for i in raw_ids:
                    if i is None:
                        ids.append(None)
                    else:
                        ids.append(str(i))
                if unlist:
                    rdata[key] = ids[0]
                else:
                    rdata[key] = ids
            else:
                rdata[key] = value
        return resource_cls(**rdata)

    def _process_data(self):
        """Load the linked data and compare to current data."""
        assert not hasattr(self, 'changes')
        assert hasattr(self, 'errors')
        r_by_t = Collection.resource_by_type

        # Create and load collection of new data
        new_collection = Collection()
        for rtype, items in self.data.items():
            resource_cls = r_by_t.get(rtype)
            if resource_cls:
                for seq, json_api_item in enumerate(items):
                    item = json_api_item.copy()
                    links = item.pop('links', {})
                    item.update(links)
                    resource = self.load_resource(resource_cls, item)
                    # _seq records the item's position in the PUT payload so
                    # validation errors can point back at it.
                    resource._seq = seq
                    new_collection.add(resource)

        # Create native representation of current feature data
        current_collection = Collection(DjangoResourceClient())
        feature_serializer = ViewFeatureSerializer(context=self.context)
        current_feature = feature_serializer.to_representation(self.feature)
        current_extra = current_feature.pop('_view_extra')
        del current_extra['meta']

        # Load feature into new and current collection
        current_feature_resource = self.load_resource(
            r_by_t['features'], current_feature)
        current_collection.add(current_feature_resource)
        current_feature.update(self.feature._in_extra)
        current_feature['id'] = str(current_feature['id'])
        resource_feature = self.load_resource(
            r_by_t['features'], current_feature)
        # _seq None marks items that did not come from the PUT payload.
        resource_feature._seq = None
        new_collection.add(resource_feature)

        # Populate collection of current data
        for rtype, items in current_extra.items():
            resource_cls = r_by_t[rtype]
            for item in items:
                resource = self.load_resource(resource_cls, item)
                current_collection.add(resource)

        # Add existing items not explicit in PUT content
        # This avoids 'delete' changes
        new_items = new_collection.get_all_by_data_id()
        for data_id, item in current_collection.get_all_by_data_id().items():
            if data_id not in new_items:
                resource = r_by_t[item._resource_type]()
                resource.from_json_api(item.to_json_api())
                resource._seq = None
                new_collection.add(resource)

        # Add existing items used in new collection to current collection
        # This avoids incorrect 'new' changes
        existing_items = current_collection.get_all_by_data_id()
        for data_id, item in new_collection.get_all_by_data_id().items():
            if item.id:
                item_id = item.id.id
                int_id = None
                existing_item = existing_items.get(data_id)
                try:
                    int_id = int(item_id)
                except ValueError:
                    # Non-numeric IDs denote new resources; nothing to fetch.
                    pass
                if int_id and (existing_item is None):
                    rtype = item._resource_type
                    resource_cls = r_by_t[rtype]
                    model_cls, serializer_cls = view_cls_by_name[rtype]
                    obj = model_cls.objects.get(id=int_id)
                    serializer = serializer_cls()
                    data = serializer.to_representation(obj)
                    resource = self.load_resource(resource_cls, data)
                    current_collection.add(resource)

        # Load the diff
        self.changeset = CollectionChangeset(
            current_collection, new_collection)
        assert not self.changeset.changes.get('deleted')

    def add_error(self, resource_type, seq, error_dict):
        """Add a validation error for a linked resource."""
        self.errors.setdefault(
            resource_type, {}).setdefault(seq, {}).update(error_dict)

    def _validate_changes(self):
        """Validate the changes.

        Validation includes:
        - Field validation of properties
        - Disallow adding features outside of the target feature's subtree
        - Disallow additions of maturities

        Validation of links is not attempted, since most validation errors
        will be relations to new resources.  This may miss links to
        "existing" resources that aren't in the database, but those will
        be DoesNotExist exceptions in _process_data.
        """
        assert hasattr(self, 'changeset')
        assert hasattr(self, 'errors')
        assert not self.errors

        new_collection = self.changeset.new_collection
        resource_feature = new_collection.get('features', str(self.feature.id))

        # Validate with DRF serializers
        for data_id, item in new_collection.get_all_by_data_id().items():
            rtype = item._resource_type
            model_cls, serializer_cls = view_cls_by_name[rtype]
            seq = getattr(item, '_seq')
            if seq is None:
                # Items with no _seq were not in the PUT payload; skip them.
                continue

            # Does the ID imply an existing instance?
            int_id = None
            instance = None
            assert item.id
            item_id = item.id.id
            try:
                int_id = int(item_id)
            except ValueError:
                pass
            else:
                instance = model_cls.objects.get(id=int_id)

            # Validate the data with DRF serializer
            data = item.to_json_api()[rtype]
            links = data.pop('links', {})
            data.update(links)
            serializer = serializer_cls(instance=instance, data=data)
            if not serializer.is_valid():
                errors = {}
                # Discard errors in link fields, for now
                for fieldname, error in serializer.errors.items():
                    if fieldname not in links:
                        errors[fieldname] = error
                if errors:
                    self.add_error(rtype, seq, errors)

        # Validate that features are in the feature tree
        target_id = resource_feature.id.id
        for feature in new_collection.get_resources('features'):
            if feature.id.id == target_id:
                continue

            # Walk up the parent chain until the target feature (or a root)
            # is reached.
            f = feature
            while (f and f.parent is not None and
                    f.parent.id != target_id):
                f = new_collection.get('features', f.parent.id)
            if f is None or f.parent.id is None:
                error = (
                    "Feature must be a descendant of feature %s." % target_id)
                self.add_error('features', feature._seq, {'parent': error})

        # Validate that "expert" objects are not added
        expert_resources = set((
            'maturities', 'specifications', 'versions', 'browsers'))
        add_error = (
            'Resource can not be created as part of this update. Create'
            ' first, and try again.')
        for item in self.changeset.changes['new'].values():
            if item._resource_type in expert_resources:
                self.add_error(
                    item._resource_type, item._seq, {'id': add_error})

        # Validate that "expert" objects are not changed
        change_err = (
            'Field can not be changed from %s to %s as part of this update.'
            ' Update the resource by itself, and try again.')
        for item in self.changeset.changes['changed'].values():
            if item._resource_type in expert_resources:
                rtype = item._resource_type
                new_json = dict(item.to_json_api()[rtype])
                new_json.update(new_json.pop('links', {}))
                orig_json = dict(item._original.to_json_api()[rtype])
                orig_json.update(orig_json.pop('links', {}))
                for key, value in orig_json.items():
                    if value != new_json.get(key, "(missing)"):
                        err = change_err % (dumps(value), dumps(new_json[key]))
                        self.add_error(rtype, item._seq, {key: err})

    def save(self, **kwargs):
        """Commit changes to linked data"""
        self.changeset.change_original_collection()

        # Adding sub-features will change the MPTT tree through direct SQL.
        # Load the new tree data from the database before parent serializer
        # overwrites it with old values.
        tree_attrs = ['lft', 'rght', 'tree_id', 'level', 'parent']
        db_feature = Feature.objects.only(*tree_attrs).get(id=self.feature.id)
        for attr in tree_attrs:
            setattr(self.feature, attr, getattr(db_feature, attr))


class ViewFeatureExtraSerializer(ModelSerializer):
    """Linked resources and metadata for ViewFeatureSerializer."""
    browsers = ViewBrowserSerializer(source='all_browsers', many=True)
    features = FeatureSerializer(source='child_features', many=True)
    maturities = ViewMaturitySerializer(source='all_maturities', many=True)
    sections = ViewSectionSerializer(source='all_sections', many=True)
    specifications = ViewSpecificationSerializer(source='all_specs', many=True)
    supports = SupportSerializer(source='all_supports', many=True)
    versions = ViewVersionSerializer(source='all_versions', many=True)
    meta = SerializerMethodField()

    def add_sources(self, obj):
        """Add the sources used by the serializer fields."""
        page = self.context['request'].GET.get('page', 1)
        per_page = settings.PAGINATE_VIEW_FEATURE
        if self.context['include_child_pages']:
            # Paginate the full descendant tree
            child_queryset = self.get_all_descendants(obj, per_page)
            paginated_child_features = Paginator(child_queryset, per_page)
            obj.page_child_features = paginated_child_features.page(page)
            obj.child_features = obj.page_child_features.object_list
        else:
            # Just the row-level descendants, but un-paginated
            child_queryset = self.get_row_descendants(obj)
            obj.child_features = list(child_queryset.all())

        # Load the remaining related instances
        section_pks = set(obj.sections.values_list('id', flat=True))
        support_pks = set(obj.supports.values_list('id', flat=True))
        for feature in obj.child_features:
            section_pks.update(feature.sections.values_list('id', flat=True))
            support_pks.update(feature.supports.values_list('id', flat=True))

        obj.all_sections = list(CachedQueryset(
            Cache(), Section.objects.all(), sorted(section_pks)))
        obj.all_supports = list(CachedQueryset(
            Cache(), Support.objects.all(), sorted(support_pks)))

        # Walk the relation graph breadth-wise: sections -> specifications
        # -> maturities, and supports -> versions -> browsers.
        specification_pks = set()
        for section in obj.all_sections:
            specification_pks.add(section.specification.pk)
        obj.all_specs = list(CachedQueryset(
            Cache(), Specification.objects.all(), sorted(specification_pks)))

        maturity_pks = set()
        for specification in obj.all_specs:
            maturity_pks.add(specification.maturity.pk)
        obj.all_maturities = list(CachedQueryset(
            Cache(), Maturity.objects.all(), sorted(maturity_pks)))

        version_pks = set()
        for support in obj.all_supports:
            version_pks.add(support.version.pk)
        obj.all_versions = list(CachedQueryset(
            Cache(), Version.objects.all(), sorted(version_pks)))

        browser_pks = set()
        for version in obj.all_versions:
            browser_pks.add(version.browser.pk)
        obj.all_browsers = list(CachedQueryset(
            Cache(), Browser.objects.all(), sorted(browser_pks)))

    def get_all_descendants(self, obj, per_page):
        """Return a CachedQueryset of all the descendants

        This includes row features that model rows in the MDN table, and
        page features that model sub-pages on MDN, which may have row and
        subpage features of their own.
        """
        if isinstance(obj, Feature):
            # It's a real Feature, not a cached proxy Feature
            obj.descendant_count = obj.get_descendant_count()
            descendant_pks = obj.get_descendants().values_list('pk', flat=True)
        elif obj.descendant_count <= per_page:
            # The cached PK list is enough to populate descendant_pks
            descendant_pks = obj.descendants.values_list('id', flat=True)
        else:
            # Load the real object to get the full list of descendants
            real_obj = Feature.objects.get(id=obj.id)
            descendant_pks = real_obj.get_descendants().values_list(
                'pk', flat=True)

        return CachedQueryset(
            Cache(), Feature.objects.all(), descendant_pks)

    def get_row_descendants(self, obj):
        """Return a CachedQueryset of just the row descendants

        This includes row features, and subfeatures of rows that are also
        row features.  See http://bit.ly/1MUSEFL for one example of
        splitting a large table into a hierarchy of features.
        """
        row_descendant_pks = obj.row_descendants.values_list('id', flat=True)
        return CachedQueryset(
            Cache(), Feature.objects.all(), row_descendant_pks)

    def to_representation(self, obj):
        """Add additional data for the ViewFeatureSerializer.

        For most features, all the related data is cachable, and no database
        reads are required with a warm cache.

        For some features, such as the root node for CSS, the subtree is
        huge, and the descendant feature PKs won't fit in the cache.  In
        these cases, a couple of database reads are required to get the
        descendant feature PKs, which are then paginated to reduce the huge
        amount of related data.
        """
        # Load the paginated descendant features
        if obj is None:
            # This happens when OPTIONS is called from browsable API
            return None
        self.add_sources(obj)
        ret = super(ViewFeatureExtraSerializer, self).to_representation(obj)
        return ReturnDict(ret, serializer=self)

    def find_languages(self, obj):
        """Find languages used in feature view."""
        languages = set()

        def add_langs(item):
            # Translated fields are dicts keyed by language code; plain
            # strings and None have no 'keys' and are skipped.
            if hasattr(item, 'keys'):  # pragma: nocover
                languages.update(item.keys())

        for browser in obj.all_browsers:
            add_langs(browser.name)
            add_langs(browser.note)

        for feature in chain([obj], obj.child_features):
            add_langs(feature.mdn_uri)
            add_langs(feature.name)

        for maturity in obj.all_maturities:
            add_langs(maturity.name)

        for section in obj.all_sections:
            add_langs(section.number)
            add_langs(section.name)
            add_langs(section.subpath)
            add_langs(section.note)

        for spec in obj.all_specs:
            add_langs(spec.name)
            add_langs(spec.uri)

        for support in obj.all_supports:
            add_langs(support.note)

        for version in obj.all_versions:
            add_langs(version.release_notes_uri)
            add_langs(version.note)

        if 'zxx' in languages:
            # No linguistic content
            languages.remove('zxx')
        if 'en' in languages:
            # English is always sorted first when present.
            languages.remove('en')
            return ['en'] + sorted(languages)
        else:
            return sorted(languages)

    def significant_changes(self, obj):
        """Determine what versions are important for support changes.

        A version is important if it is the first version with support
        information, or it changes support from the previous version.
        """
        # Create lookup of ID/PK -> instances
        browsers = {}
        for browser in obj.all_browsers:
            # Cache version order
            browser.version_ids = browser.versions.values_list('id', flat=True)
            browsers[browser.id] = browser
        versions = dict(
            [(version.id, version) for version in obj.all_versions])
        features = dict(
            [(feature.id, feature) for feature in obj.child_features])
        features[obj.id] = obj

        # Create index of supported browser / version / features
        supported = []
        for support in obj.all_supports:
            version = versions[support.version.pk]
            browser = browsers[version.browser.pk]
            version_order = browser.version_ids.index(version.id)
            feature = features[support.feature.pk]
            # Tuple of every support attribute that, when changed between
            # consecutive versions, makes the version "significant".
            support_attrs = (
                support.support,
                support.prefix,
                support.prefix_mandatory,
                support.alternate_name,
                support.alternate_mandatory,
                support.requires_config,
                support.default_config,
                support.protected,
                repr(support.note),
            )
            supported.append((
                feature.id, browser.id, version_order, version.id,
                support.id, support_attrs))
        # Sorting groups rows by feature, then browser, then version order.
        supported.sort()

        # Identify significant browser / version / supports by feature
        sig_features = {}
        last_f_id = None
        last_b_id = None
        last_support = None
        for f_id, b_id, _, v_id, s_id, support in supported:
            if last_f_id != f_id:
                last_support = None
                last_f_id = f_id
            if last_b_id != b_id:
                last_support = None
                last_b_id = b_id

            if last_support != support:
                sig_feature = sig_features.setdefault(f_id, OrderedDict())
                sig_browser = sig_feature.setdefault(str(b_id), [])
                sig_browser.append(str(s_id))
                last_support = support

        # Order significant features
        significant_changes = OrderedDict()
        for f_id in chain([obj.id], [f.id for f in obj.child_features]):
            significant_changes[str(f_id)] = sig_features.get(f_id, {})

        return significant_changes

    def browser_tabs(self, obj):
        """Section and order the browser tabs.

        TODO: Move this logic into the database, API
        """
        # slug -> (tab section, sort order); unknown slugs fall into the
        # "Non-Browser Environments" section after the known ones.
        known_browsers = dict((
            ('chrome', ('Desktop Browsers', 1)),
            ('firefox', ('Desktop Browsers', 2)),
            ('internet_explorer', ('Desktop Browsers', 3)),
            ('opera', ('Desktop Browsers', 4)),
            ('safari', ('Desktop Browsers', 5)),
            ('android', ('Mobile Browsers', 6)),
            ('chrome_for_android', ('Mobile Browsers', 7)),
            ('chrome_mobile', ('Mobile Browsers', 8)),
            ('firefox_mobile', ('Mobile Browsers', 9)),
            ('ie_mobile', ('Mobile Browsers', 10)),
            ('opera_mini', ('Mobile Browsers', 11)),
            ('opera_mobile', ('Mobile Browsers', 12)),
            ('safari_mobile', ('Mobile Browsers', 13)),
            ('blackberry', ('Mobile Browsers', 14)),
            ('firefox_os', ('Non-Browser Environments', 15)),
        ))
        next_other = 16
        sections = [
            'Desktop Browsers', 'Mobile Browsers', 'Non-Browser Environments']
        raw_tabs = dict((section, []) for section in sections)

        for browser in obj.all_browsers:
            try:
                section, order = known_browsers[browser.slug]
            except KeyError:
                section, order = ('Non-Browser Environments', next_other)
                next_other += 1
            raw_tabs[section].append((order, browser.id))

        tabs = []
        for section in sections:
            browsers = raw_tabs[section]
            if browsers:
                browsers.sort()
                tabs.append(OrderedDict((
                    ('name', {'en': section}),
                    ('browsers', [str(pk) for _, pk in browsers]),
                )))
        return tabs

    def pagination(self, obj):
        """ Determine pagination for large feature trees.

        If page children are not included (the default), then no pagination
        is used, but the pagination object remains to make client logic
        easier.
        """
        pagination = OrderedDict((
            ('previous', None),
            ('next', None),
        ))
        if self.context['include_child_pages']:
            # When full descendant list, use pagination
            # The list can get huge when asking for root features like web-css
            pagination['count'] = obj.descendant_count
            url_kwargs = {'pk': obj.id}
            if self.context['format']:
                url_kwargs['format'] = self.context['format']
            request = self.context['request']
            url = reverse(
                'viewfeatures-detail', kwargs=url_kwargs, request=request)
            if obj.page_child_features.has_previous():
                page = obj.page_child_features.previous_page_number()
                pagination['previous'] = (
                    "%s?child_pages=1&page=%s" % (url, page))
            if obj.page_child_features.has_next():
                page = obj.page_child_features.next_page_number()
                pagination['next'] = (
                    "%s?child_pages=1&page=%s" % (url, page))
        else:
            # Don't paginate results. The client probably wants to generate a
            # complete table, so pagination would get in the way.
            pagination['count'] = len(obj.child_features)
        return {'linked.features': pagination}

    def ordered_notes(self, obj, sig_features, tabs):
        """Gather notes from significant features."""
        supports = dict(
            [(str(support.id), support) for support in obj.all_supports])
        notes = []
        # Collect noted supports in tab order so footnote numbers follow
        # the rendered table layout.
        for browsers in sig_features.values():
            for section in tabs:
                for browser in section['browsers']:
                    sig_supports = browsers.get(browser, [])
                    for sig_support_pk in sig_supports:
                        support = supports[sig_support_pk]
                        if support.note:
                            notes.append(sig_support_pk)
        # Map support PK -> 1-based footnote number.
        return OrderedDict((note, i) for i, note in enumerate(notes, 1))

    def get_meta(self, obj):
        """Assemble the metadata for the feature view."""
        significant_changes = self.significant_changes(obj)
        browser_tabs = self.browser_tabs(obj)
        include_child_pages = self.context['include_child_pages']
        pagination = self.pagination(obj)
        languages = self.find_languages(obj)
        notes = self.ordered_notes(
            obj, significant_changes, browser_tabs)
        meta = OrderedDict((
            ('compat_table', OrderedDict((
                ('supports', significant_changes),
                ('tabs', browser_tabs),
                ('child_pages', include_child_pages),
                ('pagination', pagination),
                ('languages', languages),
                ('notes', notes),
            ))),))
        return meta

    def to_internal_value(self, data):
        """Defer validation of linked data to a FeatureExtra instance."""
        self.instance = self.parent.instance
        assert self.instance
        self.add_sources(self.instance)
        self.instance._in_extra = self.parent._in_extra

        extra = FeatureExtra(data, self.instance, self.context)
        if extra.is_valid():
            return {'_view_extra': extra}
        else:
            assert extra.errors
            raise ValidationError(extra.errors)

    class Meta:
        model = Feature
        fields = (
            'browsers', 'versions', 'supports', 'maturities',
            'specifications', 'sections', 'features', 'meta')


class ViewFeatureSerializer(FeatureSerializer):
    """Feature Serializer, plus related data and MDN browser compat logic"""
    _view_extra = ViewFeatureExtraSerializer(source='*')

    class Meta(FeatureSerializer.Meta):
        fields = FeatureSerializer.Meta.fields + ('_view_extra',)

    def to_internal_value(self, data):
        """Stash link data for FeatureExtra before standard validation."""
        self._in_extra = {
            'sections': data.pop('sections', []),
            'supports': data.pop('supports', []),
            'children': data.pop('children', []),
        }
        data = super(ViewFeatureSerializer, self).to_internal_value(data)
        return data

    def save(self, *args, **kwargs):
        """Save the feature plus linked elements.

        The save is done using DRF conventions; the _view_extra field is set
        to an object (FeatureExtra) that will save linked elements.  The only
        wrinkle is that the changeset should not be auto-closed by any saved
        items.
        """
        changeset = self.context['request'].changeset
        if changeset.id:
            # Already in an open changeset - client will close
            close_changeset = False
        else:
            close_changeset = True
            assert not changeset.user_id
            changeset.user = self.context['request'].user
            changeset.save()

        ret = super(ViewFeatureSerializer, self).save(*args, **kwargs)
        if hasattr(ret, '_view_extra'):
            ret._view_extra.save(*args, **kwargs)

        if close_changeset:
            changeset.closed = True
            changeset.save()
        return ret


class ViewFeatureRowChildrenSerializer(ViewFeatureSerializer):
    """Adjust serializer when page children are omitted."""
    children = PrimaryKeyRelatedField(
        many=True, queryset=Feature.objects.all(), source='row_children')
These Scrub Caps are just like the cap your Doctor or Nurse wears. Your little doctor or nurse will have hours of fun wearing this REAL colorful Scrub Cap. Caps have an adjustable tie on back for a perfect fit. Unisex for both Girl and Boy Doctors. These are REAL working Stethoscopes just like the ones used in Hospitals and Doctors Offices!
def detect_circles(np_image):
    """Detect the most prominent circle in an image via the Hough transform.

    Intended to locate the "blob" indicating the point of contact between
    spheres. Radii between one quarter and one half of the image's larger
    dimension are tested, and only the single best-supported circle across
    all tested radii is kept.

    Parameters:
        np_image: 2-D array (edge/binary image) as expected by
            skimage.transform.hough_circle.

    Returns:
        list of (row, col) tuples with the centre of the detected circle;
        empty if no accumulator peak was found.
    """
    # Imports kept local, matching the original design (heavy deps loaded
    # only when detection actually runs). The pylab import and its
    # pl.close('all') call were removed: closing every open matplotlib
    # figure is an unwanted side effect in a pure detection routine.
    import numpy as np
    from skimage.transform import hough_circle
    from skimage.feature import peak_local_max

    # Radius search range: between a quarter and a half of the larger side.
    min_rad = int(max(np_image.shape[0], np_image.shape[1]) / 4.0)
    max_rad = int(max(np_image.shape[0], np_image.shape[1]) / 2.0)
    step = 1

    hough_radii = np.arange(min_rad, max_rad, step, np.float64)
    hough_res = hough_circle(np_image, hough_radii)

    centers = []  # one candidate centre per tested radius
    accums = []   # accumulator strength of each candidate
    radii = []    # radius of each candidate

    # For each radius, extract the single strongest peak.
    for radius, h in zip(hough_radii, hough_res):
        peaks = peak_local_max(h, num_peaks=1)
        centers.extend(peaks)
        accums.extend(h[peaks[:, 0], peaks[:, 1]])
        radii.append(radius)

    # Keep only the best-supported candidate overall. (The original also
    # computed the circle perimeter pixels here, but never used or returned
    # them; that dead code was dropped.)
    C = []
    for idx in np.argsort(accums)[::-1][:1]:
        center_x, center_y = centers[idx]
        C.append((center_x, center_y))

    return C
New 2018 Chevrolet Silverado 1500 LT Silver Ice in Vernon, TX. 3GCUKREC9JG624783 For Sale near Altus OK, Burkburnett, Wichita Falls. This 2018 Chevrolet Silverado 1500 LT Crew Cab 4x4 is proudly offered by Vernon Auto Group. Feel the power of the 5.3L V8 Ecotec3 Engine as you take on all terrain with ease. Sit back in comfortable black cloth seats and let the state of the art navigation system keep you on track. Keep your family entertained with the integrated WiFi hotspot that can connect multiple devices at once. Call today for a deal you will not want to miss out on!
#!/usr/bin/env python from __future__ import absolute_import import sys from distutils.core import Command import subprocess from setuptools import setup import defusedxml class PyTest(Command): user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): errno = subprocess.call([sys.executable, "tests.py"]) raise SystemExit(errno) long_description = [] with open("README.txt") as f: long_description.append(f.read()) with open("CHANGES.txt") as f: long_description.append(f.read()) setup( name="defusedxml", version=defusedxml.__version__, cmdclass={"test": PyTest}, packages=["defusedxml"], author="Christian Heimes", author_email="christian@python.org", maintainer="Christian Heimes", maintainer_email="christian@python.org", url="https://github.com/tiran/defusedxml", download_url="https://pypi.python.org/pypi/defusedxml", keywords="xml bomb DoS", platforms="all", license="PSFL", description="XML bomb protection for Python stdlib modules", long_description="\n".join(long_description), classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Python Software Foundation License", "Natural Language :: English", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Text Processing :: Markup :: XML", ], )
The work of Dutch artist Theo Jansen will grace the ArtScience Museum at Singapore’s Marina Bay Sands casino resort until the end of September. In a press release issued on Friday, the property said it would show “Wind Walkers: Theo Jansen’s Strandbeests” at the museum, which is the main venue for cultural displays at Marina Bay Sands (pictured). The space opened in February 2011 and has hosted exhibitions by Leonardo da Vinci, M.C. Escher, Salvador Dalí, Andy Warhol and Vincent Van Gogh. With Wind Walkers, the casino, owned by the United States-based operator Las Vegas Sands Corp, has scored a first for Southeast Asia; Jansen’s moving sculptures have never before been displayed there. The exhibition of 13 big, wind-powered machines that move like imaginary animals sees everyday materials blended with a dose of physics to create art. Jansen designed the machines for work on the beaches of the Netherlands, where they were intended to pile up sand to form dunes to protect the coast of the low-lying country from any rise in sea level caused by global climate change. Notable Strandbeests on show in Wind Walkers include Animaris Siamesis, which weighs more than 200kg and has 72 legs to propel it along a beach. The exhibition opened on Saturday. Marina Bay Sands has convention and exhibition facilities, 2,560 hotel rooms and suites, the rooftop Sands SkyPark, a shopping mall, restaurants, a theatre and a plaza for outdoor events. Just last week, the property announced it would open next year a Marquee-brand nightclub with an indoor Ferris wheel. The nightclub will occupy a space formerly used by one of the two theatres in the casino resort designed to attract live entertainment. Las Vegas Sands reported a 202.7-percent jump in company-wide first quarter profit in April. Group-wide, on a U.S. generally accepted accounting principles basis, Las Vegas Sands’ net income for the three months ended March 31 was US$1.46 billion.
The Marina Bay Sands posted adjusted property earnings before interest, taxation, depreciation and amortisation of US$541 million, up by 48.6 percent. Casino revenue at Marina Bay Sands was up by 32.5 percent year-on-year to US$652 million in the three months to March 31.
""" Test Project """ import time from collections import defaultdict from hstestcase import HSTestCase from hubstorage import ValueTooLarge class BatchUploaderTest(HSTestCase): def _job_and_writer(self, **writerargs): self.project.push_job(self.spidername) job = self.start_job() bu = self.hsclient.batchuploader w = bu.create_writer(job.items.url, auth=self.auth, **writerargs) return job, w def test_writer_batchsize(self): job, w = self._job_and_writer(size=10) for x in xrange(111): w.write({'x': x}) w.close() # this works only for small batches (previous size=10 and small data) # as internally HS may commit a single large request as many smaller # commits, each with different timestamps groups = defaultdict(int) for doc in job.items.list(meta=['_ts']): groups[doc['_ts']] += 1 self.assertEqual(len(groups), 12) def test_writer_maxitemsize(self): job, w = self._job_and_writer() m = w.maxitemsize self.assertRaisesRegexp( ValueTooLarge, 'Value exceeds max encoded size of 1048576 bytes:' ' \'{"b": "x+\\.\\.\\.\'', w.write, {'b': 'x' * m}) self.assertRaisesRegexp( ValueTooLarge, 'Value exceeds max encoded size of 1048576 bytes:' ' \'{"b+\\.\\.\\.\'', w.write, {'b'*m: 'x'}) self.assertRaisesRegexp( ValueTooLarge, 'Value exceeds max encoded size of 1048576 bytes:' ' \'{"b+\\.\\.\\.\'', w.write, {'b'*(m/2): 'x'*(m/2)}) def test_writer_contentencoding(self): for ce in ('identity', 'gzip'): job, w = self._job_and_writer(content_encoding=ce) for x in xrange(111): w.write({'x': x}) w.close() self.assertEqual(job.items.stats()['totals']['input_values'], 111) def test_writer_interval(self): job, w = self._job_and_writer(size=1000, interval=1) for x in xrange(111): w.write({'x': x}) if x == 50: time.sleep(2) w.close() groups = defaultdict(int) for doc in job.items.list(meta=['_ts']): groups[doc['_ts']] += 1 self.assertEqual(len(groups), 2)
Become an amateur botanist by growing and experimenting with different breeds of plants; discover what's needed for success via reports and log possible new breeds as your own. - Successfully grow plants and discover breeds in your log. - Change the temperature, water levels, soil types and/or plant pots. - Plant a seed or multiple varieties in a single pot to create crossbreeds. - Once all breeds are discovered, you have reached your goal! Height of game's window is bigger, then my screen height a bit :0 But game is still playable. Also, big thx for streaming my LD36 game on twitch. very fun game! I felt like a scientist, mixing and matching variable, trying to control things. Only complaint is that I wasn't sure what "Incompatible" was supposed to mean. I assumed it meant those breeds didn't mix, but later I found that they in fact did. Good job, an enjoyable experience, I'd like to play it on my phone in the subway. Keep up the good work! Thanks for the feedback, you actually were understanding what incompatible meant, unfortunately our code wasn't (Whoops!), added a quick fix for it, thanks for the heads up. loving the very idea of this game and actually managed to find all the plants! Scrolling down on the Findings screen with a mouse-wheel is very slow and sluggish. It's also quite complicated to "swipe" on a PC to bring up and bring down that board. Once I got the hang of controls though I really liked this game. Great feeling of experimenting with different pots, temperature and moisture levels! I also like how the interface is made. Pretty interesting. It kept me playing until I had all plants. However, it was mostly guess work and randomly tweaking values. Some form of hint system might have made me work with more direction. Graphics are very good, music is fairly suitable-maybe a little too mysterious. The concept is really nice, I would work a bit on controls as they didn't seem natural to me (but that might have been only my feeling though). 
Music matched the game quite well, and the idea is very catchy. That is really lovely. I enjoyed the art and the sound effects. Really gentle and soothing. Beautiful artwork! That last plant was my favorite (the black one with all the colors) I would love to have plushies of these! Brilliant and adorable plant designs. I loved the touch of having the clipboard to report the findings, and getting to experiment. Great job! I love this so much! Great work! Loved the art, and the parallax effect made it look extra good. Music fit well, too. I grew all the plants, and even grew a "Glorious" Mushaum. I liked the experimentation gameplay, and the note-taking made it feel more meaningful instead of just pure trial-and-error. The swipe to open the menu was kinda annoying with a mouse. Would have been nice if I could just click it or even just press enter or something. The game also didn't seem very tied-in to the theme, but I didn't really mind. I agree with other commenters that this would be a cool mobile game. I think I would like if there was some kind of limited resources (seeds, pots, etc.) so that you had to think more carefully about what experiments you're gonna perform. Or maybe experiments could take a certain amount of time, and you have to come back later to see the results. Like Neko Atsume, but for plants! The art really drew me into playing this, and I enjoyed it and unlocked all the possible plants on the clipboard. Like some of the other players, the window extended past the height of my screen and I wasn't actually able to grab the clipboard. I did manage to load the game in fullscreen, however, and it worked fine then. It's a really great game, with fantastic art. I had a lot of fun mixing and matching different combinations of the components and attempting to grow them. I was confused about how the lighting was to be recorded, so I'm glad the clipboard kept note of the successful attempts. 
I also liked being able to type in your records in the clipboard itself; it definitely added a sense of completion and progress. The scrolling for the clipboard is a little wonky; to scroll down, I had to scroll up first, then down, but it all worked out in the end. The music was a bit odd to hear at first, as I had played a browser game called Small Worlds where this music was used for a snowy area, but I think the waltz and mysterious feel help to set a good, wondrous mood for playing the game. Overall, it was right up my alley and I certainly felt like it was time well spent. Great graphics, good music, the interface seems like it would be very well designed for a phone, but on a computer the constant swiping gets a bit annoying. I managed to figure everything out except for Cutethulu, which I cannot get for the life of me. For some reason, every combination that doesn't result in a plant is incompatible, unless I managed to miss one. Very cool game though, and I loved the art! Great game! It might help to have a few more degrees of telling you how close you are, so you can tweak the parameters more. I loved that you can rename the plants on the clipboard! A very unique idea! I envy one of you (or maybe more) in your team actually having knowledge of botany. Gameplay is solid, well-made UI, beautiful graphics and sound. Thank You for the experience!
# -*- coding: utf-8 -*-
#
# Py6S documentation build configuration file, created by
# sphinx-quickstart on Thu Feb 16 12:07:44 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import os
import sys

# Prefer the standard-library mock (Python 3.3+); fall back to the
# third-party backport on old interpreters so docs still build there.
try:
    from unittest import mock
except ImportError:  # Python 2 fallback
    import mock

# Heavy scientific dependencies are replaced with mocks so autodoc can
# import Py6S on the docs build host without installing them.
MOCK_MODULES = [
    "numpy",
    "scipy",
    "matplotlib",
    "matplotlib.pyplot",
    "scipy.interpolate",
]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = mock.Mock()

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Raw string for the Windows path: a plain literal treats backslashes as
# escape candidates.
sys.path.insert(0, r"C:\_Work\Py6S\py6s")
sys.path.insert(0, "/Users/robin/Documents/University/Py6S/py6s")

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx", "sphinx.ext.viewcode"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix of source filenames.
source_suffix = ".rst"

# The encoding of source files.
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "Py6S"
copyright = "2012, Robin Wilson"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "1.9.0"
# The full version, including alpha/beta/rc tags.
release = "1.9.0" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = "sphinx" # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = "default" # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. 
# html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ["_static"] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = "Py6Sdoc" # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). "papersize": "a4paper", # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ("index", "Py6S.tex", "Py6S Documentation", "Robin Wilson", "manual"), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [("index", "py6s", "Py6S Documentation", ["Robin Wilson"], 1)] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( "index", "Py6S", "Py6S Documentation", "Robin Wilson", "Py6S", "One line description of project.", "Miscellaneous", ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. 
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'

# Example configuration for intersphinx: refer to the Python standard library.
# Modern Sphinx requires named intersphinx keys of the form
# {name: (target, inventory)}; the old bare-URL-key form used here before
# is deprecated and rejected by current releases. Also link over https.
intersphinx_mapping = {"python": ("https://docs.python.org/", None)}

# Document members in source order rather than alphabetically.
autodoc_member_order = "bysource"
Whether you want to improve your skills, expertise or career growth, Certkingdom's training and certification resources help you achieve your goals. Our exam files feature hands-on tasks and real-world scenarios; in just a matter of days, you'll be more productive and embracing new technology standards. Our online resources and events enable you to focus on learning just what you want on your timeframe. You get access to every exam file, and we continuously update our study materials; these exam updates are supplied free of charge to our valued customers. Get the best A2040-924 exam training as you study from our exam files: "Best Materials, Great Results". Make yourself more valuable in today's competitive computer industry. Certkingdom's preparation material includes the most excellent features, prepared by the same dedicated experts who have come together to offer an integrated solution. We provide the most excellent and simple method to pass your IBM IBM Certified System Administrator A2040-924 exam on the first attempt "GUARANTEED". We will prepare you for your exam with guaranteed results with the A2040-924 Study Guide. Your exam will download as a single A2040-924 PDF or complete A2040-924 testing engine as well as over 4000 other technical exam PDF and exam engine downloads. Forget buying your prep materials separately at three times the price of our unlimited access plan - skip the A2040-924 audio exams and select the one package that gives it all to you at your discretion: A2040-924 Study Materials featuring the exam engine. Beyond knowing the answer, actually understanding the A2040-924 test questions puts you one step ahead of the test. Completely understanding a concept and the reasoning behind how something works makes your task second nature. Your A2040-924 quiz will melt in your hands if you know the logic behind the concepts.
Any legitimate IBM IBM Certified System Administrator prep materials should enforce this style of learning - but you will be hard pressed to find more than a IBM IBM Certified System Administrator practice test anywhere other than Certkingdom. This is where your IBM IBM Certified System Administrator A2040-924 exam prep really takes off, in the testing your knowledge and ability to quickly come up with answers in the A2040-924 online tests. Using IBM Certified System Administrator A2040-924 practice exams is an excellent way to increase response time and queue certain answers to common issues. All IBM IBM Certified System Administrator online tests begin somewhere, and that is what the IBM IBM Certified System Administrator training course will do for you: create a foundation to build on. Study guides are essentially a detailed IBM IBM Certified System Administrator A2040-924 tutorial and are great introductions to new IBM IBM Certified System Administrator training courses as you advance. The content is always relevant, and compound again to make you pass your A2040-924 exams on the first attempt. You will frequently find these A2040-924 PDF files downloadable and can then archive or print them for extra reading or studying on-the-go. For some, this is the best way to get the latest IBM IBM Certified System Administrator A2040-924 training. However you decide to learn A2040-924 exam topics is up to you and your learning style. The Certkingdom IBM IBM Certified System Administrator products and tools are designed to work well with every learning style. Give us a try and sample our work. You'll be glad you did. * IBM Certified System Administrator A2040-924 prep files are frequently updated to maintain accuracy. Your courses will always be up to date. Get IBM Certified System Administrator ebooks from Certkingdom which contain real A2040-924 exam questions and answers. 
You WILL pass your IBM Certified System Administrator exam on the first attempt using only Certkingdom's IBM Certified System Administrator excellent preparation tools and tutorials. Thank You! I would just like to thank CertKingdom.com for the IBM IBM Certified System Administrator A2040-924 test guide that I bought a couple months ago and I took my test and pass overwhelmingly. I completed the test of 68 questions in about 90 minutes I must say that their Q & A with Explanation are very amazing and easy to learn. I passed the IBM IBM Certified System Administrator A2040-924 exam yesterday, and now it's on to security exam. Couldn't have done it with out you. Thanks very much.