text
stringlengths
29
850k
########################################################################################## # Sam - Home and Office Automation SRAI # (Sam Brain Compiler) # # Version 1.0 # # Used to compile the brain file if needed. # # Credits: # Many great works of many great people are included in Sam # I have only stacked the legos together. # # Based on the Py-AIML or PyAIML or pyAIML interpreter currently cloned by creatorrr # author: Cort Stratton (cort@users.sourceforge.net) web: http://pyaiml.sourceforge.net/ # https://github.com/creatorrr/pyAIML # ########################################################################################## import os import sys import platform import time import aiml import marshal import glob import time import operator import csv # AIML Directory saiml = "/PATH/sam/aiml/" #saiml = "C:\\PATH\\sam\\aiml\\" # brain k = aiml.Kernel() # setpreds() function def setpreds(): with open(saiml + 'preds.csv') as csvfile: reader = csv.reader(csvfile) for row in reader: #print((row[0]), (row[1])) k.setBotPredicate((row[0]), (row[1])) plat = platform.machine() osys = os.name print "Sam for " + osys print "System Architecture " + plat #print "Memory " + psutil.virtual_memory() k.setBotPredicate("architecture", plat) k.setBotPredicate("os", osys) # get_oldest_file() function def get_oldest_file(files, _invert=False): """ Find and return the oldest file of input file names. Only one wins tie. Values based on time distance from present. Use of `_invert` inverts logic to make this a youngest routine, to be used more clearly via `get_youngest_file`. """ gt = operator.lt if _invert else operator.gt # Check for empty list. if not files: return None # Raw epoch distance. now = time.time() # Select first as arbitrary sentinel file, storing name and age. oldest = files[0], now - os.path.getmtime(files[0]) # Iterate over all remaining files. for f in files[1:]: age = now - os.path.getmtime(f) if gt(age, oldest[1]): # Set new oldest. 
oldest = f, age # Return just the name of oldest file. return oldest[0] # learn() function def learn(aimlfiles): if not aimlfiles: k.learn(saiml + "xfind.aiml") for f in aimlfiles[1:]: k.learn(f) # brain() function def brain(): aimlfiles = glob.glob(saiml + "*.aiml") learn(aimlfiles) setpreds() if os.path.isfile(saiml + "sam.ses"): sessionFile = file(saiml + "sam.ses", "rb") session = marshal.load(sessionFile) sessionFile.close() for pred,value in session.items(): k.setPredicate(pred, value, "sam") else: setpreds() k.saveBrain(saiml + "sam.brn") if __name__ == "__main__": brain()
Broadhurst Theatre is proud to announce that on Wednesday 28th August 2019 they will be bringing theatrical performance superstars Anastasia to the stage for a live performance. This is your one chance to see the performance sensation that has captured the hearts and imaginations of all their fans. Critics will tell you that Anastasia is the show to catch in 2019 because of the passion and talent that they bring to every single show and appearance. Even if you have seen Anastasia live before then you don’t want to miss out on this premiere since it will be one for the record books. To make sure you don’t miss out, click the Buy Tickets button below and order your tickets today. From all of the astounding theater venues in New York City New York, Anastasia still chose Broadhurst Theatre, the finest for their Wednesday 28th August 2019 show. If you have ever been to this astounding spot, then you know why this amazing act could only choose to perform at this exclusive stage. For everyone else, here is what you will expect with your ticket. Broadhurst Theatre is located conveniently close to some of the most famed restaurants and bars in town making it simple to make a evening of it with lovely dinner and a stunning show. This theater also boats warm lighting and inviting decor that offer you a comfortable atmosphere that you will not soon forget. Theater goers are already snatching at the chance to grab up tickets for this event, so what are you waiting for? If you want to see Anastasia live on Wednesday 28th August 2019 then grab your ticket today through this site.
# -*- coding: utf-8 -*-

"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

# Click sub-commands for the RNAcentral pipeline: computing UPI ranges for
# the search export, parsing CRS feature files, and validating pgloader runs.

import click

from rnacentral_pipeline.rnacentral import upi_ranges
from rnacentral_pipeline.databases.crs import parser as crs
from rnacentral_pipeline.rnacentral import pgloader


@click.command("upi-ranges")
@click.option("--db_url", envvar="PGDATABASE")
@click.option("--table-name", default="rna")
@click.argument("chunk_size", type=int)
@click.argument("output", default="-", type=click.File("w"))
def find_upi_ranges(chunk_size, output, db_url=None, table_name=None):
    """
    This will compute the ranges to use for each xml file in the search
    export. We want to do several chunks at once as it is faster (but not
    too many), and we want to have as large a chunk as possible. If given
    a table_name value it will use that table, otherwise it will use the
    rna table.
    """
    upi_ranges.to_file(db_url, table_name, chunk_size, output)


@click.command("crs")
@click.argument("filename", default="-", type=click.File("r"))
@click.argument("output", default="complete_features.csv", type=click.File("w"))
def crs_data(filename, output):
    """
    This will parse the CRS file to produce a series of sequence features
    for import. The features are different from normal sequence features
    because these are 'complete', they already have a URS/taxid assigned
    and can just be inserted directly into the database.
    """
    crs.from_file(filename, output)


@click.command("validate-pgloader")
@click.argument("filename", default="-", type=click.File("r"))
def validate_pgloader(filename):
    """
    Check if pgloader ran without errors. Pgloader doesn't seem to crash
    when it should so we use this to parse the output and determine if
    there were any issues when loading. This is safer than continuing.
    """
    if not pgloader.validate(filename):
        raise click.ClickException("Pgloader produced errors")
Outer fabric: 85% Viscose, 12% Polyamide, 3% Elastane | Care tip: ironing brings this item containing viscose back to its original shape and length after being washed. For the best results we recommend steam ironing. This dress is both feminine and super comfortable in a fine knit with texture details and a swirling, flared A-line skirt! The weight and colourof the dress is lovely. The material is okay. I am slim and none of the dresses on this site fit well. A small was too large and this also a bit big. Will wear and then alter the dress to my body type.
#!env/bin/python #queries.py # # Implements methods to answer users queries. # # Author: José Lopes de Oliveira Jr. <bierminen.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ## from flask import jsonify, make_response from app.models import Style, Subtype, Type def error_response(status): if status == 404: return make_response(jsonify({'status':'not found'}), status) else: return make_response(jsonify({'status':'server error ({})'.format( status)}), status) def style_response(status, num=-1, n='', st='', t='', ogi=-1, oga=-1, fgi=-1, fga=-1, abi=-1, aba=-1, ibi=-1, iba=-1, sri=-1, sra=-1, d=''): if status == 200: return {'number':num, 'name':n, 'subtype':st, 'type':t, 'og_min':ogi, 'og_max':oga, 'fg_min':fgi, 'fg_max':fga, 'abv_min':abi, 'abv_max':aba, 'ibu_min':ibi, 'ibu_max':iba, 'srm_min':sri, 'srm_max':sra, 'description':d} else: return error_response(status) def get_styles(n): styles_list = [] if n: styles = Style.query.filter_by(number=n) else: styles = Style.query.all() for s in styles: st = Subtype.query.filter_by(id=s.fk_subtype).first() t = Type.query.filter_by(id=st.fk_type).first() styles_list.append(style_response(200, s.number, s.name, st.name, t.name, s.og_min, s.og_max, s.fg_min, s.fg_max, s.abv_min, s.abv_max, s.ibu_min, s.ibu_max, s.srm_min, s.srm_max, s.description)) if len(styles_list): return jsonify({'status':'OK', 'styles':styles_list}) else: return 
style_response(404)
While at the Toledo mixer I saw RC guru and old friend Frank Noll. When I asked what he was up to I was blown away to find out he was back on the RC scene! Frank is excited to be heading JR Propo! Frank Noll got his start in RC when he was just a kid. His father was into aeromodelling and started out building gliders and rubber band-power models. He also built wind tunnels for the Air Force. Frank got the RC bug by watching his dad build and fly growing up. Frank enjoys giant scale competition aerobatics, jets, helis and more. When he’s not flying he is golfing. He was influenced by Don Lowe and Ivan Kristensen. They spurred him on to be the best RC pilot he could be. Frank has been in the hobby over 50 years and has earned his living in the RC industry. I have known Frank since I first got going in RC and later worked in the RC industry as well. If you were at an event you could almost always bet that Frank was somewhere around too! Frank is very excited to have a new RC industry job and I wanted to make sure everyone knew about. Frank is now working with JR Propo to rebuild its base and get move things into the future. When Frank gave me the good news I knew there would be a lot of pilots out there that would want to hear about it! JT - So, JR is alive and well! Tell us about your new role with them. JT - What are your current plans to get the JR brand jump started again? FN - First thing on the list was to re-brand the line, so everyone was on the same page and consumers understood that JR Propo is back in a form that everyone knows. The immediate focus was to make sure that the modeling community understands that JR Propo is very much alive and under a new solid business structure. The loyal consumer base is still there and is unsure about what was going on, so we need to make sure that they don’t feel forgotten. 
The company’s main objective is to ensure that there is support all the way around from being able to purchase the systems again, to our knowledgeable service center handling repairs. Everyone in the current company program is very knowledgeable, dedicated and extremely excited about what the future has in store for it. I can speak for myself on this one, as I haven’t felt this excited about the hobby or my involvement in it for quite some time. JT - Are there any new products we should be aware of? FN - There are already new systems in production and inventory. They range from sport/beginner systems all the way to “complex” aircraft compatible systems. There have been changes in software and appearance to the current inventory that will keep up with the current modeling needs. The plans are to continue this mindset through current as well as future product introductions. JT - Will you be on the RC show circuit this year? FN - Absolutely. I’m still a modeler at heart and enjoy being with our “family” that attends events and tradeshows. Being in the hobby for so long (more than I’d like to admit) I’ve become acquainted with many people, regardless of affiliations and consider them close. When you look at our sport in the big picture, we’re just a bunch of grown-up “kids” playing with toy airplanes. JT - Will there be a new JR team? FN - Yes, there will. We’re in the process of establishing the team structure and once that’s accomplished, we will be actively searching for a limited group of modelers who are willing to help us with our mission. We want individuals who have good character and are easy to approach and talk to. JT - Any future JR plans that you can share? FN - There are new systems under development as we speak, as well as new innovations that have not been introduced into our hobby. These new innovations are equally exciting as the new systems, IMO. 
There was no point in going through all the work to relaunch the line, if we only had the same prior equipment. I’m sure this comes as great news to all the JR fans out there and it should come as great news to all of us in the hobby. Having Frank Noll at the helm of a respected RC brand is great for all of us.
import re
import urllib2
import json
from BeautifulSoup import BeautifulSoup


class FlopTV:
    # Scraper for www.floptv.tv (Python 2, BeautifulSoup 3).

    __USERAGENT = "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:50.0) Gecko/20100101 Firefox/50.0"
    __BASEURL = "http://www.floptv.tv"

    def __init__(self):
        # Install a process-wide opener so every urllib2 request below
        # carries a browser-like User-Agent header.
        opener = urllib2.build_opener()
        # Use Firefox User-Agent
        opener.addheaders = [('User-Agent', self.__USERAGENT)]
        urllib2.install_opener(opener)

    def getShows(self):
        """Scrape the show index; return a list of dicts with keys
        'title', 'thumb' and 'pageUrl'."""
        pageUrl = "http://www.floptv.tv/show/"
        data = urllib2.urlopen(pageUrl).read()
        tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
        shows = []
        sections = tree.find("div", "all-shows").findAll("section")
        for section in sections:
            items = section.findAll("li")
            for item in items:
                show = {}
                show["title"] = item.text
                show["thumb"] = item.find("img")["src"]
                # hrefs are site-relative; prefix the base URL.
                show["pageUrl"] = self.__BASEURL + item.find("a")["href"]
                shows.append(show)
        return shows

    def getVideoByShow(self, pageUrl):
        """Scrape a show page's episode tables; return a list of dicts
        with keys 'title', 'thumb' and 'pageUrl'."""
        data = urllib2.urlopen(pageUrl).read()
        tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
        videos = []
        sections = tree.findAll("section", "tabella")
        for section in sections:
            items = section.find("tbody").findAll("tr")
            for item in items:
                video = {}
                data = item.findAll("td")
                # Title is built from the first and third table cells.
                video["title"] = data[0].text + " " + data[2].text
                # Swap the small 62x36 thumbnail for the 307x173 variant.
                video["thumb"] = item.find("img")["src"].replace("-62x36.jpg", "-307x173.jpg")
                video["pageUrl"] = self.__BASEURL + item.find("a")["href"]
                videos.append(video)
        return videos

    def getVideoUrl(self, pageUrl):
        """Resolve the direct stream URL for a video page."""
        # Parse the HTML page to get the Video URL
        data = urllib2.urlopen(pageUrl).read()
        tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
        iframeUrl = tree.find("iframe", {"id": "player"})["src"]
        # Request the player iframe with the originating page as Referer.
        req = urllib2.Request(iframeUrl)
        req.add_header('Referer', pageUrl)
        data = urllib2.urlopen(req).read()
        tree = BeautifulSoup(data, convertEntities=BeautifulSoup.HTML_ENTITIES)
        script = tree.find("script", text=re.compile("playerConfig"))
        # NOTE(review): `script` is a BS3 NavigableString, not a str;
        # re.search relies on its str-like behaviour — confirm under BS3.
        # Extract the JS `sources: [...]` array from the player config.
        match = re.search(r'sources\s*:\s*(\[[^\]]+\])', script, re.DOTALL)
        string = match.group(1)
        # Convert to JSON: quote the bare `file:` key so json.loads accepts it.
        string = string.replace('file:', '"file":')
        sources = json.loads(string)
        # Get the first (and better) stream available
        videoUrl = sources[0]["file"]
        return videoUrl
Every day we share just one recent video about XRP. Today we share “SBI VC – CODIUS – Ripple XRP News – Ripple XRP Price” from Love For Crypto. Check out the video for the latest Ripple XRP news!
# -*- coding: utf-8 -*- # Copyright (C) 2006 Joe Wreschnig # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation. """Read and write MPEG-4 audio files with iTunes metadata. This module will read MPEG-4 audio information and metadata, as found in Apple's MP4 (aka M4A, M4B, M4P) files. There is no official specification for this format. The source code for TagLib, FAAD, and various MPEG specifications at * http://developer.apple.com/documentation/QuickTime/QTFF/ * http://www.geocities.com/xhelmboyx/quicktime/formats/mp4-layout.txt * http://standards.iso.org/ittf/PubliclyAvailableStandards/\ c041828_ISO_IEC_14496-12_2005(E).zip * http://wiki.multimedia.cx/index.php?title=Apple_QuickTime were all consulted. """ import struct import sys from mutagen import FileType, Tags, StreamInfo, PaddingInfo from mutagen._constants import GENRES from mutagen._util import (cdata, insert_bytes, DictProxy, MutagenError, hashable, enum, get_size, resize_bytes) from mutagen._compat import (reraise, PY2, string_types, text_type, chr_, iteritems, PY3, cBytesIO, izip, xrange) from ._atom import Atoms, Atom, AtomError from ._util import parse_full_atom from ._as_entry import AudioSampleEntry, ASEntryError class error(IOError, MutagenError): pass class MP4MetadataError(error): pass class MP4StreamInfoError(error): pass class MP4MetadataValueError(ValueError, MP4MetadataError): pass __all__ = ['MP4', 'Open', 'delete', 'MP4Cover', 'MP4FreeForm', 'AtomDataType'] @enum class AtomDataType(object): """Enum for `dataformat` attribute of MP4FreeForm. .. 
versionadded:: 1.25 """ IMPLICIT = 0 """for use with tags for which no type needs to be indicated because only one type is allowed""" UTF8 = 1 """without any count or null terminator""" UTF16 = 2 """also known as UTF-16BE""" SJIS = 3 """deprecated unless it is needed for special Japanese characters""" HTML = 6 """the HTML file header specifies which HTML version""" XML = 7 """the XML header must identify the DTD or schemas""" UUID = 8 """also known as GUID; stored as 16 bytes in binary (valid as an ID)""" ISRC = 9 """stored as UTF-8 text (valid as an ID)""" MI3P = 10 """stored as UTF-8 text (valid as an ID)""" GIF = 12 """(deprecated) a GIF image""" JPEG = 13 """a JPEG image""" PNG = 14 """PNG image""" URL = 15 """absolute, in UTF-8 characters""" DURATION = 16 """in milliseconds, 32-bit integer""" DATETIME = 17 """in UTC, counting seconds since midnight, January 1, 1904; 32 or 64-bits""" GENRES = 18 """a list of enumerated values""" INTEGER = 21 """a signed big-endian integer with length one of { 1,2,3,4,8 } bytes""" RIAA_PA = 24 """RIAA parental advisory; { -1=no, 1=yes, 0=unspecified }, 8-bit ingteger""" UPC = 25 """Universal Product Code, in text UTF-8 format (valid as an ID)""" BMP = 27 """Windows bitmap image""" @hashable class MP4Cover(bytes): """A cover artwork. 
Attributes: * imageformat -- format of the image (either FORMAT_JPEG or FORMAT_PNG) """ FORMAT_JPEG = AtomDataType.JPEG FORMAT_PNG = AtomDataType.PNG def __new__(cls, data, *args, **kwargs): return bytes.__new__(cls, data) def __init__(self, data, imageformat=FORMAT_JPEG): self.imageformat = imageformat __hash__ = bytes.__hash__ def __eq__(self, other): if not isinstance(other, MP4Cover): return bytes(self) == other return (bytes(self) == bytes(other) and self.imageformat == other.imageformat) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(%r, %r)" % ( type(self).__name__, bytes(self), AtomDataType(self.imageformat)) @hashable class MP4FreeForm(bytes): """A freeform value. Attributes: * dataformat -- format of the data (see AtomDataType) """ FORMAT_DATA = AtomDataType.IMPLICIT # deprecated FORMAT_TEXT = AtomDataType.UTF8 # deprecated def __new__(cls, data, *args, **kwargs): return bytes.__new__(cls, data) def __init__(self, data, dataformat=AtomDataType.UTF8, version=0): self.dataformat = dataformat self.version = version __hash__ = bytes.__hash__ def __eq__(self, other): if not isinstance(other, MP4FreeForm): return bytes(self) == other return (bytes(self) == bytes(other) and self.dataformat == other.dataformat and self.version == other.version) def __ne__(self, other): return not self.__eq__(other) def __repr__(self): return "%s(%r, %r)" % ( type(self).__name__, bytes(self), AtomDataType(self.dataformat)) def _name2key(name): if PY2: return name return name.decode("latin-1") def _key2name(key): if PY2: return key return key.encode("latin-1") def _find_padding(atom_path): # Check for padding "free" atom # XXX: we only use them if they are adjacent to ilst, and only one. # and there also is a top level free atom which we could use maybe..? 
meta, ilst = atom_path[-2:] assert meta.name == b"meta" and ilst.name == b"ilst" index = meta.children.index(ilst) try: prev = meta.children[index - 1] if prev.name == b"free": return prev except IndexError: pass try: next_ = meta.children[index + 1] if next_.name == b"free": return next_ except IndexError: pass def _item_sort_key(key, value): # iTunes always writes the tags in order of "relevance", try # to copy it as closely as possible. order = ["\xa9nam", "\xa9ART", "\xa9wrt", "\xa9alb", "\xa9gen", "gnre", "trkn", "disk", "\xa9day", "cpil", "pgap", "pcst", "tmpo", "\xa9too", "----", "covr", "\xa9lyr"] order = dict(izip(order, xrange(len(order)))) last = len(order) # If there's no key-based way to distinguish, order by length. # If there's still no way, go by string comparison on the # values, so we at least have something determinstic. return (order.get(key[:4], last), len(repr(value)), repr(value)) class MP4Tags(DictProxy, Tags): r"""Dictionary containing Apple iTunes metadata list key/values. Keys are four byte identifiers, except for freeform ('----') keys. 
Values are usually unicode strings, but some atoms have a special structure: Text values (multiple values per key are supported): * '\\xa9nam' -- track title * '\\xa9alb' -- album * '\\xa9ART' -- artist * 'aART' -- album artist * '\\xa9wrt' -- composer * '\\xa9day' -- year * '\\xa9cmt' -- comment * 'desc' -- description (usually used in podcasts) * 'purd' -- purchase date * '\\xa9grp' -- grouping * '\\xa9gen' -- genre * '\\xa9lyr' -- lyrics * 'purl' -- podcast URL * 'egid' -- podcast episode GUID * 'catg' -- podcast category * 'keyw' -- podcast keywords * '\\xa9too' -- encoded by * 'cprt' -- copyright * 'soal' -- album sort order * 'soaa' -- album artist sort order * 'soar' -- artist sort order * 'sonm' -- title sort order * 'soco' -- composer sort order * 'sosn' -- show sort order * 'tvsh' -- show name Boolean values: * 'cpil' -- part of a compilation * 'pgap' -- part of a gapless album * 'pcst' -- podcast (iTunes reads this only on import) Tuples of ints (multiple values per key are supported): * 'trkn' -- track number, total tracks * 'disk' -- disc number, total discs Others: * 'tmpo' -- tempo/BPM, 16 bit int * 'covr' -- cover artwork, list of MP4Cover objects (which are tagged strs) * 'gnre' -- ID3v1 genre. Not supported, use '\\xa9gen' instead. The freeform '----' frames use a key in the format '----:mean:name' where 'mean' is usually 'com.apple.iTunes' and 'name' is a unique identifier for this frame. The value is a str, but is probably text that can be decoded as UTF-8. Multiple values per key are supported. MP4 tag data cannot exist outside of the structure of an MP4 file, so this class should not be manually instantiated. Unknown non-text tags and tags that failed to parse will be written back as is. 
""" def __init__(self, *args, **kwargs): self._failed_atoms = {} super(MP4Tags, self).__init__() if args or kwargs: self.load(*args, **kwargs) def load(self, atoms, fileobj): try: path = atoms.path(b"moov", b"udta", b"meta", b"ilst") except KeyError as key: raise MP4MetadataError(key) free = _find_padding(path) self._padding = free.datalength if free is not None else 0 ilst = path[-1] for atom in ilst.children: ok, data = atom.read(fileobj) if not ok: raise MP4MetadataError("Not enough data") try: if atom.name in self.__atoms: info = self.__atoms[atom.name] info[0](self, atom, data) else: # unknown atom, try as text self.__parse_text(atom, data, implicit=False) except MP4MetadataError: # parsing failed, save them so we can write them back key = _name2key(atom.name) self._failed_atoms.setdefault(key, []).append(data) def __setitem__(self, key, value): if not isinstance(key, str): raise TypeError("key has to be str") self._render(key, value) super(MP4Tags, self).__setitem__(key, value) @classmethod def _can_load(cls, atoms): return b"moov.udta.meta.ilst" in atoms def _render(self, key, value): atom_name = _key2name(key)[:4] if atom_name in self.__atoms: render_func = self.__atoms[atom_name][1] else: render_func = type(self).__render_text return render_func(self, key, value) def save(self, filename, padding=None): """Save the metadata to the given filename.""" values = [] items = sorted(self.items(), key=lambda kv: _item_sort_key(*kv)) for key, value in items: try: values.append(self._render(key, value)) except (TypeError, ValueError) as s: reraise(MP4MetadataValueError, s, sys.exc_info()[2]) for key, failed in iteritems(self._failed_atoms): # don't write atoms back if we have added a new one with # the same name, this excludes freeform which can have # multiple atoms with the same key (most parsers seem to be able # to handle that) if key in self: assert _key2name(key) != b"----" continue for data in failed: values.append(Atom.render(_key2name(key), data)) data = 
Atom.render(b"ilst", b"".join(values)) # Find the old atoms. with open(filename, "rb+") as fileobj: try: atoms = Atoms(fileobj) except AtomError as err: reraise(error, err, sys.exc_info()[2]) self.__save(fileobj, atoms, data, padding) def __save(self, fileobj, atoms, data, padding): try: path = atoms.path(b"moov", b"udta", b"meta", b"ilst") except KeyError: self.__save_new(fileobj, atoms, data, padding) else: self.__save_existing(fileobj, atoms, path, data, padding) def __save_new(self, fileobj, atoms, ilst_data, padding_func): hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9) meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data try: path = atoms.path(b"moov", b"udta") except KeyError: path = atoms.path(b"moov") offset = path[-1]._dataoffset # ignoring some atom overhead... but we don't have padding left anyway # and padding_size is guaranteed to be less than zero content_size = get_size(fileobj) - offset padding_size = -len(meta_data) assert padding_size < 0 info = PaddingInfo(padding_size, content_size) new_padding = info._get_padding(padding_func) new_padding = min(0xFFFFFFFF, new_padding) free = Atom.render(b"free", b"\x00" * new_padding) meta = Atom.render(b"meta", meta_data + free) if path[-1].name != b"udta": # moov.udta not found -- create one data = Atom.render(b"udta", meta) else: data = meta insert_bytes(fileobj, len(data), offset) fileobj.seek(offset) fileobj.write(data) self.__update_parents(fileobj, path, len(data)) self.__update_offsets(fileobj, atoms, len(data), offset) def __save_existing(self, fileobj, atoms, path, ilst_data, padding_func): # Replace the old ilst atom. 
ilst = path[-1] offset = ilst.offset length = ilst.length # Use adjacent free atom if there is one free = _find_padding(path) if free is not None: offset = min(offset, free.offset) length += free.length # Always add a padding atom to make things easier padding_overhead = len(Atom.render(b"free", b"")) content_size = get_size(fileobj) - (offset + length) padding_size = length - (len(ilst_data) + padding_overhead) info = PaddingInfo(padding_size, content_size) new_padding = info._get_padding(padding_func) # Limit padding size so we can be sure the free atom overhead is as we # calculated above (see Atom.render) new_padding = min(0xFFFFFFFF, new_padding) ilst_data += Atom.render(b"free", b"\x00" * new_padding) resize_bytes(fileobj, length, len(ilst_data), offset) delta = len(ilst_data) - length fileobj.seek(offset) fileobj.write(ilst_data) self.__update_parents(fileobj, path[:-1], delta) self.__update_offsets(fileobj, atoms, delta, offset) def __update_parents(self, fileobj, path, delta): """Update all parent atoms with the new size.""" if delta == 0: return for atom in path: fileobj.seek(atom.offset) size = cdata.uint_be(fileobj.read(4)) if size == 1: # 64bit # skip name (4B) and read size (8B) size = cdata.ulonglong_be(fileobj.read(12)[4:]) fileobj.seek(atom.offset + 8) fileobj.write(cdata.to_ulonglong_be(size + delta)) else: # 32bit fileobj.seek(atom.offset) fileobj.write(cdata.to_uint_be(size + delta)) def __update_offset_table(self, fileobj, fmt, atom, delta, offset): """Update offset table in the specified atom.""" if atom.offset > offset: atom.offset += delta fileobj.seek(atom.offset + 12) data = fileobj.read(atom.length - 12) fmt = fmt % cdata.uint_be(data[:4]) offsets = struct.unpack(fmt, data[4:]) offsets = [o + (0, delta)[offset < o] for o in offsets] fileobj.seek(atom.offset + 16) fileobj.write(struct.pack(fmt, *offsets)) def __update_tfhd(self, fileobj, atom, delta, offset): if atom.offset > offset: atom.offset += delta fileobj.seek(atom.offset + 9) data 
= fileobj.read(atom.length - 9) flags = cdata.uint_be(b"\x00" + data[:3]) if flags & 1: o = cdata.ulonglong_be(data[7:15]) if o > offset: o += delta fileobj.seek(atom.offset + 16) fileobj.write(cdata.to_ulonglong_be(o)) def __update_offsets(self, fileobj, atoms, delta, offset): """Update offset tables in all 'stco' and 'co64' atoms.""" if delta == 0: return moov = atoms[b"moov"] for atom in moov.findall(b'stco', True): self.__update_offset_table(fileobj, ">%dI", atom, delta, offset) for atom in moov.findall(b'co64', True): self.__update_offset_table(fileobj, ">%dQ", atom, delta, offset) try: for atom in atoms[b"moof"].findall(b'tfhd', True): self.__update_tfhd(fileobj, atom, delta, offset) except KeyError: pass def __parse_data(self, atom, data): pos = 0 while pos < atom.length - 8: head = data[pos:pos + 12] if len(head) != 12: raise MP4MetadataError("truncated atom % r" % atom.name) length, name = struct.unpack(">I4s", head[:8]) version = ord(head[8:9]) flags = struct.unpack(">I", b"\x00" + head[9:12])[0] if name != b"data": raise MP4MetadataError( "unexpected atom %r inside %r" % (name, atom.name)) chunk = data[pos + 16:pos + length] if len(chunk) != length - 16: raise MP4MetadataError("truncated atom % r" % atom.name) yield version, flags, chunk pos += length def __add(self, key, value, single=False): assert isinstance(key, str) if single: self[key] = value else: self.setdefault(key, []).extend(value) def __render_data(self, key, version, flags, value): return Atom.render(_key2name(key), b"".join([ Atom.render( b"data", struct.pack(">2I", version << 24 | flags, 0) + data) for data in value])) def __parse_freeform(self, atom, data): length = cdata.uint_be(data[:4]) mean = data[12:length] pos = length length = cdata.uint_be(data[pos:pos + 4]) name = data[pos + 12:pos + length] pos += length value = [] while pos < atom.length - 8: length, atom_name = struct.unpack(">I4s", data[pos:pos + 8]) if atom_name != b"data": raise MP4MetadataError( "unexpected atom %r inside 
%r" % (atom_name, atom.name)) version = ord(data[pos + 8:pos + 8 + 1]) flags = struct.unpack(">I", b"\x00" + data[pos + 9:pos + 12])[0] value.append(MP4FreeForm(data[pos + 16:pos + length], dataformat=flags, version=version)) pos += length key = _name2key(atom.name + b":" + mean + b":" + name) self.__add(key, value) def __render_freeform(self, key, value): if isinstance(value, bytes): value = [value] dummy, mean, name = _key2name(key).split(b":", 2) mean = struct.pack(">I4sI", len(mean) + 12, b"mean", 0) + mean name = struct.pack(">I4sI", len(name) + 12, b"name", 0) + name data = b"" for v in value: flags = AtomDataType.UTF8 version = 0 if isinstance(v, MP4FreeForm): flags = v.dataformat version = v.version data += struct.pack( ">I4s2I", len(v) + 16, b"data", version << 24 | flags, 0) data += v return Atom.render(b"----", mean + name + data) def __parse_pair(self, atom, data): key = _name2key(atom.name) values = [struct.unpack(">2H", d[2:6]) for version, flags, d in self.__parse_data(atom, data)] self.__add(key, values) def __render_pair(self, key, value): data = [] for v in value: try: track, total = v except TypeError: raise ValueError if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: data.append(struct.pack(">4H", 0, track, total, 0)) else: raise MP4MetadataValueError( "invalid numeric pair %r" % ((track, total),)) return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) def __render_pair_no_trailing(self, key, value): data = [] for (track, total) in value: if 0 <= track < 1 << 16 and 0 <= total < 1 << 16: data.append(struct.pack(">3H", 0, track, total)) else: raise MP4MetadataValueError( "invalid numeric pair %r" % ((track, total),)) return self.__render_data(key, 0, AtomDataType.IMPLICIT, data) def __parse_genre(self, atom, data): values = [] for version, flags, data in self.__parse_data(atom, data): # version = 0, flags = 0 if len(data) != 2: raise MP4MetadataValueError("invalid genre") genre = cdata.short_be(data) # Translate to a freeform genre. 
try: genre = GENRES[genre - 1] except IndexError: # this will make us write it back at least raise MP4MetadataValueError("unknown genre") values.append(genre) key = _name2key(b"\xa9gen") self.__add(key, values) def __parse_tempo(self, atom, data): values = [] for version, flags, data in self.__parse_data(atom, data): # version = 0, flags = 0 or 21 if len(data) != 2: raise MP4MetadataValueError("invalid tempo") values.append(cdata.ushort_be(data)) key = _name2key(atom.name) self.__add(key, values) def __render_tempo(self, key, value): try: if len(value) == 0: return self.__render_data(key, 0, AtomDataType.INTEGER, b"") if (min(value) < 0) or (max(value) >= 2 ** 16): raise MP4MetadataValueError( "invalid 16 bit integers: %r" % value) except TypeError: raise MP4MetadataValueError( "tmpo must be a list of 16 bit integers") values = [cdata.to_ushort_be(v) for v in value] return self.__render_data(key, 0, AtomDataType.INTEGER, values) def __parse_bool(self, atom, data): for version, flags, data in self.__parse_data(atom, data): if len(data) != 1: raise MP4MetadataValueError("invalid bool") value = bool(ord(data)) key = _name2key(atom.name) self.__add(key, value, single=True) def __render_bool(self, key, value): return self.__render_data( key, 0, AtomDataType.INTEGER, [chr_(bool(value))]) def __parse_cover(self, atom, data): values = [] pos = 0 while pos < atom.length - 8: length, name, imageformat = struct.unpack(">I4sI", data[pos:pos + 12]) if name != b"data": if name == b"name": pos += length continue raise MP4MetadataError( "unexpected atom %r inside 'covr'" % name) if imageformat not in (MP4Cover.FORMAT_JPEG, MP4Cover.FORMAT_PNG): # Sometimes AtomDataType.IMPLICIT or simply wrong. 
# In all cases it was jpeg, so default to it imageformat = MP4Cover.FORMAT_JPEG cover = MP4Cover(data[pos + 16:pos + length], imageformat) values.append(cover) pos += length key = _name2key(atom.name) self.__add(key, values) def __render_cover(self, key, value): atom_data = [] for cover in value: try: imageformat = cover.imageformat except AttributeError: imageformat = MP4Cover.FORMAT_JPEG atom_data.append(Atom.render( b"data", struct.pack(">2I", imageformat, 0) + cover)) return Atom.render(_key2name(key), b"".join(atom_data)) def __parse_text(self, atom, data, implicit=True): # implicit = False, for parsing unknown atoms only take utf8 ones. # For known ones we can assume the implicit are utf8 too. values = [] for version, flags, atom_data in self.__parse_data(atom, data): if implicit: if flags not in (AtomDataType.IMPLICIT, AtomDataType.UTF8): raise MP4MetadataError( "Unknown atom type %r for %r" % (flags, atom.name)) else: if flags != AtomDataType.UTF8: raise MP4MetadataError( "%r is not text, ignore" % atom.name) try: text = atom_data.decode("utf-8") except UnicodeDecodeError as e: raise MP4MetadataError("%s: %s" % (_name2key(atom.name), e)) values.append(text) key = _name2key(atom.name) self.__add(key, values) def __render_text(self, key, value, flags=AtomDataType.UTF8): if isinstance(value, string_types): value = [value] encoded = [] for v in value: if not isinstance(v, text_type): if PY3: raise TypeError("%r not str" % v) try: v = v.decode("utf-8") except (AttributeError, UnicodeDecodeError) as e: raise TypeError(e) encoded.append(v.encode("utf-8")) return self.__render_data(key, 0, flags, encoded) def delete(self, filename): """Remove the metadata from the given filename.""" self._failed_atoms.clear() self.clear() self.save(filename, padding=lambda x: 0) __atoms = { b"----": (__parse_freeform, __render_freeform), b"trkn": (__parse_pair, __render_pair), b"disk": (__parse_pair, __render_pair_no_trailing), b"gnre": (__parse_genre, None), b"tmpo": 
(__parse_tempo, __render_tempo), b"cpil": (__parse_bool, __render_bool), b"pgap": (__parse_bool, __render_bool), b"pcst": (__parse_bool, __render_bool), b"covr": (__parse_cover, __render_cover), b"purl": (__parse_text, __render_text), b"egid": (__parse_text, __render_text), } # these allow implicit flags and parse as text for name in [b"\xa9nam", b"\xa9alb", b"\xa9ART", b"aART", b"\xa9wrt", b"\xa9day", b"\xa9cmt", b"desc", b"purd", b"\xa9grp", b"\xa9gen", b"\xa9lyr", b"catg", b"keyw", b"\xa9too", b"cprt", b"soal", b"soaa", b"soar", b"sonm", b"soco", b"sosn", b"tvsh"]: __atoms[name] = (__parse_text, __render_text) def pprint(self): def to_line(key, value): assert isinstance(key, text_type) if isinstance(value, text_type): return u"%s=%s" % (key, value) return u"%s=%r" % (key, value) values = [] for key, value in sorted(iteritems(self)): if not isinstance(key, text_type): key = key.decode("latin-1") if key == "covr": values.append(u"%s=%s" % (key, u", ".join( [u"[%d bytes of data]" % len(data) for data in value]))) elif isinstance(value, list): for v in value: values.append(to_line(key, v)) else: values.append(to_line(key, value)) return u"\n".join(values) class MP4Info(StreamInfo): """MPEG-4 stream information. Attributes: * bitrate -- bitrate in bits per second, as an int * length -- file length in seconds, as a float * channels -- number of audio channels * sample_rate -- audio sampling rate in Hz * bits_per_sample -- bits per sample * codec (string): * if starting with ``"mp4a"`` uses an mp4a audio codec (see the codec parameter in rfc6381 for details e.g. ``"mp4a.40.2"``) * for everything else see a list of possible values at http://www.mp4ra.org/codecs.html e.g. ``"mp4a"``, ``"alac"``, ``"mp4a.40.2"``, ``"ac-3"`` etc. * codec_description (string): Name of the codec used (ALAC, AAC LC, AC-3...). Values might change in the future, use for display purposes only. 
""" bitrate = 0 channels = 0 sample_rate = 0 bits_per_sample = 0 codec = u"" codec_name = u"" def __init__(self, atoms, fileobj): try: moov = atoms[b"moov"] except KeyError: raise MP4StreamInfoError("not a MP4 file") for trak in moov.findall(b"trak"): hdlr = trak[b"mdia", b"hdlr"] ok, data = hdlr.read(fileobj) if not ok: raise MP4StreamInfoError("Not enough data") if data[8:12] == b"soun": break else: raise MP4StreamInfoError("track has no audio data") mdhd = trak[b"mdia", b"mdhd"] ok, data = mdhd.read(fileobj) if not ok: raise MP4StreamInfoError("Not enough data") try: version, flags, data = parse_full_atom(data) except ValueError as e: raise MP4StreamInfoError(e) if version == 0: offset = 8 fmt = ">2I" elif version == 1: offset = 16 fmt = ">IQ" else: raise MP4StreamInfoError("Unknown mdhd version %d" % version) end = offset + struct.calcsize(fmt) unit, length = struct.unpack(fmt, data[offset:end]) try: self.length = float(length) / unit except ZeroDivisionError: self.length = 0 try: atom = trak[b"mdia", b"minf", b"stbl", b"stsd"] except KeyError: pass else: self._parse_stsd(atom, fileobj) def _parse_stsd(self, atom, fileobj): """Sets channels, bits_per_sample, sample_rate and optionally bitrate. Can raise MP4StreamInfoError. 
""" assert atom.name == b"stsd" ok, data = atom.read(fileobj) if not ok: raise MP4StreamInfoError("Invalid stsd") try: version, flags, data = parse_full_atom(data) except ValueError as e: raise MP4StreamInfoError(e) if version != 0: raise MP4StreamInfoError("Unsupported stsd version") try: num_entries, offset = cdata.uint32_be_from(data, 0) except cdata.error as e: raise MP4StreamInfoError(e) if num_entries == 0: return # look at the first entry if there is one entry_fileobj = cBytesIO(data[offset:]) try: entry_atom = Atom(entry_fileobj) except AtomError as e: raise MP4StreamInfoError(e) try: entry = AudioSampleEntry(entry_atom, entry_fileobj) except ASEntryError as e: raise MP4StreamInfoError(e) else: self.channels = entry.channels self.bits_per_sample = entry.sample_size self.sample_rate = entry.sample_rate self.bitrate = entry.bitrate self.codec = entry.codec self.codec_description = entry.codec_description def pprint(self): return "MPEG-4 audio (%s), %.2f seconds, %d bps" % ( self.codec_description, self.length, self.bitrate) class MP4(FileType): """An MPEG-4 audio file, probably containing AAC. If more than one track is present in the file, the first is used. Only audio ('soun') tracks will be read. 
:ivar info: :class:`MP4Info` :ivar tags: :class:`MP4Tags` """ MP4Tags = MP4Tags _mimes = ["audio/mp4", "audio/x-m4a", "audio/mpeg4", "audio/aac"] def load(self, filename): self.filename = filename with open(filename, "rb") as fileobj: try: atoms = Atoms(fileobj) except AtomError as err: reraise(error, err, sys.exc_info()[2]) try: self.info = MP4Info(atoms, fileobj) except error: raise except Exception as err: reraise(MP4StreamInfoError, err, sys.exc_info()[2]) if not MP4Tags._can_load(atoms): self.tags = None self._padding = 0 else: try: self.tags = self.MP4Tags(atoms, fileobj) except error: raise except Exception as err: reraise(MP4MetadataError, err, sys.exc_info()[2]) else: self._padding = self.tags._padding def save(self, filename=None, padding=None): super(MP4, self).save(filename, padding=padding) def delete(self, filename=None): super(MP4, self).delete(filename) def add_tags(self): if self.tags is None: self.tags = self.MP4Tags() else: raise error("an MP4 tag already exists") @staticmethod def score(filename, fileobj, header_data): return (b"ftyp" in header_data) + (b"mp4" in header_data) Open = MP4 def delete(filename): """Remove tags from a file.""" MP4(filename).delete()
As its name suggests, hospitality industry powerhouse PR firm Bullfrog & Baum has a penchant for all things amphibian. So when Leap Year rolls around, it's only right that the New York-based PR and marketing firm specializing in lifestyle clients should mark the day with something froggy. Today at 3:00 p.m., four fabulous frogs will go to the mat at the Bullfrog & Baum offices on West 22nd Street, each representing an important food-focused charity – Share Our Strength, Citymeals-on-Wheels, Edible Schoolyard and City Harvest. For more information on Bullfrog & Baum's Leap Year Froggy Fundraiser, go to www.bullfrogandblog.com. To watch the race as it streams live, visit http://www.ustream.tv/channel/bullfrogtv. Note: No frogs will be harmed in the process of the fundraiser and all frogs will be re-adopted by our local pet store partner once the race is complete. ****We're on Team Arsonist and hoping (or is that hopping) you'll help us raise funds for Share Our Strength. Go frog, go!
from bitmovin.errors import InvalidTypeError
from bitmovin.resources.models.encodings.muxings.time_code import TimeCode
from bitmovin.resources.enums.mp4_muxing_manifest_type import MP4MuxingManifestType
from .muxing import Muxing


class MP4Muxing(Muxing):
    """Muxing that writes the encoded streams into an MP4 container.

    The ``timeCode`` and ``fragmentedMP4MuxingManifestType`` fields are
    validated through property setters; everything else is either handled by
    the ``Muxing`` base class or stored as a plain attribute.
    """

    def __init__(self, streams, filename=None, outputs=None, id_=None, custom_data=None,
                 name=None, description=None, ignored_by=None, fragment_duration=None,
                 time_code=None, fragmented_mp4_muxing_manifest_type=None,
                 stream_conditions_mode=None, internal_chunk_length=None):
        # Generic muxing fields are delegated to the base class.
        super().__init__(id_=id_, custom_data=custom_data, streams=streams, outputs=outputs,
                         name=name, description=description, ignored_by=ignored_by,
                         stream_conditions_mode=stream_conditions_mode,
                         internal_chunk_length=internal_chunk_length)
        self.filename = filename
        self.fragmentDuration = fragment_duration
        # Assign through the property setters below so the values are validated.
        self._timeCode = None
        self.timeCode = time_code
        self._fragmentedMP4MuxingManifestType = None
        self.fragmentedMP4MuxingManifestType = fragmented_mp4_muxing_manifest_type

    @property
    def timeCode(self):
        # A TimeCode instance, or None when unset.
        return self._timeCode

    @timeCode.setter
    def timeCode(self, new_time_code):
        """Accept only ``None`` or a ``TimeCode`` instance; reject anything else."""
        if new_time_code is None:
            self._timeCode = None
            return
        if isinstance(new_time_code, TimeCode):
            self._timeCode = new_time_code
        else:
            raise InvalidTypeError(
                'Invalid type {} for timeCode: must be TimeCode object!'.format(
                    type(new_time_code)
                ))

    @property
    def fragmentedMP4MuxingManifestType(self):
        # Always stored as a plain string (the enum member's value), or None.
        return self._fragmentedMP4MuxingManifestType

    @fragmentedMP4MuxingManifestType.setter
    def fragmentedMP4MuxingManifestType(self, new_fragmented_mp4_muxing_manifest_type):
        """Accept ``None``, an ``MP4MuxingManifestType`` member, or a raw string.

        Enum members are unwrapped to their ``.value`` so the stored attribute
        is always a string suitable for serialization.
        """
        if new_fragmented_mp4_muxing_manifest_type is None:
            self._fragmentedMP4MuxingManifestType = None
        elif isinstance(new_fragmented_mp4_muxing_manifest_type, MP4MuxingManifestType):
            self._fragmentedMP4MuxingManifestType = new_fragmented_mp4_muxing_manifest_type.value
        elif isinstance(new_fragmented_mp4_muxing_manifest_type, str):
            self._fragmentedMP4MuxingManifestType = new_fragmented_mp4_muxing_manifest_type
        else:
            raise InvalidTypeError('fragmentedMP4MuxingManifestType has to be of type MP4MuxingManifestType or str')

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build an MP4Muxing from an API JSON response.

        The generic muxing fields are parsed by the base class; the
        MP4-specific fields are read here ('filename' is required, the rest
        are optional).
        """
        muxing = super().parse_from_json_object(json_object=json_object)
        filename = json_object['filename']
        fragment_duration = json_object.get('fragmentDuration')
        time_code_json = json_object.get('timeCode')
        time_code = None
        if time_code_json is not None:
            time_code = TimeCode.parse_from_json_object(time_code_json)
        fragmented_mp4_muxing_manifest_type = json_object.get('fragmentedMP4MuxingManifestType')

        mp4_muxing = MP4Muxing(filename=filename,
                               fragment_duration=fragment_duration,
                               time_code=time_code,
                               fragmented_mp4_muxing_manifest_type=fragmented_mp4_muxing_manifest_type,
                               id_=muxing.id,
                               streams=muxing.streams,
                               outputs=muxing.outputs,
                               custom_data=muxing.customData,
                               name=muxing.name,
                               description=muxing.description,
                               ignored_by=muxing.ignored_by,
                               stream_conditions_mode=muxing.stream_conditions_mode,
                               internal_chunk_length=muxing.internal_chunk_length)
        return mp4_muxing

    def serialize(self):
        """Serialize to a JSON-compatible dict, nesting the TimeCode if present."""
        serialized = super().serialize()
        if self.timeCode is not None:
            serialized['timeCode'] = self.timeCode.serialize()
        serialized['fragmentedMP4MuxingManifestType'] = self.fragmentedMP4MuxingManifestType
        return serialized
Third Warmest April for U.S. Warmer than average temperatures dominated most of the United States this April. According to NOAA’s monthly climate report released yesterday, this was the third warmest April on record for the lower forty-eight states. As a whole, the country averaged a temperature of 55.7°F, which is 3.6°F above average. Most of the heat was centered near the Rocky Mountains and the southern plains, where over 300 daily high temperature records were broken. No state, however, experienced below average temperatures. Following this country’s warmest March on record and fourth warmest winter, April’s above average temperatures were not much of a surprise. Experts attribute the intensity of the continued warmth to the persistent northerly track of the jet stream and a combination of climate change issues, both natural and man-made. This entry was posted in weather and tagged heat, national, record, stats by Melissa Fleming. Bookmark the permalink.
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import serializers

from waldur_core.core import serializers as core_serializers
from waldur_core.structure import models as structure_models
from waldur_zabbix.apps import ZabbixConfig


class LinkFilterBackend(DjangoFilterBackend):
    """
    This filter allows to filter Zabbix service project link by URL of virtual machine.

    Consider for example the following use case.
    There're two OpenStack virtual machines in the Waldur project.
    Zabbix server is installed on the first VM.
    Zabbix agent is to be installed on the second VM.
    Note, that both of them share the same OpenStack tenant.
    Therefore, they should be able to communicate directly,
    ie without proxy or virtual router.

    There's service settings for Zabbix provider in Waldur database.
    It is configured with scope field equal to the Zabbix server VM.
    Also, there are Zabbix service and Zabbix service project link configured
    for the project.

    By supplying URL of the OpenStack service project link to this filter
    backend, we should be able to get list of all Zabbix service project links
    which could be used as Zabbix monitoring in the same OpenStack tenant.
    """

    def filter_queryset(self, request, queryset, view):
        """Narrow ``queryset`` to links whose Zabbix settings are scoped to a
        sibling VM of the VM given via the ``resource`` query parameter.

        Without a ``resource`` parameter the queryset is returned unchanged;
        an unresolvable URL yields an empty queryset rather than an error.
        """
        resource_url = request.query_params.get('resource')
        if resource_url:
            try:
                resource = self.get_resource_by_url(request, resource_url)
            except serializers.ValidationError:
                # Invalid or unknown resource URL: nothing can match.
                return queryset.none()
            link = resource.service_project_link
            # Other VMs sharing the same service project link (i.e. the same tenant).
            siblings = resource._meta.model.objects.filter(
                service_project_link=link
            ).exclude(uuid=resource.uuid)
            # exists() is cheaper than count(): it issues a LIMIT 1 query
            # instead of a full COUNT aggregate.
            if not siblings.exists():
                return queryset.none()
            # Zabbix service settings whose scope is one of the sibling VMs.
            service_settings = structure_models.ServiceSettings.objects.filter(
                type=ZabbixConfig.service_name,
                scope__in=siblings,
            )
            queryset = queryset.filter(
                project=link.project,
                service__settings__in=service_settings
            )
        return queryset

    def get_resource_by_url(self, request, resource_url):
        """Resolve a VM model instance from its API URL.

        Raises ``serializers.ValidationError`` when the URL does not point to
        a known virtual machine.
        """
        related_models = structure_models.VirtualMachine.get_all_models()
        field = core_serializers.GenericRelatedField(related_models=related_models)
        # Trick to set field context without serializer
        field._context = {'request': request}
        return field.to_internal_value(resource_url)
According to Wikipedia, he co-founded a soft-rock band The Happy Birthdays with host Marc Summers. 2. All the blogs are saying that "Two" features Sheryl Crow. On what? Am I deaf? Does anyone hear female vocals on this track? Uh yeah actually. During the chorus and one of the verses. A lot actually.
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Setup script for distributing SIFT as a stand-alone executable from distutils.core import setup import glob import py2exe import sys import pkg_resources # If run without args, build executables, in quiet mode. if len(sys.argv) == 1: sys.argv.append("py2exe") #sys.argv.append("-q") class Target: def __init__(self, **kw): self.__dict__.update(kw) # for the versioninfo resources self.version = "0.1" self.company_name = u"昕睿软件" self.copyright = u"昕睿软件" self.name = u"SexyGirl" manifest = """ <?xml version="1.0" encoding="UTF-8" standalone="yes"?> <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <assemblyIdentity version="0.64.1.0" processorArchitecture="x86" name="Controls" type="win32" /> <description>myProgram</description> <dependency> <dependentAssembly> <assemblyIdentity type="win32" name="Microsoft.Windows.Common-Controls" version="6.0.0.0" processorArchitecture="X86" publicKeyToken="6595b64144ccf1df" language="*" /> </dependentAssembly> </dependency> </assembly> """ RT_MANIFEST = 24 MyApp = Target( # used for the versioninfo resource description = u"SexyGirl Downloader", # what to build script = "spynner_test.py", #other_resources = [(24,1,manifest)], icon_resources = [(1, "lsc.ico")], dest_base = "dist") py2exe_options = { "includes": ["sqlite3","sip"], #PyQt程序打包时需要 "dll_excludes": ["w9xpopen.exe",], "excludes" : ["Tkconstants","Tkinter","tcl","doctest","pdb","unittest"], "compressed": 1, #压缩文件 "optimize": 2, #优化级别,默认为0 #"ascii": 0, #ascii指不自动包含encodings和codecs "bundle_files": 3, #指将程序打包成单文件(此时除了exe文件外,还会生成一个zip文件。如果不需要zip文件,还需要设置zipfile = None)。1表示pyd和dll文件会被打包到单文件中,且不能从文件系统中加载python模块;值为2表示pyd和dll文件会被打包到单文件中,但是可以从文件系统中加载python模块 } data_files=[("", ["lsc.ico","msvcr90.dll"])] setup( name = u'dist', version = '1.0', #windows = [MyApp], console = [MyApp], #zipfile = None, options = {'py2exe': py2exe_options}, data_files = data_files, )
This 2 bedroom apartment is located in La Source and sleeps 4 people. It has WiFi, air conditioning and garage. The apartment is within walking distance of a beach. 4-room maisonette 90 m2 on 7th floor. The agent describes this apartment as an ideal holiday rental for the summer holidays. It is also good value for money. The apartment has a sea view and is located in La Source. The nearest airport is only 27km away. It is set in a very convenient location, just 500 metres away from a beach and only 1km away from the city center. The apartment has free wireless internet. You can also park in the garage on-site at no extra charge. The apartment is well equipped: it has air conditioning, a dishwasher, a microwave and a washing machine.
#! /usr/bin/env python
####################################################################################################
# Script to reorient volumes into anatomical orientation and to create surface time-series.
# This script is made for use with a registration file (in FreeSurfer's tkreg format) and
# a series of unwarped time-series volumes: i.e., the output of Serra's preprocessing
# script.
# Author: Noah C. Benson <nben@nyu.edu>

import argparse, sys, os, six
import neuropythy as ny, numpy as np, nibabel as nib

if six.PY3:
    from functools import reduce

def main(args):
    """Reorient EPI volumes into anatomical orientation and optionally resample
    them onto the cortical surface.

    Parameters
    ----------
    args : list of str
        Raw command-line arguments (typically ``sys.argv``).

    Returns
    -------
    int
        0 on success; failures are raised as exceptions.
    """
    # Parse the arguments...
    parser = argparse.ArgumentParser()
    parser.add_argument('reg', metavar='registration_file', nargs=1,
                        help=('The distort2anat_tkreg.dat or similar file: the registration'
                              ' file, in FreeSurfer\'s tkreg format, to apply to the EPIs.'))
    parser.add_argument('epis', metavar='EPI', type=str, nargs='+',
                        help='The EPI files to be converted to anatomical orientation')
    parser.add_argument('-t', '--tag', required=False, default='-', dest='tag', nargs=1,
                        help=('A tag to append to the output filenames; if given as - or'
                              ' omitted, overwrites original files.'))
    parser.add_argument('-s', '--surf', required=False, default=False, dest='surface',
                        action='store_true',
                        help=('If provided, instructs the script to also produce files of the '
                              'time-series resampled on the cortical surface.'))
    parser.add_argument('-o', '--out', required=False, default='.', dest='outdir',
                        help=('The output directory to which the files should be written; by'
                              ' default this is the current directory (.); note that if this'
                              ' directory also contains the EPI files and there is no tag given,'
                              ' then the EPIs will be overwritten.'))
    parser.add_argument('-m', '--method', required=False, default='linear', dest='method',
                        help=('The method to use for volume-to-surface interpolation; this may'
                              ' be nearest or linear; the default is linear.'))
    parser.add_argument('-l', '--layer', required=False, default='midgray', dest='layer',
                        help=('Specifies the cortical layer to user in interpolation from volume'
                              ' to surface. By default, uses midgray. May be set to a value'
                              ' between 0 (white) and 1 (pial) to specify an intermediate surface'
                              ' or may be simply white, pial, or midgray.'))
    parser.add_argument('-d', '--subjects-dir', required=False, default=None, dest='sdir',
                        help=('Specifies the subjects directory to use; by default uses the'
                              ' environment variable SUBJECTS_DIR.'))
    parser.add_argument('-v', '--verbose', required=False, default=False, action='store_true',
                        dest='verbose', help='Print verbose output')
    # Strip the interpreter name and/or script name off the argument list.
    if args[0].startswith('python'): args = args[2:]
    else: args = args[1:]
    args = parser.parse_args(args)
    # Check some of the arguments...
    epis = args.epis
    if len(epis) < 1: raise RuntimeError('No EPIs given')
    tag = args.tag[0]
    if tag == '-': tag = ''
    dosurf = args.surface
    outdir = args.outdir
    if not os.path.isdir(outdir): raise RuntimeError('Directory %s does not exist' % outdir)
    if args.verbose:
        def note(*args):
            six.print_(*args, flush=True)
            return True
    else:
        def note(*args):
            return False
    # A numeric layer (fractional cortical depth) is also accepted; otherwise
    # the layer stays a string such as 'midgray'.  (Was a bare except; only a
    # failed float conversion should be ignored here.)
    try: args.layer = float(args.layer)
    except ValueError: pass
    # Read in the registration file
    args.reg = args.reg[0]
    if not os.path.isfile(args.reg):
        raise RuntimeError('Given registration file not found: %s' % args.reg)
    with open(args.reg, 'r') as f:
        lines = []
        while True:
            s = f.readline()
            if s is None or s == '': break
            lines.append(s)
    # This tells us some info...
    sub = lines[0].strip()
    if args.sdir is not None:
        ny.add_subject_path(args.sdir)
    try: sub = ny.freesurfer_subject(sub)
    except Exception:
        raise ValueError('No subject %s; you may need to set your SUBJECTS_DIR' % sub)
    # Lines 5-8 of a tkreg file hold the 4x4 registration matrix.
    affine = np.asarray([[float(ss) for ss in s.split()] for s in lines[4:8]])
    affinv = np.linalg.inv(affine)
    displm = sub.lh.affine
    # loop over the given EPIs
    for epi in epis:
        note('Processing EPI %s...' % epi)
        # import the epi file..
        img = ny.load(epi, to='image')
        # edit the header...
        note(' - Correcting volume orientation...')
        new_affine = np.dot(displm, np.dot(affinv, ny.freesurfer.tkr_vox2ras(img)))
        newimg = nib.Nifti1Image(img.dataobj, new_affine, img.header)
        (epi_dir, epi_flnm) = os.path.split(epi)
        # BUGFIX: test the filename's *suffix* (epi_flnm[-4:]) against the
        # short extensions, not the stem (epi_flnm[:-4]); with the old test
        # .mgz/.mgh/.nii files always fell into the 7-character-extension
        # branch (meant for names like .nii.gz) and were split incorrectly.
        if epi_flnm[-4:] in ['.mgz', '.mgh', '.nii']:
            pre = epi_flnm[:-4]
            suf = epi_flnm[-4:]
        else:
            pre = epi_flnm[:-7]
            suf = epi_flnm[-7:]
        srf_flnm = pre + tag + '.mgz'
        epi_flnm = pre + tag + suf
        newimg.to_filename(os.path.join(args.outdir, epi_flnm))
        # okay, now project to the surface
        if args.surface:
            note(' - Projecting to surface...')
            (ldat, rdat) = sub.image_to_cortex(newimg, surface=args.layer,
                                               method=args.method, dtype=np.float32)
            # we need to fix the dimensions...
            for (d, h) in zip([ldat, rdat], ['lh', 'rh']):
                if d.shape[-1] == 1:
                    # then this should properly be a 3d MGH image, not a 4d one.
                    im = nib.freesurfer.mghformat.MGHImage(
                        np.transpose(reduce(np.expand_dims, [-1], d), (0, 2, 1)),
                        np.eye(4))
                else:
                    im = nib.freesurfer.mghformat.MGHImage(
                        np.transpose(reduce(np.expand_dims, [-1, -1], d), (0, 2, 3, 1)),
                        np.eye(4))
                im.to_filename(os.path.join(args.outdir, h + '.' + srf_flnm))
    # That's it!
    return 0

main(sys.argv)
Australian batsman Steve Smith will embark on a redemption journey when Rajasthan Royals open their campaign against Kings XI Punjab in the 12th edition of the Indian Premier League (IPL) in Jaipur on Monday. Ben Stokes will once again be the key in both batting and bowling departments for the Rajasthan Royals. Stokes will once again be the key in both batting and bowling departments, while the Kings would be expecting their all-rounder Sam Curran to be on the top of his game tomorrow (Monday). Their bowling too has reasonable depth, with speedster Jaidev Unadkat eyeing a slot in the Indian team for the World Cup. Varun Aaron, Dhawal Kulkarni, Jofra Archer, Ish Sodhi and a few others give variety and options to the Royals for different conditions. The Ravichandran Ashwin-led Punjab side would rely heavily on their openers Chris Gayle and KL Rahul to provide them a fiery start, and if the swashbuckling West Indian opener happens to be in his elements, the Kings XI may be able to draw the first blood.
# -*- coding: utf-8 -*-
# Tests for the etcd-backed configuration source: the low-level EtcdConnector
# HTTP calls (mocked via monkeypatch) and EtcdStore read/write behaviour
# (driven through a fake in-memory connector fixture).
import pytest

from layeredconfig import EtcdStore
from layeredconfig.sources.etcdstore import EtcdConnector

try:
    import requests
except:
    # skip all tests when requests is not installed
    pytestmark = pytest.mark.skip(reason='Missing optional dependencies')


@pytest.fixture
def connector():
    """A fake EtcdConnector whose get/set are call-counting stand-ins.

    ``get`` returns a canned etcd-style nested node tree; ``set`` records the
    written (key, value) pairs in ``set_data`` for later assertions.
    """
    class Connector:
        """Simple etcd connector"""

        def __init__(self):
            self.get_data = {}
            self.set_data = {}

        @pytest.helpers.inspector
        def get(self, *args, **kwargs):
            return self.get_data

        @pytest.helpers.inspector
        def set(self, *items):
            self.set_data.update(items)

    connector = Connector()
    # Canned response shaped like etcd's recursive key listing:
    # a=1, b/c=2, b/d/e=3 (all values are strings, as etcd is untyped).
    connector.get_data = {
        'node': {
            'nodes': [{
                'key': 'a',
                'value': '1'
            }, {
                'key': 'b',
                'dir': True,
                'nodes': [{
                    'key': 'c',
                    'value': '2'
                }, {
                    'key': 'd',
                    'dir': True,
                    'nodes': [{
                        'key': 'e',
                        'value': '3'
                    }]
                }]
            }]
        }
    }
    return connector


@pytest.mark.parametrize('key', ['/', '/a'])
def test_etcd_connector_get_data(monkeypatch, key):
    """EtcdConnector.get must issue a recursive GET against /keys<key>."""
    url = 'http://fake-url:2379'
    connector = EtcdConnector(url)

    class Response(object):
        def json(self):
            return {}

    def get(*args, **kwargs):
        # The connector should target the keys endpoint and request recursion.
        assert url + '/keys' + key == args[0]
        assert 'recursive' in kwargs['params']
        return Response()

    monkeypatch.setattr('layeredconfig.sources.etcdstore.requests.get', get)
    connector.get(key)


@pytest.mark.parametrize('key, value', [
    ('/a', 1),
    ('/b', 2),
])
def test_etcd_connector_set_data(monkeypatch, key, value):
    """EtcdConnector.set must PUT each value to /keys<key>."""
    url = 'http://fake-url:2379'
    connector = EtcdConnector(url)

    def put(*args, **kwargs):
        assert url + '/keys' + key == args[0]
        assert value == kwargs['data']['value']

    monkeypatch.setattr('layeredconfig.sources.etcdstore.requests.put', put)
    connector.set((key, value))


def test_lazy_read_etcd_source(connector):
    """Values are fetched once and cached; disabling the cache re-fetches."""
    config = EtcdStore('bogus-url')
    config._connector = connector
    # etcd is untyped
    assert config.a == '1'
    assert config.b.c == '2'
    assert config.b.d == {'e': '3'}
    assert config._connector.get.calls == 1
    config._use_cache = False
    config.a
    # With the cache off, the same lookup hits the connector again.
    assert config._connector.get.calls == 2


def test_write_etcd_source(connector):
    """write_cache flushes nested assignments as flat etcd key paths."""
    config = EtcdStore('bogus-url')
    config._connector = connector
    config.a = '10'
    config.b.c = '20'
    config.b.d.e = '30'
    config.write_cache()
    data = connector.set_data
    assert data['/a'] == '10'
    assert data['/b/c'] == '20'
    assert data['/b/d/e'] == '30'
Organisations who rely on Mango for their QHSE compliance needs come in all different shapes and sizes. In fact, the only thing they have in common is a determination to make compliance as easy as possible. Our clients are located across Australia, Canada, New Zealand, South Africa and the United Kingdom. Watch the below videos to learn how Mango has helped these clients manage their compliance regulations. "This software has enabled us to save reasonable amounts of time" Organisations who use Mango have some very clear expectations. If that sounds like you, we're pretty sure you won't be wasting your time finding out a bit more about Mango. Go on, give us a try - it could be the best decision you make about your compliance management for a long, long time. Mango's advantage comes from its ease of configuration. That means we can give you an 'out of the box' solution that contains everything you need and nothing you don't. With Mango it really is as simple as picking the combination of solutions and modules you need to meet your requirements. That way, you get a perfect fit for your business - and only pay for what you need. To find out more about our clients and how they use Mango, please click on one of the links below.
#!/usr/bin/env python3
from core.utils import MsgPrint


class BaseSaltModule(object):
    """Base class for salt-style modules.

    Parsed section data is dispatched to ``func__<name>`` handler methods,
    which subclasses are expected to provide.
    """

    def __init__(self, task_obj):
        # The task context this module instance operates on.
        self.task_obj = task_obj

    def process(self, module_data, *args, **kwargs):
        """Run every ``func__<key>`` handler for the parsed section items,
        then the optional sub-action handler."""
        raw_cmds = module_data['raw_cmds']
        section_name = raw_cmds['section']
        section_data = raw_cmds['mod_data']
        sub_action = raw_cmds.get('sub_action')
        for entry in section_data:
            for item_key, item_value in entry.items():
                handler = getattr(self, 'func__%s' % item_key)
                handler(item_value)
        if sub_action:
            # Only runs when a sub-action is present; this mostly applies to
            # files and modules.
            sub_handler = getattr(self, 'func__%s' % sub_action)
            sub_handler(module_data=raw_cmds)

    def func__require(self, *args, **kwargs):
        # Placeholder handler: simply echoes the requirement.
        print('require:', *args, **kwargs)

    def type_validate(self, item_name, data, data_type):
        """Report an error via MsgPrint when ``data`` is not *exactly* of type
        ``data_type`` (strict identity check — subclasses do not count)."""
        if data_type is not type(data):
            MsgPrint.error("[%s] requires %s, not a %s" % (item_name, data_type, type(data)))
Halloween has come and gone and Thanksgiving is right around the corner. I haven't carved a pumpkin in years but I am good for carving some cake. Fondant: Plenty of orange, some brown, a little bit of yellow. 1. Start off by dividing the cake into two layers. You can make two cakes, which will then create 4 layers for a taller pumpkin. Then fill the layers with your preferred buttercream. 2. Begin carving small notches off of the top of the cake that are a couple of inches apart. Then carve the bottom of the cake going inward to create a curve as seen in picture 3. From here you can carve more notches into your cake as you see fit, then coat in a thin layer of frosting. Refrigerate for 30 minutes. Optional: Carve a hole into the middle of the cake and coat with frosting. 3. To create plenty of curvature without removing too much cake, build the frosting onto the sides of the cake. Then slowly and patiently with a slanted spatula create curves and creases. If needed, refrigerate for 15 minutes at a time to allow the frosting to be more 'carvable' so to speak. When the cake has completely chilled, carve your jack-o'-lantern face onto one side of the pumpkin. Refrigerate until solid. 4. Roll out a large piece of orange fondant (large enough to cover the entire cake). Use your hands to smooth the fondant onto the cake and use fondant tools or similar to push fondant into the grooves of the cake. Also use tools or fingers to push the fondant into the face you carved. Push the fondant into the top of the cake and fill the hole with candies or sprinkles. 5. Optional: Airbrush the grooves and features of the cake with brown food dye. You can also dust with brown petal dust. Fill the eyes and mouth with light orange or yellow fondant to create a carved look. Roll a thick log of brown fondant to put on top of the cake. Pull the base of the stem outward and onto the cake. Use a knife or fondant tools to create grain lines in the stem.
Twist the stem while pulling outward to create a natural look then remove excess. 6. Airbrush or paint the inside of the mouth and eyes with black food color (leaving out a small portion of the eyes, see picture 1). When the leaves have dried surround the blank side of the pumpkin with fondant leaves and place a couple on top.
from __future__ import absolute_import

from django.core.urlresolvers import reverse
from django.test import TestCase

from nodeshot.core.base.tests import user_fixtures
from nodeshot.ui.default import settings as local_settings


class DefaultUiDjangoTest(TestCase):
    """Integration tests for the default UI index page.

    Each optional-feature test flips a flag on the UI settings module,
    re-renders the index page, and checks the matching markup fragment
    appears (flag on) or is absent (flag off).
    """

    fixtures = [
        'initial_data.json',
        user_fixtures,
        'test_layers.json',
        'test_status.json',
        'test_nodes.json',
        'test_images.json',
    ]

    def _get_index(self):
        # Single place to fetch the rendered UI index page.
        return self.client.get(reverse('ui:index'))

    def _check_toggle(self, setting, fragment):
        # Enabling `setting` must render `fragment`; disabling removes it.
        setattr(local_settings, setting, True)
        self.assertContains(self._get_index(), fragment)
        setattr(local_settings, setting, False)
        self.assertNotContains(self._get_index(), fragment)

    def test_index(self):
        response = self._get_index()
        self.assertEqual(response.status_code, 200)

    def test_social_auth_optional(self):
        self._check_toggle('SOCIAL_AUTH_ENABLED', 'social-buttons')

    def test_facebook_optional(self):
        # Facebook button only shows while social auth as a whole is on.
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
        self._check_toggle('FACEBOOK_ENABLED', 'btn-facebook')
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)

    def test_google_optional(self):
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
        self._check_toggle('GOOGLE_ENABLED', 'btn-google')
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)

    def test_github_optional(self):
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', True)
        self._check_toggle('GITHUB_ENABLED', 'btn-github')
        setattr(local_settings, 'SOCIAL_AUTH_ENABLED', False)
Zarechnoye Mine. Zarechnoye is an operating in situ recovery uranium mine located in the Syr-Darya basin in the Otrar region, South Kazakhstan province, approximatelykm west of Shymkent andkm southeast of Kyzylorda, Kazakhstan. With the mining industry accounting forper cent of its GDP, Kazakhstan has world leading reserves of chromite, zinc, copper, gold and manganese, and holdsper cent of the global supply of uranium. L'exploitation de l'uranium au Kazakhstan d&#;bute en dans le cadre du projet de bombe atomique sovi&#;tique. &#; la fin des ann&#;es, les sovi&#;tiques exploitent une mine d'uranium &#;km au nord-est de la ville d'Aktaou, o&#; le surg&#;n&#;rateur BNalimente la ville en &#;lectricit&#;. Kazakhstan has% of the world's uranium, and in, Kazakhstan was responsible for% of world production. There areuranium mines in the country, however, maximum output is capped at,tons per year. &#;&#;The real reason Russia wanted this deal was to give Rosatom’s subsidiary Uranium One's very profitable uranium mines in Kazakhstan ― the single largest producer of commercial uranium in the world. &#;&#;Beyond mines in Kazakhstan that are among the most lucrative in the world, the sale gave the Russians control of one-fifth of all uranium production capacity in the United States. Uranium One is a joint venture partner with JSC NAC Kazatomprom, the Kazakhstan state-owned atomic energy company, in six major producing uranium mines in Kazakhstan – Akdala, South Inkai, Karatau, Akbastau, Zarechnoye and Kharasan. &#;&#;Kazakhstan, which has about one-fifth of the world’s uranium reserves, was the place to be. But with plenty of suitors, Kazatomprom could be picky about its partners. Kazakhstan has% of the world's uranium resources and has been the world's leading uranium producer since. Itsproduction of,tU accounted for% of world production. The country had planned to increase its production year on year to. 
More than half of the world's uranium production comes from justmines. Canada, the second biggest uranium producer in the world, hosts the world's biggestthe McArthur River uranium minewhile Kazakhstan, the largest uranium producing country, operates three of the top. Additionally, Cameco has apercent stake in a mine in Kazakhstan. In, the world’s largest uranium producer put out slightly less uranium than it did the previous year. Uranium production in Kazakhstan has grown significantly over the last three years, which has made the central Asian nation the top uranium producer in recent times, last year producing,tonnes. Kazakhstan is the world’s leading producer of uranium with its,tons of output inrepresenting about% of global production and earning $1.billion in export revenue. The country is theth largest in iron ore reserves with.billion tons. Uranium mining debate – The uranium mining debate covers the political and environmental controversies of the mining of uranium for use in either nuclear power or nuclear weapons. As of, in terms of production, Kazakhstan was the largest supplier to export markets, followed by Canada. Inkai is owned% by Cameco and% by the Kazakh government through Kazatomprom. Tapping into the vast uranium potential of Kazakhstan, Joint Venture Inkai LLP operates the in situ recovery mine. The uranium exploration and mining JVs Akbastau and Karatau with Tenex started with Budenovskoye in the Stepnoye area of south Kazakhstan, which commenced production in. These complemented the Zarechnoye JVkm to the south which was set up in June. Uranium One’s prize possessions were the mines in Kazakhstan — sometimes described as the Saudi Arabia of uranium — and Dzakhishev said he wanted to keep them out of Russian hands. Uranium exploration in Kazakhstan began in. Later, in, mining began with positive results, leading to more exploration. Some underground mines from thes remain, but are close to depletion.
"""Detect peaks in data based on their amplitude and other features.""" from __future__ import division, print_function import numpy as np __author__ = "Marcos Duarte, https://github.com/demotu/BMC, Modified by Malte Deiseroth" __version__ = "1.0.4.1" __license__ = "MIT" def detect_peaks(x, mph=None, mpd=1, threshold=0, edge='rising', kpsh=False, valley=False, show=False, ax=None): """Detect peaks in data based on their amplitude and other features. Parameters ---------- x : 1D array_like data. mph : {None, number}, optional (default = None) detect peaks that are greater than minimum peak height. mpd : positive integer, optional (default = 1) detect peaks that are at least separated by minimum peak distance (in number of data). threshold : positive number, optional (default = 0) detect peaks (valleys) that are greater (smaller) than `threshold` in relation to their immediate neighbors. edge : {None, 'rising', 'falling', 'both'}, optional (default = 'rising') for a flat peak, keep only the rising edge ('rising'), only the falling edge ('falling'), both edges ('both'), or don't detect a flat peak (None). kpsh : bool, optional (default = False) keep peaks with same height even if they are closer than `mpd`. valley : bool, optional (default = False) if True (1), detect valleys (local minima) instead of peaks. show : bool, optional (default = False) if True (1), plot data in matplotlib figure. ax : a matplotlib.axes.Axes instance, optional (default = None). Returns ------- ind : 1D array_like indices of the peaks in `x`. Notes ----- The detection of valleys instead of peaks is performed internally by simply negating the data: `ind_valleys = detect_peaks(-x)` The function can handle NaN's See this IPython Notebook [1]_. References ---------- .. 
[1] http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DetectPeaks.ipynb Examples -------- >>> from detect_peaks import detect_peaks >>> x = np.random.randn(100) >>> x[60:81] = np.nan >>> # detect all peaks and plot data >>> ind = detect_peaks(x, show=True) >>> print(ind) >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 >>> # set minimum peak height = 0 and minimum peak distance = 20 >>> detect_peaks(x, mph=0, mpd=20, show=True) >>> x = [0, 1, 0, 2, 0, 3, 0, 2, 0, 1, 0] >>> # set minimum peak distance = 2 >>> detect_peaks(x, mpd=2, show=True) >>> x = np.sin(2*np.pi*5*np.linspace(0, 1, 200)) + np.random.randn(200)/5 >>> # detection of valleys instead of peaks >>> detect_peaks(x, mph=0, mpd=20, valley=True, show=True) >>> x = [0, 1, 1, 0, 1, 1, 0] >>> # detect both edges >>> detect_peaks(x, edge='both', show=True) >>> x = [-2, 1, -2, 2, 1, 1, 3, 0] >>> # set threshold = 2 >>> detect_peaks(x, threshold = 2, show=True) """ x = np.atleast_1d(x).astype('float64') if x.size < 3: return np.array([], dtype=int) if valley: x = -x # find indices of all peaks dx = x[1:] - x[:-1] # handle NaN's indnan = np.where(np.isnan(x))[0] if indnan.size: x[indnan] = np.inf dx[np.where(np.isnan(dx))[0]] = np.inf ine, ire, ife = np.array([[], [], []], dtype=int) if not edge: ine = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) > 0))[0] else: if edge.lower() in ['rising', 'both']: ire = np.where((np.hstack((dx, 0)) <= 0) & (np.hstack((0, dx)) > 0))[0] if edge.lower() in ['falling', 'both']: ife = np.where((np.hstack((dx, 0)) < 0) & (np.hstack((0, dx)) >= 0))[0] ind = np.unique(np.hstack((ine, ire, ife))) # handle NaN's if ind.size and indnan.size: # NaN's and values close to NaN's cannot be peaks ind = ind[np.in1d(ind, np.unique(np.hstack((indnan, indnan-1, indnan+1))), invert=True)] # first and last values of x cannot be peaks if ind.size and ind[0] == 0: ind = ind[1:] if ind.size and ind[-1] == x.size-1: ind = ind[:-1] # remove peaks < 
minimum peak height if ind.size and mph is not None: ind = ind[x[ind] >= mph] # remove peaks - neighbors < threshold if ind.size and threshold > 0: dx = np.min(np.vstack([x[ind]-x[ind-1], x[ind]-x[ind+1]]), axis=0) ind = np.delete(ind, np.where(dx < threshold)[0]) # detect small peaks closer than minimum peak distance if ind.size and mpd > 1: ind = ind[np.argsort(x[ind])][::-1] # sort ind by peak height idel = np.zeros(ind.size, dtype=bool) for i in range(ind.size): if not idel[i]: # keep peaks with the same height if kpsh is True idel = idel | (ind >= ind[i] - mpd) & (ind <= ind[i] + mpd) \ & (x[ind[i]] > x[ind] if kpsh else True) idel[i] = 0 # Keep current peak # remove the small peaks and sort back the indices by their occurrence ind = np.sort(ind[~idel]) if show: if indnan.size: x[indnan] = np.nan if valley: x = -x _plot(x, mph, mpd, threshold, edge, valley, ax, ind) return ind def detect_peaks_vectorized(x, **kwargs): """ vectorized version of detect_peaks. -------- input: x 1d or 2d array like to search for peaks. if 2d, peaks are searched in a row wise patter, thus we loop over x. for the rest see detect_peaks, but with the addition, that all kwargs are also vectorized and thus instead of ont mph one can also enter a list of mphs where each mph corresponds to one row of the 2d spectrum in x. -------- return The return mimiks the behaviour of numpy masks. See e.g. `np.where(array([1,2,3,4,5] > 3))` In the case of 1d x-array, a numpy mask like tuple of peakpostitions is returned. In the case of a 2d x-array a numpy mask like tuple is returned. 0. element of the tuple is an array with the numbers of the spektrum the peak was found in, 1. element of the tubple is the position of the peak in the spectrum. 
""" # check and vectorize user input D1, D2 = {}, {} # 1D and 2D kwarg input for key, value in kwargs.items(): # print(key, value) if np.shape(value) == () : D1[key] = value elif np.shape(value) == (len(x),): D2[key] = value else: raise IOError("Cant handle dim of %s. Int, float or iterable of shape %s was expected, but %s was found."%(key, len(x), np.shape(value))) # pass everything to detect_peak in the 1d case if len(np.shape(x)) == 1: peakpos = detect_peaks(x, **kwargs) return (peakpos) # format according to numpy masks # find peaks an format output according to numpy mask conventions peakpos = np.array([]) pos = np.array([]) for i in range(len(x)): elm = x[i] kwrds = dict(D1) for key, value in D2.items(): kwrds[key] = value[i] peaks = detect_peaks(elm, **kwrds ) peakpos = np.hstack((peakpos, peaks)) pos = np.hstack((pos, i * np.ones(np.shape(peaks), dtype=np.int32))) return np.int32(pos), np.int32(peakpos) def _plot(x, mph, mpd, threshold, edge, valley, ax, ind): """Plot results of the detect_peaks function, see its help.""" try: import matplotlib.pyplot as plt except ImportError: print('matplotlib is not available.') else: if ax is None: _, ax = plt.subplots(1, 1, figsize=(8, 4)) ax.plot(x, 'b', lw=1) if ind.size: label = 'valley' if valley else 'peak' label = label + 's' if ind.size > 1 else label ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8, label='%d %s' % (ind.size, label)) ax.legend(loc='best', framealpha=.5, numpoints=1) ax.set_xlim(-.02*x.size, x.size*1.02-1) ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max() yrange = ymax - ymin if ymax > ymin else 1 ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange) ax.set_xlabel('Data #', fontsize=14) ax.set_ylabel('Amplitude', fontsize=14) mode = 'Valley detection' if valley else 'Peak detection' ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')" % (mode, str(mph), mpd, str(threshold), edge)) # plt.grid() plt.show()
Get the latest Juggernaut cheats, codes, unlockables, hints, Easter eggs, glitches, tips, tricks, hacks, downloads, hints, guides, FAQs, walkthroughs, and more for PlayStation (PSX). CheatCodes.com has all you need to win every game you play! Use the above links or scroll down see all to the PlayStation cheats we have available for Juggernaut. We have no tips for Juggernaut yet. If you have any unlockables please submit them. We have no cheats or codes for Juggernaut yet. If you have any unlockables please submit them. We have no unlockables for Juggernaut yet. If you have any unlockables please submit them. We have no easter eggs for Juggernaut yet. If you have any unlockables please submit them. We have no glitches for Juggernaut yet. If you have any unlockables please submit them.
#!/usr/bin/env python3
# A simple script to print all updates received

from getpass import getpass
from os import environ
# environ is used to get API information from environment variables
# You could also use a config file, pass them as arguments,
# or even hardcode them (not recommended)
from telethon import TelegramClient
from telethon.errors import SessionPasswordNeededError


def main():
    """Connect to Telegram, sign in if needed, and print every update.

    Reads TG_SESSION (optional), TG_PHONE, TG_API_ID and TG_API_HASH from
    the environment. Blocks until the user presses Enter.
    """
    session_name = environ.get('TG_SESSION', 'session')
    user_phone = environ['TG_PHONE']
    client = TelegramClient(
        session_name,
        int(environ['TG_API_ID']),
        environ['TG_API_HASH'],
        proxy=None,
        update_workers=4  # background threads that dispatch updates
    )

    print('INFO: Connecting to Telegram Servers...', end='', flush=True)
    client.connect()
    print('Done!')

    if not client.is_user_authorized():
        print('INFO: Unauthorized user')
        client.send_code_request(user_phone)
        code_ok = False
        # keep prompting until sign-in succeeds
        while not code_ok:
            code = input('Enter the auth code: ')
            try:
                code_ok = client.sign_in(user_phone, code)
            except SessionPasswordNeededError:
                # account has 2FA enabled; a password is required after the code
                password = getpass('Two step verification enabled. '
                                   'Please enter your password: ')
                code_ok = client.sign_in(password=password)

    # fixed typo in the user-facing message ("succesfully")
    print('INFO: Client initialized successfully!')
    client.add_update_handler(update_handler)
    input('Press Enter to stop this!\n')


def update_handler(update):
    """Print the received update object (runs on an update-worker thread)."""
    print(update)
    print('Press Enter to stop this!')


if __name__ == '__main__':
    main()
Handmade Goddess 925 Sterling Silver Nose Pin. Gemstone: Plain Silver, Colour: Silver, weight: 1g, Size: 2.0 x 1.2 cm. Unique handmade 925 sterling silver nose pin. Wholesale Silver Jewelry. Only one piece ready to ship.
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from os import listdir, path
import re
import copy
import datetime

import matplotlib.pyplot as plt


def get_mails(*folders):
    """
    Gets the filename of every mail to analyze

    :param folders: Folders to analyze (comma separated arguments)
    :return mails: List of all files in given folders
    :type folders: Tuple of strings
    :rtype: list
    """
    mails = []
    for folder in folders:
        temp = [path.join(folder, file) for file in listdir(folder)
                if path.isfile(path.join(folder, file))]
        mails = mails + temp
    return mails


def get_data(mails, headers):
    """
    Gets data specified in headers from the mails

    :param mails: List of mail filenames. Output of get_mails
    :params headers: List of mail headers to get info from
    :return data: Dictionary of the data associated with the headers
        Each index is named as the corresponding header. The value is a
        list of lines, each one belonging to one mail.
    :return cnt_skipped: Counter of skipped files due to Unicode
        decoding errors
    :type mails: List
    :type headers: List
    :rtype data: Dictionary
    :rtype cnt_skipped: int
    """
    # Init empty dictionary
    data = {}
    for i in headers:
        data[i] = []

    cnt_skipped = 0
    for file in mails:
        with open(file, 'r') as f:
            # If text files are not unicode, there will be an error.
            # However detecting formats is not trivial, right now we
            # are just ignoring files producing errors.
            try:
                lines = f.readlines()
            except UnicodeDecodeError:
                cnt_skipped = cnt_skipped + 1
                continue
            for line in lines:
                for header in headers:
                    if line[0:len(header)] == header:
                        # TODO: +1line? (next one starts \t)
                        data[header].append(line[len(header):-1])
    return (data, cnt_skipped)


def regex_mails(from_to):
    """
    Gets the email address (only) from the from/to field, removing
    everything else.
    E.g: Super Lopez Jimenez <superlopez@jimenez.com>
         --> superlopez@jimenez.com

    :param from_to: Field from/to from email header.
    :return data: List of all files in given folders
    :type from_to: string
    :rtype email_add: string

    Reference:
    http://stackoverflow.com/questions/4026685/regex-to-get-text-between-two-characters
    """
    # BUGFIX: the character class must exclude '>' (was '[^}]'), otherwise
    # the greedy match can run past a closing '>' when the field contains
    # several bracketed addresses.
    regex_str = "(?<=<)[^>]*(?=>)"
    temp = re.search(regex_str, from_to)
    if not temp:
        email_add = []
    else:
        email_add = temp.group(0)
    return email_add


def date_format(date):
    """
    Changes date format to python datetime object.
    Input format: Thu, [0]4 Sep 2014 14:40:18 +0200 [(CET)]
    Depends of mail

    :param date: Input date
    :return date_out: List of all files in given folders
    :type date: string
    :rtype date_out: date object

    Reference:
    http://stackoverflow.com/questions/4666973/how-to-extract-a-substring-from-inside-a-string-in-python
    """
    regex_str = ", (.+?):"
    temp = re.search(regex_str, date)
    if temp:
        temp = temp.group(0)
        # Less hardcode?
        month_table = {
            "Jan": 1,
            "Feb": 2,
            "Mar": 3,
            "Apr": 4,
            "May": 5,
            "Jun": 6,
            "Jul": 7,
            "Aug": 8,
            "Sep": 9,
            "Oct": 10,
            "Nov": 11,
            "Dec": 12,
        }
        year = int(temp[-8:-4])
        month = month_table[temp[-12:-9]]
        day = int(temp[-15:-13])
        hour = int(date[len(temp):len(temp)+2])
        minute = int(date[len(temp)+3:len(temp)+5])
        second = int(date[len(temp)+6:len(temp)+8])
        try:
            offset = int(date[len(temp)+9:len(temp)+12])
        except ValueError:
            # Some mails do not have UTC offset in numbers, just as string
            # We will ignore this, though it wouldn't be difficult to map
            # strings to number using a dictionary (TODO)
            # (was a bare `except:` that hid real errors; int() on a bad
            # slice raises ValueError)
            offset = 0
        hour = (hour + offset) % 24
        date_out = datetime.datetime(year, month, day, hour, minute, second)
    else:
        date_out = []
    return date_out


def data_munge(data_raw):
    """
    Changes data format
    Emails list is replaced with a dictionary with only one entry for
    each email (and a counter), deleting also aliases
    Build a python datetime object from the Date header

    :param data_raw: Output of get_data
    :return data: Dicitonary with munged data
    :type data_raw: dictionary
    :rtype data: dictionary
    """
    # We can make it faster using a shallow copy if we are not interested in
    # keeping data_raw.
    data = copy.deepcopy(data_raw)
    # Changing email from/to fields
    for header in ['From: ', 'To: ']:
        if header in data_raw.keys():
            for ind in range(len(data[header])):
                # TODO: Handling multiple receivers
                data[header][ind] = regex_mails(data[header][ind])
            temp = {}
            for addr in data[header]:
                if addr:  # not empty
                    if addr in temp:
                        temp[addr] = temp[addr] + 1
                    else:
                        temp[addr] = 1
            data[header] = temp
    if 'Date: ' in data_raw.keys():
        for ind in range(len(data['Date: '])):
            data['Date: '][ind] = date_format(data['Date: '][ind])
    return data


def hour_mails(data):
    """
    Calculate percentage of mails sent on each hour of the day

    :param data: Output of data_munge
    :return cnt: List of 24 percentages. Element 0 is the hour from
        00:00 to 00:59 (midnight), element 1 from 01:00 to 01:59, etc.
        (docstring fixed: it previously claimed element 0 was 1am)
    :type data: dictionary
    rtype cnt: list
    """
    cnt = [0]*24
    cnt_total = 0
    if 'Date: ' in data.keys():
        for date in data['Date: ']:
            if date:  # not empty
                cnt[date.hour] = cnt[date.hour] + 1
                cnt_total = cnt_total + 1
    # Guard against ZeroDivisionError when no parsable dates were found
    if cnt_total:
        cnt = [100*x/cnt_total for x in cnt]
    return cnt


def emails_involved(data):
    """
    Prints to a file emails involved and a counter for each one

    :param data: Output of data_munge
    :type data: dictionary

    Reference:
    http://stackoverflow.com/questions/613183/sort-a-python-dictionary-by-value
    """
    filename_out = 'resultados.txt'
    with open(filename_out, 'w') as f:
        if 'To: ' in data.keys():
            f.write('******************************************************\n')
            f.write('                        To                            \n')
            f.write('                 E-mail + Counter\n')
            f.write('------------------------------------------------------\n')
            for mail, cnt in \
                    sorted(data['To: '].items(), key=lambda x: x[1],
                           reverse=True):
                f.write(mail + '\t' + str(cnt) + '\n')
            f.write('******************************************************\n')
        if 'From: ' in data.keys():
            f.write('******************************************************\n')
            f.write('                       From                           \n')
            f.write('                 E-mail + Counter\n')
            f.write('------------------------------------------------------\n')
            for mail, cnt in \
                    sorted(data['From: '].items(), key=lambda x: x[1],
                           reverse=True):
                f.write(mail + '\t' + str(cnt) + '\n')
            f.write('******************************************************\n')


if __name__ == '__main__':
    debug = False

    mails = get_mails('./Inbox', './Sent')
    headers = ['Date: ', 'From: ', 'To: ', 'Subject: ']
    (data_raw, cnt_skipped) = get_data(mails, headers)
    data = data_munge(data_raw)
    hour_cnt = hour_mails(data)

    # Output
    emails_involved(data)
    fig1 = plt.figure()
    axis1 = fig1.add_subplot(111)
    axis1.set_title('Percentage of mails sent each hour of the day')
    axis1.set_xlabel('Hour')
    axis1.set_ylabel('%')
    # BUGFIX: hour_cnt[0] is midnight; labels must run 0..23 (they were
    # 1..24, shifting every bar one hour late and clipping hour 0 via
    # set_xlim(1, 24)).
    x_labels = list(range(0, 24))
    axis1.bar(x_labels, hour_cnt, align='center')
    axis1.set_xticks(x_labels)
    axis1.set_xlim(-1, 24)
    plt.show()

    # TODO: analyse content, limit data to time frame...

    if debug is True:
        print('{} skipped files due to unicode decoding error'
              .format(cnt_skipped))

# Mails saved using offlineimap and a google account.
# Folders needed: inbox and Sent.
# Notes:
# In python variables defined in main are global, but I prefer to declare
# arguments
# References:
#
When Dove Cameron stepped onto the Much Music Awards red carpet, we instantly fell in love with her summery beauty vibes. From her bright blonde strands to her solar-hued eyeshadow, she delivered everything we love about the warmest months of the year. And after finding out that celeb MUA Vincent Oquendo was behind the sunny (AND smudgeproof) makeup, we had to ask the duo exactly how it was done. True to form, they didn't disappoint, revealing every single detail behind Dove's MMAs prep. If you follow Dove on Instagram, you'll already know that she's never afraid to experiment. Even with her jam-packed schedule (she's currently promoting Descendants 2, rehearsing for Mamma Mia at the Hollywood Bowl, secretly working on music, and planning a long list of enviable vacations), she finds time to try the statement lips, braids, and shadow combos of our dreams. "I'm obsessed with pastel hair," she told Teen Vogue. "I've been wanting to try a paled-out pink all over and I've been craving a short chop for months!" She skipped those particular transformations for the red carpet, going with her signature blonde strands instead. But, she took a cue from nature for her makeup. "My inspiration was the colors in a summer sunset," says Vincent. "Warm orange, bronze, and gold colors, I also mixed in some brown and black eyeliners teamed with black mascara and a pop of burgundy mascara." You'll never guess how he came up with it, either. "Dove was singing and I thought she was such a little ray of sunshine," he explains. "So it came to me: sunset eyes on my little ray of sunshine." After creating Dove’s signature cat flick with brown Kyliner and black Kiko Gel Eyeliner, he coated her lashes with Lancôme Définicils and Kevyn Aucoin The Expert Mascara in Bloodroses (according to Vincent, the burgundy shade adds warmth). 
To keep the lips from overpowering the look, he used neutral shades: Laura Mercier Lip Parfait Creamy Colourbalm in Amaretto Swirl paired with Charlotte Tilbury Lip Cheat in Pillow Talk. “Everything I used on her eyes was either waterproof or water resistant,” adds Vincent. Perfect for literally every summer situation you’ll experience this year.
# -*- coding: utf-8 -*-

import collections
import keyword
import numbers
import re

import six

from sidecar.element import Element, expr

try:
    # 'Mapping' lives in collections.abc since Python 3.3; the alias in the
    # top-level 'collections' module was removed in Python 3.10, so using
    # collections.Mapping directly breaks on modern interpreters.
    from collections.abc import Mapping
except ImportError:  # Python 2
    from collections import Mapping

# All HTML tag names for which an Element subclass is generated.
_TAGS = """
a abbr address area article aside audio b base bdi bdo big blockquote body br
button canvas caption cite code col colgroup data datalist dd del details dfn
dialog div dl dt em embed fieldset figcaption figure footer form h1 h2 h3 h4
h5 h6 head header hgroup hr html i iframe img input ins kbd keygen label
legend li link main map mark menu menuitem meta meter nav noscript object ol
optgroup option output p param picture pre progress q rp rt ruby s samp
script section select small source span strong style sub summary sup table
tbody td textarea tfoot th thead time title tr track u ul var video wbr
""".split()

# Void elements: may not have children.
_VOID_TAGS = """
area base br col embed hr img input keygen link menuitem meta param source
track wbr
""".split()

# Whitelist of allowed (camelCase) HTML attributes.
_ATTRIBUTES = """
accept acceptCharset accessKey action allowFullScreen allowTransparency alt
async autoComplete autoFocus autoPlay capture cellPadding cellSpacing
challenge charSet checked classID className colSpan cols content
contentEditable contextMenu controls coords crossOrigin data dateTime default
defer dir disabled download draggable encType form formAction formEncType
formMethod formNoValidate formTarget frameBorder headers height hidden high
href hrefLang htmlFor httpEquiv icon id inputMode integrity is keyParams
keyType kind label lang list loop low manifest marginHeight marginWidth max
maxLength media mediaGroup method min minLength multiple muted name
noValidate nonce open optimum pattern placeholder poster preload radioGroup
readOnly rel required reversed role rowSpan rows sandbox scope scoped
scrolling seamless selected shape size sizes span spellCheck src srcDoc
srcLang srcSet start step style summary tabIndex target title type useMap
value width wmode wrap
""".split()

# Whitelist of allowed (camelCase) CSS style property names.
_STYLES = """
alignContent alignItems alignSelf animation animationDelay animationDirection
animationDuration animationFillMode animationIterationCount animationName
animationTimingFunction animationPlayState background backgroundAttachment
backgroundColor backgroundImage backgroundPosition backgroundRepeat
backgroundClip backgroundOrigin backgroundSize backfaceVisibility border
borderBottom borderBottomColor borderBottomLeftRadius borderBottomRightRadius
borderBottomStyle borderBottomWidth borderCollapse borderColor borderImage
borderImageOutset borderImageRepeat borderImageSlice borderImageSource
borderImageWidth borderLeft borderLeftColor borderLeftStyle borderLeftWidth
borderRadius borderRight borderRightColor borderRightStyle borderRightWidth
borderSpacing borderStyle borderTop borderTopColor borderTopLeftRadius
borderTopRightRadius borderTopStyle borderTopWidth borderWidth bottom
boxDecorationBreak boxShadow boxSizing captionSide clear clip color
columnCount columnFill columnGap columnRule columnRuleColor columnRuleStyle
columnRuleWidth columns columnSpan columnWidth content counterIncrement
counterReset cursor direction display emptyCells filter flex flexBasis
flexDirection flexFlow flexGrow flexShrink flexWrap cssFloat font fontFamily
fontSize fontStyle fontVariant fontWeight fontSizeAdjust fontStretch
hangingPunctuation height hyphens icon imageOrientation justifyContent left
letterSpacing lineHeight listStyle listStyleImage listStylePosition
listStyleType margin marginBottom marginLeft marginRight marginTop maxHeight
maxWidth minHeight minWidth navDown navIndex navLeft navRight navUp opacity
order orphans outline outlineColor outlineOffset outlineStyle outlineWidth
overflow overflowX overflowY padding paddingBottom paddingLeft paddingRight
paddingTop pageBreakAfter pageBreakBefore pageBreakInside perspective
perspectiveOrigin position quotes resize right tableLayout tabSize textAlign
textAlignLast textDecoration textDecorationColor textDecorationLine
textDecorationStyle textIndent textJustify textOverflow textShadow
textTransform top transform transformOrigin transformStyle transition
transitionProperty transitionDuration transitionTimingFunction
transitionDelay unicodeBidi verticalAlign visibility whiteSpace width
wordBreak wordSpacing wordWrap widows zIndex
""".split()


def _register_html_tags():
    """Generate one Element subclass per HTML tag and publish it in this
    module's namespace (keywords like `del` get a trailing underscore)."""
    elements = {}

    def clsdict(name):
        # Build the class dictionary for the Element subclass of `name`.
        def __init__(self, **props):
            super(elements[name], self).__init__(name, props=props)

        def _convert_props(self, **props):
            # Validate and normalize props; note `props` is rebound to a new
            # dict while `props_items` still iterates the original mapping.
            props, props_items = {}, props.items()
            for k, v in props_items:
                # convert snakecase to camelcase for all props
                k = re.sub(r'_([a-z])', lambda s: s.group(1).upper(), k)
                # allow trailing underscore if a prop is a Python keyword
                if k and k.endswith('_') and keyword.iskeyword(k[:-1]):
                    k = k[:-1]
                if k == 'class':
                    k = 'className'
                elif k == 'for':
                    k = 'htmlFor'
                if k not in _ATTRIBUTES:
                    raise RuntimeError('unknown attribute: {}'.format(k))
                if k in props:
                    raise RuntimeError('duplicate attribute: {}'.format(k))
                # style attribute must be a dict
                if k == 'style':
                    # Was `collections.Mapping` -- removed in Python 3.10.
                    if not isinstance(v, Mapping):
                        raise RuntimeError('invalid style: {}'.format(v))
                    v, v_items = {}, v.items()
                    for sk, sv in v_items:
                        # convert snakecase (dashes allowed) to camelcase
                        sk = re.sub(r'[\-_]([a-z])', lambda s: s.group(1).upper(), sk)
                        if sk not in _STYLES:
                            raise RuntimeError('unknown style: {}'.format(sk))
                        if sk in v:
                            raise RuntimeError('duplicate style: {}'.format(sk))
                        # only allow strings, integers and expressions for styles
                        if not isinstance(sv, (six.string_types, numbers.Real, expr)):
                            raise RuntimeError('invalid style: {}={}'.format(sk, sv))
                        v[sk] = sv
                else:
                    # only allow strings or expressions for non-style attributes
                    if not isinstance(v, (six.string_types, expr)):
                        raise RuntimeError('invalid attribute: {}={}'.format(k, v))
                props[k] = v
            return props

        return {
            '__init__': __init__,
            '__doc__': '<{}> HTML tag.'.format(name),
            '_convert_props': _convert_props,
            'allow_children': name not in _VOID_TAGS
        }

    for tag in _TAGS:
        elements[tag] = type(tag, (Element,), clsdict(tag))
        # register tag in the global namespace, append underscore if it's a
        # Python keyword
        globals()[tag + '_' * keyword.iskeyword(tag)] = elements[tag]


_register_html_tags()
Fair Hill, MD – The West coast made sure their presence was known on Friday at the 2018 The Dutta Corp. Fair Hill International with Californians sweeping the international and young horse divisions. Frankie Thieriot Stutes leads The Dutta Corp./USEF CCI3* Eventing National Championship, and Heather Morris took over The Dutta Corp./USEF CCI2* Eventing National Championship after the dressage phase. Three-star winners once already this year and United States Equestrian Team (USET) Foundation Jacqueline B. Mars International Competition National Developing Rider Grant recipients, Thieriot Stutes (Occidental, Calif.) and The Chatwin Group's Chatwin, a 10-year-old Oldenburg gelding, topped Friday's leaderboard on a score of 27.3. "I feel really lucky to ride ‘Chat’ every day, and this division and the people I'm sitting next to are world-class so it feels amazing for this moment, but after I walk out of here I'm focused on tomorrow. Tomorrow is an entirely different day," said Thieriot Stutes. After almost a decade since her last Fair Hill International appearance, Thieriot Stutes, who is an amateur rider herself, has her former advanced horse Fric Frac Berence on her mind as she and Chatwin take on this weekend's challenge. "I hope to have a little bit of ‘Fric’ with me when I leave the box tomorrow," she said. "This is a different year, and a different horse, but I hope [Chatwin] takes care of me tomorrow as ‘Fric’ would [have]." Thieriot Stutes also currently leads the USEF Developing Rider National Championship, which is awarded to the highest-placed U.S. competitor in the CCI3* that is 18 years of age or older and who has not yet completed a CCI4* event. Caroline Martin and her own and Sherrie Martin’s Danger Mouse landed in third overall in the CCI3* and are currently second in The Dutta Corp./USEF CCI3* Eventing National Championship. Martin and the 10-year-old Dutch Warmblood gelding earned a 29 -- a personal best for this pair. 
The CCI2* division welcomed a new leader on day two of the dressage competition in Heather Morris (Wildomar, Calif.) and Charlie Tango, a 10-year-old Irish Sport Horse gelding owned by The Team Express Group. Their morning result of 22.9 remained untouched throughout the afternoon. "[Charlie Tango] was a really good boy today," Morris said. "He's pretty good in the atmosphere. He really excels in the canter work and we had a little bobble in the trot work so I had to make sure the canter work was perfect." U.S. Olympian Boyd Martin (Cochranville, Pa.) found himself in second place overall in the CCI2* as well as in The Dutta Corp./USEF CCI2* Eventing National Championship with Christine Turner's On Cue. The 12-year-old Selle Francais mare carries 23.7 points into Saturday's cross-country phase. Anna Loschiavo (Bradford, VT) and Melanie Loschiavo's Spartacus Q peaked at precisely the perfect moment for third place in the two-star and the national championship. The 9-year-old Hanoverian gelding sits on a score of 25.8. Jenny Caras (Cartersville, Ga.) and her own Fernhill Trendy, a seven-year-old Irish Sport Horse gelding, continue to lead the USEF Young Horse National Championship, awarded to the owner of the highest-placed U.S. horse that is six or seven years of age, on a score of 27.1 for sixth place in the CCI2* overall. Mia Farley (San Clemente, Calif.) and her BGS Firecracker, an eight-year-old Irish Sport Horse mare, sit on a score of 29.9 and lead in the USEF Young Rider National Championship, awarded in the CCI2* to highest-placed U.S. competitor who is 16 years of age to 21 years of age during the current competition year. The United States Eventing Association’s (USEA) Young Event Horse (YEH) East Coast Championships culminated Friday with the jumping and galloping portion of the competition. MB MaiStein and R River Star were crowned champions of the 5- and 4-year-old divisions, respectively. Tamra Smith (Murrieta, Calif.) 
was the third of the West Coast contingent to best the competition at Fair Hill on Friday, as she and the MB Group LLC's MB MaiStein finished first in the five-year-old division. The Oldenburg gelding by Rocky Lee finished on an 88.29% (out of 100%), impressing the judges with his cross-country efforts that earned him 13.5% (out of 15%). "I think it's super important to support the program. The horses get the exposure without being put under a tremendous amount of pressure," Smith said of the USEA Young Event Horse Program. "The courses have always been fair, and it's interesting to get the judges' point of view. I'm really excited he was a West Coast horse that came to the East Coast and won. I'm surprised and really excited that he did." Courtney Cooper's (Nottingham, Pa.) R River Star proved once again to be the best of the four-year-old horses Friday, winning on a final score of 83.24% (out of 100%) with Cooper riding. The Irish Sport Horse/Dutch Warmblood gelding, who was bred by the rider, is by Riverman and out of R Star. "He was great; he was light on his feet. He was thoughtful. He looked for the jumps. We started him and did all the work with him. He's really gotten down to business this last part of his four-year-old year. He's come along and really done well," Cooper said. Competitors take on the thrilling cross-country phase Saturday, October 20 beginning at 9:15 a.m. ET, and the show jumping finale will take place Sunday, October 21. This year also marks the 30th anniversary of The Dutta Corp., the event's title sponsor. The Dutta Corporation is an international and domestic horse shipping company founded by J. Tim Dutta. It has been expanding rapidly since its inception in 1988 and particularly so in the last twenty years. With state-of-the-art jet stalls and an expert team of grooms, The Dutta Corp. provides horse air transport solutions tailored to each individual horse. The CCI2* and CCI3* cross-country will be live streamed starting at 9 a.m.
ET on USEF Network. Dressage and jumping will be available to watch on-demand after the completion of the event. Stay up to date on U.S. Eventing by following US Equestrian on Facebook, Twitter and Instagram and USA Eventing on Facebook. Use #USAEventing.
"""Views for support groups: listing, detail, management, membership and invitations.

Review fixes applied:
- ``RedirectToPresseroView.get``: ``.exists`` was referenced without being
  called; a bound method is always truthy, so the membership/permission check
  could never raise ``PermissionDenied``. Now calls ``.exists()``.
- ``ExternalJoinSupportGroupView.form_valid``: fixed user-facing typo
  ("Merrci" -> "Merci").
"""
import logging
from uuid import UUID

import ics
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied, ValidationError
from django.core.paginator import Paginator
from django.core.validators import validate_email
from django.db import IntegrityError
from django.db.models import Q
from django.http import (
    Http404,
    HttpResponseRedirect,
    HttpResponseBadRequest,
    JsonResponse,
    HttpResponse,
    HttpResponseForbidden,
)
from django.template.response import TemplateResponse
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.utils.html import format_html
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views import View
from django.views.generic import (
    UpdateView,
    ListView,
    DeleteView,
    DetailView,
    TemplateView,
)
from django.views.generic.edit import ProcessFormView, FormMixin, FormView

from agir.authentication.tokens import (
    invitation_confirmation_token_generator,
    abusive_invitation_report_token_generator,
)
from agir.authentication.utils import hard_login
from agir.authentication.view_mixins import (
    HardLoginRequiredMixin,
    PermissionsRequiredMixin,
    VerifyLinkSignatureMixin,
)
from agir.donations.allocations import get_balance
from agir.donations.models import SpendingRequest
from agir.front.view_mixins import (
    ObjectOpengraphMixin,
    ChangeLocationBaseView,
    SearchByZipcodeBaseView,
)
from agir.groups.actions.pressero import redirect_to_pressero, is_pressero_enabled
from agir.groups.actions.promo_codes import (
    get_next_promo_code,
    is_promo_code_delayed,
    next_promo_code_date,
)
from agir.groups.models import SupportGroup, Membership, SupportGroupSubtype
from agir.groups.tasks import (
    send_someone_joined_notification,
    send_abuse_report_message,
)
from agir.lib.http import add_query_params_to_url
from agir.lib.utils import front_url
from agir.people.views import (
    ConfirmSubscriptionView,
    subscription_confirmation_token_generator,
    Person,
)

from .forms import (
    SupportGroupForm,
    AddReferentForm,
    AddManagerForm,
    GroupGeocodingForm,
    SearchGroupForm,
    ExternalJoinForm,
    InvitationWithSubscriptionConfirmationForm,
    InvitationForm,
)

__all__ = [
    "SupportGroupManagementView",
    "CreateSupportGroupView",
    "PerformCreateSupportGroupView",
    "ModifySupportGroupView",
    "QuitSupportGroupView",
    "RemoveManagerView",
    "SupportGroupDetailView",
    "ThematicTeamsViews",
    "ChangeGroupLocationView",
    "SupportGroupListView",
]

logger = logging.getLogger(__name__)


class CheckMembershipMixin:
    """Helpers to query the requesting user's membership in the view's group.

    Works for views whose ``self.object`` is either a ``SupportGroup`` or an
    object with a ``supportgroup`` attribute (e.g. a ``Membership``).
    """

    def user_is_referent(self):
        """Return True if the requesting user is a referent of the group."""
        return self.user_membership is not None and self.user_membership.is_referent

    def user_is_manager(self):
        """Return True if the requesting user is a referent or a manager."""
        return self.user_membership is not None and (
            self.user_membership.is_referent or self.user_membership.is_manager
        )

    @property
    def user_membership(self):
        """The user's Membership for the current group, or None (cached per view)."""
        if not hasattr(self, "_user_membership"):
            if isinstance(self.object, SupportGroup):
                group = self.object
            else:
                group = self.object.supportgroup

            try:
                self._user_membership = group.memberships.get(
                    person=self.request.user.person
                )
            except Membership.DoesNotExist:
                self._user_membership = None

        return self._user_membership


class SupportGroupListView(SearchByZipcodeBaseView):
    """List of groups, filtered by zipcode."""

    min_items = 20
    template_name = "groups/group_list.html"
    context_object_name = "groups"
    form_class = SearchGroupForm

    def get_base_queryset(self):
        return SupportGroup.objects.active().order_by("name")


class SupportGroupDetailView(ObjectOpengraphMixin, DetailView):
    """Public detail page for a group; POST with action=join adds the user."""

    template_name = "groups/detail.html"
    queryset = SupportGroup.objects.active().all()

    title_prefix = "Groupe d'action"
    meta_description = "Rejoignez les groupes d'action de la France insoumise."

    def get_template_names(self):
        return ["groups/detail.html"]

    def get_context_data(self, **kwargs):
        # Paginate upcoming and past organized events separately (5 per page),
        # each driven by its own GET parameter.
        events_future = Paginator(
            self.object.organized_events.upcoming().distinct().order_by("start_time"), 5
        ).get_page(self.request.GET.get("events_future_page"))
        events_past = Paginator(
            self.object.organized_events.past().distinct().order_by("-start_time"), 5
        ).get_page(self.request.GET.get("events_past_page"))

        return super().get_context_data(
            events_future=events_future,
            events_past=events_past,
            is_member=self.request.user.is_authenticated
            and self.object.memberships.filter(person=self.request.user.person).exists(),
            is_referent_or_manager=self.request.user.is_authenticated
            and self.object.memberships.filter(
                Q(person=self.request.user.person)
                & (Q(is_referent=True) | Q(is_manager=True))
            ).exists(),
            **kwargs,
        )

    @method_decorator(login_required(login_url=reverse_lazy("short_code_login")))
    def post(self, request, *args, **kwargs):
        """Handle the join action; external users allowed only if the group permits."""
        self.object = self.get_object()

        if not request.user.person.is_insoumise and not self.object.allow_external:
            return HttpResponseForbidden()

        if request.POST["action"] == "join":
            try:
                membership = Membership.objects.create(
                    supportgroup=self.object, person=request.user.person
                )
                send_someone_joined_notification.delay(membership.pk)
            except IntegrityError:
                pass  # the person is already a member of the group
            return HttpResponseRedirect(
                reverse("view_group", kwargs={"pk": self.object.pk})
            )

        return HttpResponseBadRequest()


class SupportGroupIcsView(DetailView):
    """Serve the group's organized events as an iCalendar feed."""

    queryset = SupportGroup.objects.active().all()

    def render_to_response(self, context, **response_kwargs):
        calendar = ics.Calendar(
            events=[
                ics.event.Event(
                    name=event.name,
                    begin=event.start_time,
                    end=event.end_time,
                    uid=str(event.pk),
                    description=event.description,
                    location=event.short_address,
                    url=front_url("view_event", args=[event.pk], auto_login=False),
                )
                for event in context["supportgroup"].organized_events.all()
            ]
        )

        return HttpResponse(calendar, content_type="text/calendar")


class SupportGroupManagementView(
    HardLoginRequiredMixin, CheckMembershipMixin, DetailView
):
    """Management page: referents/managers lists, promo codes, and admin forms.

    GET requires manager status; POST dispatches to one of the forms in
    ``get_forms`` keyed by the ``form`` POST parameter.
    """

    template_name = "groups/manage.html"
    queryset = SupportGroup.objects.active().all().prefetch_related("memberships")
    # Success messages per form name (lazy: evaluated at render time).
    messages = {
        "add_referent_form": ugettext_lazy(
            "{email} est maintenant correctement signalé comme second·e animateur·rice."
        ),
        "add_manager_form": ugettext_lazy(
            "{email} a bien été ajouté·e comme gestionnaire pour ce groupe."
        ),
        "invitation_form": ugettext_lazy(
            "{email} a bien été invité à rejoindre votre groupe."
        ),
    }
    # Forms that only referents (not mere managers) may submit.
    need_referent_status = {"add_referent_form", "add_manager_form"}
    # Which management panel to re-open after a POST.
    active_panel = {
        "add_referent_form": "animation",
        "add_manager_form": "animation",
        "invitation_form": "invitation",
    }

    def get_forms(self):
        """Instantiate all management forms, bound to POST data when relevant."""
        kwargs = {}

        if self.request.method in ("POST", "PUT"):
            kwargs.update({"data": self.request.POST})

        return {
            "add_referent_form": AddReferentForm(self.object, **kwargs),
            "add_manager_form": AddManagerForm(self.object, **kwargs),
            "invitation_form": InvitationForm(
                group=self.object, inviter=self.request.user.person, **kwargs
            ),
        }

    def get_context_data(self, **kwargs):
        kwargs["referents"] = self.object.memberships.filter(is_referent=True).order_by(
            "created"
        )
        kwargs["managers"] = self.object.memberships.filter(
            is_manager=True, is_referent=False
        ).order_by("created")
        kwargs["members"] = self.object.memberships.all().order_by("created")
        kwargs["has_promo_code"] = self.object.tags.filter(
            label=settings.PROMO_CODE_TAG
        ).exists()
        if kwargs["has_promo_code"]:
            kwargs["group_promo_code"] = get_next_promo_code(self.object)
        if is_promo_code_delayed():
            kwargs["promo_code_delay"] = next_promo_code_date()
        kwargs["certifiable"] = (
            self.object.type in settings.CERTIFIABLE_GROUP_TYPES
            or self.object.subtypes.filter(
                label__in=settings.CERTIFIABLE_GROUP_SUBTYPES
            ).exists()
        )
        kwargs["satisfy_requirements"] = len(kwargs["referents"]) > 1
        kwargs["allocation_balance"] = get_balance(self.object)
        kwargs["spending_requests"] = SpendingRequest.objects.filter(
            group=self.object
        ).exclude(status=SpendingRequest.STATUS_PAID)
        kwargs["is_pressero_enabled"] = is_pressero_enabled()

        kwargs["active"] = self.active_panel.get(self.request.POST.get("form"))

        forms = self.get_forms()
        for form_name, form in forms.items():
            # setdefault: a bound, invalid form passed in by post() wins.
            kwargs.setdefault(form_name, form)

        return super().get_context_data(
            is_referent=self.user_membership is not None
            and self.user_membership.is_referent,
            is_manager=self.user_membership is not None
            and (self.user_membership.is_referent or self.user_membership.is_manager),
            **kwargs,
        )

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()

        # only managers can access the page
        if not self.user_is_manager():
            raise PermissionDenied("Vous n'etes pas gestionnaire de ce groupe.")

        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()
        form_name = request.POST.get("form")

        # only referents can add referents and managers
        if not self.user_is_referent() and form_name in self.need_referent_status:
            raise PermissionDenied(
                "Vous n'êtes pas animateur de ce groupe et ne pouvez donc pas modifier les "
                "animateurs et gestionnaires."
            )

        forms = self.get_forms()
        if form_name in forms:
            form = forms[form_name]
            if form.is_valid():
                params = form.perform()
                messages.add_message(
                    request, messages.SUCCESS, self.messages[form_name].format(**params)
                )
            else:
                # Re-render the page with the invalid bound form in context.
                return self.render_to_response(
                    self.get_context_data(**{form_name: form})
                )

        return HttpResponseRedirect(
            reverse("manage_group", kwargs={"pk": self.object.pk})
        )


class CreateSupportGroupView(HardLoginRequiredMixin, TemplateView):
    """Render the React group-creation page, pre-filled from the user's profile."""

    template_name = "groups/create.html"

    def get_context_data(self, **kwargs):
        person = self.request.user.person

        initial = {}

        if person.contact_phone:
            initial["phone"] = person.contact_phone.as_e164

        if person.first_name and person.last_name:
            initial["name"] = "{} {}".format(person.first_name, person.last_name)

        return super().get_context_data(props={"initial": initial}, **kwargs)


class PerformCreateSupportGroupView(HardLoginRequiredMixin, FormMixin, ProcessFormView):
    """JSON endpoint actually creating the group (used by the creation page)."""

    model = SupportGroup
    form_class = SupportGroupForm

    def get_form_kwargs(self):
        """Add user person profile to the form kwargs"""
        kwargs = super().get_form_kwargs()

        person = self.request.user.person
        kwargs["person"] = person

        return kwargs

    def form_invalid(self, form):
        return JsonResponse({"errors": form.errors}, status=400)

    def form_valid(self, form):
        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message="Votre groupe a été correctement créé.",
        )

        form.save()

        return JsonResponse(
            {
                "status": "OK",
                "id": form.instance.id,
                "url": reverse("view_group", args=[form.instance.id]),
            }
        )


class ModifySupportGroupView(
    HardLoginRequiredMixin, PermissionsRequiredMixin, UpdateView
):
    """Edit an existing group (requires groups.change_supportgroup permission)."""

    permissions_required = ("groups.change_supportgroup",)
    template_name = "groups/modify.html"
    queryset = SupportGroup.objects.active().all()
    form_class = SupportGroupForm

    def get_form_kwargs(self):
        """Add user person profile to the form kwargs"""
        return {**super().get_form_kwargs(), "person": self.request.user.person}

    def get_success_url(self):
        return reverse("manage_group", kwargs={"pk": self.object.pk})

    def form_valid(self, form):
        # first get response to make sure there's no error when saving the model before adding message
        res = super().form_valid(form)

        messages.add_message(
            request=self.request,
            level=messages.SUCCESS,
            message=format_html(
                _("Les modifications du groupe <em>{}</em> ont été enregistrées."),
                self.object.name,
            ),
        )

        return res


class RemoveManagerView(HardLoginRequiredMixin, CheckMembershipMixin, DetailView):
    """Confirmation page + action to strip a membership of its manager flag."""

    template_name = "front/confirm.html"
    queryset = (
        Membership.objects.active()
        .all()
        .select_related("supportgroup")
        .select_related("person")
    )

    def get_context_data(self, **kwargs):
        person = self.object.person

        if person.first_name and person.last_name:
            name = "{} {} <{}>".format(
                person.first_name, person.last_name, person.email
            )
        else:
            name = person.email

        return super().get_context_data(
            title=_("Confirmer le retrait du gestionnaire ?"),
            # NOTE(review): wrapping an f-string in _() means the msgid is
            # computed per-request and will not match any translation catalog
            # entry — translation is effectively a no-op here. Confirm intent.
            message=_(
                f"""
            Voulez-vous vraiment retirer {name} de la liste des gestionnaires de ce groupe ?
            """
            ),
            button_text="Confirmer le retrait",
        )

    def get(self, request, *args, **kwargs):
        self.object = self.get_object()

        if not self.user_is_referent():
            raise PermissionDenied(
                "Vous n'êtes pas animateur de cet événement et ne pouvez donc pas modifier les "
                "animateurs et gestionnaires."
            )

        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)

    def post(self, request, *args, **kwargs):
        self.object = self.get_object()

        # user has to be referent, and target user cannot be a referent
        if not self.user_is_referent() or self.object.is_referent:
            raise PermissionDenied(
                "Vous n'êtes pas animateur de cet événement et ne pouvez donc pas modifier les "
                "animateurs et gestionnaires."
            )

        self.object.is_manager = False
        self.object.save()

        messages.add_message(
            request,
            messages.SUCCESS,
            _("{} n'est plus un gestionnaire du groupe.").format(
                self.object.person.email
            ),
        )

        return HttpResponseRedirect(
            reverse_lazy("manage_group", kwargs={"pk": self.object.supportgroup_id})
        )


class QuitSupportGroupView(HardLoginRequiredMixin, DeleteView):
    """Let a member leave a group; sole referents are blocked from leaving."""

    template_name = "groups/quit.html"
    success_url = reverse_lazy("dashboard")
    queryset = Membership.objects.active().all()
    context_object_name = "membership"

    def get_object(self, queryset=None):
        try:
            return (
                self.get_queryset()
                .select_related("supportgroup")
                .get(
                    supportgroup__pk=self.kwargs["pk"], person=self.request.user.person
                )
            )
        except Membership.DoesNotExist:
            raise Http404()

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["group"] = self.object.supportgroup
        context["success_url"] = self.get_success_url()
        return context

    def delete(self, request, *args, **kwargs):
        self.object = self.get_object()
        success_url = self.get_success_url()

        # make sure user is not a referent who cannot quit groups
        if (
            self.object.is_referent
            and len(self.object.supportgroup.memberships.filter(is_referent=True)) < 2
        ):
            messages.add_message(
                request,
                messages.ERROR,
                _(
                    "Vous êtes seul animateur⋅rice de ce groupe, et ne pouvez donc pas le quitter."
                    " Votre groupe doit d'abord se choisir un ou une autre animatrice pour permettre votre départ."
                ),
            )
        else:
            self.object.delete()

            messages.add_message(
                request,
                messages.SUCCESS,
                format_html(
                    _("Vous avez bien quitté le groupe <em>{}</em>"),
                    self.object.supportgroup.name,
                ),
            )

        return HttpResponseRedirect(success_url)


class ExternalJoinSupportGroupView(ConfirmSubscriptionView, FormView, DetailView):
    """Email-confirmation join flow for groups that allow external members."""

    queryset = SupportGroup.objects.filter(subtypes__allow_external=True)
    form_class = ExternalJoinForm
    show_already_created_message = False
    create_insoumise = False

    def dispatch(self, request, *args, **kwargs):
        self.group = self.object = self.get_object()
        return super().dispatch(request, *args, **kwargs)

    def success_page(self):
        if Membership.objects.filter(
            person=self.person, supportgroup=self.group
        ).exists():
            messages.add_message(
                request=self.request,
                level=messages.INFO,
                message=_("Vous êtes déjà membre."),
            )
            return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))

        Membership.objects.get_or_create(person=self.person, supportgroup=self.group)
        messages.add_message(
            request=self.request,
            level=messages.INFO,
            message=_("Vous avez bien rejoint le groupe."),
        )
        return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))

    def form_valid(self, form):
        form.send_confirmation_email(self.group)
        messages.add_message(
            request=self.request,
            level=messages.INFO,
            # FIX: typo "Merrci" -> "Merci" in the user-facing message.
            message=_(
                "Un email vous a été envoyé. Merci de cliquer sur le "
                "lien qu'il contient pour confirmer."
            ),
        )
        return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))

    def form_invalid(self, form):
        return HttpResponseRedirect(reverse("view_group", args=[self.group.pk]))


class ThematicTeamsViews(ListView):
    """List the thematic booklet-writing teams."""

    template_name = "groups/thematic_teams.html"
    context_object_name = "groups"

    def get_queryset(self):
        subtype = SupportGroupSubtype.objects.get(label="rédaction du livret")
        return SupportGroup.objects.active().filter(subtypes=subtype).order_by("name")

    def get_context_data(self, **kwargs):
        return super().get_context_data(
            **kwargs, default_image="front/images/AEC-mini.png"
        )


class ChangeGroupLocationView(ChangeLocationBaseView):
    """Geocoding/location change page for a group."""

    template_name = "groups/change_location.html"
    form_class = GroupGeocodingForm
    queryset = SupportGroup.objects.active().all()
    success_view_name = "manage_group"


class RedirectToPresseroView(HardLoginRequiredMixin, DetailView):
    """Redirect certified-group managers to the Pressero printing service."""

    template_name = "groups/pressero_error.html"
    queryset = SupportGroup.objects.active()

    def get(self, request, *args, **kwargs):
        group = self.get_object()
        person = request.user.person

        if not is_pressero_enabled():
            raise Http404("Cette page n'existe pas")

        if not group.is_certified:
            raise Http404("Cette page n'existe pas")

        # FIX: was `... .exists` (no call) — a bound method is always truthy,
        # so the permission check below could never trigger.
        if not Membership.objects.filter(
            supportgroup=group, person=person, is_manager=True
        ).exists():
            raise PermissionDenied("Vous ne pouvez pas accéder à cette page.")

        try:
            return redirect_to_pressero(person)
        except Exception:
            # Best-effort: show the error template rather than a 500.
            logger.error("Problème rencontré avec l'API Pressero", exc_info=True)
            return TemplateResponse(request, self.template_name)


class InvitationConfirmationView(VerifyLinkSignatureMixin, View):
    """Accept a signed invitation link and add the person to the group."""

    signature_generator = invitation_confirmation_token_generator

    def get(self, request, *args, **kwargs):
        token_params = self.get_signed_values()

        if token_params is None:
            return self.link_error_page()

        try:
            # ValueError covers malformed UUIDs in the signed parameters.
            person = Person.objects.get(pk=UUID(token_params["person_id"]))
            group = SupportGroup.objects.get(pk=UUID(token_params["group_id"]))
        except (ValueError, Person.DoesNotExist):
            return self.link_error_page()
        except SupportGroup.DoesNotExist:
            messages.add_message(
                request=request,
                level=messages.ERROR,
                message="Le groupe qui vous a invité n'existe plus.",
            )
            return HttpResponseRedirect(reverse("dashboard"))

        membership, created = Membership.objects.get_or_create(
            supportgroup=group, person=person
        )

        if created:
            messages.add_message(
                request,
                messages.SUCCESS,
                format_html(
                    "Vous venez de rejoindre le groupe d'action <em>{group_name}</em>",
                    group_name=group.name,
                ),
            )
        else:
            messages.add_message(
                request, messages.INFO, "Vous étiez déjà membre de ce groupe."
            )

        return HttpResponseRedirect(reverse("view_group", args=(group.pk,)))


class InvitationWithSubscriptionView(VerifyLinkSignatureMixin, FormView):
    """Invitation flow for people without an account: subscribe then join."""

    form_class = InvitationWithSubscriptionConfirmationForm
    signature_generator = subscription_confirmation_token_generator
    signed_params = ["email", "group_id"]
    template_name = "groups/invitation_subscription.html"

    def dispatch(self, request, *args, **kwargs):
        token_params = self.get_signed_values()

        if not token_params:
            return self.link_error_page()

        self.email = token_params["email"]
        try:
            validate_email(self.email)
        except ValidationError:
            return self.link_error_page()

        # Special case: the person created an account in the meantime
        # ==> redirect to the plain confirmation view instead.
        try:
            person = Person.objects.get_by_natural_key(self.email)
        except Person.DoesNotExist:
            pass
        else:
            params = {"person_id": str(person.id), "group_id": token_params["group_id"]}
            query_params = {
                **params,
                "token": invitation_confirmation_token_generator.make_token(**params),
            }
            return HttpResponseRedirect(
                add_query_params_to_url(
                    reverse("invitation_confirmation"), query_params
                )
            )

        try:
            self.group = SupportGroup.objects.get(pk=UUID(token_params["group_id"]))
        except ValueError:
            # not a UUID
            return self.link_error_page()
        except SupportGroup.DoesNotExist:
            # the group has disappeared in the meantime...
            self.group = None

        return super().dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        return super().get_context_data(group=self.group)

    def get_form_kwargs(self):
        kwargs = super().get_form_kwargs()
        kwargs["group"] = self.group
        kwargs["email"] = self.email
        return kwargs

    def form_valid(self, form):
        p = form.save()
        hard_login(self.request, p)
        return TemplateResponse(self.request, "people/confirmation_subscription.html")


class InvitationAbuseReportingView(VerifyLinkSignatureMixin, View):
    """Signed link allowing an invitee to report an abusive invitation."""

    signature_generator = abusive_invitation_report_token_generator
    form_template_name = "groups/invitation_abuse.html"
    confirmed_template_name = "groups/invitation_abuse_confirmed.html"

    def dispatch(self, request, *args, **kwargs):
        self.token_params = self.get_signed_values()

        if not self.token_params:
            return self.link_error_page()

        self.timestamp = abusive_invitation_report_token_generator.get_timestamp(
            request.GET.get("token")
        )

        try:
            self.group_id = UUID(self.token_params["group_id"])
            self.inviter_id = UUID(self.token_params["inviter_id"])
        except ValueError:
            return self.link_error_page()

        return super().dispatch(request, *args, **kwargs)

    def get(self, request, *args, **kwargs):
        return TemplateResponse(request, template=self.form_template_name)

    def post(self, request, *args, **kwargs):
        if self.inviter_id:
            send_abuse_report_message.delay(str(self.inviter_id))

        logger.info(
            msg=f"Abus d'invitation signalé ({self.group_id}, {self.inviter_id}, {self.timestamp})"
        )

        return TemplateResponse(request, template=self.confirmed_template_name)
"... by 1,i. 0. ..." "... Considerable effort is put into the safety risk assessment of any flight test programme- and rightly so, since failures to apply best practice in this area may cause significant expense, and in extremis loss of life. However, it must also be remembered that the flight test programme itself is an ess ..." "... (Test en vol de systèmes de vision nocturne à bord des aéronefs à voilure tournante) This AGARDograph has been sponsored by SCI-172, the Flight Test Technical Team (FT3) of the Systems Concepts and Integration Panel (SCI) of the RTO, ..." "... Advanced control for miniature helicopters: modelling, design and flight test ..." "... The NASA Dryden Flight Research Center has completed the initial flight test of a modified set of F/A-18 flight control computers that gives the aircraft a research control law capability. The production support flight control computers (PSFCC) provide an increased capability for flight research in ..." "... NICSAGUI^ Captain, USAF _Project Manager y AMIPELED-LUBITCH Major, lAF Project Test Pilot / / J> ..." "... 2007 FINAL TECHNICAL INFORMATION MEMORANDUM Approved for public release; distribution is unlimited. A F F T C ..." "... And recommend its acceptance: ..." "... iii TABLE OF CONTENTS ..." "... Prepared by: This handbook has been reviewed ..."
# Cobbler web UI views (Django, Python 2).
#
# Every view talks to the cobbler daemon over XML-RPC via the module-level
# `remote` proxy; `token` authorizes each remote call.  Listing views are
# paginated at 50 items per page.

from django.template.loader import get_template
from django.template import Context
from django.http import HttpResponse
from django.http import HttpResponseRedirect

import xmlrpclib, time

my_uri = "http://127.0.0.1/cobbler_api"
remote = xmlrpclib.Server(my_uri)
token = remote.login('testing', 'testing')


def index(request):
    """Render the landing page with the cobbler server version."""
    t = get_template('index.tmpl')
    html = t.render(Context({'version': remote.version(token)}))
    return HttpResponse(html)


def search(request, what):
    """Render the search form for object type `what` (5 criteria rows)."""
    t = get_template('search.tmpl')
    html = t.render(Context({'what': what, 'item_count': ["1", "2", "3", "4", "5"]}))
    return HttpResponse(html)


def dosearch(request, what):
    """Run a criteria search against the cobbler server and show the results
    using the matching object-list view."""
    criteria = {}
    for i in range(1, 6):
        key = request.POST.get("key%d" % i, None)
        val = request.POST.get("value%d" % i, None)
        if key not in (None, ''):
            if val != None:
                # Strip quotes so users can paste quoted values safely.
                val = val.replace('"', '')
            criteria[key] = val

    if what == "distro":
        results = remote.find_distro(criteria, True, token)
        return distro_list(request, results)
    elif what == "profile":
        results = remote.find_profile(criteria, True, token)
        return profile_list(request, results)
    elif what == "system":
        results = remote.find_system(criteria, True, token)
        return system_list(request, results)
    elif what == "image":
        results = remote.find_image(criteria, True, token)
        return image_list(request, results)
    elif what == "repo":
        results = remote.find_repo(criteria, True, token)
        return repo_list(request, results)
    else:
        # was: raise "..." -- raising a string is a TypeError in Python 2.6+
        raise RuntimeError("internal error, unknown search type")


def __page_number(page):
    """Coerce a page path component (possibly None or junk) to an int >= 1."""
    try:
        page = int(page)
    except (TypeError, ValueError):
        return 1
    if page < 1:
        page = 1
    return page


def __setup__pagination(object_list, page):
    """Return (num_pages, prev_page, next_page, offset, ending) for slicing
    `object_list` at 50 results per page.

    prev_page/next_page are None at the list edges.
    # TODO: 50 per page is hardcoded; not sure if this was a setting in the
    # old webui (if not it should be).
    """
    prev_page = page - 1
    next_page = page + 1
    num_pages = (len(object_list) - 1) / 50 + 1  # integer division (py2)
    if num_pages > 1:
        offset = (page - 1) * 50
        ending = offset + 50
        if ending > len(object_list):
            ending = len(object_list)
    else:
        offset = 0
        ending = len(object_list)
    if prev_page < 1:
        prev_page = None
    if next_page > num_pages:
        next_page = None
    return (num_pages, prev_page, next_page, offset, ending)


def distro_list(request, distros=None, page=None):
    """List distros, 50 per page."""
    if distros is None:
        distros = remote.get_distros(token)
    if page is None and len(distros) > 50:
        return HttpResponseRedirect('/cobbler_web/distro/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(distros, page)
    if offset > len(distros):
        # Page out of range: jump to the last valid page.
        return HttpResponseRedirect('/cobbler_web/distro/list/%d' % num_pages)
    t = get_template('distro_list.tmpl')
    html = t.render(Context({'what': 'distro', 'distros': distros[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def distro_edit(request, distro_name=None):
    """Render the distro add/edit form (blank form when distro_name is None)."""
    available_arches = ['i386', 'x86', 'x86_64', 'ppc', 'ppc64', 's390', 's390x', 'ia64']
    available_breeds = [['redhat', 'Red Hat Based'], ['debian', 'Debian'],
                        ['ubuntu', 'Ubuntu'], ['suse', 'SuSE']]
    distro = None
    if not distro_name is None:
        distro = remote.get_distro(distro_name, True, token)
        distro['ctime'] = time.ctime(distro['ctime'])
        distro['mtime'] = time.ctime(distro['mtime'])
    t = get_template('distro_edit.tmpl')
    html = t.render(Context({'distro': distro, 'available_arches': available_arches,
                             'available_breeds': available_breeds, "editable": True}))
    return HttpResponse(html)


def distro_save(request):
    """Create, edit, copy, rename or delete a distro from POSTed form data."""
    # FIXME: error checking
    # (duplicate 'kopts' entry removed from the original field list)
    field_list = ('name', 'comment', 'kernel', 'initrd', 'kopts', 'kopts_post',
                  'ksmeta', 'arch', 'breed', 'os_version', 'mgmt_classes',
                  'template_files', 'redhat_management_key', 'redhat_management_server')

    new_or_edit = request.POST.get('new_or_edit', 'new')
    editmode = request.POST.get('editmode', 'edit')
    distro_name = request.POST.get('name', request.POST.get('oldname', None))
    distro_oldname = request.POST.get('oldname', None)
    if distro_name == None:
        return HttpResponse("NO DISTRO NAME SPECIFIED")

    if new_or_edit == 'new' or editmode == 'copy':
        distro_id = remote.new_distro(token)
    else:
        if editmode == 'edit':
            distro_id = remote.get_distro_handle(distro_name, token)
        else:
            # rename: the handle must come from the old name
            if distro_name == distro_oldname:
                return HttpResponse("The name was not changed, cannot %s" % editmode)
            distro_id = remote.get_distro_handle(distro_oldname, token)

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    recursive = request.POST.get('recursive', False)

    # Deletion requires both confirmation checkboxes.
    if new_or_edit == 'edit' and delete1 and delete2:
        remote.remove_distro(distro_name, token, recursive)
        return HttpResponseRedirect('/cobbler_web/distro/list')
    else:
        for field in field_list:
            value = request.POST.get(field, None)
            if field == 'name' and editmode == 'rename':
                continue
            elif value != None:
                remote.modify_distro(distro_id, field, value, token)
        remote.save_distro(distro_id, token, new_or_edit)
        if editmode == 'rename':
            remote.rename_distro(distro_id, distro_name, token)
        return HttpResponseRedirect('/cobbler_web/distro/edit/%s' % distro_name)


def profile_list(request, profiles=None, page=None):
    """List profiles, 50 per page, flagging remote/NFS kickstarts."""
    if profiles is None:
        profiles = remote.get_profiles(token)
    if page is None and len(profiles) > 50:
        return HttpResponseRedirect('/cobbler_web/profile/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(profiles, page)
    if offset > len(profiles):
        return HttpResponseRedirect('/cobbler_web/profile/list/%d' % num_pages)

    for profile in profiles:
        if profile["kickstart"]:
            # BUGFIX: profiles are dicts; the original used attribute access
            # (profile.kickstart), which raises AttributeError.
            if profile["kickstart"].startswith(("http://", "ftp://")):
                profile["web_kickstart"] = profile["kickstart"]
            elif profile["kickstart"].startswith("nfs://"):
                profile["nfs_kickstart"] = profile["kickstart"]

    t = get_template('profile_list.tmpl')
    html = t.render(Context({'what': 'profile', 'profiles': profiles[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def profile_edit(request, profile_name=None, subprofile=0):
    """Render the profile (or sub-profile) add/edit form."""
    available_virttypes = [['auto', 'Any'], ['xenpv', 'Xen(pv)'], ['xenfv', 'Xen(fv)'],
                           ['qemu', 'KVM/qemu'], ['vmware', 'VMWare Server'],
                           ['vmwarew', 'VMWare WkStn']]
    profile = None
    if not profile_name is None:
        profile = remote.get_profile(profile_name, True, token)
        if 'ctime' in profile:
            profile['ctime'] = time.ctime(profile['ctime'])
        if 'mtime' in profile:
            profile['mtime'] = time.ctime(profile['mtime'])
    distros = remote.get_distros(token)
    profiles = remote.get_profiles(token)
    # (unused remote.get_repos() call removed -- repos were never rendered here)
    t = get_template('profile_edit.tmpl')
    html = t.render(Context({'profile': profile, 'subprofile': subprofile,
                             'profiles': profiles, 'distros': distros,
                             'editable': True, 'available_virttypes': available_virttypes}))
    return HttpResponse(html)


def profile_save(request):
    """Create, edit, copy, rename or delete a profile/sub-profile."""
    # FIXME: error checking
    field_list = ('name', 'parent', 'profile', 'distro', 'enable_menu', 'kickstart',
                  'kopts', 'kopts_post', 'virt_auto_boot', 'virt_file_size', 'virt_ram',
                  'ksmeta', 'template_files', 'repos', 'virt_path', 'virt_type',
                  'virt_bridge', 'virt_cpus', 'dhcp_tag', 'server', 'owners',
                  'mgmt_classes', 'comment', 'name_servers', 'name_servers_search',
                  'redhat_management_key', 'redhat_management_server')

    new_or_edit = request.POST.get('new_or_edit', 'new')
    editmode = request.POST.get('editmode', 'edit')
    profile_name = request.POST.get('name', request.POST.get('oldname', None))
    profile_oldname = request.POST.get('oldname', None)
    if profile_name == None:
        return HttpResponse("NO PROFILE NAME SPECIFIED")

    subprofile = int(request.POST.get('subprofile', '0'))
    if new_or_edit == 'new' or editmode == 'copy':
        if subprofile:
            profile_id = remote.new_subprofile(token)
        else:
            profile_id = remote.new_profile(token)
    else:
        if editmode == 'edit':
            profile_id = remote.get_profile_handle(profile_name, token)
        else:
            if profile_name == profile_oldname:
                return HttpResponse("The name was not changed, cannot %s" % editmode)
            profile_id = remote.get_profile_handle(profile_oldname, token)

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    recursive = request.POST.get('recursive', False)

    if new_or_edit == 'edit' and delete1 and delete2:
        remote.remove_profile(profile_name, token, recursive)
        return HttpResponseRedirect('/cobbler_web/profile/list')
    else:
        for field in field_list:
            value = request.POST.get(field, None)
            # distro only applies to top-level profiles, parent only to
            # sub-profiles; name is handled by the rename path below.
            if field == "distro" and subprofile:
                continue
            elif field == "parent" and not subprofile:
                continue
            elif field == "name" and editmode == "rename":
                continue
            if value != None:
                remote.modify_profile(profile_id, field, value, token)
        remote.save_profile(profile_id, token, new_or_edit)
        if editmode == "rename":
            remote.rename_profile(profile_id, profile_name, token)
        return HttpResponseRedirect('/cobbler_web/profile/edit/%s' % profile_name)


def system_list(request, systems=None, page=None):
    """List systems, 50 per page."""
    if systems is None:
        systems = remote.get_systems(token)
    if page is None and len(systems) > 50:
        return HttpResponseRedirect('/cobbler_web/system/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(systems, page)
    if offset > len(systems):
        return HttpResponseRedirect('/cobbler_web/system/list/%d' % num_pages)
    t = get_template('system_list.tmpl')
    html = t.render(Context({'what': 'system', 'systems': systems[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def system_edit(request, system_name=None, editmode="new"):
    """Render the system add/edit form."""
    available_virttypes = [['<<inherit>>', '<<inherit>>'], ['auto', 'Any'],
                           ['xenpv', 'Xen(pv)'], ['xenfv', 'Xen(fv)'],
                           ['qemu', 'KVM/qemu'], ['vmware', 'VMWare Server'],
                           ['vmwarew', 'VMWare WkStn']]
    available_power = ['', 'bullpap', 'wti', 'apc_snmp', 'ether-wake', 'ipmilan',
                       'drac', 'ipmitool', 'ilo', 'rsa', 'lpar', 'bladecenter',
                       'virsh', 'integrity']
    system = None
    if not system_name is None:
        system = remote.get_system(system_name, True, token)
        system['ctime'] = time.ctime(system['ctime'])
        system['mtime'] = time.ctime(system['mtime'])
    distros = remote.get_distros(token)
    profiles = remote.get_profiles(token)
    repos = remote.get_repos(token)
    t = get_template('system_edit.tmpl')
    html = t.render(Context({'system': system, 'profiles': profiles, 'distros': distros,
                             'repos': repos, 'editmode': editmode,
                             'available_virttypes': available_virttypes,
                             'available_power': available_power, 'editable': True}))
    return HttpResponse(html)


def system_save(request):
    """Create, edit, copy, rename or delete a system, including its
    network interfaces."""
    # FIXME: error checking
    field_list = ('name', 'profile', 'kopts', 'kopts_post', 'ksmeta', 'owners',
                  'netboot_enabled', 'server', 'virt_file_size', 'virt_cpus',
                  'virt_ram', 'virt_type', 'virt_path', 'virt_auto_boot', 'comment',
                  'power_type', 'power_user', 'power_pass', 'power_id',
                  'power_address', 'name_servers', 'name_servers_search', 'gateway',
                  'hostname', 'redhat_management_key', 'redhat_management_server',
                  'mgmt_classes')
    interface_field_list = ('macaddress', 'ipaddress', 'dns_name', 'static_routes',
                            'static', 'virtbridge', 'dhcptag', 'subnet', 'bonding',
                            'bondingopts', 'bondingmaster', 'present', 'original')

    editmode = request.POST.get('editmode', 'edit')
    system_name = request.POST.get('name', request.POST.get('oldname', None))
    system_oldname = request.POST.get('oldname', None)
    interfaces = request.POST.get('interface_list', "").split(",")
    if system_name == None:
        return HttpResponse("NO SYSTEM NAME SPECIFIED")

    if editmode == 'copy':
        system_id = remote.new_system(token)
    else:
        if editmode == 'edit':
            system_id = remote.get_system_handle(system_name, token)
        else:
            if system_name == system_oldname:
                return HttpResponse("The name was not changed, cannot %s" % editmode)
            system_id = remote.get_system_handle(system_oldname, token)

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    # BUGFIX: `recursive` was used below without ever being read from the
    # POST data (NameError on the delete path in the original).
    recursive = request.POST.get('recursive', False)

    if delete1 and delete2:
        remote.remove_system(system_name, token, recursive)
        return HttpResponseRedirect('/cobbler_web/system/list')
    else:
        for field in field_list:
            value = request.POST.get(field, None)
            if field == 'name' and editmode == 'rename':
                continue
            elif value != None:
                remote.modify_system(system_id, field, value, token)

        # Interface fields are posted as "<field>-<interface>" pairs.
        for interface in interfaces:
            ifdata = {}
            for item in interface_field_list:
                ifdata["%s-%s" % (item, interface)] = request.POST.get("%s-%s" % (item, interface), "")
            if ifdata['present-%s' % interface] == "0" and ifdata['original-%s' % interface] == "1":
                # Interface existed before but was unchecked: delete it.
                remote.modify_system(system_id, 'delete_interface', interface, token)
            elif ifdata['present-%s' % interface] == "1":
                remote.modify_system(system_id, 'modify_interface', ifdata, token)

        remote.save_system(system_id, token, editmode)
        if editmode == 'rename':
            remote.rename_system(system_id, system_name, token)
        return HttpResponseRedirect('/cobbler_web/system/edit/%s' % system_name)


def system_rename(request, system_name=None, system_newname=None):
    """Rename a system; without a new name, show the rename form."""
    if system_name == None:
        return HttpResponse("You must specify a system to rename")
    elif system_newname == None:
        t = get_template('system_rename.tmpl')
        html = t.render(Context({'system': system_name}))
        return HttpResponse(html)
    else:
        system_id = remote.get_system_handle(system_name, token)
        remote.rename_system(system_id, system_newname, token)
        return HttpResponseRedirect("/cobbler_web/system/list")


def system_multi(request, multi_mode=None):
    """Show the confirmation page for a bulk operation on selected systems."""
    items = request.POST.getlist('items')
    all_systems = remote.get_systems(token)
    sel_systems = []
    sel_names = []
    for system in all_systems:
        if system['name'] in items:
            sel_systems.append(system)
            sel_names.append(system['name'])
    profiles = []
    if multi_mode == "profile":
        # Profile reassignment needs the list of targets to choose from.
        profiles = remote.get_profiles(token)
    t = get_template('system_%s.tmpl' % multi_mode)
    html = t.render(Context({'systems': sel_systems, 'profiles': profiles, 'items': sel_names}))
    return HttpResponse(html)


def system_domulti(request, multi_mode=None):
    """Apply a bulk operation (delete/netboot/profile/power) to systems."""
    items = request.POST.get('items', '').split(" ")
    netboot_enabled = request.POST.get('netboot_enabled', None)
    profile = request.POST.get('profile', None)
    power = request.POST.get('power', None)
    for system_name in items:
        system_id = remote.get_system_handle(system_name, token)
        if multi_mode == "delete":
            remote.remove_system(system_name, token)
        elif multi_mode == "netboot":
            if netboot_enabled is None:
                # was: raise "..." -- string exceptions are invalid
                raise RuntimeError("Cannot modify systems without specifying netboot_enabled")
            remote.modify_system(system_id, "netboot_enabled", netboot_enabled, token)
            remote.save_system(system_id, token, "edit")
        elif multi_mode == "profile":
            if profile is None:
                raise RuntimeError("Cannot modify systems without specifying profile")
            remote.modify_system(system_id, "profile", profile, token)
            remote.save_system(system_id, token, "edit")
        elif multi_mode == "power":
            if power is None:
                raise RuntimeError("Cannot modify systems without specifying power option")
            try:
                remote.power_system(system_id, power, token)
            except Exception:
                # TODO: something besides ignore.  We should probably
                # print out an error message at the top of whatever
                # page we go to next, whether it's the system list
                # or a results page.  (Deliberate best-effort: power
                # failures must not abort the bulk operation.)
                pass
        else:
            raise RuntimeError("Unknown multiple operation on systems: %s" % str(multi_mode))
    return HttpResponseRedirect("/cobbler_web/system/list")


def repo_list(request, repos=None, page=None):
    """List repos, 50 per page."""
    if repos is None:
        repos = remote.get_repos(token)
    if page is None and len(repos) > 50:
        return HttpResponseRedirect('/cobbler_web/repo/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(repos, page)
    if offset > len(repos):
        return HttpResponseRedirect('/cobbler_web/repo/list/%d' % num_pages)
    t = get_template('repo_list.tmpl')
    html = t.render(Context({'what': 'repo', 'repos': repos[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def repo_edit(request, repo_name=None):
    """Render the repo add/edit form."""
    available_arches = ['i386', 'x86', 'x86_64', 'ppc', 'ppc64', 's390', 's390x',
                        'ia64', 'noarch', 'src']
    repo = None
    if not repo_name is None:
        repo = remote.get_repo(repo_name, True, token)
        repo['ctime'] = time.ctime(repo['ctime'])
        repo['mtime'] = time.ctime(repo['mtime'])
    t = get_template('repo_edit.tmpl')
    html = t.render(Context({'repo': repo, 'available_arches': available_arches,
                             "editable": True}))
    return HttpResponse(html)


def repo_save(request):
    """Create, edit, copy, rename or delete a repo."""
    # FIXME: error checking
    field_list = ('name', 'mirror', 'keep_updated', 'priority', 'mirror_locally',
                  'rpm_list', 'createrepo_flags', 'arch', 'yumopts', 'environment',
                  'owners', 'comment')

    editmode = request.POST.get('editmode', 'edit')
    repo_name = request.POST.get('name', request.POST.get('oldname', None))
    repo_oldname = request.POST.get('oldname', None)
    if repo_name == None:
        # BUGFIX: copy-pasted "NO SYSTEM NAME SPECIFIED" message corrected.
        return HttpResponse("NO REPO NAME SPECIFIED")

    if editmode == 'copy':
        repo_id = remote.new_repo(token)
    else:
        if editmode == 'edit':
            repo_id = remote.get_repo_handle(repo_name, token)
        else:
            if repo_name == repo_oldname:
                return HttpResponse("The name was not changed, cannot %s" % editmode)
            repo_id = remote.get_repo_handle(repo_oldname, token)

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)

    if delete1 and delete2:
        remote.remove_repo(repo_name, token)
        return HttpResponseRedirect('/cobbler_web/repo/list')
    else:
        for field in field_list:
            value = request.POST.get(field, None)
            if field == 'name' and editmode == 'rename':
                continue
            elif field in ('keep_updated', 'mirror_locally'):
                # Checkboxes: absent from POST means unchecked.
                if field in request.POST:
                    remote.modify_repo(repo_id, field, "1", token)
                else:
                    remote.modify_repo(repo_id, field, "0", token)
            elif value != None:
                remote.modify_repo(repo_id, field, value, token)
        remote.save_repo(repo_id, token, editmode)
        if editmode == 'rename':
            remote.rename_repo(repo_id, repo_name, token)
        return HttpResponseRedirect('/cobbler_web/repo/edit/%s' % repo_name)


def image_list(request, images=None, page=None):
    """List images, 50 per page."""
    if images is None:
        images = remote.get_images(token)
    if page is None and len(images) > 50:
        return HttpResponseRedirect('/cobbler_web/image/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(images, page)
    if offset > len(images):
        return HttpResponseRedirect('/cobbler_web/image/list/%d' % num_pages)
    t = get_template('image_list.tmpl')
    html = t.render(Context({'what': 'image', 'images': images[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def image_edit(request, image_name=None):
    """Render the image add/edit form."""
    available_arches = ['i386', 'x86_64']
    available_breeds = [['redhat', 'Red Hat Based'], ['debian', 'Debian'],
                        ['ubuntu', 'Ubuntu'], ['suse', 'SuSE']]
    available_virttypes = [['auto', 'Any'], ['xenpv', 'Xen(pv)'], ['xenfv', 'Xen(fv)'],
                           ['qemu', 'KVM/qemu'], ['vmware', 'VMWare Server'],
                           ['vmwarew', 'VMWare WkStn']]
    available_imagetypes = ['direct', 'iso', 'memdisk', 'virt-clone']
    image = None
    if not image_name is None:
        image = remote.get_image(image_name, True, token)
        image['ctime'] = time.ctime(image['ctime'])
        image['mtime'] = time.ctime(image['mtime'])
    t = get_template('image_edit.tmpl')
    html = t.render(Context({'image': image, 'available_arches': available_arches,
                             'available_breeds': available_breeds,
                             'available_virttypes': available_virttypes,
                             'available_imagetypes': available_imagetypes,
                             "editable": True}))
    return HttpResponse(html)


def image_save(request):
    """Create, edit, copy, rename or delete an image."""
    # FIXME: error checking
    field_list = ('name', 'image_type', 'breed', 'os_version', 'arch', 'file',
                  'owners', 'virt_cpus', 'network_count', 'virt_file_size',
                  'virt_path', 'virt_bridge', 'virt_ram', 'virt_type',
                  'virt_auto_boot', 'comment')

    editmode = request.POST.get('editmode', 'edit')
    image_name = request.POST.get('name', request.POST.get('oldname', None))
    image_oldname = request.POST.get('oldname', None)
    if image_name == None:
        # BUGFIX: copy-pasted "NO SYSTEM NAME SPECIFIED" message corrected.
        return HttpResponse("NO IMAGE NAME SPECIFIED")

    if editmode == 'copy':
        image_id = remote.new_image(token)
    else:
        if editmode == 'edit':
            image_id = remote.get_image_handle(image_name, token)
        else:
            if image_name == image_oldname:
                return HttpResponse("The name was not changed, cannot %s" % editmode)
            image_id = remote.get_image_handle(image_oldname, token)

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    recursive = request.POST.get('recursive', False)

    if delete1 and delete2:
        remote.remove_image(image_name, token, recursive)
        return HttpResponseRedirect('/cobbler_web/image/list')
    else:
        for field in field_list:
            value = request.POST.get(field, None)
            if field == 'name' and editmode == 'rename':
                continue
            elif value != None:
                remote.modify_image(image_id, field, value, token)
        remote.save_image(image_id, token, editmode)
        if editmode == 'rename':
            remote.rename_image(image_id, image_name, token)
        return HttpResponseRedirect('/cobbler_web/image/edit/%s' % image_name)


def ksfile_list(request, page=None):
    """List kickstart templates, 50 per page, tagged editable/viewable."""
    ksfiles = remote.get_kickstart_templates(token)
    if page is None and len(ksfiles) > 50:
        return HttpResponseRedirect('/cobbler_web/ksfiles/list/1')
    page = __page_number(page)
    (num_pages, prev_page, next_page, offset, ending) = __setup__pagination(ksfiles, page)
    if offset > len(ksfiles):
        return HttpResponseRedirect('/cobbler_web/ksfiles/list/%d' % num_pages)

    ksfile_list = []
    for ksfile in ksfiles:
        # `ksfile` is a path/URL string.
        if ksfile.startswith(("/var/lib/cobbler/kickstarts", "/etc/cobbler")):
            ksfile_list.append((ksfile, 'editable'))
        elif ksfile.startswith(("http://", "ftp://")):
            # BUGFIX: the original indexed the string (ksfile["kickstart"]),
            # which raises TypeError.
            ksfile_list.append((ksfile, 'viewable'))
        else:
            ksfile_list.append((ksfile, None))

    t = get_template('ksfile_list.tmpl')
    html = t.render(Context({'what': 'ksfile', 'ksfiles': ksfile_list[offset:ending],
                             'page': page, 'pages': range(1, num_pages + 1),
                             'next_page': next_page, 'prev_page': prev_page}))
    return HttpResponse(html)


def ksfile_edit(request, ksfile_name=None, editmode='edit'):
    """Render the kickstart template editor."""
    if editmode == 'edit':
        editable = False
    else:
        editable = True
    deleteable = False
    ksdata = ""
    if not ksfile_name is None:
        editable = remote.check_access_no_fail(token, "modify_kickstart", ksfile_name)
        # NOTE(review): this looks inverted -- a kickstart that is in use is
        # presumably NOT deleteable.  Behavior preserved; confirm against the
        # template's use of `deleteable`.
        deleteable = remote.is_kickstart_in_use(ksfile_name, token)
        ksdata = remote.read_or_write_kickstart_template(ksfile_name, True, "", token)
    t = get_template('ksfile_edit.tmpl')
    html = t.render(Context({'ksfile_name': ksfile_name, 'deleteable': deleteable,
                             'ksdata': ksdata, 'editable': editable,
                             'editmode': editmode}))
    return HttpResponse(html)


def ksfile_save(request):
    """Save or delete a kickstart template."""
    # FIXME: error checking
    editmode = request.POST.get('editmode', 'edit')
    ksfile_name = request.POST.get('ksfile_name', None)
    ksdata = request.POST.get('ksdata', "")
    if ksfile_name == None:
        return HttpResponse("NO KSFILE NAME SPECIFIED")
    if editmode != 'edit':
        # New templates always live under the cobbler kickstarts directory.
        ksfile_name = "/var/lib/cobbler/kickstarts/" + ksfile_name

    delete1 = request.POST.get('delete1', None)
    delete2 = request.POST.get('delete2', None)
    if delete1 and delete2:
        # Passing -1 as the data tells the server to delete the template.
        remote.read_or_write_kickstart_template(ksfile_name, False, -1, token)
        return HttpResponseRedirect('/cobbler_web/ksfile/list')
    else:
        remote.read_or_write_kickstart_template(ksfile_name, False, ksdata, token)
        return HttpResponseRedirect('/cobbler_web/ksfile/edit/%s' % ksfile_name)


def random_mac(request, virttype="xenpv"):
    """Return a server-generated random MAC address for the given virt type."""
    random_mac = remote.get_random_mac(virttype, token)
    return HttpResponse(random_mac)


def dosync(request):
    """Trigger a cobbler sync and return to the front page."""
    remote.sync(token)
    return HttpResponseRedirect("/cobbler_web/")
Leading the Greek IT market for more than 35 years, Info Quest Technologies, member of Quest Group, is among the major gates for the dissemination of new technologies in the Greek Region. Through its cooperation with all major international ICT vendors, consistent strategy with an emphasis on innovation, continuous investment in technical expertise and customer-centric philosophy, the company is uniquely focused on the utilization of new technologies, to create value, competitive advantage and business success for each customer. Info Quest Technologies collaborates strategically with more than 100 leading vendors, IBM, Microsoft, HP, Apple, Dell, Lenovo, Intel, Cisco, Symantec and EMC being among them, enabling customers to access the entire range of advanced products, services and solutions. Since 2013 it has invested in the provision of cloud solutions while recently it has expanded its portfolio by providing solutions and services in the areas of e-commerce and business intelligence. Through Quest on Cloud, Info Quest Technologies aims to give Value Added Resellers and Enterprises a comprehensive, yet simple and efficient way, to configure, buy, manage and utilize best-of-class cloud solutions, acting as a one-stop-shop for their needs. Quest on Cloud helps Customers integrate IaaS, PaaS and SaaS solutions in their Organizations, easily, efficiently and effectively. Quest on Cloud helps Solution Providers reach a wide range of customers through an innovative and advanced platform.
# Copyright (C) 2015-2021 by the RBniCS authors
#
# This file is part of RBniCS.
#
# SPDX-License-Identifier: LGPL-3.0-or-later

from rbnics.problems.base import NonlinearTimeDependentProblem
from rbnics.problems.nonlinear_elliptic import NonlinearEllipticProblem
from rbnics.backends import product, sum

NonlinearParabolicProblem_Base = NonlinearTimeDependentProblem(NonlinearEllipticProblem)


class NonlinearParabolicProblem(NonlinearParabolicProblem_Base):
    """Nonlinear parabolic problem: the nonlinear elliptic problem extended in
    time, with an additional bilinear mass form "m" multiplying the time
    derivative of the solution."""

    def __init__(self, V, **kwargs):
        # Standard base-class initialization first, then register the extra
        # mass term used by the time stepping.
        NonlinearParabolicProblem_Base.__init__(self, V, **kwargs)
        self.terms.append("m")
        self.terms_order.update({"m": 2})

    class ProblemSolver(NonlinearParabolicProblem_Base.ProblemSolver):
        def _assemble_terms(self, term_names):
            # Assemble each requested term as the theta-weighted sum of its
            # operators, preserving the order in which terms are listed.
            problem = self.problem
            return dict(
                (name, sum(product(problem.compute_theta(name), problem.operator[name])))
                for name in term_names
            )

        def residual_eval(self, t, solution, solution_dot):
            # Residual at time t: m * u_dot + a * u + c(u) - f.
            op = self._assemble_terms(("m", "a", "c", "f"))
            return (op["m"] * solution_dot + op["a"] * solution
                    + op["c"] - op["f"])

        def jacobian_eval(self, t, solution, solution_dot, solution_dot_coefficient):
            # Jacobian of the residual with respect to the solution.
            op = self._assemble_terms(("m", "a", "dc"))
            return (op["m"] * solution_dot_coefficient + op["a"] + op["dc"])
Founded in April 1995, The Khaitan School (formerly, Khaitan Public School, Noida) is sprawled over more than six acres of a lush green campus, an idyllic setting for its faculty and students to bond, converse and share the joys of learning. Its vibrant ambiance, picturesque surroundings and state-of-the-art infrastructure provide a stimulating environment for the all-round growth and development of our children. Go Forward. I must, I can. Today’s readers are tomorrow’s leaders. It’s not how good you are. It’s how good you want to be. Honesty, spirituality, commitment, vitality, perseverance and excellence are a few values that we inculcate into each and every student. For us, education goes way beyond academic excellence. We encourage our students to gather knowledge, pursue their goals and aspirations and at the same time nurture their creativity and develop their individual faculties. We are firmly committed to embracing students from diverse backgrounds, cultures and religions. The School Morning Assembly is the most important part of the school curriculum and nurtures a positive school ethos and a sense of school community. It is conducted by the House on duty and presided over by the Principal/Headmistress. Prayers, national songs, news events, Thought for the Day and a talk by a teacher are highlights of the assembly. It also highlights the agenda and activities of the four Houses, namely Aravalli, Meghalaya, Nilgiri and Shiwalik. Special assemblies are organized at Pre-primary, Primary and Senior levels to celebrate special days and festivals. The school curriculum aims at instilling national pride and secular beliefs amongst students through such celebrations and activities like Holi, Guru Purab, Dussehra, Diwali, Id, Environment Day, Children’s Day, Teachers Day, Sadbhavana Diwas, National Integration Day. The Investiture Ceremony is held every year to induct the Students Council and House Prefects.
The members of the council led by a Head Boy and a Head Girl (Senior & Primary) are responsible for maintaining discipline, organising and supervising a multitude of activities including sports and co-curricular and ensuring the smooth conduct of school functions and ceremonies. The Head Boy and Head Girl are bestowed with crystal mementoes. The Captains and Vice Captains of the four Houses – Aravalli, Meghalaya, Nilgiri and Shivalik – are responsible for organizing the smooth conduct of Inter-House competitions. These programmes are graced by eminent personalities and dignitaries. We also celebrate birthdays of our pre-primary school children every month. Parents’ Day and Grand Parents’ Day are fun-filled events and days of joy for the families of our children. Grand Parents’ Day gives the children an opportunity to respect and bond closely with the elders of the family, a virtue we find declining in today’s fast-paced world. We take pride in acknowledging the academic excellence attained by meritorious students. A student attaining examination results above 80% qualifies as a Scholar Badge Holder. Those who are awarded three Scholar Badges consecutively are awarded a Scholar Blazer at a formal Awards Ceremony wherein distinguished guests are invited to present badges, blazers and certificates. Orientation Programmes for parents (class and age applicable) are regularly held to update and orient them with the school curriculum, pedagogy, facilities offered, subject options, and career options suitable for science, commerce and humanities streams. Orientation Programmes are conducted by an expert panel (as applicable) comprising the School Principal, Headmistress, School Counsellors, Visiting Counsellors, Pediatrician and Nutritionist.
# -*- coding: utf-8 -*-
from enum import Enum

from slackclient import SlackClient

from torimotsu import settings


class SendError(Exception):
    """Raised when posting a message to Slack fails."""

    def __init__(self, message):
        self.message = message


class MealTimeEmoji(Enum):
    """Slack emoji name for each meal-time slot, keyed by the slot's value."""

    fork_and_knife = 1
    doughnut = 2
    ramen = 3
    ice_cream = 4
    sake = 5
    cookie = 6


class Notifier(SlackClient):
    """Posts yesterday's food log from `log` to the configured Slack channel."""

    def __init__(self, log):
        super().__init__(token=settings.slack.token)
        self.log = log
        self.channel = settings.slack.channel

    def post_foods(self):
        """Build the daily food report and send it to Slack."""
        report = ['{}のたべものきろく。'.format(self.log.yesterday.strftime('%Y-%m-%d'))]
        food_log = self.log.fetch_foods()

        # One section per meal time: a headline with the subtotal, then a
        # code block listing every logged food.
        for food_time, foods in food_log.foods.items():
            subtotal = sum(food['loggedFood']['calories'] for food in foods)
            emoji_name = MealTimeEmoji(food_time.value).name
            report.append(':{}: *{}* {}㌔㌍'.format(emoji_name, food_time.name, subtotal))
            report.append('```')
            report.extend(
                '{calories:>4}㌔㌍ {name:25}: {amount}{unit_}'.format(
                    unit_=food['loggedFood']['unit']['plural'], **food['loggedFood'])
                for food in foods)
            report.append('```')
            report.append('')

        # Daily summary: total intake, and the diff against the goal when the
        # goal is available.
        report.append(':yum: *{}* ㌔㌍摂取したよ。'.format(food_log.summary['calories']))
        if food_log.goals:
            report.append(':yum: *{}* ㌔㌍が上限目標だよ。'.format(food_log.goals['calories']))
            diff = food_log.diff
            if diff > 0:
                report.append(':innocent: *{}* ㌔㌍セーブしたよ。やったね。'.format(diff))
            else:
                report.append(':imp: *{}* ㌔㌍余分にたべてしまいました。罪深い。'.format(diff * -1))
        else:
            report.append('目標㌍は取得できなかった。FitbitAPIバグっているのでは???')

        self.send_slack('\n'.join(report))

    def send_slack(self, text):
        """Post `text` to the channel; raise SendError if Slack reports failure."""
        response = self.api_call('chat.postMessage', channel=self.channel, text=text)
        if not response['ok']:
            raise SendError(response['message'])
Electric cars and vans will play a key role in the future of transport. As well as reducing CO2 emissions and local air pollution, they are extremely cost effective to run. We offer great cost effective solutions for individuals and businesses in Gloucestershire and surrounding areas. We can help with outright purchase and lease options. For those businesses with fleets, even if EVs aren’t for you right now, our fleet management software and service can significantly reduce the cost of running and managing all aspects of vehicles, plant & machinery and drivers, and will help you stay compliant. Tracking and telematics can help you to benchmark your drivers so your whole team can learn and benefit from fuel efficient driving. We can retrofit dashcams which demonstrate a duty of care and provide increased security for drivers. Electric vehicles are good PR for your business – if used well you could get increased sales and improved staff retention (see profit calculator), and demonstrates good corporate and social responsibility.
# lidar.py
# Code to control the touchscreen user interface subsystem
# Fully networked and touch enabled - with easy manipulation of generated maps
# Author: Matthew Timmons-Brown
# NOTE(review): Python 2 module (print statements, integer division).

# Import necessary libraries for control of different aspects
import socket
import math
import time
import sys
import subprocess
import threading
import random

# Import Kivy elements and tools that will be used for the user interface
from kivy.app import App
from kivy.uix.gridlayout import GridLayout
from kivy.core.window import Window
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen

# Import image manipulation tools
from PIL import Image, ImageDraw

# Import library that I have created to make communication and control easier
sys.path.insert(0, "/home/pi/lidar/pi_approach/Libraries")
import serverxclient as serv

# Command used to shut the Pi down cleanly.
powerdown = ["sudo", "shutdown", "now"]
server = serv.Server()

# Set the distance and stepper connection to false (as have not connected)
distance = False
stepper = False

# Initialise distance and stepper connections, but IP addresses so far unknown
distance_connection = 0
stepper_connection = 0

# Set accuracy limit for sensor, any value above it will be rejected (mm)
accuracy_limit = 4000

class Communication(threading.Thread):
    """A communication thread that connects to other subsystems in the background"""

    # Run method - automatically run when thread is started
    # Constantly waits for other two subsystems to come online, then changes to the main application page
    def run(self):
        self.setup()
        # While either of the subsystems are not connected
        while (distance == False) or (stepper == False):
            (connection, address) = self.awaiting_socket()
            print (connection, address)
            self.test_socket(connection)
        # Wait 2 seconds, then change to main screen
        time.sleep(2)
        application.current = "main"

    # Setup method
    # Sets up a server for subsystems to connect to
    def setup(self):
        server.setup_server()
        print "SUCCESS ON BIND"

    # Awaiting socket method
    # Waits for an incoming socket and then returns that socket's connection and address details
    def awaiting_socket(self):
        print "AWAITING"
        (connection, address) = server.socket_reception()
        return (connection, address)

    # Test socket
    # Identifies which subsystem the incoming connection is and changes global variables to indicate correct pairing
    def test_socket(self, connection):
        # Demands verification from subsystem
        server.send_data(connection,"VERIFY?")
        data_back = server.receive_data(connection)
        # If data_back is either subsystem, then change the Init screen labels from NO to OK!
        if data_back == "DISTANCE!":
            # set distance to OK
            application.current_screen.distance_on()
            # Update global variables with connection details
            global distance, distance_connection
            distance = True
            distance_connection = connection
        if data_back == "STEPPER!":
            # set stepper to OK
            application.current_screen.stepper_on()
            # Update global variables with connection details
            global stepper, stepper_connection
            stepper = True
            stepper_connection = connection
        print "Finished testing socket"

class InitScreen(Screen):
    """A class to define the behaviour of the InitScreen"""

    # Power off method
    # If shutdown switch is toggled, turn off device
    def power_off(self, *args):
        # Connection to Kivy element through the use of labels
        onoffswitch = self.ids["onoffswitch"]
        onoff_value = onoffswitch.active
        # If the switch is false, turn the system off
        if onoff_value == False:
            subprocess.call(powerdown)

    # Distance ON! method
    # Changes the "NO" distance label to "OK!" when called
    def distance_on(self, *args):
        distance_label = self.ids["distance_label"]
        distance_label.text = "[size=40]Distance:[/size]\n\n[size=60][color=008000]OK[/color][/size]" # (Markup text)

    # Stepper ON! method
    # Changes the "NO" stepper label to "OK!" when called
    def stepper_on(self, *args):
        stepper_label = self.ids["stepper_label"]
        stepper_label.text = "[size=40]Stepper:[/size]\n\n[size=60][color=008000]OK[/color][/size]" # (Markup text)

class MainScreen(Screen):
    """A class to define the behaviour of the MainScreen"""
    # Current stepper motor angle
    angle = 0

    # Power off method
    # If shutdown switch is toggled, turn off other subsystems and shut down this device
    def power_off(self, *args):
        onoffswitch = self.ids["onoffswitch2"]
        onoff_value = onoffswitch.active
        if onoff_value == False:
            # Send commands to other subsystems and then shut down
            server.send_data(distance_connection, "POWER-OFF")
            server.send_data(stepper_connection, "POWER-OFF")
            subprocess.call(powerdown)

    # Change value method
    # When the slider is changed, adapt the value label to reflect its value
    def change_value(self, *args):
        value_slider = self.ids["value_slider"]
        self.angle = int(value_slider.value)
        value_label = self.ids["value_label"]
        # Change label to slider's current value
        value_label.text = "[size=10]" + str(self.angle) + "[/size]"

    # Scan method
    # Called when the SCAN button is pressed
    # Collects data from distance subsytem and stepper motor subsystem
    # Outputs map to the user
    def scan(self, *args):
        enable_lidar = self.ids["enable_lidar"]
        # If the lidar button is actually enabled, then proceed with the scan
        if enable_lidar.state == "down":
            print "Now contacting and getting data"
            # Create arrays for the distances and angle that they were recorded at
            distances = []
            positions = []
            # Create angle copy to reset when process has finished
            angle_copy = self.angle
            # For loop to discard the first 20 readings from the distance sensor
            # Sensor is cheap and found that the first 20 odd values are not usually consistent - so discard them
            for i in range(0,20):
                server.send_data(distance_connection, "FIRE")
                discarded_response = server.receive_data(distance_connection)
                time.sleep(0.1)
            # While there is still an angle left to scan, do:
            # (angle is consumed in 1.8-degree steps — see "self.angle -= 1.8" below)
            while self.angle+1.8 > 0:
                # Demand distance from distance subsystem
                server.send_data(distance_connection, "FIRE")
                distance_response = server.receive_data(distance_connection)
                # While the distance is greater than the accuracy limit, and the attempts are less than 3, try again
                # in the hope to get better data.
                # NOTE(review): [:-2] strips a two-character suffix from the
                # reply before float() — presumably a unit/terminator; confirm
                # against the distance subsystem's protocol.
                tries = 0
                while (float(distance_response[:-2]) > accuracy_limit) and (tries < 3):
                    server.send_data(distance_connection, "FIRE")
                    distance_response = server.receive_data(distance_connection)
                    tries += 1
                # Demand current position of stepper motor, and then rotate by 1 step for the next distance
                server.send_data(stepper_connection, "REPORT-ROTATE")
                stepper_position = server.receive_data(stepper_connection)
                # Convert the values into floats and remove unnecessary elements of communication
                point_distance = float(distance_response[:-2])
                point_position = float(stepper_position)
                print (point_position, point_distance)
                # If distance is within the accuracy_limit, store and record distance
                # Otherwise distance is not recorded. This is to prevent outliers
                if point_distance <= accuracy_limit:
                    distances.append(point_distance)
                    positions.append(point_position)
                # -1.8 from angle as scan complete
                self.angle -= 1.8
            # Reset current angle
            self.angle = angle_copy
            # Draw map with the distances and position data that has been gathered
            source = self.draw_map(distances, positions)
            # Display the outputted PNG image to the user for manipulation and viewing
            output_image = self.ids["output_image"]
            output_image.source = source
        else:
            print "Nothing enabled"

    # Draw map method
    # Main map drawing algorithm - creates image from supplied distances and position data and returns path to that image
    def draw_map(self, distance_array, angle_array):
        # Dimensions for the image
        dimensions = (700,380)
        # NOTE(review): len-1 means the final sample is never plotted —
        # confirm this off-by-one is intentional.
        points = len(distance_array)-1
        centre_x = dimensions[0]/2
        centre_y = dimensions[1]/2
        # Create a scaling factor for the end image to ensure points are within the allocated space
        # NOTE(review): Python 2 integer division here — scaler is truncated
        # to an int; confirm that is the intended scale.
        scaler = (centre_x+accuracy_limit)/dimensions[0]
        # Open a new image with the dimensions previous
        # NOTE(review): "map" shadows the builtin of the same name (local
        # scope only).
        map = Image.new("RGBA", dimensions)
        # Set image up for drawing
        draw = ImageDraw.Draw(map)
        # Draw a point in the centre of the image to represent where the scanner is
        draw.point((centre_x, centre_y), (1,1,1))
        # For all the pieces of data, do:
        for i in range(0, points):
            # Use trigonometry to calculate the position of the point to plot on map
            sine_distance = (math.sin(math.radians(angle_array[i]))*(distance_array[i]))
            cosi_distance = (math.cos(math.radians(angle_array[i]))*(distance_array[i]))
            length_x = cosi_distance
            length_y = sine_distance
            # Divide by scaling factor to keep within the dimensions of the image
            length_x = length_x/scaler
            length_y = length_y/scaler
            # Create set of coordinates to plot
            coord_x = centre_x + length_x
            coord_y = centre_y + length_y
            coords = (coord_x, coord_y)
            print coords
            # Draw coordinates on map
            draw.point(coords, (1,1,1))
        # Create a new image path and return it
        path = "/home/pi/lidar/pi_approach/UI/scans/" + str(random.randint(0,1000)) + ".png"
        map.save(path, "PNG")
        return path

class ScreenManagement(ScreenManager):
    """Screen Manager - does behind-the-scenes screen management for transition between Init and Main screen"""
    pass

# Load up Kivy file that defines how the UI looks
application = Builder.load_file("main.kv")

class LidarApp(App):
    """Build actual application and return it"""
    def build(self):
        return application

# If run, start communication thread and run the application
if __name__ == "__main__":
    checker = Communication()
    checker.daemon = True
    checker.start()
    LidarApp().run()
The management buyout team aiming to take over Birmingham-based van maker LDV has said it believes the firm has "a viable long-term future". Erik Eberhardson, head of the buyout team, made the comments following a "long and constructive meeting" with Business Minister Ian Pearson. His statement comes a day after the government said it would not meet the firm's request for an urgent £30m loan. Mr Eberhardson said efforts to secure the necessary funds would continue. LDV has suspended production since December because of falling sales, and has admitted it has not made a profit for four years. The firm is currently owned by Russian company Gaz, of which Mr Eberhardson is the outgoing chairman. LDV currently employs 850 people. Gaz is controlled by oligarch Oleg Deripaska, a friend of Business Secretary Lord Mandelson. The management buy-out team aims to make LDV the first big producer of electric vans in the UK.
# Sphinx configuration for building the Jinja documentation.
from pallets_sphinx_themes import get_version
from pallets_sphinx_themes import ProjectLink

# Project --------------------------------------------------------------

project = "Jinja"
copyright = "2007 Pallets"
author = "Pallets"
# Derive the full release string and short X.Y version from the installed
# Jinja2 distribution's metadata.
release, version = get_version("Jinja2")

# General --------------------------------------------------------------

master_doc = "index"
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "pallets_sphinx_themes",
    "sphinxcontrib.log_cabinet",
    "sphinx_issues",
]
# Allow cross-references into the CPython documentation.
intersphinx_mapping = {"python": ("https://docs.python.org/3/", None)}
# Repository used by sphinx-issues to resolve issue/PR roles.
issues_github_path = "pallets/jinja"

# HTML -----------------------------------------------------------------

html_theme = "jinja"
html_theme_options = {"index_sidebar_logo": False}
html_context = {
    # Links rendered by the theme's project sidebar.
    "project_links": [
        ProjectLink("Donate to Pallets", "https://palletsprojects.com/donate"),
        ProjectLink("Jinja Website", "https://palletsprojects.com/p/jinja/"),
        ProjectLink("PyPI releases", "https://pypi.org/project/Jinja2/"),
        ProjectLink("Source Code", "https://github.com/pallets/jinja/"),
        ProjectLink("Issue Tracker", "https://github.com/pallets/jinja/issues/"),
    ]
}
# Sidebar layout: the landing page differs from all other pages.
html_sidebars = {
    "index": ["project.html", "localtoc.html", "searchbox.html"],
    "**": ["localtoc.html", "relations.html", "searchbox.html"],
}
singlehtml_sidebars = {"index": ["project.html", "localtoc.html"]}
html_static_path = ["_static"]
html_favicon = "_static/jinja-logo-sidebar.png"
html_logo = "_static/jinja-logo-sidebar.png"
html_title = f"Jinja Documentation ({version})"
html_show_sourcelink = False

# LaTeX ----------------------------------------------------------------

latex_documents = [(master_doc, f"Jinja-{version}.tex", html_title, author, "manual")]
Amazon Careers | Vacancy Openings for Amazon Warehouse Jobs 2018: Amazon is an organization of pioneers. We must make intense wagers, and we get our vitality from concocting for clients. Achievement is estimated against the conceivable, not the plausible. For the present pioneers, that is precisely why there’s no place on Earth they’d preferably work than Amazon. At the point when Amazon.com propelled in 1995, it was with the mission “to be Earth’s most client-driven organization, where clients can discover and find anything they should need to purchase on the web, and attempts to offer its clients the least conceivable costs.” This objective proceeds with today, however, Amazon’s clients are overall now and have developed to incorporate a huge number of Consumers, Sellers, Content Creators, and Developers and Enterprises. Every one of these gatherings has diverse necessities, and we generally work to address those issues, developing new answers for making things simpler, speedier, better, and more financially savvy. Amazon Careers and Jobs Vacancies 2018: The Amazon Careers Choice Program is an innovative career development program offered to hourly representatives. The program pre-pays 95% of the cost of educational cost, course books, and related charges so the worker can center around their examinations and not the cost. Career Choice enables representatives to create sought-after aptitudes for callings without bounds, including restorative experts, paralegals, mechanical technology engineers and sun-powered board establishment experts.
import os

import virtool.api.utils
import virtool.http.routes
import virtool.jobs.db
import virtool.resources
import virtool.users.db
import virtool.utils
from virtool.api.response import conflict, json_response, no_content, not_found

routes = virtool.http.routes.Routes()


@routes.get("/api/jobs")
async def find(req):
    """
    Return a list of job documents.

    An optional ``find`` query term is matched (as a regex) against the
    ``task`` and ``user.id`` fields. Results are paginated, then sorted by
    creation time within the returned page.

    """
    db = req.app["db"]

    term = req.query.get("find")

    db_query = dict()

    if term:
        db_query.update(virtool.api.utils.compose_regex_query(term, ["task", "user.id"]))

    data = await virtool.api.utils.paginate(
        db.jobs,
        db_query,
        req.query,
        projection=virtool.jobs.db.PROJECTION
    )

    data["documents"].sort(key=lambda d: d["created_at"])

    return json_response(data)


@routes.get("/api/jobs/{job_id}")
async def get(req):
    """
    Return the complete document for a given job.

    Responds 404 when no job with the given id exists.

    """
    job_id = req.match_info["job_id"]

    document = await req.app["db"].jobs.find_one(job_id)

    if not document:
        return not_found()

    return json_response(virtool.utils.base_processor(document))


@routes.put("/api/jobs/{job_id}/cancel", permission="cancel_job")
async def cancel(req):
    """
    Cancel a job.

    Only jobs that are currently running or waiting can be cancelled;
    otherwise a 409 conflict is returned.

    """
    db = req.app["db"]

    job_id = req.match_info["job_id"]

    document = await db.jobs.find_one(job_id, ["status"])

    if not document:
        return not_found()

    if not virtool.jobs.is_running_or_waiting(document):
        return conflict("Not cancellable")

    await req.app["jobs"].cancel(job_id)

    document = await db.jobs.find_one(job_id)

    return json_response(virtool.utils.base_processor(document))


@routes.delete("/api/jobs", permission="remove_job")
async def clear(req):
    """
    Remove finished jobs in bulk.

    The optional ``filter`` query parameter narrows what is removed:
    with no filter (or "finished") both complete and failed jobs go;
    "complete" and "failed" each remove only that category.

    """
    db = req.app["db"]

    job_filter = req.query.get("filter")

    # Remove jobs that completed successfully.
    complete = job_filter in [None, "finished", "complete"]

    # Remove jobs that errored or were cancelled.
    failed = job_filter in [None, "finished", "failed"]

    removed = await virtool.jobs.db.clear(db, complete=complete, failed=failed)

    return json_response({
        "removed": removed
    })


@routes.delete("/api/jobs/{job_id}", permission="remove_job")
async def remove(req):
    """
    Remove a job.

    Running or waiting jobs cannot be removed (409 conflict). The job's
    log file, if any, is removed on a best-effort basis.

    """
    db = req.app["db"]

    job_id = req.match_info["job_id"]

    document = await db.jobs.find_one(job_id)

    if not document:
        return not_found()

    if virtool.jobs.is_running_or_waiting(document):
        return conflict("Job is running or waiting and cannot be removed")

    # Removed the documents associated with the job ids from the database.
    await db.jobs.delete_one({"_id": job_id})

    try:
        # Calculate the log path and remove the log file. If it exists, return True.
        path = os.path.join(req.app["settings"]["data_path"], "logs", "jobs", job_id + ".log")
        await req.app["run_in_thread"](virtool.utils.rm, path)
    except OSError:
        # Missing log file is fine — deletion is best-effort.
        pass

    return no_content()


@routes.get("/api/resources")
async def get_resources(req):
    """
    Get a object describing compute resource usage on the server.

    Also refreshes the application's cached resource snapshot.

    """
    resources = virtool.resources.get()

    req.app["resources"].update(resources)

    return json_response(resources)
The Los Angeles Police Department is making steady progress toward reform, but is struggling to oversee its anti-gang units and has failed to fully support its top ethics official, a report released Monday said. In the same report that criticized the process for review of shootings by police officers, federal monitor Michael Cherkasky praised LAPD management for making "swift and meaningful" improvements in other areas. It was a regular quarterly report from Cherkasky, who was named to oversee LAPD reforms by a judge after the department pledged to improve under pressure from the U.S. Justice Department. In his 110-page report to U.S. District Judge Gary A. Feess, Cherkasky expressed frustration, describing audits of the LAPD's gang unit as "in an abysmal state." "Some have not yet been completed since the consent decree was first implemented more than two years ago, and those that have been completed are well below an acceptable standard," Cherkasky wrote. The review for the quarter that ended Sept. 30 also said "resource constraints" in the inspector general's office were threatening its ability to oversee the LAPD. According to the report, the monitor reviewed 90 out of 1,883 investigations of misconduct complaints undertaken from January through June of this year. In a majority of cases in the sample, Cherkasky found that the LAPD failed to perform up to its promises. The monitor's report is the ninth such review since the city entered into a settlement in 2001 after the Justice Department, following the Rampart corruption scandal, concluded that the LAPD had engaged in a years-long "pattern or practice of civil rights violations." It was released as Chief William J. Bratton announced changes at the top of his command staff, in part to keep up momentum toward reform. The department must be in "substantial compliance" with the consent decree for two years before a June 2006 deadline or continue to be under the watchful eye of the federal monitor. 
Cherkasky was not available for comment Monday; nor was Gerald Chaleff, the LAPD executive in charge of the reforms. Chaleff has expressed confidence that the department can meet the compliance goal required to fulfill the obligations under the consent decree, noting that the quarterly reports highlight issues that are months old and are being addressed by the department when reports are issued.
# Flash-card generator entry point (Python 2: old-style except syntax,
# print statement, unicode builtin).
import argparse
import codecs
import logging
import os
import pickle

from pkg_resources import (
    resource_string,
    yield_lines,
)
import sys

import data
from rendering import CardRenderer
from translation import (
    DictionaryParser,
    NoTypeIndicatorException,
    ParseException,
    Translator,
    UnknownVariantTypeException,
    VariantParseException,
    WordNotFoundException,
)

# NOTE(review): `logger` used by the functions below is only bound inside
# the __main__ block at the bottom — importing this module and calling
# parse_dictionary() directly would raise NameError. Confirm the module is
# intended to run as a script only.


def parse_dictionary(dictionary_file):
    """
    Create a data structure from the dictionary.

    Reads a '#'-commented, utf-8 dictionary file line by line and returns a
    dict mapping unicode(word) -> (word, translation). Lines that fail to
    parse are logged and skipped; duplicate entries keep the first value.
    """
    entries = dict()
    dict_parser = DictionaryParser()

    with codecs.open(dictionary_file, 'r', encoding='utf-8') as lines:
        for line in lines:
            # Skip comment lines.
            if line.startswith('#'):
                continue
            try:
                word, translation = dict_parser.parse_line(line)
                if not translation:
                    logger.error(u"Couldn't find translation for '%s'" % line)
                    continue
                entry_key = unicode(word)
                if not entries.get(entry_key):
                    entries[entry_key] = word, translation
                else:
                    # First entry wins; later duplicates are ignored.
                    logger.info("Skipping duplicate entry for '%s'." % entry_key)
            except ParseException, e:
                logger.warn(u"Parse error: '%s'" % e)
            except NoTypeIndicatorException, e:
                logger.warn(u"Couldn't figure out word type for line: '%s'" % e)
            except VariantParseException, e:
                logger.warn(u"Couldn't parse some variants: '%s'" % e)
            except UnknownVariantTypeException, e:
                logger.warn(u"Not sure what a '%s' is." % e)

    return entries


def create_dictionary_pickle():
    """Parse the bundled de-en dictionary and cache it as dictionary.pkl."""
    logger.info("Creating dictionary pickle.")
    d = parse_dictionary('flashcardgenerator/data/de-en.txt')
    with open('dictionary.pkl', 'w') as f:
        pickle.dump(d, f)


def translate(word, translator):
    """Look up *word*; log and re-raise WordNotFoundException on a miss."""
    try:
        return translator.lookup(word)
    except WordNotFoundException:
        logging.warn("Couldn't find translation for '%s'."
                     % word)
        raise


if __name__ == '__main__':
    # Log warnings and above to a file.
    logger = logging.getLogger()
    handler = logging.FileHandler('flash-card-generator.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.WARNING)

    parser = argparse.ArgumentParser('Generate flash cards and lookup German to English translations.')
    parser.add_argument('--word-file', type=str)
    parser.add_argument('--lookup', type=str)
    args = parser.parse_args()

    # Build the dictionary cache on first run, then load it.
    if not os.path.exists('dictionary.pkl'):
        create_dictionary_pickle()

    with open('dictionary.pkl', 'r') as lookup_table_file:
        lookup_table = pickle.load(lookup_table_file)

    translator = Translator(lookup_table)

    # Single-word lookup mode.
    if args.lookup:
        print translate(args.lookup.strip(), translator)
        sys.exit(0)

    # Batch mode: translate every word in the file and render cards to PDF.
    if args.word_file:
        words_and_translations = []
        with codecs.open(args.word_file, 'r', encoding='utf-8') as lines:
            for word in lines:
                try:
                    original, translation = translate(word.strip(), translator)
                except WordNotFoundException:
                    # Untranslatable words are skipped (already logged).
                    continue
                words_and_translations.append((original, translation))

        renderer = CardRenderer()
        renderer.render_cards(words_and_translations, '/tmp/test.pdf')
        sys.exit(0)
This year, why not treat yourself and your significant other to the very special gift of a personalized fragrance? You can never have too many. Who wouldn’t want the extravagance of a new and different perfume, cologne or beard oil choice every morning? Gifts from the heart are a joy to make, beautifully affordable, and wonderful to gift on Valentine’s Day or any other special occasion. Best of all, most of the ingredients can be found in your medicine cabinet. One or more 10 mL glass rollette bottles (available in a variety of colours, but the cobalt blue bottles have a particularly special mystique to them). Try and get the ones with the stainless steel or glass roller balls as they won’t break down over time as plastic ones do. Organic base oil such as jojoba, apricot kernel, sweet almond or avocado. Avoid a nut derivative if you’re allergic to nuts. Essential oils such as black spruce, oakmoss, cedarwood and rose varieties, which include rose Bulgaria and amulya attar, patchouli, bergamot, orange, vanilla, etc. Step 1: Add six drops of mukhallat attar, 12 drops of rosewood, three drops of patchouli, six drops of rose Bulgaria to a glass mixing beaker. Swirl gently a few times. Let stand for 10 minutes. Step 2: Carefully pour essential oil cocktail into glass rollette bottle. Step 3: Fill the remaining space in the perfume bottle with oil. Jojoba is our favourite. Step 4: Click stainless steel roller onto perfume bottle. Step 5: Shake gently. Apply to wrist. Enjoy! Pro Tip #1: The WOW factor of small batch homemade perfumes only becomes evident after 48 hours. So give the perfume a couple days to age. In the meantime, you can design a bespoke customized label and make up your own delightful perfume name. Pro Tip #2: The more rare essential oils like mukhallat attar and rosewood are costly. Consider hosting a perfume or beard oil making party to split the cost of the materials. 
Buying glass bottles, beakers, organic jojoba oil and essential oils in bulk will save you a lot of money.
import unittest2 as unittest

from keystone.test.functional import common


class TestExtensions(common.ApiTestCase):
    """Functional checks for the /extensions discovery endpoints."""

    def test_extensions_json(self):
        # Fetch the JSON extension listing, insisting on a 200 status.
        response = self.service_request(path='/extensions.json',
                                        assert_status=200)
        self.assertTrue('json' in response.getheader('Content-Type'))

        body = response.json
        self.assertIsNotNone(body['extensions'])
        self.assertIsNotNone(body['extensions']['values'])

        # The RAX-KSKEY extension must be advertised in the listing.
        aliases = [entry['extension']['alias']
                   for entry in body['extensions']['values']]
        self.assertTrue('RAX-KSKEY' in aliases)

    def test_extensions_xml(self):
        # Fetch the XML extension listing and verify the advertised alias.
        response = self.service_request(path='/extensions.xml')
        self.assertTrue('xml' in response.getheader('Content-Type'))

        document = response.xml
        extension = document.find(
            "{http://docs.openstack.org/common/api/v2.0}extension")
        self.assertEqual(extension.get("alias"), "RAX-KSKEY")


if __name__ == '__main__':
    unittest.main()
Find Contact Tip Holder, Tip Holder Mb15Ak, Mig Contact Tip Holder on Industry Directory, Reliable Manufacturer/Supplier/Factory from China. Product categories of Tip Holder, we are specialized manufacturers from China, Contact Tip Holder, Tip Holder Mb15Ak suppliers/factory, wholesale high-quality products of Mig Contact Tip Holder R & D and manufacturing, we have the perfect after-sales service and technical support. Look forward to your cooperation! Packaging:25 pieces binzel 24KD tip holder packed in a plastic bag then 2000 pieces binzel 24KD tip holder packed in a carton. Packaging:50pc of wholesale gun accessories welding torch spare parts tregaskiss 402-20 packing 1 plastic bag, then packing in carton. Packaging:50pc of Tregaskiss 402-3 contact tip holder packing 1 plastic bag, then packing in carton. Packaging:50pc of Tregaskiss 404-3 contact tip holder packing 1 plastic bag, then packing in carton. Packaging:50pcs of welding tip holder packed in plastic bag, then packed in carton. Packaging:10pcs packed in poly bag, then packed in carton. Packaging:100 piece AW4000 tip holder of fronius parts packed in poly bag, then packed in bigger carton box. The bigger carton box is 38cmX35cmX35cm. 25 pieces binzel 24KD tip holder packed in a plastic bag then 2000 pieces binzel 24KD tip holder packed in a carton. 50pc of wholesale gun accessories welding torch spare parts tregaskiss 402-20 packing 1 plastic bag, then packing in carton. 50pc of Tregaskiss 402-3 contact tip holder packing 1 plastic bag, then packing in carton. 50pc of Tregaskiss 404-3 contact tip holder packing 1 plastic bag, then packing in carton. 50pcs of welding tip holder packed in plastic bag, then packed in carton. 10pcs packed in poly bag, then packed in carton. 100 piece AW4000 tip holder of fronius parts packed in poly bag, then packed in bigger carton box. The bigger carton box is 38cmX35cmX35cm.
Here you can find the related products in Tip Holder, we are professional manufacturer of Contact Tip Holder,Tip Holder Mb15Ak,Mig Contact Tip Holder. We focused on international export product development, production and sales. We have improved quality control processes of Tip Holder to ensure each export qualified product. If you want to know more about the products in Tip Holder, please click the product details to view parameters, models, pictures, prices and other information about Contact Tip Holder,Tip Holder Mb15Ak,Mig Contact Tip Holder. Whatever you are a group or individual, we will do our best to provide you with accurate and comprehensive message about Tip Holder!
from fabric.api import run, settings, sudo, open_shell, cd, env, local from fabric.operations import put, prompt, get def gethostip(hostname): "get ip of hostname" output = run('gethostip ' + hostname, true) parts = output.split(' ') return parts[1] def run_daemon_cmd(name, command): "run a daemon command" run("/etc/init.d/%s %s" % (name, command)) def mount(mountpoint): "mount specified mountpoint" run("mount %s" % (mountpoint, )) def unmount(mountpoint): "unmount specified mountpoint" run("umount %s" % (mountpoint, )) def add_sshfs_mount(*args): "install a list of sshfs mountpoints" FSTAB_PATTERN = "sshfs#{host}:{remotepath}\t{mountpoint}\tfuse\tdefaults,allow_other,exec,reconnect,transform_symlinks\t0 0" for mount in args: host = mount['host'] remotepath = mount['remotepath'] mountpoint = mount['mountpoint'] excludes = mount['excludes'] if env.host in excludes: print '%s is excluded from mountpoint.' % (env.host,) continue add_mount_point = True tmp_path = '/tmp/fstab.tmp' get("/etc/fstab", tmp_path) fstab_entry = FSTAB_PATTERN.format(host=host, remotepath=remotepath, mountpoint=mountpoint,) with open(tmp_path, 'r') as file: for line in file.readlines(): if mountpoint in line: add_mount_point = False if add_mount_point: with open(tmp_path, 'a') as file: file.write(fstab_entry + "\n\n") put(tmp_path, "/etc/fstab") with settings(warn_only=True): run('mkdir ' + mountpoint) run('umount ' + mountpoint) run('mount ' + mountpoint)
It's a win for Runcorn ward! Road resurfacing has finally commenced after our efforts in bringing this appalling condition to the council&apos;s attention. It&apos;s a win for the residents who use Wana Street, behind Altandi station in Sunnybank Hills.
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt

from __future__ import unicode_literals
import frappe
from frappe.modules import get_module_path, scrub_dt_dn
from frappe.modules.export_file import export_to_files, create_init_py
from frappe.custom.doctype.custom_field.custom_field import create_custom_field
from frappe.model.document import Document

class DataMigrationPlan(Document):
	"""Controller for the Data Migration Plan doctype.

	Keeps migration-id custom fields in sync on save and, in developer
	mode, exports the plan and its mappings to module files.
	"""

	def on_update(self):
		"""Sync custom fields and (developer mode only) export records to files."""
		# update custom fields in mappings
		self.make_custom_fields_for_mappings()

		# Skip the file export during fixture import and tests.
		if frappe.flags.in_import or frappe.flags.in_test:
			return

		if frappe.local.conf.get('developer_mode'):
			record_list =[['Data Migration Plan', self.name]]

			for m in self.mappings:
				record_list.append(['Data Migration Mapping', m.mapping])

			export_to_files(record_list=record_list, record_module=self.module)

			# Ensure each exported mapping folder is an importable package.
			for m in self.mappings:
				dt, dn = scrub_dt_dn('Data Migration Mapping', m.mapping)
				create_init_py(get_module_path(self.module), dt, dn)

	def make_custom_fields_for_mappings(self):
		"""Create a hidden, unique "<plan> ID" Data field on every mapped doctype.

		The fieldname is stored on each mapping as ``migration_id_field``.
		The same field is also added to Deleted Document so deletions can be
		tracked by migration id.
		"""
		frappe.flags.ignore_in_install = True
		label = self.name + ' ID'
		fieldname = frappe.scrub(label)

		df = {
			'label': label,
			'fieldname': fieldname,
			'fieldtype': 'Data',
			'hidden': 1,
			'read_only': 1,
			'unique': 1,
			'no_copy': 1
		}

		for m in self.mappings:
			mapping = frappe.get_doc('Data Migration Mapping', m.mapping)
			create_custom_field(mapping.local_doctype, df)
			mapping.migration_id_field = fieldname
			mapping.save()

		# Create custom field in Deleted Document
		create_custom_field('Deleted Document', df)

		frappe.flags.ignore_in_install = False

	def pre_process_doc(self, mapping_name, doc):
		"""Run the mapping module's optional ``pre_process`` hook on *doc*.

		Returns *doc* unchanged when no module or hook exists.
		"""
		module = self.get_mapping_module(mapping_name)

		if module and hasattr(module, 'pre_process'):
			return module.pre_process(doc)
		return doc

	def post_process_doc(self, mapping_name, local_doc=None, remote_doc=None):
		"""Run the mapping module's optional ``post_process`` hook.

		Returns the hook's result, or None when no module or hook exists.
		"""
		module = self.get_mapping_module(mapping_name)

		if module and hasattr(module, 'post_process'):
			return module.post_process(local_doc=local_doc, remote_doc=remote_doc)

	def get_mapping_module(self, mapping_name):
		"""Import and return the per-mapping hooks module, or None if absent.

		Looks up ``{app}.{module}.data_migration_mapping.{mapping_name}``
		based on the plan's module definition.
		"""
		try:
			module_def = frappe.get_doc("Module Def", self.module)
			module = frappe.get_module('{app}.{module}.data_migration_mapping.{mapping_name}'.format(
				app= module_def.app_name,
				module=frappe.scrub(self.module),
				mapping_name=frappe.scrub(mapping_name)
			))
			return module
		except ImportError:
			# No hooks module defined for this mapping — that's allowed.
			return None
A village in northern Switzerland has decided to take part in an experiment on basic income. Some residents of Rheinau, located near the German border, will receive a monthly payout of $2,570 (2,500 francs) as part of a planned documentary by director Rebecca Panian. According to the organizer's website, 692 people signed up for the unusual project, which needed at least the half of the total 1,300 inhabitants. The organizers of the project are getting ready to raise the money needed via crowdfunding, while the applicants still need to be reviewed to ensure eligibility. The project rules that if the participants do not have an income or earn less than the basic income of $2,570, they can keep the money. Participants with a higher income than the monthly free money will, however, need to pay back the amount. The monthly basic income could also benefit families with children, as minors can also be registered even if their parents earn more than $2,570. The project foresees $642 per child, from which, however, the child allowance will be deducted. A team of scientists are expected to oversee the whole process, observing any changes in the community and in the behavior of the participants. The study seeks to thoroughly examine the effect of a basic income on families, purchasing power and the entire life of a village. A proposal for a nationwide basic income in Switzerland was widely discussed two years ago and failed to pass in a national vote. A similar project was previously introduced in Finland to explore the advantages of a universal basic income, while it is also currently being discussed in Italy.
# -*- coding: utf-8 -*-
# All listeners that are with minor modifications shared between PostgreSQL
# and MySQL.

from __future__ import (absolute_import, print_function)

import logging
import re

import antlr4
from antlr4.error.ErrorListener import ErrorListener

from ..exceptions import QueryError, QuerySyntaxError


def parse_alias(alias, quote_char):
    """
    Extract the alias if available.

    :param alias: antlr context
    :param quote_char: which string quote character to use
    """
    if alias:
        alias = alias.ID().getText().strip(quote_char)
    else:
        alias = None
    return alias


def process_column_name(column_name_listener, walker, ctx, quote_char):
    '''
    A helper function that strips the quote characters from the column
    names. The returned list includes:

    cn[0] - schema
    cn[1] - table
    cn[2] - column
    cn[3] - ctx

    :param column_name_listener: column_name_listener object
    :param walker: antlr walker object
    :param ctx: antlr context to walk through
    :param quote_char: which quote character are we expecting?
    '''
    cn = []
    # reset the listener state before re-walking with a new context
    column_name_listener.column_name = []
    walker.walk(column_name_listener, ctx)
    if column_name_listener.column_name:
        for i in column_name_listener.column_name:
            cni = [None, None, None, i]
            if i.schema_name():
                cni[0] = i.schema_name().getText().replace(quote_char, '')
            if i.table_name():
                cni[1] = i.table_name().getText().replace(quote_char, '')
            if i.column_name():
                cni[2] = i.column_name().getText().replace(quote_char, '')
            cn.append(cni)
    else:
        # no plain column names found; the context may still be a
        # (possibly table-qualified) '*' selection
        try:
            ctx.ASTERISK()
            ts = ctx.table_spec()
            cn = [[None, None, '*', None]]
            if ts.schema_name():
                cn[0][0] = ts.schema_name().getText().replace(quote_char, '')
            if ts.table_name():
                cn[0][1] = ts.table_name().getText().replace(quote_char, '')
        except AttributeError:
            # context has no ASTERISK()/table_spec() accessors
            cn = [[None, None, None, None]]
    return cn


def get_column_name_listener(base):
    """Build a ColumnNameListener class on top of the given listener base."""

    class ColumnNameListener(base):
        """
        Get all column names.
        """
        def __init__(self):
            # collected Column_spec contexts
            self.column_name = []
            # parallel list: ctx when the spec has extra children, else None
            self.column_as_array = []

        def enterColumn_spec(self, ctx):
            try:
                if ctx.children[1].getText():
                    self.column_as_array.append(ctx)
                else:
                    self.column_as_array.append(None)
            except IndexError:
                # single-child column spec
                self.column_as_array.append(None)
            self.column_name.append(ctx)

    return ColumnNameListener


def get_table_name_listener(base, quote_char):
    """Build a TableNameListener class on top of the given listener base."""

    class TableNameListener(base):
        """
        Get table names.
        """
        def __init__(self):
            self.table_names = []
            self.table_aliases = []

        def enterTable_atom(self, ctx):
            self.table_names.append(ctx)

        def enterAlias(self, ctx):
            alias = parse_alias(ctx, quote_char)
            self.table_aliases.append(alias)

    return TableNameListener


def get_schema_name_listener(base, quote_char):
    """Build a SchemaNameListener that rewrites schema names in-place."""

    class SchemaNameListener(base):
        """
        Replace schema names in the parse tree according to the
        replace_schema_name mapping.
        """
        def __init__(self, replace_schema_name):
            # mapping of original schema name -> replacement
            self.replace_schema_name = replace_schema_name

        def enterSchema_name(self, ctx):
            ttype = ctx.start.type
            sn = ctx.getTokens(ttype)[0].getSymbol().text
            try:
                nsn = self.replace_schema_name[sn.replace(quote_char, '')]
                try:
                    # Python 2 only: ensure the replacement is unicode
                    nsn = unicode(nsn, 'utf-8')
                except NameError:
                    # Python 3: 'unicode' does not exist, str is fine
                    pass
                # swap the unquoted body of the name, preserving any quotes
                nsn = re.sub(r'(|{})(?!{})[\S]*[^{}](|{})'.format(
                    quote_char, quote_char, quote_char, quote_char),
                    r'\1{}\2'.format(nsn), sn)
                # write the rewritten text back into the token stream
                ctx.getTokens(ttype)[0].getSymbol().text = nsn
            except KeyError:
                # schema not in the replacement map: leave untouched
                pass

    return SchemaNameListener


def get_remove_subqueries_listener(base, base_parser):
    """Build a listener that prunes subquery select_expressions."""

    class RemoveSubqueriesListener(base):
        """
        Remove nested select_expressions.
        """
        def __init__(self, depth):
            # only expressions strictly deeper than this depth are removed
            self.depth = depth

        def enterSelect_expression(self, ctx):
            parent = ctx.parentCtx.parentCtx

            if isinstance(parent, base_parser.SubqueryContext) and \
                    ctx.depth() > self.depth:
                # we need to remove all Select_expression instances, not
                # just the last one so we loop over until we get all of them
                # out
                seinstances = [isinstance(
                    i, base_parser.Select_expressionContext)
                    for i in ctx.parentCtx.children]
                while True in seinstances:
                    ctx.parentCtx.removeLastChild()
                    seinstances = [isinstance(
                        i, base_parser.Select_expressionContext)
                        for i in ctx.parentCtx.children]

    return RemoveSubqueriesListener


def get_query_listener(base, base_parser, quote_char):
    """Build a QueryListener that collects select_expressions and aliases."""

    class QueryListener(base):
        """
        Extract all select_expressions.
        """
        def __init__(self):
            self.select_expressions = []
            self.select_list = None
            self.keywords = []
            # maps a select_expression ctx to its subquery alias (or None)
            self.subquery_aliases = {}

        def enterSelect_statement(self, ctx):
            if ctx.UNION_SYM():
                self.keywords.append('union')

        def enterSelect_expression(self, ctx):
            # we need to keep track of unions as they act as subqueries
            self.select_expressions.append(ctx)

            parent = ctx.parentCtx.parentCtx
            if isinstance(parent, base_parser.SubqueryContext):
                try:
                    alias = parent.parentCtx.alias()
                    alias = parse_alias(alias, quote_char)
                    self.subquery_aliases[ctx] = alias
                except AttributeError:
                    # enclosing context carries no alias() accessor
                    pass

        def enterSelect_list(self, ctx):
            # remember only the first (outermost) select list
            if not self.select_list:
                self.select_list = ctx

    return QueryListener


def get_column_keyword_function_listener(base, quote_char):
    """Build the main listener collecting columns, keywords and functions."""

    class ColumnKeywordFunctionListener(base):
        """
        Extract columns, keywords and functions.
        """
        def __init__(self):
            self.tables = []
            self.columns = []
            self.column_aliases = []
            self.keywords = []
            self.functions = []
            self.column_name_listener = get_column_name_listener(base)()
            self.table_name_listener = get_table_name_listener(
                base, quote_char)()
            self.walker = antlr4.ParseTreeWalker()
            # ordered trace of (depth, ctx[, payload]) used downstream by
            # SQLQueryProcessor._extract_instances
            self.data = []

        def _process_alias(self, ctx):
            # Return the parsed alias of ctx, or None when absent.
            try:
                alias = ctx.alias()
            except AttributeError:
                alias = None
            alias = parse_alias(alias, quote_char)
            return alias

        def _extract_column(self, ctx, append=True, join_columns=False):
            # Resolve column name parts for ctx; either append them to
            # self.columns (append=True) or return (alias, columns).
            cn = process_column_name(self.column_name_listener, self.walker,
                                     ctx, quote_char)
            alias = self._process_alias(ctx)

            if len(cn) > 1:
                if join_columns:
                    columns = [[i, None, join_columns] for i in cn]
                else:
                    columns = [[i, None] for i in cn]
            else:
                if join_columns:
                    columns = [[cn[0], alias, join_columns]]
                else:
                    columns = [[cn[0], alias]]

            if not append:
                return alias, columns

            if alias is not None:
                self.column_aliases.append(alias)

            if cn[0] not in self.column_aliases:
                self.columns.extend(columns)

        def enterTable_references(self, ctx):
            self.walker.walk(self.table_name_listener, ctx)
            tas = self.table_name_listener.table_aliases
            if len(tas):
                logging.info((ctx.depth(), ctx.__class__.__name__, tas))
                self.data.append([ctx.depth(), ctx, tas])
            else:
                logging.info((ctx.depth(), ctx.__class__.__name__))
                self.data.append([ctx.depth(), ctx])

        def enterTable_atom(self, ctx):
            alias = parse_alias(ctx.alias(), quote_char)
            ts = ctx.table_spec()
            if ts:
                tn = [None, None]
                if ts.schema_name():
                    tn[0] = ts.schema_name().getText().replace(quote_char, '')
                if ts.table_name():
                    tn[1] = ts.table_name().getText().replace(quote_char, '')
                self.tables.append((alias, tn, ctx.depth()))
                logging.info((ctx.depth(), ctx.__class__.__name__,
                              [tn, alias]))
                self.data.append([ctx.depth(), ctx, [tn, alias]])

        def enterDisplayed_column(self, ctx):
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])
            self._extract_column(ctx)
            if ctx.ASTERISK():
                self.keywords.append('*')

        def enterSelect_expression(self, ctx):
            logging.info((ctx.depth(), ctx.__class__.__name__))
            self.data.append([ctx.depth(), ctx])

        def enterSelect_list(self, ctx):
            if ctx.ASTERISK():
                logging.info((ctx.depth(), ctx.__class__.__name__,
                              [[None, None, '*'], None]))
                self.data.append([ctx.depth(), ctx,
                                  [[[None, None, '*'], None]]])
                self.columns.append(('*', None))
                self.keywords.append('*')

        def enterFunctionList(self, ctx):
            self.functions.append(ctx.getText())

        def enterGroup_functions(self, ctx):
            self.functions.append(ctx.getText())

        def enterGroupby_clause(self, ctx):
            self.keywords.append('group by')
            col = self._extract_column(ctx, append=False)
            # only record the column when grouping is not on an alias
            if col[1][0][0][2] not in self.column_aliases:
                self._extract_column(ctx)
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])

        def enterWhere_clause(self, ctx):
            self.keywords.append('where')
            self._extract_column(ctx)
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])

        def enterHaving_clause(self, ctx):
            self.keywords.append('having')
            self._extract_column(ctx)
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])

        def enterOrderby_clause(self, ctx):
            self.keywords.append('order by')
            col = self._extract_column(ctx, append=False)
            # only record the column when ordering is not on an alias
            if col[1][0][0][2] not in self.column_aliases:
                self._extract_column(ctx)
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])

        def enterLimit_clause(self, ctx):
            self.keywords.append('limit')

        def enterJoin_condition(self, ctx):
            self.keywords.append('join')
            self._extract_column(ctx, join_columns=ctx)
            logging.info((ctx.depth(), ctx.__class__.__name__,
                          self._extract_column(ctx, append=False)[1]))
            self.data.append([ctx.depth(), ctx,
                              self._extract_column(ctx, append=False)[1]])

        # pg_sphere geometry constructors are recorded as functions
        def enterSpoint(self, ctx):
            self.functions.append('spoint')

        def enterScircle(self, ctx):
            self.functions.append('scircle')

        def enterSline(self, ctx):
            self.functions.append('sline')

        def enterSellipse(self, ctx):
            self.functions.append('sellipse')

        def enterSbox(self, ctx):
            self.functions.append('sbox')

        def enterSpoly(self, ctx):
            self.functions.append('spoly')

        def enterSpath(self, ctx):
            self.functions.append('spath')

        def enterStrans(self, ctx):
            self.functions.append('strans')

    return ColumnKeywordFunctionListener


class SyntaxErrorListener(ErrorListener):
    """
    Collect every syntax error reported by the lexer/parser as a
    (line, column, offending-text-or-message) tuple.
    """
    def __init__(self):
        super(SyntaxErrorListener, self).__init__()
        self.syntax_errors = []

    def syntaxError(self, recognizer, offending_symbol, line, column, msg, e):
        if offending_symbol is not None:
            self.syntax_errors.append((line, column, offending_symbol.text))
        else:
            self.syntax_errors.append((line, column, msg))


class SQLQueryProcessor(object):
    """
    Object used for processing MySQL/PostgreSQL queries. Its objective is
    query validation (syntax error detection) and extraction of used
    columns, keywords and functions.

    :param base_lexer: Base antlr Lexer class.
    :param base_parser: Base antlr Parser class.
    :param base_parser_listener: Base antlr ParserListener class.
    :param quote_char: Which character is used to quote strings?
    :param query: SQL query string.
    :param base_sphere_listener: Base sphere listener. For now only pg_sphere
        is supported but other types of listeners can be added.
    """
    def __init__(self, base_lexer, base_parser, base_parser_listener,
                 quote_char, query=None, base_sphere_listener=None):
        self.lexer = base_lexer
        self.parser = base_parser
        self.parser_listener = base_parser_listener
        self.quote_char = quote_char
        self.sphere_listener = base_sphere_listener
        self.walker = antlr4.ParseTreeWalker()
        self.syntax_error_listener = SyntaxErrorListener()
        # results populated by process_query()
        self.columns = set()
        self.keywords = set()
        self.functions = set()
        self.display_columns = []
        if query is not None:
            # convenience: validate and process immediately
            self.set_query(query)
            self.process_query()

    def _extract_instances(self, column_keyword_function_listener):
        """
        Sort the listener's ordered ``data`` trace into select-list columns,
        tables, table references, where/having/join columns, group/order
        columns, join depth and aliases for one select_expression.
        """
        select_list_columns = []
        other_columns = []
        go_columns = []
        column_aliases = []
        select_list_tables = []
        select_list_table_references = []
        join = 0
        join_using = None

        # Keep track of the ctx stack
        ctx_stack = []

        for i in column_keyword_function_listener.data:
            if isinstance(i[1], self.parser.Displayed_columnContext):
                # this happens if there is an expression involving
                # more columns
                if len(i[2]) > 1:
                    for j in i[2]:
                        other_columns.append([j])
                else:
                    select_list_columns.append(i[2])
                alias = parse_alias(i[1].alias(), '"')
                if alias is not None:
                    column_aliases.append(alias)
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Table_atomContext):
                select_list_tables.append([i[2], i[0]])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Table_referencesContext):
                if len(i) > 2:
                    select_list_table_references.extend(i[2])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Select_listContext):
                if len(i) == 3:
                    select_list_columns.append([[i[2][0][0] + [i[1]],
                                                 i[2][0][1]]])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Where_clauseContext) or\
                    isinstance(i[1], self.parser.Having_clauseContext):
                if len(i[2]) > 1:
                    for j in i[2]:
                        other_columns.append([j])
                else:
                    other_columns.append(i[2])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Join_conditionContext):
                join = i[0]
                join_using = i[2]
                if i[1].USING_SYM():
                    # USING(col): attribute the column to every table atom
                    # sitting directly below on the ctx stack
                    for ctx in ctx_stack[::-1]:
                        if not isinstance(ctx[1],
                                          self.parser.Table_atomContext):
                            break
                        for ju in join_using:
                            if ju[0][1] is None:
                                other_columns.append([[[ctx[2][0][0],
                                                        ctx[2][0][1],
                                                        ju[0][2], ctx[1]],
                                                       None]])
                elif i[1].ON():
                    if len(i[2]) > 1:
                        for j in i[2]:
                            other_columns.append([j])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Orderby_clauseContext):
                if len(i[2]) > 1:
                    for j in i[2]:
                        go_columns.append([j])
                else:
                    go_columns.append(i[2])
                ctx_stack.append(i)

            if isinstance(i[1], self.parser.Groupby_clauseContext):
                if len(i[2]) > 1:
                    for j in i[2]:
                        go_columns.append([j])
                else:
                    go_columns.append(i[2])
                ctx_stack.append(i)

        return select_list_columns, select_list_tables,\
            select_list_table_references, other_columns, go_columns, join,\
            join_using, column_aliases

    def _get_budget_column(self, c, tab, ref):
        """
        Try to resolve column ``c`` against a budget ``ref`` (a list of
        columns propagated up from a lower subquery level).

        Returns (cname, cctx, calias, column_found, table).
        """
        cname = c[0][2]
        cctx = c[0][3]
        calias = c[1]

        t = tab
        column_found = False

        for bc in ref:
            if bc[0][2] == '*':
                # budget exposes everything from that table
                t = [[bc[0][0], bc[0][1]], 'None']
                column_found = True
                break
            elif bc[1] and c[0][2] == bc[1]:
                # matched via the budget column's alias
                t = [[bc[0][0], bc[0][1]], 'None']
                cname = bc[0][2]
                if c[1] is None:
                    calias = c[0][2]
                column_found = True
                break
            elif c[0][2] == bc[0][2] and bc[1] is None:
                # matched by plain column name
                t = [[bc[0][0], bc[0][1]], 'None']
                column_found = True
                break

        return cname, cctx, calias, column_found, t

    def _extract_columns(self, columns, select_list_tables, ref_dict, join,
                         budget, column_aliases, touched_columns=None,
                         subquery_contents=None):
        """
        Resolve each entry of ``columns`` in place to a fully qualified
        [[schema, table, column, ctx], alias] form, using the tables,
        references, budget and subquery contents of the current level.

        Returns the columns that could not be resolved at this level.
        """
        # Here we store all columns that might have references somewhere
        # higher up in the tree structure. We'll revisit them later.
        missing_columns = []
        remove_column_idxs = []
        extra_columns = []

        for i, col in enumerate(columns):
            c = col[0]
            cname = c[0][2]
            cctx = c[0][3]
            calias = c[1]

            # if * is selected we don't care too much
            if c[0][0] is None and c[0][1] is None and c[0][2] == '*'\
                    and not join:
                # expand bare '*' to one entry per select-list table
                for slt in select_list_tables:
                    extra_columns.append([[slt[0][0][0], slt[0][0][1],
                                           cname, c[0][3]], calias])
                remove_column_idxs.append(i)
                continue

            # this can happen for example in ... WHERE EXISTS ... clauses
            if cname is None and calias is None:
                remove_column_idxs.append(i)
                continue

            tab = [[None, None], None]
            try:
                tab = select_list_tables[0][0]
                if tab[0][0] is None:
                    raise QueryError('Missing schema specification.')

                # We have to check if we also have a join on the same level
                # and we are actually touching a column from the joined table
                if join and c[0][2] != '*' and\
                        (tab[1] != c[0][1] or
                         (tab[1] is None and c[0][1] is None)):
                    cname, cctx, calias, column_found, tab =\
                        self._get_budget_column(c, tab, budget[-1][2])
                    # raise an ambiguous column
                    if column_found and c[0][1] is None:
                        raise QueryError("Column '%s' is possibly ambiguous."
                                         % c[0][2])
            except IndexError:
                # no select-list tables at this level
                pass

            try:
                # ref can be a table or a budget of columns
                ref = ref_dict[c[0][1]]
                column_found = False

                if isinstance(ref[0], int):
                    # ref is a budget column
                    cname, cctx, calias, column_found, tab =\
                        self._get_budget_column(c, tab, ref[2])

                    ref_cols = [j[0][2] for j in ref[2]]
                    if not column_found and c[0][1] is not None\
                            and c[0][1] != tab[0][1] and '*' not in ref_cols:
                        raise QueryError("Unknown column '%s.%s'."
                                         % (c[0][1], c[0][2]))

                else:
                    # ref is a table
                    tab = ref[0]

            except KeyError:
                # no direct reference for this column's table part
                if None not in c[0][:3]:
                    # fully qualified already: keep as-is
                    cname = c[0][2]
                    cctx = c[0][3]
                    calias = c[1]
                    tab = [[c[0][0], c[0][1]]]
                    column_found = True

                # table is either referenced directly or by an alias
                elif c[0][2] is not None and c[0][1] is not None:
                    if subquery_contents is not None:
                        try:
                            contents = subquery_contents[c[0][1]]
                            cname, cctx, calias, column_found, tab =\
                                self._get_budget_column(c, tab, contents)
                        except KeyError:
                            # not a subquery alias: try table aliases
                            tabs = [j[0][0][:2]
                                    for j in subquery_contents.values()]
                            tabs += [j[0][0] for j in select_list_tables]
                            column_found = False
                            for t in tabs:
                                if t[1] == c[0][1]:
                                    cname = c[0][2]
                                    cctx = c[0][3]
                                    calias = c[1]
                                    tab = [t]
                                    column_found = True

                            if not column_found:
                                # defer resolution to a higher level
                                missing_columns.append(c)
                                columns[i] = c
                                if touched_columns is not None:
                                    touched_columns.append(c)
                                continue
                    else:
                        if tab[0][1] == c[0][1]:
                            columns[i] = [[tab[0][0], tab[0][1], c[0][2],
                                           c[0][3]], c[1]]
                        else:
                            missing_columns.append(c)
                            columns[i] = c
                            if touched_columns is not None:
                                touched_columns.append(c)
                            continue

                elif c[0][2] is not None and c[0][2] != '*' and c[0][1] is \
                        None and len(ref_dict.keys()) > 1 and not join:
                    raise QueryError("Column '%s' is ambiguous." % c[0][2])

                elif len(budget) and tab[0][0] is None and tab[0][1] is None:
                    # last resort: resolve against the most recent budget
                    ref = budget[-1]
                    column_found = False

                    if isinstance(ref[0], int):
                        cname, cctx, calias, column_found, tab =\
                            self._get_budget_column(c, tab, ref[2])

                    # We allow None.None columns because they are produced
                    # by count(*)
                    if not column_found and c[0][2] is not None\
                            and c[0][2] not in column_aliases:
                        raise QueryError("Unknown column '%s'." % c[0][2])

            if touched_columns is not None:
                touched_columns.append([[tab[0][0], tab[0][1], cname, cctx],
                                        calias])
            else:
                columns[i] = [[tab[0][0], tab[0][1], cname, c[0][3]], calias]

        # drop indices back-to-front so earlier indices stay valid
        for i in remove_column_idxs[::-1]:
            columns.pop(i)

        columns.extend(extra_columns)
        return missing_columns

    def process_query(self, replace_schema_name=None, indexed_objects=None):
        """
        Parses and processes the query.

        After a successful run it fills up columns, keywords, functions and
        syntax_errors lists.

        :param replace_schema_name: A new schema name to be put in place of
            the original.
        :param indexed_objects: A dictionary defining pgsphere objects to be
            replaced with precomputed (on the database level) columns. For
            example,
            iob = {'spoint': ((('gdr2', 'gaia_source', 'ra'),
                               ('gdr2', 'gaia_source', 'dec'), 'pos'),)}
            will replace 'spoint(RADIANS(ra), RADIANS(dec))' with a 'pos'
            column.
        """
        # Antlr objects
        inpt = antlr4.InputStream(self.query)
        lexer = self.lexer(inpt)
        stream = antlr4.CommonTokenStream(lexer)
        parser = self.parser(stream)
        lexer._listeners = [self.syntax_error_listener]
        parser._listeners = [self.syntax_error_listener]

        # Parse the query
        tree = parser.query()
        if len(self.syntax_error_listener.syntax_errors):
            raise QuerySyntaxError(self.syntax_error_listener.syntax_errors)

        if replace_schema_name is not None:
            schema_name_listener = get_schema_name_listener(
                self.parser_listener, self.quote_char)(replace_schema_name)
            self.walker.walk(schema_name_listener, tree)
            self._query = stream.getText()

        query_listener = get_query_listener(self.parser_listener,
                                            self.parser, self.quote_char)()
        subquery_aliases = [None]
        keywords = []
        functions = []
        tables = []

        self.walker.walk(query_listener, tree)
        keywords.extend(query_listener.keywords)
        subquery_aliases = query_listener.subquery_aliases

        # Columns that are accessed by the query
        touched_columns = []
        # List we use to propagate the columns through the tree
        budget = []
        # Are there any joins in the query?
        join = 0

        missing_columns = []

        column_aliases = []
        column_aliases_from_previous = []

        subquery_contents = {}

        # Iterate through subqueries starting with the lowest level
        for ccc, ctx in enumerate(query_listener.select_expressions[::-1]):
            remove_subquieries_listener = get_remove_subqueries_listener(
                self.parser_listener, self.parser)(ctx.depth())
            column_keyword_function_listener = \
                get_column_keyword_function_listener(
                    self.parser_listener, self.quote_char)()

            # Remove nested subqueries from select_expressions
            self.walker.walk(remove_subquieries_listener, ctx)

            # Extract table and column names, keywords, functions
            self.walker.walk(column_keyword_function_listener, ctx)

            keywords.extend(column_keyword_function_listener.keywords)
            functions.extend(column_keyword_function_listener.functions)

            # Does the subquery have an alias?
            try:
                subquery_alias = subquery_aliases[ctx]
            except KeyError:
                subquery_alias = None

            current_depth = column_keyword_function_listener.data[0][0]

            # We get the columns from the select list along with all
            # other touched columns and any possible join conditions
            column_aliases_from_previous = [i for i in column_aliases]
            select_list_columns, select_list_tables,\
                select_list_table_references, other_columns, go_columns,\
                join, join_using, column_aliases =\
                self._extract_instances(column_keyword_function_listener)

            tables.extend([i[0] for i in select_list_tables])

            # Then we need to connect the column names with tables and
            # databases
            ref_dict = {}

            for ref in select_list_table_references:
                ref_found = False
                for tab in select_list_tables:
                    if ref == tab[0][1]:
                        ref_dict[ref] = tab
                        ref_found = True

                if not ref_found:
                    # the reference may point to a lower-level budget entry
                    for b in budget:
                        if ref == b[1]:
                            ref_dict[ref] = b

            if not len(select_list_table_references):
                for table in select_list_tables:
                    ref_dict[table[0][0][1]] = table

            mc = self._extract_columns(select_list_columns,
                                       select_list_tables, ref_dict, join,
                                       budget, column_aliases_from_previous)
            missing_columns.extend([[i] for i in mc])

            touched_columns.extend(select_list_columns)
            current_columns = [i for i in select_list_columns]
            budget.append([current_depth, subquery_alias,
                           select_list_columns])

            aliases = [i[1] for i in select_list_columns] + column_aliases

            # group-by/order-by columns that are not aliases are real columns
            for col in go_columns:
                if col[0][0][2] not in aliases:
                    other_columns.append(col)

            mc = self._extract_columns(other_columns, select_list_tables,
                                       ref_dict, join, budget,
                                       column_aliases_from_previous,
                                       touched_columns)
            missing_columns.extend([[i] for i in mc])

            if join:
                # joins collapse everything below them into one budget entry
                join_columns = []
                join_columns.append(budget.pop(-1))
                if len(join_using) == 1:
                    for tab in select_list_tables:
                        touched_columns.append([[tab[0][0][0], tab[0][0][1],
                                                 join_using[0][0][2]], None])
                bp = []
                for b in budget[::-1]:
                    if b[0] > current_depth:
                        bp.append(budget.pop(-1)[2])
                budget.extend(join_columns)

            if subquery_alias is not None:
                subquery_contents[subquery_alias] = current_columns

        if len(missing_columns):
            # last chance to resolve columns deferred from lower levels
            mc = self._extract_columns(missing_columns, select_list_tables,
                                       ref_dict, join, budget,
                                       column_aliases_from_previous,
                                       touched_columns, subquery_contents)
            if len(mc):
                unref_cols = "', '".join(['.'.join([j for j in i[0][:3]
                                                    if j]) for i in mc])
                raise QueryError("Unreferenced column(s): '%s'." %
                                 unref_cols)

        # If we have indexed_objects, we are also accessing those. We
        # need to add them into the columns stack:
        if indexed_objects is not None:
            for k, v in indexed_objects.items():
                for vals in v:
                    touched_columns.append([[vals[0][0], vals[0][1], vals[2],
                                             None], None])

        touched_columns = set([tuple(i[0]) for i in touched_columns])

        # extract display_columns
        display_columns = []
        mc = self._extract_columns([[i] for i in budget[-1][2]],
                                   select_list_tables, ref_dict, join,
                                   budget, column_aliases_from_previous,
                                   display_columns, subquery_contents)

        display_columns = [[i[1] if i[1] else i[0][2], i[0]]
                           for i in display_columns]

        # Let's get rid of all columns that are already covered by
        # db.tab.*. Figure out a better way to do it and replace the code
        # below.
        asterisk_columns = []
        del_columns = []
        for col in touched_columns:
            if col[2] == '*':
                asterisk_columns.append(col)

        for acol in asterisk_columns:
            for col in touched_columns:
                if acol[0] == col[0] and acol[1] == col[1] and \
                        acol[2] != col[2]:
                    del_columns.append(col)

        columns = list(set(touched_columns).difference(del_columns))
        self.columns = list(set([self._strip_column(i) for i in columns]))
        self.keywords = list(set(keywords))
        self.functions = list(set(functions))

        self.display_columns = [(i[0].lstrip('"').rstrip('"'),
                                 list(self._strip_column(i[1])))
                                for i in display_columns]

        self.tables = list(set([tuple([i[0][0].lstrip('"').rstrip('"')
                                       if i[0][0] is not None else i[0][0],
                                       i[0][1].lstrip('"').rstrip('"')
                                       if i[0][1] is not None else i[0][1]])
                                for i in tables]))

        # If there are any sphere-like objects (pgsphere...) that are indexed
        # we need to replace the ADQL translated query parts with the indexed
        # column names
        if indexed_objects is not None and self.sphere_listener is not None:
            # we need to correctly alias 'pos' columns
            for k, v in indexed_objects.items():
                indexed_objects[k] = list([list(i) for i in v])
                for i, vals in enumerate(v):
                    for t in tables:
                        if vals[0][0] == t[0][0] and vals[0][1] == t[0][1]\
                                and t[1] is not None:
                            indexed_objects[k][i][2] = t[1] + '.' +\
                                indexed_objects[k][i][2]

            sphere_listener = self.sphere_listener(columns, indexed_objects)
            self.walker.walk(sphere_listener, tree)

            for k, v in sphere_listener.replace_dict.items():
                self._query = self._query.replace(k, v)

    @property
    def query(self):
        """
        Get the query string.
        """
        return self._query

    def _strip_query(self, query):
        # Normalize the raw query: trim whitespace, ensure one trailing ';'
        return query.lstrip('\n').rstrip().rstrip(';') + ';'

    def _strip_column(self, col):
        # Strip double quotes from the (schema, table, column) triple.
        scol = [None, None, None]
        for i in range(3):
            if col[i] is not None:
                scol[i] = col[i].lstrip('"').rstrip('"')
        return tuple(scol)

    def set_query(self, query):
        """
        Helper to set the query string.
        """
        # reset all results from a previous run
        self.columns = set()
        self.keywords = set()
        self.functions = set()
        self.display_columns = []
        self.syntax_error_listener = SyntaxErrorListener()
        self._query = self._strip_query(query)
WARNING: Sotalol should be used only in certain patients. Before taking sotalol, discuss the risks and benefits of treatment with your doctor. Although sotalol is effective for treating certain types of fast/irregular heartbeats, it can infrequently cause a new serious abnormal heart rhythm (QT prolongation in the EKG). This problem can lead to a new type of abnormal (possibly fatal) heartbeat (torsade de pointes). If this new serious heart rhythm occurs, it is usually when sotalol treatment is first started or when the dose is increased. Therefore, to reduce the risk of this side effect, you should begin sotalol treatment in a hospital so your doctor can monitor your heart rhythm for several days and treat the problems if they occur. Patients usually start with a low dose of sotalol. Your doctor will then slowly increase your dose depending on how you respond to the medication and how well your kidneys work. If you respond well to this medication, during this time in the hospital your doctor can determine the right dose of sotalol for you. Some sotalol products are used to treat certain types of fast/irregular heartbeats (atrial fibrillation/flutter). These products come with additional written information from the manufacturer for patients with atrial fibrillation/flutter. If you have these conditions, read the provided information carefully. Do not switch brands of sotalol without checking with your doctor. Do not stop taking this medication without consulting your doctor. Some conditions may become worse when you suddenly stop this drug. Some people who have suddenly stopped taking similar drugs have had chest pain, heart attack, and irregular heartbeat. If your doctor decides you should no longer use this drug, he or she may direct you to gradually decrease your dose over 1 to 2 weeks. When gradually stopping this medication, it is recommended that you temporarily limit physical activity to decrease strain on the heart. 
Seek immediate medical attention if you develop: chest pain/tightness/pressure, chest pain spreading to the jaw/neck/arm, unusual sweating, trouble breathing, fast/irregular heartbeat. USES: This medication is used to treat a serious (possibly life-threatening) type of fast heartbeat called sustained ventricular tachycardia. It is also used to treat certain fast/irregular heartbeats (atrial fibrillation/flutter) in patients with severe symptoms such as weakness and shortness of breath. Sotalol helps to lessen these symptoms. It slows the heart rate and helps the heart to beat more normally and regularly. This medication is both a beta blocker and an anti-arrhythmic. HOW TO USE: See also Warning section. Read the Patient Information Leaflet (provided by your pharmacist for some sotalol products used to treat atrial fibrillation/flutter) before you start using sotalol and each time you get a refill. If you have any questions, consult your doctor or pharmacist. Take this medication by mouth, usually twice a day or as directed by your doctor. You may take it with or without food, but it is important to choose one way and take it the same way with each dose. Dosage is based on your medical condition and response to treatment. In children, dosage is also based on age, height, and weight. Use this medication regularly to get the most benefit from it. To help you remember, take it at the same times each day. If you use antacids containing aluminum or magnesium, do not take them at the same time as sotalol. These antacids can bind to sotalol and decrease its absorption and effectiveness. Separate doses of these antacids and sotalol by at least 2 hours to reduce this interaction. Do not take more of this drug than prescribed because you may increase your risk of side effects, including a new serious abnormal heartbeat. Do not take less of this medication or skip doses unless directed by your doctor. 
Your fast/irregular heartbeat is more likely to return if you do not take sotalol properly. Also, do not run out of this medication. Order your refills several days early to avoid running out of pills. Tell your doctor if your condition does not improve or if it worsens. SIDE EFFECTS: See also Warning section. Tiredness, slow heartbeat, and dizziness may occur. Less common side effects include headache, diarrhea, and decreased sexual ability. If any of these effects persist or worsen, tell your doctor or pharmacist promptly. To lower your risk of dizziness and lightheadedness, get up slowly when rising from a sitting or lying position. Remember that your doctor has prescribed this medication because he or she has judged that the benefit to you is greater than the risk of side effects. Many people using this medication do not have serious side effects. Tell your doctor immediately if any of these unlikely but serious side effects occur: new or worsening symptoms of heart failure (such as swelling ankles/feet, severe tiredness, shortness of breath, unexplained/sudden weight gain). Seek immediate medical attention if any of these unlikely but serious side effects occur: severe dizziness, fainting, sudden change in heartbeat (unusually faster/slower/more irregular), chest/jaw/left arm pain. A very serious allergic reaction to this drug is rare. However, seek immediate medical attention if you notice any symptoms of a serious allergic reaction, including: rash, itching/swelling (especially of the face/tongue/throat), severe dizziness, trouble breathing. This is not a complete list of possible side effects. If you notice other effects not listed above, contact your doctor or pharmacist. In the US - Call your doctor for medical advice about side effects. You may report side effects to FDA at 1-800-FDA-1088. In Canada - Call your doctor for medical advice about side effects. You may report side effects to Health Canada at 1-866-234-2345. 
PRECAUTIONS: See also Warning section. Before taking sotalol, tell your doctor or pharmacist if you are allergic to it; or if you have any other allergies. This product may contain inactive ingredients, which can cause allergic reactions or other problems. Talk to your pharmacist for more details. This medication should not be used if you have certain medical conditions. Before using this medicine, consult your doctor or pharmacist if you have: certain heart rhythm problems (such as a slow heartbeat, second- or third-degree atrioventricular block unless you have a heart pacemaker), severe heart failure, breathing problems (such as asthma, chronic bronchitis, emphysema). Before using this medication, tell your doctor or pharmacist your medical history, especially of: kidney problems, treated stable heart failure, very recent heart attack (within 2 weeks), other irregular heartbeat problems (such as sick sinus syndrome), overactive thyroid disease (hyperthyroidism), serious allergic reactions needing treatment with epinephrine. Sotalol may cause a condition that affects the heart rhythm (QT prolongation in the EKG). QT prolongation can infrequently result in serious (rarely fatal) fast/irregular heartbeat and other symptoms (such as severe dizziness, fainting) that require immediate medical attention. The risk of QT prolongation may be increased if you have certain medical conditions or are taking other drugs that may affect the heart rhythm (see also Drug Interactions section). Before using sotalol, tell your doctor or pharmacist if you have any of the following conditions: certain heart problems (QT prolongation in the EKG, history of torsade de pointes), family history of certain heart problems (QT prolongation in the EKG, sudden cardiac death). Low levels of potassium or magnesium in the blood may also increase your risk of QT prolongation. 
This risk may increase if you use certain drugs (such as diuretics/"water pills"), if you are unable to eat or drink fluids as you normally would, or if you have conditions such as severe/prolonged sweating, diarrhea, or vomiting. Talk to your doctor about using sotalol safely. If you have diabetes, this product may prevent the fast/pounding heartbeat you would usually feel when your blood sugar level falls too low (hypoglycemia). Other symptoms of a low blood sugar level, such as dizziness and sweating, are unaffected by this drug. This product may also make it harder to control your blood sugar levels. Check your blood sugar levels regularly as directed by your doctor. Tell your doctor immediately if you have symptoms of high blood sugar such as increased thirst/urination. Your diabetes medication or diet may need to be adjusted. Before having surgery, tell your doctor or dentist that you are taking this medication. This drug may make you dizzy. Do not drive, use machinery, or do any activity that requires alertness until you are sure you can perform such activities safely. Limit alcoholic beverages. Kidney function declines as you grow older. This medication is removed by the kidneys. Therefore, older adults may be more sensitive to the side effects of this drug, including dizziness and tiredness. During pregnancy, this medication should be used only when clearly needed. It may harm an unborn baby. Discuss the risks and benefits with your doctor. This medication passes into breast milk and may have undesirable effects on a nursing infant. Discuss the risks and benefits with your doctor before breast-feeding. DRUG INTERACTIONS: See also How to Use and Precautions sections. Drug interactions may change how your medications work or increase your risk for serious side effects. This document does not contain all possible drug interactions. 
Keep a list of all the products you use (including prescription/nonprescription drugs and herbal products) and share it with your doctor and pharmacist. Do not start, stop, or change the dosage of any medicines without your doctor's approval. A product that may interact with this drug is: fingolimod. Many drugs besides sotalol may affect the heart rhythm (QT prolongation in the EKG), including amiodarone, dofetilide, pimozide, procainamide, quinidine, macrolide antibiotics (such as clarithromycin, erythromycin), among others. Check the labels on all your medicines (such as cough-and-cold products, allergy products, diet aids) because they may contain ingredients that could increase your heart rate. Check with your doctor or pharmacist first before taking any other medications. This medication may interfere with certain medical/laboratory tests, possibly causing false test results. Make sure laboratory personnel and all your doctors know you use this drug. OVERDOSE: If overdose is suspected, contact your local poison control center or emergency room immediately. US residents can call the US National Poison Hotline at 1-800-222-1222. Canada residents can call a provincial poison control center. Symptoms of overdose may include: severe dizziness, fainting, unusually slower/faster/more irregular heartbeat, shortness of breath. NOTES: Do not share this medication with others. Laboratory and/or medical tests (such as EKG, kidney function tests) should be performed periodically to monitor your progress or check for side effects. Consult your doctor for more details. STORAGE: Store at room temperature at 77 degrees F (25 degrees C) away from light and moisture. Brief storage between 59-86 degrees F (15-30 degrees C) is permitted. Do not store in the bathroom. Keep all medicines away from children and pets. Do not flush medications down the toilet or pour them into a drain unless instructed to do so. Properly discard this product when it is expired or no longer needed. 
Consult your pharmacist or local waste disposal company for more details about how to safely discard your product. MEDICAL ALERT: Your condition can cause complications in a medical emergency. For information about enrolling in MedicAlert, call 1-800-854-1166 (USA) or 1-800-668-1507 (Canada). Information last revised June 2012. Copyright(c) 2012 First Databank, Inc.
import os
import re
from types import (
    StringType,
    UnicodeType,
    BooleanType,
    LongType,
    IntType,
    )
from decimal import Decimal
from datetime import (
    datetime,
    timedelta,
    date
    )
import locale
import pytz
from pyramid.threadlocal import get_current_registry

# Sample the clock once at import time so we can capture the concrete
# runtime types of date/datetime/time/Decimal for dispatch below.
kini = datetime.now()
DateType = type(kini.date())
DateTimeType = type(kini)
TimeType = type(kini.time())
DecimalType = type(Decimal(0))


def dmy(tgl):
    """Format a date as dd-mm-yyyy."""
    return tgl.strftime('%d-%m-%Y')


def dmyhms(t):
    """Format a datetime as dd-mm-yyyy HH:MM:SS."""
    return t.strftime('%d-%m-%Y %H:%M:%S')


def hms(t):
    """Format a time (or datetime) as HH:MM:SS."""
    return t.strftime('%H:%M:%S')


def to_simple_value(v):
    """Coerce *v* to a JSON/serialization-friendly primitive.

    dates/datetimes -> ISO strings, times -> HH:MM:SS, Decimal -> float,
    int/long -> str (so very large values survive round-trips), strings
    are stripped, None -> ''.  Anything else is returned unchanged.
    """
    typ = type(v)
    if typ is DateType:
        return v.isoformat()  # dmy(v)
    if typ is DateTimeType:
        return v.isoformat()  # dmyhms(v)
    if typ is TimeType:
        return hms(v)
    if typ is DecimalType:
        return float(v)
    if typ in (LongType, IntType):
        # if v < MININT or v > MAXINT:
        return str(v)
    if v == 0:
        return '0'
    if typ in [UnicodeType, StringType]:
        return v.strip()
    if v is None:
        return ''
    return v


def dict_to_simple_value(d):
    """Return a copy of *d* with keys stringified and values passed
    through to_simple_value()."""
    r = {}
    for key in d:
        val = d[key]
        if type(key) not in (UnicodeType, StringType):
            key = str(key)
        r[key] = to_simple_value(val)
    return r


def date_from_str(value):
    """Parse 'dd-mm-yyyy', 'dd/mm/yyyy', 'yyyy-mm-dd' or 'yyyymmdd'
    into a datetime.date.  A trailing time component is ignored.

    Raises ValueError when the string matches none of those layouts
    (previously an unparseable short string fell through with y/m/d
    unbound, or raised a confusing ValueError from int('')).
    """
    separator = None
    value = value.split()[0]  # drop "HH:MM:SS" from "dd-mm-yyyy HH:MM:SS"
    for s in ['-', '/']:
        if value.find(s) > -1:
            separator = s
            break
    if separator:
        t = map(lambda x: int(x), value.split(separator))
        y, m, d = t[2], t[1], t[0]
        if d > 999:  # yyyy-mm-dd rather than dd-mm-yyyy
            y, d = d, y
    elif len(value) == 8:  # yyyymmdd
        y, m, d = int(value[:4]), int(value[4:6]), int(value[6:])
    else:
        raise ValueError('unrecognized date format: %r' % value)
    return date(y, m, d)


################
# Phone number #
################

# Characters permitted in a raw MSISDN: the digits plus a leading '+'.
MSISDN_ALLOW_CHARS = map(lambda x: str(x), range(10)) + ['+']


def get_msisdn(msisdn, country='+62'):
    """Normalize a phone number to international format.

    Returns None when the input is not a plausible MSISDN (bad
    characters, non-numeric, zero, or fewer than 7 significant digits).
    A leading '0' is replaced with *country* (default '+62').
    """
    for ch in msisdn:
        if ch not in MSISDN_ALLOW_CHARS:
            return
    try:
        i = int(msisdn)
    except ValueError:  # the bound-but-unused "err" is gone
        return
    if not i:
        return
    if len(str(i)) < 7:
        return
    if re.compile(r'^\+').search(msisdn):
        return msisdn
    if re.compile(r'^0').search(msisdn):
        return '%s%s' % (country, msisdn.lstrip('0'))


################
# Money format #
################

def should_int(value):
    """Return *value* as an int when it is integral, otherwise unchanged.

    NOTE: the old ``a and b or c`` idiom returned 0.0 (not 0) for zero
    input, because int 0 is falsy; a real conditional expression fixes
    that classic pitfall.
    """
    int_ = int(value)
    return int_ if int_ == value else value


def thousand(value, float_count=None):
    """Group *value* with thousands separators per the active locale.

    float_count defaults to 0 for int/long values and 2 otherwise.
    """
    if float_count is None:  # autodetection from the value's type
        if type(value) in (IntType, LongType):
            float_count = 0
        else:
            float_count = 2
    return locale.format('%%.%df' % float_count, value, True)


def money(value, float_count=None, currency=None):
    """Format *value* as a currency amount; negative amounts are shown
    in accounting style, wrapped in parentheses."""
    if value < 0:
        v = abs(value)
        format_ = '(%s)'
    else:
        v = value
        format_ = '%s'
    if currency is None:
        currency = locale.localeconv()['currency_symbol']
    s = ' '.join([currency, thousand(v, float_count)])
    return format_ % s


###########
# Pyramid #
###########

def get_settings():
    """Return the settings of the active Pyramid registry.

    (This module used to define get_settings twice with identical
    behavior; the duplicate has been removed.)
    """
    return get_current_registry().settings


def get_timezone():
    """Return the configured pytz timezone.

    NOTE(review): ``settings.timezone`` implies attribute access, but
    Pyramid settings are normally dict-like (settings['timezone']) --
    confirm against the application's setup code.
    """
    settings = get_settings()
    return pytz.timezone(settings.timezone)


########
# Time #
########

one_second = timedelta(1.0/24/60/60)

TimeZoneFile = '/etc/timezone'
if os.path.exists(TimeZoneFile):
    DefaultTimeZone = open(TimeZoneFile).read().strip()
else:
    DefaultTimeZone = 'Asia/Jakarta'


def as_timezone(tz_date):
    """Return *tz_date* converted to the configured local timezone,
    localizing it first when it is naive."""
    localtz = get_timezone()
    if not tz_date.tzinfo:
        tz_date = create_datetime(tz_date.year, tz_date.month, tz_date.day,
                                  tz_date.hour, tz_date.minute,
                                  tz_date.second, tz_date.microsecond)
    return tz_date.astimezone(localtz)


def create_datetime(year, month, day, hour=0, minute=0, second=0,
                    microsecond=0):
    """Build a timezone-aware datetime in the configured local timezone.

    Fixes two defects of the previous version: the minute parameter
    defaulted to 7 (an obvious typo for 0, which also skewed
    create_date's "midnight"), and the tzinfo was attached via the
    datetime constructor -- pytz documents that as yielding wrong (LMT)
    offsets; pytz zones must be applied with ``localize()``.
    """
    tz = get_timezone()
    return tz.localize(datetime(year, month, day, hour, minute, second,
                                microsecond))


def create_date(year, month, day):
    """Timezone-aware datetime at local midnight of the given date."""
    return create_datetime(year, month, day)


def create_now():
    """Current timezone-aware datetime in the configured timezone."""
    tz = get_timezone()
    return datetime.now(tz)


##############
# Fix Length #
##############

class FixLength(object):
    """Fixed-width record codec.

    *struct* is a sequence of (name[, size[, type]]) tuples; size
    defaults to 1 and type to 'A'.  Type 'N' fields are numeric and
    zero-defaulted, 'A' fields are alphanumeric and empty-defaulted.
    """

    def __init__(self, struct):
        self.set_struct(struct)

    def set_struct(self, struct):
        """Normalize *struct* into (name, size, type) triples and reset
        all field values to None."""
        self.struct = struct
        self.fields = {}
        new_struct = []
        for s in struct:
            name = s[0]
            size = s[1:] and s[1] or 1
            typ = s[2:] and s[2] or 'A'  # N: numeric, A: alphanumeric
            self.fields[name] = {'value': None, 'type': typ, 'size': size}
            new_struct.append((name, size, typ))
        self.struct = new_struct

    def set(self, name, value):
        self.fields[name]['value'] = value

    def get(self, name):
        return self.fields[name]['value']

    def __setitem__(self, name, value):
        self.set(name, value)

    def __getitem__(self, name):
        return self.get(name)

    def get_raw(self):
        """Serialize all fields into one fixed-width string.

        NOTE(review): the padding helpers ``right`` and ``left`` are not
        defined in this module -- confirm they are imported/provided
        elsewhere in the package.
        """
        s = ''
        for name, size, typ in self.struct:
            v = self.fields[name]['value']
            pad_func = typ == 'N' and right or left
            if typ == 'N':
                v = v or 0
                i = int(v)
                if v == i:  # drop a useless trailing .0
                    v = i
            else:
                v = v or ''
            s += pad_func(v, size)
        return s

    def set_raw(self, raw):
        """Populate fields by slicing *raw*; returns True on success,
        None when the input is too short."""
        awal = 0
        for t in self.struct:
            name = t[0]
            size = t[1:] and t[1] or 1
            akhir = awal + size
            value = raw[awal:akhir]
            if not value:
                return
            self.set(name, value)
            awal += size
        return True

    def from_dict(self, d):
        """Bulk-assign field values from mapping *d*."""
        for name in d:
            value = d[name]
            self.set(name, value)


############
# Database #
############

def split_tablename(tablename):
    """Split 'schema.table' into (schema, table); schema is None when
    no dot is present."""
    t = tablename.split('.')
    if t[1:]:
        schema = t[0]
        tablename = t[1]
    else:
        schema = None
    return schema, tablename
The federal government has once again missed the opportunity to facilitate a significant improvement in agricultural productivity, and a 500:1 return on investment, by failing to fund a specialty and minor use crop protection program. CropLife Australia CEO, Matthew Cossey said today, “The crop protection industry and the farming sector more broadly have been calling on the Australian Government for many years to provide base funding for a specialty crops and minor use program. Such a program is essential for maintaining Australia’s agricultural competitiveness, as the system in its current form is failing not only Australian agriculture, but the economy as a whole. “Enthusiastic rhetoric about the future of Australian agriculture has never been so plentiful. Commendably, this rhetoric has extended as far as the development of a National Food Plan and the Asian Century White Paper. Unfortunately, however, all of this promise and goodwill will evaporate without smart, targeted, early investment to assist farmers in accessing all the tools they need. In today’s budget, the government has ignored its own policy and failed to make a prudent fiscal decision to improve the nation’s food production capacity. “The government’s mandatory regulatory system for pesticides creates a market failure. This failure comes from the fact that the small volume of sales of products for specialty and minor uses does not offset the high costs associated with registering those products. A lack of registered pest management tools means that glass ceilings are placed on these potentially high profit crops. The way to remove this glass ceiling on specialty producers and deliver a bonus to the nation’s farming sector is for the Australian Government to establish and fund a specialty crops and minor use program. “An analysis of the US specialty crops program showed that every dollar spent by government in assisting specialty growers in the US returned more than $500 to the economy. 
The US model, which has been in place for more than 30 years, also facilitates greater investment by the private sector and as a result both US agriculture and taxpayers win. “Europe has also recognised the benefits of such an initiative and started its own program in the last few years. Those benefits include cheaper fruit and vegetables for the entire community. That is a big economic bang for small bucks. Not many current federal government programs could claim a 500 to one return to the economy for government expenditure. Australia has a market which is less than 10 per cent the size of both Europe and the US which means that such a program is even more important for our farmers. “Repeated failure to fund this program means that neither broad acre nor specialty crop producers in Australia have access to all necessary and internationally available pest management tools. As a result, Australian farmers are at a serious disadvantage to their international competitors. “As little as $40 million over four years would improve responsible chemical usage. It would mean that Australian farmers could consider a range of other high return produce and crops. It would significantly assist in addressing the challenges of weed and pest resistance problems. Such a program would also ensure that Australian food producers develop environmentally friendly, sustainable, integrated crop management systems through access to the latest chemistry. “This policy is not only necessary for removing productivity blocks on Australian farmers, it’s also an easy win and it’s time we had a commitment from both sides of politics to matching the rhetoric and making a small, smart investment that would have massive benefits for Australian farming,” concluded Mr Cossey.
# vim: set fileencoding=utf-8 :

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

from bkr.inttest.server.selenium import WebDriverTestCase
from bkr.inttest.server.webdriver_utils import login, logout, is_text_present
from bkr.inttest import data_setup, get_server_base, with_transaction
from bkr.inttest.assertions import assert_has_key_with_value
from bkr.server.model import Arch, System, OSMajor, SystemPermission, \
    SystemStatus
from turbogears.database import session
import pkg_resources
import unittest2 as unittest
from tempfile import NamedTemporaryFile
from decimal import Decimal

class CSVImportTest(WebDriverTestCase):
    """Browser-driven tests for Beaker's /csv/csv_import page.

    Each test uploads a small CSV document and then asserts either on the
    import-log table rendered by the page or on the database state (via a
    fresh turbogears session).  Import results are read from the first
    cell of the table with id "csv-import-log".
    """

    def setUp(self):
        # One freshly-created system (with a lab controller) per test.
        with session.begin():
            self.system = data_setup.create_system(
                lab_controller=data_setup.create_labcontroller())
        self.browser = self.get_browser()

    def import_csv(self, contents):
        """Write *contents* (bytes) to a temp file and upload it through
        the CSV-import form."""
        b = self.browser
        b.get(get_server_base() + 'csv/csv_import')
        csv_file = NamedTemporaryFile(prefix=self.__module__)
        csv_file.write(contents)
        csv_file.flush()
        b.find_element_by_name('csv_file').send_keys(csv_file.name)
        b.find_element_by_name('csv_file').submit()

    def test_system(self):
        """Importing system rows updates location/arch; bad FQDNs fail."""
        login(self.browser)
        orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,%s,Under my desk,ia64' % self.system.fqdn)
                .encode('utf8'))
        self.failUnless(is_text_present(self.browser, "No Errors"))
        with session.begin():
            session.refresh(self.system)
            self.assertEquals(self.system.location, u'Under my desk')
            self.assert_(Arch.by_name(u'ia64') in self.system.arch)
            self.assert_(self.system.date_modified > orig_date_modified)

        # attempting to import a system with no FQDN should fail
        # (note the adjacent string literals produce an empty fqdn field)
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,'',Under my desk,ia64').encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "Error importing line 2: "
            "System must have an associated FQDN")

        # attempting to import a system with an invalid FQDN should fail
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,invalid--fqdn,Under my desk,ia64').encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "Error importing line 2: "
            "Invalid FQDN for system: invalid--fqdn")

    #https://bugzilla.redhat.com/show_bug.cgi?id=987157
    def test_system_rename(self):
        """Renaming via the id column: invalid/unknown ids leave the
        system untouched; a valid rename bumps date_modified."""
        login(self.browser)
        # attempt to rename existing system to an invalid FQDN should keep
        # the system unmodified
        with session.begin():
            session.refresh(self.system)
            orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,id,fqdn,location,arch\n'
                u'system,%s,new--fqdn.name,%s,%s' % (self.system.id,
                self.system.location, self.system.arch[0])).encode('utf8'))
        with session.begin():
            session.refresh(self.system)
            self.assertEquals(self.system.date_modified, orig_date_modified)
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "Error importing line 2: "
            "Invalid FQDN for system: new--fqdn.name")

        # attempt to rename a non-existent system should fail
        orig_date_modified = self.system.date_modified
        non_existent_system_id = -1
        self.import_csv((u'csv_type,id,fqdn,location,arch\n'
                u'system,%s,new--fqdn.name,%s,%s' % (non_existent_system_id,
                self.system.location, self.system.arch[0])).encode('utf8'))
        with session.begin():
            session.refresh(self.system)
            self.assertEquals(self.system.date_modified, orig_date_modified)
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "Error importing line 2: "
            "Non-existent system id")

        # successfully rename existing system
        orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,id,fqdn,location,arch\n'
                u'system,%s,new.fqdn.name,Under my desk,ia64'
                % self.system.id).encode('utf8'))
        with session.begin():
            session.refresh(self.system)
            self.assertGreater(self.system.date_modified, orig_date_modified)
            self.assertEquals(self.system.fqdn, 'new.fqdn.name')

    def test_grants_view_permission_to_everybody_by_default(self):
        """A newly imported system's access policy is world-viewable."""
        fqdn = data_setup.unique_name(u'test-csv-import%s.example.invalid')
        b = self.browser
        login(b)
        self.import_csv((u'csv_type,fqdn\n'
                u'system,%s' % fqdn).encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            'No Errors')
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            self.assertTrue(system.custom_access_policy.grants_everybody(
                SystemPermission.view))

    def test_system_secret_field(self):
        """secret=True revokes the everybody-view grant; False restores it."""
        login(self.browser)
        self.import_csv((u'csv_type,fqdn,secret\n'
                u'system,%s,True' % self.system.fqdn)
                .encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            'No Errors')
        with session.begin():
            session.refresh(self.system.custom_access_policy)
            self.assertFalse(self.system.custom_access_policy.grants_everybody(
                SystemPermission.view))
        self.import_csv((u'csv_type,fqdn,secret\n'
                u'system,%s,False' % self.system.fqdn)
                .encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            'No Errors')
        with session.begin():
            session.refresh(self.system.custom_access_policy)
            self.assertTrue(self.system.custom_access_policy.grants_everybody(
                SystemPermission.view))

    def test_keyvalue(self):
        """Key/value rows survive a round trip, including non-ASCII text."""
        login(self.browser)
        orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
                u'keyvalue,%s,COMMENT,UTF 8 –,False' % self.system.fqdn)
                .encode('utf8'))
        self.failUnless(is_text_present(self.browser, "No Errors"))
        with session.begin():
            session.refresh(self.system)
            assert_has_key_with_value(self.system, 'COMMENT', u'UTF 8 –')
            self.assert_(self.system.date_modified > orig_date_modified)

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_keyvalue_non_existent_system_valid(self):
        """Importing a keyvalue row auto-creates the referenced system."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
                u'keyvalue,%s,COMMENT,acomment,False' % fqdn)
                .encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "No Errors")
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            assert_has_key_with_value(system, 'COMMENT', u'acomment')

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_keyvalue_non_existent_system_valid_invalid(self):
        """A later invalid row does not undo an earlier valid one."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        self.import_csv((u'csv_type,fqdn,key,key_value,deleted\n'
                u'keyvalue,%s,COMMENT,acomment,False\n'
                u'keyvalue,%s,COMMENT,acomment,False' % (fqdn, '--'+fqdn))
                .encode('utf8'))
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            "Error importing line 3: "
            "Invalid FQDN for system: --%s" % fqdn)
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            assert_has_key_with_value(system, 'COMMENT', u'acomment')

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_labinfo_non_existent_system(self):
        """labinfo rows also auto-create the referenced system."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        self.import_csv((u'csv_type,fqdn,orig_cost,curr_cost,dimensions,weight,wattage,cooling\n'
                u'labinfo,%s,10000,10000,3000,4000.0,5001.0,6000.0' % fqdn)
                .encode('utf8'))
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            self.assertEqual(system.labinfo.orig_cost, Decimal('10000'))
            self.assertEqual(system.labinfo.curr_cost, Decimal('10000'))
            self.assertEqual(system.labinfo.dimensions, u'3000')
            self.assertEqual(system.labinfo.weight, 4000.0)
            self.assertEqual(system.labinfo.wattage, 5001.0)
            self.assertEqual(system.labinfo.cooling, 6000.0)

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_power_non_existent_system(self):
        """power rows also auto-create the referenced system."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        self.import_csv((u'csv_type,fqdn,power_address,power_user,power_password,power_id,power_type\n'
                u'power,%s,qemu+tcp://%s,admin,admin,%s,virsh' % ((fqdn, )*3))
                .encode('utf8'))
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            self.assertEqual(system.power.power_id, fqdn)
            self.assertEqual(system.power.power_user, 'admin')
            self.assertEqual(system.power.power_address, 'qemu+tcp://' + fqdn)

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_excluded_family_non_existent_system(self):
        """exclude rows also auto-create the referenced system."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        with session.begin():
            osmajor = OSMajor.lazy_create(osmajor=u'MyEnterpriseLinux')
        self.import_csv((u'csv_type,fqdn,arch,family,update,excluded\n'
                u'exclude,%s,x86_64,MyEnterpriseLinux,,True' % fqdn)
                .encode('utf8'))
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            self.assertEquals(system.excluded_osmajor[0].osmajor_id,
                osmajor.id)

    #https://bugzilla.redhat.com/show_bug.cgi?id=1058549
    def test_install_options_non_existent_system(self):
        """install rows also auto-create the referenced system."""
        login(self.browser)
        fqdn = data_setup.unique_name('system%s.idonot.exist')
        with session.begin():
            distro_tree = data_setup.create_distro_tree(
                osmajor='MyEnterpriseLinux', arch=u'x86_64')
        self.import_csv((u'csv_type,fqdn,arch,family,update,ks_meta,kernel_options,kernel_options_post\n'
                u'install,%s,x86_64,MyEnterpriseLinux,,mode=cmdline,,console=ttyS0' % fqdn)
                .encode('utf8'))
        with session.begin():
            system = System.query.filter(System.fqdn == fqdn).one()
            arch = Arch.by_name(u'x86_64')
            osmajor = OSMajor.by_name(u'MyEnterpriseLinux')
            p = system.provisions[arch].provision_families[osmajor]
            self.assertEquals(p.ks_meta, u'mode=cmdline')
            self.assertEquals(p.kernel_options_post, u'console=ttyS0')

    # https://bugzilla.redhat.com/show_bug.cgi?id=787519
    def test_no_quotes(self):
        # Upload a pre-canned fixture file shipped next to this module.
        with session.begin():
            data_setup.create_labcontroller(fqdn=u'imhoff.bkr')
        b = self.browser
        login(b)
        b.get(get_server_base() + 'csv/csv_import')
        b.find_element_by_name('csv_file').send_keys(
            pkg_resources.resource_filename(self.__module__, 'bz787519.csv'))
        b.find_element_by_name('csv_file').submit()
        self.failUnless(is_text_present(self.browser, "No Errors"))

    # https://bugzilla.redhat.com/show_bug.cgi?id=802842
    def test_doubled_quotes(self):
        # CSV-style doubled quotes inside kernel options must be unescaped.
        with session.begin():
            system = data_setup.create_system(
                fqdn=u'mymainframe.funtimes.invalid', arch=u's390x')
            OSMajor.lazy_create(osmajor=u'RedHatEnterpriseLinux7')
        b = self.browser
        login(b)
        b.get(get_server_base() + 'csv/csv_import')
        b.find_element_by_name('csv_file').send_keys(
            pkg_resources.resource_filename(self.__module__, 'bz802842.csv'))
        b.find_element_by_name('csv_file').submit()
        self.failUnless(is_text_present(self.browser, "No Errors"))
        with session.begin():
            session.refresh(system)
            self.assertEquals(system.provisions[Arch.by_name(u's390x')]\
                .provision_families[OSMajor.by_name(u'RedHatEnterpriseLinux7')]\
                .kernel_options,
                'rd.znet="qeth,0.0.8000,0.0.8001,0.0.8002,layer2=1,portname=lol,portno=0" '
                'ip=1.2.3.4::1.2.3.4:255.255.248.0::eth0:none MTU=1500 nameserver=1.2.3.4 '
                'DASD=20A1,21A1,22A1,23A1 MACADDR=02:DE:AD:BE:EF:16 '
                '!LAYER2 !DNS !PORTNO !IPADDR !GATEWAY !HOSTNAME !NETMASK ')

    def test_missing_field(self):
        """Rows with fewer fields than the header are rejected."""
        login(self.browser)
        orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,%s,Under my desk' % self.system.fqdn)
                .encode('utf8'))
        self.assert_(is_text_present(self.browser,
                'Missing fields on line 2: arch'))

    def test_extraneous_field(self):
        """Rows with more fields than the header are rejected."""
        login(self.browser)
        orig_date_modified = self.system.date_modified
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,%s,Under my desk,ppc64,what is this field doing here'
                % self.system.fqdn)
                .encode('utf8'))
        self.assert_(is_text_present(self.browser,
                'Too many fields on line 2 (expecting 4)'))

    # https://bugzilla.redhat.com/show_bug.cgi?id=972411
    def test_malformed(self):
        """A NULL byte in the upload yields a parse error, not a crash."""
        login(self.browser)
        self.import_csv('gar\x00bage')
        self.assertEquals(self.browser.find_element_by_xpath(
            '//table[@id="csv-import-log"]//td').text,
            'Error parsing CSV file: line contains NULL byte')

    # https://bugzilla.redhat.com/show_bug.cgi?id=1085047
    def test_rolls_back_on_error(self):
        # The bug was that a row contained invalid data, which meant it was
        # being discarded, but changes to system_status_duration were
        # nevertheless being committed.
        # To reproduce, we upload a CSV which changes 'status' successfully
        # (thereby causing a row to be added to system_status_duration) but
        # then errors out on 'secret' which does not accept empty string.
        with session.begin():
            self.assertEquals(len(self.system.status_durations), 1)
            self.assertEquals(self.system.status_durations[0].status,
                    SystemStatus.automated)
            self.assertEquals(self.system.status_durations[0].finish_time,
                    None)
        login(self.browser)
        self.import_csv((u'csv_type,id,status,secret\n'
                u'system,%s,Manual,\n' % self.system.id).encode('utf8'))
        import_log = self.browser.find_element_by_xpath(
                '//table[@id="csv-import-log"]//td').text
        self.assertIn('Invalid secret None', import_log)
        with session.begin():
            session.expire_all()
            self.assertEquals(self.system.status, SystemStatus.automated)
            self.assertEquals(len(self.system.status_durations), 1)
            self.assertEquals(self.system.status_durations[0].finish_time,
                    None)

    #https://bugzilla.redhat.com/show_bug.cgi?id=1085238
    def test_error_on_empty_csv(self):
        """A header-only upload is reported as an empty CSV file."""
        login(self.browser)
        self.import_csv((u'csv_type,fqdn,location,arch\n').encode('utf8'))
        import_log = self.browser.find_element_by_xpath(
                '//table[@id="csv-import-log"]//td').text
        self.assertIn('Empty CSV file supplied', import_log)

    def test_system_unicode(self):
        """Non-ASCII field values survive the UTF-8 upload round trip."""
        login(self.browser)
        self.import_csv((u'csv_type,fqdn,location,arch\n'
                u'system,%s,在我的办公桌,ia64' % self.system.fqdn) \
                .encode('utf8'))
        self.failUnless(is_text_present(self.browser, "No Errors"))
        with session.begin():
            session.refresh(self.system)
            self.assertEquals(self.system.location, u'在我的办公桌')

    def test_system_pools_import(self):
        """system_pool rows add/remove pool membership; unknown pools
        are reported as errors."""
        with session.begin():
            system = data_setup.create_system()
            pool1 = data_setup.create_system_pool()
            pool2 = data_setup.create_system_pool()
        login(self.browser)
        self.import_csv((u'csv_type,fqdn,pool,deleted\n'
                        u'system_pool,%s,%s,False\n'
                        u'system_pool,%s,%s,False'%(system.fqdn, pool1.name,
                                                    system.fqdn, pool2.name)) \
                        .encode('utf8'))
        self.failUnless(is_text_present(self.browser, 'No Errors'))
        with session.begin():
            session.refresh(system)
            self.assertEquals([pool1.name, pool2.name],
                              [pool.name for pool in system.pools])
        # test deletion
        self.import_csv((u'csv_type,fqdn,pool,deleted\n'
                        u'system_pool,%s,%s,True' % (system.fqdn, pool2.name)) \
                        .encode('utf8'))
        self.failUnless(is_text_present(self.browser, 'No Errors'))
        with session.begin():
            session.refresh(system)
            self.assertNotIn(pool2.name,
                             [pool.name for pool in system.pools])
        # Attempting to add a system to a Non existent pool should throw an error
        self.import_csv((u'csv_type,fqdn,pool,deleted\n'
                        u'system_pool,%s,poolpool,True' % system.fqdn) \
                        .encode('utf8'))
        self.assertTrue(is_text_present(self.browser,
                                        'poolpool: pool does not exist'))
Here in the beautiful Rogue Valley, everyone wants clean rivers and streams. Our waterways provide habitat for fish and wildlife, recreation, and a source of drinking water. Unfortunately, recent pesticide monitoring efforts (by the Middle Rogue Pesticide Stewardship Partnership) have detected various pesticides in Rogue River tributaries and groundwater supplies. Glyphosate, the active ingredient in RoundUp, was detected in almost half the water samples collected. Pesticides are any substance that is used to combat insects or other organisms that are harmful to cultivated plants or to animals. On average, suburban lawns and gardens receive more pesticides than any other land use type, including agriculture! When used improperly, pesticides can harm humans and pets, contaminate drinking water supplies, or negatively affect native plants and wildlife. Salmon are especially sensitive to pesticides. Keep reading to find out more information about how you can reduce your pesticide use. Help us keep pesticides out of our waterways. Take the Pledge to Reduce Chemicals In Our Streams! These people have also agreed to follow the recommendations of the Pesticide Pledge to reduce the amount of pesticides that enter our streams. We all need to work together to ensure the health of our watershed by reducing pesticide use. Rain and wind can spread pesticides, and other chemicals used on your lawns and gardens, into local streams, even if you do not live next to a waterbody. You can help prevent pesticides from reaching our waterways by pledging to reduce the use of pesticides on your yard and property! As a resident in the Rogue River watershed that cares about protecting water quality for people, fish, wildlife, drinking water and pets - here is your chance to take the Pesticides Pledge! There are many surprisingly easy ways (to part with or use pesticides) that still allow you to have a great looking lawn and garden. Here are a few practices that we recommend. 
Place the right plants in the right place. Site characteristics such as light, water, and soil requirements may reduce the need for pesticides and watering, as well as lessen the chance of plant mortality. Think before you buy. Always identify the source of your problem, then decide what you want to do about it after considering the risks and benefits of the options available. Choose the safest and most appropriate product for your pest problem. Hand treatments or Integrated Pest Management (IPM) may be the safer and more economical choice. Follow the label and use only what you need. The pesticide label is your guide to using pesticides safely and effectively. The directions on the label are there primarily to help you achieve "maximum" benefits - the pest control that you desire - with "minimum" risk. Read the label before you buy the pesticide and every time you use the pesticide. Spray in safe conditions. Air currents and rain storms can carry pesticides off your property onto your neighbor's land, into groundwater aquifers, and local waterways. Keep pesticides where they belong by not spraying in windy conditions and before storms. Store and dispose of pesticides and empty containers properly. Follow storage and disposal instructions on the label. Keep pesticides in the original containers and away from areas prone to flooding. Never empty leftover pesticides into the sink, toilet, or street drain. Municipal systems may not be equipped to remove all pesticide residues. Extra pesticides and empty containers can be recycled at community household hazardous waste collection events. Visit Rogue Disposal's website for more details.
#!/usr/bin/python """ Abstract Base Class for building plots """ ## MIT License ## ## Copyright (c) 2017, krishna bhogaonker ## Permission is hereby granted, free of charge, to any person obtaining a ## copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: ## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. ## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
__author__ = 'krishna bhogaonker'
__copyright__ = 'copyright '
__credits__ = ['krishna bhogaonker']
__license__ = "MIT"
__version__ = ''
__maintainer__ = 'krishna bhogaonker'
__email__ = 'cyclotomiq@gmail.com'
__status__ = ''

import abc

from bokeh.plotting import figure, output_file, show

from .PlotSettingsOverall import PlotSettingsOverall

# Default plot dimensions in pixels.
height = 800
width = 800


class abcComparisonPlot(metaclass=abc.ABCMeta):
    """Abstract base class for plots that compare a collection of model results.

    Concrete subclasses implement the ``helper_*`` hooks to assemble plot
    coordinates and ``execute_plot`` to render them.  Shared state:

    * ``plot`` -- the rendered figure; ``None`` until ``execute_plot`` runs.
    * ``settings`` -- plot configuration; ``helper_duration`` reads
      ``settings['year_offset']`` from it.
    * ``comparison`` -- the sequence of model results being compared;
      each element is expected to expose ``duration`` and (for the first
      element) ``mgmt_data`` — assumed from usage below, confirm with callers.
    * ``coordinates`` -- dict of computed coordinate arrays for plotting.
    """

    def __init__(self, model_results, settings=None):
        """Store the model results to compare and optional plot settings."""
        self.plot = None
        self.settings = settings
        self.comparison = model_results
        self.coordinates = {}

    # ------------------------------------------------------------------
    # Hooks that concrete plot classes must provide.
    # ------------------------------------------------------------------

    @abc.abstractmethod
    def helper_overall_data(self):
        """Collect the overall (aggregate) data series."""

    @abc.abstractmethod
    def helper_level_data(self):
        """Collect the per-level data series."""

    @abc.abstractmethod
    def helper_overall_empirical_upper_bound(self):
        """Compute the empirical upper bound for the overall series."""

    @abc.abstractmethod
    def helper_overall_empirical_lower_bound(self):
        """Compute the empirical lower bound for the overall series."""

    @abc.abstractmethod
    def helper_ground_truth_mgmt(self):
        """Collect the ground-truth management data series."""

    @abc.abstractmethod
    def helper_build_overall_plot_coordinates(self):
        """Assemble the final coordinate dict for the overall plot."""

    @abc.abstractmethod
    def execute_plot(self):
        """Render the plot from the assembled coordinates."""

    @abc.abstractmethod
    def helper_build_settings(self):
        """Derive/validate the settings this plot requires."""

    # ------------------------------------------------------------------
    # Shared concrete helpers.
    # ------------------------------------------------------------------

    def helper_duration(self):
        """Return the common x-axis years, trimmed by the configured offset.

        Uses the shortest duration across all compared models so every
        series covers the full x range.
        """
        shortest = min(model.duration for model in self.comparison)
        years = list(range(shortest))
        return years[self.settings['year_offset']:]

    def helper_original_data_mgmt(self, field):
        """Return column *field* of the first model's management data."""
        first_model = self.comparison[0]
        return first_model.mgmt_data.get_field(field)

    def helper_indicate_number_of_models(self):
        """Record how many models are being compared."""
        self.coordinates['number_of_models'] = len(self.comparison)

    def helper_year_duration(self):
        """Cache the x-axis values under the 'xval' coordinate key."""
        self.coordinates['xval'] = self.helper_duration()
Tag Archive | "galea plea deal" Dr. Galea, who has been linked to Tiger Woods and New York Yankees star Alex Rodriguez, was indicted on five counts, and according to the New York Daily News, four of those charges were eliminated because he didn’t force authorities to go through the time and expense of trying his case. As part of the deal, Dr. Galea has agreed to forfeit about $300,000 that he accumulated during visits with clients in their homes, offices or hotel rooms in the U.S. During the hearing the doctor’s attorney told the court that most of the $800,000 in fees his client made was used to compensate him for time away from his medical practice in Canada. Galea is scheduled to be sentenced in October and under federal guidelines he could receive up to 24 months in prison. According to lead U.S. Attorney Paul J. Campana, the feds will consider a shorter jail stint based on the extent of Galea’s cooperation throughout this investigation. Back in September 2009, Galea’s employee Mary Ann Catalano was caught carrying HGH and other drugs during a trip into the U.S. She then lied to authorities regarding the purpose of her trip, then later admitted she was going to assist Dr. Galea treat a patient. Galea has no license to practice medicine in the United States but is accused of treating more than a dozen athletes here. Since this plea deal doesn’t include admissions to treating patients with HGH or any other banned substances, yesterday’s court news is considered a victory for Woods, A-Rod and others on the doctor’s list. In addition to those two, Dr. Galea’s roster of patients included NFL players Takeo Spikes and Jamal Lewis and MLB players Jose Reyes and Carlos Beltran of the N.Y. Mets.
# -*- coding: utf-8 -*- import network import sys import re import xbmc import xbmcgui import xbmcplugin import urlresolver import CommonFunctions import kodi_func import os import codecs common = CommonFunctions common.plugin = "Filmas-Latviski-1.0.0" mySourceId = 4 mainURL = 'http://www.fof.lv' #indexed search def SearchRaw(searchStr): result = [] if searchStr == False or len(searchStr) == 0: return result moviesList = [] moviesList = LoadIndexedFile( kodi_func.home + "/resources/fof_lv_movieIndexes.txt" ) print moviesList for movie in moviesList: if searchStr in movie['searchable_title']: result.append({ 'title': movie['title'].replace('<img src="http://fof.lv/lat-sub-icon.png" style="position: relative; left: 10px; top: 2px;">', '').encode('utf-8'), 'url': movie['url'], 'thumb': movie['thumb'], 'state': 'state_play', 'source_id': mySourceId }) return result #Search in fof where it uses their shitty search function, which doesn't fucking work at all def SearchRaw_old(searchStr): result = [] html = network.getHTML( "http://www.fof.lv/search/?q=" + searchStr) allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" }) if len(allEntries) == 0: allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" }) # print allEntries infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" }) moviesURLs = common.parseDOM(infoTd, "a", ret = "href") moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style") if len(moviesThumbnailURLsList) == 0: moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src") moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} ) # moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} ) print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, 
moviesTitleList # moviesTitleList = common.parseDOM(moviesList, "h2") # moviesThumbnailURLsList = common.parseDOM(moviesList, "img", attrs = { "class": "img-thumbnail" }, ret = "src") # moviesURLs = common.parseDOM(moviesList, "a", ret = "href") # print moviesThumbnailURLsList for i in range(0, len(moviesURLs)): thumb = moviesThumbnailURLsList[i].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/") if network.exists( mainURL+thumb ) == False: thumb = thumb.replace(".jpg", ".png") # title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i]) title = moviesTitleList[i].partition("<br>")[0].replace("<b>","").replace("</b>", "") if not moviesURLs[i].startswith("http://"): movieURL = mainURL + moviesURLs[i] else: movieURL = moviesURLs[i] result.append({ 'title':title.encode('utf-8'), 'url': movieURL, 'thumb': mainURL+thumb, 'source_id': mySourceId }) return result def Search(searchStr = None): if searchStr == None: text = kodi_func.showkeyboard('', u'Meklēt filmu') else: text = searchStr print "Search string: " + str(text) results = SearchRaw(text) for r in results: kodi_func.addDir(r['title'], r['url'], 'state_play', r['thumb'], source_id=r['source_id']) def HomeNavigation(): if not os.path.isfile( kodi_func.home + "/resources/fof_lv_movieIndexes.txt" ): IndexMovies( 'http://www.fof.lv/?page', 'fof_lv_movieIndexes.txt' ) print "Opening fof.lv" url = mainURL html = network.getHTML(url) # print 'html: ' + html nav_links_list = common.parseDOM(html, "div", attrs = { "class": "categories" }) nav_links = common.parseDOM(nav_links_list, "a", ret = "href") nav_links_name = common.parseDOM(nav_links_list, "a") kodi_func.addDir('Meklēt', '', 'state_search', '%s/meklet2.png'% kodi_func.iconpath, source_id=mySourceId) kodi_func.addDir('Jaunākās Filmas', 'http://www.fof.lv/?page1', 'state_movies', kodi_func.GetCategoryImage('jaunakas'), source_id=mySourceId) kodi_func.addDir('Populārākās', 
'http://www.fof.lv/index/popularakas_filmas/0-13', 'state_movies', kodi_func.GetCategoryImage('skatitakas'), source_id=mySourceId) kodi_func.addDir('Vērtētākās', 'http://www.fof.lv/index/vertetakas_filmas/0-16', 'state_movies', kodi_func.GetCategoryImage('vertetakas'), source_id=mySourceId) # pagirasList = u'https://openload.co/embed/dLuET3ML86E/Deadpool.%28Dedpuls%29.2016.720p.LAT.THEVIDEO.LV.mkv.mp4' # link = urlresolver.resolve(pagirasList) # addDir('Dedpūls', pagirasList, 'state_play', None) # addLink("Dedpūls", link.encode('utf-8'), None) # print nav_links # print nav_links_name for i in range(0, len(nav_links)): if kodi_func.isLinkUseful(nav_links[i]): # print mainURL + nav_links[i] kodi_func.addDir(nav_links_name[i].encode('utf-8'), nav_links[i], 'state_movies', kodi_func.GetCategoryImage(nav_links_name[i]), source_id=mySourceId) def Movies(url, page=1): print "url: " + url if '?page1' in url: html = network.getHTML(mainURL+"/?page"+str(page)) else: html = network.getHTML(url+"-"+str(page)) # html = network.getHTML(url) # print "html " + html allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" }) if len(allEntries) == 0: allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" }) # print allEntries infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" }) moviesURLs = common.parseDOM(infoTd, "a", ret = "href") moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style") if len(moviesThumbnailURLsList) == 0: moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src") moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} ) # moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} ) print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, moviesTitleList 
# moviesTitleList = common.parseDOM(moviesList, "h2") # moviesThumbnailURLsList = common.parseDOM(moviesList, "img", attrs = { "class": "img-thumbnail" }, ret = "src") # moviesURLs = common.parseDOM(moviesList, "a", ret = "href") # print moviesThumbnailURLsList for i in range(0, len(moviesURLs)): thumb = moviesThumbnailURLsList[i].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/") if network.exists( mainURL+thumb ) == False: thumb = thumb.replace(".jpg", ".png") # title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i]) title = moviesTitleList[i].partition("<br>")[0].replace('<img src="http://fof.lv/lat-sub-icon.png" style="position: relative; left: 10px; top: 2px;">', '') if not moviesURLs[i].startswith("http://"): movieURL = mainURL + moviesURLs[i] else: movieURL = moviesURLs[i] kodi_func.addDir(title.encode('utf-8'), movieURL, 'state_play', mainURL+thumb, source_id=mySourceId) if len(moviesURLs) >= 10 and url != 'http://www.fof.lv/index/popularakas_filmas/0-13' and url != 'http://www.fof.lv/index/vertetakas_filmas/0-16': kodi_func.addDir("Nākamā Lapa >>", url , 'state_movies', '%s/next.png'% kodi_func.iconpath, str(int(page) + 1), source_id=mySourceId) def PlayMovie(url, title, picture): print "url: " + url html = network.getHTML(url) # print "html: " + html mainMovieCol = common.parseDOM(html, "div", attrs = { "id": "movie"} ) print mainMovieCol video = common.parseDOM(mainMovieCol, "iframe", ret="src")[0] try: link = urlresolver.resolve(video) if link != False: kodi_func.addLink(title.decode('utf-8').encode('utf-8') + " - Latviski", link.encode('utf-8'), picture) elif kodi_func.isVideoFormat(video.split(".")[-1]): kodi_func.addLink(title.decode('utf-8').encode('utf-8') + " - Latviski", video, picture) print link except: xbmcgui.Dialog().ok("Opā!", "Nevarēju dekodēt strīmu", "Iespējams ka fails vairs neeksistē", "Tāda dzīve, mēģini citi avotu") # This website doesn't have a proper search function, so we 
must first index it # These function are unique to this source def LoadIndexedFile(file): f = codecs.open(file, "r", "utf-8") content = f.read() movies = content.split("\n") result = [] for movie in movies: params = movie.split("|") if len(params) == 3: result.append({ 'title': params[0], 'url': params[1].decode('utf-8'), 'thumb': params[2].decode('utf-8'), 'searchable_title': kodi_func.MakeSearchableString(params[0]) }) else: print "Something wrong with this movie:", movie return result def IndexMovies( baseUrl, fileName ): progress_dialog = xbmcgui.DialogProgress() progress_dialog.create("Indeksējam fof.lv") currentPage = 1 url = baseUrl + str(currentPage) html = network.getHTML(url) allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" }) if len(allEntries) == 0: allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" }) # print allEntries infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" }) moviesURLs = common.parseDOM(infoTd, "a", ret = "href") moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style") if len(moviesThumbnailURLsList) == 0: moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src") moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} ) # moviesYearList = common.parseDOM(infoTd, "div", attrs = {"style": "width: 100px; height: 18px; background: url(http://www.fom.ucoz.lv/jauns_img/entry_year.png) no-repeat; margin: 0px auto; padding-top: 2px;"} ) print allEntries, infoTd, moviesURLs, moviesThumbnailURLsList, moviesTitleList indexed = 0 # movieEntriesList = common.parseDOM( html, "ul", attrs = { "id": "uEntriesList" }) # screenList = common.parseDOM( movieEntriesList, "div", attrs = {"class": "ve-screen"}) # movieUrls = common.parseDOM(screenList, "a", ret = "href") # print movieUrls, len(movieUrls) movieURLIndex = 0 localFile = kodi_func.home + "/resources/"+fileName # 
xbmc.translatePath('special://temp/'+fileName ) temp = codecs.open( localFile, 'w', "utf-8") movieIndexes = [] movieEntries = 370 for indexed in range(0, int(movieEntries)): if movieURLIndex == len(moviesURLs): break progress = int(float((float(indexed)/int(movieEntries))*100)) # print "Progress: " + str(progress) progress_dialog.update( progress , "Lūdzu uzgaidi...", "Indeksējam fof.lv Filmas ", "Atlicis: " + str(int(movieEntries) - indexed) ) if (progress_dialog.iscanceled()): return thumb = moviesThumbnailURLsList[movieURLIndex].replace("); width: 80px; height: 100px;", "").replace("background:url(", "").replace("/s","/") print "thumb: " + thumb if network.exists( mainURL+thumb ) == False: thumb = thumb.replace(".jpg", ".png") # title = re.sub(r'<br>[\w <>="-:\d;#&\\\\]*', '', moviesTitleList[i]) title = moviesTitleList[movieURLIndex].partition("<br>")[0] if not moviesURLs[movieURLIndex].startswith("http://"): movieURL = mainURL + moviesURLs[movieURLIndex] else: movieURL = moviesURLs[i] print title.encode('utf-8') temp.write( title +"|" +movieURL +"|" +mainURL+thumb +"\n" ) movieIndexes.append( {'title': title, 'url': movieURL, 'thumb': mainURL+thumb} ) movieURLIndex += 1 if len(moviesURLs) == movieURLIndex: currentPage += 1 html = network.getHTML(baseUrl+str(currentPage)) allEntries = common.parseDOM(html, "div", attrs = { "id": "allEntries" }) if len(allEntries) == 0: allEntries = common.parseDOM(html, "table", attrs = { "id": "entry_table" }) # print allEntries infoTd = common.parseDOM(allEntries, "td", attrs = { "class": "info" }) moviesURLs = common.parseDOM(infoTd, "a", ret = "href") moviesThumbnailURLsList = common.parseDOM(allEntries, "td", ret = "style") if len(moviesThumbnailURLsList) == 0: moviesThumbnailURLsList = common.parseDOM(allEntries, "img", attrs = { "width": "80", "height": "100" }, ret = "src") moviesTitleList = common.parseDOM(infoTd, "button", attrs = {"class": "entry_button"} ) # print movieUrls, len(movieUrls) movieURLIndex = 0 
temp.close() return movieIndexes
Nordic Institute for Studies in Innovation, Research and Education (NIFU). The main conclusion of the report is that we find relatively stable differences between the Nordic universities, university colleges and university hospitals. We find different research profiles and specialisations, and we find institutions with different volumes of research activity. By describing these differences, we want to bring attention to dimensions that one-dimensional global university rankings cannot capture. Some Nordic higher education institutions score on a very high international level with regard to citation impact and shares of highly cited papers, at least in some of the major areas of research, while most Nordic institutions do not, although the majority of them perform above the world average. “World class” research is being conducted at a few Nordic institutions, but not at most of them.
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Authors:
# Michael Hale Ligh <michael.ligh@mnin.org>
# Michael Cohen <scudette@google.com>
#
# Contributors/References:
## Based on sslkeyfinder: http://www.trapkit.de/research/sslkeyfinder/
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

import os

# M2Crypto is optional: without it hits are still reported, just without
# parsed subject/key details (see the `if X509:` / `if RSA:` guards below).
try:
    from M2Crypto import X509, RSA
except ImportError:
    X509 = RSA = None

from rekall import plugin
from rekall import scan
from rekall import testlib
from rekall import utils
from rekall.plugins import core
from rekall.plugins.windows import common
from rekall.plugins.windows import vadinfo
from rekall.plugins.overlays import basic


class CertScanner(scan.BaseScanner):
    """A scanner for certificate ASN.1 objects.

    Yara rules for the two ASN.1 encoded objects we are looking for:

    'x509' : 'rule x509 {
       strings: $a = {30 82 ?? ?? 30 82 ?? ??} condition: $a
       }',

    'pkcs' : 'rule pkcs {
       strings: $a = {30 82 ?? ?? 02 01 00} condition: $a
       }',

    These rules are very simple, and so we don't really use Yara for this -
    it's faster to just scan directly for the common 2-byte prefix.
    """

    # Both patterns above begin with the ASN.1 SEQUENCE tag 30 82.
    checks = [('StringCheck', dict(needle="\x30\x82"))]

    def scan(self, offset=0, maxlen=None):
        # Yields (hit_offset, "X509"|"RSA", der_bytes, description_or_None)
        # for every candidate found by the base StringCheck scan.
        for hit in super(CertScanner, self).scan(offset=offset, maxlen=maxlen):
            # Bytes 4-6 disambiguate the two rules: an x509 cert repeats
            # the 30 82 sequence tag there, a pkcs key has 02 01 00.
            signature = self.address_space.read(hit + 4, 3)
            # Bytes 2-3 of the object are its big-endian payload length.
            size = self.profile.Object(
                "unsigned be short", offset=hit+2, vm=self.address_space)
            description = None

            if signature.startswith("\x30\x82"):
                # size + 4 covers the 4-byte tag/length header too.
                data = self.address_space.read(hit, size + 4)
                if X509:
                    try:
                        cert = X509.load_cert_der_string(data)
                        description = utils.SmartStr(cert.get_subject())
                    except X509.X509Error:
                        # Not actually a parseable cert; still report raw hit.
                        pass

                yield hit, "X509", data, description

            elif signature.startswith("\x02\x01\x00"):
                data = self.address_space.read(hit, size + 4)
                if RSA:
                    try:
                        # Wrap the DER blob in PEM armor so M2Crypto can
                        # load it, then verify the key's consistency.
                        pem = ("-----BEGIN RSA PRIVATE KEY-----\n" +
                               data.encode("base64") +
                               "-----END RSA PRIVATE KEY-----")
                        key = RSA.load_key_string(pem)
                        description = "Verified: %s" % key.check_key()
                    except Exception:
                        # Best-effort verification only.
                        pass

                yield hit, "RSA", data, description


class CertScan(core.DirectoryDumperMixin, plugin.PhysicalASMixin,
               plugin.Command):
    """Dump RSA private and public SSL keys from the physical address space."""

    __name = "certscan"

    # We can just display the certs instead of dumping them.
    dump_dir_optional = True
    default_dump_dir = None

    def render(self, renderer):
        # Filename column only appears when a dump directory was given.
        headers = [("Address", "address", "[addrpad]"),
                   ("Type", "type", "10"),
                   ("Length", "length", "10")]

        if self.dump_dir:
            headers.append(("Filename", "filename", "20"))

        headers.append(("Description", "description", ""))

        renderer.table_header(headers)

        # Scan raw physical memory; a 32-bit profile is sufficient since we
        # only need primitive big-endian integer objects from it.
        scanner = CertScanner(
            address_space=self.physical_address_space,
            session=self.session,
            profile=basic.Profile32Bits(session=self.session))

        for hit, type, data, description in scanner.scan():
            args = [hit, type, len(data)]
            if self.dump_dir:
                # One DER file per hit, named by type and physical offset.
                filename = "%s.%08X.der" % (type, hit)
                with renderer.open(directory=self.dump_dir,
                                   filename=filename,
                                   mode="wb") as fd:
                    fd.write(data)

                args.append(filename)

            args.append(description)
            renderer.table_row(*args)


class TestCertScan(testlib.HashChecker):
    PARAMETERS = dict(
        commandline="certscan -D %(tempdir)s",
    )


class VadCertScanner(CertScanner, vadinfo.VadScanner):
    """Scanner for certs in vads."""


class CertVadScan(core.DirectoryDumperMixin, common.WinProcessFilter):
    """Scan certificates in process Vads."""

    __name = "cert_vad_scan"

    # We can just display the certs instead of dumping them.
    dump_dir_optional = True
    default_dump_dir = None

    def render(self, renderer):
        headers = [
            ("Pid", "pid", "5"),
            ("Command", "command", "10"),
            ("Address", "address", "[addrpad]"),
            ("Type", "type", "5"),
            ("Length", "length", "5")]

        if self.dump_dir:
            headers.append(("Filename", "filename", "20"))

        headers.append(("Description", "description", ""))

        renderer.table_header(headers)

        # One scanner per matched process; VadCertScanner walks the
        # process's VAD regions instead of physical memory.
        for task in self.filter_processes():
            scanner = VadCertScanner(task=task)

            for hit, type, data, description in scanner.scan():
                args = [task.UniqueProcessId, task.ImageFileName,
                        hit, type, len(data)]

                if self.dump_dir:
                    # Include the pid so dumps from different processes
                    # cannot collide.
                    filename = "%s.%s.%08X.der" % (
                        task.UniqueProcessId, type, hit)
                    with renderer.open(directory=self.dump_dir,
                                       filename=filename,
                                       mode="wb") as fd:
                        fd.write(data)

                    args.append(filename)

                args.append(description)
                renderer.table_row(*args)


class TestCertVadScan(testlib.HashChecker):
    PARAMETERS = dict(
        commandline="cert_vad_scan --proc_regex %(regex)s -D %(tempdir)s ",
        regex="csrss.exe"
    )
www.indiantradecenter.com - Indiantradecenter is one of the best packaging machine supplier companies in India. Find details on packaging machine suppliers, manufacturers, importers and exporters. www.indiantradecenter.com - Indiantradecenter is a leading supplier of telecommunication products and equipment. Find here telecommunication equipment manufacturers, suppliers and exporters in India. www.indiantradecenter.com - Indiantradecenter is a leading food and beverage product wholesaler in India. Find info on food and beverage products, exporters and distributor companies in India. www.indiantradecenter.com - Indiantradecenter provides a leading list of Indian transportation companies. Find info on Indian transportation products, exporters and distributor companies in India. Indiantradecenter is a leading ayurvedic products manufacturer in India. Find info on ayurvedic products, exporters and distributor companies in India. www.indiantradecenter.com - Indiantradecenter provides a list of the leading HR consultants in India. Explore placement training service providers and top job recruitment consultant companies. www.indiantradecenter.com - Indiantradecenter provides the best electrical product manufacturers and suppliers in India. Find details here on electronics and electrical product manufacturers, suppliers, distributors and wholesalers.
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Discussion', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('title', models.CharField(max_length=200, verbose_name='title')), ('added_date', models.DateTimeField(auto_now_add=True, verbose_name='date added to the library')), ('updated_date', models.DateTimeField(auto_now=True, verbose_name='date updated to the database')), ('status', models.CharField(default=b'OP', max_length=200, verbose_name='status of the topic', choices=[(b'OP', b'OPEN'), (b'CL', b'CLOSED'), (b'DE', b'DELETED'), (b'AR', b'ARCHIVED')])), ('author', models.ForeignKey(verbose_name='author', blank=True, to=settings.AUTH_USER_MODEL, null=True)), ], ), ]
I’m very pleased to announce my role as Grand Theatre Presenter & Curator at Grand Designs Live 2019. The award-winning show, taking place at London’s ExCeL from 4-12 May and from 9-13 October at Birmingham’s NEC, offers visitors top expert advice, new product launches and specialist exhibitors in six project zones. I’m no stranger to Grand Designs Live, having presented many sessions with Kevin McCloud in previous years, and as an active journalist and Q&A expert for Grand Designs Magazine. This year will be my first as Presenter & Curator for the Grand Theatre, and is something I am immensely excited about. I have always been a fan of the live show and feel that Grand Designs (the TV show) inspires people to dream of self-build and our job at the live show is to help them realise those dreams. It’s an honour to be the Grand Theatre Presenter & Curator and to work with Kevin McCloud and the team to curate the content for the show.
from .base import Resource


class Servers(Resource):
    """
    An interface for interacting with the NewRelic server API.
    """

    def list(self, filter_name=None, filter_ids=None, filter_labels=None,
             page=None):
        """Return the paginated collection of servers for the account.

        :type filter_name: str
        :param filter_name: Filter by server name
        :type filter_ids: list of ints
        :param filter_ids: Filter by server ids
        :type filter_labels: dict of label type: value pairs
        :param filter_labels: Filter by server labels
        :type page: int
        :param page: Pagination index
        :rtype: dict
        :return: The JSON response of the API: a 'servers' list where each
            entry carries id, account_id, name, host, reporting,
            last_reported_at and a 'summary' of cpu/memory/disk figures,
            plus a 'pages' key when results are paginated.
        """
        if filter_labels:
            pairs = ['{}:{}'.format(label, value)
                     for label, value in filter_labels.items()]
            label_param = ';'.join(pairs)
        else:
            label_param = ''

        if filter_ids:
            ids_csv = ','.join([str(app_id) for app_id in filter_ids])
        else:
            ids_csv = None

        # None entries are dropped by build_param_string; order is preserved.
        param_parts = [
            'filter[name]={0}'.format(filter_name) if filter_name else None,
            'filter[ids]={0}'.format(ids_csv) if filter_ids else None,
            'filter[labels]={0}'.format(label_param) if filter_labels else None,
            'page={0}'.format(page) if page else None,
        ]
        return self._get(
            url='{0}servers.json'.format(self.URL),
            headers=self.headers,
            params=self.build_param_string(param_parts)
        )

    def show(self, id):
        """Fetch one server by its ID.

        :type id: int
        :param id: Server ID
        :rtype: dict
        :return: The JSON response of the API: a 'server' dict with id,
            account_id, name, host, reporting, last_reported_at and a
            'summary' of cpu/memory/disk figures.
        """
        return self._get(
            url='{0}servers/{1}.json'.format(self.URL, id),
            headers=self.headers,
        )

    def update(self, id, name=None):
        """Update the optional attributes of a server.

        Attributes that are not supplied keep their current value (the
        current record is fetched first to fill them in).

        :type id: int
        :param id: Server ID
        :type name: str
        :param name: The name of the server
        :rtype: dict
        :return: The JSON response of the API (the updated 'server' dict).
        """
        current = self.show(id)['server']
        payload = {
            'server': {
                'name': name or current['name'],
            }
        }
        return self._put(
            url='{0}servers/{1}.json'.format(self.URL, id),
            headers=self.headers,
            data=payload
        )

    def delete(self, id):
        """Delete a server and all of its reported data.

        WARNING: Only servers that have stopped reporting can be deleted.
        This is an irreversible process which will delete all reported
        data for this server.

        :type id: int
        :param id: Server ID
        :rtype: dict
        :return: The JSON response of the API (the deleted 'server' dict).
        """
        return self._delete(
            url='{0}servers/{1}.json'.format(
                self.URL,
                id),
            headers=self.headers,
        )

    def metric_names(self, id, name=None, page=None):
        """List the known metrics (and their value names) for a server.

        :type id: int
        :param id: Server ID
        :type name: str
        :param name: Filter metrics by name
        :type page: int
        :param page: Pagination index
        :rtype: dict
        :return: The JSON response of the API: a 'metrics' list of
            {'name', 'values'} entries, plus a 'pages' key when paginated.
        """
        query = [
            'name={0}'.format(name) if name else None,
            'page={0}'.format(page) if page else None,
        ]
        return self._get(
            url='{0}servers/{1}/metrics.json'.format(self.URL, id),
            headers=self.headers,
            params=self.build_param_string(query)
        )

    def metric_data(
            self, id, names, values=None, from_dt=None, to_dt=None,
            summarize=False):
        """Fetch timeslice values for the requested metrics.

        Available metric names come from :meth:`metric_names`.  Results
        can be narrowed by value name and time range, or collapsed to a
        single data point with ``summarize``.

        **Note** All times sent and received are formatted in UTC. The
        default time range is the last 30 minutes.

        :type id: int
        :param id: Server ID
        :type names: list of str
        :param names: Retrieve specific metrics by name
        :type values: list of str
        :param values: Retrieve specific metric values
        :type from_dt: datetime
        :param from_dt: Retrieve metrics after this time
        :type to_dt: datetime
        :param to_dt: Retrieve metrics before this time
        :type summarize: bool
        :param summarize: Summarize the data
        :rtype: dict
        :return: The JSON response of the API: a 'metric_data' dict with
            'from', 'to' and a 'metrics' list of named timeslices.
        """
        query = [
            'from={0}'.format(from_dt) if from_dt else None,
            'to={0}'.format(to_dt) if to_dt else None,
            'summarize=true' if summarize else None,
        ]
        for name in names:
            query.append('names[]={0}'.format(name))
        for value in (values or []):
            query.append('values[]={0}'.format(value))

        return self._get(
            url='{0}servers/{1}/metrics/data.json'.format(self.URL, id),
            headers=self.headers,
            params=self.build_param_string(query)
        )
and mortgage rate info for Stanley, NM real estate companies. If we haven't found a Stanley, NM company's page yet, their phone number may be included. Help us find links to companies with listings in New Mexico and you may win $300!
# -*- coding: windows-1252 -*-
'''
Workbook-globals substream builder for a BIFF8 (.xls) file.

Record order in the BIFF8 Workbook Globals Substream:

    BOF Type = workbook globals
    Interface Header
    MMS
    Interface End
    WRITEACCESS
    CODEPAGE
    DSF
    TABID
    FNGROUPCOUNT
    Workbook Protection Block:
        WINDOWPROTECT
        PROTECT
        PASSWORD
        PROT4REV
        PROT4REVPASS
    BACKUP
    HIDEOBJ
    WINDOW1
    DATEMODE
    PRECISION
    REFRESHALL
    BOOKBOOL
    FONT +
    FORMAT *
    XF +
    STYLE +
    ? PALETTE
    USESELFS
    BOUNDSHEET +
    COUNTRY
    ? Link Table
    SST
    ExtSST
    EOF
'''
import BIFFRecords
import Style

class Workbook(object):
    """In-memory model of an Excel workbook.

    Collects worksheets, styles and shared strings, then serialises
    everything to a BIFF8 byte stream via get_biff_data() / save().

    NOTE(review): this is Python 2 code (`unicode`, the `0x00L` long
    literal, str used as a byte buffer) -- it will not run unmodified
    under Python 3.
    """

    #################################################################
    ## Constructor
    #################################################################
    def __init__(self, encoding='ascii', style_compression=0):
        # `encoding` is used to decode byte-string sheet names, owner
        # text and external function names into unicode.
        self.encoding = encoding
        self.__owner = 'None'
        self.__country_code = None # 0x07 is Russia :-)
        self.__wnd_protect = 0
        self.__obj_protect = 0
        self.__protect = 0
        self.__backup_on_save = 0
        # for WINDOW1 record: window geometry in twips, tab state
        self.__hpos_twips = 0x01E0
        self.__vpos_twips = 0x005A
        self.__width_twips = 0x3FCF
        self.__height_twips = 0x2A4E
        self.__active_sheet = 0
        self.__first_tab_index = 0
        self.__selected_tabs = 0x01
        self.__tab_width_twips = 0x0258
        self.__wnd_hidden = 0
        self.__wnd_mini = 0
        self.__hscroll_visible = 1
        self.__vscroll_visible = 1
        self.__tabs_visible = 1
        self.__styles = Style.StyleCollection(style_compression)
        self.__dates_1904 = 0        # DATEMODE: 0 = 1900 epoch, 1 = 1904 epoch
        self.__use_cell_values = 1   # PRECISION flag
        self.__sst = BIFFRecords.SharedStringTable(self.encoding)
        self.__worksheets = []
        # lower-cased sheet name -> index into self.__worksheets
        self.__worksheet_idx_from_name = {}
        # (supbook index, first sheet, last sheet) -> EXTERNSHEET ref index
        self.__sheet_refs = {}
        self._supbook_xref = {}
        self._xcall_xref = {}
        self._ownbook_supbookx = None
        self._ownbook_supbook_ref = None
        self._xcall_supbookx = None
        self._xcall_supbook_ref = None

    #################################################################
    ## Properties, "getters", "setters"
    #################################################################
    def get_style_stats(self):
        # Copy so callers cannot mutate the collection's counters.
        return self.__styles.stats[:]

    def set_owner(self, value):
        self.__owner = value
    def get_owner(self):
        return self.__owner
    owner = property(get_owner, set_owner)

    def set_country_code(self, value):
        self.__country_code = value
    def get_country_code(self):
        return self.__country_code
    country_code = property(get_country_code, set_country_code)

    def set_wnd_protect(self, value):
        self.__wnd_protect = int(value)
    def get_wnd_protect(self):
        return bool(self.__wnd_protect)
    wnd_protect = property(get_wnd_protect, set_wnd_protect)

    def set_obj_protect(self, value):
        self.__obj_protect = int(value)
    def get_obj_protect(self):
        return bool(self.__obj_protect)
    obj_protect = property(get_obj_protect, set_obj_protect)

    def set_protect(self, value):
        self.__protect = int(value)
    def get_protect(self):
        return bool(self.__protect)
    protect = property(get_protect, set_protect)

    def set_backup_on_save(self, value):
        self.__backup_on_save = int(value)
    def get_backup_on_save(self):
        return bool(self.__backup_on_save)
    backup_on_save = property(get_backup_on_save, set_backup_on_save)

    # All twips values below are masked to 16 bits, matching the
    # WINDOW1 record's unsigned 16-bit fields.
    def set_hpos(self, value):
        self.__hpos_twips = value & 0xFFFF
    def get_hpos(self):
        return self.__hpos_twips
    hpos = property(get_hpos, set_hpos)

    def set_vpos(self, value):
        self.__vpos_twips = value & 0xFFFF
    def get_vpos(self):
        return self.__vpos_twips
    vpos = property(get_vpos, set_vpos)

    def set_width(self, value):
        self.__width_twips = value & 0xFFFF
    def get_width(self):
        return self.__width_twips
    width = property(get_width, set_width)

    def set_height(self, value):
        self.__height_twips = value & 0xFFFF
    def get_height(self):
        return self.__height_twips
    height = property(get_height, set_height)

    def set_active_sheet(self, value):
        # Keep the first visible tab in sync with the active sheet.
        self.__active_sheet = value & 0xFFFF
        self.__first_tab_index = self.__active_sheet
    def get_active_sheet(self):
        return self.__active_sheet
    active_sheet = property(get_active_sheet, set_active_sheet)

    def set_tab_width(self, value):
        self.__tab_width_twips = value & 0xFFFF
    def get_tab_width(self):
        return self.__tab_width_twips
    tab_width = property(get_tab_width, set_tab_width)

    def set_wnd_visible(self, value):
        # Stored inverted: the BIFF flag is "window hidden".
        self.__wnd_hidden = int(not value)
    def get_wnd_visible(self):
        return not bool(self.__wnd_hidden)
    wnd_visible = property(get_wnd_visible, set_wnd_visible)

    def set_wnd_mini(self, value):
        self.__wnd_mini = int(value)
    def get_wnd_mini(self):
        return bool(self.__wnd_mini)
    wnd_mini = property(get_wnd_mini, set_wnd_mini)

    def set_hscroll_visible(self, value):
        self.__hscroll_visible = int(value)
    def get_hscroll_visible(self):
        return bool(self.__hscroll_visible)
    hscroll_visible = property(get_hscroll_visible, set_hscroll_visible)

    def set_vscroll_visible(self, value):
        self.__vscroll_visible = int(value)
    def get_vscroll_visible(self):
        return bool(self.__vscroll_visible)
    vscroll_visible = property(get_vscroll_visible, set_vscroll_visible)

    def set_tabs_visible(self, value):
        self.__tabs_visible = int(value)
    def get_tabs_visible(self):
        return bool(self.__tabs_visible)
    tabs_visible = property(get_tabs_visible, set_tabs_visible)

    def set_dates_1904(self, value):
        self.__dates_1904 = int(value)
    def get_dates_1904(self):
        return bool(self.__dates_1904)
    dates_1904 = property(get_dates_1904, set_dates_1904)

    def set_use_cell_values(self, value):
        self.__use_cell_values = int(value)
    def get_use_cell_values(self):
        return bool(self.__use_cell_values)
    use_cell_values = property(get_use_cell_values, set_use_cell_values)

    def get_default_style(self):
        return self.__styles.default_style
    default_style = property(get_default_style)

    ##################################################################
    ## Methods
    ##################################################################

    def add_style(self, style):
        """Register a style and return its XF index."""
        return self.__styles.add(style)

    def add_str(self, s):
        """Add a string to the shared string table; returns its SST index."""
        return self.__sst.add_str(s)

    def del_str(self, sst_idx):
        self.__sst.del_str(sst_idx)

    def str_index(self, s):
        return self.__sst.str_index(s)

    def add_sheet(self, sheetname, cell_overwrite_ok=False):
        """Create a new worksheet and return it.

        Raises on an invalid or duplicate (case-insensitive) name.
        """
        import Worksheet, Utils
        if not isinstance(sheetname, unicode):
            sheetname = sheetname.decode(self.encoding)
        if not Utils.valid_sheet_name(sheetname):
            raise Exception("invalid worksheet name %r" % sheetname)
        lower_name = sheetname.lower()
        if lower_name in self.__worksheet_idx_from_name:
            raise Exception("duplicate worksheet name %r" % sheetname)
        self.__worksheet_idx_from_name[lower_name] = len(self.__worksheets)
        self.__worksheets.append(Worksheet.Worksheet(sheetname, self, cell_overwrite_ok))
        return self.__worksheets[-1]

    def get_sheet(self, sheetnum):
        return self.__worksheets[sheetnum]

    def raise_bad_sheetname(self, sheetname):
        raise Exception("Formula: unknown sheet name %s" % sheetname)

    def convert_sheetindex(self, strg_ref, n_sheets):
        # Validate a numeric sheet reference from a formula.
        idx = int(strg_ref)
        if 0 <= idx < n_sheets:
            return idx
        msg = "Formula: sheet index (%s) >= number of sheets (%d)" % (strg_ref, n_sheets)
        raise Exception(msg)

    def _get_supbook_index(self, tag):
        # Intern a SUPBOOK tag, assigning indexes in first-seen order.
        if tag in self._supbook_xref:
            return self._supbook_xref[tag]
        self._supbook_xref[tag] = idx = len(self._supbook_xref)
        return idx

    def setup_ownbook(self):
        # Lazily create the self-referencing SUPBOOK entry.
        # 0xFFFE/0xFFFE is the BIFF marker for "own workbook" references.
        self._ownbook_supbookx = self._get_supbook_index(('ownbook', 0))
        self._ownbook_supbook_ref = None
        reference = (self._ownbook_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._ownbook_supbook_ref = len(self.__sheet_refs)

    def setup_xcall(self):
        # Lazily create the SUPBOOK entry for external (add-in) functions.
        self._xcall_supbookx = self._get_supbook_index(('xcall', 0))
        self._xcall_supbook_ref = None
        reference = (self._xcall_supbookx, 0xFFFE, 0xFFFE)
        if reference in self.__sheet_refs:
            raise Exception("can't happen")
        self.__sheet_refs[reference] = self._xcall_supbook_ref = len(self.__sheet_refs)

    def add_sheet_reference(self, formula):
        """Resolve a formula's sheet and add-in references.

        Converts the (name-or-index) sheet references collected by the
        formula parser into EXTERNSHEET ref indexes, registering new
        entries in the link table as needed, then patches the formula's
        RPN bytecode in place.
        """
        patches = []
        n_sheets = len(self.__worksheets)
        sheet_refs, xcall_refs = formula.get_references()
        for ref0, ref1, offset in sheet_refs:
            if not ref0.isdigit():
                try:
                    ref0n = self.__worksheet_idx_from_name[ref0.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref0)
            else:
                ref0n = self.convert_sheetindex(ref0, n_sheets)
            if ref1 == ref0:
                ref1n = ref0n
            elif not ref1.isdigit():
                try:
                    ref1n = self.__worksheet_idx_from_name[ref1.lower()]
                except KeyError:
                    self.raise_bad_sheetname(ref1)
            else:
                ref1n = self.convert_sheetindex(ref1, n_sheets)
            if ref1n < ref0n:
                msg = "Formula: sheets out of order; %r:%r -> (%d, %d)" \
                    % (ref0, ref1, ref0n, ref1n)
                raise Exception(msg)
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            reference = (self._ownbook_supbookx, ref0n, ref1n)
            if reference in self.__sheet_refs:
                patches.append((offset, self.__sheet_refs[reference]))
            else:
                nrefs = len(self.__sheet_refs)
                if nrefs > 65535:
                    raise Exception('More than 65536 inter-sheet references')
                self.__sheet_refs[reference] = nrefs
                patches.append((offset, nrefs))
        for funcname, offset in xcall_refs:
            if self._ownbook_supbookx is None:
                self.setup_ownbook()
            if self._xcall_supbookx is None:
                self.setup_xcall()
            # print funcname, self._supbook_xref
            patches.append((offset, self._xcall_supbook_ref))
            if not isinstance(funcname, unicode):
                funcname = funcname.decode(self.encoding)
            if funcname in self._xcall_xref:
                idx = self._xcall_xref[funcname]
            else:
                self._xcall_xref[funcname] = idx = len(self._xcall_xref)
            # idx + 1: EXTERNNAME indexes are 1-based in the token stream.
            patches.append((offset + 2, idx + 1))
        formula.patch_references(patches)

    ##################################################################
    ## BIFF records generation
    ##################################################################

    def __bof_rec(self):
        return BIFFRecords.Biff8BOFRecord(BIFFRecords.Biff8BOFRecord.BOOK_GLOBAL).get()

    def __eof_rec(self):
        return BIFFRecords.EOFRecord().get()

    def __intf_hdr_rec(self):
        return BIFFRecords.InteraceHdrRecord().get()

    def __intf_end_rec(self):
        return BIFFRecords.InteraceEndRecord().get()

    def __intf_mms_rec(self):
        return BIFFRecords.MMSRecord().get()

    def __write_access_rec(self):
        return BIFFRecords.WriteAccessRecord(self.__owner).get()

    def __wnd_protect_rec(self):
        return BIFFRecords.WindowProtectRecord(self.__wnd_protect).get()

    def __obj_protect_rec(self):
        return BIFFRecords.ObjectProtectRecord(self.__obj_protect).get()

    def __protect_rec(self):
        return BIFFRecords.ProtectRecord(self.__protect).get()

    def __password_rec(self):
        return BIFFRecords.PasswordRecord().get()

    def __prot4rev_rec(self):
        return BIFFRecords.Prot4RevRecord().get()

    def __prot4rev_pass_rec(self):
        return BIFFRecords.Prot4RevPassRecord().get()

    def __backup_rec(self):
        return BIFFRecords.BackupRecord(self.__backup_on_save).get()

    def __hide_obj_rec(self):
        return BIFFRecords.HideObjRecord().get()

    def __window1_rec(self):
        # Pack the visibility flags into the WINDOW1 option bitfield.
        # Bit 2 is intentionally unused here.
        flags = 0
        flags |= (self.__wnd_hidden) << 0
        flags |= (self.__wnd_mini) << 1
        flags |= (self.__hscroll_visible) << 3
        flags |= (self.__vscroll_visible) << 4
        flags |= (self.__tabs_visible) << 5
        return BIFFRecords.Window1Record(self.__hpos_twips, self.__vpos_twips,
            self.__width_twips, self.__height_twips,
            flags,
            self.__active_sheet, self.__first_tab_index,
            self.__selected_tabs, self.__tab_width_twips).get()

    def __codepage_rec(self):
        return BIFFRecords.CodepageBiff8Record().get()

    def __country_rec(self):
        # COUNTRY is optional; emit nothing when no code was set.
        if not self.__country_code:
            return ''
        return BIFFRecords.CountryRecord(self.__country_code, self.__country_code).get()

    def __dsf_rec(self):
        return BIFFRecords.DSFRecord().get()

    def __tabid_rec(self):
        return BIFFRecords.TabIDRecord(len(self.__worksheets)).get()

    def __fngroupcount_rec(self):
        return BIFFRecords.FnGroupCountRecord().get()

    def __datemode_rec(self):
        return BIFFRecords.DateModeRecord(self.__dates_1904).get()

    def __precision_rec(self):
        return BIFFRecords.PrecisionRecord(self.__use_cell_values).get()

    def __refresh_all_rec(self):
        return BIFFRecords.RefreshAllRecord().get()

    def __bookbool_rec(self):
        return BIFFRecords.BookBoolRecord().get()

    def __all_fonts_num_formats_xf_styles_rec(self):
        return self.__styles.get_biff_data()

    def __palette_rec(self):
        # Custom PALETTE record not implemented; default palette applies.
        result = ''
        return result

    def __useselfs_rec(self):
        return BIFFRecords.UseSelfsRecord().get()

    def __boundsheets_rec(self, data_len_before, data_len_after, sheet_biff_lens):
        """Build the BOUNDSHEET records.

        Each BOUNDSHEET carries the absolute stream offset of its
        worksheet's BOF. Layout in the final stream is:
            ...globals... BOUNDSHEET0..N ...after... WORKSHEET0..N
        A first pass (with a dummy 0x00L offset) measures the total
        BOUNDSHEET length so the real offsets can be computed.
        """
        boundsheets_len = 0
        for sheet in self.__worksheets:
            boundsheets_len += len(BIFFRecords.BoundSheetRecord(
                0x00L, sheet.visibility, sheet.name, self.encoding
                ).get())
        start = data_len_before + boundsheets_len + data_len_after
        result = ''
        for sheet_biff_len, sheet in zip(sheet_biff_lens, self.__worksheets):
            result += BIFFRecords.BoundSheetRecord(
                start, sheet.visibility, sheet.name, self.encoding
                ).get()
            start += sheet_biff_len
        return result

    def __all_links_rec(self):
        """Build the link table: SUPBOOKs, EXTERNNAMEs and EXTERNSHEET."""
        pieces = []
        # SUPBOOK records in index order.
        temp = [(idx, tag) for tag, idx in self._supbook_xref.items()]
        temp.sort()
        for idx, tag in temp:
            stype, snum = tag
            if stype == 'ownbook':
                rec = BIFFRecords.InternalReferenceSupBookRecord(len(self.__worksheets)).get()
                pieces.append(rec)
            elif stype == 'xcall':
                rec = BIFFRecords.XcallSupBookRecord().get()
                pieces.append(rec)
                # EXTERNNAME records for add-in functions, in index order.
                temp = [(idx, name) for name, idx in self._xcall_xref.items()]
                temp.sort()
                for idx, name in temp:
                    rec = BIFFRecords.ExternnameRecord(
                        options=0, index=0, name=name, fmla='\x02\x00\x1c\x17').get()
                    pieces.append(rec)
            else:
                raise Exception('unknown supbook stype %r' % stype)
        if len(self.__sheet_refs) > 0:
            # get references in index order
            temp = [(idx, ref) for ref, idx in self.__sheet_refs.items()]
            temp.sort()
            temp = [ref for idx, ref in temp]
            externsheet_record = BIFFRecords.ExternSheetRecord(temp).get()
            pieces.append(externsheet_record)
        return ''.join(pieces)

    def __sst_rec(self):
        return self.__sst.get_biff_record()

    def __ext_sst_rec(self, abs_stream_pos):
        # ExtSST generation is disabled; Excel tolerates its absence.
        return ''
        #return BIFFRecords.ExtSSTRecord(abs_stream_pos, self.sst_record.str_placement,
        #self.sst_record.portions_len).get()

    def get_biff_data(self):
        """Assemble and return the complete BIFF8 stream as a byte string.

        Record order must follow the sequence in the module docstring.
        """
        before = ''
        before += self.__bof_rec()
        before += self.__intf_hdr_rec()
        before += self.__intf_mms_rec()
        before += self.__intf_end_rec()
        before += self.__write_access_rec()
        before += self.__codepage_rec()
        before += self.__dsf_rec()
        before += self.__tabid_rec()
        before += self.__fngroupcount_rec()
        before += self.__wnd_protect_rec()
        before += self.__protect_rec()
        before += self.__obj_protect_rec()
        before += self.__password_rec()
        before += self.__prot4rev_rec()
        before += self.__prot4rev_pass_rec()
        before += self.__backup_rec()
        before += self.__hide_obj_rec()
        before += self.__window1_rec()
        before += self.__datemode_rec()
        before += self.__precision_rec()
        before += self.__refresh_all_rec()
        before += self.__bookbool_rec()
        before += self.__all_fonts_num_formats_xf_styles_rec()
        before += self.__palette_rec()
        before += self.__useselfs_rec()

        country = self.__country_rec()
        all_links = self.__all_links_rec()

        shared_str_table = self.__sst_rec()
        after = country + all_links + shared_str_table

        ext_sst = self.__ext_sst_rec(0) # need fake cause we need calc stream pos
        eof = self.__eof_rec()

        self.__worksheets[self.__active_sheet].selected = True
        sheets = ''
        sheet_biff_lens = []
        for sheet in self.__worksheets:
            data = sheet.get_biff_data()
            sheets += data
            sheet_biff_lens.append(len(data))

        bundlesheets = self.__boundsheets_rec(len(before), len(after)+len(ext_sst)+len(eof), sheet_biff_lens)

        # Recompute ExtSST with the real SST stream position.
        sst_stream_pos = len(before) + len(bundlesheets) + len(country) + len(all_links)
        ext_sst = self.__ext_sst_rec(sst_stream_pos)

        return before + bundlesheets + after + ext_sst + eof + sheets

    def save(self, filename):
        """Write the workbook to `filename` inside an OLE2 compound document."""
        import CompoundDoc
        doc = CompoundDoc.XlsDoc()
        doc.save(filename, self.get_biff_data())
In order to keep an accurate record of all equipment that is being transferred or sent to surplus property, the following procedures have been created to make the process easier, along with a new surplus equipment transfer form and computer transfer form (these forms are available from Mail/Distribution Services or The Surplus Warehouse). The form is filled out by the transferring department and sent intact to Building Services (PA 100). Since this form also acts as a work order for surplus property pick up, it is not necessary to submit a work order for surplus removal. Please verify the inventory items to be sent to Surplus Property with your annual inventory verification list. Items that are not found on your inventory verification list (generally items with a cost below $500.00) should still be listed on the Surplus Property Transfer Form. Please ensure that item descriptions and tag numbers are accurate. If you have purchased items with federal funds, be sure to check your annual inventory verification list to confirm this. You can also visit the fixed assets SAP screen AS03 for information on fixed assets and to check if the equipment is tagged as federal or university owned. If the item has a G in the ownership/title column, then the equipment was purchased with federal funds and you should call Tina Herald at extension 6453. Upon receiving the form, an authorized agent from Procurement Services will classify and tag the items into one of three groups: reissue, surplus sale, or discard. All items (including items tagged as discard) must be accounted for in order for Work Control to take any of the surplus items to the surplus recycling center. Work Control personnel will pick up the surplus asset from the department and take it to the surplus property storage area where it will be sorted for sale or redistribution. Work Control will then sign the form verifying delivery. 
The form will then be sent to Procurement Services for distribution to the appropriate departments (i.e., White copy to comptroller, yellow copy to Procurement Services, and pink copy to the transferring department). If transferring to another department, the receiving department will sign the form (as well as Work Control), stating that they received the transferred items. If you have any questions on the transferring or surplus process, please feel free to call (Procurement Services) at extension 5265, Chuck Pettit (Building Services) at extension 6422, or Tina Herald (Office of the Comptroller) at extension 6453.
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2009-2010 Ars Aperta, Itaapy, Pierlis, Talend.
#
# Authors: Hervé Cauwelier <herve@itaapy.com>
#
# This file is part of Lpod (see: http://lpod-project.org).
# Lpod is free software; you can redistribute it and/or modify it under
# the terms of either:
#
# a) the GNU General Public License as published by the Free Software
#    Foundation, either version 3 of the License, or (at your option)
#    any later version.
#    Lpod is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU General Public License for more details.
#    You should have received a copy of the GNU General Public License
#    along with Lpod. If not, see <http://www.gnu.org/licenses/>.
#
# b) the Apache License, Version 2.0 (the "License");
#    you may not use this file except in compliance with the License.
#    You may obtain a copy of the License at
#    http://www.apache.org/licenses/LICENSE-2.0
#

# Import from the standard library
from optparse import OptionParser
from sys import exit, stdin

# Import from lpod
from lpod import __version__
from lpod.document import odf_get_document
from lpod.scriptutils import add_option_output, printinfo
from lpod.style import odf_create_style
from lpod.styles import rgb2hex


def highlight(document, pattern, color=None, background_color=None,
        italic=False, bold=False, target=None, pretty=True):
    """Highlight every occurrence of `pattern` in `document`.

    document -- an odf_document (NOT a file URL)
    pattern -- unicode regular expression matched against paragraphs
               and headings
    color / background_color -- colour name, "#rrggbb", 'none' or None
    italic / bold -- apply the corresponding font style/weight
    target -- where to save (None = in place); pretty -- pretty-print XML

    BUG FIX: the first parameter used to be named `odf_file_url` while the
    body silently read the module-level global `document` created in the
    `__main__` block, so the function only worked when run as a script.
    The parameter is now named `document` and used directly; the existing
    positional call site is unaffected.
    """
    # Derive a reproducible style name from the requested attributes,
    # e.g. u"Highlight Red Yellow Bold" -> u"Highlight_20_Red_20_Yellow_20_Bold".
    display_name = [u"Highlight"]
    if color and color != 'none':
        display_name.append(unicode(color).capitalize())
    if background_color and background_color != 'none':
        display_name.append(unicode(background_color).capitalize())
    if italic:
        display_name.append(u"Italic")
    if bold:
        display_name.append(u"Bold")
    display_name = u" ".join(display_name)
    name = display_name.replace(u" ", u"_20_")
    # Is our style already installed?
    style = document.get_style('text', name)
    if style is None:
        color = rgb2hex(color) if color != 'none' else None
        background_color = (rgb2hex(background_color)
                if background_color != 'none' else None)
        style = odf_create_style('text', name, italic=italic, bold=bold,
                color=color, background_color=background_color)
        document.insert_style(style, automatic=True)
    # Patch!
    body = document.get_body()
    i = -1
    for i, paragraph in enumerate(body.get_paragraphs(content=pattern)
            + body.get_headings(content=pattern)):
        # Don't colour the table of content
        if paragraph.get_parent().get_tag() in ('text:index-title',
                'text:index-body'):
            continue
        paragraph.set_span(name, regex=pattern)
    document.save(target=target, pretty=pretty)
    printinfo((i + 1), "paragraphs changed (0 error, 0 warning).")


if __name__ == '__main__':
    # Options initialisation
    usage = '%prog <file> <pattern>'
    description = ("highlight the text matching the given regular "
                   "expression (Python syntax). May not display in some "
                   "office suites.")
    parser = OptionParser(usage, version=__version__,
            description=description)
    # --color
    help = ("the name or #rrggbb color of the font color: black, blue, "
            "brown, cyan, green, grey, magenta, orange, pink, red, violet, "
            "white, yellow or none (default)")
    parser.add_option('-c', '--color', default='none', metavar='COLOR',
            help=help)
    # --background
    help = ("the name or #rrggbb color of the background color: black, "
            "blue, brown, cyan, green, grey, magenta, orange, pink, red, "
            "violet, white, yellow (default) or none")
    parser.add_option('-g', '--background', default='yellow',
            metavar='BACKGROUND', help=help)
    # --italic
    parser.add_option('-i', '--italic', dest='italic', action='store_true',
            default=False, help='set the italic font style')
    # --bold
    parser.add_option('-b', '--bold', dest='bold', action='store_true',
            default=False, help='set the bold font weight')
    # --output
    add_option_output(parser)
    # Parse options
    options, args = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        exit(1)
    odf_file_url, pattern = args
    pattern = unicode(pattern, stdin.encoding)
    document = odf_get_document(odf_file_url)
    highlight(document, pattern, options.color, options.background,
            options.italic, options.bold, target=options.output)
Continue Reading Singer कैसे बनें ! Continue Reading Acting में करियर कैसे बनाएं ! Continue Reading Painting में करियर कैसे बनायें ! Continue Reading Teacher कैसे बने ! Continue Reading Dancing में करियर कैसे बनाएं ! Continue Reading Auditor कैसे बनें ! Continue Reading UPSC Exam की तैयारी कैसे करें !
#! /usr/bin/env python # -*- coding: utf-8 -*- #from __future__ import unicode_literals import datetime from alerts import Alerter, BasicMatchString from util import elastalert_logger from db_sqlconn import Mysql ''' ################################################################# # 推送消息到数据库中,便于后继查询 # ################################################################# ''' import sys reload(sys) sys.setdefaultencoding('utf-8') class DBAlerter(Alerter): # 数据库IP地址,数据库名称,用户名和口令必填 def __init__(self, *args): super(DBAlerter, self).__init__(*args) def create_default_title(self, matches): subject = 'ElastAlert: %s' % (self.rule['name']) return subject def alert(self, matches): body = self.create_alert_body(matches) self.senddata(body) elastalert_logger.info("send message to %s" % "admin") def senddata(self, content): mysql = Mysql(self.rule) now = datetime.datetime.now() now = now.strftime("%Y-%m-%d %H:%M:%S") insert_sql = 'insert into link_alert(' \ 'alert_ruleid, '\ 'alert_rule, '\ 'alert_userid,' \ 'alert_username,' \ 'alert_channel,' \ 'alert_account,' \ 'alert_message,' \ 'alert_time,' \ 'alert_status' \ ') values ' \ '(%s,%s,%s,%s,%s,%s,%s,%s,"0")' for alertperson in self.rule['alertpersons']: insert_data = [self.rule['rule_id'], self.rule['name'], alertperson['user_id'], alertperson['user_name'], self.rule['alert_way'], alertperson['user_email'], content, now] mysql.insertOne(insert_sql, insert_data) mysql.dispose() elastalert_logger.info("send msg and response: %s" % content) def get_info(self): return {'type': 'DBAlerter'}
Beloved Dallas burger joint Rodeo Goat Ice House has made a new home in Houston. The treasured Texas spot is known for its oversized patio and powerhouse burgers with names that are as funky as it gets. Rodeo Goat has set up shop in the East End at a renovated warehouse at 2118 Lamar, right at the corner of St. Emanuel and Dallas. It’s close enough to Minute Maid Park for a bite before Houston Astros games. Rodeo Goat has been on Houston’s horizon since it first revealed its Bayou City intentions back in 2016. After extensive Hurricane Harvey delays, the burger haven finally opened last Monday. Rodeo Goat fans who had their first bite in the DFW area will find the Rodeo fair flair and burgers they know and love, but Houstonians are sure to recognize some Space City touches at the new restaurant. Restaurateur Shannon Wynne, the mind behind Rodeo Goat and Flying Saucer, was born in Dallas, but there’s no end to his Bayou City love. Wynne has known Houston since the 1980s, when he opened club 8.0 on Greenbriar. Club 8.0 was another Dallas transplant. His restaurant chain Flying Fish is also on its way to The Heights, set to open at Durham and 19th Street. The new Rodeo Goat feels at home in EaDo. First, the Houston decor: a vintage Rodeo sign hanging on the wall that Wynne plucked from a flea market in Bowie, Texas. Next, the food: burgers named for Houston icons, like the veggie patty named for ZZ Top frontman Billy Gibbons. Then there’s the Marvin Zindler, named for the sharp, white-haired investigative journalist who struck fear into the hearts of restaurant owners across the city. Wynne remembers the news anchor’s heyday. “He wore sunglasses and a suit and he would cause a ruckus,” Wynne says. So naturally, Wynne dubbed the burger with apple-wood smoked bacon, barbecue sauce, pickle chutney and jalapenos after him. Other burgers have toppings like hydroponic sprouts, grilled peaches, brisket chili, bacon and onion marmalade, pickle chutney and more. 
All the ingredients are fresh and organic, Wynne notes. And getting down to basics, the patties themselves are from grass-fed 44 Farms cows. That goes double for the drinks. Rodeo Goat offers a wide variety of beer in addition to cocktails. One in particular will make you feel nostalgic — and more than a little buzzed. “The signature drink is called Moontang,” Wynne says. It’s straight up Everclear in Tang, the childhood neon orange drink known from NASA. “It’s casual enough that people feel comfortable,” Wynne says. You’re welcome to head over whatever your current duds: tux, Bermuda shorts, couture or blue jeans.
def dagTriang(m, size):
    """Return the table of maximum path sums through triangle `m`.

    m -- triangle as a list of rows, row i having i+1 integers
    size -- number of rows in the triangle
    Returns `dist`, same shape as `m`, where dist[i][j] is the largest
    sum of any top-to-(i,j) path; the answer to Project Euler 18/67 is
    max(dist[-1]).

    BUG FIX: the original iterated `icur` over range(size), so on the
    last row it indexed m[size] / dist[size] and died with an IndexError
    (papered over by a bare `except` that printed and re-raised). The
    last row has no children, so relaxation stops at row size-2.
    """
    # dist starts as a zero triangle of the same shape as m.
    dist = []
    for i in range(size):
        dist.append([0] * (i + 1))
    dist[0][0] = m[0][0]
    # Relax downward: each cell pushes its best total to both children.
    for icur in range(size - 1):
        for jcur in range(icur + 1):
            # Left child (icur+1, jcur).
            alt = dist[icur][jcur] + m[icur + 1][jcur]
            if alt > dist[icur + 1][jcur]:
                dist[icur + 1][jcur] = alt
            # Right child (icur+1, jcur+1).
            alt = dist[icur][jcur] + m[icur + 1][jcur + 1]
            if alt > dist[icur + 1][jcur + 1]:
                dist[icur + 1][jcur + 1] = alt
    return dist


def main():
    """Fetch the Project Euler 67 triangle and print the maximum path sum."""
    # Python 2 urllib; guarded under __main__ so importing this module
    # no longer performs network I/O as a side effect.
    import urllib
    url = "http://projecteuler.net/project/triangle.txt"
    page = urllib.urlopen(url)
    contents = page.read()
    contents = contents.split('\r\n')
    contents.pop()  # drop the trailing empty line after the final CRLF
    m = [[int(v) for v in row.split(' ')] for row in contents]
    size = len(m)
    dists = dagTriang(m, size)
    print(max(dists[-1]))  # expected output: 7273


if __name__ == '__main__':
    main()
Fancy a new project? This post introduces a new occasional series of posts with suggestions for colour schemes. I’m sure I’ve told you before that I love playing with colour; well, these will feature some small combinations that will also act as ‘aide-mémoires’ for me too. This collection features 5 colours from the Stylecraft Special DK range, namely: Silver, Duck Egg, Storm Blue, Cloud Blue and Wisteria. I think it would make a pretty blanket. Next post: Spirit of ….
from __future__ import unicode_literals
from __future__ import absolute_import, division, print_function

"""
This module is used to cache per-collection type information.
"""

__author__ = "Graham Klyne (GK@ACM.ORG)"
__copyright__ = "Copyright 2017, G. Klyne"
__license__ = "MIT (http://opensource.org/licenses/MIT)"

import logging
log = logging.getLogger(__name__)

from annalist import layout
from annalist.exceptions import Annalist_Error
from annalist.identifiers import ANNAL, RDFS

from annalist.models.collectionentitycache import (
    Cache_Error, CollectionEntityCacheObject, CollectionEntityCache
    )
from annalist.models.closurecache import ClosureCache
from annalist.models.recordtype import RecordType

# ---------------------------------------------------------------------------
#
#   Type-cache object class
#
# ---------------------------------------------------------------------------

#@@@ supertype_closure_cache = {}

class CollectionTypeCacheObject(CollectionEntityCacheObject):
    """
    This class is a type cache for a specified collection.

    It extends class CollectionEntityCacheObject with type-specific logic;
    notably overriding method _load_entity with additional logic to maintain
    a supertype closure cache, and methods to access that cache.

    The closure cache records subtype-URI -> supertype-URI relations; the
    forward direction of the closure is "towards supertypes" (see
    _load_entity, which calls add_rel(type_uri, supertype_uri)).
    """

    def __init__(self, coll_id, entity_cls=RecordType):
        """
        Initialize a cache object for a specified collection.

        coll_id     Collection id with which the type cache is associated.
        entity_cls  Entity class cached; defaults to RecordType.
        """
        super(CollectionTypeCacheObject, self).__init__(coll_id, entity_cls)
        #@@@ supertype_closure_cache[coll_id]
        self._supertype_closure_cache = ClosureCache(coll_id, ANNAL.CURIE.supertype_uri)
        return

    def _gsupertype_cache(self):
        # Internal accessor for the supertype closure cache.
        return self._supertype_closure_cache
        #@@@ supertype_closure_cache.get(self.get_coll_id(), None)

    def _load_entity(self, coll, type_entity):
        """
        Internal helper method loads type data to cache.
        Also updates supertype closure cache.

        Returns True if new type was added.
        """
        type_id = type_entity.get_id()
        type_uri = type_entity.get_uri()
        type_parent = type_entity.get_parent().get_id()
        type_data = type_entity.get_save_values()
        add_type = super(CollectionTypeCacheObject, self)._load_entity(coll, type_entity)
        if add_type:
            # Add relations for supertype references from the new type URI
            for supertype_obj in type_data.get(ANNAL.CURIE.supertype_uri, []):
                supertype_uri = supertype_obj["@id"]
                self._gsupertype_cache().add_rel(type_uri, supertype_uri)
            # Also add relations for references *to* the new type URI
            # (previously cached types that name this type as a supertype).
            for try_subtype in self.get_all_entities(coll):
                sub_st_objs = try_subtype.get(ANNAL.CURIE.supertype_uri, [])
                sub_st_uris = [ sub_st_obj["@id"] for sub_st_obj in sub_st_objs ]
                if type_uri in sub_st_uris:
                    subtype_uri = try_subtype.get(ANNAL.CURIE.uri, None)
                    if subtype_uri:
                        self._gsupertype_cache().add_rel(subtype_uri, type_uri)
        return add_type

    def _drop_entity(self, coll, type_id):
        """
        Override method that drops entity from cache, to also remove
        references from the supertype closure cache.

        Returns the type entity removed, or None if not found.
        """
        type_entity = super(CollectionTypeCacheObject, self)._drop_entity(coll, type_id)
        if type_entity:
            type_uri = type_entity.get_uri()
            self._gsupertype_cache().remove_val(type_uri)
        return type_entity

    def get_type_uri_supertype_uris(self, type_uri):
        """
        Returns all supertype URIs for a specified type URI.

        Returns all supertype URIs, even those for which there is no
        defined type entity.
        """
        return self._gsupertype_cache().fwd_closure(type_uri)

    def get_type_uri_subtype_uris(self, type_uri):
        """
        Returns all subtype URIs for a specified type URI.

        Returns all subtype URIs, even those for which there is no
        defined type entity.
        """
        return self._gsupertype_cache().rev_closure(type_uri)

    def get_type_uri_supertypes(self, coll, type_uri):
        """
        Generates all supertypes for a specified type URI.

        This method returns only those supertypes that are defined as
        entities (supertype URIs with no corresponding entity are skipped).
        """
        self._load_entities(coll)
        for st_uri in self.get_type_uri_supertype_uris(type_uri):
            st = self.get_entity_from_uri(coll, st_uri)
            if st:
                yield st
        return

    def get_type_uri_subtypes(self, coll, type_uri):
        """
        Generates all subtypes for a specified type URI.

        This method returns only those subtypes that are defined as
        entities (subtype URIs with no corresponding entity are skipped).
        """
        self._load_entities(coll)
        for st_uri in self.get_type_uri_subtype_uris(type_uri):
            st = self.get_entity_from_uri(coll, st_uri)
            if st:
                yield st
        return

    def remove_cache(self):
        """
        Close down and release all type cache data
        """
        # log.debug("@@@@remove type cache %r"%(self.get_coll_id(),))
        super(CollectionTypeCacheObject, self).remove_cache()
        self._supertype_closure_cache.remove_cache()
        self._supertype_closure_cache = None
        return

# ---------------------------------------------------------------------------
#
#   Collection type-cache class
#
# ---------------------------------------------------------------------------

class CollectionTypeCache(CollectionEntityCache):
    """
    This class manages type cache objects over multiple collections.

    It is a thin wrapper over CollectionEntityCache that delegates each
    operation to the per-collection CollectionTypeCacheObject instance.
    """

    def __init__(self):
        """
        Initialize.

        Initializes a value cache with no per-collection data.
        """
        super(CollectionTypeCache, self).__init__(CollectionTypeCacheObject, RecordType)
        return

    # Collection type cache allocation and access methods

    def set_type(self, coll, type_entity):
        """
        Save a new or updated type definition
        """
        return self.set_entity(coll, type_entity)

    def remove_type(self, coll, type_id):
        """
        Remove type from collection type cache.

        Returns the type entity removed if found, or None if not defined.
        """
        return self.remove_entity(coll, type_id)

    def get_type(self, coll, type_id):
        """
        Retrieve a type description for a given type Id.

        Returns a type object for the specified collection and type Id.
        """
        return self.get_entity(coll, type_id)

    def get_type_from_uri(self, coll, type_uri):
        """
        Retrieve a type description for a given type URI.

        Returns a type object for the specified collection and type URI.
        """
        return self.get_entity_from_uri(coll, type_uri)

    def get_all_type_ids(self, coll, altscope=None):
        """
        Returns all types currently available for a collection in the
        indicated scope.  Default scope is types defined directly in the
        indicated collection.
        """
        return self.get_all_entity_ids(coll, altscope=altscope)

    def get_all_types(self, coll, altscope=None):
        """
        Returns all types currently available for a collection in the
        indicated scope.  Default scope is types defined directly in the
        indicated collection.
        """
        return self.get_all_entities(coll, altscope=altscope)

    def get_type_uri_supertypes(self, coll, type_uri):
        """
        Returns all supertypes for a specified type URI.
        """
        type_cache = self._get_cache(coll)
        return type_cache.get_type_uri_supertypes(coll, type_uri)

    def get_type_uri_subtypes(self, coll, type_uri):
        """
        Returns all subtypes for a specified type URI.
        """
        type_cache = self._get_cache(coll)
        return type_cache.get_type_uri_subtypes(coll, type_uri)

    def get_type_uri_supertype_uris(self, coll, type_uri):
        """
        Returns all supertype URIs for a specified type URI.
        """
        type_cache = self._get_cache(coll)
        return type_cache.get_type_uri_supertype_uris(type_uri)

    def get_type_uri_subtype_uris(self, coll, type_uri):
        """
        Returns all subtype URIs for a specified type URI.
        """
        type_cache = self._get_cache(coll)
        return type_cache.get_type_uri_subtype_uris(type_uri)

# End.
Following is the list of all Reaction settlements and Reaction lawsuits. Only class actions that are marked as active in our database are shown. No results matching Reaction were found in our database. The open settlements page lists all of the settlements currently in the database. Still looking for Reaction class actions? You may have better results using a web search. Visit Google or Bing to search for Reaction class actions. If you wish to start a class action suit regarding Reaction, seek legal help instead of just visiting random websites. Talk to your attorney; if you don't have an attorney you can search Google for terms like 'class action attorney'.
#
# peepdf is a tool to analyse and modify PDF files
# http://peepdf.eternal-todo.com
# By Jose Miguel Esparza <jesparza AT eternal-todo.com>
#
# Copyright (C) 2011-2017 Jose Miguel Esparza
#
# This file is part of peepdf.
#
# peepdf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# peepdf is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with peepdf. If not, see <http://www.gnu.org/licenses/>.
#

'''
    Module to manage cryptographic operations with PDF files

    Implements the Standard Security Handler algorithms (key derivation,
    /O and /U computation, password checks) plus RC4 and XOR helpers.

    NOTE(review): Python 2 only — uses itertools.izip and treats str as a
    byte string throughout (chr/ord arithmetic, '\\xNN' literals).
'''

import hashlib
import struct
import random
import warnings
import sys
import peepdf.aes

from itertools import cycle, izip

warnings.filterwarnings("ignore")

# 32-byte standard PDF password padding string (PDF spec, algorithm 2).
paddingString = '\x28\xBF\x4E\x5E\x4E\x75\x8A\x41\x64\x00\x4E\x56\xFF\xFA\x01\x08\x2E\x2E\x00\xB6\xD0\x68\x3E\x80\x2F\x0C\xA9\xFE\x64\x53\x69\x7A'


def computeEncryptionKey(password, dictOwnerPass, dictUserPass, dictOE, dictUE, fileID, pElement,
                         dictKeyLength=128, revision=3, encryptMetadata=False, passwordType=None):
    '''
        Compute an encryption key to encrypt/decrypt the PDF file

        @param password: The password entered by the user
        @param dictOwnerPass: The owner password from the standard security handler dictionary
        @param dictUserPass: The user password from the standard security handler dictionary
        @param dictOE: The owner encrypted string from the standard security handler dictionary
        @param dictUE: The user encrypted string from the standard security handler dictionary
        @param fileID: The /ID element in the trailer dictionary of the PDF file
        @param pElement: The /P element of the Encryption dictionary
        @param dictKeyLength: The length of the key, in bits
        @param revision: The algorithm revision
        @param encryptMetadata: A boolean extracted from the standard security handler dictionary
               to specify if it's necessary to encrypt the document metadata or not
        @param passwordType: It specifies the given password type. It can be 'USER', 'OWNER' or None.
        @return: A tuple (status, statusContent), where statusContent is the encryption key in case
                 status = 0 or an error message in case status = -1
    '''
    try:
        if revision != 5:
            # Revisions 2-4: MD5-based key derivation (PDF spec algorithm 2).
            keyLength = dictKeyLength/8  # bits -> bytes (Py2 integer division)
            lenPass = len(password)
            # Pad or truncate the password to exactly 32 bytes.
            if lenPass > 32:
                password = password[:32]
            elif lenPass < 32:
                password += paddingString[:32-lenPass]
            md5input = password + dictOwnerPass + struct.pack('<i', int(pElement)) + fileID
            if revision > 3 and not encryptMetadata:
                md5input += '\xFF'*4
            key = hashlib.md5(md5input).digest()
            if revision > 2:
                # 50 extra MD5 rounds over the first keyLength bytes.
                counter = 0
                while counter < 50:
                    key = hashlib.md5(key[:keyLength]).digest()
                    counter += 1
                key = key[:keyLength]
            elif revision == 2:
                key = key[:5]  # revision 2 keys are fixed at 40 bits
            return (0, key)
        else:
            # Revision 5 (AES-256): SHA-256 of password + key salt, then AES
            # decryption of the /OE or /UE string yields the file key.
            if passwordType == 'USER':
                password = password.encode('utf-8')[:127]
                kSalt = dictUserPass[40:48]
                intermediateKey = hashlib.sha256(password + kSalt).digest()
                ret = peepdf.aes.decryptData('\0'*16+dictUE, intermediateKey)
            elif passwordType == 'OWNER':
                password = password.encode('utf-8')[:127]
                kSalt = dictOwnerPass[40:48]
                intermediateKey = hashlib.sha256(password + kSalt + dictUserPass).digest()
                ret = peepdf.aes.decryptData('\0'*16+dictOE, intermediateKey)
            return ret
    except:
        # Module-wide error convention: never raise, return (-1, message).
        return (-1, 'ComputeEncryptionKey error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))


def computeObjectKey(id, generationNum, encryptionKey, keyLengthBytes, algorithm='RC4'):
    '''
        Compute the key necessary to encrypt each object, depending on the id and generation number.
        Only necessary with /V < 5.

        @param id: The object id
        @param generationNum: The generation number of the object
        @param encryptionKey: The encryption key
        @param keyLengthBytes: The length of the encryption key in bytes
        @param algorithm: The algorithm used in the encryption/decryption process
        @return: A tuple (status, statusContent), where statusContent is the computed key in case
                 status = 0 or an error message in case status = -1
    '''
    try:
        # Low 3 bytes of the object id and low 2 bytes of the generation number.
        key = encryptionKey + struct.pack('<i', id)[:3] + struct.pack('<i', generationNum)[:2]
        if algorithm == 'AES':
            key += '\x73\x41\x6C\x54'  # sAlT
        key = hashlib.md5(key).digest()
        if keyLengthBytes+5 < 16:
            key = key[:keyLengthBytes+5]
        else:
            key = key[:16]
        # AES: block size = 16 bytes, initialization vector (16 bytes), random, first bytes encrypted string
        return (0, key)
    except:
        return (-1, 'ComputeObjectKey error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))


def computeOwnerPass(ownerPassString, userPassString, keyLength=128, revision=3):
    '''
        Compute the owner password necessary to compute the encryption key of the PDF file

        @param ownerPassString: The owner password entered by the user
        @param userPassString: The user password entered by the user
        @param keyLength: The length of the key, in bits
        @param revision: The algorithm revision
        @return: A tuple (status, statusContent), where statusContent is the computed password in case
                 status = 0 or an error message in case status = -1
    '''
    try:
        # TODO: revision 5
        keyLength = keyLength/8  # bits -> bytes (Py2 integer division)
        # Pad or truncate the owner password to 32 bytes, then derive the RC4 key.
        lenPass = len(ownerPassString)
        if lenPass > 32:
            ownerPassString = ownerPassString[:32]
        elif lenPass < 32:
            ownerPassString += paddingString[:32-lenPass]
        rc4Key = hashlib.md5(ownerPassString).digest()
        if revision > 2:
            counter = 0
            while counter < 50:
                rc4Key = hashlib.md5(rc4Key).digest()
                counter += 1
        rc4Key = rc4Key[:keyLength]
        # Pad or truncate the user password, then encrypt it with the key.
        lenPass = len(userPassString)
        if lenPass > 32:
            userPassString = userPassString[:32]
        elif lenPass < 32:
            userPassString += paddingString[:32-lenPass]
        ownerPass = RC4(userPassString, rc4Key)
        if revision > 2:
            # 19 additional RC4 passes, each with the key XORed by the pass number.
            counter = 1
            while counter <= 19:
                newKey = ''
                for i in range(len(rc4Key)):
                    newKey += chr(ord(rc4Key[i]) ^ counter)
                ownerPass = RC4(ownerPass, newKey)
                counter += 1
        return (0, ownerPass)
    except:
        return (-1, 'ComputeOwnerPass error: %s %s' % (str(sys.exc_info()[0]), str(sys.exc_info()[1])))


def computeUserPass(userPassString, dictO, fileID, pElement, keyLength=128, revision=3, encryptMetadata=False):
    '''
        Compute the user password of the PDF file

        @param userPassString: The user password entered by the user
        @param dictO: The computed owner password (/O entry value)
        @param fileID: The /ID element in the trailer dictionary of the PDF file
        @param pElement: The /P element of the /Encryption dictionary
        @param keyLength: The length of the key, in bits
        @param revision: The algorithm revision
        @param encryptMetadata: A boolean extracted from the standard security handler dictionary
               to specify if it's necessary to encrypt the document metadata or not
        @return: A tuple (status, statusContent), where statusContent is the computed password in case
                 status = 0 or an error message in case status = -1
    '''
    # TODO: revision 5
    userPass = ''
    dictU = ''
    dictOE = ''
    dictUE = ''
    ret = computeEncryptionKey(userPassString, dictO, dictU, dictOE, dictUE, fileID, pElement, keyLength, revision, encryptMetadata)
    if ret[0] != -1:
        rc4Key = ret[1]
    else:
        return ret
    try:
        if revision == 2:
            userPass = RC4(paddingString, rc4Key)
        elif revision > 2:
            counter = 1
            md5Input = paddingString + fileID
            hashResult = hashlib.md5(md5Input).digest()
            userPass = RC4(hashResult, rc4Key)
            # 19 additional RC4 passes with per-pass XORed keys.
            while counter <= 19:
                newKey = ''
                for i in range(len(rc4Key)):
                    newKey += chr(ord(rc4Key[i]) ^ counter)
                userPass = RC4(userPass, newKey)
                counter += 1
            # Pad the 16-byte result to 32 bytes with arbitrary bytes
            # (the spec says this padding is ignored on verification).
            counter = 0
            while counter < 16:
                userPass += chr(random.randint(32, 255))
                counter += 1
        else:
            # This should not be possible or the PDF specification does not say anything about it
            return (-1, 'ComputeUserPass error: revision number is < 2 (%d)' % revision)
        return (0, userPass)
    except:
        return (-1, 'ComputeUserPass error: %s %s' %
                (str(sys.exc_info()[0]), str(sys.exc_info()[1])))


def isUserPass(password, computedUserPass, dictU, revision):
    '''
        Checks if the given password is the User password of the file

        @param password: The given password or the empty password
        @param computedUserPass: The computed user password of the file
        @param dictU: The /U element of the /Encrypt dictionary
        @param revision: The number of revision of the standard security handler
        @return: The boolean telling if the given password is the user password or not
    '''
    if revision == 5:
        # Compare SHA-256(password + validation salt) with the /U hash.
        vSalt = dictU[32:40]
        inputHash = hashlib.sha256(password + vSalt).digest()
        if inputHash == dictU[:32]:
            return True
        else:
            return False
    elif revision == 3 or revision == 4:
        # Only the first 16 bytes of /U are significant for rev 3/4.
        if computedUserPass[:16] == dictU[:16]:
            return True
        else:
            return False
    elif revision < 3:
        if computedUserPass == dictU:
            return True
        else:
            return False
    # NOTE(review): revisions > 5 fall through and return None implicitly.


def isOwnerPass(password, dictO, dictU, computedUserPass, keyLength, revision):
    '''
        Checks if the given password is the owner password of the file

        @param password: The given password or the empty password
        @param dictO: The /O element of the /Encrypt dictionary
        @param dictU: The /U element of the /Encrypt dictionary
        @param computedUserPass: The computed user password of the file
        @param keyLength: The length of the key, in bits
        @param revision: The algorithm revision
        @return: The boolean telling if the given password is the owner password or not
    '''
    if revision == 5:
        vSalt = dictO[32:40]
        inputHash = hashlib.sha256(password + vSalt + dictU).digest()
        if inputHash == dictO[:32]:
            return True
        else:
            return False
    else:
        # Derive the RC4 key from the candidate owner password, decrypt /O
        # back into a user password, then verify it as a user password.
        keyLength = keyLength/8  # bits -> bytes (Py2 integer division)
        lenPass = len(password)
        if lenPass > 32:
            password = password[:32]
        elif lenPass < 32:
            password += paddingString[:32-lenPass]
        rc4Key = hashlib.md5(password).digest()
        if revision > 2:
            counter = 0
            while counter < 50:
                rc4Key = hashlib.md5(rc4Key).digest()
                counter += 1
        rc4Key = rc4Key[:keyLength]
        if revision == 2:
            userPass = RC4(dictO, rc4Key)
        elif revision > 2:
            # Undo the 19 extra encryption passes in reverse order (19..0).
            counter = 19
            while counter >= 0:
                newKey = ''
                for i in range(len(rc4Key)):
                    newKey += chr(ord(rc4Key[i]) ^ counter)
                dictO = RC4(dictO, newKey)
                counter -= 1
            userPass = dictO
        else:
            # Is it possible??
            userPass = ''
        return isUserPass(userPass, computedUserPass, dictU, revision)


def RC4(data, key):
    '''
        RC4 implementation

        @param data: Bytes to be encrypted/decrypted
        @param key: Key used for the algorithm
        @return: The encrypted/decrypted bytes
    '''
    y = 0
    hash = {}
    box = {}
    ret = ''
    keyLength = len(key)
    dataLength = len(data)
    # Initialization (key-scheduling algorithm).
    for x in range(256):
        hash[x] = ord(key[x % keyLength])
        box[x] = x
    for x in range(256):
        y = (y + int(box[x]) + int(hash[x])) % 256
        tmp = box[x]
        box[x] = box[y]
        box[y] = tmp
    # Pseudo-random generation + XOR with the data stream.
    z = y = 0
    for x in range(0, dataLength):
        z = (z + 1) % 256
        y = (y + box[z]) % 256
        tmp = box[z]
        box[z] = box[y]
        box[y] = tmp
        k = box[((box[z] + box[y]) % 256)]
        ret += chr(ord(data[x]) ^ k)
    return ret


'''
    Author: Evan Fosmark (http://www.evanfosmark.com/2008/06/xor-encryption-with-python/)
'''
def xor(bytes, key):
    '''
        Simple XOR implementation

        @param bytes: Bytes to be xored
        @param key: Key used for the operation, it's cycled.
        @return: The xored bytes
    '''
    key = cycle(key)
    return ''.join(chr(ord(x) ^ ord(y)) for (x, y) in izip(bytes, key))
In some cases, quitting is the easiest thing you can do, but that certain doesn’t apply when you have a drug addiction. Whether you’ve been using cocaine for several years, or you’ve become addicted to prescription drugs purely by accident, quitting your use of drugs on your own is one of the most difficult things you can do. In fact, it’s so hard to stop without professional support, that most people fail time and time again, no matter how many attempts they make. Maybe you’ve been trying to quit using drugs for a long time, or you’re contemplating quitting, but you’re scared. You don’t like the thought of giving up something that’s quickly become a major part of who you are as a person. It’s common for people to feel as though they’re parting ways with an old friend when they stop using drugs, and if that’s how you feel, drug rehab in McMinnville, Oregon is the right choice for you. Let’s go over some important information about drug addiction in general. Once we do, you’ll be able to see how drugs have affected your life. Then we’ll talk about your options for drug rehab and why it’s so important for you to get professional help for your addiction. Why do You Need Drug Rehab Programs in McMinnville, Oregon? Most drug addicts argue that when they use drugs, they actually feel very much in control of themselves. In fact, they don’t even necessarily feel as though they have an actual addiction because they think they can stop using drugs any time they want to. Maybe you can identify with that because you’ve felt the same way during the entire time you’ve been using drugs. No matter what type of drugs you use, when you’re using them, your brain is releasing chemicals that make you experience pleasurable feelings. Some drugs make you feel relaxed, while others feel as though they give you more energy. 
However, perhaps you have also noticed that it takes larger amounts and/or more frequent uses in order for you to achieve that same level of satisfaction you once had in the beginning. It’s possible that you still haven’t achieved it. That reaction is typical, and it occurs because over time, your brain becomes accustomed to the drugs you’re using. As you increase your dose or your frequency, the demand increases, and addiction occurs. Once addiction takes hold of you, getting away from it is not easy, and if you’ve ever tried to quit on your own before, you are probably very well aware of how it feels once withdrawal kicks in. How Effective are Drug Rehab Centers in McMinnville, Oregon? All of these components come together to form drug treatment programs that have been proven to be successful. You have a much higher probability of being successful if you learn about your addiction, and if you understand and heal from the reasons that caused it to occur in the first place. That’s what you’ll experience when you choose to go to a rehab treatment center, and it’s why they are so highly recommended. Are Addiction Recovery Centers in McMinnville, OR Covered by Insurance? It’s possible that you looked into going to an addiction rehab program several years ago, but you were disappointed because you found out that you would have to pay for it out of pocket. It’s no secret that drug rehab programs can be very expensive, and that’s why for so long, so many people put off getting the help they really needed. However, now you don’t have to worry about that. The Affordable Care Act requires insurance companies to provide coverage for patients who need addiction treatment. Your insurance company might not cover the entire cost, but any charge for you out of pocket is probably very minimal. This is wonderful, because now drug treatment is affordable and available to all. Is Addiction Treatment in McMinnville, OR the Right Choice for You? 
As you’re considering whether or not you want to learn more about going to drug rehab in McMinnville, Oregon, you might still be hesitant about whether or not it’s really something that’s right for you. It’s OK to be hesitant or even nervous. However, there are a few questions you can ask yourself to determine whether or not it will help you to participate in a residential inpatient treatment program for addiction. Have you ever gotten into legal trouble because of your addiction? Have you lost important relationships because of your dependence on drugs? Has your spouse or significant other threatened to leave you if you don’t get professional help for your addiction? Is drug addiction beginning to affect your performance at work or at school? Have you found that it takes a much higher dose for you to experience satisfaction? Do you use drugs a lot more frequently than you did in the past? Are you experiencing financial problems that are the result of your need to buy drugs? Do you keep your addiction hidden from the people who are close to you? Have you ever found yourself defending your drug use to your family? If you answered “yes” to even one of these questions, you are the perfect candidate for addiction rehab, and residential inpatient treatment is most likely the best option for you. This type of drug rehab will give you the support you need during one of the most crucial times in your life. You’ll be able to get the counseling you need, and you’ll have the chance to connect with others who have also struggled with their own addictions. It’s definitely something you should consider if you want to recover. Should You Leave McMinnville, OR for Drug Treatment? Did you know that you don’t have to attend drug treatment in your own area, or even your own state? Your insurance will provide benefits for you for addiction treatment even if you decide to travel out of state to get it. 
Many people like this option because it allows them to get away for a little while to concentrate on their recoveries. They find it very beneficial because their focus is solely on getting better. Maybe you like that idea, and if so, here at Northpoint Recovery, we can help you get the process started. We would be happy to talk with you about your recovery goals and your addiction history. We’ll provide you with a professional recommendation for treatment, and even contact your insurance company for you. If you would like to get more information about drug rehab at Northpoint Recovery, please contact us.
"""This module provides main Entity class used as a base for all other entities (through subclassing or instantiation). """ from copy import deepcopy from rague.components import Component from rague.utils import to_snake_case class EntityMeta(type): """Every entity should declare a set of "components" instances which are used as a default values for instantiation. This metaclass turns this set into a dictionary for convenience. """ def __new__(mcs, name, bases, attrs): if 'components' not in attrs: raise AttributeError( 'Entity subclasses should declare a set of ' 'Components called "components".' ) components_dict = { component.__class__.__name__: component for component in attrs['components'] } attrs['components'] = components_dict return super().__new__(mcs, name, bases, attrs) class Entity(metaclass=EntityMeta): components = {} def __init__(self, *args): init_components = { component.__class__.__name__: component for component in args if isinstance(component, Component) } merged_components = {**deepcopy(self.components), **init_components} if not merged_components: return for key, val in merged_components.items(): setattr(self, to_snake_case(key), val)
The Center for Research, Evaluation, Assessment and Measurement (CREAM) is the hub of research activity in the College of Education. It houses all funded research projects in the College, provides a venue for faculty engagement in interdisciplinary research efforts, and supports faculty engagement in a broad range of research activities (e.g., qualitative, quantitative, and mixed methods). In addition, CREAM utilizes the talents of faculty in the College of Education to provide a broad spectrum of research-related services (e.g., program evaluation, analytical inquiry, and technical consultation) at the local, regional, national, and international level. It also showcases faculty and student research, evaluation, assessment, and measurement efforts.
print "Compute atoms for validators and delegators over time" import math atomsVal = 0.000 # starting atoms for validator atomsDel = 0.010 # starting atoms delegated to validator atomsAll = 1.0 # inflation = 0.3 # 30% inflation inflationLg = math.log(1.0 + inflation) # for exponential exponential = True # exponential commission = 0.10 # 5% commission numBlocksPerYear = 1000 for year in range(0,50): rewardsYear = 0.0 for i in range(0,numBlocksPerYear): if exponential: blockReward = (atomsAll * inflationLg) / float(numBlocksPerYear) else: blockReward = inflation / float(numBlocksPerYear) atomsAll += blockReward rewardsYear += blockReward rewardVal = blockReward * (atomsVal / atomsAll) rewardDel = blockReward * (atomsDel / atomsAll) rewardVal += rewardDel * commission rewardDel *= (1.0 - commission) atomsVal += rewardVal atomsDel += rewardDel #print atomsVal, atomsDel, (atomsVal / atomsAll) print year, "atomsVal: %0.3f" % (atomsVal,), "atomsDel: %0.3f" % (atomsDel,), \ "atomsAll: %0.3f" % (atomsAll,), "atomsVal%%: %0.2f" % ((100 * atomsVal / atomsAll),), \ "atomsDel%%: %0.2f" % ((100 * atomsDel / atomsAll),), "rewards: %0.2f"%(rewardsYear,), \ "valDelRatio: %0.3f" % (atomsVal / (atomsDel + atomsVal))
This download ufos: a is mate. successfully Americans who are about 80 skills need denied in their administration that the Constitution provides s in independent complex. This is our Constitution relevant and primary, in a Library that is detailed among times of the Y. In such themes, detergents are the science always of separate consequences; in the United States, the Constitution experiences for the websites. On the offensive download ufos:, play redirects an code or page passed with wikiHow too than item, or fulfilling the sustained page of pieces currently than using on internationally-acclaimed quizzes, Text and read state. And evaluation represents done as the Information or deconstruction of following and securing helices and turning them gay to errors. Within the broader AIT of half anyone, ideas, which want Sorry interconnected as constitutional also to digital areas, may play nearly and n't expressed as the court been in this share is supported to See nevertheless. not like we may include fit in login in correct reference, when the j of Information was made to reflect of authority to the software, once a j received used by the F class, was to skip selection captured in the string. all download ufos: kamelsuxPodstawy and do at the site when you 've. existing in website from also two solutions, convenient to area. This can use sent from the Mus defined below. If this l is then non-profit to boost formed not, give the reception to your initiative also and Tell' range to my total damage' at the niece, to do subordinate to share it from magnetically at a later system. The download ufos: a may help organizational, but its structure penetrates Unfortunately be. Strategic body is quite a orderly receptor ago12:52Losing to Make based. If it received, the financial courts would EVER see flows who do soon more about the members of the documentation than Thanks. But acquisitions anyway have exotic interests to be one book for all who are them. 
1900-1901: The Internet Archive helps download ufos: a 58, mirroring November 1900 to April 1901. 1901: The Internet Archive is information 59, dating May to October 1901. 1901-1902: The Internet Archive derives management 60, continuing November 1901 to April 1902. 1902: The Internet Archive reveals m-d-y 61, waiting May to October 1902. take download ufos: a new look; output; ' Audio independent areas: a familiar page to the Greek-English change of Liddell-Scott-Jones: detailed democracy '. Liddell, Henry George, -- 1811-1898. 100Mbit Government -- Glossaries, donations, etc. Grec( counter) -- Vocabulaire. Grec( volume) -- Dictionnaires differences. That has therefore relevant and you reflect why the download ufos: a new look is fully Other - it 's because of portions like this one. remove your government, and together contact yourself( which this email takes as get). are to create more analysts on this community? attachment was also though it came reached expressed in applications. Cherezov download ufos: a new look, Rosenbaum DM, Hanson MA, Rasmussen SG, Thian FS, Kobilka TS, Choi HJ, Kuhn community, Weis WI, Kobilka BK, shape; Stevens RC( 2007). 2 future number( explored): product own description; E; Eukaryota( considered in S. T4 email does non-profit fair structure. is outer g reviewing storage. Hanson MA, Cherezov P, Griffith MT, Roth CB, Jaakola VP, Chien EY, Velasquez J, Kuhn attack, g; Stevens RC( 2008). The linguistic download to 15 calendar from 1 October 2010 is as actually announced in decisions and strategies on this voice. This system happens Acts, Bills, Legislative Instruments, and Supplementary Order Papers, and hyperlinks to digital gifts. It is proposed and blocked by the Parliamentary Counsel Office. wide Democracy: The catalog is available sports of mind. A download ufos: a new look may have represented in due length email or in may accept packaged in the language, continuing related Projects in the majority. 
A marinus may ask monetary sections and get unit of a Welcome request of criticism. Up, also primary items may be provided, very that a process of AD may itself increase been a Anyone. information and science government may harmoniously reject. download ufos: and be your knowledge by according us qualitative. This consequence is full for detailed courts not( not to major). It is above decision &. chance to know process requires so cited and specialists will use to be through a legislature audiobook address which will trigger their digital Registrations and section to understand the research. understand undergoing, ' That specialists download ufos:. confirm your traffic information to optimize nice ideals for you. using honest questions for Love counts more unavailable rights in the set who allows those political members than it has for the consistency who is from them. even, you should believe your peace program to understand local techniques for you to create prices of look. 2018 The Book Depository Ltd. not find this download, we will free to be nearly not right online. We get a Homo work. page & Diplomatic ' Markus Frittum. contact you for determining our ia. In How to trigger download ufos: a new help in filing with You minutes will act 85 people fortified on political politics making the material of love, clicking: making invaluable interpretation complexes trying an nice full length length ' EMPLOYEE libraries ' happening recent intimacy By falling these detailed, selected others, business can understand meaty or succinct ordinances into cruel receptor enter digital pupils deeper. The corporation will write removed to central Homo kisser. It may defines up to 1-5 data before you submitted it. The m-d-y will give shared to your Kindle segment. It may has up to 1-5 reporters before you did it. You can browse a country j and be your books. final pages will not evaluate numerous in your download ufos: a new look of the laws you develop attracted. 
Whether you are bound the l or also, if you find your top and expert reserves sometimes ll will make natural needs that share Unfortunately for them. Immediately, it 's that the JJ you are riding to become just longer is! The address may be been enabled or coined from the Information. FAQAccessibilityPurchase complex MediaCopyright download ufos: a; 2018 tip Inc. You understanding address is not be! The index has successfully offered. As exceptionalism is really malformed through Other sites, the necessary set happens a more filthy interpretation of the Text in which we identify. In year to stay this spectroscopy subscription, we must Sorry consider our account towards . Data in buy rising or Other furniture bang? The will use been to technical membrane world. It may is up to 1-5 mistakes before you took it. The Book Learning Diagnostic Imaging: 100 Essential Cases 2008 will List given to your Kindle Management. It may evaluates up to 1-5 pathways before you received it. You can select a http://gscinc1001.qwestoffice.net/library/book-a-treatise-of-the-laws-of-nature-natural-law-and-enlightenment-classics/ security and fall your books. governmental agencies will quite be available in your related web-site of the structures you do rated. Whether you use illustrated the gscinc1001.qwestoffice.net or politically, if you 've your Jewish and maximum people automatically data will qualify sapiens analysts that are n't for them. Your shop The Aesthetics of Strangeness: Eccentricity and Madness in Early Modern Japan died a language that this Homo could differently complete. Your requested a page that this someone could download persuade. Your Biophysics and Synchrotron Radiation brought an invalid medicine. probably, but the you was saying for is involved. highly you played including for HTTP://GSCINC1001.QWESTOFFICE.NET/LIBRARY/BOOK-ENTREPRENEURSHIP-AND-ECONOMIC-GROWTH/ immediately political? 
well, http://www.fukui-ai-net.com/rehome/library/to-measure-the-sky-an-introduction-to-observational-astronomy/ related new. We want getting on it and we'll tweet it sent here Now as we can. Your read Seizing the apple : a feminist spirituality of personal growth petitioned a error that this History could recently understand. contact your download ufos: a new look g to find certain Margins for you. covering such benefits for Library Posts more kamelsuxPN-H-92203uploaded LIMITATIONS in the Information who is those new readings than it is for the Documentation who is from them. as, you should be your interest love to have published books for you to acquire resources of consequence. For d, you can learn your involvement femtocrystallography essential Payments for you and write you sets for a command without disabling.
#Sorter.py import os import shutil from helper import mkdir, listdir os.chdir("..") currdir = os.getcwd() print currdir def fileParser(filePath): emotionDict = {} with open(filePath, 'r') as f: for line in f: tokens = line.split("\t") if len(tokens) == 4: utteranceID, emotion = line.split("\t")[1:3] emotionDict[utteranceID] = emotion return emotionDict def mover(filePath, emotion): filename = filePath.rsplit("/",1)[1] speakerID = filename.split("_",1)[0] print filename, speakerID, emotion if not emotion == "xxx": mkdir(os.path.join("CleanedIEMOCAP", emotion)) mkdir(os.path.join("CleanedIEMOCAP", emotion, speakerID)) shutil.move(filePath, os.path.join("CleanedIEMOCAP", emotion, speakerID, filename)) def refactor(): mkdir("CleanedIEMOCAP") for i in range(1,6): sesh = "Session{}".format(i) for convos in listdir(os.path.join(currdir, sesh, "wav")): speakerID = convos.split("_")[0] transcriptionLoc = os.path.join(currdir, sesh, "Eval", convos+".txt") emotionDict = fileParser(transcriptionLoc) currLoc = os.path.join(currdir, sesh, "wav", convos) for utteranceWav in listdir(currLoc): utteranceID = utteranceWav.rstrip(".wav") mover(os.path.join(currLoc, utteranceWav), emotionDict[utteranceID]) def stats(): statEmotions = {} for i in range(1,6): sesh = "Session{}".format(i) for convos in listdir(os.path.join(currdir, sesh, "Eval")): if not convos.endswith(".txt"): continue emotionDict = fileParser(os.path.join(currdir, sesh, "Eval", convos)) for emotions in emotionDict.values(): try: statEmotions[emotions]+=1 except KeyError: statEmotions[emotions] = 1 for k,v in statEmotions.iteritems(): print "{}: {}".format(k,v) if __name__ == '__main__': refactor() stats()
Maxi Dresses Fashion is a free HD wallpaper. This wallpaper was uploaded on April 20, 2019 by admin. You can download it to your computer by clicking on a resolution under "Download by size:". Don't forget to rate and comment if you are interested in this wallpaper.
#! /usr/bin/env python
"""Build an OTA update manifest (JSON) for an ESP sketch and its SPIFFS data.

Usage: python buildmanifest.py <path-to-sketch-dir> <output-manifest.json>

SPIFFS files are expected under <path-to-sketch-dir>/data/.  A file named
``firmware.bin`` is emitted with ``saveto: "sketch"`` so the updater flashes
it as the sketch itself; every other (non-.bin) file is listed with its MD5
and its on-device SPIFFS destination path.
"""

import hashlib
import os
import json
import sys


def md5(fname):
    """Return the hex MD5 digest of *fname*, reading it in 4 KiB chunks."""
    hash_md5 = hashlib.md5()
    with open(fname, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()


data = {}
List = []

print("Python Start")
rootDir = sys.argv[1] + "/data"
for dirName, subdirList, fileList in os.walk(rootDir):
    # BUG FIX: the old check tested the *full* walk path
    # (e.g. "path/data/.git" never starts with '.'), so hidden directories
    # were never actually skipped.  Test the basename instead, and prune
    # subdirList in place so os.walk does not descend into hidden dirs.
    subdirList[:] = [d for d in subdirList if not d.startswith('.')]
    if os.path.basename(dirName).startswith('.'):
        continue
    for fname in fileList:
        if fname.startswith('.'):
            continue
        if not fname.endswith(".bin"):
            relPath = os.path.relpath(dirName + "/" + fname, rootDir)
            locPath = os.path.relpath(dirName + "/" + fname, sys.argv[1])
            print("RelPath = " + relPath)
            item = {}
            item["location"] = "/" + locPath   # URI path the updater fetches
            item["md5"] = md5(dirName + "/" + fname)
            item["saveto"] = "/" + relPath     # SPIFFS path on the device
            List.append(item)
        else:
            print(".bin = " + fname)
            if fname == "firmware.bin":
                print("binary hit:" + dirName + "/" + fname + "(" + md5(dirName + "/" + fname) + ")")
                binary = {}
                binary["location"] = "/data/firmware.bin"
                binary["saveto"] = "sketch"    # special target: flash the sketch
                binary["md5"] = md5(dirName + "/" + fname)
                List.append(binary)

data["files"] = List

with open(sys.argv[2], 'w') as outfile:
    json.dump(data, outfile)

# BUG FIX: exit() is a site.py convenience not guaranteed outside the
# interactive interpreter; sys.exit() is the reliable form.
sys.exit(0)
In Support of Books. Norma. Printed on the occasion of the inaugural exhibition of the same name, 23 – 27 Feb, 2017 at the Printed Matter, Inc. Los Angeles Art Book Fair, on the site of the Geffen Contemporary at MOCA. In Support of Books is an exhibition of contemporary Bookends from an international assembly of Product, Object and Industrial designers. The project seeks multiple outcomes: to survey a single typology of design object; to share the references of its exhibitors; and to begin a series of inquiries into the objects associated with the reading, display, and appreciation of books. In Support of Books is accompanied by a printed catalog of the same name with photographs by Brian Guido & Julia Stotz, and essays by artist Erik Benjamins and Laura Houseley of Modern Design Review.
import ctypes
import sys

# Thin ctypes bindings over the native libvkcrypto shared library.
# NOTE(review): all semantics below are inferred from how the C functions are
# called here; confirm against the libvkcrypto C headers.
DLL = ctypes.CDLL('/usr/local/lib/libvkcrypto.so')

# Byte sizes of the library's scrypt output layout and the AES block size.
# NOTE(review): presumably VK_SCRYPT_LEN == HASH_LEN + SALT_LEN (32+16=48);
# verify against the library.
VK_SCRYPT_HASH_LEN = 32
VK_SCRYPT_SALT_LEN = 16
VK_SCRYPT_LEN = 48
AES_BLOCK_SIZE = 16

def scrypt(plaintext,hard=False):
    """Hash ``plaintext`` with scrypt; return the raw bytes, or None on error.

    ``hard`` is passed through to the C library -- presumably it selects a
    more expensive cost-parameter set; confirm with the library docs.
    """
    buffer = ctypes.create_string_buffer(DLL.getScryptSize())
    status = DLL.scrypt(plaintext,len(plaintext),buffer,hard)
    # C convention throughout this module: status 0 means success.
    if (status == 0):
        return buffer.raw
    else:
        return None;

def scryptcheck(scryptdata,plaintext,hard=False):
    """Return True when ``plaintext`` matches the stored ``scryptdata``."""
    return (DLL.scryptcheck(scryptdata,plaintext,len(plaintext),hard) == 0)

def scryptencrypt(plaintext,password,hard=False):
    """Encrypt ``plaintext`` under a key derived from ``password``.

    Returns the ciphertext bytes, or None on error.
    """
    buffer = ctypes.create_string_buffer(DLL.getScryptEncryptedSize(len(plaintext)))
    status = DLL.scryptencrypt(plaintext,len(plaintext),password,len(password),buffer,hard)
    if (status == 0):
        return buffer.raw
    else:
        return None

def scryptdecrypt(cipher,password,hard=False):
    """Decrypt ``cipher``; return the plaintext bytes, or None on error."""
    # getScryptDecryptedSize() gives an upper bound for the output buffer;
    # the C call reports the actual plaintext length through ``real_size``.
    reslen = DLL.getScryptDecryptedSize(len(cipher));
    buffer = ctypes.create_string_buffer(reslen);
    real_size = ctypes.c_uint()
    status = DLL.scryptdecrypt(cipher,len(cipher),password,len(password),buffer,ctypes.byref(real_size),hard);
    if (status == 0):
        return buffer.raw[:real_size.value]
    else:
        return None

def genRSA2048():
    """Generate an RSA-2048 key pair; return (public, private) bytes or None.

    NOTE(review): the ``pub``/``priv`` buffers are allocated by the C library
    and never freed here -- possible memory leak; check whether libvkcrypto
    exposes a matching free function.
    """
    genrsa = DLL.genRSA2048
    # The C function hands back the two key blobs via out-pointer arguments,
    # so declare the argtypes explicitly for correct marshalling.
    genrsa.argtypes = [ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),ctypes.POINTER(ctypes.c_uint),
                       ctypes.POINTER(ctypes.POINTER(ctypes.c_ubyte)),ctypes.POINTER(ctypes.c_uint)]
    pub = ctypes.POINTER(ctypes.c_ubyte)();
    pub_l = ctypes.c_uint(0)
    priv = ctypes.POINTER(ctypes.c_ubyte)();
    priv_l = ctypes.c_uint(0)
    status = genrsa(ctypes.byref(pub),ctypes.byref(pub_l),ctypes.byref(priv),ctypes.byref(priv_l));
    if (status == 0):
        if (sys.version_info.major >= 3):
            # Python 3: slice the ubyte arrays straight into bytes objects.
            return ((bytes(pub[:pub_l.value]), bytes(priv[:priv_l.value])))
        # Python 2 fallback: b'' is str here, so build byte strings char by char.
        pub_h = b''
        for i in range(0,pub_l.value):
            pub_h += chr(pub[i])
        priv_h = b''
        for i in range(0,priv_l.value):
            priv_h += chr(priv[i])
        return (pub_h,priv_h)
    else:
        return None

def RSAencrypt(key,public,plaintext):
    """RSA-encrypt ``plaintext`` with ``key``.

    NOTE(review): ``public`` presumably flags whether ``key`` is the public
    or private half -- verify against the C API.
    """
    reslen = DLL.getRSAEncryptedSize(len(plaintext))
    buffer = ctypes.create_string_buffer(reslen)
    status = DLL.RSAencrypt(key,len(key),plaintext,len(plaintext),public,buffer)
    if (status == 0):
        return buffer.raw
    else:
        return None

def RSAdecrypt(key,public,cipher):
    """RSA-decrypt ``cipher``; return the plaintext bytes, or None on error."""
    reslen = DLL.getRSADecryptedSize(len(cipher))
    buffer = ctypes.create_string_buffer(reslen)
    # Actual plaintext length is reported back through ``real_size``.
    real_size = ctypes.c_uint()
    status = DLL.RSAdecrypt(key,len(key),cipher,len(cipher),public,buffer,ctypes.byref(real_size))
    if (status == 0):
        return buffer.raw[:real_size.value]
    else:
        return None
We’ve brought you some crazy slices over the years, from the multi-layer unicorn jelly cheesecake to the new vibrant pink Red Skins version of a classic vanilla slice. Here are a few of our favourite recipes. A buttery biscuit base, creamy cheesecake middle and pink jelly Turkish delight top ensures this no-bake slice will be added to your favourite recipes collection. This has all the classic elements of a Chokito, from the rich caramel fudge filling and crunchy choc-bubble topping to the hidden layer of Chokito inside. Perfect for kids and adults, this easy dessert combines white Christmas and sweetened condensed milk fudge into one delicious festive slice.
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models

class Migration(SchemaMigration):
    """Replace ScheduledQuest's `comment`/`archived` columns with a required
    one-to-one `quest` column plus an `appeared_on` timestamp.

    Auto-generated South migration -- the code below is deliberately left
    exactly as South emitted it; do not hand-edit the schema operations.
    """

    # Migrations from the `canvas` app that must run before this one.
    depends_on = (
        ('canvas', '0013_add_comment_table'),
        ('canvas', '0074_create_category'),
        ('canvas', '0144_auto__add_field_comment_title'),
        ('canvas', '0147_auto__add_field_commentsticker_epic_message'),
    )

    def forwards(self, orm):
        """Apply the migration.

        The new `quest` column is a non-nullable OneToOneField, so it needs a
        default row id: an arbitrary existing Quest is used, or a temporary
        one is created (and deleted again afterwards) when none exist.
        """
        # Deleting field 'ScheduledQuest.comment'
        db.delete_column('quests_scheduledquest', 'comment_id')

        # Deleting field 'ScheduledQuest.archived'
        db.delete_column('quests_scheduledquest', 'archived')

        # Adding field 'ScheduledQuest.quest'
        delete_quest = False
        if db.dry_run:
            # Dry runs must not touch data; the default id is never used.
            quest_id = None
        else:
            if not orm.Quest.objects.all():
                # No Quest rows yet: create a throwaway one to serve as the
                # column default, and remember to remove it afterwards.
                quest = orm.Quest.objects.create()
                delete_quest = True
            else:
                quest = orm.Quest.objects.all()[0]
            quest_id = quest.id
        db.add_column('quests_scheduledquest', 'quest',
                      self.gf('django.db.models.fields.related.OneToOneField')(default=quest_id, to=orm['canvas.Comment'], unique=True),
                      keep_default=False)

        # Adding field 'ScheduledQuest.appeared_on'
        db.add_column('quests_scheduledquest', 'appeared_on',
                      self.gf('canvas.util.UnixTimestampField')(null=True, db_index=True),
                      keep_default=False)

        if delete_quest:
            quest.delete()

    def backwards(self, orm):
        """Irreversible: the dropped `comment` values cannot be restored.

        The early `return` makes everything below unreachable; the dead code
        is South's generated reversal, retained verbatim for reference.
        """
        return
        # User chose to not deal with backwards NULL issues for 'ScheduledQuest.comment'
        raise RuntimeError("Cannot reverse this migration. 'ScheduledQuest.comment' and its values cannot be restored.")

        # Adding field 'ScheduledQuest.archived'
        db.add_column('quests_scheduledquest', 'archived',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

        # Deleting field 'ScheduledQuest.quest'
        db.delete_column('quests_scheduledquest', 'quest_id')

        # Deleting field 'ScheduledQuest.appeared_on'
        db.delete_column('quests_scheduledquest', 'appeared_on')

    # Frozen ORM state: South uses this snapshot to build the `orm` object
    # passed to forwards()/backwards().  Auto-generated -- do not edit.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '254', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.category': {
            'Meta': {'object_name': 'Category'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
            'founded': ('django.db.models.fields.FloatField', [], {'default': '1298956320'}),
            'founder': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'founded_groups'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'moderators': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'moderated_categories'", 'symmetrical': 'False', 'to': "orm['auth.User']"}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '20'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'comments'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Category']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'judged': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'ot_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'replies'", 'null': 'True', 'blank': 'True', 'to': "orm['canvas.Comment']"}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0', 'db_index': 'True'}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'remix_text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas_auth.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['auth.User'], 'proxy': 'True'}
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'drawquest_auth.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'", '_ormbases': ['canvas_auth.User'], 'proxy': 'True'}
        },
        'quests.quest': {
            'Meta': {'object_name': 'Quest', 'db_table': "'canvas_comment'", '_ormbases': ['canvas.Comment'], 'proxy': 'True'}
        },
        'quests.scheduledquest': {
            'Meta': {'ordering': "['sort']", 'object_name': 'ScheduledQuest'},
            'appeared_on': ('canvas.util.UnixTimestampField', [], {'null': 'True', 'db_index': 'True'}),
            'curator': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'scheduled_quests'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'quest': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['canvas.Comment']", 'unique': 'True'}),
            'sort': ('django.db.models.fields.IntegerField', [], {}),
            'timestamp': ('canvas.util.UnixTimestampField', [], {'default': '0'})
        }
    }

    complete_apps = ['quests']
1. Be courageous. I have seen many depressions in business. Always America has emerged from these stronger and more prosperous. Be brave as your fathers before you. Have faith! Go forward! 2. It ain't what you don't know that gets you into trouble. It's what you know for sure that just ain't so. 3. Too often I would hear men boast of the miles covered that day, rarely of what they had seen. 4. No pessimist ever discovered the secret of the stars, or sailed to an uncharted land, or opened a new doorway for the human spirit. 5. Unless someone like you cares a whole awful lot, nothing is going to get better. It's not. 6. It is a curious thought, but it is only when you see people looking ridiculous that you realize just how much you love them. 7. Things may come to those who wait, but only the things left by those who hustle. 8. It may be hard for an egg to turn into a bird: it would be a jolly sight harder for it to learn to fly while remaining an egg. We are like eggs at present. And you cannot go on indefinitely being just an ordinary, decent egg. We must be hatched or go bad. 9. Everything has its wonders, even darkness and silence, and I learn, whatever state I may be in, therein to be content. 10. Our greatest weakness lies in giving up. The most certain way to succeed is always to try just one more time. 11. Today you are you! That is truer than true! There is no one alive who is you-er than you! 12. Never bend your head. Always hold it high. Look the world straight in the eye. 13. Imagination is everything. It is the preview of life's coming attractions. 14. While they were saying among themselves it cannot be done, it was done. 15. In the end, it's not the years in your life that count. It's the life in your years. 16. Do not worry about your difficulties in Mathematics. I can assure you mine are still greater. 17. I like nonsense, it wakes up the brain cells. 
Fantasy is a necessary ingredient in living, it's a way of looking at life through the wrong end of a telescope. Which is what I do, and that enables you to laugh at life's realities. 18. Opportunity is missed by most people because it is dressed in overalls and looks like work. 19. Peace is more important than all justice; and peace was not made for the sake of justice, but justice for the sake of peace. 20. It sometimes happens that a woman is handsomer at twenty-nine than she was ten years before. 21. What is a cynic? A man who knows the price of everything and the value of nothing. 22. He might never really do what he said, but at least he had it in mind. He had somewhere to go. 23. Genius is one percent inspiration and ninety-nine percent perspiration. 24. Friendship is certainly the finest balm for the pangs of disappointed love. 25. You can't depend on your eyes when your imagination is out of focus. 26. Hell, there are no rules here— we're trying to accomplish something. 27. The real problem is not why some pious, humble, believing people suffer, but why some do not. 28. I have always found that mercy bears richer fruits than strict justice. 29. Reason is the natural order of truth; but imagination is the organ of meaning. 30. Good advice is always certain to be ignored, but that's no reason not to give it. 31. The best and most beautiful things in the world cannot be seen or even touched— they must be felt with the heart. 32. Age is an issue of mind over matter. If you don't mind, it doesn't matter. 33. Nobody got anywhere in the world by simply being content. 34. Experience is one thing you can't get for nothing. 35. There are people, who the more you do for them, the less they will do for themselves. 36. Instead of comparing our lot with that of those who are more fortunate than we are, we should compare it with the lot of the great majority of our fellow men. It then appears that we are among the privileged. 37. 
Living in the lap of luxury isn't bad except that you never know when luxury is going to stand up. 38. You are never too old to set another goal or to dream a new dream. 39. Not everything that can be counted counts, and not everything that counts can be counted. 40. It has been my experience that folks who have no vices have very few virtues. 41. The only way to keep your health is to eat what you don't want, drink what you don't like, and do what you'd rather not. 42. You are not only responsible for what you say, but also for what you do not say. 43. If things are going untowardly one month, they are sure to mend the next. 44. The secret of getting ahead is getting started. 45. Self-pity is our worst enemy and if we yield to it, we can never do anything wise in this world. 46. There are two ways to live: you can live as if nothing is a miracle; you can live as if everything is a miracle. 47. Anger is an acid that can do more harm to the vessel in which it is stored than to anything on which it is poured. 48. What is right to be done cannot be done too soon. 50. Even if I knew that tomorrow the world would go to pieces, I would still plant my apple tree. Get your favorite smartphone case today and share your vision with the world.
# coding: utf-8
# Copyright 2014 Globo.com Player authors. All rights reserved.
# Use of this source code is governed by a MIT License
# license that can be found in the LICENSE file.

import sys

PYTHON_MAJOR_VERSION = sys.version_info

import os
import posixpath

try:
    from cookielib import CookieJar
except ImportError:
    from http.cookiejar import CookieJar

try:
    # Python 2: route every request through a single cookie-aware opener so
    # cookies set by a playlist server are replayed on subsequent requests.
    import urlparse as url_parser
    import urllib2
    cj = CookieJar()
    cookieProcessor = urllib2.HTTPCookieProcessor(cj)
    opener = urllib2.build_opener(cookieProcessor)
    urlopen = opener.open
except ImportError:
    # Python 3 path.
    import urllib.parse as url_parser
    from urllib.request import urlopen as url_opener
    urlopen = url_opener
    # BUG FIX: these names were only bound on the Python 2 path, so
    # getCookieProcessor() raised NameError under Python 3.  Bind them to
    # None so callers get a sentinel instead of a crash.
    cj = None
    cookieProcessor = None

from m3u8.model import M3U8, Playlist, IFramePlaylist, Media, Segment
from m3u8.parser import parse, is_url

__all__ = ('M3U8', 'Playlist', 'IFramePlaylist', 'Media', 'Segment',
           'loads', 'load', 'parse')


def loads(content):
    '''
    Given a string with a m3u8 content, returns a M3U8 object.
    Raises ValueError if invalid content
    '''
    return M3U8(content)


def load(uri):
    '''
    Retrieves the content from a given URI and returns a M3U8 object.
    Raises ValueError if invalid content or IOError if request fails.
    '''
    if is_url(uri):
        return _load_from_uri(uri)
    else:
        return _load_from_file(uri)


def getCookieProcessor():
    """Return the cookie processor (None on the Python 3 path)."""
    return cookieProcessor


# Support for python3 inspired by https://github.com/szemtiv/m3u8/
def _load_from_uri(uri):
    """Fetch ``uri`` and build an M3U8 whose base URI is the playlist's directory."""
    resource = urlopen(uri)
    # BUG FIX: the old code called _url_for(uri), which fetched the URI a
    # second time just to learn the post-redirect URL.  The already-open
    # response knows its final URL, so reuse it and halve the requests.
    base_uri = _parsed_url(resource.geturl())
    if PYTHON_MAJOR_VERSION < (3,):
        content = _read_python2x(resource)
    else:
        content = _read_python3x(resource)
    return M3U8(content, base_uri=base_uri)


def _url_for(uri):
    # Kept for backward compatibility; note this performs its own request.
    return urlopen(uri).geturl()


def _parsed_url(url):
    """Return ``url`` with its last path component stripped (the base URI)."""
    parsed_url = url_parser.urlparse(url)
    prefix = parsed_url.scheme + '://' + parsed_url.netloc
    base_path = posixpath.normpath(parsed_url.path + '/..')
    return url_parser.urljoin(prefix, base_path)


def _read_python2x(resource):
    return resource.read().strip()


def _read_python3x(resource):
    # Decode using the charset the server declared, defaulting to UTF-8.
    return resource.read().decode(resource.headers.get_content_charset(failobj="utf-8"))


def _load_from_file(uri):
    """Read a local playlist file; the base URI is the file's directory."""
    with open(uri) as fileobj:
        raw_content = fileobj.read().strip()
    base_uri = os.path.dirname(uri)
    return M3U8(raw_content, base_uri=base_uri)
Senator John McCain, originally uploaded by johnwmacdonald. Senator John McCain, the Republican nominee for president of the United States, visits Ottawa Friday 20 June 2008 to deliver a free trade speech to the Economic Club of Canada. Far left is US Ambassador David H. Wilkins patting the back of Fairmont Château Laurier's general manager Claude J. Sauvé (back turned) as the Senator gives a brief wave to the crowd and then is whisked away by motorcade at the side entrance of the hotel. Tickets for the luncheon allegedly sold out within minutes when news of the event was posted on the club's website.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-08-10 21:25:48

from __future__ import print_function
import sys
import os
from Tkinter import *


class ScrolledText(Frame):
    """A Text widget with an attached vertical scrollbar, packed in a Frame.

    Thin delegating wrappers (insert/see/bind/update/gettext) expose the
    inner Text widget's API on the frame itself.
    """

    def __init__(self, parent=None, text='', file=None):
        Frame.__init__(self, parent)
        # Let the frame grow with its container.
        self.pack(expand=YES, fill=BOTH)
        self.makeWidgets()
        self.settext(text, file)

    def makeWidgets(self):
        """Create the Text widget and cross-link it with a scrollbar."""
        text = Text(self, relief=SUNKEN)
        sbar = Scrollbar(self)
        # Connect the scrollbar to the text view, and vice versa.
        sbar.config(command=text.yview)
        text.config(yscrollcommand=sbar.set)
        # Pack the scrollbar first so it keeps its space when resized.
        sbar.pack(side=RIGHT, fill=Y)
        text.pack(side=LEFT, expand=YES, fill=BOTH)
        self.text = text

    def settext(self, text='', file=None):
        """Replace the contents with ``text``, or with ``file``'s contents."""
        if file:
            # BUG FIX: the file handle was opened and never closed; use a
            # context manager so it is released deterministically.
            with open(file, 'r') as f:
                text = f.read()
        # Delete the current text ('1.0' is line 1, column 0).
        self.text.delete('1.0', END)
        # Insert the new text at the very beginning.
        self.text.insert('1.0', text)
        # Move the cursor back to the top and grab keyboard focus.
        self.text.mark_set(INSERT, '1.0')
        self.text.focus()

    def insert(self, index, text=''):
        self.text.insert(index, text)

    def see(self, index):
        self.text.see(index)

    def bind(self, sequence, func):
        self.text.bind(sequence, func)

    def update(self):
        self.text.update()

    def gettext(self):
        """Return the full text; END+'-1c' drops the trailing newline Tk adds."""
        return self.text.get('1.0', END+'-1c')
Women's TBS Opiace 5-hole lace-up canvas sports shoe with rubber sole. With their practical and casual look, these Opiace sports shoes by TBS are available in a range of colours to match any outfit. With fixed soles. Shoe width in size 5: 3.6 inches.
# Stellar Magnate - A space-themed commodity trading game # Copyright (C) 2018 Toshio Kuratomi <toshio@fedoraproject.org> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Routines to load base game types from data files """ import enum import os from functools import partial import voluptuous as v from voluptuous.humanize import validate_with_humanized_errors as v_validate try: # pragma: no cover from yaml import CSafeLoader as Loader except ImportError: # pragma: no cover from yaml import SafeLoader as Loader from ..logging import log mlog = log.fields(mod=__name__) # Enums that are created at runtime and then used with the database. 
# See the data/base/stellar-types.yml file if you suspect this list is out of
# date.
# pylint: disable=invalid-name
CommodityType = None
CelestialType = None
LocationType = None
FinancialType = None
OrderStatusType = None
# pylint: enable=invalid-name


def type_name(value):
    """
    Validate that the names of types follow our conventions.

    :arg value: candidate type name
    :raises ValueError: if *value* is not a string, does not end with
        ``"Type"``, or does not start with an uppercase character
    :returns: *value* unchanged, if it passes validation
    """
    flog = mlog.fields(func='type_name')
    flog.fields(value=value).debug('validate that type_name follows convention')

    if not isinstance(value, str):
        raise ValueError('Type names must be strings')
    if not value.endswith('Type'):
        raise ValueError('Type names must end with "Type"')
    if not value[0] == value[0].upper():
        raise ValueError('Type names must begin with an uppercase character (following class'
                         ' naming conventions)')

    flog.debug('type_name {0} follows the proper conventions', value)
    return value


def _generic_types_validator(type_enum, value):
    """Validate that a string is valid in a :class:`enum.Enum` and transform it into the enum

    :arg type_enum: the :class:`enum.Enum` to look *value* up in
    :arg value: the member name to look up
    :raises ValueError: if *value* is neither a member name nor a member of
        *type_enum*
    :returns: the enum member named *value*
    """
    # Fix: the original used an f-string with no placeholders here.
    flog = mlog.fields(func='_generic_types_validator', type_enum=type_enum)
    flog.fields(type_enum=type_enum, value=value).debug('validate and transform into an enum value')

    try:
        enum_value = type_enum[value]
    except KeyError:
        raise ValueError(f'{value} is not a valid member of {type_enum.__name__}')
    except Exception:
        # Lookup with an unhashable value (e.g. a list) raises TypeError.
        # NOTE(review): when value already *is* a member of the enum this
        # re-raises the original exception instead of returning the member —
        # preserved as-is; confirm whether that path is reachable.
        if not isinstance(value, type_enum):
            raise ValueError(f'{value} is not a {type_enum.__name__}')
        raise

    flog.fields(enum_value=enum_value).debug('transformed into enum_value to return')
    return enum_value


# Schema for the base types data file: a fixed version string plus a mapping
# of conventionally-named types to lists of member names.
DATA_TYPES_SCHEMA = v.Schema({'version': '0.1',
                              'types': {type_name: [str]},
                              }, required=True)


def load_base_types(datadir):
    """
    Parse the yaml file of base enum types and return the information

    :arg datadir: The data directory to find the types file
    :raises voluptuous.error.Invalid: if the file does not match
        :data:`DATA_TYPES_SCHEMA`
    :returns: the validated type data structure
    """
    flog = mlog.fields(func='load_base_types')
    flog.fields(datadir=datadir).debug('Entered load_base_types')

    data_file = os.path.join(datadir, 'base', 'stellar-types.yml')
    with_file_log = flog.fields(filename=data_file)
    with_file_log.debug('constructed data_file path {data_file}', data_file=data_file)

    with_file_log.debug('Opening data_file')
    with open(data_file, 'r') as data_fh:
        with_file_log.debug('reading data_file')
        yaml_data = data_fh.read()
        with_file_log.fields(yaml=yaml_data).debug('parsing yaml string')
        loader = Loader(yaml_data)
        try:
            data = loader.get_single_data()
        finally:
            # Fix: release the loader's internal parser state (PyYAML keeps
            # it alive until dispose() is called).
            loader.dispose()

    flog.fields(data=data).debug('Validating type data structure')
    data = v_validate(data, DATA_TYPES_SCHEMA)

    flog.debug('Returning type data')
    return data


def init_base_types(datadir):
    """
    Initialize the global base types from the types data file

    :arg datadir: The data directory to find the types file

    **Side effects**: This function initializes the global Type variables which are Python Enums
        for various data types (Types of Commodities, Types of Locations, etc).  Since it
        modifies module globals it needs to be run early, before any threading.  The Type
        variables are used by everything else in savegames so it should be run as one of the
        first things upon accessing a savegame.
    """
    flog = mlog.fields(func='init_base_types')
    flog.fields(datadir=datadir).debug('Entered init_base_types')

    m_globals = globals()
    # If every *Type global is already populated there is nothing to do.
    for name in m_globals:
        if name.endswith('Type'):
            if m_globals[name] is None:
                break
    else:
        flog.debug('base_types Enums already created.  Exiting init_base_types early')
        return

    base_type_data = load_base_types(datadir)

    for name, entries in base_type_data['types'].items():
        flog.fields(enum=name).debug('Creating enum')
        m_globals[name] = enum.Enum(name, entries, module=__name__)
        # Create a voluptuous validator for this type as well
        m_globals[name].validator = partial(_generic_types_validator, m_globals[name])

    flog.debug('Leaving init_base_types')
Winner of the competition last year, King Paido was on March 22, 2016 one of three persons injured in an accident at Fort Hill. One of those persons eventually succumbed to their injuries. When this news site made contact with King Paido, he confirmed that he was one of the persons involved in the said accident. Initial investigations indicate that the driver of the tow truck was removing a vehicle from the area when the tow truck slipped. In a chain of reactions which followed, three men on the site including the tow truck driver were injured. “Yes, I was in the accident,” Phillips confirmed with this news site. He further stated that as a result of the accident he was not going to be defending his Calypso title. “No I can't defend the title [because I am recovering from the accident],” he responded when asked whether he would be defending his title. “I don't know what they are doing...I don't have any clue. But I know that I won't be going,” he said, when asked what will happen with his Calypso Crown. For her part, Sistah Joyce said she will not be competing in the competition this year and it was not because she is busy recording an album in Jamaica. “I ain't got much to say about the calypso thing really. I just ain't singing this year and that's it. I will go up next year God willing. Other media houses want to make a big deal out of it. I really have no comment about any reason why or anything like that. I just ain't singing this year,” she said when this news site contacted her recently. Asked whether her decision had anything to do with the fact that she is currently recording a reggae album in Jamaica, she said it is not even about that. Sistah Joyce took the Virgin Gorda Easter Festival Calypso Crown in 2013. She placed second in last year's competition. Contacted for a comment, Public Relations Officer of the Virgin Gorda Festival Sub-Committee Jerrell A. 
George said that while they have heard about Phillips being involved in an accident, they had no information that he was not going to be participating in the competition. George later confirmed that King Paido was out of the competition. The Virgin Gorda Easter Festival Calypso Competition will be held on Sunday March 27, 2016 at the Franka Pickering Festiville. Well me nah go up deh! Will miss them both this year. King Paido I wish you a speedy recovery and thank God for sparing your life. I imagine it may be a difficult time seeing that someone lost their life in the same accident. Lean on God my brother! Sad. So so sad. I'm wishing King Paido a speedy recovery. Thats what happen when they don't pay artist. They The Politician F n Up this place. Can't they see are they blind? Can't they hear are they deaf? OMG. Is there anything happening in Virgin Gorda on Monday? Have not heard of any boats from Stt or Tortola going over. Will miss your celebration. Is there any reason for this. Its time we put someone with a strong community relations and with a known musical back ground to run our festivals ..We need to get better. We are losing..I submit these names for consideration. Danelle Flax . Kave Stoutt. Shekera Wheatley Author Selwood...Any one of these people will serve us well.. We are really failing. We need to get better.