map_title = "Pam"
meta_title = "Tsoh Sliated"
stats_title = "Stats"
table_title = "Elbat Weiv"
login_title = "Nigol"
settings_title = "Sngittes"
dashboard_title = "Draobhsad"
units_role_cc = "tneilc"
units_role_c = "yltsom tneilc"
units_role_cs = "dexim tneilc/revres"
units_role_s = "yltsom revres"
units_role_ss = "revres"
units_kilo = "K"
units_mega = "M"
units_giga = "G"
units_bytes = "B" # bytes
units_kbytes = "BK"
units_mbytes = "BM"
units_gbytes = "BG"
units_tbytes = "BT"
units_bps = "B/s" # bytes per second
units_kbps = "BK/s"
units_mbps = "BM/s"
units_gbps = "BG/s"
units_tbps = "BT/s"
units_pps = "p/s" # packets per second
units_kpps = "pK/s"
units_mpps = "pM/s"
units_gpps = "pG/s"
units_tpps = "pT/s"
stats_udips = "Euqinu noitanitsed PI sesserdda:"
stats_usips = "Euqinu ecruos PI sesserdda:"
stats_uips = "Euqinu PI sesserdda:"
stats_ports = "Euqinu noitanitsed strop desu:"
stats_sports = "Euqinu metsys strop desu (0..1023):"
stats_uports = "Euqinu resu strop desu (1024..49151):"
stats_pports = "Euqinu etavirp strop desu (49152..65535):"
stats_ports_max = "Xam strop rof eno noitanitsed:"
stats_ports_few = "Tnecrep fo snoitanitsed htiw rewef naht 10 strop: "
stats_conns = "Latot rebmun fo tcnitsid snoitcennoc (edon -> edon:trop) derots:"
stats_conns_many = "Rebmun fo tcnitsid snoitcennoc gnirrucco erom naht 100 semit:"
stats_hosts = "Latot stsoh dedrocer"
stats_overall = "Llarevo"
stats_datasource = "Ecruosatad: {}"
table_col_address = "Sserdda"
table_col_alias = "Enamtsoh"
table_col_conn_in = "Latot dnuobni snoitcennoc"
table_col_conn_out = "Latot fnuobtuo snoitcennoc"
table_col_role = "Elor (0 = tneilc, 1 = revres)"
table_col_environment = "Tnemnorivne"
table_col_tags = "Sgat"
table_col_bytes = "Setyb deldnah"
table_col_packets = "Stekcap deldnah"
table_col_protocols = "Desu Slocotorp"
table_proto_i = "(ni)" # in (inbound)
table_proto_o = "(tuo)" # out (outbound)
table_proto_io = "(i/o)" # in / out
table_spread = "Stluser: {0} ot {1}"
table_spread_none = "On gnihctam stluser."
meta_none = "On tsoh dnuof ta siht sserdda"
meta_src = "Ecruos PI"
meta_dst = "Tsed. PI"
meta_port = "Tsed. Trop"
meta_ports = "Trop Dessecca"
meta_links = "Tnuoc / Nim"
meta_protocols = "Slocotorp"
meta_sum_bytes = "Mus Setyb"
meta_avg_bytes = "Gva Setyb"
meta_sum_packets = "Mus Stekcap"
meta_avg_packets = "Gva Stekcap"
meta_avg_duration = "Gva Noitarud"
meta_child_ip = "Sserdda"
meta_child_name = "Eman"
meta_child_count = "Evitca Stniopdne"
meta_child_ratio = "Elor (0=tneilc, 1=revres)"
login_LDAP_missing = "PADL eludom ton dellatsni. Tonnac mrofrep nigol."
login_LDAP_error = "Dluoc ton tcennoc ot PADL revres: {}. Kcehc noitarugifnoc."
login_blank_pass = "Drowssap yam ton eb knalb."
login_blank_user = "Resu yam ton eb knalb."
login_invalid = "Dilavni slaitnederc."
login_failed = "Nigol deliaf."
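The strings file above is not identified in this sample, but its key = "value" layout with # comments parses as TOML. A minimal loading sketch follows, assuming the file is saved under a hypothetical name such as strings.toml; the format calls mirror the {} and {0}/{1} placeholders visible in the values.

    import tomllib  # standard library in Python 3.11+

    # "strings.toml" is a hypothetical file name for the key/value block above.
    with open("strings.toml", "rb") as f:
        strings = tomllib.load(f)

    # Plain lookup of a title string.
    print(strings["map_title"])
    # Placeholder keys are ordinary str.format templates.
    print(strings["stats_datasource"].format("netflow"))
    print(strings["table_spread"].format(1, 50))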
From the merchant: We offer personal training, beach group sessions and jiu-jitsu self-defense classes. Free nutrition plan as well.
From the merchant: BURN DOUBLE the calories of a typical YOGA class! High-intensity warm up & light weights to sculpt! The facility strives to empower its clients with the knowledge and support necessary to achieve their maximum health potential.
From the merchant: Burn calories in a fast and fun way with any of our classes: Spinning, TRX, Circuit Training and FIT COMBAT.
from pathlib import Path

import pytest

from pycldf.dataset import ParallelText


@pytest.fixture
def ds(tmpdir):
    ds = ParallelText.in_dir(str(tmpdir))
    ds.add_component('FunctionalEquivalentTable')
    ds.add_component('FunctionalEquivalentsetTable')
    for fname in [
        'forms.csv',
        'functionalEquivalents.csv',
        'functionalEquivalentsets.csv',
    ]:
        src = Path(__file__).parent / 'data' / 'paralleltext_{0}'.format(fname)
        target = tmpdir.join(fname)
        target.write(src.read_text(encoding='utf-8').encode('utf8'), mode='wb')
    return ds


def test_paralleltext(ds):
    ds.validate()
    assert len(list(ds[ds.primary_table])) == 9


def test_get_equivalent(ds):
    for fes in ds['FunctionalEquivalentsetTable']:
        if fes['Description'] == 'Jesus Christ':
            break
    else:
        raise ValueError  # pragma: no cover

    equiv = [
        ds.get_equivalent(r) for r in ds['FunctionalEquivalentTable']
        if r['FunctionalEquivalentset_ID'] == fes['ID']]
    assert equiv == [['Jesu'], ['Jisas\u0268', 'Kiraisoy\xe1'], ['Jisas', 'Krais']]
Just outside Denver, Highlands Ranch is a beautiful suburb filled with parks, trails, and recreational areas. Highlands Ranch’s beauty can be attributed to its snaking “green belt”, providing more unincorporated areas. Residents and visitors can enjoy Colorado’s beauty at High Line Canal Trail, Chatfield State Park, Bluffs Regional Park Trail, Civic Green Park, and the Denver Botanic Gardens Chatfield Farms. Highlands Ranch Mansion is one of the most beautiful displays of architecture in Colorado. With 27,000 square feet of space, the mansion is unique and breathtaking, and a very popular location for a dream wedding. It is by far the city’s greatest attraction. Just a short distance from Denver, Highlands Ranch is the perfect place to live and visit, with its quaint, green beauty and gorgeous architecture.

Howl at the Moon is the country’s biggest name in Dueling Piano entertainment. Howl2GO delivers our amazing, high-energy show to any venue in the Highlands Ranch area. When you bring Howl2GO to Highlands Ranch, you can be certain you’re getting the true Howl at the Moon experience, and with that comes the assurance you are booking only the best dueling piano performers in the country. Our fun, energetic performers are perfect for all events anywhere in the Highlands Ranch area. Howl2GO provides all the production you need to make your event look and sound amazing. We have top-of-the-range PA sound systems and custom-built lighting shows that you will not see anywhere else. Our competition simply does not begin to come close to our production! We also supply our custom-built electric baby grand pianos that can be transported into any size or shape of venue in the Highlands Ranch area. Regardless of how many guests you’re expecting, from 20 to 2000, we have you covered!

Howl2GO Dueling Pianos by Howl at the Moon brings only the absolute best entertainers to your event. Whether you’re planning a corporate event in Highlands Ranch, a wedding or a private party at your home, we’ll deliver customized entertainment leaving you and your guests dancing and singing along all night. We are the first choice for all types of school, church and charity fundraisers, and we ensure all our songs and material are suitable for everyone in attendance. Telling your guests in Highlands Ranch that Howl at the Moon is providing the entertainment is a surefire way of attracting more people to your event!

Dueling Pianos in Highlands Ranch is all we do! Howl2GO has a dedicated team of professionals on staff: our sales manager, who will help build out your vision; our production team, who will communicate with your venue in Highlands Ranch; a wedding specialist; and, of course, our world-class entertainers, picked specifically for your event. We only bring Howl at the Moon trained musicians, all of whom have experience playing at one of our locations nationwide. Since our inception in 1990, Howl at the Moon has been the number 1 place for bachelorette parties, and now Howl2GO has become the number one choice for Highlands Ranch area Dueling Piano wedding entertainment. From your ceremony, through announcements and your first dance, until the last guests leave the dancefloor following your reception, our live Wedding music will keep everyone on the dance floor all night long. Choose us, the experts in Wedding entertainment, and don’t take a chance on a local act who will not provide the professional performance required of the biggest night of your life!
We have a dedicated reviews page so you can read testimonials from hundreds of our thrilled clients in Highlands Ranch. Our Facebook page features regular updates, and following us on Instagram and Twitter means you’ll stay up to date on where we’re playing next near Highlands Ranch CO.
# 112. Path Sum
# Given a binary tree and a sum,
# determine if the tree has a root-to-leaf path
# such that adding up all the values along the path equals the given sum.
#
# For example:
# Given the below binary tree and sum = 22,
#
#           5
#          / \
#         4   8
#        /   / \
#       11  13  4
#      /  \      \
#     7    2      1
#
# return true, as there exists a root-to-leaf path 5->4->11->2 whose sum is 22.


class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def hasPathSum(self, root, sum):
        if root is None:
            return False
        # leaf node
        if root.left is None and root.right is None and root.val == sum:
            return True
        # reduce sum by node value at each recursion
        return self.hasPathSum(root.left, sum - root.val) or self.hasPathSum(root.right, sum - root.val)


if __name__ == "__main__":
    root = TreeNode(5)
    root.left = TreeNode(4)
    root.right = TreeNode(8)
    root.left.left = TreeNode(11)
    root.left.left.right = TreeNode(2)
    print(Solution().hasPathSum(root, 22))
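The recursive check above can also be written iteratively with an explicit stack of (node, remaining sum) pairs, which avoids deep recursion on skewed trees. The sketch below is an illustrative alternative, not part of the original solution; it reuses the TreeNode class defined above.

    def has_path_sum_iterative(root, target):
        # Depth-first traversal with an explicit stack instead of recursion.
        if root is None:
            return False
        stack = [(root, target)]
        while stack:
            node, remaining = stack.pop()
            remaining -= node.val
            # A leaf whose accumulated values exhaust the target ends the search.
            if node.left is None and node.right is None and remaining == 0:
                return True
            if node.left:
                stack.append((node.left, remaining))
            if node.right:
                stack.append((node.right, remaining))
        return False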
"There is no other country in the world, besides my own, whose way of life I like so much", said Parisian Christian Dior of England. "I love English traditions, English politeness, English architecture. I even love English cooking." And the love is very much reciprocated. One of the world's most popular high-end brands year after year, season after season, Christian Dior's label has proven a fitting legacy to the man himself long after his passing 61 years ago. Which is why the announcement of an upcoming exhibition dedicated to him and his fashion house back in July was both thrilling and unsurprising in equal measure. Hosted at the V&A, Christian Dior: Designer of Dreams will run from February 2 - July 14 2019 and will be the largest and most comprehensive Dior exhibition ever staged in the UK. It will be the biggest since 2015's sell-out success Alexander McQueen: Savage Beauty. Drawn from the extensive Dior archives, the exhibition will showcase "over 500 objects, with over 200 rare Haute Couture garments shown alongside accessories, fashion photography, film, perfume, make-up, illustrations, magazines, and Christian Dior’s personal possessions." And while the brand's story is adored by fashion fans and historians alike, it is the world's best-dressed celebrities who tend to showcase its pieces to the masses.
from string import ascii_lowercase
from math import gcd

# Constants
kN = len(ascii_lowercase)
kStartAscii = ord(ascii_lowercase[0])
kFILE_NAME = "TheHitchhikersGuidetotheGalaxy.txt"


# Helpers
def AsciiValue(char):
    return ord(char) - kStartAscii


def CharValue(num):
    return chr(num + kStartAscii)


def TransformText(original, key, add_sub):
    text = ""
    for idx, val in enumerate(original):
        lowr = val.lower()
        ascii_num = AsciiValue(lowr)
        i = idx % len(key)
        ascii_num += add_sub * AsciiValue(key[i])
        ascii_num %= kN
        text += CharValue(ascii_num)
    return text


def Encrypt(b, k):
    return TransformText(b, k, 1)


def Decrypt(c, k):
    return TransformText(c, k, -1)


def AllDivisors(num):
    divisors = [3]
    i = 4
    while i * i <= num:
        if num % i == 0:
            divisors.append(i)
        i += 1
    return divisors


def PossibleLen(num):
    return list(range(3, num // 2))


def FindLenghtOfKey(cripted_text):
    ct_len = len(cripted_text)
    possible_pattern_search = AllDivisors(ct_len)
    #print(possible_pattern_search)
    distances = []
    for patter_len in possible_pattern_search:
        upper_bound = ct_len if ct_len % patter_len == 0 else ct_len - patter_len
        for i in range(0, upper_bound, patter_len):
            for j in range(i + patter_len, upper_bound, patter_len):
                matching = True
                for k in range(patter_len):
                    #print(i, j, k, patter_len, ct_len)
                    if cripted_text[i + k] != cripted_text[j + k]:
                        matching = False
                        break
                if matching:
                    distances.append(j - i - patter_len)
    print(distances)
    l_d = len(distances)
    gcd_distances = distances[0]
    if l_d >= 2:
        for d in range(1, l_d):
            gcd_distances = gcd(gcd_distances, distances[d])
    return gcd_distances


def BreakTextIntoBlocks(text, lengh_of_blocks):
    blocks = ["" for _ in range(lengh_of_blocks)]
    for ind, val in enumerate(text):
        blocks[ind % lengh_of_blocks] += val
    return blocks


def FrequencyAnalysis(cripted_text):
    frequencies = [0 for _ in range(kN)]
    for c in cripted_text:
        frequencies[AsciiValue(c)] += 1
    print(frequencies)
    return frequencies


def FindKey(cripted_text):
    key_len = FindLenghtOfKey(cripted_text)
    text_blocks = BreakTextIntoBlocks(cripted_text, key_len)
    key = ""
    for block in text_blocks:
        freq = FrequencyAnalysis(block)
        print(CharValue(freq.index(max(freq))))
        char_from_key = abs(freq.index(max(freq)) - AsciiValue('t'))
        key += CharValue(char_from_key)
    return key


if __name__ == "__main__":
    #print(Encrypt("abc", "abc"))
    #print(Encrypt("abc", "z"))
    #print(Encrypt("abc", "y"))
    #print(Decrypt("ace", "abc"))
    #print(Decrypt("zab", "z"))
    #print(Decrypt("yza", "y"))
    #print(FindLenghtOfKey(Encrypt("abcijkabcoplabc", "abc")))
    #print(FindLenghtOfKey(Encrypt("abcijkabcoplabce", "abcd")))
    #print(FindKey(Encrypt("abcijkabcoplabc", "abc")))
    content = ""
    with open(kFILE_NAME) as f:
        content = f.readlines()
    #print(content)
    cripted_text = Encrypt(content[0], "abc")
    key = FindKey(cripted_text)
    print(key)
    original = Decrypt(cripted_text, key)
    print(original)
    print(content[0].lower())
    print(content[0].lower() == original)
The full text and complete lyrics, as well as photographs from the original production. This musical tale of a domineering stage mother’s inadvertent creation of a burlesque stripper is now available in paperback for the first time. Hairspray is the 2003 Tony Award® winner for Best Musical!
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function

from . import np as __np, linalg as __linalg, derivatives as __derivatives
from .params import convergence_params
from ..bases import mkmodel as __mk


def newton_type(param):
    """
    Newton-type step for the linear solver.

    param : int
        param = 1 -> Jacobian-only solution
        param = 2 -> Jacobian solution followed by a Hessian correction
    """
    psi, jacobian, hessian = __derivatives.analytical(param)
    jacobian = __np.dot(__mk.maps['msas'], jacobian).T
    f = __np.dot(__mk.maps['msas'], psi)
    # add white noise to the Jacobian with std of the linear problem solver convergence criteria
    #jacobian += __np.random.normal(0, convergence_params['criteriaqmr'], jacobian.shape)
    # solution algorithm
    dcoverage = __np.array(__linalg.qmr(jacobian.T, -f * convergence_params['hfun'],
                                        tol=convergence_params['inner_criteria'],
                                        maxiter=convergence_params['inner_convtol'])[0])
    if param == 2 and max(abs(dcoverage)) < 1:
        count = 0
        fhessp = lambda dconv, M: __np.dot(M.T, dconv)
        dhess = __np.empty([len(psi), len(__mk.xsurface)])
        vhess = __np.empty([len(psi), 1])
        while count <= convergence_params['convtolH']:
            for i in range(0, len(psi)):
                dhess[i, :] = fhessp(dcoverage, hessian[:, :, i])
                vhess[i, :] = __np.dot(dhess[i, :], dcoverage)
            mhess = __np.dot(__mk.maps['msas'], dhess).T
            vhess = __np.dot(__mk.maps['msas'], vhess)
            dcoverage2 = __np.array(__linalg.qmr((jacobian + mhess).T,
                                                 -(f + __np.dot(jacobian.T, dcoverage) + 0.5 * vhess)))[0]
            dcoverage += dcoverage2
            count += 1
    else:
        pass
    # replace NaN/inf entries with the configured minimum step
    for i in range(len(dcoverage)):
        if __np.isnan(dcoverage[i]):
            dcoverage[i] = convergence_params['delta_min']
        elif __np.isinf(dcoverage[i]):
            if dcoverage[i] > 0:
                dcoverage[i] = convergence_params['delta_min']
            else:
                dcoverage[i] = -convergence_params['delta_min']
        else:
            pass
    return dcoverage
Calling from an unknown number? +48690760523? See reviews and leave yours!
#! /usr/bin/python
import os
import base64


def list_files(dir):
    list = []
    for root, dirs, files in os.walk(dir):
        for file in files:
            fullpath = os.path.join(root, file).replace("\\", "/")
            list.append(fullpath)
    return list


def remove_files(list, files):
    for file in files:
        if file in list:
            list.remove(file)


def convert_image_files(output_file, image_files, property):
    if len(image_files) > 0:
        output = "var " + property + " = {\n"
        for file in image_files:
            print(file)
            id = file.replace("./", "")
            fin = open(file, "rb")
            binary = fin.read()
            fin.close()
            output += '\t"' + id + '" : "data:image/png;base64,' + base64.b64encode(binary).decode('ascii') + '",\n'
        output = output[0:-2]
        output += "\n};\n"
        with open(output_file, "w") as fout:
            fout.write(output)


def concat_files(output_file, in_files):
    with open(output_file, "w", encoding='utf-8') as fout:
        for file in in_files:
            print(file)
            with open(file, encoding='utf-8') as fin:
                src = fin.read()
                fout.write(src)


PROJECT_JS = "build/bin/HTML5Shooter.js"
PROJECT_OPTIMIZED_JS = "build/bin/HTML5Shooter-min.js"
SOURCE_MAP_JS = "build/bin/HTML5Shooter-min.js.map"

# create gamelib.js
GAMELIB_JS = "build/bin/gamelib.js"
GAMELIB_HEADER = "gamelib/header.js"

os.chdir("..")
gamelib_files = list_files("gamelib")
remove_files(gamelib_files, [GAMELIB_HEADER])
gamelib_files = [GAMELIB_HEADER] + gamelib_files
concat_files(GAMELIB_JS, gamelib_files)

# create images.js
IMAGES_JS = "build/obj/images.js"
os.chdir('resources/images')
image_files = list_files('.')
convert_image_files("../../" + IMAGES_JS, image_files, 'IMAGES')
os.chdir("../../")

# create project .js
files = [GAMELIB_JS, IMAGES_JS] + list_files("src")
concat_files(PROJECT_JS, files)

# create optimized project .js
#compiler = "java -jar build/compiler.jar --compilation_level ADVANCED_OPTIMIZATIONS --js " + PROJECT_JS + " --js_output_file " + PROJECT_OPTIMIZED_JS + " --create_source_map " + SOURCE_MAP_JS + " --source_map_format=V3"
#os.system(compiler)
optimize = "uglifyjs " + PROJECT_JS + " > " + PROJECT_OPTIMIZED_JS
os.system(optimize)
Competing for donor dollars requires many charities to keep a full-time fundraiser on staff, or an entire department dedicated to donor management and fundraising events. But the right approach on your website relates the story of your organization while appealing to donors on a personal level, connecting with both their desire for social responsibility and their need to show their peers that their definition of a good life intertwines with your nonprofit events.

Older donors won’t always understand the dazzle of current digital platforms, but they understand the tug of nostalgia. Use tintypes and black-and-white photos showing history as it was before your nonprofit began its work improving lives. Describe conditions and situations improved by your community efforts in crowdfunding and peer-to-peer fundraising; these recent terms describe small-town, localized efforts you mesh with modern nonprofit donor management software.

Busy millennial donors hate taking time to read a text of more than a few words; get their attention with no words at all. Use video and Instagram posts featuring staff and clients of your nonprofit and couple the images with their own words. End each video or Instagram vignette with a donation request. No writer tells the story of your organization’s achievements better than those who benefit from its work or those who see the positive results every day.

Your nonprofit’s relevance matters to all donors; the ability to trust, track their donations and see the money work is vital for a continued relationship. Refrain from political bashing or religious proselytizing, but appeal to potential donors using current news events. An example is a nonprofit providing funds for emergency food, shelter, clothing, and other basic assistance featuring news of the severe wildfires in the western U.S. The current events remind donors that anyone may need help at any time and their donations help people just like themselves.

Process Donation’s software provides a personalized focus for your outreach efforts. Our platform is your virtual assistant for event branding, marketing, ticket sales, staff communication, donation campaigns and management, emailing, financial reporting integration and planning the next successful event. Our app goes to events, the office or anywhere your nonprofit needs you. Reach us online for more information and increase your nonprofit’s reach today.
"""Print help text.""" # :license: MIT, see LICENSE for more details. import click from click import formatting from SoftLayer.CLI import core as cli_core from SoftLayer.CLI import environment from SoftLayer.shell import routes @click.command() @environment.pass_env @click.pass_context def cli(ctx, env): """Print shell help text.""" env.out("Welcome to the SoftLayer shell.") env.out("") formatter = formatting.HelpFormatter() commands = [] shell_commands = [] for name in cli_core.cli.list_commands(ctx): command = cli_core.cli.get_command(ctx, name) if command.short_help is None: command.short_help = command.help details = (name, command.short_help) if name in dict(routes.ALL_ROUTES): shell_commands.append(details) else: commands.append(details) with formatter.section('Shell Commands'): formatter.write_dl(shell_commands) with formatter.section('Commands'): formatter.write_dl(commands) for line in formatter.buffer: env.out(line, newline=False)
What thickness of sock do I need? The thickness of the socks you choose should reflect the climate you will be in. For hotter climates, a sock with a lower Thermal Rating or Climate Guide is recommended. For a colder climate where additional insulation and warmth is essential, a sock with a higher Thermal Rating or Climate Guide is recommended.
#!/usr/bin/env python # Copyright 2014-2020 Scalyr Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script which pretty prints benchmark results for compression algorithms benchmarks. """ from __future__ import absolute_import from __future__ import print_function import sys import copy import json from io import open from collections import defaultdict from prettytable import PrettyTable # pylint: disable=import-error def main(): with open(sys.argv[1], "r") as fp: data = fp.read() data = json.loads(data) result = defaultdict(list) print(("=" * 100)) print("Compression") print(("=" * 100)) for benchmark in data["benchmarks"]: if benchmark["group"] != "compress": continue name = benchmark["name"] key = "=".join([str(x) for x in benchmark["params"]["log_tuple"]]) mean = benchmark["stats"]["mean"] * 1000 compression_ratio = round(benchmark["stats"]["compression_ratio"], 3) result[key].append((benchmark["name"], mean, compression_ratio)) for key in result.keys(): values = result[key] split = key.split("=") print("") print(("-" * 100)) print("") print(("%s %s bytes (-1 means whole file)" % (split[0], split[1]))) print("") print("Best by timing (less is better)") print("") table = PrettyTable() table.field_names = [ "name", "mean time in ms (less is better)", "compression ratio (more is better)", ] values1 = sorted(copy.copy(values), key=lambda x: x[1]) for name, mean, compression_ratio in values1: table.add_row((name, mean, compression_ratio)) print(table) print("") print("Best by compression ratio (more is better)") print("") table = PrettyTable() table.field_names = [ "name", "mean time in ms (less is better)", "compression ratio (more is better)", ] values2 = sorted(copy.copy(values), key=lambda x: x[2], reverse=True) for name, mean, compression_ratio in values2: table.add_row((name, mean, compression_ratio)) print(table) print("") print(("=" * 100)) print("") result = defaultdict(list) print(("=" * 100)) print("Decompression") print(("=" * 100)) for benchmark in data["benchmarks"]: if benchmark["group"] != "decompress": continue name = benchmark["name"] key = "=".join([str(x) for x in benchmark["params"]["log_tuple"]]) mean = benchmark["stats"]["mean"] * 1000 result[key].append((benchmark["name"], mean)) for key in result.keys(): values = result[key] split = key.split("=") print("") print(("-" * 100)) print("") print(("%s %s bytes (-1 means whole file)" % (split[0], split[1]))) print("") print("Best by timing (less is better)") print("") table = PrettyTable() table.field_names = ["name", "mean time in ms (less is better)"] values1 = sorted(copy.copy(values), key=lambda x: x[1]) for name, mean in values1: table.add_row((name, mean)) print(table) print("") print(("=" * 100)) print("") if __name__ == "__main__": main()
VIGEVANO - The conservative Lynyrd Skynyrd, the legendary American band, will play here again, this time at the Castello di Vigevano (nice venue), near Milan, on 13 June 2012. You know, the (Southern) Confederate flag they use is sometimes considered racist, and it was used by Elvis Presley too (Liam's idol, who's even got his tattoo). In fact (but not for this) he was even accused of being a Nazi. The band played for Bush in 2004. They have the (American) eagle as their symbol, like Manchester City. This is all so nationalist, so American pride, so right-wing, so Berlusconi lol, that Noel Gallagher would love it.
#!/usr/bin/env python # -*- coding: utf-8 -*- import log from MySQL import ConMySQL from AddRowWindowGTK import AddRowWindowGTK import csv import os from collections import deque, defaultdict import gi from gi.repository import Gtk as gtk from gi.repository import Gdk gi.require_version('Gtk', '3.0') def css(): css = b""" * { transition-property: color, background-color, border-color, background-image, padding, border-width; transition-duration: 1s; } /* font operate on entire GtkTreeView not for selected row */ GtkTreeView { text-shadow: 1px 1px 2px black, 0 0 1em blue, 0 0 0.2em blue; color: white; font: 1.5em Georgia, "Bitstream Charter", "URW Bookman L", "Century Schoolbook L", serif; font-weight: bold; font-style: italic;box-shadow: 5px 3px red;} GtkTreeView row:nth-child(even) { background-image: -gtk-gradient (linear, left top, left bottom, from (#d0e4f7), color-stop (0.5, darker (#d0e4f7)), to (#fdffff)); } GtkTreeView row:nth-child(odd) { background-image: -gtk-gradient (linear, left top, left bottom, from (yellow), color-stop (0.5, darker (yellow)), to (#fdffff)); } /* next line only border action operate */ GtkTreeView:selected{color: white; background: green; border-width: 1px; border-color: black;} /* next line for Gtk.TreeViewColumn */ column-header .button{color: white; background: purple;} * { -GtkWindow-resize-grip-default: false; } """ style_provider = gtk.CssProvider() style_provider.load_from_data(css) gtk.StyleContext.add_provider_for_screen( Gdk.Screen.get_default(), style_provider, gtk.STYLE_PROVIDER_PRIORITY_APPLICATION ) WINDOW_WIDTH = 460 WINDOW_HEIGHT = 244 class Window(): def __init__(self, configData={}): log.LOG("START __init__") # set config data self.configData = configData self.configData['history_file'] = os.path.expanduser("~") + "/.lom_history" self.configData['history'] = 50 self.configData['short'] = ['Title', 'Name', 'Keys'] self.configData['ip_MySQL'] = '172.19.20.19' if not os.path.exists(self.configData['lomrc']): self.setConfig() if not os.path.exists(self.configData['history_file']): with open(self.configData['history_file'], 'wb') as f: f.write("") self.getConfig() # Set MySQL IP ConMySQL.ip = self.configData['ip_MySQL'] # Parse glade XML self.gladefile = os.path.dirname(os.path.abspath(__file__)) + "/glade/MainWindow.glade" self.glade = gtk.Builder() self.glade.add_from_file(self.gladefile) self.glade.connect_signals(self) # get object self.component = {} self.component['set'] = {} self.component['search'] = gtk.ListStore(int, str, str, str, str, str, str) self.component['update'] = gtk.ListStore(int, str, str, str) self.component['add'] = {} self.component['type'] = gtk.TreeStore(str, int) self.component['news'] = gtk.ListStore(int, str, str, str, str, str, str) self.component['keys'] = gtk.ListStore(str) self.component['history'] = gtk.ListStore(int, str) self.window = self.glade.get_object("window") self.gridMain = self.glade.get_object("gridMain") self.entryCommandLine = self.glade.get_object("entryCommandLine") self.labelTitle = self.glade.get_object("labelTitle") self.labelText = None self.treeViewResult = None # set up history command self.history = deque(maxlen=int(self.configData['history'])) self.histpos = 0 self.getHisoryFromFile() # initial window self.initialWindow() # show all object self.window.show_all() # check info self.initialInfo() log.LOG("END __init__") def setConfig(self): log.LOG("START setConfig") tmp = dict(filter(lambda x: not x[0].startswith('_'), self.configData.items())) tmp['short'] = ' '.join(tmp['short']) with 
open(self.configData['lomrc'], 'wb') as csvfile: writer = csv.DictWriter(csvfile, tmp.keys()) writer.writeheader() writer.writerow(tmp) self.getConfig() log.LOG("END setConfig") def getConfig(self): log.LOG("START getConfig") with open(self.configData['lomrc']) as csvfile: reader = csv.DictReader(csvfile) for row in reader: for k, v in row.items(): self.configData[k] = v self.configData['short'] = self.configData['short'].split() log.LOG("END getConfig") def initialInfo(self): log.LOG("START initialInfo") # get news rows = ConMySQL.getNews(self.configData['user']) if rows: self.print_error_message("%d news from last check" % len(rows)) self.getNews() log.LOG("END initialInfo") def initialWindow(self): log.LOG("START initialWindow") self.window.add_events(Gdk.EventMask.BUTTON_PRESS_MASK) self.window.set_gravity(Gdk.Gravity.SOUTH_EAST) self.window.set_keep_above(True) self.window.set_resizable(False) self.window.set_decorated(False) self.entryCommandLine.connect('key_press_event', self.__key_function) self.commonLayout() log.LOG("END initialWindow") def __set_position(self, width=WINDOW_WIDTH, height=WINDOW_HEIGHT): log.LOG("START __set_position") (w, h) = width, height x = int(Gdk.Screen.get_default().get_width() * int(self.configData['_x'])) y = int(Gdk.Screen.get_default().get_height() * int(self.configData['_y'])) # Set position Left-Button log.LOG("(x,y) = (%s,%s) (w,h) = (%s,%s)" % (x, y, w, h)) self.window.move(x-w, y-h) log.LOG("END __set_position") def __key_function(self, entry, event): log.LOG("START __key_function") if event.keyval == Gdk.KEY_Return: self.entryCommandLine.emit_stop_by_name('key_press_event') elif event.keyval in (Gdk.KEY_KP_Up, Gdk.KEY_Up, Gdk.KEY_Page_Up): self.entryCommandLine.emit_stop_by_name('key_press_event') self.historyUp() elif event.keyval in (Gdk.KEY_KP_Down, Gdk.KEY_Down, Gdk.KEY_Page_Down): self.entryCommandLine.emit_stop_by_name('key_press_event') self.historyDown() elif event.keyval in (Gdk.KEY_D, Gdk.KEY_d) and\ event.state & Gdk.ModifierType.CONTROL_MASK: self.entryCommandLine.emit_stop_by_name('key_press_event') self.setHisoryFile() gtk.main_quit() self.window.destroy() log.LOG("END __key_function") def historyDown(self): log.LOG("START historyUp") if self.histpos > 0: self.entryCommandLine.set_text(self.history[self.histpos]) self.histpos = self.histpos - 1 log.LOG("END historyUp") def historyUp(self): log.LOG("START historyDown") if self.histpos < len(self.history) - 1: self.entryCommandLine.set_text(self.history[self.histpos]) self.histpos = self.histpos + 1 log.LOG("END historyDown") def setHisoryFile(self): with open(self.configData['history_file'], 'w') as f: f.write('\n'.join(self.history)) def getHisoryFromFile(self): with open(self.configData['history_file'], 'r') as f: self.history = deque(maxlen=int(self.configData['history'])) for row in f.read().split('\n'): self.history.append(row) def print_error_message(self, text="fill all fields"): log.LOG("START print_error_message") md = gtk.MessageDialog(self.window, type=gtk.MessageType.ERROR, buttons=gtk.ButtonsType.OK) md.set_position(gtk.WindowPosition.CENTER_ON_PARENT) md.set_markup(text) md.run() md.destroy() return None log.LOG("END print_error_message") def entry_dialog(self, message): log.LOG("START entry_dialog") dialog = gtk.MessageDialog(self.window, type=gtk.MessageType.QUESTION, buttons=gtk.ButtonsType.OK) dialog.set_position(gtk.WindowPosition.CENTER_ON_PARENT) dialog.set_markup(message) dialogBox = dialog.get_content_area() entry = gtk.Entry() entry.set_size_request(200, 0) 
dialogBox.pack_end(entry, False, False, 0) dialog.show_all() response = dialog.run() text = entry.get_text() dialog.destroy() if (response == gtk.ResponseType.OK) and (text != ''): return text else: return None log.LOG("END entry_dialog") def main(self): log.LOG("START main") "Run main loop" gtk.main() log.LOG("END main") def deleteEvent(self, widget, event): log.LOG("START deleteEvent") gtk.main_quit() log.LOG("END deleteEvent") def parserArgs(self, widget): log.LOG("START parserArgs") arg = escapePattern(widget.get_text()) rest = arg.split() self.histpos = 0 if rest and '\n' not in rest: self.history.appendleft(arg) command = rest.pop(0) if rest else "" self.commonLayout() if command in ['help', 'h']: self.getHelp(rest) elif command in ['set']: self.setOption(rest) elif command in ['search', 's']: self.search(rest) elif command in ['add', 'a']: self.addParser(rest) elif command in ['update', 'u']: self.updateRecord(rest) elif command in ['type', 't']: self.getTypeTree(rest) elif command in ['key', 'k']: self.getKeysList(rest) elif command in ['news', 'n']: self.getNews() elif command in ['history', 'his']: self.getHisory() elif command in ['open', 'o']: self.openWebBrowser(rest) elif command in ['exit', 'bye']: self.setHisoryFile() gtk.main_quit() self.window.destroy() elif command.isdigit(): self.getDigit(int(command)) log.LOG("END parserArgs") def commonLayout(self): log.LOG("START commonLayout") self.labelTitle.set_text("Library Of Mind") self.window.set_size_request(WINDOW_WIDTH, WINDOW_HEIGHT) self.entryCommandLine.set_text("") widget = self.gridMain.get_child_at(0, 1) if widget is not None: self.gridMain.remove(widget) widget = self.gridMain.get_child_at(0, 2) if widget is not None: self.gridMain.remove(widget) self.__set_position() log.LOG("END commonLayout") def labelLayout(self, text): log.LOG("START labelLayout") log.LOG("Create Scroll") sw = gtk.ScrolledWindow() sw.set_shadow_type(gtk.ShadowType.IN) sw.set_size_request(450, 200) sw.set_visible(True) sw.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC) self.gridMain.attach(sw, 0, 2, 1, 1) log.LOG("(0,1): %s" % self.gridMain.get_child_at(0, 1)) self.labelText = gtk.Label() self.labelText.set_markup(escape(text)) self.labelText.set_visible(True) self.labelText.set_selectable(True) self.labelText.props.valign = gtk.Align.START self.labelText.props.halign = gtk.Align.START sw.add(self.labelText) self.__set_position() log.LOG("END labelLayout") def treeViewLayout(self, model, activatedRow, getSelectedRow, search_col=0): """ Create treeView model -> GTK Storage """ log.LOG("START treeViewLayout") self.commonLayout() log.LOG("Create Scroll") sw = gtk.ScrolledWindow() sw.set_shadow_type(gtk.ShadowType.IN) sw.set_size_request(450, 200) sw.set_can_focus(True) sw.set_visible(True) sw.set_policy(gtk.PolicyType.AUTOMATIC, gtk.PolicyType.AUTOMATIC) self.gridMain.attach(sw, 0, 1, 1, 1) log.LOG("(0,1): %s" % self.gridMain.get_child_at(0, 1)) self.treeViewResult = gtk.TreeView() self.treeViewResult.set_size_request(450, 200) self.treeViewResult.set_visible(True) self.treeViewResult.set_can_focus(True) self.treeViewResult.set_model(model) self.treeViewResult.set_search_column(search_col) self.treeViewResult.connect("row-activated", activatedRow) self.treeViewResult.connect("cursor-changed", getSelectedRow) sw.add(self.treeViewResult) self.__set_position() log.LOG("END treeViewLayout") def doNothing(*arg): pass def getSelectedRow(self, widget): log.LOG("START getSelectedRow") text_row = """ <span color="#929287">Title: 
</span><span>{1}</span> <span color="#929287">Name: </span><span>{2}</span> <span color="#929287">Description:</span>\n <span>{4}</span>\n <span color="#929287">Keys: </span><span>{3}</span> <span color="#929287">Autor: </span><span weight="bold">{5}</span>\t<span color="#929287">Date: </span><span>{6}</span> """ selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result wid = self.gridMain.get_child_at(0, 2) if wid is not None: self.gridMain.remove(wid) self.labelLayout(text_row.format(*model[iter])) self.__set_position(WINDOW_WIDTH, WINDOW_HEIGHT + 200 if self.configData['_size_200'] else 0) self.labelTitle.set_text("Search --> %s" % model[iter][2]) log.LOG("END getSelectedRow") def getSelectedRowType(self, widget, column, data): log.LOG("START getSelectedRowType") log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data)) selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result type_name = str(model.get_value(iter, 0)) type_id = model.get_value(iter, 1) typeData = ConMySQL.getTypeByTree() child = (type_name, type_id) id_type = ["-it", '[[:<:]]' + str(type_id) + '[[:>:]]'] for i in self.getIdFromTreeType(typeData, child): id_type.extend(["-it", '[[:<:]]' + str(i) + '[[:>:]]']) self.commonLayout() self.search(id_type) self.labelTitle.set_text("Type select --> %s" % type_name) log.LOG("END getSelectedRowType") def getExpandRow(self, widget): log.LOG("START getExpandRow") selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result path = model.get_path(iter) widget.expand_to_path(path) log.LOG("END getExpandRow") def getIdFromTreeType(self, typeData, parentName=('LOM', 1)): log.LOG("START getIdFromTreeType") list_id = [] if not typeData.get(parentName): return list_id else: for child in typeData[parentName]: list_id.append(child[1]) if typeData.get(child): list_id.extend(self.getIdFromTreeType(typeData, child)) return list_id log.LOG("END getIdFromTreeType") def getSelectedRowKey(self, widget, column, data): log.LOG("START getSelectedRowKey") log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data)) selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result key_name = str(model.get_value(iter, 0)) id_type = ["-k", '[[:<:]]' + key_name + '[[:>:]]'] self.commonLayout() self.search(id_type) self.labelTitle.set_text("Key select --> %s" % key_name) log.LOG("END getSelectedRowKey") def getSelectedHis(self, widget, column, data): log.LOG("START getSelectedHis") log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data)) selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result self.commonLayout() self.entryCommandLine.set_text(str(model.get_value(iter, 1))) self.parserArgs(self.entryCommandLine) log.LOG("END getSelectedHis") def getSelectedUpdate(self, widget, column, data): log.LOG("START getSelectedUpdate") log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data)) selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result id_row = model[iter][0] self.commonLayout() gtkWindowUpdateRow = AddRowWindowGTK(self.configData['user'], id_row) gtkWindowUpdateRow.main() self.labelTitle.set_text("Update select --> %s" % model[iter][1]) log.LOG("END getSelectedUpdate") def getHelp(self, com): log.LOG("START getHelp") if com: helpList = ConMySQL.getHelp(' '.join(com))[0] 
if helpList['name'] == 'ALL': helpList = '<span color="red">INVALID SYNTAX</span>\n' + helpList['description'] else: helpList = helpList['description'] log.LOG("#### %s" % helpList) self.labelLayout(helpList) else: helpList = ConMySQL.getHelp()[0] helpList = helpList['description'] self.labelLayout(helpList) self.labelTitle.set_text("Help --> %s" % ' '.join(com) or 'All') log.LOG("END getHelp") def search(self, com): log.LOG("START search") # helper fun def checkRow(l, d, n): log.LOG("%s %s %s" % (l, d, n)) t = [] while not l[0].startswith('-'): t.append(l.pop(0)) if not l: break if not t: return self.print_error_message("Invalid syntax") else: dPattern[n].append(' '.join(t)) # clean TreeStore self.component['search'].clear() # Parse com dPattern = defaultdict(list) if com: if not com[0].startswith('-'): pattern = ' '.join(com) for name in ['name', 'type', 'description', 'key_list', 'name_a']: dPattern[name].append(pattern) else: while com: k = com.pop(0) if com: if k.lower() in ['-id', '-i']: checkRow(com, dPattern, 'id') elif k.lower() in ['-name', '-n ']: checkRow(com, dPattern, 'name') elif k.lower() in ['-type', '-t']: checkRow(com, dPattern, 'type') elif k.lower() in ['-description', '-desc', '-d']: checkRow(com, dPattern, 'description') elif k.lower() in ['-key', '-k']: checkRow(com, dPattern, 'key_list') elif k.lower() in ['-autor', '-a']: checkRow(com, dPattern, 'name_a') elif k.lower() in ['-id_type', '-it']: checkRow(com, dPattern, 'id_type') else: return self.print_error_message("Invalid syntax") if dPattern: rows = ConMySQL.getLibDefaultDick(dPattern) else: rows = ConMySQL.getLib() for row in rows: toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")] self.component['search'].append(toadd) # Create, TreeView Layout self.treeViewLayout(self.component['search'], self.doNothing, self.getSelectedRow, 2) # create columns self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short'])) self.labelTitle.set_text("Search --> %s" % (' '.join(com) if com else "All")) log.LOG("END search") def addRecord(self): log.LOG("START addRecord") gtkWindowAddRow = AddRowWindowGTK(self.configData['user']) gtkWindowAddRow.main() self.labelTitle.set_text("Add record") log.LOG("END addRecord") def addParser(self, com): log.LOG("START addParser") if com: if com[0].startswith('-'): if com[0] in ['-t', '-type']: if len(com) == 2: self.selectNewType(com[1]) else: self.selectNewType() else: self.print_error_message("Invalid syntax More in <tt>help add</tt>") else: self.print_error_message("Invalid syntax More in <tt>help add</tt>") else: self.addRecord() log.LOG("END addParser") def selectNewType(self, new_type=None): log.LOG("START selectNewType") self.component['type'].clear() typeData = ConMySQL.getTypeByTree() # Show all type by pattern if new_type: types = ConMySQL.getType(new_type) for type in types: child = (type['type'], type['id_type']) parent = self.component['type'].append(None, child) self.addRowToTreeView(typeData, child, parent) else: # Show all type self.addRowToTreeView(typeData) # Create, TreeView Layout self.treeViewLayout(self.component['type'], self.addNewTypeToSelected, self.doNothing) # create columns self.createColumns(self.treeViewResult, [(0, 'Type')]) self.labelTitle.set_text("Add new type. 
Please select parent type") log.LOG("END selectNewType") def addNewTypeToSelected(self, widget, column, data): log.LOG("START addNewTypeToSelected") log.LOG("widget= %s path= %s column= %s data=%s" % (self, widget, column, data)) selection = widget.get_selection() result = selection.get_selected() if result: model, iter = result type_name = str(model.get_value(iter, 0)) type_id = model.get_value(iter, 1) new_type = self.entry_dialog("Please entry new type to <tt>%s</tt>" % type_name) if new_type: ConMySQL.setType(new_type, type_id) self.commonLayout() self.labelTitle.set_text("Add new type '%s' to '%s'" % (new_type, type_name)) else: self.print_error_message("Name is empty More <tt>help add</tt>") log.LOG("END addNewTypeToSelected") def getTypeTree(self, com): log.LOG("START getTypeTree") log.LOG("START getType") # clean TreeStore self.component['type'].clear() typeData = ConMySQL.getTypeByTree() # Show all type by pattern if com: types = ConMySQL.getType(' '.join(com)) for type in types: child = (type['type'], type['id_type']) parent = self.component['type'].append(None, child) self.addRowToTreeView(typeData, child, parent) else: # Show all type self.addRowToTreeView(typeData) # Create, TreeView Layout self.treeViewLayout(self.component['type'], self.getSelectedRowType, self.getExpandRow) # create columns self.createColumns(self.treeViewResult, [(0, 'Type')]) self.labelTitle.set_text("Type --> %s" % (' '.join(com) if com else "All")) log.LOG("END getType") log.LOG("END getTypeTree") def updateRecord(self, com): log.LOG("START updateRecord") # clean TreeStore self.component['search'].clear() # Parse com dPattern = defaultdict(list) if com: pattern = ' '.join(com) for name in ['name', 'type', 'description', 'key_list', 'name_a']: dPattern[name].append(pattern) if dPattern: rows = ConMySQL.getLibDefaultDick(dPattern) else: rows = ConMySQL.getLib() for row in rows: toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")] self.component['search'].append(toadd) # Create, TreeView Layout self.treeViewLayout(self.component['search'], self.getSelectedUpdate, self.doNothing, 2) # create columns self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short'])) self.labelTitle.set_text("Update --> %s" % (' '.join(com) if com else "All")) log.LOG("END updateRecord") def createColumns(self, treeView, listColumnName): log.LOG("START createColumns") for i, name in listColumnName: rendererText = gtk.CellRendererText() column = gtk.TreeViewColumn(name, rendererText, text=i) column.set_clickable(True) column.set_sort_indicator(True) column.set_sort_column_id(0) treeView.append_column(column) log.LOG("END createColumns") def addRowToTreeView(self, typeData, parentName=('LOM', 1), parent=None): log.LOG("START addRowToTreeView") if not typeData.get(parentName): return else: for child in typeData[parentName]: newParent = self.component['type'].append(parent, [child[0], child[1]]) if typeData.get(child): self.addRowToTreeView(typeData, child, newParent) log.LOG("END addRowToTreeView") def getKeysList(self, com): log.LOG("START getKeysList") # clean TreeStore self.component['keys'].clear() if com: keys = ConMySQL.getUniqueKeys(' '.join(com)) else: keys = ConMySQL.getUniqueKeys() for key in keys: self.component['keys'].append([key['key_name']]) # Create, TreeView Layout self.treeViewLayout(self.component['keys'], self.getSelectedRowKey, self.doNothing) # create columns self.createColumns(self.treeViewResult, 
[(0, 'keys')]) self.labelTitle.set_text("Keys --> %s" % (' '.join(com) if com else "All")) log.LOG("END getKeysList") def mapColumnNameToNumber(self, nameList): mapNumber = { 'ID': 0, 'Title': 1, 'Name': 2, 'Keys': 3, 'Description': 4, 'name_a': 5, 'data_a': 6} return [(mapNumber[x], x) for x in nameList if x in mapNumber.keys()] def getNews(self): log.LOG("START getNews") # clean TreeStore self.component['news'].clear() rows = ConMySQL.getNews(self.configData['user']) ConMySQL.updateUser(self.configData['user']) for row in rows: toadd = [row['id'], row['type'], row['name'], row['key_list'], row['description'], row['name_a'], row['date_a'].strftime("%Y-%m-%d %T")] self.component['news'].append(toadd) # Create, TreeView Layout self.treeViewLayout(self.component['news'], self.doNothing, self.getSelectedRow, 2) # create columns self.createColumns(self.treeViewResult, self.mapColumnNameToNumber(self.configData['short'])) self.labelTitle.set_text("News") log.LOG("END getNews") def getDigit(self): log.LOG("START getDigit") pass log.LOG("END getDigit") def getHisory(self): log.LOG("START getDigit") # clean TreeStore self.component['history'].clear() for row in enumerate(self.history): self.component['history'].append(row) # Create, TreeView Layout self.treeViewLayout(self.component['history'], self.getSelectedHis, self.doNothing, 1) # create columns self.createColumns(self.treeViewResult, [(0, 'ID'), (1, 'History')]) self.labelTitle.set_text("History") log.LOG("END getDigit") def setOption(self, com): log.LOG("START setOption") if len(com) >= 2 and com[0] in self.configData.keys(): self.configData[com[0]] = ' '.join(com[1:]) elif not com: self.getConfig() message = "" for k, v in self.configData.items(): if not k.startswith('_'): message += "%s = %s\n" % (k, v) self.labelLayout(message) else: self.print_error_message('INVALID SYNTAX') self.setConfig() log.LOG("END setOption") def openWebBrowser(self, com): log.LOG("START openWebBrowser") import webbrowser browser = webbrowser.BackgroundBrowser("gnome-open") if len(com) >= 2 and com[0].startswith('-'): option = com.pop(0) if option in ['-s']: url = "http://stackoverflow.com/search?q=" + '+'.join(com) elif option in ['-u']: url = "http://unix.stackexchange.com/search?q=" + '+'.join(com) elif option in ['-g']: url = "https://www.google.pl/search?q=" + '+'.join(com) else: return self.print_error_message('INVALID SYNTAX') browser.open(url) else: self.print_error_message('INVALID SYNTAX') log.LOG("END openWebBrowser") def escape(s): "escape html markup" if isinstance(s, str): s = s.replace("&", "&amp;") s = s.replace("\<", "&lt;") s = s.replace("\>", "&gt;") return s def escapePattern(s): "escape html markup" if isinstance(s, str): s = s.replace("\<", "[[:<:]]") s = s.replace("\>", "[[:>:]]") return s
Welcome to our Talents page for Uther. Here, we give you an overview of how strong each of Uther's talents is. Then, we present several viable builds, before analyzing each talent row separately, so that you can make informed decisions.

The Holy Radiance build seeks to provide Uther with as much uptime as possible on Devotion's Armor-increasing effect. Key Talents include Wave of Light, which improves the duration of the effect on top of greatly reducing the Cooldown of Holy Radiance, and Benediction, which allows Uther to quickly recast the Ability. By nature, this build thrives against burst damage.

Reward: After reducing damage 40 times, Holy Light's Mana cost is reduced from 80 to 60.
Reward: After reducing damage 80 times, Holy Light's Mana cost is reduced to 40, and its range is increased by 50%.
Quest: Damage or heal Heroes 60 times with Holy Radiance.
Reward: Increase the duration of Devotion by 0.5 seconds.
Reward: Basic Attacks reduce the cooldown of Hammer of Justice by 1.5 seconds.

Silver Touch provides less overall Mana than the two alternative Talents. Additionally, the range bonus is seldom useful, given Uther's propensity for staying close to whatever needs to be healed.

Wave of Light improves what is arguably Uther's most important Ability by significantly reducing its Cooldown and Mana cost. The duration increase to Devotion's Armor-increasing effect further contributes to Uther's ability to help with survivability.

Hammer of the Lightbringer is a niche Talent that serves to punish melee-heavy team compositions. Under ideal circumstances, Hammer of the Lightbringer will provide more Mana regeneration than its two alternative Talents, on top of a hefty Cooldown reduction to Hammer of Justice. This second point is particularly valuable when combined with some of the other Talents that improve Hammer of Justice.

Holy Shock opens up the possibility for Uther to deal a passable amount of burst damage at the cost of putting Holy Light on a short Cooldown. This can be useful when your team's strategy involves bursting down one specific target. An added benefit of this Talent is that it can trigger Devotion's effect on Uther himself at a relatively low Mana cost. The downside of the Talent, however, is that it is less than useful when you must use Holy Light to keep yourself or allies alive. In short, Holy Shock is a good choice when your team is the aggressor.

Pursuit of Justice provides Uther with a reliable but modest movement speed increase which, ultimately, does little to increase his general utility.

Holy Fire improves Uther's abysmal waveclear on top of providing him with a decent incentive to use Basic Attacks against Heroes. One interesting aspect of the Talent is its ability to reveal Stealthed Heroes.

Armor of Faith punishes opponents for stunning Uther himself. Unlike the alternative options available on this Talent tier, Armor of Faith cannot be used to help allies out against crowd control, which is generally the precursor to kills, reducing Uther's general utility.

Guardian of Ancient Kings fulfills a function similar to Hand of Protection by granting your target a very significant amount of Armor should they find themselves under the effect of crowd control, as opposed to simply freeing them. On the other hand, Guardian of Ancient Kings does not mitigate some crowd control that Hand of Protection does mitigate. It does, however, benefit from having a much lower Cooldown, making it more readily available.
Hand of Protection is one of the most powerful cleansing effects available in the game, thanks to its built-in Cooldown reduction effect. Although it does require Uther to attack to take any real effect, having such an effect available so often is invaluable. Still, the 50 Armor bonus provided by Guardian of Ancient Kings is considered more useful in terms of survivability. Hand of Protection can still be used to set up Heroic Abilities that must be channeled.

As Divine Shield and Divine Storm are both situationally very powerful, the choice of one or the other comes down to personal preference and team compositions. These Abilities are discussed in further detail in the Abilities section of this guide.

Divine Shield is your defensive option. It offers a unique effect that can single-handedly influence the outcome of any given fight. In theory, Divine Shield's invulnerability effect can prevent an infinite amount of damage from affecting your target for its duration. For this reason, it is often used to avoid burst damage against team compositions that rely on hard-engaging. The movement speed increase provided by Divine Shield also allows your target to effectively escape if their Health is left low from an engagement, or to re-engage after being healed. Alternatively, Divine Shield can be used on a high-damage Hero, generally melee, to allow them to deliver their damage unscathed. Since many kills rely on the application of crowd control effects to hold a target in place while it is being damaged, the debuff removal and immunity effect provided by Divine Shield is just as valuable as the invulnerability itself.

Divine Storm is your offensive option. Although such a reliable area of effect stun has useful defensive applications, notably for peeling, Divine Storm is typically used as part of a combo, either as the initiating component or as a follow-up. Alternatively, it can also be used to lock down Support Heroes (especially when used along with Hammer of Justice for lengthy stuns) while your team kills off other Heroes before they can be healed or otherwise protected. Divine Storm combos particularly well with area of effect Abilities, and should be strongly considered if your team composition features two or more.

For the next 5 seconds after using Holy Light, Uther's Basic Attacks heal him and nearby allied Heroes for 15% of the total amount healed by Holy Light.

Blessed Champion has the potential to provide some incredible area of effect healing. This potential, however, hinges upon being able to reliably deliver Basic Attacks, something that is seldom possible. Blessed Champion is a good counterpick to double healer team compositions, as you will have more freedom to play aggressively with your Basic Attacks.

Well Met provides a modest but reliable debuff that stacks with the Armor provided by Devotion in terms of damage reduction. The movement speed reduction is also useful when combined with Hammer of the Lightbringer and Pursuit of Justice, as they allow you to consistently maintain the debuff on your chosen target.

Should you find yourself struggling to survive against caster-heavy team compositions, Spell Shield is your Talent of choice.

Beacon of Light has a rather strange requirement — that of being below 50% Health — and an ultimately limited payoff. Still, this Talent could become popular if Armor of Faith itself becomes popular for some extremely high healing output, though this remains to be seen.
Tyr's Deliverance increases Uther's burst healing, provided you cast Holy Radiance before Holy Light. An interesting added benefit of this Talent is that it increases ALL healing received by the target, including that of Healing Wells, Healing Globes, and other Heroes. This Talent is thus very powerful when playing with other Healers.

Benediction acts as a minor version of Rewind, allowing you to cast the same Ability twice in a row, on top of reducing this Ability's Mana cost by 50. Benediction is notable for allowing you to cast Hammer of Justice twice to land one of the game's longest targeted stuns (a total of 1.5 seconds, up to 3.25 seconds when combined with Divine Storm). When used with Holy Light, Benediction allows you to bypass a large portion of the Ability's Cooldown for a significant amount of single-target healing. Benediction is particularly powerful when used with Holy Radiance and when the Divine Protection Talent has been learned, granting a massive amount of Armor to several Heroes, including yourself.

After Eternal Vanguard ends, Uther revives at the spirit's location with 50% of his maximum Health. This effect has a 180 second cooldown.
Applying Devotion to a Hero that is already affected by Devotion increases their Armor to 50.

Bulwark of Light is a niche Talent that is used to power up a single high-damage Hero. Such a strategy is generally used at the highest levels of play, as it requires a very coordinated team to pull off. Lower-level or otherwise unorganized players may see better results with other Talent options, though the ability to make a target completely invulnerable for 5 seconds can always be useful.

Divine Hurricane simply increases the reliability of Divine Storm by allowing it to hit more targets. This increases Uther's effective threat range, and can be particularly surprising to unsuspecting opponents. This Talent works well with combo-heavy teams that rely on quickly securing kills to gain an advantage.

Redemption is one of the best Talents in the game. The utility of being able to die, expending your opponent's time and resources, then use Devotion's outstanding healing potential, and then revive near your team with a full Mana bar and a reasonable amount of Health simply cannot be overstated. The only thing you need to ensure is that you move your spirit to a safe location to revive, as clever opponents will quickly switch their attention to you in your weakened state.

Divine Protection is a subtle yet extremely impactful Talent that helps Uther with what he is best at: mitigating burst damage. Being able to grant any ally 30 points of Armor on demand simply cannot be overstated. Pick this Talent over Redemption if you seldom find yourself dying.

14 Jan. 2018: Updated Radiance build.
28 Jun. 2017: Reviewed assessments of Level 4 and Level 13 talents.
29 Apr. 2017: Well Met is now situational (was not recommended before).
27 Apr. 2017: Updated page to include talent discussions.
from __future__ import print_function

import copy

from theano.compat import izip
import numpy
import theano
from theano import Apply, scalar, config
from theano import scalar as scal
from six.moves import StringIO, xrange

from theano.gof.utils import MethodNotDefined
from theano.scalar import Scalar
from theano.tensor.elemwise import (Elemwise, DimShuffle, CAReduceDtype)

try:
    import pygpu
    from pygpu import gpuarray
    from pygpu.tools import ScalarArg, ArrayArg
    from pygpu.elemwise import ElemwiseKernel
    from pygpu.reduction import ReductionKernel
    from pygpu.gpuarray import dtype_to_typecode, dtype_to_ctype
except ImportError:
    pass

from .basic_ops import (as_gpuarray_variable, HideC, GpuKernelBase, Kernel,
                        infer_context_name)
from .type import GpuArrayType
from .fp16_help import load_w, write_w


def _is_scalar(v):
    return False


def make_argument(v, name):
    if _is_scalar(v):
        return ScalarArg(numpy.dtype(v.type.dtype), name)
    else:
        return ArrayArg(numpy.dtype(v.type.dtype), name)


def ensure_allocated(storage, shape, dtype, ctx):
    odat = storage[0]
    if odat is not None:
        if odat.shape != shape:
            # It is unsafe to try to resize odat,
            # we have to allocate output storage.
            odat = None
    if odat is None:
        odat = pygpu.empty(shape, dtype=dtype, context=ctx)
    storage[0] = odat
    return odat


def as_C_string_const(s):
    return '\n'.join('"%s\\n"' % (l.replace('"', '\\"'))
                     for l in s.split('\n'))


class GpuElemwise(GpuKernelBase, HideC, Elemwise):
    """
    Elemwise on the GPU.

    """
    nin = property(lambda self: self.scalar_op.nin)
    nout = property(lambda self: self.scalar_op.nout)
    _f16_ok = True

    def __str__(self):
        if self.name is not None:
            return self.name
        items = str(sorted(self.inplace_pattern.items()))
        return "GpuElemwise{%s}%s<gpuarray>" % (self.scalar_op, items)

    def make_node(self, *inputs):
        ctx_name = infer_context_name(*inputs)
        res = Elemwise.make_node(self, *inputs)
        outputs = [GpuArrayType(broadcastable=o.type.broadcastable,
                                context_name=ctx_name,
                                dtype=o.type.dtype)()
                   for o in res.outputs]
        if len(outputs) > 1:
            raise NotImplementedError()
        inputs = [as_gpuarray_variable(i, ctx_name) for i in inputs]
        node = Apply(self, inputs, outputs)

        # Try to generate the kernel to catch SupportCodeErrors
        try:
            scal_ins = [scalar.get_scalar_type(i.dtype) for i in node.inputs]
            scal_out = [scalar.get_scalar_type(o.dtype) for o in node.outputs]

            fake_node = Apply(self.scalar_op, [i() for i in scal_ins],
                              [o() for o in scal_out])
            code = self.scalar_op.c_support_code_apply(fake_node, "test")
            if code:
                raise SupportCodeError(code)
        except MethodNotDefined:
            pass
        try:
            support_code = self.scalar_op.c_support_code()
            if (support_code.strip() != "#define THEANO_MACRO_MOD(x,y) (x % y)" and
                    support_code.strip() != ""):
                # The macro is fine, the C++ struct is not.
raise SupportCodeError(support_code) except MethodNotDefined: pass return node def get_params(self, node): return node.inputs[0].type.context def generate_kernel(self, node, nodename): inps = [make_argument(i, 'i%d' % (n,)) for n, i in enumerate(node.inputs)] scal_v_ins = [scalar.get_scalar_type(i.dtype) for i in node.inputs] outs = [make_argument(o, 'o%d' % (n,)) for n, o in enumerate(node.outputs) if n not in self.inplace_pattern] scal_v_outs = [scalar.get_scalar_type(o.dtype) for o in node.outputs] fake_node = Apply(self.scalar_op, [i() for i in scal_v_ins], [o() for o in scal_v_outs]) scal_in = [i.name + '[i]' if i.dtype != 'float16' else '__half2float(' + i.name + '[i])' for i in inps] scal_out = [] oi = 0 scal_f16 = [] for n in range(len(node.outputs)): if n in self.inplace_pattern: arg = inps[self.inplace_pattern[n]] else: arg = outs[oi] oi += 1 if arg.dtype == 'float16': scal_f16.append(('tmpf16%i' % (len(scal_f16),), arg)) scal_out.append(scal_f16[-1][0]) else: scal_out.append(arg.name + '[i]') kop = self.scalar_op.c_code(fake_node, nodename + '_scalar', scal_in, scal_out, dict(fail='return;')) if scal_f16: # if we have float16 scalars on output we have to wrap # them and insert a stand-in float32 variable since # float16 arithemtic is not available code = ["{"] for f in scal_f16: code.append('ga_float %s;' % (f[0],)) # XXX: The replace is an ugly hack to make sure temp # variables inthe middle are float32 code.append(kop.replace('npy_float16', 'ga_float')) for f in scal_f16: code.append('%s[i] = __float2half_rn(%s);' % (f[1].name, f[0])) code.append('}') kop = '\n'.join(code) support_code = "" try: # We accept only some c_support_code(). # This filter is done in the make_node() support_code += self.scalar_op.c_support_code() except MethodNotDefined: pass for npy, ga in [("npy_uint8", "ga_ubyte"), ("npy_uint16", "ga_ushort"), ("npy_uint32", "ga_uint"), ("npy_uint64", "ga_ulong"), ("npy_int8", "ga_byte"), ("npy_int16", "ga_short"), ("npy_int32", "ga_int"), ("npy_int64", "ga_long"), ("npy_float16", "ga_half"), ("npy_float32", "ga_float"), ("npy_float64", "ga_double"), ]: kop = kop.replace(npy, ga) return ElemwiseKernel(self.get_params(node), inps + outs, kop, preamble=support_code) def c_headers(self): return ['<numpy_compat.h>', '<gpuarray/types.h>'] def c_support_code(self): return self.scalar_op.c_support_code() def _gpu_kernel_code(self, node, nodename): # This is useless by itself, but will serve an eventual c_code # implementation k = self.generate_kernel(node, nodename) nd = node.inputs[0].type.ndim res = [] for i in range(0, nd + 1): res.append(k.render_basic(i, name="elem_" + str(i)) + ';') res.append(k.contig_src + ';') return '\n'.join(res) def gpu_kernels(self, node, nodename): src = self._gpu_kernel_code(node, nodename) nd = node.outputs[0].ndim params = ['uintp'] params.extend('uintp' for _ in range(nd)) num_inputs = len(node.inputs) num_outputs = len(node.outputs) for n in range(num_inputs + num_outputs): if (n - len(node.inputs)) in self.inplace_pattern: continue params.extend([gpuarray.GpuArray, 'uintp']) params.extend('intp' for _ in range(nd)) acc_dtype = getattr(self, 'acc_dtype', None) if acc_dtype is None: acc_dtype = node.outputs[0].type.dtype return [Kernel(code=src, name="elem_%d" % nd, params=params, flags=Kernel.get_flags(node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype), objvar='elem_%d_%s' % (nd, nodename))] def c_code(self, node, name, inputs, outputs, sub): if node.inputs[0].type.context.kind != 'cuda': raise MethodNotDefined('cuda 
only') nd = node.outputs[0].ndim fail = sub["fail"] initial_dims = ','.join('1' for i in xrange(nd)) opname = str(self.scalar_op) ctx = sub['params'] # check that all inputs have valid dimensions emitted_inames = {} num_kernel_params = 1 + nd + len(inputs + outputs) * (2 + nd) code = """ size_t n_blocks = 0; size_t threads_per_block = 0; size_t numEls = 0; const ssize_t zero = 0; void *kernel_params[%(num_kernel_params)d] = {0}; int err; """ % locals() if nd > 0: code += """ size_t dims[%(nd)s] = {%(initial_dims)s}; """ % locals() else: code += """ size_t *dims = NULL; """ for idx, iname in enumerate(inputs): if iname in emitted_inames: assert emitted_inames[iname] is node.inputs[idx] continue broadcasts = map(int, node.inputs[idx].broadcastable) broadcasts = ', '.join(map(str, broadcasts)) nd = node.inputs[idx].ndim if nd > 0: code += """ int broadcasts_%(iname)s[%(nd)s] = {%(broadcasts)s}; """ % locals() else: code += """ int *broadcasts_%(iname)s = NULL; """ % locals() emitted_inames[iname] = node.inputs[idx] # check that all inputs have valid dimensions emitted_inames = {} for idx, iname in enumerate(inputs): if iname in emitted_inames: continue code += """ if (%(nd)s != PyGpuArray_NDIM(%(iname)s)) { PyErr_Format(PyExc_TypeError, "need %(nd)s dims, not %%u", PyGpuArray_NDIM(%(iname)s)); %(fail)s; } for (int i = 0; i< %(nd)s; ++i) { dims[i] = (dims[i] == 1) ? PyGpuArray_DIMS(%(iname)s)[i] : dims[i]; if ((!(broadcasts_%(iname)s[i] && PyGpuArray_DIMS(%(iname)s)[i] == 1)) && (dims[i] != PyGpuArray_DIMS(%(iname)s)[i])) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " %(idx)d (indices start at 0) has shape[%%d] == %%llu" ", but the output's size on that axis is %%llu.", i, (unsigned long long)PyGpuArray_DIMS(%(iname)s)[i], (unsigned long long)dims[i] ); %(fail)s; } } """ % locals() emitted_inames[iname] = True # check that all outputs have valid dimensions for idx, oname in enumerate(outputs): typecode = dtype_to_typecode(node.outputs[idx].dtype) if idx not in self.inplace_pattern.keys(): code += """ for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) { if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i]) { Py_DECREF(%(oname)s); %(oname)s = NULL; } } if (%(oname)s && !GpuArray_CHKFLAGS(&(%(oname)s->ga), GA_C_CONTIGUOUS)) { Py_XDECREF(%(oname)s); %(oname)s = NULL; } if (NULL == %(oname)s) { %(oname)s = pygpu_empty(%(nd)d, dims, %(typecode)s, GA_C_ORDER, %(ctx)s, Py_None); if (!%(oname)s) { %(fail)s } } """ % locals() else: input_idx = self.inplace_pattern[idx] iname = inputs[input_idx] code += """ Py_XDECREF(%(oname)s); %(oname)s = %(iname)s; Py_INCREF(%(oname)s); for (int i = 0; (i< %(nd)s) && (%(oname)s); ++i) { if (dims[i] != PyGpuArray_DIMS(%(oname)s)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " %(idx)d (indices start at 0), working inplace" " on input %(input_idx)s, has shape[%%i] == %%llu" ", but the output's size on that axis is %%llu.", i, (unsigned long long)PyGpuArray_DIMS(%(oname)s)[i], (unsigned long long)dims[i] ); Py_DECREF(%(oname)s); %(oname)s = NULL; %(fail)s; } } """ % locals() z = outputs[0] code += """numEls = PyGpuArray_SIZE(%(z)s); //first use at least a full warp threads_per_block = std::min(numEls, (size_t)32); //WARP SIZE //next start adding multiprocessors // UP TO NUMBER OF MULTIPROCESSORS, use 30 for now. 
n_blocks = std::min(numEls/threads_per_block + (numEls %% threads_per_block?1:0), (size_t)30); // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (size_t) 256); """ % locals() kname = 'elem_%d_%s' % (nd, name) param = ["(void *)&numEls"] for i in range(nd): param.append("(void *)&%(z)s->ga.dimensions[%(i)d]" % dict(z=outputs[0], i=i)) for n, (name, var) in enumerate(zip(inputs + outputs, node.inputs + node.outputs)): if (n - len(inputs)) in self.inplace_pattern: continue dtype = dtype_to_ctype(var.dtype) param.append("(void *)%(name)s->ga.data" % locals()) param.append("(void *)&%(name)s->ga.offset" % locals()) for i in range(nd): param.append("PyGpuArray_DIMS(%(name)s)[%(i)d] == 1 ? (void *)&zero: (void *)&PyGpuArray_STRIDES(%(name)s)[%(i)d]" % locals()) for n, p in enumerate(param): code += "kernel_params[%(n)d] = %(p)s;\n" % locals() code += """ err = GpuKernel_call(&%(kname)s, 1, &threads_per_block, &n_blocks, 0, kernel_params); if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(kname)s: %%s.", GpuKernel_error(&%(kname)s, err)); %(fail)s; } """ % dict(kname=kname, fail=fail) if config.gpuarray.sync: code += """ err = GpuArray_sync(&%(z)s->ga); if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(kname)s: %%s.", GpuKernel_error(&%(kname)s, err)); %(fail)s; } """ % locals() return str(code) def perform(self, node, inputs, output_storage, ctx): # Try to reuse the kernel from a previous call to hopefully # avoid recompiling if not hasattr(node, '_cache_elemwise_k'): node._cache_elemwise_k = self.generate_kernel(node, "kcode") out_shape = [] for values in izip(*[input.shape for input in inputs]): if any(v == 0 for v in values): # All non-broadcasted dimensions should be zero assert max(values) <= 1 out_shape.append(0) else: out_shape.append(max(values)) out_shape = tuple(out_shape) args = copy.copy(inputs) for n, (stor, out) in enumerate(izip(output_storage, node.outputs)): if n in self.inplace_pattern: stor[0] = inputs[self.inplace_pattern[n]] else: args.append(ensure_allocated(stor, out_shape, out.type.dtype, ctx)) node._cache_elemwise_k(*args, broadcast=True) if config.gpuarray.sync: output_storage[0][0].sync() def c_code_cache_version(self): ver = self.scalar_op.c_code_cache_version() if ver: return (4, ver) else: return ver class SupportCodeError(Exception): """ We do not support certain things (such as the C++ complex struct). """ class GpuDimShuffle(HideC, DimShuffle): """ DimShuffle on the GPU. 
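    As a rough illustration of what ``perform`` below does (the shape and
    the ``new_order`` value here are made up purely for exposition, not taken
    from this module): a ``new_order`` such as ``(1, 'x', 0)`` keeps the
    non-``'x'`` axes in their new order and inserts a broadcastable axis at
    each ``'x'`` position, which ``perform`` realises as a transpose followed
    by a reshape. The same layout change with plain numpy would look like::

        import numpy
        a = numpy.zeros((2, 3))
        # transpose the kept axes, then reshape to insert the length-1 axis
        b = a.transpose(1, 0).reshape((3, 1, 2))   # result shape (3, 1, 2)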
""" _f16_ok = True def make_node(self, input): ctx_name = infer_context_name(input) res = DimShuffle.make_node(self, input) otype = GpuArrayType(dtype=res.outputs[0].type.dtype, broadcastable=res.outputs[0].type.broadcastable, context_name=ctx_name) input = as_gpuarray_variable(input, ctx_name) return Apply(self, [input], [otype()]) def __str__(self): if self.inplace: s = "InplaceGpuDimShuffle{%s}" else: s = "GpuDimShuffle{%s}" return s % (','.join(str(x) for x in self.new_order)) def perform(self, node, inp, out): input, = inp storage, = out res = input res = res.transpose(self.shuffle + self.drop) shape = list(res.shape[:len(self.shuffle)]) for augm in self.augment: shape.insert(augm, 1) res = res.reshape(shape) if not self.inplace: res = res.copy() storage[0] = res def c_support_code_apply(self, node, name): def copy_shape(nd_out): stmts = [] e = 0 for d in range(nd_out): if d in self.augment: stmts.append("sh[%s] = 1;" % (d,)) else: stmts.append("sh[%s] = tmp->ga.dimensions[%s];" % (d, e)) e += 1 return '\n '.join(stmts) return """ static const unsigned int %(name)s_ax[] = {%(shuffle)s}; static PyGpuArrayObject *%(name)s_f(PyGpuArrayObject *a) { PyGpuArrayObject *res, *tmp; size_t sh[%(nd_out)s]; tmp = pygpu_transpose(a, %(name)s_ax); if (!tmp) return NULL; %(copy_shape)s res = pygpu_reshape(tmp, %(nd_out)s, sh, GA_ANY_ORDER, 1, -1); Py_DECREF(tmp); return res; } """ % dict(shuffle=', '.join(str(a) for a in (self.shuffle + self.drop)), name=name, nd_out=len(self.new_order), copy_shape=copy_shape(len(self.new_order))) def c_code(self, node, name, inputs, outputs, sub): d = dict(name=name, fail=sub['fail'], inp=inputs[0], out=outputs[0], nd=len(self.input_broadcastable)) process = """ PyGpuArrayObject *tmp = NULL; if (%(inp)s->ga.nd != %(nd)s) { PyErr_SetString(PyExc_TypeError, "input nd"); %(fail)s } Py_XDECREF(%(out)s); %(out)s = %(name)s_f(%(inp)s); if (%(out)s == NULL) {%(fail)s} """ % d if not self.inplace: process += """ tmp = pygpu_copy(%(out)s, GA_ANY_ORDER); Py_DECREF(%(out)s); if (!tmp) { %(out)s = NULL; %(fail)s } %(out)s = tmp; """ % d return process def c_code_cache_version(self): return (5,) class GpuCAReduceCuda(GpuKernelBase, HideC, CAReduceDtype): """ GpuCAReduceCuda is a Reduction along some dimensions by a scalar op. Parameters ---------- reduce_mask The dimensions along which to reduce. The `reduce_mask` is a tuple of booleans (actually integers 0 or 1) that specify for each input dimension, whether to reduce it (1) or not (0). pre_scalar_op If present, must be a scalar op with only 1 input. We will execute it on the input value before reduction. Examples -------- When scalar_op is a theano.scalar.basic.Add instance: - reduce_mask == (1,) sums a vector to a scalar - reduce_mask == (1,0) computes the sum of each column in a matrix - reduce_mask == (0,1) computes the sum of each row in a matrix - reduce_mask == (1,1,1) computes the sum of all elements in a 3-tensor. Notes ----- Any reduce_mask of all zeros is a sort of 'copy', and may be removed during graph optimization. This Op is a work in progress. This op was recently upgraded from just GpuSum a general CAReduce. Not many code cases are supported for scalar_op being anything other than scal.Add instances yet. Important note: if you implement new cases for this op, be sure to benchmark them and make sure that they actually result in a speedup. GPUs are not especially well-suited to reduction operations so it is quite possible that the GPU might be slower for some cases. 
""" __props__ = ('axis', 'reduce_mask', 'dtype', 'acc_dtype', 'scalar_op', 'pre_scalar_op') _f16_ok = True def __init__(self, scalar_op, axis=None, reduce_mask=None, dtype=None, acc_dtype=None, pre_scalar_op=None): if reduce_mask is not None: reduce_mask = tuple(reduce_mask) self.reduce_mask = reduce_mask # used to make sure that calls to scalar op # have unique name arguments self._n_scalar_op_calls = 0 CAReduceDtype.__init__(self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype) self.pre_scalar_op = pre_scalar_op if pre_scalar_op: assert pre_scalar_op.nin == 1 def __str__(self): pre = "" if self.pre_scalar_op: pre = "pre=%s,red=" % str(self.pre_scalar_op) ax = '' if self.axis is not None: ax = '{%s}' % (', '.join(str(x) for x in self.axis),) return "GpuCAReduceCuda{%s%s}%s" % (pre, str(self.scalar_op), ax) def __setstate__(self, d): self.__dict__.update(d) # For unpickling of old ops. if not hasattr(self, "pre_scalar_op"): self.pre_scalar_op = None def make_node(self, x): x = as_gpuarray_variable(x, infer_context_name(x)) if x.type.context.kind != 'cuda': raise TypeError("GpuCAReduceCuda doesn't work for non-cuda devices") ret = super(GpuCAReduceCuda, self).make_node(x) self = copy.copy(self) self.axis = ret.op.axis if self.pre_scalar_op: # Currently we only tested pre_scalar_op that don't cause # upcast. assert Elemwise(self.pre_scalar_op)(x).dtype == x.dtype if self.reduce_mask is None: if self.axis is None: reduce_mask = [1] * x.type.ndim else: reduce_mask = [0] * x.type.ndim for a in self.axis: assert reduce_mask[a] == 0 reduce_mask[a] = 1 self.reduce_mask = tuple(reduce_mask) if (x.type.ndim != len(self.reduce_mask)): raise TypeError("x must have rank %i" % len(self.reduce_mask)) if ("complex" in x.dtype or "complex" in ret.outputs[0].dtype or "complex" in self._acc_dtype(x.dtype)): raise NotImplementedError("We don't support complex in gpu reduction") return Apply(self, [x], [GpuArrayType(ret.outputs[0].dtype, ret.outputs[0].type.broadcastable, context_name=x.type.context_name)()]) def get_params(self, node): return node.inputs[0].type.context def perform(self, node, inp, out, ctx): theano.Op.perform(self, node, inp, out, ctx) def supports_c_code(self, inputs): """ Returns True if the current op and reduce pattern has functioning C code. """ # If we don't even have the right method, we certainly # don't support the C code # (This is the test that used to be implemented by # local_gpu_sum) pattern = (''.join(str(i) for i in self.reduce_mask)) if not hasattr(self, 'c_code_reduce_%s' % pattern): return False # Now that this is a general reduction op, we might # have a method for a pattern, but that pattern # might not be implemented for the current scalar op. # To detect this more complicated situation, we # make fake arguments to c_code, try to run them, # and see if NotImplementedError gets raised. node = self.make_node(*inputs) name = 'fake_name' inp = ['fake_input_name_%d' % i for i in xrange(len(inputs))] out = ['fake_output_name_%d' % i for i in xrange(len(node.outputs))] sub = {'fail': 'fake failure code', 'params': 'fake context'} try: self.c_code(node, name, inp, out, sub) self.c_support_code_apply(node, name) except NotImplementedError: return False return True def c_headers(self): return ['<numpy_compat.h>', '<gpuarray/types.h>'] def c_code(self, node, name, inp, out, sub): x, = inp z, = out nd_in = node.inputs[0].type.ndim nd_out = node.outputs[0].type.ndim # For complex, we need to use theano_complex* in the c code to # have it run. 
But libgpuarray don't understand it. in_dtype = node.inputs[0].type.dtype_specs()[1] out_dtype = node.outputs[0].type.dtype_specs()[1] gin_dtype = "npy_" + node.inputs[0].dtype gout_dtype = "npy_" + node.outputs[0].dtype assert nd_in - nd_out == sum(self.reduce_mask) sio = StringIO() fail = sub['fail'] ctx = sub['params'] # check input print(""" if (PyGpuArray_NDIM(%(x)s) != %(nd_in)s) { PyErr_Format(PyExc_TypeError, "required nd=%(nd_in)s, got nd=%%u", PyGpuArray_NDIM(%(x)s)); %(fail)s; } """ % locals(), file=sio) # It might be nice to use a property of the op class to do this, # but tensor.elemwise.CAReduce has this exact same check so I guess # this is OK to do if self.scalar_op in [scal.minimum, scal.maximum]: conds = ["(PyGpuArray_DIMS(%s)[%d] == 0)" % (x, i) for i in xrange(nd_in) if self.reduce_mask[i]] assert len(conds) > 0 cond = "(" + " || ".join(conds) + ")" print(""" if %(cond)s { PyErr_Format(PyExc_ValueError," tried to reduce a 0-length axis."); %(fail)s; } """ % locals(), file=sio) # # alloc an output if we need one # # check the basics of out output print(""" if ( !%(z)s || (PyGpuArray_NDIM(%(z)s) != %(nd_out)s) """ % locals(), file=sio) # ensure that the output has the right non-reduced dimensions j = 0 for i in xrange(nd_in): if not self.reduce_mask[i]: print(" || (PyGpuArray_DIMS(%(z)s)[%(j)s] != PyGpuArray_DIMS(%(x)s)[%(i)d]) " % locals(), file=sio) j += 1 print(""" ) { """ % locals(), file=sio) if nd_out > 0: print("size_t new_dims[%(nd_out)s]; " % locals(), file=sio) else: print("size_t *new_dims=NULL; ", file=sio) j = 0 for i in xrange(nd_in): if not self.reduce_mask[i]: print('new_dims[%(j)s] = PyGpuArray_DIMS(%(x)s)[%(i)s];' % locals(), file=sio) j += 1 out_typecode = dtype_to_typecode(gout_dtype[4:]) print(""" Py_XDECREF(%(z)s); %(z)s = pygpu_empty(%(nd_out)s, new_dims, %(out_typecode)s, GA_C_ORDER, %(ctx)s, Py_None); if (NULL == %(z)s) { PyErr_Format(PyExc_RuntimeError, "Failed to allocate output"); %(fail)s; } } """ % locals(), file=sio) # \begin bracket the reduction in a check that there is # actually work to do if getattr(self.scalar_op, 'identity', None) == 0: zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals() # TODO: elif getattr(self.scalar_op, 'identity', None) == 1: else: scalar_op = self.scalar_op zero_shp = """ PyErr_Format(PyExc_NotImplementedError, "GpuCAReduceCuda not implemented when input shape is 0" " for this scalar_op: %(scalar_op)s"); %(fail)s; """ % locals() print(""" if (PyGpuArray_SIZE(%(z)s) && ! PyGpuArray_SIZE(%(x)s)){ %(zero_shp)s; } else if (PyGpuArray_SIZE(%(z)s)) { """ % locals(), file=sio) # # Now perform the reduction # if all(i == 1 for i in self.reduce_mask): # check if the tensor is ccontiguous, if true, use the c_code_reduce_ccontig code. # TODO: check if we are ccontiguous when we un-dimshuffle # TODO: if only some dims are ccontiguous, call version with less dims. print('if(%(x)s->ga.flags & GA_C_CONTIGUOUS){' % locals(), file=sio) self.c_code_reduce_ccontig(sio, node, name, x, z, fail) print("}else{", file=sio) getattr(self, 'c_code_reduce_%s' % (''.join(str(i) for i in self.reduce_mask)))( sio, node, name, x, z, fail) print("}", file=sio) else: getattr(self, 'c_code_reduce_%s' % (''.join( str(i) for i in self.reduce_mask)))(sio, node, name, x, z, fail) # \end bracket the reduction ... print(""" } """ % locals(), file=sio) return sio.getvalue() def _makecall(self, node, name, x, z, fail, pattern=None, extra_dims=(), extra_strides=()): """ Return a string for making a kernel call. The return value looks something like: .. 
code-block:: c ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s); ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s); ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s); if (verbose) printf("running kernel_reduce_10_%(name)s\\n"); size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2]; void *kernel_params[] = { (void *)&PyGpuArray_DIMS(%(x)s)[0], (void *)&PyGpuArray_DIMS(%(x)s)[1], (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)&stride_A0, (void *)&stride_A1, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset, (void *)&stride_Z0}; int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params); %(err_check)s """ in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) sio = StringIO() if pattern is None: pattern = ''.join(str(c) for c in self.reduce_mask) ndim = len(self.reduce_mask) nd_out = ndim - sum(self.reduce_mask) shapes_format = "shape=(%s)" % ",".join(["%llu"] * node.inputs[0].ndim) shapes_data = ",".join(["(size_t) PyGpuArray_DIMS(%s)[%d]" % (x, i) for i in range(node.inputs[0].ndim)]) k_var = "kernel_reduce_%(pattern)s_%(name)s" % locals() params = [] for i in xrange(ndim): params.append("(void *)&PyGpuArray_DIMS(%(x)s)[%(i)s]" % locals()) for declaration, value in extra_dims: print(declaration % locals(), file=sio) params.append(value) params.append("(void *)%(x)s->ga.data" % locals()) params.append("(void *)&%(x)s->ga.offset" % locals()) for i in xrange(ndim): print(""" ssize_t stride_A%(i)d = PyGpuArray_STRIDES(%(x)s)[%(i)s]/sizeof(%(in_dtype)s); """ % locals(), file=sio) params.append("(void *)&stride_A%(i)d" % locals()) for declaration, value in extra_strides: print(declaration % locals(), file=sio) params.append(value) params.append("(void *)%(z)s->ga.data" % locals()) params.append("(void *)&%(z)s->ga.offset" % locals()) for i in xrange(nd_out): print(""" ssize_t stride_Z%(i)d = PyGpuArray_STRIDES(%(z)s)[%(i)s]/sizeof(%(out_dtype)s); """ % locals(), file=sio) params.append("(void *)&stride_Z%(i)d" % locals()) kernel_params = ', '.join(params) err_check = """ if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(k_var)s: %%s.", GpuKernel_error(&%(k_var)s, err)); %(fail)s; } """ % locals() print(""" if (verbose) printf("running kernel_reduce_%(pattern)s_%(name)s\\n"); size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0] * n_threads[1] * n_threads[2]; void *kernel_params[] = { %(kernel_params)s }; if (verbose>1) printf("n_threads[0]=%%lu, n_threads[1]=%%lu, " "n_threads[2]=%%lu, n_threads=%%lu, " "n_blocks[0]=%%lu, n_blocks[1]=%%lu, n_blocks[2]=%%lu, " "n_blocks=%%lu, n_shared=%%d, %(shapes_format)s\\n", n_threads[0],n_threads[1], n_threads[2], n_threads[0]*n_threads[1]* n_threads[2], n_blocks[0],n_blocks[1],n_blocks[2], n_blocks[0]*n_blocks[1]*n_blocks[2], n_shared, %(shapes_data)s); int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params); %(err_check)s """ % locals(), file=sio) sync = "" if config.gpuarray.sync: sync = """ err = GpuArray_sync(&%(z)s->ga); %(err_check)s """ % locals() print(""" %(sync)s """ % locals(), file=sio) return sio.getvalue() def _k_decl(self, node, nodename, pattern=None, ndim=None, reduce_mask=None): """ Return a string to declare a kernel function. The result will look something like this: .. 
code-block:: c KERNEL void kernel_reduce_110_%(nodename)s( const ga_size d0, const ga_size d1, const ga_size d2, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0) Since the nodename is unique, we don't need to put the name of the scalar_op in here. """ in_dtype = node.inputs[0].dtype out_dtype = node.outputs[0].dtype in_type = gpuarray.dtype_to_ctype(in_dtype) out_type = gpuarray.dtype_to_ctype(out_dtype) if reduce_mask is None: reduce_mask = self.reduce_mask if ndim is None: ndim = len(reduce_mask) if pattern is None: pattern = ''.join(str(i) for i in reduce_mask) kname = "kernel_reduce_%(pattern)s" % locals() k_var = "kernel_reduce_%(pattern)s_%(nodename)s" % locals() params = [] sio = StringIO() print(""" KERNEL void %(kname)s( """ % locals(), file=sio) for i in xrange(ndim): params.append('uintp') print(""" const ga_size d%(i)s, """ % locals(), file=sio) params.append(gpuarray.GpuArray) params.append('uintp') print(""" const %(in_type)s *A, const ga_size offset_A, """ % locals(), file=sio) for i in xrange(ndim): params.append('intp') print(""" const ga_ssize sA%(i)s, """ % locals(), file=sio) params.append(gpuarray.GpuArray) params.append('uintp') print(""" %(out_type)s * Z, const ga_size offset_Z """ % locals(), file=sio) for i in xrange(ndim - sum(reduce_mask)): params.append('intp') print(""" , const ga_ssize sZ%(i)s """ % locals(), file=sio) print(")", file=sio) return sio.getvalue(), kname, params, k_var def _k_init(self, node, nodename): in_dtype = node.inputs[0].dtype out_dtype = node.outputs[0].dtype acc_dtype = self._acc_dtype(node.inputs[0].dtype) # We need to use theano_complex* and not npy_complex* in_type = gpuarray.dtype_to_ctype(in_dtype) out_type = gpuarray.dtype_to_ctype(out_dtype) acc_type = gpuarray.dtype_to_ctype(acc_dtype) return """ const int threadCount = blockDim.x * blockDim.y * blockDim.z; const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = 0; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); //This is caught in cuda/init.py when we init the gpu. I keep //it here to ease finding code that rely on this. if (warpSize != 32) { Z[0] = -666; return; } """ % locals() def _assign_init(self, first_item): """ This return the initial value for myresult. If the scalar op have an identity value, return it. Otherwise, check that the scalar op is maximum or minimum and return first_item. It should be the first element of the reduction. As the maximum and minimum of the same value don't change, this work. """ if hasattr(self.scalar_op, 'identity'): return str(self.scalar_op.identity) else: assert isinstance(self.scalar_op, (scal.Maximum, scal.Minimum)) if self.pre_scalar_op: # TODO: multiple dtypes # dtype = node.inputs[0].dtype dtype = 'float32' dummy_var = scal.Scalar(dtype=dtype)() dummy_node = self.pre_scalar_op.make_node(dummy_var) dummy_name = 'assign_init_pre_scalar_op' + str(self._n_scalar_op_calls) self._n_scalar_op_calls += 1 t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (first_item,), ("",), {}) assert t.startswith(' = ') first_item = t[3:] if first_item[-1] == ';': first_item = first_item[:-1] return first_item def _assign_reduce(self, node, name, left, right, sub, pre): """ Parameters ---------- node The node argument to this op's c_code. name The name argument to this op's c_code. 
left A C code string identifying an lvalue. right A C code string identifying an expression. sub The sub argument to this op's c_code. pre If True, we will add the pre_scalar_op.c_code. Returns ------- str C code to reduce left and right, assigning the result to left. """ x, = node.inputs in_dtype = x.dtype out_dtype = node.outputs[0].dtype dummy_left = Scalar(dtype=out_dtype)() dummy_right = Scalar(dtype=in_dtype)() dummy_node = self.scalar_op.make_node(dummy_left, dummy_right) dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls) self._n_scalar_op_calls += 1 if pre and self.pre_scalar_op: assert left == "myresult" dummy_node = self.pre_scalar_op.make_node(dummy_left) dummy_name = name + '_scalar_op' + str(self._n_scalar_op_calls) self._n_scalar_op_calls += 1 t = self.pre_scalar_op.c_code(dummy_node, dummy_name, (right,), ("",), sub) assert t.startswith(' = ') right = t[3:] if right[-1] == ';': right = right[:-1] return self.scalar_op.c_code(dummy_node, dummy_name, (left, right), (left,), sub) def _k_reduce_buf(self, z_pos, node, name, sub): """ WRITEME Parameters ---------- node, name, sub These should be passed through from the original call to c_code. """ in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) write_out = write_w(node.outputs[0].dtype) # This code (the code in new_version) is currently ignored. # Code produced later in this function is returned instead. # The code here works with all nvidia driver # But only for powers or multiples of 2! new_version = """ __syncthreads(); // some kernel do multiple reduction. buf[threadNum] = myresult; __syncthreads(); if (threadNum >= ((threadCount >> 1) * 2)) { int idx = threadNum - (threadCount >> 1) * 2;""" new_version += self._assign_reduce(node, name, 'buf[idx]', 'buf[threadNum]', sub, False) new_version += """ } __syncthreads(); // Works for power of 2 only. int nTotalThreads = threadCount; // Total number of active threads while(nTotalThreads > 1) { int halfPoint = (nTotalThreads >> 1); // divide by two // only the first half of the threads will be active. if (threadNum < halfPoint) { // Get the shared value stored by another thread %(acc_dtype)s temp = buf[threadNum + halfPoint]; """ new_version += self._assign_reduce(node, name, 'buf[threadNum]', 'temp', sub, False) new_version += """ } __syncthreads(); nTotalThreads = (nTotalThreads >> 1); // divide by two. } __syncthreads(); if (threadNum == 0) { %(z_pos)s = %(write_out)s(buf[0]); } __syncthreads();""" new_version = new_version % locals() current_version = """ __syncthreads(); // some kernel do multiple reduction. buf[threadNum] = myresult; __syncthreads(); // rest of function is handled by one warp if (threadNum < warpSize) { //round up all the partial sums into the first `warpSize` elements for (int i = threadNum + warpSize; i < threadCount; i += warpSize) { """ current_version += self._assign_reduce(node, name, 'myresult', 'buf[i]', sub, False) + """ } buf[threadNum] = myresult; /*Comment this optimization as it don't work on Fermi GPU. 
TODO: find why it don't work or put the GPU compute capability into the version // no sync because only one warp is running if(threadCount >32) {""" for num in [16, 8, 4, 2, 1]: current_version += self._assign_reduce(node, name, 'buf[threadNum]', 'buf[threadNum+%d]' % num, sub, False) current_version += """ """ current_version += """ if (threadNum == 0) { %(z_pos)s = %(write_out)s(buf[0]); } } else */ if (threadNum < 16) { //reduce so that threadNum 0 has the reduction of everything """ for num in [16, 8, 4, 2, 1]: this_if = "if (threadNum + %d < threadCount) " % num + \ self._assign_reduce(node, name, 'buf[threadNum]', 'buf[threadNum+%d]' % num, sub, False) current_version += this_if current_version += """ """ current_version += """ if (threadNum == 0) { %(z_pos)s = %(write_out)s(buf[0]); } } } """ current_version = current_version % locals() return current_version # Threads must be organized as: threadNum%nb_reduce correspond to the same sum # nb_reduce<=warpSize def _k_reduce_buf_multiple(self, z_pos, node, name, nb_reduce): reduce_fct = self._assign_reduce(node, name, 'myresult', 'buf[i]', {}, False) write_out = write_w(node.outputs[0].dtype) return """ __syncthreads(); // some kernel do multiple reduction. buf[threadNum] = myresult; __syncthreads(); // rest of function is handled by one warp if (threadNum < %(nb_reduce)s) { //round up all the partial sums into the first `nb_reduce` elements for (int i = threadNum + %(nb_reduce)s; i < threadCount; i += %(nb_reduce)s) { %(reduce_fct)s; } %(z_pos)s = %(write_out)s(myresult); } """ % locals() def c_code_reduce_ccontig(self, sio, node, name, x, z, fail): in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype if getattr(self.scalar_op, 'identity', None) == 0: zero_shp = "GpuArray_memset(&%(z)s->ga, 0)" % locals() # TODO: elif getattr(self.scalar_op, 'identity', None) == 1: else: zero_shp = """ PyErr_Format(PyExc_NotImplementedError, "GpuCAReduceCuda not implemented when input shape is 0 for this scalar_op"); %(fail)s; """ % locals() acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) k_var = "kernel_reduce_ccontig_%(name)s" % locals() err_check = """ if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(k_var)s: %%s.", GpuKernel_error(&%(k_var)s, err)); %(fail)s; } """ % locals() sync = "" if config.gpuarray.sync: sync = """ err = GpuArray_sync(&%(z)s->ga); %(err_check)s """ % locals() print(""" { if(PyGpuArray_SIZE(%(x)s)==0){ %(zero_shp)s; }else{ int verbose = 0; size_t numEls = PyGpuArray_SIZE(%(x)s); size_t n_threads = std::min(numEls, (size_t) 256); size_t n_blocks = 1; void *kernel_params[] = {(void *)&numEls, (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset}; if (verbose) printf("running kernel_reduce_ccontig_%(name)s" " n_threads=%%llu, size=%%llu, ndim=%%u\\n", n_threads, numEls, PyGpuArray_NDIM(%(x)s)); size_t n_shared = sizeof(%(acc_dtype)s) * n_threads; int err = GpuKernel_call(&%(k_var)s, 1, &n_threads, &n_blocks, n_shared, kernel_params); %(err_check)s %(sync)s } } """ % locals(), file=sio) def c_code_reduce_1(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1}; size_t n_blocks[3] = {1, 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_11(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t 
n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1}; while (n_threads[1] * n_threads[0] <= 256) ++n_threads[1]; n_threads[1] -= 1; if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0]) n_threads[1] = PyGpuArray_DIMS(%(x)s)[0]; size_t n_blocks[3] = {1, 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_01X(self, sio, node, name, x, z, fail, N): """ Parameters ---------- N The number of 1 in the pattern N=1 -> 01, N=2 -> 011 N=3 ->0111 Work for N=1,2,3. """ assert N in [1, 2, 3] in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype makecall = self._makecall(node, name, x, z, fail) N_pattern = ''.join(['1'] * N) param_dim = ",".join(["PyGpuArray_DIMS(%s)[%d]" % (x, i) for i in xrange(N + 1)]) strides_dim = ",".join(["PyGpuArray_STRIDES(%s)[%d]/sizeof(%s)" % (x, i, in_dtype) for i in xrange(N + 1)]) threads_y = """ //get as many y threads as we can fit while (n_threads[0] * (n_threads[1]+1) <= 256) { if (n_threads[1] < PyGpuArray_DIMS(%(x)s)[%(N)s-1]) n_threads[1] += 1; else break; }""" % locals() threads_z = """ //get as many z threads as we can fit while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) { if (n_threads[2] < PyGpuArray_DIMS(%(x)s)[%(N)s-2]) n_threads[2] += 1; else break; } //Maximum for Fermi GPU on that dimensions. n_threads[2] = std::min(n_threads[2], (size_t)64); """ % locals() if len(self.reduce_mask) == 2: threads_y = '' threads_z = '' if len(self.reduce_mask) == 3: threads_z = '' print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[%(N)s], (size_t) 256), 1, 1}; %(threads_y)s %(threads_z)s size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_01(self, sio, node, name, x, z, fail): self.c_code_reduce_01X(sio, node, name, x, z, fail, 1) def c_code_reduce_011(self, sio, node, name, x, z, fail): self.c_code_reduce_01X(sio, node, name, x, z, fail, 2) def c_code_reduce_0111(self, sio, node, name, x, z, fail): self.c_code_reduce_01X(sio, node, name, x, z, fail, 3) def c_code_reduce_10(self, sio, node, name, x, z, fail): in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) k_var = "kernel_reduce_10_%(name)s" % locals() err_check = """ if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(k_var)s: %%s.", GpuKernel_error(%(k_var)s, err)); %(fail)s; } """ % locals() sync = "" if config.gpuarray.sync: sync = """ err = GpuArray_sync(&%(z)s->ga); %(err_check)s """ % locals() print(""" { int verbose = 0; if(PyGpuArray_STRIDES(%(x)s)[0]> PyGpuArray_STRIDES(%(x)s)[1]){ // If there are a lot of summations to do, then we can use simple parallelization - // use each thread to do one sum. // we might as well launch blocks of 32 threads because that's the warp size. // we could schedule more threads if we were maxing out the gridsize below, but // the gridsize is way more than the physical hardware and I think 32 threads // on a huge grid is enough to fully use the hardware. size_t n_threads[3] = {32, 1, 1}; // We kindof reshape the input implicitly to something 4D: // the shape A,B,C -> A, B, D, E // where C <= D*E < C+32 // where E==32 GpuKernel *%(k_var)s = &kernel_reduce_010_AD_%(name)s; size_t A = 1; size_t B = PyGpuArray_DIMS(%(x)s)[0]; size_t C = PyGpuArray_DIMS(%(x)s)[1]; size_t D = C/32; if (32*D < C) D+= 1; assert ((C <= 32*D) && (32*D < C+32)); // The gridsize would ideally be (A, D). 
But we do the following logic to make // sure we don't ask for a grid that is too big. size_t n_blocks[3] = {A, D, 1}; if (n_blocks[0] > 4096) n_blocks[0] = 4096; if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0]; ssize_t stride_A0 = 1; ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s); ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s); ssize_t stride_Z0 = 1; ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s); void *kernel_params[] = { (void *)&A, (void *)&B, (void *)&C, (void *)&D, (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset, (void *)&stride_Z0, (void *)&stride_Z1}; int err = GpuKernel_call(%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params); %(err_check)s %(sync)s }else{ GpuKernel *%(k_var)s = &kernel_reduce_010_%(name)s; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1}; size_t n_blocks[3] = {1, std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 4096), 1}; if (verbose) { fprintf(stderr, "running kernel_reduce_10_%(name)s n_blocks=(%%llu,%%llu)\\n", (unsigned long long)n_blocks[0], (unsigned long long)n_blocks[1]); } assert(PyGpuArray_DIMS(%(x)s)[1] == PyGpuArray_DIMS(%(z)s)[0]); size_t n_shared = sizeof(%(acc_dtype)s) * n_threads[0]; size_t dim_0 = 1; ssize_t stride_A0 = 1; ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s); ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s); ssize_t stride_Z0 = 1; ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s); void *kernel_params[] = { (void *)&dim_0, (void *)&PyGpuArray_DIMS(%(x)s)[0], (void *)&PyGpuArray_DIMS(%(x)s)[1], (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset, (void *)&stride_Z0, (void *)&stride_Z1}; int err = GpuKernel_call(%(k_var)s, 3, n_threads, n_blocks, n_shared, kernel_params); %(err_check)s %(sync)s } } """ % locals(), file=sio) def c_code_reduce_010(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) makecall_inner = self._makecall(node, name, x, z, fail, pattern="010_inner") pattern = ''.join(str(i) for i in self.reduce_mask) in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype k_var = "kernel_reduce_010_AD_%(name)s" % locals() err_check = """ if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(k_var)s: %%s.", GpuKernel_error(&%(k_var)s, err)); %(fail)s; } """ % locals() sync = "" if config.gpuarray.sync: sync = """ err = GpuArray_sync(&%(z)s->ga); %(err_check)s """ % locals() print(""" { //int n_summations = PyGpuArray_DIMS(%(x)s)[0] * PyGpuArray_DIMS(%(x)s)[2]; //if ((n_summations >= 15 * 32) && (PyGpuArray_DIMS(%(x)s)[2]>=16)) if (1) // if the alternative is less buggy, consider not using this branch { // If there are a lot of summations to do, then we can use simple parallelization - // use each thread to do one sum. // we might as well launch blocks of 32 threads because that's the warp size. // we could schedule more threads if we were maxing out the gridsize below, but // the gridsize is way more than the physical hardware and I think 32 threads // on a huge grid is enough to fully use the hardware. 
size_t n_threads[3] = {32, 1, 1}; // We kindof reshape the input implicitly to something 4D: // the shape A,B,C -> A, B, D, E // where C <= D*E < C+32 // where E==32 size_t A = PyGpuArray_DIMS(%(x)s)[0]; size_t B = PyGpuArray_DIMS(%(x)s)[1]; size_t C = PyGpuArray_DIMS(%(x)s)[2]; size_t D = C/32; if (32*D < C) D+= 1; assert ((C <= 32*D) && (32*D < C+32)); // The gridsize would ideally be (A, D). But we do the following logic to make // sure we don't ask for a grid that is too big. size_t n_blocks[3] = {A, D, 1}; if (n_blocks[0] > 4096) n_blocks[0] = 4096; if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0]; ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s); ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s); ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s); ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s); ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s); void *kernel_params[] = { (void *)&A, (void *)&B, (void *)&C, (void *)&D, (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset, (void *)&stride_Z0, (void *)&stride_Z1}; int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params); %(err_check)s %(sync)s } else { int verbose = 2; size_t n_threads[3] = {std::min((size_t) 32, PyGpuArray_DIMS(%(x)s)[2]), 1, 1}; while( (n_threads[0]*(n_threads[1]+1)<=256) && (n_threads[1]<PyGpuArray_DIMS(%(x)s)[1])){ n_threads[1]++; } size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096), 1, 1}; n_blocks[1] = std::min( ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2], (size_t)n_threads[0]), (size_t)(4096 / n_blocks[0]) ); if(std::min(std::min(PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s), PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s)), PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s)) ==PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s) && n_blocks[1]==ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2], (size_t)n_threads[0])){ if(verbose>1) printf("n_block.x.1=%%d, n_block.x.2=%%d, n_block.y.1=%%d, n_block.y.2=%%d,\\n", PyGpuArray_DIMS(%(x)s)[0],4096, ceil_intdiv(PyGpuArray_DIMS(%(x)s)[2],(size_t)n_threads[0]), (size_t)(4096 / n_blocks[0])); assert(n_threads[0]<=32); %(makecall_inner)s }else{ n_threads[0] = std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256); n_blocks[0] = std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t)4096); n_blocks[1] = std::min( PyGpuArray_DIMS(%(x)s)[2], (size_t)(4096 / n_blocks[0]) ); %(makecall)s } %(sync)s } } """ % locals(), file=sio) def c_code_reduce_0101(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1}; while (n_threads[0] * n_threads[1] <= 256) { if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break; n_threads[1] += 1; } n_threads[1] -= 1; size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[0], PyGpuArray_DIMS(%(x)s)[2], 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_100(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) k_var = "kernel_reduce_010_AD_%(name)s" % locals() err_check = """ if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: %(k_var)s: %%s.", GpuKernel_error(&%(k_var)s, err)); 
%(fail)s; } """ % locals() sync = "" if config.gpuarray.sync: sync = """ err = GpuArray_sync(&%(z)s->ga); %(err_check)s """ % locals() # use threadIdx.x for i0 # use blockIdx.x for i1 # use blockIdx.y for i2 print(""" { int verbose = 0; if (PyGpuArray_STRIDES(%(x)s)[2] != sizeof(%(in_dtype)s)){ printf("slow\\n"); size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 256), 1, 1}; size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t)4096), 1, 1}; while (n_blocks[0] * (n_blocks[1]+1) <= 4096 && n_blocks[1] <= PyGpuArray_DIMS(%(x)s)[2]) { n_blocks[1] += 1; } %(makecall)s } else { // reuse 010_AD kernel, we transpose the 2 first dim // See the reduction for the real 010_AD kernel for // explanation. We do this to get coalesced read. size_t n_threads[3] = {32, 1, 1}; size_t A = PyGpuArray_DIMS(%(x)s)[1]; size_t B = PyGpuArray_DIMS(%(x)s)[0]; size_t C = PyGpuArray_DIMS(%(x)s)[2]; size_t D = C/32; if (32*D < C) D+= 1; assert ((C <= 32*D) && (32*D < C+32)); // The gridsize would ideally be (A, D). But we do the following logic to make // sure we don't ask for a grid that is too big. size_t n_blocks[3] = {A, D, 1}; if (n_blocks[0] > 4096) n_blocks[0] = 4096; if (n_blocks[0]*n_blocks[1] > 4096) n_blocks[1] = 4096/n_blocks[0]; size_t n_shared = 0; ssize_t stride_A0 = PyGpuArray_STRIDES(%(x)s)[1]/sizeof(%(in_dtype)s); ssize_t stride_A1 = PyGpuArray_STRIDES(%(x)s)[0]/sizeof(%(in_dtype)s); ssize_t stride_A2 = PyGpuArray_STRIDES(%(x)s)[2]/sizeof(%(in_dtype)s); ssize_t stride_Z0 = PyGpuArray_STRIDES(%(z)s)[0]/sizeof(%(out_dtype)s); ssize_t stride_Z1 = PyGpuArray_STRIDES(%(z)s)[1]/sizeof(%(out_dtype)s); void *kernel_params[] = { (void *)&A, (void *)&B, (void *)&C, (void *)&D, (void *)%(x)s->ga.data, (void *)&%(x)s->ga.offset, (void *)&stride_A0, (void *)&stride_A1, (void *)&stride_A2, (void *)%(z)s->ga.data, (void *)&%(z)s->ga.offset, (void *)&stride_Z0, (void *)&stride_Z1}; int err = GpuKernel_call(&%(k_var)s, 3, n_threads, n_blocks, 0, kernel_params); %(err_check)s %(sync)s } } """ % locals(), file=sio) def c_code_reduce_110(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[1], (size_t) 256), 1, 1}; while (n_threads[0]*n_threads[1] <= 256) { if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[0]) break; n_threads[1] += 1; } n_threads[1] -= 1; size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[2], 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_001(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1}; size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1}; while (n_blocks[0] * n_blocks[1] <= 4096) { if (n_blocks[1] > PyGpuArray_DIMS(%(x)s)[1]) break; n_blocks[1] += 1; } n_blocks[1] -= 1; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_101(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail, extra_dims=[("size_t one = 1;", "(void *) &one")], extra_strides=[("ssize_t sone = 1;", "(void *) &sone")], pattern="1011") print(""" { int verbose = 0; // size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], // (size_t) 256), 1, 1}; size_t n_threads[3] = {1, 1, 1}; while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1]; if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2]) n_threads[1] = PyGpuArray_DIMS(%(x)s)[2]; while (n_threads[0] * n_threads[1] * 
(n_threads[2]+1) <= 256) ++n_threads[2]; if (n_threads[2] > 64) n_threads[2] = 64; if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0]) n_threads[2] = PyGpuArray_DIMS(%(x)s)[0]; size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_111(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1}; //get as many y threads as we can fit while (n_threads[0] * n_threads[1] <= 256) { if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break; n_threads[1] += 1; } n_threads[1] -= 1; //get as many z threads as we can fit while (n_threads[0] * n_threads[1] * n_threads[2] <= 256) { if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0]) break; n_threads[2] += 1; } n_threads[2] -= 1; //Maximum for Fermi GPU on that dimensions. n_threads[2] = std::min(n_threads[2], (size_t)64); size_t n_blocks[3] = {1, 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_0011(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) in_dtype = "npy_" + node.inputs[0].dtype out_dtype = "npy_" + node.outputs[0].dtype acc_dtype = "npy_" + self._acc_dtype(node.inputs[0].dtype) print(""" { int verbose = 0; size_t n_blocks[3] = {std::min(PyGpuArray_DIMS(%(x)s)[0], (size_t) 4096), 1, 1}; while (n_blocks[0] * n_blocks[1] <= 4096 && n_blocks[1] < PyGpuArray_DIMS(%(x)s)[1]) { n_blocks[1] += 1; } size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1}; while (n_threads[0] * n_threads[1] <= 256 && n_threads[1] < PyGpuArray_DIMS(%(x)s)[2] && n_threads[0] * n_threads[1] * sizeof(%(acc_dtype)s) <=(15*1024-200)) { n_threads[1] += 1; } %(makecall)s } """ % locals(), file=sio) def c_code_reduce_1111(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[2], (size_t) 256), 1, 1}; //get as many y threads as we can fit while (n_threads[0] * n_threads[1] <= 256) { if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[1]) break; n_threads[1] += 1; } n_threads[1] -= 1; //get as many z threads as we can fit while (n_threads[0] * n_threads[1] * n_threads[2] <= 256) { if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0]) break; n_threads[2] += 1; } n_threads[2] -= 1; //Maximum for Fermi GPU on that dimensions. n_threads[2] = std::min(n_threads[2], (size_t)64); size_t n_blocks[3] = {1, 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_reduce_1011(self, sio, node, name, x, z, fail): makecall = self._makecall(node, name, x, z, fail) print(""" { int verbose = 0; size_t n_threads[3] = {std::min(PyGpuArray_DIMS(%(x)s)[3], (size_t) 256), 1, 1}; while (n_threads[0] * (n_threads[1]+1) <= 256) ++n_threads[1]; if (n_threads[1] > PyGpuArray_DIMS(%(x)s)[2]) n_threads[1] = PyGpuArray_DIMS(%(x)s)[2]; while (n_threads[0] * n_threads[1] * (n_threads[2]+1) <= 256) ++n_threads[2]; if (n_threads[2] > 64) n_threads[2] = 64; if (n_threads[2] > PyGpuArray_DIMS(%(x)s)[0]) n_threads[2] = PyGpuArray_DIMS(%(x)s)[0]; size_t n_blocks[3] = {PyGpuArray_DIMS(%(x)s)[1], 1, 1}; %(makecall)s } """ % locals(), file=sio) def c_code_cache_version_apply(self, node): version = [18] # the version corresponding to the c code in this Op # now we insert versions for the ops on which we depend... 
scalar_node = Apply( self.scalar_op, [Scalar(dtype=input.type.dtype)() for input in node.inputs], [Scalar(dtype=output.type.dtype)() for output in node.outputs]) version.extend(self.scalar_op.c_code_cache_version_apply(scalar_node)) for i in node.inputs + node.outputs: version.extend(Scalar(dtype=i.type.dtype).c_code_cache_version()) version.extend(self.kernel_version(node)) if all(version): return tuple(version) else: return () def gpu_kernels(self, node, nodename): nd_in = len(self.reduce_mask) in_dtype = node.inputs[0].dtype out_dtype = node.outputs[0].dtype acc_dtype = self._acc_dtype(node.inputs[0].dtype) flags = Kernel.get_flags(in_dtype, acc_dtype, out_dtype) in_type = gpuarray.dtype_to_ctype(in_dtype) out_type = gpuarray.dtype_to_ctype(out_dtype) acc_type = gpuarray.dtype_to_ctype(acc_dtype) load_in = load_w(in_dtype) write_out = write_w(out_dtype) kernels = [] if all(i == 1 for i in self.reduce_mask): # this kernel is ok for up to a few thousand elements, but # it only runs on ONE multiprocessor reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0])", {}, True) reduce_init = self._assign_init(load_in + "(A[0])") kname = "kernel_reduce_ccontig" k_var = "kernel_reduce_ccontig_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const %(in_type)s *A, const ga_size offset_A, %(out_type)s *Z, const ga_size offset_Z) { const int threadCount = blockDim.x; const int threadNum = threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = %(reduce_init)s; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x) { %(reduce_fct)s } %(reducebuf)s } """ % locals(), file=sio) params = [ 'uintp', gpuarray.GpuArray, 'uintp', gpuarray.GpuArray, 'uintp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1,): # this kernel is ok for up to a few thousand elements, but # it only runs on ONE multiprocessor reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0])", {}, True) reduce_init = self._assign_init(load_in + "(A[0])") kname = "kernel_reduce_1" k_var = "kernel_reduce_1_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, %(out_type)s * Z, const ga_size offset_Z) { const int threadCount = blockDim.x; const int threadNum = threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = %(reduce_init)s; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x) { %(reduce_fct)s } %(reducebuf)s } """ % locals(), file=sio) params = [ 'uintp', gpuarray.GpuArray, 'uintp', 'intp', gpuarray.GpuArray, 'uintp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 1): # this kernel is ok for up to a few thousand elements, but # it only runs on ONE multiprocessor reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1])", {}, True) reduce_init = 
self._assign_init(load_in + "(A[0])") kname = "kernel_reduce_11" k_var = "kernel_reduce_11_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const ga_size d1, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, %(out_type)s * Z, const ga_size offset_Z) { const int threadCount = blockDim.x * blockDim.y; const int threadNum = threadIdx.y*blockDim.x + threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = %(reduce_init)s; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y) { for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x) { %(reduce_fct)s; } } %(reducebuf)s } """ % locals(), file=sio) params = [ 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', gpuarray.GpuArray, 'uintp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) # 01, 011, 0111 if (0 == self.reduce_mask[0] and all(self.reduce_mask[1:]) and nd_in in[2, 3, 4]): # this kernel uses one block for each row. # threads per block for each element per row. N_pattern = ''.join(['1'] * (nd_in - 1)) # TODO: is it faster to hardcode sA3, etc. in the later # code, rather than have the for_* variables declare them # and the later code use their names? if nd_in == 2: for_i1 = "for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x)" first_i1 = 'threadIdx.x' sA1 = 'sA1' for_i2 = "int i2=0, sA2=0;" sA2 = '0' first_i2 = '0' for_i3 = "int i3=0, sA3=0;" sA3 = '0' first_i3 = '0' if nd_in == 3: for_i1 = "for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y)" first_i1 = 'threadIdx.y' sA1 = 'sA1' for_i2 = "for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x)" first_i2 = 'threadIdx.x' sA2 = 'sA2' for_i3 = "int i3=0, sA3=0;" first_i3 = 0 sA3 = '0' if nd_in == 4: for_i1 = "for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z)" first_i1 = 'threadIdx.z' sA1 = 'sA1' for_i2 = "for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y)" first_i2 = 'threadIdx.y' sA2 = 'sA2' for_i3 = "for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x)" first_i3 = 'threadIdx.x' sA3 = 'sA3' reducebuf = self._k_reduce_buf('Z[i0 * sZ0]', node, nodename, sub={}) param_dim = ",".join(["const ga_size d%d" % i for i in xrange(nd_in)]) param_strides = ",".join(["const ga_ssize sA%d" % i for i in xrange(nd_in)]) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_init = self._assign_init(load_in + "(A[%(first_i3)s * %(sA3)s + %(first_i2)s * %(sA2)s + %(first_i1)s * %(sA1)s + i0 * sA0])" % locals()) reduce_fct = self._assign_reduce( node, nodename, "myresult", load_in + "(A[i3 * sA3 + i2 * sA2 + i1 * sA1 + i0 * sA0])", {}, True) sio = StringIO() print(""" %(decl)s{ %(init)s for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x){ myresult = %(reduce_init)s; %(for_i1)s{ %(for_i2)s{ %(for_i3)s{ %(reduce_fct)s; } } } %(reducebuf)s } } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (0, 1, 0) or self.reduce_mask == (1, 0): # this kernel uses one block for each column, # threads per block for each element per column. # TODO: This kernel is pretty inefficient in terms of reading, because if A is # c_contiguous (typical case) then each warp is accessing non-contigous # memory (a segment of a column). 
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2*sZ1]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + threadIdx.x * sA1 + i2 * sA2])") kname = "kernel_reduce_010" k_var = "kernel_reduce_010_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const ga_size d1, const ga_size d2, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0, const ga_ssize sZ1) { const int threadCount = blockDim.x; const int threadNum = threadIdx.x; extern __shared__ %(acc_type)s buf[]; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x) { for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y) { %(acc_type)s myresult = %(reduce_init)s; for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x) { %(reduce_fct)s; } %(reducebuf)s } } } """ % locals(), file=sio) params = [ 'uintp', 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', 'intp', gpuarray.GpuArray, 'uintp', 'intp', 'intp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask in [(0, 1, 0), (1, 0), (1, 0, 0)]: reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(X[a * sX0 + b * sX1 + c * sX2])", {}, True) reduce_init = self._assign_init(load_in + "(X[a * sX0 + 0 * sX1 + c * sX2])") kname = "kernel_reduce_010_AD" k_var = "kernel_reduce_010_AD_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size A, const ga_size B, const ga_size C, const ga_size D, const %(in_type)s *X, const ga_size offset_X, const ga_ssize sX0, const ga_ssize sX1, const ga_ssize sX2, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0, const ga_ssize sZ1) { const int threadCount = blockDim.x; const int threadNum = threadIdx.x; %(acc_type)s myresult = 0; X = (const %(in_type)s *)(((char *)X)+offset_X); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int a = blockIdx.x; a < A; a += gridDim.x) { for (int i2_D = blockIdx.y; i2_D < D; i2_D += gridDim.y) { int c = i2_D * 32 + threadIdx.x; if (c < C) { myresult = %(reduce_init)s; for (int b = 0; b < B; ++b) { %(reduce_fct)s; } Z[a * sZ0 + c * sZ1] = %(write_out)s(myresult); } } } } """ % locals(), file=sio) params = [ 'uintp', 'uintp', 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', 'intp', gpuarray.GpuArray, 'uintp', 'intp', 'intp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (0, 1, 0): # # This kernel is optimized when the inner most dimensions # have the smallest stride. # this kernel uses one block for multiple column(up to 32TODO), # threads per block for each element per column. 
# thread.x = dim 2 contiguous # thread.y = dim 1 # block.x = dim 0 # block.y = dim 1 rest init = self._k_init(node, nodename) decl, kname, params, k_var = self._k_decl(node, nodename, pattern="010_inner") reducebuf = self._k_reduce_buf_multiple('Z[i0 * sZ0 + i2*sZ1]', node, nodename, 'blockDim.x') reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + 0 * sA1 + i2 * sA2])") sio = StringIO() print(""" %(decl)s { if(warpSize<blockDim.x){ //TODO: set error code Z[0] = -666; return; } %(init)s for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x) { for (int i2 = blockIdx.y*blockDim.x+threadIdx.x; i2 < d2; i2 += gridDim.y*blockDim.x) { myresult = %(reduce_init)s; for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y) { %(reduce_fct)s; } %(reducebuf)s } } } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 1, 0): # this kernel uses one block for each column, # threads per block for each element per column. # TODO: This kernel is pretty inefficient in terms of reading, because if A is # c_contiguous (typical case) then each warp is accessing non-contigous # memory (a segment of a column). reducebuf = self._k_reduce_buf('Z[blockIdx.x * sZ0]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + blockIdx.x * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA2])") kname = "kernel_reduce_110" k_var = "kernel_reduce_110_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const ga_size d1, const ga_size d2, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0) { const int threadCount = blockDim.x * blockDim.y; const int threadNum = threadIdx.y * blockDim.x + threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = %(reduce_init)s; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { //TODO: set error code Z[blockIdx.x * sZ0] = %(write_out)s(-666); return; } for (int i0 = threadIdx.y; i0 < d0; i0 += blockDim.y) { for (int i1 = threadIdx.x; i1 < d1; i1 += blockDim.x) { %(reduce_fct)s; } } %(reducebuf)s } """ % locals(), file=sio) params = [ 'uintp', 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', 'intp', gpuarray.GpuArray, 'uintp', 'intp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 0, 0): reducebuf = self._k_reduce_buf('Z[i1 * sZ0 + i2 * sZ1]', node, nodename, sub={}) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[i1 * sA1 + i2 * sA2])") sio = StringIO() print(""" %(decl)s { %(init)s for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y) { for (int i1 = blockIdx.x; i1 < d1; i1 += gridDim.x) { myresult = %(reduce_init)s; for (int i0 = threadIdx.x; i0 < d0; i0 += blockDim.x) { %(reduce_fct)s } %(reducebuf)s } } } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 1, 1): 
reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[0])") sio = StringIO() print(""" %(decl)s { %(init)s myresult = %(reduce_init)s; for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z) { for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y) { for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x) { %(reduce_fct)s; } } } %(reducebuf)s } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (0, 0, 1): # this kernel uses one block for each row, # threads per block for each element per row. reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2])", {}, True) reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])") kname = "kernel_reduce_001" k_var = "kernel_reduce_001_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const ga_size d1, const ga_size d2, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0, const ga_ssize sZ1) { const int threadCount = blockDim.x; const int threadNum = threadIdx.x; extern __shared__ %(acc_type)s buf[]; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x) { for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y) { %(acc_type)s myresult = %(reduce_init)s; for (int i2 = threadIdx.x; i2 < d2; i2 += blockDim.x) { %(reduce_fct)s; } %(reducebuf)s } } } """ % locals(), file=sio) params = [ 'uintp', 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', 'intp', gpuarray.GpuArray, 'uintp', 'intp', 'intp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (0, 0, 1, 1): # this kernel uses one block for each row, # threads per block for each element per row. reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i1 * sZ1]', node, nodename, sub={}) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])", {}, True) reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i1 * sA1])") sio = StringIO() print(""" %(decl)s { %(init)s for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x) { for (int i1 = blockIdx.y; i1 < d1; i1 += gridDim.y) { %(acc_type)s myresult = %(reduce_init)s; for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y) { for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x) { %(reduce_fct)s; } } %(reducebuf)s } } } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (0, 1, 0, 1): # this kernel uses one block for each row, # threads per block for each element per row. 
reducebuf = self._k_reduce_buf('Z[i0 * sZ0 + i2 * sZ1]', node, nodename, sub={}) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])", {}, True) reduce_init = self._assign_init(load_in + "(A[i0 * sA0 + i2 * sA2])") sio = StringIO() print(""" %(decl)s { %(init)s for (int i0 = blockIdx.x; i0 < d0; i0 += gridDim.x) { for (int i2 = blockIdx.y; i2 < d2; i2 += gridDim.y) { %(acc_type)s myresult = %(reduce_init)s; for (int i1 = threadIdx.y; i1 < d1; i1 += blockDim.y) { for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x) { %(reduce_fct)s; } } %(reducebuf)s } } } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 1, 1, 1): reducebuf = self._k_reduce_buf('Z[0]', node, nodename, sub={}) decl, kname, params, k_var = self._k_decl(node, nodename) init = self._k_init(node, nodename) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + i1 * sA1 + i2 * sA2 + i3 * sA3])", {}, True) reduce_init = self._assign_init(load_in + "(A[0])") sio = StringIO() print(""" %(decl)s { %(init)s myresult = %(reduce_init)s; for (int i0 = 0; i0 < d0; i0++) for (int i1 = threadIdx.z; i1 < d1; i1 += blockDim.z) { for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y) { for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x) { %(reduce_fct)s; } } } %(reducebuf)s } """ % locals(), file=sio) kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) if self.reduce_mask == (1, 0, 1, 1) or self.reduce_mask == (1, 0, 1): reducebuf = self._k_reduce_buf('Z[blockIdx.x*sZ0]', node, nodename, sub={}) reduce_fct = self._assign_reduce(node, nodename, "myresult", load_in + "(A[i0 * sA0 + blockIdx.x * sA1 + i2 * sA2 + i3 * sA3])", {}, True) reduce_init = self._assign_init(load_in + "(A[blockIdx.x * sA1])") kname = "kernel_reduce_1011" k_var = "kernel_reduce_1011_" + nodename sio = StringIO() print(""" KERNEL void %(kname)s( const ga_size d0, const ga_size d1, const ga_size d2, const ga_size d3, const %(in_type)s *A, const ga_size offset_A, const ga_ssize sA0, const ga_ssize sA1, const ga_ssize sA2, const ga_ssize sA3, %(out_type)s * Z, const ga_size offset_Z, const ga_ssize sZ0) { const int threadCount = blockDim.x * blockDim.y * blockDim.z; const int threadNum = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; extern __shared__ %(acc_type)s buf[]; %(acc_type)s myresult = %(reduce_init)s; A = (const %(in_type)s *)(((char *)A)+offset_A); Z = (%(out_type)s *)(((char *)Z)+offset_Z); if (warpSize != 32) { return; //TODO: set error code } for (int i0 = threadIdx.z; i0 < d0; i0 += blockDim.z) { for (int i2 = threadIdx.y; i2 < d2; i2 += blockDim.y) { for (int i3 = threadIdx.x; i3 < d3; i3 += blockDim.x) { %(reduce_fct)s; } } } %(reducebuf)s } """ % locals(), file=sio) params = [ 'uintp', 'uintp', 'uintp', 'uintp', gpuarray.GpuArray, 'uintp', 'intp', 'intp', 'intp', 'intp', gpuarray.GpuArray, 'uintp', 'intp' ] kernels.append(Kernel(code=sio.getvalue(), name=kname, params=params, flags=flags, objvar=k_var)) return kernels class GpuCAReduceCPY(GpuKernelBase, HideC, CAReduceDtype): """ CAReduce that reuse the python code from gpuarray. 
""" def __init__(self, scalar_op, axis=None, dtype=None, acc_dtype=None): if not hasattr(scalar_op, 'identity'): raise ValueError("No identity on scalar op") CAReduceDtype.__init__(self, scalar_op, axis=axis, dtype=dtype, acc_dtype=acc_dtype) def __str__(self): ax = '' if self.axis is not None: ax = '{%s}' % (', '.join(str(x) for x in self.axis),) return "GpuReduce{%s}%s" % (self.scalar_op, ax) def make_node(self, input): ctx_name = infer_context_name(input) res = CAReduceDtype.make_node(self, input) input = as_gpuarray_variable(input, ctx_name) otype = GpuArrayType(dtype=res.outputs[0].dtype, broadcastable=res.outputs[0].broadcastable, context_name=ctx_name) if res.op.axis is not None: redux = [] for i in range(len(input.type.broadcastable)): redux.append(i in res.op.axis) # since redux is just another way to describe what is in axis # it doesn't need to be compared in __eq__ or __hash__ res.op.redux = redux return Apply(res.op, [input], [otype()]) def get_params(self, node): return node.outputs[0].type.context def make_thunk(self, node, storage_map, compute_map, no_recycling): # cache the kernel object self.get_kernel_cache(node) return super(GpuCAReduceCPY, self).make_thunk( node, storage_map, compute_map, no_recycling) def get_kernel_cache(self, node): attr = '@cache_reduction_k' if self.axis is None: redux = [True] * node.inputs[0].ndim else: redux = self.redux if not hasattr(node, attr): acc_dtype = getattr(self, 'acc_dtype', None) if acc_dtype is None: acc_dtype = node.outputs[0].type.dtype if any(redux): setattr(node, attr, self.generate_kernel(node, acc_dtype, redux)) if any(redux): return getattr(node, attr) def gpu_kernels(self, node, name): if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])): # Some OpenCL compilers do not accept no-arguments kernels src = "KERNEL void reduk(GLOBAL_MEM float *a) {}" params = ['float32'] else: k = self.get_kernel_cache(node) _, src, _, _ = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim) nd = node.inputs[0].ndim params = ['uint32', gpuarray.GpuArray] params.extend('uint32' for _ in range(nd)) params.append(gpuarray.GpuArray) params.append('uint32') params.extend('int32' for _ in range(nd)) acc_dtype = getattr(self, 'acc_dtype', None) if acc_dtype is None: acc_dtype = node.outputs[0].type.dtype return [Kernel(code=src, name="reduk", params=params, flags=Kernel.get_flags(node.inputs[0].type.dtype, acc_dtype, node.outputs[0].type.dtype), objvar='k_reduk_' + name)] def c_code(self, node, name, inp, out, sub): if not any(getattr(self, 'redux', [node.inputs[0].ndim != 0])): # We special case the no-reduction case since the gpu # kernel has trouble handling it. 
return """ Py_XDECREF(%(out)s); %(out)s = pygpu_copy(%(inp)s, GA_ANY_ORDER); if (!%(out)s) { %(fail)s } if (%(sync)d) GpuArray_sync(&%(out)s->ga); """ % dict(out=out[0], inp=inp[0], fail=sub['fail'], sync=bool(config.gpuarray.sync)) k = self.get_kernel_cache(node) _, src, _, ls = k._get_basic_kernel(k.init_local_size, node.inputs[0].ndim) if self.axis is None: redux = [True] * node.inputs[0].ndim else: redux = self.redux acc_dtype = getattr(self, 'acc_dtype', None) if acc_dtype is None: acc_dtype = node.outputs[0].type.dtype input = inp[0] output = out[0] nd_out = node.outputs[0].ndim code = """ size_t gs = 1; size_t ls; unsigned int n = 1; unsigned int proxy_dim[%(nd_in)s]; unsigned int proxy_off; int proxy_str[%(nd_in)s]; void *args[%(n_args)s]; PyGpuArrayObject *tmp; int err; """ % dict(n_args=4 + (node.inputs[0].ndim * 2), nd_in=node.inputs[0].ndim) if nd_out != 0: code += """ size_t out_dims[%(nd_out)s]; int need_out = %(output)s == NULL || %(output)s->ga.nd != %(nd_out)s; """ % dict(nd_out=nd_out, output=output) j = 0 for i in range(node.inputs[0].ndim): if not self.redux[i]: code += """ out_dims[%(j)s] = %(input)s->ga.dimensions[%(i)s]; if (!need_out) need_out |= %(output)s->ga.dimensions[%(j)s] != out_dims[%(j)s]; """ % dict(j=j, i=i, input=input, output=output) j += 1 code += """ if (need_out) { %(output)s = pygpu_empty(%(nd_out)s, out_dims, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None); if (!%(output)s) { %(fail)s } } """ % dict(output=output, nd_out=nd_out, fail=sub['fail'], ctx=sub['params'], out_type=dtype_to_typecode(node.outputs[0].type.dtype)) else: code += """ if (%(output)s == NULL || %(output)s->ga.nd != 0) { Py_XDECREF(%(output)s); %(output)s = pygpu_empty(0, NULL, %(out_type)s, GA_C_ORDER, %(ctx)s, Py_None); if (!%(output)s) { %(fail)s } } """ % dict(output=output, fail=sub['fail'], ctx=sub['params'], out_type=dtype_to_typecode(node.outputs[0].type.dtype)) if acc_dtype != node.outputs[0].type.dtype: code += """ tmp = pygpu_empty(%(output)s->ga.nd, %(output)s->ga.dimensions, %(acc_type)s, GA_C_ORDER, %(ctx)s, Py_None); if (!tmp) %(fail)s """ % dict(output=output, fail=sub['fail'], ctx=sub['params'], acc_type=dtype_to_typecode(acc_dtype)) else: code += """ tmp = %(output)s; Py_INCREF(tmp); """ % dict(output=output) # We need the proxies since we are passing a pointer to the # data into the call and therefore we need a real copy of the # data in the proper type. 
code += """ args[0] = &n; args[1] = tmp->ga.data; """ % dict(output=output) p = 2 for i in range(node.inputs[0].ndim): code += """ proxy_dim[%(i)s] = %(input)s->ga.dimensions[%(i)s]; args[%(p)s] = &proxy_dim[%(i)s]; n *= %(input)s->ga.dimensions[%(i)s]; """ % dict(i=i, p=p, input=input) p += 1 if not redux[i]: code += "gs *= %(input)s->ga.dimensions[%(i)s];" % dict(input=input, i=i) code += """ args[%(p)s] = %(input)s->ga.data; proxy_off = %(input)s->ga.offset; args[%(p)s+1] = &proxy_off; """ % dict(p=p, input=input) p += 2 for i in range(node.inputs[0].ndim): code += """ proxy_str[%(i)s] = %(input)s->ga.strides[%(i)s]; args[%(p)s] = &proxy_str[%(i)s]; """ % dict(p=p, i=i, input=input) p += 1 code += """ if (gs == 0) gs = 1; n /= gs; ls = %(ls)s; err = GpuKernel_call(&%(k_var)s, 1, &ls, &gs, 0, args); if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: GpuCAReduceCPY: %%s.", GpuKernel_error(&%(k_var)s, err)); %(fail)s } if (%(cast_out)d) { err = GpuArray_move(&%(output)s->ga, &tmp->ga); if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: GpuCAReduceCPY [cast]: %%s.", GpuArray_error(&tmp->ga, err)); %(fail)s } } else { Py_XDECREF(%(output)s); %(output)s = tmp; } if (%(sync)d) { err = GpuArray_sync(&%(output)s->ga); if (err != GA_NO_ERROR) { PyErr_Format(PyExc_RuntimeError, "gpuarray error: GpuCAReduceCPY: %%s.", GpuKernel_error(&%(k_var)s, err)); %(fail)s } } """ % dict(k_var='k_reduk_' + name, sync=bool(config.gpuarray.sync), ls=ls, fail=sub['fail'], output=output, input=input, cast_out=bool(acc_dtype != node.outputs[0].type.dtype)) return code def c_code_cache_version(self): return (2, self.GpuKernelBase_version) def generate_kernel(self, node, odtype, redux): if isinstance(self.scalar_op, scalar.basic.Add): reduce_expr = "a + b" elif isinstance(self.scalar_op, scalar.basic.Mul): reduce_expr = "a * b" else: raise NotImplementedError() return ReductionKernel(node.inputs[0].type.context, odtype, self.scalar_op.identity, reduce_expr, redux, arguments=[make_argument(node.inputs[0], 'a')], init_nd=node.inputs[0].ndim) def perform(self, node, inp, out, ctx): input, = inp output, = out if self.axis is None: redux = [True] * input.ndim else: redux = self.redux if any(redux): output[0] = self.get_kernel_cache(node)(input).astype( copy=False, dtype=node.outputs[0].type.dtype) else: output[0] = pygpu.gpuarray.array(input, copy=True, dtype=node.outputs[0].type.dtype, context=ctx) # To allow reloading old pickled files GpuCAReduce = GpuCAReduceCPY
Welcome to Kangaroo Class! We are a Year 3/4 class who love to learn by exploring and having fun. We really enjoy reading a class book and taking inspiration from it to use in our writing. We also love using our Maths skills to solve the latest problem-solving challenge or learning through history, geography and the creative curriculum in our exciting topic work. We hope you enjoy having a look around our website. Have a look below to see what we will be up to this term. The Ice Palace by Robert Swindells. We will be sharing and enjoying the stories and using them as a stimulus for writing instructions, poems and recounts, as well as describing characters and settings and role play. Through this work, both year groups learn concepts including word classes (adjectives/nouns/verbs/adverbs), expanded noun phrases and fronted adverbials. We will also develop more consistent use of spelling rules and punctuation. In our Maths lessons we will be focusing on a range of key skills; developing written methods, increasing speed, growing our knowledge of shapes, measures and data. Whenever possible we link Maths to real life situations and link concepts to our Science and Topic work. Science – Rocks and Soil – We will be naming common rock and soil types, investigating their properties and carrying out some simple experiments. We will be recording our findings and comparing results. Geography – We will be taking a trip around our planet; learning the names of continents, countries, capital cities and facts unique to some countries. History – Changes in Britain from the Iron Age. RE – Judaism – This term we are learning about Moses through stories of The Secret Baby and The Burning Bush. We will also be learning about synagogues, as well as using our RE lessons to produce some extended writing and illustrations through storyboards. DT – Exploring textiles and simple sewing patterns. Food hygiene including peeling and pouring. Art – Drawing from experience using charcoal. Colour mixing and using acrylic paint. PE – Throwing and catching, and then moving on to choreographing and performing dances. Music – Appraising, describing, comparing and evaluating music using the appropriate vocabulary. PE is on Monday and Thursday, though it is useful to keep PE kits in school from Monday to Friday in case we have any special sessions. Swimming will be on a Thursday afternoon, starting in term 5 for the Year 4 children. Our Year 4 children are also very fortunate to be learning how to play a variety of brass instruments. Look out for the date of our next concert! You can help at home by making predictions about and discussing any TV programs that you may watch together. This really helps build an understanding of the world, as well as helping with comprehension and creative writing skills. Keep checking back to see what we have been up to! Mrs Addison and Mrs Kirby.
""" I'm modifying the halotools tpcf code to add a few more efficincies. One directly returns the correlation functions from subregions, so I can compute arbitary jackknifes more efficiently Another is to add a flag for do_auto1 and do_auto2. Sometimes, you wanna compute xi_gg and xi_gm but not xi_mm! """ from __future__ import absolute_import, division, unicode_literals import numpy as np from halotools.mock_observables import * from halotools.mock_observables.two_point_clustering import * from halotools.mock_observables.two_point_clustering.tpcf import _tpcf_process_args, _random_counts from halotools.mock_observables.two_point_clustering.tpcf_estimators import _TP_estimator, _TP_estimator_requirements from halotools.mock_observables.pair_counters import npairs_jackknife_3d from halotools.mock_observables.two_point_clustering.clustering_helpers import (process_optional_input_sample2, verify_tpcf_estimator, tpcf_estimator_dd_dr_rr_requirements) from halotools.mock_observables.two_point_clustering.tpcf_jackknife import \ _tpcf_jackknife_process_args,_enclose_in_box, get_subvolume_numbers, jrandom_counts np.seterr(divide='ignore', invalid='ignore') # ignore divide by zero in e.g. DD/RR __all__ = ['tpcf_subregions', 'tpcf'] # all lifted from duncan's code def tpcf_subregions(sample1, randoms, rbins, Nsub=[5, 5, 5], sample2=None, period=None, do_auto1=True, do_auto2=False, do_cross=True, estimator='Natural', num_threads=1, seed=None, RR=None): do_auto = do_auto1 or do_auto2 # process input parameters function_args = (sample1, randoms, rbins, Nsub, sample2, period, do_auto, do_cross, estimator, num_threads, seed) sample1, rbins, Nsub, sample2, randoms, period, do_auto, do_cross, num_threads,\ _sample1_is_sample2, PBCs = _tpcf_jackknife_process_args(*function_args) # determine box size the data occupies. # This is used in determining jackknife samples. if PBCs is False: sample1, sample2, randoms, Lbox = _enclose_in_box(sample1, sample2, randoms) else: Lbox = period do_DD, do_DR, do_RR = _TP_estimator_requirements(estimator) N1 = len(sample1) N2 = len(sample2) NR = len(randoms) j_index_1, N_sub_vol = cuboid_subvolume_labels(sample1, Nsub, Lbox) j_index_2, N_sub_vol = cuboid_subvolume_labels(sample2, Nsub, Lbox) j_index_random, N_sub_vol = cuboid_subvolume_labels(randoms, Nsub, Lbox) # number of points in each subvolume NR_subs = get_subvolume_numbers(j_index_random, N_sub_vol) N1_subs = get_subvolume_numbers(j_index_1, N_sub_vol) N2_subs = get_subvolume_numbers(j_index_2, N_sub_vol) # number of points in each jackknife sample N1_subs = N1 - N1_subs N2_subs = N2 - N2_subs NR_subs = NR - NR_subs # calculate all the pair counts # TODO need to modify this function D1D1, D1D2, D2D2 = jnpair_counts( sample1, sample2, j_index_1, j_index_2, N_sub_vol, rbins, period, num_threads, do_auto1, do_cross, do_auto2, _sample1_is_sample2) # pull out the full and sub sample results if _sample1_is_sample2: D1D1_full = D1D1[0, :] D1D1_sub = D1D1[1:, :] D1D2_full = D1D2[0, :] D1D2_sub = D1D2[1:, :] D2D2_full = D2D2[0, :] D2D2_sub = D2D2[1:, :] else: if do_auto1: D1D1_full = D1D1[0, :] D1D1_sub = D1D1[1:, :] if do_cross: D1D2_full = D1D2[0, :] D1D2_sub = D1D2[1:, :] if do_auto2: D2D2_full = D2D2[0, :] D2D2_sub = D2D2[1:, :] # do random counts # TODO figure out what of this i can skip? 
if RR is None: D1R, RR = jrandom_counts(sample1, randoms, j_index_1, j_index_random, N_sub_vol, rbins, period, 1, do_DR, do_RR) else: #use the precomputed RR D1R, RR_dummy= jrandom_counts(sample1, randoms, j_index_1, j_index_random, N_sub_vol, rbins, period, 1, do_DR, do_RR=False) print 'A' if _sample1_is_sample2: D2R = D1R else: if do_DR is True: D2R, RR_dummy = jrandom_counts(sample2, randoms, j_index_2, j_index_random, N_sub_vol, rbins, period, num_threads, do_DR, do_RR=False) else: D2R = None print 'B' if do_DR is True: D1R_full = D1R[0, :] D1R_sub = D1R[1:, :] D2R_full = D2R[0, :] D2R_sub = D2R[1:, :] else: D1R_full = None D1R_sub = None D2R_full = None D2R_sub = None if do_RR is True: RR_full = RR[0, :] RR_sub = RR[1:, :] else: RR_full = None RR_sub = None # calculate the correlation function for the subsamples outputs = [] print 'C' if do_auto1 or _sample1_is_sample2: xi_11_sub = _TP_estimator(D1D1_sub, D1R_sub, RR_sub, N1_subs, N1_subs, NR_subs, NR_subs, estimator) outputs.append(xi_11_sub) if do_cross: xi_12_sub = _TP_estimator(D1D2_sub, D1R_sub, RR_sub, N1_subs, N2_subs, NR_subs, NR_subs, estimator) outputs.append(xi_12_sub) if do_auto2: xi_22_sub = _TP_estimator(D2D2_sub, D2R_sub, RR_sub, N2_subs, N2_subs, NR_subs, NR_subs, estimator) outputs.append(xi_22_sub) return outputs[0] if len(outputs) ==1 else tuple(outputs) def tpcf(sample1, rbins, sample2=None, randoms=None, period=None, do_auto1=True, do_cross=True, do_auto2=False, estimator='Natural', num_threads=1, approx_cell1_size=None, approx_cell2_size=None, approx_cellran_size=None, RR_precomputed=None, NR_precomputed=None, seed=None, n_split = 20): r""" Calculate the real space two-point correlation function, :math:`\xi(r)`. Example calls to this function appear in the documentation below. See the :ref:`mock_obs_pos_formatting` documentation page for instructions on how to transform your coordinate position arrays into the format accepted by the ``sample1`` and ``sample2`` arguments. See also :ref:`galaxy_catalog_analysis_tutorial2` for example usage on a mock galaxy catalog. Parameters ---------- sample1 : array_like Npts1 x 3 numpy array containing 3-D positions of points. See the :ref:`mock_obs_pos_formatting` documentation page, or the Examples section below, for instructions on how to transform your coordinate position arrays into the format accepted by the ``sample1`` and ``sample2`` arguments. Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools. rbins : array_like array of boundaries defining the real space radial bins in which pairs are counted. Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools. sample2 : array_like, optional Npts2 x 3 array containing 3-D positions of points. Passing ``sample2`` as an input permits the calculation of the cross-correlation function. Default is None, in which case only the auto-correlation function will be calculated. randoms : array_like, optional Nran x 3 array containing 3-D positions of randomly distributed points. If no randoms are provided (the default option), calculation of the tpcf can proceed using analytical randoms (only valid for periodic boundary conditions). period : array_like, optional Length-3 sequence defining the periodic boundary conditions in each dimension. If you instead provide a single scalar, Lbox, period is assumed to be the same in all Cartesian directions. If set to None (the default option), PBCs are set to infinity, in which case ``randoms`` must be provided. 
Length units are comoving and assumed to be in Mpc/h, here and throughout Halotools. do_auto : boolean, optional Boolean determines whether the auto-correlation function will be calculated and returned. Default is True. do_cross : boolean, optional Boolean determines whether the cross-correlation function will be calculated and returned. Only relevant when ``sample2`` is also provided. Default is True for the case where ``sample2`` is provided, otherwise False. estimator : string, optional Statistical estimator for the tpcf. Options are 'Natural', 'Davis-Peebles', 'Hewett' , 'Hamilton', 'Landy-Szalay' Default is ``Natural``. num_threads : int, optional Number of threads to use in calculation, where parallelization is performed using the python ``multiprocessing`` module. Default is 1 for a purely serial calculation, in which case a multiprocessing Pool object will never be instantiated. A string 'max' may be used to indicate that the pair counters should use all available cores on the machine. approx_cell1_size : array_like, optional Length-3 array serving as a guess for the optimal manner by how points will be apportioned into subvolumes of the simulation box. The optimum choice unavoidably depends on the specs of your machine. Default choice is to use Lbox/10 in each dimension, which will return reasonable result performance for most use-cases. Performance can vary sensitively with this parameter, so it is highly recommended that you experiment with this parameter when carrying out performance-critical calculations. approx_cell2_size : array_like, optional Analogous to ``approx_cell1_size``, but for sample2. See comments for ``approx_cell1_size`` for details. approx_cellran_size : array_like, optional Analogous to ``approx_cell1_size``, but for randoms. See comments for ``approx_cell1_size`` for details. RR_precomputed : array_like, optional Array storing the number of RR-counts calculated in advance during a pre-processing phase. Must have the same length as *len(rbins)*. If the ``RR_precomputed`` argument is provided, you must also provide the ``NR_precomputed`` argument. Default is None. NR_precomputed : int, optional Number of points in the random sample used to calculate ``RR_precomputed``. If the ``NR_precomputed`` argument is provided, you must also provide the ``RR_precomputed`` argument. Default is None. seed : int, optional Random number seed used to randomly downsample data, if applicable. Default is None, in which case downsampling will be stochastic. Returns ------- correlation_function(s) : numpy.array *len(rbins)-1* length array containing the correlation function :math:`\xi(r)` computed in each of the bins defined by input ``rbins``. .. math:: 1 + \xi(r) \equiv \mathrm{DD}(r) / \mathrm{RR}(r), If ``estimator`` is set to 'Natural'. :math:`\mathrm{DD}(r)` is the number of sample pairs with separations equal to :math:`r`, calculated by the pair counter. :math:`\mathrm{RR}(r)` is the number of random pairs with separations equal to :math:`r`, and is counted internally using "analytic randoms" if ``randoms`` is set to None (see notes for an explanation), otherwise it is calculated using the pair counter. If ``sample2`` is passed as input (and if ``sample2`` is not exactly the same as ``sample1``), then three arrays of length *len(rbins)-1* are returned: .. math:: \xi_{11}(r), \xi_{12}(r), \xi_{22}(r), the autocorrelation of ``sample1``, the cross-correlation between ``sample1`` and ``sample2``, and the autocorrelation of ``sample2``, respectively. 
If ``do_auto`` or ``do_cross`` is set to False, the appropriate sequence of results is returned. Notes ----- For a higher-performance implementation of the tpcf function written in C, see the Corrfunc code written by Manodeep Sinha, available at https://github.com/manodeep/Corrfunc. Examples -------- For demonstration purposes we calculate the `tpcf` for halos in the `~halotools.sim_manager.FakeSim`. >>> from halotools.sim_manager import FakeSim >>> halocat = FakeSim() >>> x = halocat.halo_table['halo_x'] >>> y = halocat.halo_table['halo_y'] >>> z = halocat.halo_table['halo_z'] We transform our *x, y, z* points into the array shape used by the pair-counter by taking the transpose of the result of `numpy.vstack`. This boilerplate transformation is used throughout the `~halotools.mock_observables` sub-package: >>> sample1 = np.vstack((x,y,z)).T Alternatively, you may use the `~halotools.mock_observables.return_xyz_formatted_array` convenience function for this same purpose, which provides additional wrapper behavior around `numpy.vstack` such as placing points into redshift-space. >>> rbins = np.logspace(-1, 1, 10) >>> xi = tpcf(sample1, rbins, period=halocat.Lbox) See also -------- :ref:`galaxy_catalog_analysis_tutorial2` """ do_auto = do_auto1 or do_auto2 # check input arguments using clustering helper functions function_args = (sample1, rbins, sample2, randoms, period, do_auto, do_cross, estimator, num_threads, approx_cell1_size, approx_cell2_size, approx_cellran_size, RR_precomputed, NR_precomputed, seed) # pass arguments in, and get out processed arguments, plus some control flow variables (sample1, rbins, sample2, randoms, period, do_auto, do_cross, num_threads, _sample1_is_sample2, PBCs, RR_precomputed, NR_precomputed) = _tpcf_process_args(*function_args) # What needs to be done? do_DD, do_DR, do_RR = tpcf_estimator_dd_dr_rr_requirements[estimator] if RR_precomputed is not None: # overwrite do_RR as necessary do_RR = False # How many points are there (for normalization purposes)? N1 = len(sample1) N2 = len(sample2) if randoms is not None: NR = len(randoms) else: # set the number of randoms equal to the number of points in sample1 # this is arbitrarily set, but must remain consistent! if NR_precomputed is not None: NR = NR_precomputed else: NR = N1 # count data pairs D1D1, D1D2, D2D2 = _pair_counts(sample1, sample2, rbins, period, num_threads, do_auto1, do_cross, do_auto2, _sample1_is_sample2, approx_cell1_size, approx_cell2_size) # count random pairs # split this up over a few because randoms is large # TODO do they stack like this? split_randoms = np.array_split(randoms, n_split, axis = 0) D1R, D2R, RR = np.zeros((len(rbins))), np.zeros((len(rbins))), np.zeros((len(rbins))) #D1Rs = [] for i, _rand in enumerate(split_randoms): #print i, # don't diff here until after! _D1R, _D2R, _RR, = _random_counts(sample1, sample2, _rand, rbins, period, PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2, approx_cell1_size, approx_cell2_size, approx_cellran_size, diff = False) #D1Rs.append(_D1R) if _D1R is not None: D1R+=_D1R if _D2R is not None: D2R+=_D2R if _RR is not None: RR+=_RR D1R=np.diff(D1R) D2R=np.diff(D2R) RR=np.diff(RR) if RR_precomputed is not None: RR = RR_precomputed # run results through the estimator and return relavent/user specified results. 
outputs = [] if do_auto1 or _sample1_is_sample2: xi_11 = _TP_estimator(D1D1, D1R, RR, N1, N1, NR, NR, estimator) outputs.append(xi_11) if do_cross: xi_12 = _TP_estimator(D1D2, D1R, RR, N1, N2, NR, NR, estimator) outputs.append(xi_12) if do_auto2: xi_22 = _TP_estimator(D2D2, D2R, RR, N2, N2, NR, NR, estimator) outputs.append(xi_22) return outputs[0] if len(outputs) ==1 else tuple(outputs) # overload to skip the xi_mm calculation def jnpair_counts(sample1, sample2, j_index_1, j_index_2, N_sub_vol, rbins, period, num_threads, do_auto1 = True, do_cross=False,do_auto2=False, _sample1_is_sample2=False): """ Count jackknife data pairs: DD """ if do_auto1 is True: D1D1 = npairs_jackknife_3d(sample1, sample1, rbins, period=period, jtags1=j_index_1, jtags2=j_index_1, N_samples=N_sub_vol, num_threads=num_threads) D1D1 = np.diff(D1D1, axis=1) else: D1D1 = None D2D2 = None if _sample1_is_sample2: D1D2 = D1D1 D2D2 = D1D1 else: if do_cross is True: D1D2 = npairs_jackknife_3d(sample1, sample2, rbins, period=period, jtags1=j_index_1, jtags2=j_index_2, N_samples=N_sub_vol, num_threads=num_threads) D1D2 = np.diff(D1D2, axis=1) else: D1D2 = None if do_auto2 is True: D2D2 = npairs_jackknife_3d(sample2, sample2, rbins, period=period, jtags1=j_index_2, jtags2=j_index_2, N_samples=N_sub_vol, num_threads=num_threads) D2D2 = np.diff(D2D2, axis=1) else: D2D2 = None return D1D1, D1D2, D2D2 def _pair_counts(sample1, sample2, rbins, period, num_threads, do_auto1, do_cross, do_auto2, _sample1_is_sample2, approx_cell1_size, approx_cell2_size): r""" Internal function used calculate DD-pairs during the calculation of the tpcf. """ if do_auto1 is True: D1D1 = npairs_3d(sample1, sample1, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cell1_size, approx_cell2_size=approx_cell1_size) D1D1 = np.diff(D1D1) else: D1D1 = None D2D2 = None if _sample1_is_sample2: D1D2 = D1D1 D2D2 = D1D1 else: if do_cross is True: D1D2 = npairs_3d(sample1, sample2, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cell1_size, approx_cell2_size=approx_cell2_size) D1D2 = np.diff(D1D2) else: D1D2 = None if do_auto2 is True: D2D2 = npairs_3d(sample2, sample2, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cell2_size, approx_cell2_size=approx_cell2_size) D2D2 = np.diff(D2D2) else: D2D2 = None return D1D1, D1D2, D2D2 def _random_counts(sample1, sample2, randoms, rbins, period, PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2, approx_cell1_size, approx_cell2_size, approx_cellran_size, diff = True): r""" Internal function used to random pairs during the calculation of the tpcf. There are two high level branches: 1. w/ or wo/ PBCs and randoms. 2. PBCs and analytical randoms There is also control flow governing whether RR and DR pairs are counted, as not all estimators need one or the other. Analytical counts are N**2*dv*rho, where dv is the volume of the spherical shells, which is the correct volume to use for a continious cubical volume with PBCs. Adding a function to do the diff after so you can split it up a little better """ def nball_volume(R, k=3): """ Calculate the volume of a n-shpere. This is used for the analytical randoms. """ return (np.pi**(k/2.0)/gamma(k/2.0+1.0))*R**k # randoms provided, so calculate random pair counts. 
if randoms is not None: if do_RR is True: RR = npairs_3d(randoms, randoms, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cellran_size, approx_cell2_size=approx_cellran_size) if diff: RR = np.diff(RR) else: RR = None if do_DR is True: D1R = npairs_3d(sample1, randoms, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cell1_size, approx_cell2_size=approx_cellran_size ) if diff: D1R = np.diff(D1R) else: D1R = None if _sample1_is_sample2: D2R = None else: if do_DR is True: D2R = npairs_3d(sample2, randoms, rbins, period=period, num_threads=num_threads, approx_cell1_size=approx_cell2_size, approx_cell2_size=approx_cellran_size) if diff: D2R = np.diff(D2R) else: D2R = None return D1R, D2R, RR # PBCs and no randoms--calculate randoms analytically. elif randoms is None: # set the number of randoms equal to the number of points in sample1 NR = len(sample1) # do volume calculations v = nball_volume(rbins) # volume of spheres dv = np.diff(v) # volume of shells global_volume = period.prod() # volume of simulation # calculate randoms for sample1 N1 = np.shape(sample1)[0] # number of points in sample1 rho1 = N1/global_volume # number density of points D1R = (NR)*(dv*rho1) # random counts are N**2*dv*rho # calculate randoms for sample2 N2 = np.shape(sample2)[0] # number of points in sample2 rho2 = N2/global_volume # number density of points D2R = (NR)*(dv*rho2) # random counts are N**2*dv*rho # calculate the random-random pairs. rhor = (NR**2)/global_volume RR = (dv*rhor) return D1R, D2R, RR
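As a usage sketch of the modified interface above (galaxies, matter, randoms and Lbox are placeholder names for (N, 3) position arrays and the periodic box size): setting do_auto1 and do_cross while leaving do_auto2 off returns xi_gg and xi_gm without ever counting the expensive matter-matter pairs, which is the point of the extra flags.

import numpy as np

rbins = np.logspace(-1, 1.3, 15)

# full-sample correlation functions; n_split chunks the randoms to bound memory use
xi_gg, xi_gm = tpcf(galaxies, rbins, sample2=matter, randoms=randoms,
                    period=Lbox, do_auto1=True, do_cross=True, do_auto2=False,
                    estimator='Landy-Szalay', num_threads=4, n_split=20)

# per-subvolume estimates, from which arbitrary jackknife covariances can be assembled
xi_gg_sub, xi_gm_sub = tpcf_subregions(galaxies, randoms, rbins, Nsub=[5, 5, 5],
                                       sample2=matter, period=Lbox,
                                       do_auto1=True, do_cross=True, do_auto2=False)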
Can Forex Traders Achieve a Work-Life Balance? While I’m a big fan of allotting time and effort to improving your trading strategies, I also believe that too much trading could do you more harm than good. For one, few people are able to sustain the level of focus needed to trade forex for hours upon hours a day, every day; mental fatigue will set in. Forcing yourself to analyze the currency markets and make more trades when you’re not completely focused could cost you some of your carefully planned trades. On top of that, working long hours can be bad for your health. Studies have shown that chronic stress (the type that can be caused by excessive work hours) can lead to heart disease. Now, you wouldn’t want that, right? What’s the point of all that hard work and long hours if you won’t be able to enjoy the fruits of your labor? Having a life outside of trading also reminds you that trading is a profession. This can help you become more emotionally detached from the outcome of your trades, which is important for making good trading decisions. For many forex traders in particular, achieving a work-life balance is especially difficult. Unlike in stock trading, almost ALL major news can affect currencies. It’s hard to discuss the constant rain in the U.K. without wondering how much it will affect retail sales. Suddenly you may find yourself checking your open position on GBP/USD at 4 o’clock in the morning! Work-life balance can also get out of whack for those who set up their office at home. The convenience of being able to take naps in your comfortable bed whenever you like can be a challenge. Some days you’ll be fighting the urge to take a break from the markets, knowing that they are open 24/7, and other days you’ll be doing all you can to ignore that alarm clock to spend another 5, 10, or 55 minutes in that cozy bed of yours. Achieving a work-life balance might be difficult, but it’s certainly not impossible. Here’s how you can get started. The first step is to prioritize. Determine how much you value your trading over your personal life. Can you check your forex trades at the dinner table without getting in trouble with your wife? Are you comfortable with rejecting nights out with friends in favor of your trades? Or are you the type who can easily walk away from the charts? It’s also important to draw the line on when to call it a day. Whether it’s the number of your trades, a certain win or loss percentage, or the end of a trading session, you have to know when to stop. It doesn’t matter what kind of schedule you make as long as you STICK TO IT. Again, remember that trading is also a profession. Sure, it requires passion, effort, and a lot of work. But in order to keep you on top of your game, you also have to take a break from it every now and then to rest, recharge and get your focus back.
#!/usr/bin/env python # -*- coding: utf-8 -*- """ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import re import sys import pymysql as sql import codecs import requests import base64 import random import json # ======================================================================== # IMPORTANT !!! # Devices that are not based on device template are not going to be migrated # * TemplateID (openDCIM) == Hardware Model (Device42) # Racks without height, are not going to be migrated # ======================================================================== # ====== MySQL Source (openDCIM) ====== # DB_IP = '' DB_PORT = '' DB_NAME = '' DB_USER = '' DB_PWD = '' # ====== Log settings ==================== # LOGFILE = 'migration.log' DEBUG = True # ====== Device42 upload settings ========= # D42_USER = '' D42_PWD = '' D42_URL = 'https://' DRY_RUN = False def is_valid_ip(ip): """Validates IP addresses. """ return is_valid_ipv4(ip) or is_valid_ipv6(ip) def is_valid_ipv4(ip): """Validates IPv4 addresses. """ pattern = re.compile(r""" ^ (?: # Dotted variants: (?: # Decimal 1-255 (no leading 0's) [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2} | 0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's) | 0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's) ) (?: # Repeat 0-3 times, separated by a dot \. (?: [3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2} | 0x0*[0-9a-f]{1,2} | 0+[1-3]?[0-7]{0,2} ) ){0,3} | 0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff | 0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777 | # Decimal notation, 1-4294967295: 429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}| 42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}| 4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8} ) $ """, re.VERBOSE | re.IGNORECASE) return pattern.match(ip) is not None def is_valid_ipv6(ip): """Validates IPv6 addresses. """ pattern = re.compile(r""" ^ \s* # Leading whitespace (?!.*::.*::) # Only a single whildcard allowed (?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard (?: # Repeat 6 times: [0-9a-f]{0,4} # A group of at most four hexadecimal digits (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard ){6} # (?: # Either [0-9a-f]{0,4} # Another group (?:(?<=::)|(?<!::):) # Colon unless preceeded by wildcard [0-9a-f]{0,4} # Last group (?: (?<=::) # Colon iff preceeded by exacly one colon | (?<!:) # | (?<=:) (?<!::) : # ) # OR | # A v4 address with NO leading zeros (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d) (?: \. (?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d) ){3} ) \s* # Trailing whitespace $ """, re.VERBOSE | re.IGNORECASE | re.DOTALL) return pattern.match(ip) is not None class Logger(): def __init__(self, logfile): self.logfile = LOGFILE def writer(self, msg): if LOGFILE and LOGFILE != '': with codecs.open(self.logfile, 'a', encoding = 'utf-8') as f: f.write(msg.strip()+'\r\n') # \r\n for notepad try: print msg except: print msg.encode('ascii', 'ignore') + ' # < non-ASCII chars detected! 
>' class REST(): def __init__(self): self.password = D42_PWD self.username = D42_USER self.base_url = D42_URL self.racks = json.loads(self.get_racks()) def uploader(self, data, url): payload = data headers = { 'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password), 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(url, data=payload, headers=headers, verify=False) msg = 'Status code: %s' % str(r.status_code) logger.writer(msg) if DEBUG: msg = unicode(payload) logger.writer(msg) msg = str(r.text) logger.writer(msg) def fetcher(self, url): headers = { 'Authorization': 'Basic ' + base64.b64encode(self.username + ':' + self.password), 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.get(url, headers=headers, verify=False) msg = 'Status code: %s' % str(r.status_code) logger.writer(msg) if DEBUG: msg = str(r.text) logger.writer(msg) return r.text def post_ip(self, data): if DRY_RUN == False: url = self.base_url+'/api/ip/' msg = '\r\nPosting IP data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_device(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/device/' msg = '\r\nPosting device data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_location(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/buildings/' msg = '\r\nPosting location data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_room(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/rooms/' msg = '\r\nPosting room data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_rack(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/racks/' msg = '\r\nPosting rack data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_pdu(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/pdus/' msg = '\r\nPosting PDU data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_pdu_update(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/pdus/rack/' msg = '\r\nUpdating PDU data to %s ' % url logger.writer(msg) self.uploader(data, url) def post_pdu_model(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/pdu_models/' msg = '\r\nPosting PDU models from %s ' % url logger.writer(msg) self.uploader(data, url) def get_pdu_models(self): if DRY_RUN == False: url = self.base_url+'/api/1.0/pdu_models/' msg = '\r\nFetching PDU models from %s ' % url logger.writer(msg) self.fetcher(url) def get_racks(self): if DRY_RUN == False: url = self.base_url+'/api/1.0/racks/' msg = '\r\nFetching racks from %s ' % url logger.writer(msg) data = self.fetcher(url) return data def get_rack_by_name(self, name): for rack in self.racks['racks']: if rack['name'] == name: return rack return None def get_devices(self): if DRY_RUN == False: url = self.base_url+'/api/1.0/devices/' msg = '\r\nFetching devices from %s ' % url logger.writer(msg) data = self.fetcher(url) return data def get_buildings(self): if DRY_RUN == False: url = self.base_url+'/api/1.0/buildings/' msg = '\r\nFetching buildings from %s ' % url logger.writer(msg) data = self.fetcher(url) return data def get_rooms(self): if DRY_RUN == False: url = self.base_url+'/api/1.0/rooms/' msg = '\r\nFetching rooms from %s ' % url logger.writer(msg) data = self.fetcher(url) return data def post_hardware(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/hardwares/' msg = '\r\nAdding hardware data to %s ' % url logger.writer(msg) self.uploader(data, url) def 
post_device2rack(self, data): if DRY_RUN == False: url = self.base_url+'/api/1.0/device/rack/' msg = '\r\nAdding device to rack at %s ' % url logger.writer(msg) self.uploader(data, url) class DB(): def __init__(self): self.con = None self.tables = [] self.datacenters_dcim = {} self.rooms_dcim = {} self.racks_dcim = {} self.manufacturers = {} def connect(self): self.con = sql.connect(host=DB_IP, port=int(DB_PORT), db=DB_NAME, user=DB_USER, passwd=DB_PWD) def get_ips(self): net = {} adrese = [] if not self.con: self.connect() with self.con: cur = self.con.cursor() q = "SELECT PrimaryIP FROM fac_Device" cur.execute(q) ips = cur.fetchall() for line in ips: if line[0] != '': ip = line[0] if is_valid_ip(ip): net.update({'ipaddress':ip}) rest.post_ip(net) with self.con: cur = self.con.cursor() q = "SELECT IPAddress FROM fac_PowerDistribution" cur.execute(q) ips = cur.fetchall() for line in ips: if line[0] != '': ip = line[0] if is_valid_ip(ip): net.update({'ipaddress':ip}) rest.post_ip(net) def get_locations(self): building = {} if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT DatacenterID,Name,DeliveryAddress,Administrator FROM fac_DataCenter' cur.execute(q) data = cur.fetchall() for row in data: #building.clear() id, name, address, contact = row building.update({'name':name}) building.update({'address':address}) building.update({'contact_name':contact}) self.datacenters_dcim.update({id:name}) rest.post_location(building) def get_rooms(self): rooms = {} # get building IDs from D42 building_map = {} buildings = json.loads(rest.get_buildings()) for building in buildings['buildings']: building_map.update({building['name']:building['building_id']}) if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT ZoneID,DataCenterID,Description FROM fac_Zone' cur.execute(q) data = cur.fetchall() for row in data: room_id = row[0] dc = row[1] name = row[2] dc = self.datacenters_dcim[dc] dc_id = building_map[dc] rooms.update({'name':name}) rooms.update({'building_id':dc_id}) self.rooms_dcim.update({room_id:name}) rest.post_room(rooms) def get_racks(self): # get room IDs from D42 room_map = {} rooms = json.loads(rest.get_rooms()) for room in rooms['rooms']: room_map.update({room['name']:room['room_id']}) if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT CabinetID,DatacenterID,Location,CabinetHeight,ZoneID FROM fac_Cabinet' cur.execute(q) data = cur.fetchall() for row in data: rack = {} cid, did, name, height, room = row dc = self.datacenters_dcim[did] if height != 0: if name == '': rnd = str(random.randrange(101,9999)) name = 'Unknown'+rnd if room > 0: room = self.rooms_dcim[room] room_id = room_map[room] rack.update({'room_id':room_id}) d42_rack = rest.get_rack_by_name(name) if d42_rack: rack.update({'rack_id':d42_rack['rack_id']}) rack.update({'name':name}) rack.update({'size':height}) rack.update({'building':did}) self.racks_dcim.update({cid:name}) rest.post_rack(rack) def get_datacenter_from_id(self, id): if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT Name FROM fac_DataCenter where DataCenterID = %d' % id cur.execute(q) data = cur.fetchone() return data def get_room_from_cabinet(self, cabinetID): if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT DatacenterID,Location,Model FROM fac_Cabinet where CabinetID = %d' % cabinetID cur.execute(q) data = cur.fetchone() id, room, model = data datacenter = self.get_datacenter_from_id(id)[0] return datacenter, 
room, model def get_vendor_and_model(self, id): self.get_manufacturers() hardware = {} if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT ManufacturerID, Model FROM fac_DeviceTemplate WHERE TemplateID=%d' % id cur.execute(q) data = cur.fetchone() try: id, model = data except TypeError: return None, None vendor = self.manufacturers[id] return vendor, model def get_devices(self): device = {} device2rack = {} hardware = {} if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT Label, SerialNo, AssetTag, PrimaryIP, Cabinet,Position,Height,DeviceType,HalfDepth,BackSide, TemplateID FROM fac_Device' cur.execute(q) data = cur.fetchall() for row in data: name, serial_no, comment, ip, rackid, position, size, devicetype, halfdepth, backside, tid = row datacenter, room, rack_name = self.get_room_from_cabinet(rackid) vendor, model = self.get_vendor_and_model(tid) # post device device.update({'name':name}) device.update({'manufacturer':vendor}) device.update({'hardware':model}) device.update({'notes':comment}) if devicetype.lower() == 'cdu': device.update({'pdu_model':model}) rest.post_pdu(device) else: device.update({'serial_no':serial_no}) if devicetype.lower() == 'switch': device.update({'is_it_switch':'yes'}) rest.post_device(device) if rackid: #post device 2 rack device2rack.update({'device':name}) device2rack.update({'size':size}) #device2rack.update({'building':datacenter}) #device2rack.update({'room':room}) device2rack.update({'rack': self.racks_dcim[rackid]}) device2rack.update({'start_at':position-1}) if backside == '1': device2rack.update({'orientation':'back'}) rest.post_device2rack(device2rack) def get_manufacturers(self): if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT ManufacturerID, Name from fac_Manufacturer' cur.execute(q) data = cur.fetchall() for row in data: id, vendor = row self.manufacturers.update({id:vendor}) def get_depth(self, id): if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT HalfDepth FROM fac_Device WHERE TemplateID=%d' % id cur.execute(q) data = cur.fetchone() d = data[0] if d == 0: return 1 elif d ==1: return 2 def get_hardware(self): self.get_manufacturers() hardware = {} if not self.con: self.connect() with self.con: cur = self.con.cursor() q = 'SELECT TemplateID, ManufacturerID, Model, Height, Wattage, DeviceType, FrontPictureFile, RearPictureFile FROM fac_DeviceTemplate' cur.execute(q) data = cur.fetchall() for row in data: TemplateID, ManufacturerID, Model, Height, Wattage, DeviceType, FrontPictureFile, RearPictureFile = row try: depth = self.get_depth(TemplateID) except TypeError: continue vendor = self.manufacturers[ManufacturerID] hardware.update({'name':Model}) hardware.update({'size':Height}) hardware.update({'depth':depth}) hardware.update({'manufacturer':vendor}) hardware.update({'watts':Wattage}) if DeviceType.lower() == 'cdu': rest.post_pdu_model(hardware) else: hardware.update({'type':1}) ''' # to do if FrontPictureFile: hardware.update({'front_image':FrontPictureFile}) if RearPictureFile: hardware.update({'back_image':RearPictureFile}) ''' rest.post_hardware(hardware) def main(): db = DB() db.get_ips() db.get_locations() db.get_rooms() db.get_racks() db.get_hardware() db.get_devices() if __name__ == '__main__': logger = Logger(LOGFILE) rest = REST() main() print '\n[!] Done!' sys.exit()
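The uploader() and fetcher() methods above assemble the Basic auth header by hand with base64.b64encode(username + ':' + password), which only works while both values are plain str under Python 2; requests can build the header itself through its auth argument. A minimal sketch of that alternative, assuming the same module-level D42_URL, D42_USER and D42_PWD constants (the fetch name and the usage line are illustrative only):

import requests

def fetch(url, username, password):
    # Let requests construct the HTTP Basic Authorization header itself.
    r = requests.get(url, auth=(username, password), verify=False)
    r.raise_for_status()
    return r.text

# Hypothetical usage:
# racks = fetch(D42_URL + '/api/1.0/racks/', D42_USER, D42_PWD)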
In a rapidly shrinking world where technology changes in the blink of an eye and new socio-technological advances arrive at lightning speed, innovating is both imperative and logical. Innovation requires change: a collaborative approach, with ideas that integrate new possibilities into your lines of business so you can remain competitive, increase margins and expand operations. Viral Technologies Pvt. Ltd. helps you make this change possible: a change from doubt to possibility, a transformation of dreams into reality, a journey from the imaginary to its realized form. The Viral team is a young, dynamic and enthusiastic group with a core focus on system design, mainly engaged in developing intuitive, high-quality, scalable solutions for its clients. Viral Technologies primarily works with clients who, because of the sheer pace of their growth and their diverse automation needs, require specialized information technology support that is either not available internally or does not justify a permanent increase in personnel for time-specific project requirements. For these clients, our highly qualified, professionally trained and experienced team of Mobile Engineers and Project Consultants delivers consistent, high-quality and timely services on a continual basis. By continuing to deliver timely, cost-effective and quality services, the company looks forward to developing long-term client relationships that are mutually beneficial to both parties. We at Viral believe these relationships will form the basis on which the company continues to pursue new clients and projects. Our efforts are focused on building a client base of respected names, resulting in dynamic and consistent growth for the company, its people and its business partners.
import os import re from app import db, config, socketio, app from app.library.formatters import formatted_file_data from app.models.file import File from app.models.package import Package from app.modules.mod_process.file_repository import FileRepository from app.modules.mod_process.status_map import StatusMap class ProcessRepository: # this dict holds all the currently active processes as id-instance pairs # example: {1: <...>, 2: <...>, ...} processes = {} # this controls whether or not the encoding processing is active # notice: do not modify this directly, but use set_encoding_active() encoding_active = False @staticmethod def set_encoding_active(new_state): """ change the state of whether encoding should be active or not to a new state :param new_state: should the encoding be active now """ ProcessRepository.encoding_active = new_state # notify client socketio.emit("active_changed", {"active": new_state}) # check if it's necessary to start new processes ProcessRepository.check_and_start_processes() @staticmethod def cancel_all_processes(): """ cancel all currently running Processes """ # iterate over a copy of processes because cancel_process modifies the dictionary # while we are iterating over it for file_id in ProcessRepository.processes.copy(): ProcessRepository.cancel_process(file_id) @staticmethod def is_running(file_id): return file_id in ProcessRepository.processes @staticmethod def cancel_process(file_id): """ cancel a specific Process :param file_id: the id of the file corresponding to the Process """ # stop thread ProcessRepository.processes[file_id].stop() # update status file = File.query.filter_by(id=file_id).first() file.status = StatusMap.failed.value file.clear() db.session.commit() # emit file_done event socketio.emit("file_done", {"data": formatted_file_data(file)}) # remove from processes dict ProcessRepository.processes.pop(file_id) @staticmethod def check_and_start_processes(): """ check if it's required to start new Processes and do so if needed """ while ProcessRepository.encoding_active: # grab next potential file to process file = FileRepository.get_queued_query().order_by(Package.position.asc(), File.position.asc()).first() if file is None or ProcessRepository.count_processes_active() >= config["general"].getint( "parallel_processes"): break # update file.status in DB file.status = StatusMap.processing.value db.session.commit() # start the Process from app.modules.mod_process.process import Process process = Process(file) process.daemon = True # todo debug # file.status = 0 # db.session.commit() # ProcessRepository.encoding_active = False # add to "processes" dict ProcessRepository.processes[file.id] = process process.start() # emit file_started event data = formatted_file_data(file) data["count_active"] = ProcessRepository.count_processes_active() data["count_queued"] = ProcessRepository.count_processes_queued() socketio.emit("file_started", {"data": data}) @staticmethod def count_processes_active(): """ :return: the amount of processes currently active """ return len(ProcessRepository.processes) @staticmethod def count_processes_queued(): """ :return: the amount of Files currently queued """ return FileRepository.get_queued_query().count() @staticmethod def count_processes_total(): """ :return: count of all Files that are in packages that are queued """ # return ProcessRepository.count_processes_active() + ProcessRepository.count_processes_queued() return Package.query.filter_by(queue=True).join(File).count() # TODO @staticmethod def file_done(file): """ 
will be called whenever a Process is finished :param file: the File object of the File that is done """ # delete from "processes" ProcessRepository.processes.pop(file.id) # remove original file from disk if desired if config.getboolean("encoding", "delete_old_file"): os.remove(file.filename) # rename file if desired if config.getboolean("encoding", "rename_enabled"): rename_search = config.get("encoding", "rename_search") rename_replace = config.get("encoding", "rename_replace") # get pathinfo pathinfo = os.path.split(file.filename) path = pathinfo[0] old_filename = pathinfo[1] # only rename if match occurs if re.match(rename_search, old_filename): new_filename = re.sub(rename_search, rename_replace, old_filename) # rename output_filename (created by ffmpeg, see process.py) to new_filename os.rename(path + os.sep + file.output_filename, path + os.sep + new_filename) # update status to "finished" db.session.query(File).filter_by(id=file.id).update(dict(status=StatusMap.finished.value)) db.session.commit() # check if it's necessary to start new processes ProcessRepository.check_and_start_processes() # notify client socketio.emit("file_done", { "data": { "id": file.id, "count_active": ProcessRepository.count_processes_active(), "count_queued": ProcessRepository.count_processes_queued(), "count_total": ProcessRepository.count_processes_total(), } }) app.logger.debug("Done with encoding of %s" % file.filename) @staticmethod def file_failed(file): """ will be called whenever a File fails :param file: the File object of the File that has failed """ # delete from "processes" ProcessRepository.processes.pop(file.id) # update status and set attributes to zero file = db.session.query(File).filter_by(id=file.id).first() file.status = StatusMap.failed.value file.clear() db.session.commit() # check if it's necessary to start new processes ProcessRepository.check_and_start_processes() # notify client socketio.emit("file_done", { "data": { "id": file.id, "count_active": ProcessRepository.count_processes_active(), "count_queued": ProcessRepository.count_processes_queued(), "count_total": ProcessRepository.count_processes_total(), } }) @staticmethod def file_progress(file): """ will be called whenever a file makes progress :param file: the File object of the File that has made progress """ # format data info = formatted_file_data(file) socketio.emit("file_progress", {"data": info})
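check_and_start_processes() only relies on a narrow interface from app.modules.mod_process.process.Process: construction with a File, a settable daemon attribute, start() and stop(), with progress and completion reported back through file_progress(), file_done() and file_failed(). A minimal thread-based stand-in that satisfies that contract (purely illustrative, not the project's real encoder) could look like this:

import threading

class DummyProcess(threading.Thread):
    """Stand-in mimicking the interface ProcessRepository expects."""

    def __init__(self, file):
        super(DummyProcess, self).__init__()
        self.file = file
        self._stopped = threading.Event()

    def run(self):
        # The real Process would drive the encoder here and call
        # ProcessRepository.file_progress()/file_done() as it goes.
        while not self._stopped.is_set():
            self._stopped.wait(1.0)

    def stop(self):
        # Invoked by ProcessRepository.cancel_process().
        self._stopped.set()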
If your idea of a new home is one that you can build to your own specifications with every convenience to your liking, new construction homes in Jacksonville are a great place to end your search. You can choose from a variety of different home models, amenities, square footage, and more in many available communities throughout the Jacksonville, Florida area. Several communities are currently offering new construction homes for sale in Jacksonville, such as Nocatee, Oakleaf Plantation and World Golf Village. From approximately $100,000 and up, you can find the new construction home you have been waiting for in Jacksonville, Florida. Imagine picking out all of the details for your new home, from the number of bedrooms right down to the color of the countertops and the type of flooring - you have complete control. It is a great feeling to be the first owner of any new construction home, townhome or condo! Jacksonville is a thriving community and an economic staple for the state of Florida. The growth and prosperity of the city continue year after year and, not surprisingly, property values continue to rise as well, making the purchase of a new construction home a good investment. These new construction homes, which include single-family dwellings, condos, and townhomes, remain in high demand throughout the city of Jacksonville. Data from 2005 show that the median tax bill for real estate owners in Jacksonville was $1,302. In 2006, the median home value was recorded at $180,400, which also makes the area desirable for its affordability. Being such a large city, Jacksonville, Florida has over 160 schools for your child or children to attend. Several of these schools have been recognized by the state of Florida as Top Performing Schools. You will also find that the city of Jacksonville has two separate high schools with International Baccalaureate Diploma Programs: The Stanton College Preparatory School and The Paxon School for Advanced Studies. Impressively, there are also magnet schools offering programs in the performing arts for children who are interested in learning dance, theater, or music. One of the best things about living in Jacksonville is the excitement of the city life. There are countless places to shop, dine, dance, drink, explore and play, with activities for families and people of all ages. There are beaches, golf, concerts and events continuously happening in the area. If you choose to purchase a new construction home in any part of Jacksonville, there will never be a lack of fun or things to do. In Downtown Jacksonville especially, you will find museums, day cruises, art galleries, and a variety of other activities to choose from in your spare time. The official government website for Jacksonville, Florida lists six major sections of the city: Greater Arlington, North Jacksonville, Northwest Jacksonville, Southeast Jacksonville, Southwest Jacksonville, and Downtown Jacksonville (also known as the Urban Core). As of 2005, there were 782,623 people reportedly living in Jacksonville, in a total of 284,499 households. These numbers continue to increase year after year, keeping Jacksonville, Florida the twelfth most populous city in the United States. The demographics in Jacksonville show a fairly even mixture of different types of people. Statistics show about 64% of the population is Caucasian, about 34% is African American, and about 2% is made up of other races.
If you are truly in the market for a new construction home, a city like Jacksonville, Florida, with its continuous economic growth, is the place to look. Even when the real estate market has hit a plateau, home values continue to rise in the Jacksonville area. The city offers great schools and education for your children, countless employment opportunities, and all of the excitement and benefits of living in a major metropolitan area. No matter what your lifestyle is like, there is a new construction home in Jacksonville that will meet your needs.
# bbref.py import datetime import logging import re from string import ascii_lowercase from bs4 import BeautifulSoup from dateutil.parser import * from nba.scraper import BasketballScraper from nba.dates import datetostr from nba.names import fuzzy_match from nba.pipelines.bbref import * from nba.player.nbacom import * class Scraper(BasketballScraper): ''' ''' def __init__(self, headers=None, cookies=None, cache_name=None): ''' Args: headers: cookies: cache_name: ''' logging.getLogger(__name__).addHandler(logging.NullHandler()) BasketballScraper.__init__(self, headers=headers, cookies=cookies, cache_name=cache_name) def players(self, initial): ''' Returns: content: dict with keys of alphabet ''' base_url = 'http://www.basketball-reference.com/players/{}/' return self.get(base_url.format(initial.lower())) def player_page(self, pid): ''' Gets page for individual player Args: pid(str): 'smithje01' Returns: str: HTML of page ''' base_url = 'http://www.basketball-reference.com/players/{}/{}.html' return self.get(base_url.format(pid[0].lower(), pid)) class Parser(): ''' ''' def __init__(self,**kwargs): ''' ''' logging.getLogger(__name__).addHandler(logging.NullHandler()) def players(self, content): ''' Parses page of players with same last initial (A, B, C, etc.) Args: content: HTML string Returns: list of dict ''' results = [] soup = BeautifulSoup(content, 'lxml') for row in soup.find('table', {'id': 'players'}).find('tbody').find_all('tr'): player = dict([(td['data-stat'], td.text) for td in row.find_all('td')]) player['source_player_id'] = row.find('th').get('data-append-csv') player['source_player_name'] = row.find('th').find('a').text th = row.find('th') if th.find('strong'): player['active'] = True else: player['active'] = False if player.get('pos'): player['source_player_position'] = player['pos'] player.pop('pos', None) results.append(player) return results def player_page(self, content, pid): ''' Parses player page Args: content: HTML string Returns: dict: source, source_player_id, source_player_name, source_player_position, source_player_dob, source_player_team_code, source_player_team_name ''' player = {'source': 'bref', 'source_player_id': pid} soup = BeautifulSoup(content, 'lxml') #source_player_name h1 = soup.find('h1', {'itemprop': 'name'}) if h1: player['source_player_name'] = h1.text #source_player_position positions = ['Shooting Guard', 'Power Forward and Small Forward', 'Small Forward', 'Center', 'Point Guard', 'Center and Power Forward', 'Power Forward and Center', 'Shooting Guard and Small Forward', 'Power Forward', 'Small Forward and Shooting Guard', 'Point Guard and Shooting Guard', 'Guard', 'Forward'] div = soup.find('div', {'itemtype': 'https://schema.org/Person'}) for p in div.find_all('p'): if 'Position:' in p.text: for line in [l.strip() for l in p.text.split('\n')]: if line in positions: player['source_player_position'] = line elif 'Team:' in p.text: a = p.find('a') if a: player['source_player_team_code'] = a['href'].split('/')[2] player['source_player_team_name'] = a.text # source_player_dob bd = soup.find('span', {'id': 'necro-birth'}) if bd: player['source_player_dob'] = bd.attrs.get('data-birth') return player class Agent(object): ''' Performs script-like tasks using NBA.com API ''' def __init__(self, db=None, cache_name='bbref-agent', cookies=None, table_names=None): ''' Args: cache_name (str): for scraper cache_name cookies: cookie jar db (NBAPostgres): instance table_names (dict): Database table names ''' 
logging.getLogger(__name__).addHandler(logging.NullHandler()) self.scraper = BBRefScraper(cache_name=cache_name, cookies=cookies) self.parser = BBRefParser() self.db = db self.bbref_players = {} def match_gamelog_player(self, gamelog_player): ''' Matches player from nbacom_gamelog with bbref player Args: gamelog_player (dict): Returns: dict ''' # gamelog player # {'PLAYER_ID': 2544, 'PLAYER_NAME': 'LeBron James', # 'TEAM_ABBREVIATION': 'CLE', 'TEAM_NAME': 'Cleveland Cavaliers'} # # bbref player # {'source': 'bref', source_player_dob': '1992-03-23', 'source_player_id': 'irvinky01', # 'source_player_name': 'Kyrie Irving', 'source_player_position': 'Point Guard', # 'source_player_team_code': 'BOS', 'source_player_team_name': 'Boston Celtics'} # bbref_players caches pages for each letter # helpful if doing more than a few players fn, ln = gamelog_player['PLAYER_NAME'].split() letter = ln[0].lower() if not self.bbref_players.get(letter): content = self.scraper.players(letter) self.bbref_players[letter] = self.parser.players(content) # step one: find all players with the same name matches = [p for p in self.bbref_players.get(letter) if p['source_player_name'] == gamelog_player['PLAYER_NAME']] # if no matches, then look for individual player page on bbref # newer players may not have been added to the letter index page ('a', 'b', 'c') if not matches: pid = bbref_player_id(fn, ln) logging.info('trying player page for {}'.format(pid)) content = self.scraper.player_page(pid) bbref_player = self.parser.player_page(content, pid) if bbref_player: return bbref_player # if there is only 1 match, then assume it is the right player # need to get the player page, which has the full position elif matches and len(matches) == 1: logging.info('add_gamelog_player: found 1 match') pid = matches[0].get('source_player_id') content = self.scraper.player_page(pid) bbref_player = self.parser.player_page(content, pid) if bbref_player: return bbref_player # if more than 1 match, then try to match team as well # very unlikely to have duplicate elif matches and len(matches) > 1: logging.info('add_gamelog_player: found >1 match') for match in matches: pn = gamelog_player['PLAYER_NAME'] pt = gamelog_player['TEAM_NAME'] for match in matches: bbrefn = match['source_player_name'] bbreft = match['source_player_team_name'] if (pn == bbrefn and pt == bbreft): pid = match.get('source_player_id') content = self.scraper.player_page(pid) bbref_player = self.parser.player_page(content, pid) if bbref_player: return bbref_player else: logging.info('no match for {}'.format(gamelog_player['PLAYER_NAME'])) return None def match_nbacom_player(self, nbacom_player): ''' Matches nbacom player (player v2015 resource) with bbref player Args: nbacom_player (dict): Returns: dict ''' # nbacom player # {'birthdate': datetime.datetime(1993, 8, 1, 0, 0), 'country': 'Spain', # 'display_first_last': 'Alex Abrines', 'draft_number': 32, 'draft_round': 2, 'draft_year': 2013, # 'first_name': 'Alex', 'from_year': 2016, 'height': 42, 'jersey': 8, # 'last_affiliation': 'Spain/Spain', 'last_name': 'Abrines', 'nbacom_player_id': 203518, # 'nbacom_position': 'G', 'school': '', 'weight': 190} # # bbref player # {'source': 'bref', source_player_dob': '1992-03-23', 'source_player_id': 'irvinky01', # 'source_player_name': 'Kyrie Irving', 'source_player_position': 'Point Guard', # 'source_player_team_code': 'BOS', 'source_player_team_name': 'Boston Celtics'} # bbref_players caches pages for each letter # helpful if doing more than a few players letter = 
nbacom_player['last_name'][0].lower() if not self.bbref_players.get(letter): content = self.scraper.players(letter) self.bbref_players[letter] = self.parser.players(content) # step one: find all players with the same name matches = [p for p in self.bbref_players.get(letter) if p['source_player_name'] == nbacom_player.get('display_first_last')] # if no matches, then look for individual player page on bbref # newer players may not have been added to the letter index page ('a', 'b', 'c') if not matches: pid = bbref_player_id(nbacom_player['first_name'], nbacom_player['last_name']) logging.info('trying player page for {}'.format(pid)) content = self.scraper.player_page(pid) bbref_player = self.parser.player_page(content, pid) if bbref_player: return bbref_player # if there is only 1 match, then assume it is the right player # need to get the player page, which has the full position elif matches and len(matches) == 1: logging.info('add_gamelog_player: found 1 match') pid = matches[0].get('source_player_id') content = self.scraper.player_page(pid) bbref_player = self.parser.player_page(content, pid) if bbref_player: return bbref_player # if more than 1 match, then try to match team as well # very unlikely to have duplicate elif matches and len(matches) > 1: logging.info('add_gamelog_player: found >1 match') for match in matches: dob = match['source_player_dob'] if dob == datetostr(nbacom_player.get('birthdate'), fmt='nba'): return match else: logging.info('no match for {}'.format(nbacom_player['display_first_last'])) return None def update_player_xref(self): ''' Updates player_xref table with bbref players Args: None Returns: None ''' nbacom_players_d = nbacom_xref(self.db) nbacom_players_d2 = nbacom_xref(self.db, with_pos=True) wanted = ['source', 'source_player_id', 'source_player_name', 'source_player_position'] # loop through each 'letter' page of players for letter in ascii_lowercase: if letter == 'x': continue logging.info('starting {}'.format(letter)) content = self.scraper.players(letter) for p in self.parser.players(content): # try direct name match first # if no match, then use fuzzy matching # if 1 match, then add to database # if more then 1 result, then consider positions as well match = nbacom_players_d.get(p['source_player_name'].lower()) if not match: # try fuzzy matching # TODO: implement fuzzy match if p.get('active'): logging.error('could not match {}'.format(p)) elif len(match) == 1: toins = {k: v for k, v in p.items() if k in wanted} toins['source'] = 'bbref' toins['nbacom_player_id'] = match[0]['nbacom_player_id'] toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']), '%Y-%m-%d') self.db.insert_dict(toins, 'extra_misc.player_xref') else: key = '{}_{}'.format(p['source_player_name'], p['source_player_position']).lower() match2 = nbacom_players_d2.get(key) if not match2: if p.get('active'): match3 = fuzzy_match(key, list(nbacom_players_d2.keys())) if match3: try: toins = {k: v for k, v in p.items() if k in wanted} toins['source'] = 'bbref' toins['nbacom_player_id'] = nbacom_players_d2.get(match3).get('nbacom_player_id') toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']), '%Y-%m-%d') self.db.insert_dict(toins, 'extra_misc.player_xref') except: logging.error('could not match {}'.format(p)) else: logging.error('could not match {}'.format(p)) elif match2 and len(match2) == 1: toins = {k: v for k, v in p.items() if k in wanted} toins['source'] = 'bbref' toins['nbacom_player_id'] = match2[0]['nbacom_player_id'] 
toins['source_player_dob'] = datetime.datetime.strftime(parse(p['birth_date']), '%Y-%m-%d') self.db.insert_dict(toins, 'extra_misc.player_xref') else: if p.get('active'): logging.error('could not match {}'.format(p)) ''' TODO: can match DOB for multiple players more accurate than fuzzy match wanted = ['source_player_id', 'source_player_position', 'source_player_name'] for m in tomatch: dob = parse(m.get('birth_date')).date() nbap = nbadb2.select_scalar(q.format(m['source_player_name'].split()[-1] , dob)) if nbap: toins = {k:v for k,v in m.items() if k in wanted} toins['source'] = 'bbref' toins['nbacom_player_id'] = nbap toins['source_player_dob'] = m['birth_date'] nbadb2._insert_dict(toins, 'extra_misc.player_xref') ''' if __name__ == '__main__': pass
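A short usage sketch of the Scraper/Parser pair defined above: fetch the index page for one last-name initial, then pull the detail page for the first player found. The cache name is arbitrary, and the calls assume basketball-reference.com still serves these pages in the scraped layout:

scraper = Scraper(cache_name='bbref-demo')
parser = Parser()

# players whose last names start with 'a'
players = parser.players(scraper.players('a'))

# follow the first result to its player page for position, DOB and team details
pid = players[0]['source_player_id']
detail = parser.player_page(scraper.player_page(pid), pid)
print(detail.get('source_player_name'), detail.get('source_player_position'))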
from scrapy.exceptions import NotConfigured
from scrapy.utils.request import request_httprepr
from scrapy.utils.response import response_httprepr
from scrapy.stats import stats
from scrapy.conf import settings


class DownloaderStats(object):

    def __init__(self):
        if not settings.getbool('DOWNLOADER_STATS'):
            raise NotConfigured

    def process_request(self, request, spider):
        stats.inc_value('downloader/request_count', spider=spider)
        stats.inc_value('downloader/request_method_count/%s' % request.method, spider=spider)
        reqlen = len(request_httprepr(request))
        stats.inc_value('downloader/request_bytes', reqlen, spider=spider)

    def process_response(self, request, response, spider):
        stats.inc_value('downloader/response_count', spider=spider)
        stats.inc_value('downloader/response_status_count/%s' % response.status, spider=spider)
        reslen = len(response_httprepr(response))
        stats.inc_value('downloader/response_bytes', reslen, spider=spider)
        return response

    def process_exception(self, request, exception, spider):
        ex_class = "%s.%s" % (exception.__class__.__module__, exception.__class__.__name__)
        stats.inc_value('downloader/exception_count', spider=spider)
        stats.inc_value('downloader/exception_type_count/%s' % ex_class, spider=spider)
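The middleware raises NotConfigured unless the DOWNLOADER_STATS setting is truthy, and like any downloader middleware it has to be registered in the project settings. A sketch of the relevant settings.py entries, with the dotted module path assumed for illustration:

# settings.py (sketch)
DOWNLOADER_STATS = True
DOWNLOADER_MIDDLEWARES = {
    # adjust the path to wherever this DownloaderStats class actually lives
    'myproject.middlewares.DownloaderStats': 850,
}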
Used ignition switch, attaches to the bottom of the barrel, Mazda MX5 & Eunos Roadster mk1. UM00596.
from data_types.organization import Organization from data_types.repository import Repository from data_types.user import User from .base import EventBase class EventRepository(EventBase): def __init__(self, sdk): super(EventRepository, self).__init__(sdk) self.hook = None self.repository = None self.sender = None """ RepositoryEvent Triggered when someone creates a new repository in your organization. https://docs.github.com/en/developers/webhooks-and-events/webhook-events-and-payloads#repository """ async def process(self, payload, chat): """ Processes Repository event :param payload: JSON object with payload :param chat: current chat object :return: """ self.sdk.log("Repository event payload taken {}".format(payload)) try: self.repository = Repository(payload['repository']) self.organization = Organization(payload['organization']) self.sender = User(payload['sender']) except Exception as e: self.sdk.log('Cannot process RepositoryEvent payload because of {}'.format(e)) await self.send( chat['chat'], '🦍 <a href=\"{}\">{}</a> created a repository <a href=\"{}\">{}</a> in the {} organization'.format( self.sender.html_url, self.sender.login, self.repository.html_url, self.repository.full_name, self.organization.login), 'HTML' )
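process() only reads a handful of payload fields (repository.full_name/html_url, organization.login, sender.login/html_url), so a trimmed-down repository event along the lines below is enough to illustrate the call; the values are invented, and the data_types wrappers may expect additional keys from the full GitHub payload:

sample_payload = {
    "action": "created",
    "repository": {
        "full_name": "acme/new-repo",
        "html_url": "https://github.com/acme/new-repo",
    },
    "organization": {"login": "acme"},
    "sender": {
        "login": "octocat",
        "html_url": "https://github.com/octocat",
    },
}

# process() is a coroutine, so it is awaited with the chat wrapper it expects:
# await EventRepository(sdk).process(sample_payload, {"chat": chat_id})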
I have something for fabric scraps and folk art! I really do! I have it for paper scraps as well, but lately I have been wanting to work with fabric and stitching. I have so many bags of fabric scraps that I have used for lots of projects over the years, and I decided to come up with this latest number (top picture) all from what I already had. I just love to work with "throw-aways" and make them come together into something new. I have been really inspired by the art in Aminah Brenda Lynn Robinson's book "Symphonic Poem," which a good friend gave to me many years ago after an exhibit of hers we saw in Columbus, Ohio. She is an Ohio artist, and her beautiful fabric artworks called "RagGonOns" are a spectacle. I also wanted to imitate one of my favorite pieces of art that I own, one that hangs where I see it every day. It is a beautiful day today and I am eager to get working in the barn and outside! I am so excited about the barn sale coming up and feel like I have such cool fresh pieces, and I just can't wait!
#!/usr/bin/python # -*- coding: UTF-8 -*- import requests import sys import datetime import optparse import urllib2 from BeautifulSoup import BeautifulSoup import textwrap options = optparse.OptionParser(usage='%prog -r <Realm> -c <Character Name> --cs <options>', description='WoW API functions (https://github.com/blizzard/api-wow-docs) - OneSockThief') #Functions options_functions = optparse.OptionGroup(options, 'Supported functions') options_functions.add_option('--cs', '--charactersearch', action="store_true", dest='charactersearch', help='Character search / Information') options_functions.add_option('--ah', '--auctionsearch', action="store_true", dest='auctionsearch', help='Auction house search') options.add_option_group(options_functions) #Required parameters for other functions options.add_option('-r', '--realm', type='string', dest='realm', help='Realm to search/filter by') options.add_option("-c", "--character", type='string', dest="character", help="Search for a character by name") #Smaller functions, for character details character_group = optparse.OptionGroup(options, 'Detailed character information to use with Character Search (--cs)') character_group.add_option("--guild", action="store_true", dest="guild", help="Guild information") character_group.add_option("--items", action="store_true", dest="items", help="Current equipped items") character_group.add_option("--mounts", action="store_true", dest="mounts", help="Current mounts collected") character_group.add_option("--pvp", action="store_true", help="PvP stats") character_group.add_option("--quests", action="store_true", dest="quests", help="Current active quests") character_group.add_option("--reputation", action="store_true", dest="reputation", help="Current reputation level of appropriate factions") character_group.add_option("--stats", action="store_true", dest="stats", help="Currect character stats #pewpew") character_group.add_option("--talents", action="store_true", dest="talents", help="Current talent progres") character_group.add_option("--audit", action="store_true", dest="audit", help="Audit the character") options.add_option_group(character_group) base_url = "http://eu.battle.net/api/wow" def check_sub_character_options(): requests = [] if opts.guild: requests.append('guild') if opts.items: requests.append('items') if opts.mounts: requests.append('mounts') if opts.pvp: requests.append('pvp') if opts.quests: requests.append('quests') if opts.reputation: requests.append('reputation') if opts.stats: requests.append('stats') if opts.talents: requests.append('talents') if opts.audit: requests.append('audit') return requests def query_api(url): try: s = requests.get(url).json() pass except Exception: #raise e pass try: if s["reason"]: print "ERROR: " + s["reason"] sys.stop() except: return s def auction_house(realm): print "THIS SECTION IS BROKEN, EU AUCTION HOUSE IS MIA!" 
return url = base_url + "/api/wow/auction/data/" + realm s = query_api(url) print s def character_male_female(n): if n == 0: return "Male" if n == 1: return "Female" def character_class(n): url = base_url + "/data/character/classes" classes = query_api(url) for cclass in classes['classes']: if cclass['id'] == n: return cclass['name'] def character_race(n): url = base_url + "/data/character/races" races = query_api(url) for craces in races['races']: if craces['id'] == n: return craces['name'] def character_search(name, realm): fields = check_sub_character_options() url = base_url + "/character" print "\nCharacter search for " + name + " on " + realm + "\n" url = url + "/" + realm.title() url = url + "/" + name.title() #See if any sub fields are queried if fields: fields = ",".join(fields) url = url + "?fields=" + fields #Try and request the data from the API s = query_api(url) parse_char_info(s) #PRINT EXTRA INFO AT THE BOTTOM: if opts.guild: character_guild(s) if opts.items: character_items(s) if opts.mounts: character_mounts(s) if opts.pvp: character_pvp(s) if opts.quests: character_quests(s) if opts.reputation: character_reputation(s) if opts.stats: character_stats(s) if opts.talents: character_talents(s) if opts.audit: character_audit(s) def parse_char_info(char_api_data): print "Realm: " + str(char_api_data["realm"]) print "Name: " + str(char_api_data["name"]) print "Level: " + str(char_api_data["level"]) print "Class: " + character_class(char_api_data["class"]) print "Race: " + character_race(char_api_data["race"]) print "Calc Class: " + str(char_api_data["calcClass"]) print "Gender: " + character_male_female(char_api_data["gender"]) print "Achievement Points: " + str(char_api_data["achievementPoints"]) print "Total Honorable Kills: " + str(char_api_data["totalHonorableKills"]) print "Battlegroup: " + str(char_api_data["battlegroup"]) print "Last Modified: " + str(datetime.datetime.fromtimestamp(char_api_data["lastModified"]/1000).strftime('%Y-%m-%d %H:%M:%S')) print "Thumbnail: http://eu.battle.net/static-render/eu/" + str(char_api_data["thumbnail"]) def character_reputation(s): print "\n\tReputation:" names = [] for long_names in s["reputation"]: if (long_names["value"] != 0) and (long_names["standing"] > 0): names.append(long_names["name"]) longest = len(max(names, key=len)) for reps in s["reputation"]: minimum = str(reps["value"]) for x in xrange(3,5): if len(minimum) < x: minimum = minimum + " " if (reps["value"] != 0) and (reps["standing"] > 0): bar_length = 25 bar = reps["name"] calc = round(float(reps["value"]) / float(reps["max"]) * bar_length) empty = bar_length - calc #This is the length of the BAR calculate_empty_spaces = longest - len(bar) line = u'█' bar = bar + " "*calculate_empty_spaces + " (lvl:" + str(reps["standing"]) + ") " + minimum + " |" + line*int(calc) + " "*int(empty) + "| " + str(reps["max"]) print "\t" + bar def character_guild(s): print "\n\tGuild:" guild_info = s["guild"] print "\tName: " + guild_info["name"] print "\tTotal Achievement Points: " + str(guild_info["achievementPoints"]) print "\tTotal Members: " + str(guild_info["members"]) def character_pvp(s): twovtwo = s["pvp"]["brackets"]["ARENA_BRACKET_2v2"] threevthree = s["pvp"]["brackets"]["ARENA_BRACKET_3v3"] fivevfive = s["pvp"]["brackets"]["ARENA_BRACKET_5v5"] RBG = s["pvp"]["brackets"]["ARENA_BRACKET_RBG"] print "\n\tPvP Ratings:" print "\t2v2:" print "\t\tRating: " + str(twovtwo["rating"]) print "\t\tSeason Won: " + str(twovtwo["seasonWon"]) print "\t\tSeason Played: " + 
str(twovtwo["seasonPlayed"]) print "\t\tWeekly Won: " + str(twovtwo["weeklyWon"]) print "\t\tWeekly Played " + str(twovtwo["weeklyPlayed"]) print "\t3v3: " print "\t\tRating: " + str(threevthree["rating"]) print "\t\tSeason Won: " + str(threevthree["seasonWon"]) print "\t\tSeason Played: " + str(threevthree["seasonPlayed"]) print "\t\tWeekly Won: " + str(threevthree["weeklyWon"]) print "\t\tWeekly Played " + str(threevthree["weeklyPlayed"]) print "\t5v5: " print "\t\tRating: " + str(fivevfive["rating"]) print "\t\tSeason Won: " + str(fivevfive["seasonWon"]) print "\t\tSeason Played: " + str(fivevfive["seasonPlayed"]) print "\t\tWeekly Won: " + str(fivevfive["weeklyWon"]) print "\t\tWeekly Played " + str(fivevfive["weeklyPlayed"]) print "\tRated BG: " print "\t\tRating: " + str(RBG["rating"]) print "\t\tSeason Won: " + str(RBG["seasonWon"]) print "\t\tSeason Played: " + str(RBG["seasonPlayed"]) print "\t\tWeekly Won: " + str(RBG["weeklyWon"]) print "\t\tWeekly Played " + str(RBG["weeklyPlayed"]) def character_items(s): all_items = s["items"] print "\n\tItems:" print "\tHead: " + str(all_items["head"]["name"]) + " ilvl: " + str(all_items["head"]["itemLevel"]) print "\tShoulders: " + str(all_items["shoulder"]["name"]) + " ilvl: " + str(all_items["shoulder"]["itemLevel"]) print "\tNeck: " + str(all_items["neck"]["name"]) + " ilvl: " + str(all_items["neck"]["itemLevel"]) print "\tBack: " + str(all_items["back"]["name"]) + " ilvl: " + str(all_items["back"]["itemLevel"]) print "\tFeet: " + str(all_items["feet"]["name"]) + " ilvl: " + str(all_items["feet"]["itemLevel"]) print "\tWrist: " + str(all_items["wrist"]["name"]) + " ilvl: " + str(all_items["wrist"]["itemLevel"]) print "\tMain Hand: " + str(all_items["mainHand"]["name"]) + " ilvl: " + str(all_items["mainHand"]["itemLevel"]) print "\tOff Hand:" + str(all_items["head"]["name"]) + " ilvl: " + str(all_items["head"]["itemLevel"]) print "\tHands: " + str(all_items["hands"]["name"]) + " ilvl: " + str(all_items["hands"]["itemLevel"]) print "\tLegs: " + str(all_items["legs"]["name"]) + " ilvl: " + str(all_items["legs"]["itemLevel"]) print "\tWaist: " + str(all_items["waist"]["name"]) + " ilvl: " + str(all_items["waist"]["itemLevel"]) print "\tFinger 1: " + str(all_items["finger1"]["name"]) + " ilvl: " + str(all_items["finger1"]["itemLevel"]) print "\tFinger 2: " + str(all_items["finger2"]["name"]) + " ilvl: " + str(all_items["finger2"]["itemLevel"]) print "\tTrinket 1: " + str(all_items["trinket1"]["name"]) + " ilvl: " + str(all_items["trinket1"]["itemLevel"]) print "\tTrinket 2: " + str(all_items["trinket2"]["name"]) + " ilvl: " + str(all_items["trinket2"]["itemLevel"]) print "\tAverage ilvl: " + str(all_items["averageItemLevel"]) print "\tAverage ilvl Equipped: " + str(all_items["averageItemLevelEquipped"]) def character_mounts(s): mounts = s["mounts"]["collected"] print "\n\tMounts Collected:" for mount in mounts: print "\t" + mount["name"] def character_quests(s): quests = s["quests"] print "\n\tQuests:" quest_continue = query_yes_no("\tThis can take some time, do you want to continue?", None) if quest_continue == "yes": for quest in quests: quest_url = "http://www.wowhead.com/quest=" + str(quest) #Lets do something for the user, and warn him this might take long, because were grabbing the title etc. 
soup = BeautifulSoup(urllib2.urlopen(quest_url)) quest_name = soup.title.string.split('-') print "\t" + quest_name[0] + "(http://www.wowhead.com/quest=" + str(quest) + ")" else: return def character_stats(s): stats = s["stats"] longest_stat_name = [] for long_name in stats: longest_stat_name.append(long_name) longest_stat_name = len(max(longest_stat_name, key=len)) spacing = 30 spacing = spacing - longest_stat_name print "\n\tStats:" print "\t------Attributes------\r" print "\tHealth: " + str(stats["health"]) print "\tStrength: " + str(stats["str"]) print "\tAgility: " + str(stats["agi"]) print "\tIntellect: " + str(stats["int"]) print "\tStamina: " + str(stats["sta"]) print "\tPowertype: " + str(stats["powerType"]) print "\tPower: " + str(stats["power"]) print "\tAttack Power: " + str(stats["attackPower"]) print "\t------Attack------\r" print "\tMain hand dps: " + str(stats["mainHandDps"]) print "\tMain Hand DMG Max: " + str(stats["mainHandDmgMax"]) print "\tMain hand DMG Min: " + str(stats["mainHandDmgMin"]) print "\tMainhand Speed: " + str(stats["mainHandSpeed"]) print "\tOff-Hand DPS: " + str(stats["offHandDps"]) print "\tOff-Hand DMG Max: " + str(stats["offHandDmgMax"]) print "\tOff-Hand DMG Min: " + str(stats["offHandDmgMin"]) print "\tOff-Hand Speed: " + str(stats["offHandSpeed"]) print "\t------Spell------\r" print "\tSpell Power: " + str(stats["spellPower"]) print "\tSpell Crit: " + str(stats["spellCrit"]) print "\tSpell Penetration: " + str(stats["spellPen"]) print "\tMana Regen in Combat: " + str(stats["mana5Combat"]) print "\tMana Regen outside Combat: " + str(stats["mana5"]) print "\t------Defence------\r" print "\tArmor: " + str(stats["armor"]) print "\tDodge: " + str(stats["dodge"]) + "%" print "\tParry: " + str(stats["parry"]) + "%" print "\tBlock: " + str(stats["block"]) + "%" print "\t------Enhancements------\r" print "\tCrit: " + str(stats["crit"]) + "%" print "\tHaste: " + str(stats["haste"]) + "%" print "\tMastery: " + str(stats["mastery"]) + "%" print "\tSpirit: " + str(stats["spr"]) print "\tBonus Armor: " + str(stats["bonusArmor"]) print "\tMultistrike: " + str(stats["multistrike"]) + "%" print "\tVersatility: " + str(stats["versatility"]) + "%" print "\tLeech: " + str(stats["leech"]) + "%" print "\tAvoidance Rating: " + str(stats["avoidanceRating"]) + "%" def character_talents(s): print "\n\tTalents:" for talent in s["talents"]: try: if talent["selected"]: print "\tActive Talent:" except: print "\n\tSecondary Talent:" for tier in talent["talents"]: print "\tTier " + str(tier["tier"]+1) print "\t\tName: " + tier["spell"]["name"] print "\t\tCast Time: " + tier["spell"]["castTime"] try: if tier["spell"]["powerCost"]: print "\t\tPower Cost: " + tier["spell"]["powerCost"] except: pass spell_description = "\t\tDescription: " + tier["spell"]["description"].replace("\n","") print "\n\t\t".join(textwrap.wrap(spell_description, 64)) def character_audit(s): print "\n\tCharacter Audit:" if s["audit"]["missingLeatherworkerEnchants"] != {}: print "\tLeather Worker Enchants Missing:" for missing_leatherworker_enchant in s["audit"]["missingLeatherworkerEnchants"]: print "\t\t" + missingLeatherworkerEnchants if s["audit"]["emptyGlyphSlots"] > 0: print "\tTotal Empty Glyph Slots: " + str(s["audit"]["emptyGlyphSlots"]) if s["audit"]["itemsWithEmptySockets"] != {}: print "\tItems With Empty Sockets:" for empty_sockets in s["audit"]["itemsWithEmptySockets"]: print "\t\tItem: " + empty_sockets if s["audit"]["missingExtraSockets"] != {}: print "\tItems Missing Extra Sockets:" for 
missing_sockets in s["audit"]["missingExtraSockets"]: print "\t\tItem: " + missing_sockets if s["audit"]["emptySockets"] > 0: print "\tTotal Empty Sockets: " + str(s["audit"]["emptySockets"]) if s["audit"]["recommendedBeltBuckle"] != {}: buckle_description = "Description: " + s["audit"]["recommendedBeltBuckle"]["itemSpells"][0]["spell"]["description"].replace("\n","") print "\tRecommended Belt Buckle: " print "\t\t" + str(s["audit"]["recommendedBeltBuckle"]["itemSpells"][0]["spell"]["name"]) + " (" + buckle_description + ")" if s["audit"]["unenchantedItems"] != {}: print "\tUnenchanted Items:" for unenchanted_item in s["audit"]["unenchantedItems"]: print "\t\tItem: " + unenchanted_item if s["audit"]["numberOfIssues"] > 0: print "\tNumber of Issues: " + str(s["audit"]["numberOfIssues"]) if s["audit"]["noSpec"]: print "No Spec Detected!" def query_yes_no(question, default="yes"): valid = {"yes":"yes", "y":"yes", "ye":"yes", "no":"no", "n":"no"} if default == None: prompt = " [y/n] " elif default == "yes": prompt = " [Y/n] " elif default == "no": prompt = " [y/N] " else: raise ValueError("\tinvalid default answer: '%s'" % default) while 1: sys.stdout.write(question + prompt) choice = raw_input().lower() if default is not None and choice == '': return default elif choice in valid.keys(): return valid[choice] else: sys.stdout.write("\tPlease respond with 'yes' or 'no' "\ "(or 'y' or 'n').\n") #MAIN FUNCTION def main(): print " __ __ __ __ .__ " print "/ \ / \____/ \ / \ _____ ______ |__|" print "\ \/\/ / _ \ \/\/ / ______ \__ \ \____ \| |" print " \ ( <_> ) / /_____/ / __ \| |_> > |" print " \__/\ / \____/ \__/\ / python (____ / __/|__|" print " \/ \/ \/|__| " print " - @viljoenivan" global opts opts, args = options.parse_args() if len(sys.argv) == 1: options.print_help() return #Character stuff if opts.charactersearch: if opts.character and opts.realm: character_search(opts.character, opts.realm) #Auction House if opts.auctionsearch: if opts.realm: auction_house(opts.realm) if __name__ == '__main__': main()
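Since everything is wired through optparse, typical invocations follow directly from the option definitions above; the script name, realm and character used here are placeholders:

# Basic character lookup
#   python wow_api.py --cs -r Silvermoon -c Arthas
# Character lookup with equipped items, stats and PvP ratings
#   python wow_api.py --cs -r Silvermoon -c Arthas --items --stats --pvp
# Audit a character for missing enchants, sockets and glyphs
#   python wow_api.py --cs -r Silvermoon -c Arthas --audit
# Auction house lookup (marked as broken for EU in the script itself)
#   python wow_api.py --ah -r Silvermoon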
By Michael Learmonth . Published on April 26, 2013. YouTube is taking a novel approach to winning TV ad dollars during this year's Upfront season: lowering prices. Google's video platform -- the world's biggest -- isn't exactly lowering ad rates, the cost-per-thousand that advertisers pay, but it is re-thinking the way it structures up front ad deals for premium content on the eve of its "Brandcast," a big show for marketers and agencies in Manhattan next week. Last year, fresh off its initial $100 million investment in original "channels," YouTube came out with ad packages that were charitably described as "aggressive." In order to sponsor one of the new channels, advertisers were asked to make commitments to spend in excess of $10 million across YouTube. A music package, for example, was listed at $62 million, according to documents obtained by Ad Age. YouTube did get several big marketers to commit in the lower 8-figures including Unilever, Toyota and GM. This year, YouTube is dropping those requirements in hopes of luring a wider swath of advertisers to shift budgets out of TV. The new minimum this year is about $1 million for what sales chief Lucas Watson calls "Lego blocks" of YouTube content that allow bigger advertisers to buy more and smaller advertisers to buy less. "Last year we were rigid; we got a few big advertisers with huge checks," said YouTube sales chief Lucas Watson. "We got a lot of feedback about being inflexible so now we are breaking them down into more manageable chunks." The strategy shift comes at a critical time for YouTube and all the participants of next week's NewFronts, an organized effort by digital publishers to participate in the TV ritual where ad commitments worth about $18 billion are negotiated between broadcast and cable TV. Those commitments represent a big slice of what ZenithOptimedia estimates is a $64 billion U.S. TV market. Digital publishers would very much like to be part of that mix, but haven't, until recently, invested in enough TV-like video content to make that a reality. That's starting to change. YouTube and Hulu heavily invested in original content last year; others like AOL, Yahoo are investing well into the eight figures. All are looking for what has come to be described as their "'House of Cards' moment," referring to Neflix's big investment in an original show that is perceived to have been a huge success. Last year, YouTube focused heavily on selling the original content in which it had invested -- new channels and shows like WIGS, Nerdist, Vice, Machinima Prime, Jay-Z's Life & Times and others that took monetary advances from Google against ad sales. That spurred some resentment among channels that didn't take YouTube funding and had been building audiences on the platform for years. "Last year we focused everybody's eyes on the 100 channels we had launched," Mr. Watson said. This year, he said, "forget who gave [the channels] a check and lets focus on who's building great audiences." Rather than specific channels, Google is selling what it calls "media packs," or packages of programs in a genre like sports, gaming, fashion, cooking, music, comedy or education in $1 million increments. An "intelligence" package will be organized around Rainn Wilson's "Soul Pancake" and its breakout star of the past year, an 8-year-old from Tennessee, Kid President. 
In a further bid to lure conservative TV advertisers, YouTube signed a deal with the Alliance for Family Entertainment, whose members include Unilever, Walmart and Subway, to create a family-friendly package across 32 channels on YouTube. Commitments from the members of AFE represent one of the bigger upfront deals YouTube is doing this year. Advertisers will know what shows and channels they're getting but unlike last year they won't be exclusive. Exclusivity around channels sounded great last year but it caused problems in practice because the same ads were shown over and over causing them to wear out sooner than expected. Mr. Watson described "growing pains" around one of the first exclusive sponsorships YouTube sold last year when Unilever took over Young Hollywood. "We didn't totally understand the ad fatigue," he said. That partnership was adjusted during the year. YouTube will continue to peddle exclusive sponsorships on tentpole events like the Coachella, which was sponsored by T-Mobile. But Google's biggest challenge to TV may be in its sheer size. WPP CEO Sir Martin Sorrell said Thursday that Google is about to become its biggest recipient of global ad dollars, unseating News Corp. in the coming year. Google has 12,000 salespeople knocking on doors of all TV advertisers big and small. This year, they've got a secret weapon designed to attack TV's biggest weakness: the expense of reaching light TV viewers. National TV buys can pretty easily reach heavy TV users, but advertisers have to spend more on reach and frequency to find the last few when they happen to tune in. This year, all of Google's salespeople will be armed with what they call an "Extra Reach Tool" on their laptops to show TV advertisers that those light TV viewers can be reached for less money on YouTube. "The tool sits on a laptop and ingests Nielsen TV data, mixes in YouTube and produces a customized report," Mr. Watson said. "More than half of campaigns would benefit from a 16% shift of TV to YouTube."
import json import logging import os import socket import urllib import urllib2 import urlparse class Trakt(object): CLIENT_ID = 'aa9cd9a641758c5c20f2076e657a199925a6d2409dcddd0c8737b0dc1e90b6b0' CLIENT_SECRET = 'c6a1b1d563a521b4b126efd8847cd18d2a5533a702997f6401dd6e8f48c83faa' USER_AGENT = 'plex-trakt-scrobbler' def __init__(self, cfg): self.logger = logging.getLogger(__name__) self.cfg = cfg ''' Common API methods ''' def get_session(self): if os.path.exists(self.cfg.get('plex-trakt-scrobbler', 'session')): sessfp = open(self.cfg.get('plex-trakt-scrobbler', 'session'), 'r') session = sessfp.read().strip() sessfp.close() return session def _do_trakt_post(self, url, data): f = urllib2.Request(url) f.add_header('User-Agent', self.USER_AGENT) try: res = urllib2.urlopen(f, data) return json.load(res) except urllib2.URLError, e: self.logger.error('Unable to submit post data {url} - {error}'.format( url=url, error=e)) raise def _get_auth_infos(self): args = { 'client_id': self.CLIENT_ID } url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/oauth/device/code', '', '', '')) res = self._do_trakt_post(url, urllib.urlencode(args)) return res def _get_access_token(self, code): args = { 'client_id': self.CLIENT_ID, 'client_secret': self.CLIENT_SECRET, 'code': code, } url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/oauth/device/token', '', '', '')) res = self._do_trakt_post(url, urllib.urlencode(args)) return res def trakt_auth(self): print '== Requesting trakt.tv auth ==' auth_infos = self._get_auth_infos() accepted = 'n' print '\nPlease do the following to authorize the scrobbler:\n\n1/ Connect on {auth_url}\n2/ Enter the code: {code}'.format( auth_url=auth_infos['verification_url'], code=auth_infos['user_code']) while accepted.lower() == 'n': print accepted = raw_input('Have you authorized me? [y/N] :') try: access_token_infos = self._get_access_token(auth_infos['device_code']) except urllib2.HTTPError, e: self.logger.error('Unable to send authorization request {error}'.format(error=e)) return False if not access_token_infos['refresh_token']: print access_token_infos['message'] return token = access_token_infos['access_token'] refresh_token = access_token_infos['refresh_token'] fp = open(self.cfg.get('plex-trakt-scrobbler', 'session'), 'w') fp.write(token) fp.close() fp = open(self.cfg.get('plex-trakt-scrobbler', 'session') + '_refresh', 'w') fp.write(refresh_token) fp.close() self.logger.info('Trak TV authorization successful.') def _do_trakt_auth_post(self, url, data): try: session = self.get_session() headers = { 'Content-Type': 'application/json', 'Authorization': 'Bearer ' + session, 'trakt-api-version': '2', 'trakt-api-key': self.CLIENT_ID } # timeout in seconds timeout = 5 socket.setdefaulttimeout(timeout) request = urllib2.Request(url, data, headers) response = urllib2.urlopen(request).read() self.logger.info('Response: {0}'.format(response)) return response except urllib2.HTTPError as e: self.logger.error('Unable to submit post data {url} - {error}'.format(url=url, error=e.reason)) raise def _do_trakt_auth_get(self, url): return self._do_trakt_auth_post(url, None) ''' Trakt TV API methods ''' def get_media(self, media_id, source): self.logger.info('Getting Media information with {source} id: {media_id} from trak.tv.' 
.format(source=source, media_id=media_id)) url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/search', '', '', '')) url += '?id_type={source}&id={media_id}'.format(source=source, media_id=media_id) try: return self._do_trakt_auth_get(url) except: return None def get_movie(self, imdb_id): return self.get_media(imdb_id, 'imdb') def get_show(self, tvdb_id): return self.get_media(tvdb_id, 'tvdb') def scrobble_show(self, show_name, season_number, episode_number, progress, scrobble_type): self.logger.info( 'Scrobbling ({scrobble_type}) {show_name} - S{season_number}E{episode_number} - {progress} to trak.tv.' .format(show_name=show_name, scrobble_type=scrobble_type, season_number=season_number.zfill(2), episode_number=episode_number.zfill(2), progress=progress)) data = {} data['show'] = {} data['show']['title'] = show_name data['episode'] = {} data['episode']['season'] = int(season_number) data['episode']['number'] = int(episode_number) data['progress'] = int(progress) data['app_version'] = '1.0' data['app_date'] = '2014-09-22' json_data = json.dumps(data) url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/scrobble/' + scrobble_type, '', '', '')) try: self._do_trakt_auth_post(url, json_data) except: return False return True def scrobble_movie(self, imdb_id, progress, scrobble_type): self.logger.info('Scrobbling ({scrobble_type}) {imdb_id} - {progress} to trak.tv.' .format(imdb_id=imdb_id, scrobble_type=scrobble_type, progress=progress)) data = {} data['movie'] = {} data['movie']['ids'] = {} data['movie']['ids']['imdb'] = imdb_id data['progress'] = int(progress) data['app_version'] = '1.0' data['app_date'] = '2014-09-22' json_data = json.dumps(data) url = urlparse.urlunparse(('https', 'api-v2launch.trakt.tv', '/scrobble/' + scrobble_type, '', '', '')) try: self._do_trakt_auth_post(url, json_data) except: return False return True
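A minimal driver for the class above, assuming an INI-style config whose [plex-trakt-scrobbler] section has a 'session' key pointing at a writable token file; paths and show details are placeholders, and the sketch keeps to the Python 2 / urllib2 style of the code:

import ConfigParser

cfg = ConfigParser.ConfigParser()
cfg.add_section('plex-trakt-scrobbler')
cfg.set('plex-trakt-scrobbler', 'session', '/tmp/trakt_session')

trakt = Trakt(cfg)
trakt.trakt_auth()  # interactive device-code authorization; writes the token files

# season/episode are passed as strings because scrobble_show() calls .zfill(2) on them
trakt.scrobble_show('Some Show', '1', '2', 100, 'stop')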
One of the most prestigious awards presented to an NCMPR member, the Communicator of the Year Award recognizes a communication professional at a two-year community or technical college who has demonstrated leadership in college communication and marketing. Emphasis is placed on accomplishments within the past year. The Communicator Award is presented annually in each of NCMPR's seven districts, and district winners are then considered for the national award. A nominee must be a designated member of NCMPR at the time of nomination, should have worked in marketing or communications at a member college for at least five years, and should have demonstrated excellence in various aspects of the profession. A previous recipient of the district Communicator of the Year Award may be nominated again, provided that the individual has not received the district award in the previous five years and has never received the national Pacesetter Award. A new nomination packet must be completed. An individual may be selected as the national Communicator of the Year only once. Members of the Board of Directors may not be considered for this award while they remain in office. Anyone nominating an individual for the Communicator Award must be a designated member of NCMPR or affiliated with a member college. A member college may submit only one nomination each year. A nomination form must be completed online. If you nominate someone who wins a district Communicator Award, you'll get a $250 stipend to attend the 2019 national conference in San Antonio. The district Communicator Award winner gets a $625 stipend to attend the national conference.
#!/usr/bin/python # # MixMode.ai - Bro Intel Linter # # WHEN WHAT WHO # 03-04-2015 Initial development Aaron Eppert # 08-24-2015 Explicitly verify single character fields Aaron Eppert # 08-24-2015 GPL and pushed to GitHub Aaron Eppert # 08-25-2015 Small cleanups and proper exit codes for using # as a git pre-commit hook Aaron Eppert # 09-01-2015 Added column-based type verifications Aaron Eppert # 09-25-2015 Verify printable characters and escape in error Aaron Eppert # 10-07-2015 Added --psled and --warn-only options Aaron Eppert # 10-08-2015 Additional details - WARNING vs ERROR Aaron Eppert # 03-03-2016 Minor bugfix Peter McKay # 04-08-2016 Added Intel::NET support Aaron Eppert # 06-02-2017 Fixed line ending issue Aaron Eppert # 09-15-2017 Changed Intel::NET to Intel::SUBNET Kory Kyzar # 03-28-2018 Fixed IPv6 validation Aaron Eppert # 03-27-2019 Add Intel::PUBKEY_HASH and Intel::JA3 Aaron Eppert # 07-13-2019 Add CERT HASH validaion for using regex Juan Jaramillo # MD5, SHA1, SHA256, SHA512 hashes. import sys import re import string from optparse import OptionParser def write_stderr(msg): sys.stderr.write(msg + '\n') def warning_line(line, *objs): out = 'WARNING: Line %d - ' % (int(line)+1) for o in objs: out += o write_stderr(out) def error_line(line, *objs): out = 'ERROR: Line %d - ' % (int(line)+1) for o in objs: out += o write_stderr(out) def escape(c): if ord(c) > 31 and ord(c) < 127: return c c = ord(c) if c <= 0xff: return r'\x{0:02x}'.format(c) elif c <= '\uffff': return r'\u{0:04x}'.format(c) else: return r'\U{0:08x}'.format(c) def hex_escape(s): return ''.join(escape(c) for c in s) class bro_intel_indicator_return: OKAY = 0 WARNING = 1 ERROR = 2 ############################################################################### # class bro_intel_indicator_type # # This class is for handling the "indicator_type" fields within a Bro Intel # file. Note, each type of field has a specific handler. 
# class bro_intel_indicator_type: def __init__(self): self.__INDICATOR_TYPE_handler = {'Intel::ADDR': self.__handle_intel_addr, 'Intel::SUBNET': self.__handle_intel_subnet, 'Intel::URL': self.__handle_intel_url, 'Intel::SOFTWARE': self.__handle_intel_software, 'Intel::EMAIL': self.__handle_intel_email, 'Intel::DOMAIN': self.__handle_intel_domain, 'Intel::USER_NAME': self.__handle_intel_user_name, 'Intel::FILE_HASH': self.__handle_intel_file_hash, 'Intel::FILE_NAME': self.__handle_intel_file_name, 'Intel::CERT_HASH': self.__handle_intel_cert_hash, 'Intel::PUBKEY_HASH': self.__handle_intel_pubkey_hash, 'Intel::JA3': self.__handle_intel_ja3_hash} # Source: https://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python def __is_valid_ipv4_address(self, address): import socket try: socket.inet_pton(socket.AF_INET, address) except AttributeError: # no inet_pton here, sorry try: socket.inet_aton(address) except socket.error: return False return address.count('.') == 3 except socket.error: # not a valid address return False return True # Source: https://stackoverflow.com/questions/319279/how-to-validate-ip-address-in-python def __is_valid_ipv6_address(self, address): import socket try: socket.inet_pton(socket.AF_INET6, address) except socket.error: # not a valid address return False return True def __handle_intel_addr(self, indicator): ret = (bro_intel_indicator_return.OKAY, None) if self.__is_valid_ipv4_address(indicator) or self.__is_valid_ipv6_address(indicator): return ret return (bro_intel_indicator_return.ERROR, 'Invalid IP address') # In an effort to keep this script minimal and without requiring external # libraries, we will verify an Intel::SUBNET simply as: # # 0 <= octet < 255 # 0 <= netmask <= 32 # def __handle_intel_subnet(self, indicator): ret = (bro_intel_indicator_return.OKAY, None) if '/' in indicator: addr, net = indicator.split('/') if all([(int(x) >= 0 and int(x) < 255) for x in addr.split('.')]): if not (int(net) >= 0 and int(x) <= 32): ret = (bro_intel_indicator_return.ERROR, 'Invalid network block designation') else: ret = (bro_intel_indicator_return.ERROR, 'Invalid network address') else: ret = (bro_intel_indicator_return.ERROR, 'Invalid network designation') return ret # We will call this minimalist, but effective. def __handle_intel_url(self, indicator): ret = (bro_intel_indicator_return.OKAY, None) t_uri_present = re.findall(r'^https?://', indicator) if t_uri_present is not None and len(t_uri_present) > 0: ret = (bro_intel_indicator_return.WARNING, 'URI present (e.g. http(s)://)') else: rx = re.compile(r'^[https?://]?' # http:// or https:// r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain... r'localhost|' # localhost... r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip r'(?::\d+)?' 
# optional port r'(?:/?|[/?]\S+)$', re.IGNORECASE) t = rx.search(indicator) if t: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_email(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid email address') rx = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)" t_email = re.findall(rx, indicator) if len(t_email) > 0: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_software(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid software string') if len(indicator) > 0: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_domain(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid domain name') rx = r'(?=^.{4,253}$)(^((?!-)[a-zA-Z0-9-]{1,63}(?<!-)\.)+[a-zA-Z]{2,63}$)' t_domain = re.findall(rx, indicator) if len(t_domain) > 0: if indicator in t_domain[0]: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_user_name(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid username - %s' % (indicator)) if len(indicator) > 0: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_file_name(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid username length') if len(indicator) > 0: ret = (bro_intel_indicator_return.OKAY, None) return ret # Pretty weak, but should suffice for now. def __handle_intel_file_hash(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid hash length') VALID_HASH_LEN = {32: 'md5', 40: 'sha1', 64: 'sha256'} if VALID_HASH_LEN.get(len(indicator), None): ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_cert_hash(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Invalid Intel::CERT_HASH - ISSUES %s' % (indicator)) hash_present = re.compile( r'^[0-9A-F]{32}$|' # MD5 r'^[0-9A-F]{40}$|' # SHA1 r'^[0-9A-F]{64}$|' # SHA256 r'^[0-9A-F]{128}$', re.IGNORECASE) # SHA512 t = hash_present.search(indicator) if t: ret = (bro_intel_indicator_return.OKAY, None) return ret def __handle_intel_pubkey_hash(self, indicator): return (bro_intel_indicator_return.WARNING, 'Intel::PUBKEY_HASH - Needs additional validation') def __handle_intel_ja3_hash(self, indicator): ret = (bro_intel_indicator_return.WARNING, 'Intel::JA3 - Needs additional validation') if len(indicator) == 32: ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_indicator_type(self, indicator_type): ret = (bro_intel_indicator_return.ERROR, 'Invalid indicator - %s' % (indicator_type)) it = self.__INDICATOR_TYPE_handler.get(indicator_type, None) if it is not None: ret = (bro_intel_indicator_return.OKAY, None) return ret def correlate(self, indicator, indicator_type): ret = (bro_intel_indicator_return.WARNING, 'Could not correlate - %s with %s' % (indicator, indicator_type)) if len(indicator) > 1 and len(indicator_type) > 1: h = self.__INDICATOR_TYPE_handler.get(indicator_type, None) if h: ret = h(indicator) else: ret = (bro_intel_indicator_return.OKAY, None) return ret ############################################################################### # class bro_data_intel_field_values # # This class is for processing the individual Bro Intel fields and verifying # their validity. # # Note, it may be easily expanded via adding entries to self.__VERIFY within # the class constructor. 
# class bro_data_intel_field_values: EMPTY_FIELD_CHAR = '-' META_DO_NOTICE = ['T', 'F'] META_IF_IN = ['-', 'Conn::IN_ORIG', 'Conn::IN_RESP', 'Files::IN_HASH', 'Files::IN_NAME', 'DNS::IN_REQUEST', 'DNS::IN_RESPONSE', 'HTTP::IN_HOST_HEADER', 'HTTP::IN_REFERRER_HEADER', 'HTTP::IN_USER_AGENT_HEADER', 'HTTP::IN_X_FORWARDED_FOR_HEADER', 'HTTP::IN_URL', 'SMTP::IN_MAIL_FROM', 'SMTP::IN_RCPT_TO', 'SMTP::IN_FROM', 'SMTP::IN_TO', 'SMTP::IN_RECEIVED_HEADER', 'SMTP::IN_REPLY_TO', 'SMTP::IN_X_ORIGINATING_IP_HEADER', 'SMTP::IN_MESSAGE', 'SSL::IN_SERVER_CERT', 'SSL::IN_CLIENT_CERT', 'SSL::IN_SERVER_NAME', 'SMTP::IN_HEADER'] def __init__(self): self.__VERIFY = {'indicator': self.verify_indicator, 'indicator_type': self.verify_indicator_type, 'meta.do_notice': self.verify_meta_do_notice, 'meta.if_in': self.verify_meta_if_in, 'meta.desc': self.verify_meta_desc, 'meta.source': self.verify_meta_source, 'meta.cif_confidence': self.verify_meta_cif_confidence, 'meta.url': self.verify_meta_url, 'meta.whitelist': self.verify_meta_whitelist, 'meta.severity': self.verify_meta_severity, 'meta.cif_severity': self.verify_meta_cif_severity, 'meta.cif_impact': self.verify_meta_cif_impact} self.biit = bro_intel_indicator_type() def get_verifier(self, v): return self.__VERIFY.get(v, self.default) def __verify_chars(self, t): return all(ord(l) > 31 and ord(l) < 127 and l in string.printable for l in t) def __is_ignore_field(self, t): return self.EMPTY_FIELD_CHAR in t def verify_indicator(self, t): ret = (bro_intel_indicator_return.ERROR, 'Invalid indicator - %s' % (t)) if len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_indicator_type(self, t): return self.biit.verify_indicator_type(t) def correlate_indictor_and_indicator_type(self, i, it): return self.biit.correlate(i, it) def verify_meta_do_notice(self, t): ret = (bro_intel_indicator_return.OKAY, None) t_ret = t in bro_data_intel_field_values.META_DO_NOTICE if not t_ret: ret = (bro_intel_indicator_return.ERROR, 'Invalid do_notice - %s' % (str(t))) return ret def verify_meta_if_in(self, t): ret = (bro_intel_indicator_return.OKAY, None) t_ret = t in bro_data_intel_field_values.META_IF_IN if not t_ret: ret = (bro_intel_indicator_return.ERROR, 'Invalid if_in - %s' % (str(t))) return ret def verify_meta_cif_confidence(self, t): ret = (bro_intel_indicator_return.ERROR, 'Invalid confidence - %s - Needs to be 1-100' % (str(t))) try: t_int = int(t) if isinstance(t_int, (int, long)) and (t_int > 0 and t_int < 100): ret = (bro_intel_indicator_return.OKAY, None) except ValueError: ret = (bro_intel_indicator_return.ERROR, 'Invalid confidence - %s - Needs to be 1-100' % (str(t))) return ret def verify_meta_desc(self, t): ret = (bro_intel_indicator_return.WARNING, 'Invalid desc - %s' % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_meta_source(self, t): ret = (bro_intel_indicator_return.WARNING, 'Invalid source - %s' % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_meta_url(self, t): ret = (bro_intel_indicator_return.WARNING, 'Invalid url - %s' % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def 
verify_meta_whitelist(self, t): ret = (bro_intel_indicator_return.OKAY, 'Invalid whitelist - %s' % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_meta_severity(self, t): ret = (bro_intel_indicator_return.ERROR, 'Invalid severity - %s (valid: 1-10)' % (t)) try: t_int = int(t) if isinstance(t_int, (int, long)) and (t_int > 0 and t_int < 10): ret = (bro_intel_indicator_return.OKAY, None) except ValueError: ret = (bro_intel_indicator_return.ERROR, 'Invalid severity - %s (valid: 1-10)' % (t)) return ret def verify_meta_cif_severity(self, t): VALID_SEVERITY = ['-', 'low', 'medium', 'med', 'high'] ret = (bro_intel_indicator_return.ERROR, 'Invalid cif_severity - %s (valid: %s)' % (t, ','.join(VALID_SEVERITY))) if t in VALID_SEVERITY: ret = (bro_intel_indicator_return.OKAY, None) return ret def verify_meta_cif_impact(self, t): ret = (bro_intel_indicator_return.WARNING, 'Invalid cif_impact - %s' % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret def default(self, t): ret = (bro_intel_indicator_return.WARNING, 'Invalid - %s' % (t)) write_stderr("Running default handler for: %s" % (t)) if self.__is_ignore_field(t): ret = (bro_intel_indicator_return.OKAY, None) elif len(t) > 1 and self.__verify_chars(t): ret = (bro_intel_indicator_return.OKAY, None) return ret ############################################################################### # class bro_intel_feed_verifier # # This is the control class for Bro Intel Feed verification # class bro_intel_feed_verifier: stock_required_fields = ['indicator', 'indicator_type', 'meta.source'] psled_required_fields = ['indicator', 'indicator_type', 'meta.source', 'meta.desc'] field_header_designator = '#fields' feed_rx = r'([\S]+)' feed_sep_rx = r'(\t)+' header_fields = [] def __init__(self, options): self.feed_file = options.feed_file self.psled = options.psled self.__feed_header_found = False self.__num_of_fields = 0 self.required_fields = bro_intel_feed_verifier.stock_required_fields self.warn_only = options.warn_only if self.psled is not None: self.required_fields = bro_intel_feed_verifier.psled_required_fields def __make_one_indexed(self, l): return map(lambda x: x+1, l) def __is_start_of_feed(self, l): ret = False if len(l) >= 2: if l[0] == self.field_header_designator: ret = True return ret def __are_header_fields_valid(self, l): ret = False _fields_found = [] if l[0] == self.field_header_designator: for index, item in enumerate(l): if index == 0: continue if item in self.required_fields: _fields_found.append(item) self.header_fields.append(item) t_list_diff = list(set(self.required_fields) - set(_fields_found)) if len(t_list_diff) == 0: ret = True else: warning_line(0, 'Fields missing: %s' % (','.join(t_list_diff))) return ret def __count_fields(self, l): return (len(l) - 1) ## # <0 - Too few fields # 0 - Proper field count # >0 - Too many fields ## def __verify_field_count(self, l): return (len(l) - self.__num_of_fields) def __verify_non_space(self, offset, l): ret = True r = [i for i, x in enumerate(l) if x == ' '] if len(r) > 0: warning_line(offset, 'Invalid empty field, offset %s' % (self.__make_one_indexed(r))) ret = False return ret def __get_field_contents(self, l): return l.split('\t') def __verify_field_sep(self, offset, l, is_header=False): ret = True field_seps = 
re.findall(self.feed_sep_rx, l, re.IGNORECASE) __field_total = self.__num_of_fields if is_header: __field_total += 1 if len(field_seps) >= __field_total: warning_line(offset, 'Excess field separators found') ret = False for index, item in enumerate(field_seps): for s in item: if s != '\t': warning_line(offset, 'Field separator incorrect in field offset %d' % (self.__make_one_indexed(index))) ret = False return ret def __verify_header(self, index, l): ret = False contents = self.__get_field_contents(l) if self.__is_start_of_feed(contents) and self.__are_header_fields_valid(contents): if not self.__feed_header_found: self.__num_of_fields = self.__count_fields(contents) if self.__verify_field_sep(index, l, is_header=True): ret = True self.__feed_header_found = True else: write_stderr("Invalid field separator found in header. Must be a tab.") else: warning_line(index, "Duplicate header found") return ret def __verify_fields(self, index, content): ret = (bro_intel_indicator_return.OKAY, None) reason = '' _fields_to_process = {} validator = bro_data_intel_field_values() # # Not thrilled about this, but we need it to pull out correlatable fields # since, order of the actual feed fields aren't guaranteed. Ugly for now, # but workable and can likely be optimized shortly. # for content_index, t in enumerate(content): _fields_to_process[self.header_fields[content_index]] = t for k in _fields_to_process: ret = validator.get_verifier(k)(_fields_to_process[k]) if len(ret) > 0 and ret[0] != bro_intel_indicator_return.OKAY: if all(ord(l) > 31 and ord(l) < 127 and l in string.printable for l in k): t_line = str(_fields_to_process[k]) t_line = hex_escape(t_line) warning_line(index, 'Invalid entry \"%s\" for column \"%s\"' % (str(t_line), str(k))) else: warning_line(index, 'Unprintable character found for column \"%s\"' % (str(k))) break if ret: # Special case to verify indicator with indicator_type c = validator.correlate_indictor_and_indicator_type(_fields_to_process['indicator'], _fields_to_process['indicator_type']) if c is not None: if c[0] == bro_intel_indicator_return.WARNING: warning_line(index, 'Indicator type \"%s\" possible issue with indicator: \"%s\"' % (_fields_to_process['indicator_type'], _fields_to_process['indicator'])) elif c[0] == bro_intel_indicator_return.ERROR: error_line(index, 'Indicator type \"%s\" possible issue with indicator: \"%s\"' % (_fields_to_process['indicator_type'], _fields_to_process['indicator'])) ret = c return ret def __verify_entry(self, index, l): ret = (bro_intel_indicator_return.ERROR, '') contents = self.__get_field_contents(l) _content_field_count = self.__verify_field_count(contents) _warn_str = None if _content_field_count == 0: if self.__verify_field_sep(index, l) and self.__verify_non_space(index, contents): ret = self.__verify_fields(index, contents) elif _content_field_count > 0: ret = (bro_intel_indicator_return.ERROR, 'Invalid number of fields - Found: %d, Header Fields: %d - Look for: EXTRA fields or tab seperators' % (len(contents), self.__num_of_fields)) elif _content_field_count < 0: ret = (bro_intel_indicator_return.ERROR, 'Invalid number of fields - Found: %d, Header Fields: %d - Look for: EMPTY fields' % (len(contents), self.__num_of_fields)) return ret def __load_feed(self, feed): with open(feed) as f: for line in f: t_line = line.rstrip('\r\n') if len(t_line): yield t_line def __handle_reporting(self, index, c): if c is not None: if c[0] == bro_intel_indicator_return.ERROR: error_line(index, 'Details - %s' % (c[1])) elif c[0] == 
bro_intel_indicator_return.WARNING: warning_line(index, c[1]) def verify(self): for index, l in enumerate(self.__load_feed(self.feed_file)): # Check the header if index == 0: if not self.__verify_header(index, l): error_line(index, "Invalid header") sys.exit(2) else: t_ret = self.__verify_entry(index, l) if t_ret[0] != bro_intel_indicator_return.OKAY: self.__handle_reporting(index, t_ret) if t_ret[0] == bro_intel_indicator_return.ERROR and self.warn_only is None: sys.exit(3) ############################################################################### # main() ############################################################################### def main(): parser = OptionParser() parser.add_option('-f', '--file', dest='feed_file', help='Bro Intel Feed to Verify') parser.add_option('--psled', action='store_true', dest='psled', help='Verify Intel meets PacketSled requirements') parser.add_option('--warn-only', action='store_true', dest='warn_only', help='Warn ONLY on errors, continue processing and report') (options, args) = parser.parse_args() if len(sys.argv) < 2: parser.print_help() sys.exit(1) bifv = bro_intel_feed_verifier(options) bifv.verify() ############################################################################### # __name__ checking ############################################################################### if __name__ == '__main__': main()
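The handwritten Intel::SUBNET check above is easy to get wrong; note that its netmask comparison reuses the loop variable x instead of net. Where a Python 3 interpreter is available, the standard-library ipaddress module does the same validation more robustly. A minimal sketch (the function name is ours, not part of the linter):

import ipaddress

def is_valid_subnet(indicator):
    # Accepts CIDR notation such as 192.168.0.0/16; strict=True also rejects
    # networks with host bits set, which is stricter than the check above.
    try:
        ipaddress.ip_network(indicator, strict=True)
        return True
    except ValueError:
        return False

assert is_valid_subnet('10.0.0.0/8')
assert not is_valid_subnet('10.0.0.1/8')     # host bits set
assert not is_valid_subnet('300.1.1.1/24')   # invalid octet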
NANGLE, RICHARD (d. 1541?), bishop of Clonfert, came of an old Irish family settled in Mayo and Galway, and early entered the order of the Austin Friars, from whom he received his education. He was subsequently created doctor of divinity, and became provincial of his order in Ireland. In 1508 his earnest solicitations led to the foundation of the Augustinian friary at Galway (Ruddiman, Hist. of Galway, p. 272). On the death of Denis More, bishop of Clonfert, in 1534, Rowland Burke was appointed his successor by papal provision; but Henry VIII, who had determined to assert his right as head of the church in Ireland, in 1536 appointed Nangle, who was recommended to him by Archbishop Browne as being ‘not only well learned, but a right honest man, and one will set forth the Word of God in the Irish tongue.’ Nangle, however, was expelled from the see, and forced to remain shut up in Galway ‘for fear of Burgh and his complices’ (Gairdner, Letters and Papers of Henry VIII, xii. i. 1052; Carew MSS.) Henry therefore directed the deputy, Lord Grey, to prosecute the intruder under the Statute of Provisors; but nothing was done, and Burke remained in possession of the see. Nangle died apparently in 1541, and Burke received Henry's assent to his election on 24 Oct. of the same year.
# This file is generated by C:\Users\asd\src\34\scipy-0.15.1\setup.py # It contains system_info results at the time of building this package. __all__ = ["get_info","show"] atlas_threads_info={} openblas_info={} lapack_mkl_info={} blas_mkl_info={} lapack_opt_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'f77'} atlas_blas_threads_info={} blas_opt_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'c'} atlas_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['lapack', 'f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'f77'} atlas_blas_info={'library_dirs': ['C:\\local\\vendor\\binaries\\sse3'], 'libraries': ['f77blas', 'cblas', 'atlas'], 'define_macros': [('NO_ATLAS_INFO', -1)], 'language': 'c'} mkl_info={} def get_info(name): g = globals() return g.get(name, g.get(name + "_info", {})) def show(): for name,info_dict in globals().items(): if name[0] == "_" or type(info_dict) is not type({}): continue print(name + ":") if not info_dict: print(" NOT AVAILABLE") for k,v in info_dict.items(): v = str(v) if k == "sources" and len(v) > 200: v = v[:60] + " ...\n... " + v[-60:] print(" %s = %s" % (k,v))
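This generated module is what backs scipy.show_config() at runtime, and its helpers can also be called directly, for example to see which BLAS/LAPACK libraries a given build was linked against. A small usage sketch; the exact output depends on the build:

import scipy

# Pretty-print every *_info dict recorded at build time.
scipy.show_config()

# Or query a single entry programmatically through the generated module.
from scipy import __config__
lapack = __config__.get_info('lapack_opt')
print(lapack.get('libraries', []))       # e.g. ['lapack', 'f77blas', 'cblas', 'atlas']
print(lapack.get('library_dirs', []))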
Chef Gordon Ramsay stopped by The Ellen DeGeneres Show on Tuesday to cook up a vegan vegetable stir-fry. While giving Ellen a lesson in knife safety, he ended up slicing his finger. “Damn I haven’t done that for years,” the Michelin-starred chef exclaimed. Ten years to be exact. And without missing a beat Ramsay finished the stir-fry, bleeding finger and all.
import time from roglick.lib import libtcod from roglick.engine.ecs import SystemBase from roglick.components import FatigueComponent,PositionComponent from roglick.events import ClimbDownEvent,ClimbUpEvent,MoveEvent,QuitEvent,PreInputEvent from roglick.engine import event class InputSystem(SystemBase): # Define movement keys with corresponding (dx,dy) tuples MOVEMENT_KEYS = { libtcod.KEY_KP1: (-1,1), libtcod.KEY_KP2: (0,1), libtcod.KEY_KP3: (1,1), libtcod.KEY_KP4: (-1,0), libtcod.KEY_KP6: (1,0), libtcod.KEY_KP7: (-1,-1), libtcod.KEY_KP8: (0, -1), libtcod.KEY_KP9: (1,-1), 'y': (-1,-1), 'u': (1,-1), 'h': (-1,0), 'j': (0,1), 'k': (0,-1), 'l': (1,0), 'b': (-1,1), 'n': (1,1), } def execute(self): """Wait for player input, dispatching appropriate events.""" pc = self._entity_manager.pc pc_fatigue = self._entity_manager.get_component(pc, FatigueComponent) if pc_fatigue.fatigue > 0: # PC's still fatigued, need to wait until they can act return event.dispatch(PreInputEvent()) key = self.get_keypress() if key == libtcod.KEY_ESCAPE or libtcod.console_is_window_closed(): event.dispatch(QuitEvent()) #exit game # Movement keys if key in self.MOVEMENT_KEYS: event.dispatch(MoveEvent(pc, *self.MOVEMENT_KEYS[key])) elif key == '>': event.dispatch(ClimbDownEvent(pc)) elif key == '<': event.dispatch(ClimbUpEvent(pc)) def get_keypress(self): """Wrapper method for retrieving keypress events from the keyboard A bug(?) in libtcod means that the wait_for_keypress function actually returns key press AND release events, resulting in each tap of a key functioning as two "keypress" events. To work around this, we wait for a key and then test if it is actually in the 'pressed' state and, if not, wait again. This wrapper also checks for printable keys and translates key.c into the corresponding character. """ while True: key = libtcod.console_wait_for_keypress(True) #if not key.pressed and key.vk != libtcod.KEY_NONE: # # Make sure we actually get a pressed key # return self.get_keypress() if key.vk == libtcod.KEY_SHIFT or key.vk == libtcod.KEY_CONTROL: # We don't care about these keys, just ignore them continue if key.pressed: if key.vk == libtcod.KEY_F12: # Take a screenshot, pause briefly, then resume waiting libtcod.sys_save_screenshot() time.sleep(0.5) elif key.vk == libtcod.KEY_CHAR: # Translate key.c into its character reprsentation return chr(key.c) else: # Return the key code return key.vk elif key.vk == libtcod.KEY_NONE: # Ensure non-key events (e.g. closing the window) can propagate return None
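The key-to-event translation in execute() does not depend on libtcod itself; below is a self-contained sketch of the same pattern, with plain characters standing in for libtcod key codes ('q' playing the role of the escape key) and namedtuples standing in for the event classes.

from collections import namedtuple

MoveEvent = namedtuple('MoveEvent', 'entity dx dy')
QuitEvent = namedtuple('QuitEvent', '')

# vi-style movement keys, as in InputSystem.MOVEMENT_KEYS above.
MOVEMENT_KEYS = {
    'y': (-1, -1), 'u': (1, -1),
    'h': (-1, 0),  'l': (1, 0),
    'b': (-1, 1),  'n': (1, 1),
    'j': (0, 1),   'k': (0, -1),
}

def translate(key, entity='pc'):
    # Return the event a keypress should dispatch, or None for unmapped keys.
    if key == 'q':
        return QuitEvent()
    if key in MOVEMENT_KEYS:
        dx, dy = MOVEMENT_KEYS[key]
        return MoveEvent(entity, dx, dy)
    return None

assert translate('h') == MoveEvent('pc', -1, 0)
assert isinstance(translate('q'), QuitEvent)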
1) Sit with your legs straight out in front of you, raising your pelvis on a blanket if your hips or groins are tight. Exhale, bend your knees, pull your heels toward your pelvis, then drop your knees out to the sides and press the soles of your feet together. 2) Bring your heels as close to your pelvis as you comfortably can. With the first and second finger and thumb, grasp the big toe of each foot. Always keep the outer edges of the feet firmly on the floor. If it isn’t possible to hold the toes, clasp each hand around the same-side ankle or shin. 3) Sit so that the pubis in front and the tailbone in back are equidistant from the floor. The perineum then will be approximately parallel to the floor and the pelvis in a neutral position. Firm the sacrum and shoulder blades against the back and lengthen the front torso through the top of the sternum. 4) Never force your knees down. Instead release the heads of the thigh bones toward the floor. When this action leads, the knees follow. 5) Stay in this pose anywhere from 1 to 5 minutes. Then inhale, lift your knees away from the floor, and extend the legs back to their original position.
#!/usr/bin/env python from multiprocessing import Process, Queue from multiprocessing.reduction import reduce_handle, rebuild_handle import os import socket import time import threading import json import Skateboard def server(s_to_client, PASSWD, addrnew, process_id, client_pipe): print('ServerClone is ok.') data = s_to_client.recv(4096) SERVERINFO = '@Author: East Evil\nDefault Message From Server\nAnd You Can Change This Information By Youself' if data: #print data fuck_json_0 = json.dumps(['', "%s\nPlease enter passwd:" % SERVERINFO]) s_to_client.sendall(fuck_json_0) data_0 = s_to_client.recv(4096) if data_0 == PASSWD: fuck_json_x = json.dumps(['', 'Permit access to login the server...\nInput a name for show you friends']) s_to_client.sendall(fuck_json_x) name_once = s_to_client.recv(4096) # message to staff [0] is command message_to_ec = ['UPDATE CLIENT SOCKET'] # message to staff [1] is socket owner name message_to_ec.append(name_once) # message to staff [2] is socket s_to_client_reduction = reduce_handle(s_to_client.fileno()) message_to_ec.append(s_to_client_reduction) # messaget to staff [3] is socket to recveive result from staff message_to_ec.append(process_id) # 0 1 2 3 # message send to ec [command, name, client_socket, process_id] # put into pipe client_pipe.send(message_to_ec) fuck_json = json.dumps(['Server Room', 'Ok, server get you name [%s]\nEnter the chat room...' % name_once]) s_to_client.sendall(fuck_json) Skateboard.smooth(s_to_client, client_pipe, name_once, process_id) else: print 'Error password' log_file = open('temp/log-server.log') t_0 = time.localtime() now_time_0 = "%d-%d-%d-%d:%d:%d" % (t_0.tm_year, t_0.tm_mon, t_0.tm_mday, t_0.tm_hour, t_0.tm_min, t_0.tm_sec) log_file.writelines("ip:%s, port:%s failed to login at %s\n" % (addrnew[0], addrnew[1], now_time_0)) log_file.close() err_json = json.dumps(['Error Password', 'Have no permission to enter the server']) s_to_client.sendall(err_json) self.CONNECTION_LIST.remove(s_to_client) s_to_client.close()
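On the wire this server just exchanges JSON-encoded two-element lists of the form [sender, text]. A minimal sketch of that framing, separated from the multiprocessing plumbing above; like the original it naively assumes one recv(4096) returns exactly one message.

import json
import socket

def send_msg(sock, sender, text):
    # Same wire format as the server above: a JSON list [sender, text].
    sock.sendall(json.dumps([sender, text]).encode('utf-8'))

def recv_msg(sock):
    data = sock.recv(4096)
    if not data:
        return None
    sender, text = json.loads(data.decode('utf-8'))
    return sender, text

# Loopback demo over a connected socket pair.
a, b = socket.socketpair()
send_msg(a, 'Server Room', 'Please enter passwd:')
print(recv_msg(b))   # ('Server Room', 'Please enter passwd:')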
Helpful lecture, about letting go of ego and paying attention to the charts when trading; Allan uses Gold (DGP) as an example. In the past nine months, DGP (double beta ETF for Gold) has risen from $15.28 to $19.74, a gain of about 30%. This represents a Buy & Hold return, with the leverage inherent in the double beta construction of this instrument. Or, DGP can be traded with a rule-based trend following system. The system represented in the chart below traded DGP long and short during the same time period, September 2008 to July 2009. There were 12 trades, 10 winners and 2 losers. The total cumulative gain was $25.82, or about 170%. The average trade gained about 13%. This is a Daily chart, more trades and a greater cumulative total can be seen in intraday time frames, but for comparison sake, I want to compare Daily returns. We are talking about a system that trades about twice a month and yields about 5-6X the return of Buy and Hold. By now you should recognize the 3-line point break chart, embedded with the Blue Wave Trend Model, Precision CCI and Precision Moving Average. If you click on the Seeking Alpha link, you will find 47 articles written about Gold during the time frame charted above. That’s one site. Multiply that by 100 other sites writing about Gold. Ask yourself, how in the hell are you going to figure all that out and have any expectation of getting Gold’s investment story right?
# -*- coding: utf-8 -*- from nbsite.shared_conf import * # Declare information specific to this project. project = u'HoloViews' authors = u'PyViz developers' copyright = u'2019 ' + authors description = 'Stop plotting your data - annotate your data and let it visualize itself.' import holoviews version = release = holoviews.__version__ html_theme = 'sphinx_ioam_theme' html_static_path += ['_static'] html_theme_options = { 'logo': 'logo.png', 'favicon': 'favicon.ico', 'custom_css': 'holoviews.css' } nbbuild_cell_timeout = 360 extensions += ['nbsite.gallery'] templates_path = ['_templates'] nbsite_gallery_conf = { 'backends': ['bokeh', 'matplotlib', 'plotly'], 'galleries': {}, 'github_org': 'pyviz', 'github_project': 'holoviews' } if os.environ.get('HV_DOC_GALLERY') not in ('False', 'false', '0'): nbsite_gallery_conf['galleries']['gallery'] = { 'title': 'Gallery', 'sections': [ {'path': 'apps', 'title': 'Applications', 'skip': True}, 'demos' ] } if os.environ.get('HV_DOC_REF_GALLERY') not in ('False', 'false', '0'): nbsite_gallery_conf['galleries']['reference'] = { 'title': 'Reference Gallery', 'path': 'reference', 'sections': [ 'elements', 'containers', 'streams', 'apps' ] } MAIN_SITE = '//holoviews.org' html_context.update({ 'PROJECT': project, 'DESCRIPTION': description, 'AUTHOR': authors, 'VERSION': version, 'WEBSITE_SERVER': 'https:', # Links 'LINKS': ( ('Getting started', '/getting_started/index'), ('User Guide', '/user_guide/index'), ('Gallery', '/gallery/index'), ('Reference Gallery', '/reference/index'), ('API Docs', '/Reference_Manual/index'), ('FAQ', '/FAQ'), ('About', '/about') ), # About Links 'ABOUT': ( ('About', '/about.html') ), # Social links 'SOCIAL': ( ('Gitter', '//gitter.im/pyviz/pyviz'), ('Twitter', '//twitter.com/holoviews'), ('Github', '//github.com/pyviz/holoviews'), ), # Links for the docs sub navigation 'NAV': ( ('Getting started', 'getting_started/index'), ('User Guide', 'user_guide/index'), ('Gallery', 'gallery/index'), ('Reference Gallery', 'reference/index'), ('Releases', 'releases'), ('API', 'Reference_Manual/index'), ('FAQ', 'FAQ') ), 'js_includes': html_context['js_includes']+['holoviews.js'] })
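The two gallery sections above are switched on and off with environment variables at documentation build time. The same pattern in isolation, with the variable name taken from the conf and everything else illustrative:

import os

nbsite_gallery_conf = {'galleries': {}}

# Anything other than an explicit 'False'/'false'/'0' counts as enabled,
# mirroring the checks in the conf above.
if os.environ.get('HV_DOC_GALLERY') not in ('False', 'false', '0'):
    nbsite_gallery_conf['galleries']['gallery'] = {'title': 'Gallery'}

print(sorted(nbsite_gallery_conf['galleries']))   # [] or ['gallery']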
The people of Haiti are eager to work themselves out of poverty; all they lack is the means to get started. The village men around Lake Azuei have traditionally been fishermen, but dwindling fish stocks and an increase in the lake's depth have made it almost impossible to make a living. To make things worse, the lake contains saline water, which has bleached the soil, making farming nearly impossible too! The lack of income means limited access to healthcare, food and education. OB has already provided 20 impoverished families with the equipment (cage, young fish, fish food) and training they need to farm fish. One batch of fish generates enough ongoing income to restock with new fingerling fish and provide a profit, giving families food to eat and an income to live on. We plan to expand this very successful project to include many more families and villages near large lakes throughout Haiti, and to increase the fish farm program we already have in place in Madan Belize. » Supply and train additional families with all they need to start fish farming. This costs just £750 in total per farm/family. Success will be additional families starting to farm fish along the shoreline of Lake Azuei and becoming self-sufficient. This project is all about the long term. Initially our plan will strengthen the village of Madan Belize as a model, which we will then roll out. This will then revitalise communities all over Haiti, providing them with work, food, finance and hope long into the future. This is not about a quick fix but about sustainability and enabling the beautiful people of Haiti to stand strong on their own two feet. There is of course a risk that there could be another major earthquake in Haiti. The farms we already had in Haiti during the last earthquake sustained very little damage and the project continued as planned. Donors will receive a monthly newsletter and a monthly email update, as well as regular Facebook and Twitter updates on this project and our other projects. This project is based in Haiti. Even before the massive earthquake in January 2010, Haiti was already listed as the least developed nation in the Americas and was politically unstable. In 2003 it was estimated that about 80% of the population was living in poverty - although that figure is now estimated to be much higher. Food security is poor and in recent years there have been protests and riots over the rising cost of and lack of access to food. Life for the majority of Haitians is a struggle for survival. We currently work with families in the village of Madan Belize, but our expansion plans will incorporate thousands of additional families living near large lakes all over Haiti. This will also benefit the wider communities, bringing more finance into the villages and supplying fresh fish throughout Haiti. Operation Blessing is best placed to carry out this project as we already have a very successful and sustainable model in place, and we have built relationships and gained the trust of the Haitian villagers. Operation Blessing is a non-profit humanitarian organisation that has helped more than 202 million people in more than 105 countries since it began in 1978. For the past five years OB has been awarded Charity Navigator's highest 4-star rating for fiscal efficiency in the US. Abe, Director of Caribbean Harvest, which specialises in breeding and raising Tilapia fish, was recently honoured by Bill Clinton for his work in Haiti.
#!/usr/bin/python """ Single-player game - Nimmt6 with two players and open cards """ import game, player, card # 10.000 round / 5 seconds with Firstplayer ROUNDS = 10000 def play(): """ Main function which defines set of games for comuter bots or single game vs human""" bots = [ player.Firstplayer(), player.Randomplayer(), player.Minsumplayer(), player.Minimizenegative() ] mainbot = player.Minimizenegative() valuation = card.get_standard_value_of_card # valuation = card.get_single_value_of_card human = True human = False if not human: for bot in bots: score = 0 for round_number in range(ROUNDS): game_instance = game.Nimmtgame(seed = round_number, valuation = valuation) game_instance.add_player("bot: MAIN", mainbot) game_instance.add_player("bot: SIDE", bot) results = game_instance.start() if results[0] < results[1]: score += 1 elif results[1] < results[0]: score -= 1 print "%s : %s = %s" % (mainbot, bot, score) else: game_instance = game.Nimmtgame(verbose = True, valuation = valuation) game_instance.add_player("bot: MAIN", mainbot) game_instance.add_player("Human", player.Humanplayer()) results = game_instance.start() print results if results[0] < results[1]: print "Player A" elif results[0] == results[1]: print "Draw" else: print "Player B" if __name__ == "__main__": play()
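Assuming the game, player and card modules from this repository are importable (they are written for Python 2, so this would run under the same interpreter), a single bot-versus-bot match uses the same API the tournament loop above relies on:

import card
import game
import player

# One deterministic match between two of the bundled bots.
g = game.Nimmtgame(seed=42, valuation=card.get_standard_value_of_card)
g.add_player("bot: A", player.Minsumplayer())
g.add_player("bot: B", player.Randomplayer())

scores = g.start()   # per-player results; the loop above treats the lower value as the winner
print(scores)
if scores[0] < scores[1]:
    print("winner: A")
elif scores[1] < scores[0]:
    print("winner: B")
else:
    print("draw")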
Imagine that you’re within 140 characters of connecting with a customer, prospect, or influencer. How can you afford not to reach out? We’re talking Twitter, of course: the 300-million strong whirlpool of information that has emerged as a personal branding, relationship-building nirvana. Twitter pros have found ways to use the platform to score business and media deals -- they've even built relationships through developing successful Twitter personal brands. Yet, too many people have joined the community simply because they know they should be there, not because they’re strategic or focused. When it comes to those that have managed to scale their following and build a reputable brand on Twitter, there's a lot we can learn. To do this, I decided to go straight to the source, interviewing some well-known names with as many as hundreds of thousands of followers. Thanks to their insights and generosity, I put together a eight-step road map for developing your personal brand on Twitter. Check it out below. Step #1: Follow the leaders. Cheryl Burgess, author and CEO of Blue Focus Marketing, said she started off as a listener on Twitter, following people she admired like Kent Huffman, Tom Peters, and David Edelman, among others. You see, the beauty of Twitter is that you don’t have to go far to discover a successful marketing playbook. The platform gives you free reign to observe how the pros do it. Similarly, Neal Schaffer, CEO of Maximize Your Social and cofounder of The Social Tools Summit, says to follow people who are sharing a lot of content and who are omnipresent on Twitter. For Schaffer, that’s folks like Jeff Bullas, Mark Schaefer, Pam Moore, Lilach Bullock, Marsha Collier, and Glen Gilmore. Over time, in addition to observing Twitter luminaries at work, start to engage them. Influencers, like anyone, appreciate praise. But don’t expect an immediate home run. If the influencer eventually follows you -- or even better mentions you -- you’ve scored a coup. If you do directly reach out, see what you can offer in return – a mention in a blog post or article for instance. Burgess said she developed a relationship with Tom Peters by following him on Twitter and also recognizing him as part of a Twitter awards program she was running. If you’re wondering what impact influencers can have, consider this: Nearly 40% of Twitter users say they made a purchase as a direct result of a tweet from an influencer. Create Twitter lists of your mentors whom you can then easily monitor. Think of it as having a front row seat to your favorite performers. “Grouping my audience into categories, I see what’s happening across the world quickly and seek opportunities to help and respond,” says Mark Schaefer. Visualize your Twitter ecosystem using a tool like Mentionmapp. “Mentionmapp helps me decide whom to follow and the conversations I need to be part of,” says Burgess. Step #2: Define your brand. Clarify the type of person you want to be on Twitter. Think of this as an opportunity to showcase your capabilities, passions, and interests. Peg Fitzpatrick, social media speaker, trainer, and author, refers to this exercise as “defining the seeds of your brand.” Fitzpatrick advises selecting two or three main topics for your brand content -- for her personal brand, she zeroes in on media, her role as author (and speaker), and marketing. Focus on three seeds, or go super-niche with one main focus. Do this and “you’ll build a solid Twitter following that will love your content,” says Fitzpatrick. 
Step #3: Sharpen your profile. Don’t leave your Twitter profile to chance or whim. It’s your face to the world on Twitter. While most people will find you through your content, they’ll then check out your Twitter profile. Ensure that it defines your brand. Dump the default Twitter egg and use an image that highlights your brand, advises Burgess. Step #4: Create and curate great content. Repeat. Tweet negative things and you’ll be seen as a naysayer. Tweet helpful, insightful content and you’ll grow your reputation. If there’s a common thread among those with impressive Twitter brands, it's that they all post a steady stream of valuable content. “You can’t tweet enough,” urges Schaffer. This doesn't mean that you should aim for 100 tweets a day, but if you're seeing positive engagement, keep it going. Smith says she likes to spotlight up-and-coming bloggers and experts that not many people are tweeting about. “I want to give people a leg up and not just share the same super popular blog posts others were sharing,” she says. Don’t share content without identifying the source or the author, says Schaffer. Don’t simply say via @HubSpot or @HuffingtonPost. Take the trouble to also identify the author, who will appreciate the mention. Tweet with an image whenever possible. Posts with images on social media are 40X more likely to get shared. “I tweet 100% of my tweets with images,” says Schaffer. Twitter is a two-way street: If you reach out, people are likely to engage with you. “Put aside some time to reach out and engage with the tweets of your followers as well as influencers you would like to build a relationship with,” says Melonie Dodaro, a social selling speaker and trainer and author. Don’t expect, however, that you can outsource your engagement and be effective. All of the experts I spoke to, despite having massive number of followers, handle responses themselves. Don’t automate direct messages. Dietrich’s pet peeve is the auto direct message that encourages you to buy something from the person you just followed. Step #6: Test and analyze. Twitter gives you practically instant feedback. Almost as soon as you post something, you can see how it performs. Schaffer says he aggressively uses hashtags on Twitter so he can be found and manage his content, and also so he can compare how tweets with a certain hashtag perform against other hashtags. Take time to find the right tool for measuring and analyzing. “Find the one that makes sense to you and you’re comfortable with,” says Burgess, who uses Triberr for posting, RiteTag for finding the best hashtags, as well as several analytic tools. Step #7: Outsource strategically, if at all. According to Schaffer, you don’t need to outsource any of your Twitter efforts in the beginning. But once you start to scale your followers, consider outsourcing some of the administrative work. “Outsourcing content curation is one of the first areas busy business owners ought to consider. It's highly worthwhile and ensures your Twitter presence stays active and relevant,” says Smith. "I'm the only one that replies and engages, though,” she adds, “as I never actually delegate my conversations. I also live tweet events." Schaefer says, “I do 100% of my own tweets. I feel strongly that I don’t want to disappoint anybody. I never want to be in a position where somebody is engaging with me and then they discover that it’s not really me.” Schaefer says the only thing he outsources is some of the administration on his account, like managing followers. 
Step #8: Put in the time. Now that you know what to do, you need to devote time each day to just doing it. Brenner’s advice? “You have to find the time to make small investments in social every day,” he says. Find your focus by following one course until successful and stick to it, says Smith. “Publish daily tweets around your chosen focus. But don’t forget to engage, too.” These Twitter brand experts have cracked the code. And so can you if you follow these eight steps. Remember the adage: success is no accident. You have to work at it. What are your best tips for building a memorable brand presence? Share them below.
# -*- coding: utf-8 -*- from duralex.AbstractVisitor import AbstractVisitor from .AddCommitMessageVisitor import int_to_roman from . import template from . import diff from duralex.alinea_parser import * import duralex.tree as tree from bs4 import BeautifulSoup import jinja2 import os import subprocess import tempfile from distutils.dir_util import copy_tree class CreateGitBookVisitor(AbstractVisitor): def __init__(self, args): self.gitbook_dir = args.gitbook self.tmp_dir = tempfile.mkdtemp() self.formats = args.gitbook_format super(CreateGitBookVisitor, self).__init__() def write_file(self, filename, data): f = open(self.tmp_dir + '/' + filename, 'w') f.write(data.encode('utf-8')) f.close() def get_article_commit_title(self, node): ancestors = get_node_ancestors(node) messages = [] for ancestor in ancestors: if 'type' not in ancestor: continue; if ancestor['type'] == tree.TYPE_BILL_ARTICLE: messages.append('Article ' + str(ancestor['order'])) elif ancestor['type'] == tree.TYPE_AMENDMENT: messages.append('Amendement ' + str(ancestor['id'])) elif ancestor['type'] == tree.TYPE_HEADER1: messages.append(int_to_roman(ancestor['order'])) elif ancestor['type'] == tree.TYPE_HEADER2: messages.append(unicode(ancestor['order']) + u'°') elif ancestor['type'] == tree.TYPE_HEADER3: messages.append(unicode(chr(ord('a') + ancestor['order'])) + u')') return ', '.join(messages[::-1]) def get_article_commit_diff(self, edit, target_title, target_href): if 'htmlDiff' in edit: soup = BeautifulSoup(edit['htmlDiff'], "html5lib") filename_div = soup.find('div', {'class': 'diff-filename'}) a_tag = soup.new_tag('a', href=target_href) a_tag.string = target_title filename_div.string = '' filename_div.append(a_tag) return unicode(soup.body.div) elif 'diff' in edit: process = subprocess.Popen( 'diff2html -i stdin -d word -o stdout --su hidden -s line', shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = process.communicate(input=edit['diff'].encode('utf-8') + '\n') soup = BeautifulSoup(out, "html5lib") return (str(list(soup.find_all('style'))[0]) + '\n\n' + unicode(soup.find('div', {'id': 'diff'}))) def get_commits(self, node): edit_nodes = filter_nodes(node, lambda n: 'type' in n and n['type'] == tree.TYPE_EDIT) commits = [] for edit_node in edit_nodes: article_refs = filter_nodes(edit_node, lambda n: n['type'] == tree.TYPE_ARTICLE_REFERENCE) # FIXME: amendment that targets a bill article and not a law/code article if len(article_refs) == 0: continue article_ref = article_refs[0] target_title, target_href = self.get_deep_link(self.get_edit_target_nodes(article_ref)) commits.append({ 'title': self.get_article_commit_title(edit_node), # remove the " ({reference list})" from the commit message since its already printed # in the header above 'description': re.sub(r' \(.*\)', '', edit_node['commitMessage'].splitlines()[0]) if 'commitMessage' in edit_node else None, 'diff': self.get_article_commit_diff(edit_node, target_title, target_href), 'target': { 'title': target_title, 'link': target_href } }) return commits def get_articles(self, node): articles = [] article_nodes = filter_nodes(node, lambda n: n['type'] == tree.TYPE_BILL_ARTICLE) for article_node in article_nodes: articles.append({ 'order': article_node['order'], 'content': article_node['content'], 'commits': self.get_commits(article_node), 'githubIssue': article_node['githubIssue'] if 'githubIssue' in article_node else None, 'gitlabIssue': article_node['gitlabIssue'] if 'gitlabIssue' in article_node else None }) return 
articles def get_amendments(self, node): amendments = [] amendment_nodes = filter_nodes(node, lambda n: n['type'] == tree.TYPE_AMENDMENT) for amendment_node in amendment_nodes: amendments.append({ 'id': amendment_node['id'], 'content': amendment_node['content'], 'commits': self.get_commits(amendment_node), 'signatories': amendment_node['signatories'], 'description': amendment_node['description'], }) return amendments def merge_dicts(self, *dict_args): """ Given any number of dicts, shallow copy and merge into a new dict, precedence goes to key value pairs in latter dicts. """ result = {} for dictionary in dict_args: result.update(dictionary) return result def visit_node(self, node): super(CreateGitBookVisitor, self).visit_node(node) if tree.is_root(node): edits = self.build_edit_matrix(node) articles = self.get_articles(node) amendments = self.get_amendments(node) modified_texts = self.get_modified_texts(edits) template_data = { 'title': self.get_book_title(node), 'url': node['url'], 'type': node['type'], 'description': node['description'], 'modified': modified_texts, 'articles': articles, 'amendments': amendments, 'tree': node, } if 'cocoricoVote' in node: template_data['cocorico_vote'] = node['cocoricoVote'] template.template_file( 'gitbook/book.json.j2', template_data, os.path.join(self.tmp_dir, 'book.json') ) template.template_file( 'gitbook/styles/website.css.j2', template_data, os.path.join(self.tmp_dir, 'styles/website.css') ) template.template_file( 'gitbook/SUMMARY.md.j2', template_data, os.path.join(self.tmp_dir, 'SUMMARY.md') ) template.template_file( 'gitbook/README.md.j2', template_data, os.path.join(self.tmp_dir, 'README.md') ) current_article = 0 for article in articles: template.template_file( 'gitbook/article.md.j2', self.merge_dicts(template_data, {'current_article': current_article}), os.path.join(self.tmp_dir, 'article-' + str(article['order']) + '.md') ) current_article += 1 current_amendment = 0 for amendment in amendments: template.template_file( 'gitbook/amendment.md.j2', self.merge_dicts(template_data, {'current_amendment': current_amendment}), os.path.join(self.tmp_dir, 'amendment-' + str(amendment['id']) + '.md') ) current_amendment += 1 current_article = 0 current_law = 0 for modified in modified_texts: template.template_file( 'gitbook/law.md.j2', self.merge_dicts(template_data, { 'current_law': current_law, }), os.path.join(self.tmp_dir, modified['law'] + '.md') ) for article in modified['articles']: template.template_file( 'gitbook/text.md.j2', self.merge_dicts(template_data, { 'current_law': current_law, 'current_article': current_article }), os.path.join(self.tmp_dir, modified['law'] + '-' + article['id'] + '.md') ) current_article += 1 current_law += 1 if 'html' in self.formats: self.cmd('gitbook install') self.cmd('gitbook build') if 'markdown' in self.formats: copy_tree(self.tmp_dir, self.gitbook_dir) else: copy_tree(os.path.join(self.tmp_dir, '_book'), self.gitbook_dir) else: copy_tree(self.tmp_dir, self.gitbook_dir) def cmd(self, command): process = subprocess.Popen( command, cwd=self.tmp_dir, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE ) return process.communicate() def get_book_title(self, root_node): title = '' if root_node['type'] == tree.TYPE_LAW_PROJECT: title = 'Projet De Loi' elif root_node['type'] == tree.TYPE_LAW_PROPOSAL: title = 'Proposition De Loi' if 'id' in root_node: title += u' N°' + str(root_node['id']) if 'legislature' in root_node: title += ', ' + str(root_node['legislature']) + u'ème 
législature' return title def patch(self, original, unified_diff): fd, filename = input_file = tempfile.mkstemp() os.write(fd, original.encode('utf-8')) process = subprocess.Popen( 'patch -r - -p0 --output=- ' + filename, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE ) out, err = process.communicate(input=unified_diff.encode('utf-8') + '\n') return ''.join(out).decode('utf-8') def get_deep_link(self, nodes): href = [] title = [] for node in nodes: if node['type'] == tree.TYPE_LAW_REFERENCE: title.append(u'Loi N°' + node['id']) href.append(node['id']) elif node['type'] == tree.TYPE_BILL_ARTICLE: title.append(u'Article ' + str(node['order'])) href.append(u'article-' + str(node['order']) + '.md#article-' + str(node['order'])) elif node['type'] == tree.TYPE_AMENDMENT: title.append(u'Amendment ' + node['id']) href.append(u'amendment-' + node['id'] + '.md#amendment-' + node['id']) elif node['type'] == tree.TYPE_ARTICLE_REFERENCE: title.append(u'Article ' + node['id']) href.append(node['id'] + '.md') elif node['type'] == tree.TYPE_HEADER1: title.append(int_to_roman(node['order'])) href.append(int_to_roman(node['order'])) elif node['type'] == tree.TYPE_HEADER2: title.append(unicode(node['order']) + u'°') href.append(str(node['order']) + u'°') elif ancestor['type'] == tree.TYPE_HEADER3: title.append(unicode(chr(ord('a') + ancestor['order'])) + u')') href.append(unicode(chr(ord('a') + ancestor['order'])) + u')') return (', '.join(title), '-'.join(href)) def get_edit_target_nodes(self, node): nodes = [] if tree.is_reference(node): nodes.append(node) nodes += filter( lambda n: tree.is_reference(n), get_node_ancestors(node) ) return sorted( nodes, key=lambda n: tree.TYPE_REFERENCE.index(n['type']) ) def get_edit_source_nodes(self, node): edit_source_types = [ tree.TYPE_AMENDMENT, tree.TYPE_BILL_ARTICLE, tree.TYPE_HEADER1, tree.TYPE_HEADER2, tree.TYPE_HEADER3, ] return sorted( filter( lambda n: 'type' in n and n['type'] in edit_source_types, get_node_ancestors(node) ), key=lambda n: edit_source_types.index(n['type']) ) def get_original_content(self, ref): if ref['type'] == tree.TYPE_BILL_ARTICLE_REFERENCE: bill_article = tree.filter_nodes( tree.get_root(ref), lambda n: n['type'] == tree.TYPE_BILL_ARTICLE and n['order'] == ref['order'] ) if len(bill_article) == 1: return bill_article[0]['content'] elif ref['type'] == tree.TYPE_ARTICLE_REFERENCE: f = open(ref['filename'], 'r') text = f.read().decode('utf-8') f.close() return text def get_modified_texts(self, edits): modified = [] edits = edits[tree.TYPE_BILL_ARTICLE] law_ids = set([i[0] for i in edits.keys()]) for law_id in law_ids: law_edits = {k: v for k, v in edits.iteritems() if k[0] == law_id} articles = [] for k, v in edits.iteritems(): law_ref = filter_nodes(v[0][-1], lambda n: n['type'] in [tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE] and n['id'] == k[0])[0] article_ref = filter_nodes(law_ref, lambda n: n['type'] == tree.TYPE_ARTICLE_REFERENCE and n['id'] == k[1])[0] original_text = self.get_original_content(article_ref) text = original_text commits = [] for edit_source in v: title, href = self.get_deep_link(edit_source) commits.append({'title': title, 'link': href}) edit_refs = filter_nodes(edit_source[-1], lambda n: n['type'] == tree.TYPE_EDIT) for edit_ref in edit_refs: if 'diff' in edit_ref: text = self.patch(text, edit_ref['diff']) article = { 'id': k[1], 'diff': diff.make_html_rich_diff(original_text, text), 'commits': commits } if 'gitlabHistory' in article_ref: article['gitlabHistory'] = 
article_ref['gitlabHistory'] if 'githubHistory' in article_ref: article['githubHistory'] = article_ref['githubHistory'] articles.append(article) articles = sorted(articles, key=lambda x: x['id'].replace('-', ' ')) modified.append({'law': law_id, 'articles': articles}) return modified def build_edit_matrix(self, node): edits = { tree.TYPE_BILL_ARTICLE: {}, tree.TYPE_AMENDMENT: {}, } # fetch bill articles targeting law articles self.build_edit_matrix_for_types( node, edits[tree.TYPE_BILL_ARTICLE], [tree.TYPE_BILL_ARTICLE], [tree.TYPE_ARTICLE_REFERENCE], [tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE] ) self.build_edit_matrix_for_types( node, edits[tree.TYPE_AMENDMENT], [tree.TYPE_AMENDMENT], [tree.TYPE_ARTICLE_REFERENCE], [tree.TYPE_LAW_REFERENCE, tree.TYPE_CODE_REFERENCE] ) # fetch amendments targeting bill articles # self.build_edit_matrix_for_types( # node, # edits, # [tree.TYPE_AMENDMENT], # [tree.TYPE_BILL_ARTICLE_REFERENCE], # None # ) return edits def build_edit_matrix_for_types(self, node, edits, source_type, target_type, repo_types): article_refs = [] sources = filter_nodes( node, lambda n: 'type' in n and n['type'] in source_type ) for source in sources: article_refs += filter_nodes( source, lambda n: 'type' in n and n['type'] in target_type ) for article_ref in article_refs: repo_refs = filter( lambda n: 'type' in n and n['type'] in repo_types, get_node_ancestors(article_ref) ) if len(repo_refs) != 0: key = (repo_refs[0]['id'], article_ref['id']) if key not in edits: edits[key] = [] edits[key].append(self.get_edit_source_nodes(article_ref))
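The patch() helper above shells out to the patch(1) command to apply a unified diff to an in-memory string. The same round trip in Python 3 with subprocess.run, kept outside the visitor for clarity and purely illustrative:

import subprocess
import tempfile

def apply_unified_diff(original, unified_diff):
    # Write the original text to a temporary file, then let patch(1) apply
    # the diff read from stdin and emit the patched result on stdout.
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write(original)
        path = f.name
    result = subprocess.run(
        ['patch', '-p0', '--output=-', path],
        input=unified_diff.encode('utf-8'),
        stdout=subprocess.PIPE,
        check=True,
    )
    return result.stdout.decode('utf-8')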
Pastor Scott Lyons received his BS degree in Early Childhood Education from Bloomsburg University and a Master of Ministry from Kentucky Christian College. He taught fifth grade for the Pocono Mountain School District before becoming the Associate Minister at Sweet Valley Church of Christ in 1998. He and his family then moved to minister with Cross View Christian Church in Waynesville, Ohio. Scott enjoys visiting the ill and homebound and can occasionally be found substitute preaching. He also has a passion for helping people find their place to perform kingdom work through volunteering.
"""Review Board feature checkers.""" from __future__ import unicode_literals from django.conf import settings from djblets.features.checkers import SiteConfigFeatureChecker class RBFeatureChecker(SiteConfigFeatureChecker): """Feature checker that checks against a LocalSite's configuration. Features can be enabled/disabled on a per-LocalSite basis by setting the specified feature ID to either ``True`` or ``False`` in the ``enabled_features`` key in that LocalSite's :py:attr:`~reviewboard.sites.models.LocalSite.extra_data`` field. If the key is absent, this checker will check against the site configuration (and then the Django settings) to see if it is enabled or disabled globally. """ EXTRA_DATA_KEY = SiteConfigFeatureChecker.siteconfig_key def is_feature_enabled(self, feature_id, **kwargs): """Return whether a feature is enabled for a given ID. Features are strictly additive. That is, if a feature is enabled globally (e.g., via :py:class:`~djblets.siteconfig.models.SiteConfiguration` or via :file:`settings_local.py`), disabling it for a :py:class:`~reviewboard.site.models.LocalSite` will still result in the feature being available (i.e., this function will return ``True``). Args: feature_id (unicode): The unique identifier of the feature whose status is to be determined. **kwargs (dict): Additional keyword arguments. Keyword Args: request (django.http.HttpRequest, optional): An optional request. If this request is made against a LocalSite, that LocalSite will be used to look up the feature. Either this argument or ``local_site`` must be provided to enable checking against a LocalSite. If provided, it will be used to cache the results of the :py:class:`~reviewboard.site.models.LocalSite` lookup. local_site (reviewboard.site.models.LocalSite, optional): An optional local site. If provided, this LocalSite will be used to look up the status of the requested feature. Either this argument or ``request`` must be provided to enable checking against a LocalSite. force_check_user_local_sites (bool, optional): Force checking the Local Sites that the user is a member of. This is only used for unit tests, and disables some optimizations intended to stabilize query counts. Returns: bool: Whether or not the feature is enabled. """ local_site = kwargs.get('local_site') request = kwargs.get('request') force_check_user_local_sites = \ kwargs.get('force_check_user_local_sites', False) local_sites = [] if local_site: local_sites.append(local_site) elif request is not None: try: local_sites = request._user_local_sites_cache except AttributeError: if getattr(request, 'local_site', None): local_sites.append(request.local_site) # Note that if we're running unit tests, we don't really want # to bother checking other Local Site associations. They're not # going to come into play unless we're testing this logic # itself, and the generated number of queries becomes too # unpredictable whenever we introduce new features that aren't # enabled by default. if (request.user.is_authenticated() and (force_check_user_local_sites or not getattr(settings, 'RUNNING_TEST', False))): local_sites.extend(request.user.local_site.all()) request._user_local_sites_cache = local_sites for local_site in local_sites: if (local_site.extra_data and local_site.extra_data.get(self.EXTRA_DATA_KEY, {}).get(feature_id)): return True return super(RBFeatureChecker, self).is_feature_enabled(feature_id, **kwargs)
"I have found and drilled this Lake Michigan beachstone, hand strung it onto a quality waxed cotton cord and topped it with a rosy peach recycled glass Krobo artisan bead, all depicting the colors from this beautiful natural granite beachstone. This casual unisex necklace can be worn long as is - perhaps for layering with others, or you may snip and re-knot the cord to your liking and is 48 inches" Total prize value is $90!! Hop on board the Great Blog Train & travel with us to "Campbell River, British Columbia CANADA"!!
""" Markov takes out his Snakes and Ladders game, stares at the board and wonders: "If I can always roll the die to whatever number I want, what would be the least number of rolls to reach the destination?" Rules The game is played with a cubic die of faces numbered to . Starting from square , land on square with the exact roll of the die. If moving the number rolled would place the player beyond square , no move is made. If a player lands at the base of a ladder, the player must climb the ladder. Ladders go up only. If a player lands at the mouth of a snake, the player must go down the snake and come out through the tail. Snakes go down only. Function Description Complete the quickestWayUp function in the editor below. It should return an integer that represents the minimum number of moves required. quickestWayUp has the following parameter(s): ladders: a 2D integer array where each contains the start and end cell numbers of a ladder snakes: a 2D integer array where each contains the start and end cell numbers of a snake Input Format The first line contains the number of tests, . For each testcase: - The first line contains , the number of ladders. - Each of the next lines contains two space-separated integers, the start and end of a ladder. - The next line contains the integer , the number of snakes. - Each of the next lines contains two space-separated integers, the start and end of a snake. Constraints The board is always with squares numbered to . Neither square nor square will be the starting point of a ladder or snake. A square will have at most one endpoint from either a snake or a ladder. Output Format For each of the t test cases, print the least number of rolls to move from start to finish on a separate line. If there is no solution, print -1. Sample Input 2 3 32 62 42 68 12 98 7 95 13 97 25 93 37 79 27 75 19 49 47 67 17 4 8 52 6 80 26 42 2 72 9 51 19 39 11 37 29 81 3 59 5 79 23 53 7 43 33 77 21 Sample Output 3 5 """ #!/bin/python3 import math import os import random import re import sys from collections import defaultdict class Graph: def __init__(self): self.neighbours=defaultdict(list) def add_edge(self,u,v,dist): if dist >= 0: self.neighbours[u].append([v, dist]) else: self.neighbours[u] = [[v, 0]] def add_node(self, a): self.nodes[a] = [] def shortest_path(self): queue = [] visited = {} queue.append([0, 0]) while queue: index, rolls = queue.pop(0) if index in visited: continue visited[index] = rolls if index == 99: break for neighbour in self.neighbours[index]: if neighbour[0] not in visited: queue.append([neighbour[0], rolls + neighbour[1]]) if 99 in visited: return visited[99] else: return -1 # Complete the quickestWayUp function below. def quickestWayUp(ladders, snakes): g = Graph() for i in range(99): for j in range(1, 7): g.add_edge(i, i + j, 1) for ladder in ladders: g.add_edge(ladder[0]-1, ladder[1]-1, 0) for snake in snakes: g.add_edge(snake[0]-1, snake[1]-1, 0) return g.shortest_path() if __name__ == '__main__': fptr = sys.stdout t = int(input()) for t_itr in range(t): n = int(input()) ladders = [] for _ in range(n): ladders.append(list(map(int, input().rstrip().split()))) m = int(input()) snakes = [] for _ in range(m): snakes.append(list(map(int, input().rstrip().split()))) result = quickestWayUp(ladders, snakes) fptr.write(str(result) + '\n') fptr.close()
The Candidates Report can be viewed on Illinois Lawyer Now. View biographies of candidates for third vice-president and the board of governors. Ballots were distributed Wednesday, March 27, 2019. The deadline for voting is Tuesday, April 30, 2019 at 4:30 p.m. Central Time for all voting methods: e-ballot, paper or internet. All members of the Association (except non-lawyer members) with dues paid by March 1, 2019 are eligible to vote. What race(s) will be on my ballot? Eligible members with ISBA addresses in the areas in which there are contested elections may vote in the appropriate Board of Governors and Assembly race. Pursuant to the Policy and Procedures on Association Elections, when a vacancy is uncontested (candidates filing equaled or were less than the number of candidates to be elected) each candidate is automatically elected. What kind of ballot will I receive? What if I don't want to vote using the ballot I receive? Members receiving e-ballots may request a paper ballot as explained in the e-ballot email. The deadline to request a paper ballot is April 15, 2019. Members receiving paper ballots may vote via internet following instructions provided on the paper ballot.
if __name__ == '__main__':
    print("The upgrade script has changed. You need to execute the upgrade command again to update the data structure.")
    exit(0)

import json
import os
import shutil
import time

from common import file


def change_time_format(list_item):
    # Convert a human-readable "time" string into a Unix timestamp, using the
    # format configured in config/system.json.
    system_info = json.loads(file.read_file("./config/system.json"))
    if "time" in list_item and isinstance(list_item["time"], str):
        list_item["time"] = time.mktime(time.strptime(list_item["time"], system_info["Time_Format"]))
    return list_item


def main():
    # Back up configuration, documents and uploaded files before rewriting anything.
    if not os.path.exists("./backup"):
        os.mkdir("./backup")
    shutil.copytree("./config", "./backup/config")
    shutil.copytree("./document", "./backup/document")
    if os.path.exists("./templates/static/user_file"):
        shutil.copytree("./templates/static/user_file", "./backup/static/user_file")

    write_json = json.loads(file.read_file("./config/page.json"))
    write_json = list(map(change_time_format, write_json))
    file.write_file("./config/page.json", file.json_format_dump(write_json))

    for filename in os.listdir("./document/"):
        if filename.endswith(".json"):
            write_json = json.loads(file.read_file("./document/" + filename))
            write_json = change_time_format(write_json)
            file.write_file("./document/" + filename, file.json_format_dump(write_json))
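# Hedged illustration of what change_time_format does to each record, assuming
# a Time_Format such as "%Y-%m-%d %H:%M:%S" in config/system.json (the actual
# format string lives in that file): a human-readable "time" string becomes a
# Unix timestamp (a float, interpreted in the local timezone).
#
#     >>> import time
#     >>> time.mktime(time.strptime("2020-01-01 00:00:00", "%Y-%m-%d %H:%M:%S"))
#     1577836800.0   # exact value depends on the local timezone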
“Tradition, comradery, friendship and relatives” keep the Verboort Sausage & Sauerkraut Festival thriving after 82 years in the small Dutch community of Verboort, Oregon, as described by Lois Verboort. The community of 500 works diligently under Giant Sequoia redwood trees to produce a festival attracting a sausage-loving crowd of 10,000. Preparation for the sausage festival takes months. The sauerkraut alone takes six weeks to ferment in fifty-gallon buckets. Longtime resident Albert Vanderzanden marvels at the simple recipe: “It’s cabbage and salt!” he exclaims. Nearly all of the food served at the Verboort Dinner is local and handmade. Apples are plucked from nearby orchards and turned into applesauce. Sausage is ground and smoked on the parish grounds with a family recipe passed down through the generations, and much pride is taken in the renowned original recipe. This tradition bonds the community together. Sr. Clare Vandecovering explains, “The children get so excited…that they get to have a part… everyone is very proud that they get a part.” She fondly remembers simpler times from days past and counts herself lucky to have been able to participate in both eras. More than just sausage, guests come to participate or volunteer for the event because of the friendly and welcoming lifestyle of those who reside in Verboort. Across the street from the church grounds, a herd of cows chomps on lush green grass and pleasantly moos under the unscathed sky. With many local farms surrounding, teamwork and a neighbor’s helping hand are often needed to complete a project. This friendly attitude among neighbors has helped mold the open and hardworking community that is present-day Verboort. The number of farms has severely decreased in the past one hundred years, but the attitude and spirit have remained. The Catholic elementary school is at the heart of Verboort, and most of the event proceeds support the operations of the school. Passionate parishioners of the church work diligently to ensure the stability of both the operations and the core values of the school. It takes not only family but the whole community to secure the future generations of Verboort. Sausage is not at the root of what makes this community, but it does allow the community an annual celebration of heritage. Longtime resident Florence Crop notes that “If you are happy where you are, it is home,” and she can’t think of any other place that she would rather live than under the Giant Sequoia redwood trees.
""" picture.py Author: Ethan Adner Credit: Hex color codes Assignment: Use the ggame library to "paint" a graphical picture of something (e.g. a house, a face or landscape). Use at least: 1. Three different Color objects. 2. Ten different Sprite objects. 3. One (or more) RectangleAsset objects. 4. One (or more) CircleAsset objects. 5. One (or more) EllipseAsset objects. 6. One (or more) PolygonAsset objects. See: https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics for general information on how to use ggame. See: http://brythonserver.github.io/ggame/ for detailed information on ggame. """ from ggame import App, Color, LineStyle, Sprite, RectangleAsset, CircleAsset, EllipseAsset, PolygonAsset # add your code here \/ \/ \/ red = Color(0xff0000, 1.0) green = Color(0x00ff00, 1.0) blue = Color(0x0000ff, 1.0) black = Color(0x000000, 1.0) orange = Color(0xff7400, 1.0) beige = Color(0xffffd8, 1.0) white = Color(0xffffff, 1.0) noline = LineStyle(2 , black) nlu = LineStyle(5 , blue) thickline = LineStyle(5 , red) thickline2 = LineStyle(5, orange) circle = CircleAsset(10, noline, beige) poly = PolygonAsset([(20,20), (30,40), (50,160), (20,100)], thickline, red) portal1 = EllipseAsset(40, 10, nlu, white) rectum = RectangleAsset(40, 60, thickline2, orange) portal2 = EllipseAsset(40, 10, thickline2, white) rectum2 = RectangleAsset(40, 30, thickline2, orange) legs = RectangleAsset(5, 30, thickline2, orange) #arm1 = #arm2 = arm1 = Sprite(legs, (130, 480)) arm2 = Sprite(legs, (30, 480)) arm1.rotation=-1 arm2.rotation=1 #arm1.roation=.5 Sprite(circle, (80, 478)) Sprite(poly, (90, 530)) Sprite(portal1, (80, 150)) Sprite(portal2, (80, 550)) Sprite(rectum, (60, 490)) Sprite(rectum2, (60, 150)) Sprite(legs, (60, 180)) Sprite(legs, (95, 180)) # add your code here /\ /\ /\ myapp = App() myapp.run()
Entries are now open for the next QLD event in the Maximum Adventure Race Series, at Enoggera, just 30 mins north of Brisbane CBD. If you missed out on a spot in our recent Sunshine Coast race in March, or if you loved it so much that you’re keen to do another, now’s your chance to have a go without waiting a whole year for the next one! This central location offers everything an adventure racer could hope for and is close enough for teams in the Brisbane area or those travelling up from the Gold Coast or down from the Sunshine Coast. With a great mix of mountain biking, trail running and kayaking as well as a Novice (beginner) and Classic course option, all you need is a teammate, a mountain bike and a sense of adventure! With kayaks provided by us and the option to hire a mountain bike if you’re in need, you really have no excuse not to sign up and experience the excitement that is adventure racing, first hand.
# Unofficial companion web-app for Elite: Dangerous (property of Frontier # Developments). Collector-Drone lets you manage blueprints and material # inventory for crafting engineer upgrades. # Copyright (C) 2016 Frederik Schumacher # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. class ServiceError(Exception): status_code = 400 def __init__(self, message, status_code=None, **payload): super(ServiceError, self).__init__() self.message = message if status_code is not None: self.status_code = status_code self.payload = payload def to_dict(self): rv = dict(self.payload or ()) rv['message'] = self.message return rv
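# Hedged usage sketch: one common way to surface ServiceError to API clients
# is a Flask error handler. Flask and the example route below are assumptions
# for illustration only; they are not part of this module.
from flask import Flask, jsonify

app = Flask(__name__)


@app.errorhandler(ServiceError)
def handle_service_error(error):
    # Serialize the message and payload, and honour the error's status code.
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response


@app.route('/blueprints/<blueprint_id>')
def get_blueprint(blueprint_id):
    # Hypothetical endpoint that rejects unknown blueprints.
    raise ServiceError('unknown blueprint', status_code=404,
                       blueprint_id=blueprint_id)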
I’m sharing this video of my wife’s cat to make you laugh with me. This cat is so funny! The story goes like this: I left a few packets on the bed, and the cat came into the room and saw them. She likes to know about everything, so she started to investigate them. The first packet was the pink one. She dug into it and couldn’t get back out ))) I helped her and thought that would be the end of her investigations, but she started again, and that’s when I took this video.
import wx import os, sys class MyFrame(wx.Frame): def __init__(self, parent, id, title): wx.Frame.__init__(self, parent, id, title) panel = wx.Panel(self,-1) wx.StaticText(panel, -1, 'See Logs for results\ncaptures/key_found.txt', (45, 25), style=wx.ALIGN_CENTRE) self.CreateStatusBar() menuBar = wx.MenuBar() menu = wx.Menu() menu.Append(99, "&WPA2 Cracker", "Crack WPA/WPA2 handshakes") #~ menu.Append(100, "&NTLM Cracker", "Crack NTLM Hashes") #~ menu.Append(101, "&File Dialog", "Shows a File Dialog") #~ menu.Append(102, "&Page Setup Dialog", "Shows a Page Setup Dialog") #~ menu.Append(103, "&Font Dialog", "Shows a Font Dialog") #~ menu.Append(104, "&Directory Dialog", "Shows a Directory Dialog") #~ menu.Append(105, "&SingleChoice Dialog", "Shows a SingleChoice Dialog") #~ menu.Append(106, "&TextEntry Dialog", "Shows a TextEntry Dialog") menuBar.Append(menu, "&Cracker") self.SetMenuBar(menuBar) self.Bind(wx.EVT_MENU, self.openfile, id=99) #dlg.Destroy() #~ def results_output(self, e): #~ with open('captures/key_found.txt') as f: #~ for i in f: #~ #~ if i == "": #~ #~ wx.StaticText(panel, -1, "Key is not found: ", (45, 25), style=wx.ALIGN_CENTRE) #~ self.Centre() #~ else: #~ wx.StaticText(panel, -1, "Key is found: " + str(i), (45, 25), style=wx.ALIGN_CENTRE) #~ self.Centre() #~ self.Bind(wx.EVT_MENU, self.choosecolor, id=100) #~ self.Bind(wx.EVT_MENU, self.openfile, id=101) #~ self.Bind(wx.EVT_MENU, self.pagesetup, id=102) #~ self.Bind(wx.EVT_MENU, self.choosefont, id=103) #~ self.Bind(wx.EVT_MENU, self.opendir, id=104) #~ self.Bind(wx.EVT_MENU, self.singlechoice, id=105) #~ self.Bind(wx.EVT_MENU, self.textentry, id=106) #~ def message(self, event): #~ dlg = wx.MessageDialog(self, 'To save one life is as if you have saved the world.', 'Talmud', wx.OK|wx.ICON_INFORMATION) #~ dlg.ShowModal() #~ dlg.Destroy() #~ def choosecolor(self, event): #~ dlg = wx.ColourDialog(self) #~ dlg.GetColourData().SetChooseFull(True) #~ if dlg.ShowModal() == wx.ID_OK: #~ data = dlg.GetColourData() #~ self.SetStatusText('You selected: %s\n' % str(data.GetColour().Get())) #~ dlg.Destroy() def openfile(self, event): dlg = wx.FileDialog(self, "Choose a file", os.getcwd(), "", "*.*", wx.OPEN) if dlg.ShowModal() == wx.ID_OK: path = dlg.GetPath() mypath = os.path.basename(path) self.SetStatusText("You selected: %s" % mypath) os.system("gnome-terminal -x aircrack-ng -w "+str(path)+" captures/*.cap -l captures/key_found.txt") dlg.Destroy() #~ def pagesetup(self, event): #~ dlg = wx.PageSetupDialog(self) #~ if dlg.ShowModal() == wx.ID_OK: #~ data = dlg.GetPageSetupData() #~ tl = data.GetMarginTopLeft() #~ br = data.GetMarginBottomRight() #~ self.SetStatusText('Margins are: %s %s' % (str(tl), str(br))) #~ dlg.Destroy() #~ def choosefont(self, event): #~ default_font = wx.Font(10, wx.SWISS , wx.NORMAL, wx.NORMAL, False, "Verdana") #~ data = wx.FontData() #~ if sys.platform == 'win32': #~ data.EnableEffects(True) #~ data.SetAllowSymbols(False) #~ data.SetInitialFont(default_font) #~ data.SetRange(10, 30) #~ dlg = wx.FontDialog(self, data) #~ if dlg.ShowModal() == wx.ID_OK: #~ data = dlg.GetFontData() #~ font = data.GetChosenFont() #~ color = data.GetColour() #~ text = 'Face: %s, Size: %d, Color: %s' % (font.GetFaceName(), font.GetPointSize(), color.Get()) #~ self.SetStatusText(text) #~ dlg.Destroy() #~ #~ def opendir(self, event): #~ dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON) #~ if dlg.ShowModal() == wx.ID_OK: #~ self.SetStatusText('You selected: %s\n' % dlg.GetPath()) #~ 
dlg.Destroy() #~ #~ def singlechoice(self, event): #~ sins = ['Greed', 'Lust', 'Gluttony', 'Pride', 'Sloth', 'Envy', 'Wrath'] #~ dlg = wx.SingleChoiceDialog(self, 'Seven deadly sins', 'Which one?', sins, wx.CHOICEDLG_STYLE) #~ if dlg.ShowModal() == wx.ID_OK: #~ self.SetStatusText('You chose: %s\n' % dlg.GetStringSelection()) #~ dlg.Destroy() #~ #~ def textentry(self, event): #~ dlg = wx.TextEntryDialog(self, 'Enter some text','Text Entry') #~ dlg.SetValue("Default") #~ if dlg.ShowModal() == wx.ID_OK: #~ self.SetStatusText('You entered: %s\n' % dlg.GetValue()) #~ dlg.Destroy() class MyApp(wx.App): def OnInit(self): myframe = MyFrame(None, -1, "Cracker") myframe.CenterOnScreen() myframe.Show(True) return True #~
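# The module above defines MyApp but never starts it; the standard wxPython
# bootstrap below is assumed to be the missing entry point.
if __name__ == '__main__':
    app = MyApp(0)
    app.MainLoop()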
The oil is sold to customers in Mutare and surrounding areas. This is Mabvazuwa Sunflower Oil, Africa's best homegrown cooking oil. It benefits the Zimbabwean farmers who grow the seeds and the Zimbabwean factory workers who process them into oil. Most countries have large refineries producing cooking oil, but oilseed can also be processed on a small scale with equipment such as sunflower oil press machines and a drying tower for edible oil pretreatment; soybean oil refining generally uses continuous refining methods.
#!/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages


read = lambda filepath: codecs.open(filepath, 'r', 'utf-8').read()


def exec_file(filepath, globalz=None, localz=None):
    exec(read(filepath), globalz, localz)


# Load package meta from the pkgmeta module without loading the package.
pkgmeta = {}
exec_file(os.path.join(os.path.dirname(__file__), 'ecstatic', 'pkgmeta.py'),
          pkgmeta)

setup(
    name=pkgmeta['__title__'],
    version=pkgmeta['__version__'],
    description='An expansion pack for django.contrib.staticfiles!',
    long_description=read(os.path.join(os.path.dirname(__file__), 'README.rst')),
    author=pkgmeta['__author__'],
    author_email='m@tthewwithanm.com',
    url='http://github.com/hzdg/django-ecstatic',
    download_url='http://github.com/hzdg/django-ecstatic/tarball/master',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    tests_require=[
    ],
    install_requires=[
        'Django>=1.4',
        'django-appconf>=0.5',
    ],
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Utilities'
    ],
)
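# Hedged sketch of the ecstatic/pkgmeta.py module that exec_file() loads
# above. The values are illustrative only; setup() merely requires __title__,
# __version__ and __author__ to be defined there.
__title__ = 'django-ecstatic'
__version__ = '0.0.0'   # hypothetical version number
__author__ = 'hzdg'     # hypothetical author string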
Do it now… Be discovered. yourself to be amongst the best. Sign up now… Be discovered. Cast your singing power to the ultimate level. Be confident, be bold, be found and sing like the biggest star of your generation. U.S. International Discovery Band & Artist provides support to all aspiring artists who dare to dream, and have the hunger to become the next big star of their generation. We want all Best Gospel singers, Best Reggae music, Best Calypso music, Best Soca music, Best African music to cast their singing power to the ultimate level. Come conquer the stage with your beautiful voice. Be confident, be bold, be found, and sing like the biggest star of your generation. Remember the decision you make today has the ultimate power to shape your destiny. Sign up now to be discovered! U.S. International Discovery Band & Artist is a support group aiming to raise, sponsor, and encourage all talents to rise up to their dreams of super-stardom. Calling all RNB artists, Best Haitian Kompa Zouk music, Best Merengue music, Best Salsa music artists, soloists, bands, country singers, and rock virtuosos to roll and jam with us. The agency holds regular auditions to look for various talents with a natural inclination to perform and lead on stage. All languages are welcome. Sign up now to be discovered!
from rx import Observable from rx.internal.utils import adapt_call from rx.internal import extensionmethod import collections def _flat_map(source, selector): def projection(x, i): selector_result = selector(x, i) if isinstance(selector_result, collections.Iterable): result = Observable.from_(selector_result) else: result = Observable.from_future(selector_result) return result return source.map(projection).merge_observable() @extensionmethod(Observable, alias="flat_map") def select_many(self, selector, result_selector=None): """One of the Following: Projects each element of an observable sequence to an observable sequence and merges the resulting observable sequences into one observable sequence. 1 - source.select_many(lambda x: Observable.range(0, x)) Or: Projects each element of an observable sequence to an observable sequence, invokes the result selector for the source element and each of the corresponding inner sequence's elements, and merges the results into one observable sequence. 1 - source.select_many(lambda x: Observable.range(0, x), lambda x, y: x + y) Or: Projects each element of the source observable sequence to the other observable sequence and merges the resulting observable sequences into one observable sequence. 1 - source.select_many(Observable.from_([1,2,3])) Keyword arguments: selector -- A transform function to apply to each element or an observable sequence to project each element from the source sequence onto. result_selector -- [Optional] A transform function to apply to each element of the intermediate sequence. Returns an observable sequence whose elements are the result of invoking the one-to-many transform function collectionSelector on each element of the input sequence and then mapping each of those sequence elements and their corresponding source element to a result element. """ if result_selector: def projection(x, i): selector_result = selector(x, i) if isinstance(selector_result, collections.Iterable): result = Observable.from_(selector_result) else: result = Observable.from_future(selector_result) return result.map(lambda y: result_selector(x, y, i)) return self.flat_map(projection) if callable(selector): selector = adapt_call(selector) ret = _flat_map(self, selector) else: ret = _flat_map(self, lambda _,__: selector) return ret
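# Hedged, runnable example of the operator defined above (RxPY 1.x style):
# each source element is projected onto a range and the inner sequences are
# merged into a single stream.
if __name__ == '__main__':
    def print_value(value):
        print(value)

    Observable.from_([1, 2, 3]) \
        .flat_map(lambda x: Observable.range(0, x)) \
        .subscribe(print_value)
    # With the default scheduler this prints 0, 0, 1, 0, 1, 2; interleaving
    # can differ on other schedulers.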
The team at Homewatch CareGivers® is aware that it may take longer for someone living with ALS to eat a meal. In the early stages of the illness, a person may need more time to prepare foods as coordination skills decrease. There is the risk of aspirating (breathing in) food into the lungs, and this can possibly lead to pneumonia in someone diagnosed with ALS. This is one reason that someone living with ALS may opt for a feeding tube to continue meeting daily nutritional requirements. It is possible to continue eating solid foods after getting a feeding tube since it is still possible to enjoy the taste of foods. A health care provider can advise on the optimum time for an individual with ALS to consider having a feeding tube placed. Contact us today for more tips on eating with ALS, as well as information about our caregiving services.
# -*- coding: utf8 -*- # scraping import urllib2 from goose import Goose from bs4 import BeautifulSoup # db import shelve # time/timeout import signal from datetime import * from contextlib import contextmanager from time import time # others import itertools import sys from pprint import pprint from collections import OrderedDict """ articles_dict key: date (yymmdd) i.e. 20070701 value: dict with key: val -> url: (title, text) """ """ ------------- Generic Scraping ---------------""" # html def get_html(url): """"given a url returns html""" try: html = urllib2.urlopen(url).read() return html except urllib2.HTTPError, e: print "URL broke: %s" % url return None # tags def find_tags(html, tag_name, class_name=False, a_tag=False): """"find tags using beautifulsoup, options: use a class name, get anchor tags""" soup = BeautifulSoup(html) # get tag with class if specified if class_name: tags = soup.findAll(tag_name, { "class" : class_name }) else: tags = soup.findAll(tag_name) # get anchor tag if specified if a_tag: tags = [link.find("a")["href"] for link in tags] return tags # article def get_article(url): """get article title and text using goose""" g = Goose() article = g.extract(url=url) title = article.title text = article.cleaned_text return (title, text) class TimeoutException(Exception): pass def timeout(fun, limit, *args ): @contextmanager def time_limit(seconds): def signal_handler(signum, frame): raise TimeoutException, "Timed out!" signal.signal(signal.SIGALRM, signal_handler) signal.alarm(seconds) try: yield finally: signal.alarm(0) try: with time_limit(limit): return fun(*args) except TimeoutException, msg: print "Function timed out\n" return ("", "") """ ----------------- Helper ------------------""" def dates_in_interval(start_date, end_date): """ Returns list of calender dates in interval""" diff = end_date - start_date dates = [ start_date + timedelta(i) for i in range(diff.days + 1) ] return dates # for date in dates_in_range(date(2014,6,15), date(2014,7,15)): print date def store_num_articles(start_date, end_date): dates = dates_in_range(start_date, end_date) num_dates = len(dates) total_articles = 0 temp_dict = dict() # 1 main_dict = shelve.open("../Data/num_articles") dates_stored = [date for date in main_dict] main_dict.close() for i in range(num_dates): date = str(dates[i]) if date in dates_stored: print "Date: %s in dict" % date continue try: articles_list = timeout(article_links_on_date, 5, date) if isinstance(articles_list, list): num_articles = len(articles_list) total_articles += num_articles temp_dict[str(date)] = num_articles print "Date: %s, Num articles: %s" % ( date, num_articles ) except: "\nFailed to get articles list on date %s\n" % date # write to dict if i%20 == 0: main_dict = shelve.open("../Data/num_articles") main_dict.update(temp_dict) main_dict.close() temp_dict = dict() print "\nSuccessfully updated dict, date: %s\n" % ( date ) print "\nTotal articles: %s" % total_articles def print_num_articles(): d = shelve.open("../Data/num_articles") total_articles = 0 missing_dates = [] calender_dates = dates_in_range( date(2007,1,1), date(2014,7,26) ) ordered_dict = OrderedDict((datetime.strftime(datetime.strptime(k , '%Y-%m-%d'), '%Y-%m-%d'), v) for k, v in sorted(d.iteritems())) # print ordered dates in dict for my_date, num_articles in ordered_dict.items(): total_articles += num_articles print "Date: %s, Num articles: %s" % ( my_date, num_articles ) print "\nNum dates on calender: %d" % len(calender_dates) print "Num dates stored: %d" % len(ordered_dict) 
print "Total articles: %d" % total_articles # print and get missing dates print "\nMissing dates:" for my_date in calender_dates: if str(my_date) not in d: print my_date d[str(my_date)] = len(article_links_on_date(my_date)) d.close() # print_num_articles() """ --------------- Scraper Class ---------------""" class ArticleScraper(): def __init__(self, date, print_details=True): self.date = date self.date_str = str(date) self.path_to_data = "../Data/Articles/" self.reuters_article_links = [] # total articles on reuters self.corrupted_keys = [] # failed to read key from db self.pre_stored_links = [] # already stored in db and title not empty self.stored_links = [] # stored in current process self.crashed_links = [] # DB or Goose crashed while extracting self.empty_links = [] # Goose returned w/ empty title self.empty_db_links = [] self.print_details = print_details def get_article_links(self): """ :return: List of article urls for a given date """ reuters_date_format = self.date_str.replace("-","") url = "http://www.reuters.com/resources/archive/us/%s.html" % reuters_date_format html = get_html(url) # all links includes articles + video all_links = find_tags(html, 'div', 'headlineMed', a_tag=True) # remove video links self.reuters_article_links = [link for link in all_links if 'video' not in str(link)] return self.reuters_article_links def get_pre_stored_links(self, details=False): """ :return: List of stored articles for a given date """ main_db = shelve.open(self.path_to_data + self.date_str, 'r') for link in main_db: try: title, text = main_db[link] if title and text: self.log_link(link, "prestored-log", title, details) else: self.log_link(link, "empty-db", title) except: self.log_link(link, "corrupted-key") main_db.close() return self.pre_stored_links def store_article(self, link, temp_dict): """ :param temp_dict: temp dict to update main db :return: Store and log article """ try: title, text = timeout(get_article, 5, link) except: self.log_link(link, "crashed") return if title: temp_dict[link] = ( title, text ) self.log_link(link, "stored", title) else: self.log_link(link, "empty") def log_link(self, link, status, title="", details=True): """ :return: Store links in resp dict and print if asked """ if self.print_details and details: print "Status: %s, %s, %s" % (status, link, title) if status == "crashed": self.crashed_links.append(link) elif status == "empty": self.empty_links.append(link) elif status == "stored": self.stored_links.append(link) elif status == "prestored-log": self.pre_stored_links.append(link) elif status == "pprestored-nolog": pass elif status == "corrupted-key": self.corrupted_keys.append(link) elif status == "empty-db": self.empty_db_links.append(link) def update_main_db(self, temp_dict): """ :return: Update main db with temp dict to prevent corruption of db """ main_db = shelve.open(self.path_to_data + self.date_str, 'c') main_db.update(temp_dict) main_db.close() def print_read_results(self): """ :return: Print results after reading db """ if self.print_details: print "\n\nCorrupted keys:" for link in self.corrupted_keys: print link print "\n\nEmpty db links:" for link in self.empty_db_links: print link print "\nReuter's: %d" % len(self.get_article_links()) print "Pre-stored: %d" % len(self.pre_stored_links) print "Empty: %d" % len(self.empty_db_links) print "Corrupted keys: %d" % len(self.corrupted_keys) def print_store_results(self): """ :return: Print results after updating db """ if self.print_details: print "\nEmpty articles:" for link in self.empty_links: 
print link print "\nCrashed articles:" for link in self.crashed_links: print link print "\nReuter's: %d" % len(self.reuters_article_links) print "Stored: %d" % len(self.stored_links) print "Crashed: %d" % len(self.crashed_links) print "Empty: %d" % len(self.empty_links) def test_link(self, link): title, text = get_article(link) print title print text def run_read(self): """ :return: Print articles in db """ print "\n\nDate: %s" % self.date_str self.get_pre_stored_links(details=True) self.print_read_results() def run_store(self): """ :return: Update main db with temp dict to prevent corruption of db """ print "Date: %s" % self.date_str start_time = time() temp_dict = dict() article_links = self.get_article_links() num_articles = len(article_links) pre_stored_articles = self.get_pre_stored_links() # store, log and update main db for i in range(num_articles): link = article_links[i] # check if already stored if link in pre_stored_articles: self.log_link(link, "prestored-nolog") continue # store and log self.store_article(link, temp_dict) # open and update main db, clear temp dict if i%20 == 0: self.update_main_db(temp_dict) if self.print_details: print "\nSuccessfully updated dict, i: %d, num links: %d\n" % ( i, num_articles) # print results self.print_store_results() print "Time taken: %s sec" % str(time() - start_time) """ ------------- Main ---------------""" for i in range(3,4): my_date = date(2014,7,i) scraper = ArticleScraper(my_date, False) scraper.run_read()
Two "Bioengineer Your Impact" participants are close to finishing their prosthetic hand prototype. What is a bioengineer? What do bioengineers do? This is what 20 high school students from around the state hoped to find out at “Bioengineer Your Impact" on Saturday, October 22, 2016. Hosted by BMES, the (Biomedical Engineering Society), a student organization for Bioengineering (BIOE) undergrads, it was the organization’s first large outreach for high school students and was designed to pique the young visitors' interest in Bioengineering. During the day, highschoolers interacted with current BIOE students as student panels shared with the younger students about their experiences in research and as interns in industry. Plus, a variety of presentations and activities helped visitors discover the breadth of bioengineering career opportunities available to them in academia, industry, and the medical fields. A panel of Bioengineering students share about their experiences as interns in industry. During the event, visitors got lots of chances to interact with Illinois BIOE students. For instance, they heard from a couple of student panels; one comprised of both grad students and undergrads discussed some BIOE research applications. Another panel of BIOE undergrads shared about their experiences as interns in industry. Plus, to further help students understand research opportunities at Illinois, visitors toured several labs in the Institute for Genomic Biology. A high school participant makes the fingers of Aktar's prosthetic hand move. During the afternoon Design Challenge, participants teamed up to design prosthetic devices, then create prototypes using string, tape, cardboard tubes, plastic coils, etc. BMES Outreach Director, Katherine Kiang, and "Bioengineer Your Impact" co-chair, Jennifer Zupancic. According to the event co-chair, Jennifer Zupancic, a BIOE junior specializing in therapeutics, one goal of the outreach was to inform visitors about the breadth of careers available in Bioengineering. A high schooler has some fun during the design challenge. A high school student works on his team's prosthetic device during the design challenge. Like Zupancic, Kiang’s goal for the event was to show students how diverse of a career path each could have as a Bioengineering student. BIOE junior Jackie Chen shares with the high school students during the student panel on internships in industry. Two participants work on a hands-on activity. In the past, I have helped with several large outreach events to educate high school students as well as younger students about engineering in general through SWE. The events provided students with an opportunity to learn more about the various engineering majors from students currently studying them, and I wish I had had a similar opportunity in high school. Due to my interest specifically in bioengineering, I think it is important for high school students to be able to learn more about the field when deciding on a major. Shannon Tripp discuss the materials she and her teammates should use to create a prosthetic hand during the afternoon design challenge. And this was exactly why Shannon Tripp, a sophomore at Carl Sandburg High School in Orland Park, a Chicago suburb, participated in “Bioengineering Your Impact.” She is specifically exploring careers in Bioengineering and wanted to find out more about it. “Last year in biology, we learned about bioengineering, and I thought it was supercool!” she acknowledges. 
Tripp also particularly enjoyed Aktar's prosthetic hand presentation: "Yea, I thought that was phenomenal. It was amazing." Bianca Rubel (right), and a teammate create a prosthetic hand during the afternoon's design challenge. Another participant, Bianca Rubel, an eighth grader at University Laboratory High School in Urbana, came to the event because she enjoys events about engineering and science. "I love going to all of these engineering events. I've gone to a ton, and I always have a lot of fun." Does she intend to go into engineering? "I don't know, honestly," she reports. "I have no idea what I'm going to do, but engineering's definitely a possibility." BIOE student Michael Qian (right) works with a team of high school students creating their design project.
# -*- coding: utf-8 -*- import json def user_parser(file_path): with open(file_path, 'r') as f: raw_data = json.load(f) data = dict() data['username'] = raw_data['profile']['user']['username'] if 'socialStats' in raw_data['profile']['user']: data['followers'] = raw_data['profile']['user']['socialStats']['usersFollowedByCount'] data['following'] = raw_data['profile']['user']['socialStats']['usersFollowedCount'] else: data['followers'] = len(raw_data['followers']) data['following'] = len(raw_data['following']) data['lastPostCreatedAt'] = raw_data['profile']['user']['lastPostCreatedAt'] data['createdAt'] = raw_data['profile']['user']['createdAt'] data['postsInMonthlyTop100'] = raw_data['profile']['postsInMonthlyTop100'] if 'twitterScreenName' not in raw_data['profile']['user'] or raw_data['profile']['user']['twitterScreenName'] == '': data['twitter'] = 0 else: data['twitter'] = 1 if 'facebookAccountId' not in raw_data['profile']['user'] or raw_data['profile']['user']['facebookAccountId'] == '': data['facebook'] = 0 else: data['facebook'] = 1 if raw_data['profile']['user']['bio'] == '': data['bio'] = 0 else: data['bio'] = 1 data['posts'] = len(raw_data['latest']) data['highlights'] = len(raw_data['highlights']) data['responses'] = len(raw_data['responses']) data['recommends'] = len(raw_data['recommends']) data['authorTags'] = len(raw_data['profile']['authorTags']) data['collections'] = len(raw_data['profile']['collections']) data['topAuthorTags'] = len(raw_data['profile']['topAuthorTags']) data['interestTags'] = len(raw_data['profile']['interestTags']) return data def twitter_parser(file_path): data = dict() data['twitter_followers'] = '' data['twitter_friends'] = '' data['twitter_listed'] = '' data['twitter_statuses'] = '' data['twitter_favourites'] = '' data['twitter_description'] = '' if file_path == '': return data with open(file_path, 'r') as f: raw_data = json.load(f) if 'profile_user' in raw_data: raw_data = raw_data['profile_user'] else: return data data['twitter_followers'] = raw_data['followers_count'] data['twitter_friends'] = raw_data['friends_count'] data['twitter_listed'] = raw_data['listed_count'] data['twitter_statuses'] = raw_data['statuses_count'] data['twitter_favourites'] = raw_data['favourites_count'] if raw_data['description'] == '': data['twitter_description'] = 0 else: data['twitter_description'] = 1 return data
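# Hedged usage sketch: both parsers take a path to a previously scraped JSON
# dump and return a flat feature dict. The file paths below are hypothetical.
if __name__ == '__main__':
    from pprint import pprint

    pprint(user_parser('data/users/some_user.json'))   # hypothetical path
    pprint(twitter_parser(''))   # empty path returns the default empty fields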
"What are your carpet cleaning prices?" Is the most frequent question the staff at Win-Win Cleaning Services get. Want to find a carpet cleaning company that offers fair carpet cleaning prices in Greenview? You just did! Carpeting is one of the most affordable floor coverings available. Carpeted surfaces in a home provide comfort and warmth. Families with young children like the security that a soft floor can give to the unsteady legs of toddlers. But carpets do need to be maintained to retain their appearance and functionality of serving as your home's biggest air filter. Win-Win Cleaning Services provides low cost carpet cleaning to Greenview homes and small businesses. To extend the life of your carpets and keep them fresh, you’ll want to have them professionally cleaned twice a year. Win-Win Cleaning Services is carpet cleaning contractor that has been serving residential and business clients since 2006. When you need to refresh your indoor spaces, call us at (530) 208-5320 to schedule a carpet cleaning appointment. The old adage, “you get what you pay for” has never been more true than in the carpet cleaning industry. Your carpet’s purchase price is reflected in the thickness and density of its fibers as well as the quality of the padding and installation. The value of a quality carpet cleaning services in Greenview can be just as noticeable. Win-Win Cleaning Services uses commercial-grade carpet extraction equipment for all Greenview carpet cleaning projects. Our trained technicians clean carpets using appropriate solutions and the proper amounts of water. Those ultra low-cost carpet cleaners have to make their money somehow. Often they use questionable methods, products and equipment that can damage your flooring and cost you more in the long term. Call (530) 208-5320 to get a fair price on cleaning your Greenview carpeting today. Win-Win Cleaning Services uses sound carpet cleaning methods that professionally deep clean Greenview's residential and commercial carpeting. Hot water extraction involves the use of hot water and safe cleaning solutions that reach deep into carpets to lift out ground in dirt. Win-Win Cleaning Services's carpet cleaning professionals in Greenview use powerful suction vacuums to quickly capture the dirt and excess water from carpets. After a steam cleaning is complete, your carpets will need just a few hours to dry. Win-Win Cleaning Services offers affordable carpet cleaning prices in Greenview . Ask about our carpet maintenance program and save even more! Call Win-Win Cleaning Services at (530) 208-5320 for a free assessment of your carpet cleaning needs. Room size, cleaning method, amount of ground in dirt, stain removal requirements and carpet type are the main factors that impact carpet cleaning prices for Greenview California homeowners. If you have old, tough-to-remove stains in your carpets, you can expect to see an increase in service price. An expensive wool carpet will cost more to clean than one that is made of synthetic materials. Your licensed and insured carpet cleaning contractor at Win-Win Cleaning Services will provide a fair price quote that includes a quality guarantee. Ready for fresh carpets? Call us at (530) 208-5320 for a free quote.
# Copyright (c) 2010 Cloud.com, Inc # Copyright 2012 Cloudbase Solutions Srl # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Management class for basic VM operations. """ import contextlib import functools import os import time from eventlet import timeout as etimeout from os_win import constants as os_win_const from os_win import exceptions as os_win_exc from os_win import utilsfactory from oslo_concurrency import processutils from oslo_log import log as logging from oslo_service import loopingcall from oslo_utils import excutils from oslo_utils import fileutils from oslo_utils import units from oslo_utils import uuidutils from nova.api.metadata import base as instance_metadata from nova.compute import vm_states import nova.conf from nova import exception from nova.i18n import _ from nova import objects from nova.objects import fields from nova import version from nova.virt import configdrive from nova.virt import hardware from nova.virt.hyperv import block_device_manager from nova.virt.hyperv import constants from nova.virt.hyperv import imagecache from nova.virt.hyperv import pathutils from nova.virt.hyperv import serialconsoleops from nova.virt.hyperv import vif as vif_utils from nova.virt.hyperv import volumeops LOG = logging.getLogger(__name__) CONF = nova.conf.CONF SHUTDOWN_TIME_INCREMENT = 5 REBOOT_TYPE_SOFT = 'SOFT' REBOOT_TYPE_HARD = 'HARD' VM_GENERATIONS = { constants.IMAGE_PROP_VM_GEN_1: constants.VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2: constants.VM_GEN_2 } VM_GENERATIONS_CONTROLLER_TYPES = { constants.VM_GEN_1: constants.CTRL_TYPE_IDE, constants.VM_GEN_2: constants.CTRL_TYPE_SCSI } def check_admin_permissions(function): @functools.wraps(function) def wrapper(self, *args, **kwds): # Make sure the windows account has the required admin permissions. self._vmutils.check_admin_permissions() return function(self, *args, **kwds) return wrapper class VMOps(object): # The console log is stored in two files, each should have at most half of # the maximum console log size. 
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2 _ROOT_DISK_CTRL_ADDR = 0 def __init__(self, virtapi=None): self._virtapi = virtapi self._vmutils = utilsfactory.get_vmutils() self._metricsutils = utilsfactory.get_metricsutils() self._vhdutils = utilsfactory.get_vhdutils() self._hostutils = utilsfactory.get_hostutils() self._migrutils = utilsfactory.get_migrationutils() self._pathutils = pathutils.PathUtils() self._volumeops = volumeops.VolumeOps() self._imagecache = imagecache.ImageCache() self._serial_console_ops = serialconsoleops.SerialConsoleOps() self._block_dev_man = ( block_device_manager.BlockDeviceInfoManager()) self._vif_driver = vif_utils.HyperVVIFDriver() def list_instance_uuids(self): instance_uuids = [] for (instance_name, notes) in self._vmutils.list_instance_notes(): if notes and uuidutils.is_uuid_like(notes[0]): instance_uuids.append(str(notes[0])) else: LOG.debug("Notes not found or not resembling a GUID for " "instance: %s", instance_name) return instance_uuids def list_instances(self): return self._vmutils.list_instances() def get_info(self, instance): """Get information about the VM.""" LOG.debug("get_info called for instance", instance=instance) instance_name = instance.name if not self._vmutils.vm_exists(instance_name): raise exception.InstanceNotFound(instance_id=instance.uuid) info = self._vmutils.get_vm_summary_info(instance_name) state = constants.HYPERV_POWER_STATE[info['EnabledState']] return hardware.InstanceInfo(state=state) def _create_root_device(self, context, instance, root_disk_info, vm_gen): path = None if root_disk_info['type'] == constants.DISK: path = self._create_root_vhd(context, instance) self.check_vm_image_type(instance.uuid, vm_gen, path) root_disk_info['path'] = path def _create_root_vhd(self, context, instance, rescue_image_id=None): is_rescue_vhd = rescue_image_id is not None base_vhd_path = self._imagecache.get_cached_image(context, instance, rescue_image_id) base_vhd_info = self._vhdutils.get_vhd_info(base_vhd_path) base_vhd_size = base_vhd_info['VirtualSize'] format_ext = base_vhd_path.split('.')[-1] root_vhd_path = self._pathutils.get_root_vhd_path(instance.name, format_ext, is_rescue_vhd) root_vhd_size = instance.flavor.root_gb * units.Gi try: if CONF.use_cow_images: LOG.debug("Creating differencing VHD. Parent: " "%(base_vhd_path)s, Target: %(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._vhdutils.create_differencing_vhd(root_vhd_path, base_vhd_path) vhd_type = self._vhdutils.get_vhd_format(base_vhd_path) if vhd_type == constants.DISK_FORMAT_VHD: # The base image has already been resized. As differencing # vhdx images support it, the root image will be resized # instead if needed. 
return root_vhd_path else: LOG.debug("Copying VHD image %(base_vhd_path)s to target: " "%(root_vhd_path)s", {'base_vhd_path': base_vhd_path, 'root_vhd_path': root_vhd_path}, instance=instance) self._pathutils.copyfile(base_vhd_path, root_vhd_path) root_vhd_internal_size = ( self._vhdutils.get_internal_vhd_size_by_file_size( base_vhd_path, root_vhd_size)) if not is_rescue_vhd and self._is_resize_needed( root_vhd_path, base_vhd_size, root_vhd_internal_size, instance): self._vhdutils.resize_vhd(root_vhd_path, root_vhd_internal_size, is_file_max_size=False) except Exception: with excutils.save_and_reraise_exception(): if self._pathutils.exists(root_vhd_path): self._pathutils.remove(root_vhd_path) return root_vhd_path def _is_resize_needed(self, vhd_path, old_size, new_size, instance): if new_size < old_size: raise exception.FlavorDiskSmallerThanImage( flavor_size=new_size, image_size=old_size) elif new_size > old_size: LOG.debug("Resizing VHD %(vhd_path)s to new " "size %(new_size)s", {'new_size': new_size, 'vhd_path': vhd_path}, instance=instance) return True return False def _create_ephemerals(self, instance, ephemerals): for index, eph in enumerate(ephemerals): eph['format'] = self._vhdutils.get_best_supported_vhd_format() eph_name = "eph%s" % index eph['path'] = self._pathutils.get_ephemeral_vhd_path( instance.name, eph['format'], eph_name) self.create_ephemeral_disk(instance.name, eph) def create_ephemeral_disk(self, instance_name, eph_info): self._vhdutils.create_dynamic_vhd(eph_info['path'], eph_info['size'] * units.Gi) @staticmethod def _get_vif_metadata(context, instance_id): vifs = objects.VirtualInterfaceList.get_by_instance_uuid(context, instance_id) vif_metadata = [] for vif in vifs: if 'tag' in vif and vif.tag: device = objects.NetworkInterfaceMetadata( mac=vif.address, bus=objects.PCIDeviceBus(), tags=[vif.tag]) vif_metadata.append(device) return vif_metadata def _save_device_metadata(self, context, instance, block_device_info): """Builds a metadata object for instance devices, that maps the user provided tag to the hypervisor assigned device address. """ metadata = [] metadata.extend(self._get_vif_metadata(context, instance.uuid)) if block_device_info: metadata.extend(self._block_dev_man.get_bdm_metadata( context, instance, block_device_info)) if metadata: instance.device_metadata = objects.InstanceDeviceMetadata( devices=metadata) def set_boot_order(self, instance_name, vm_gen, block_device_info): boot_order = self._block_dev_man.get_boot_order( vm_gen, block_device_info) LOG.debug("Setting boot order for instance: %(instance_name)s: " "%(boot_order)s", {'instance_name': instance_name, 'boot_order': boot_order}) self._vmutils.set_boot_order(instance_name, boot_order) @check_admin_permissions def spawn(self, context, instance, image_meta, injected_files, admin_password, network_info, block_device_info=None): """Create a new VM and start it.""" LOG.info("Spawning new instance", instance=instance) instance_name = instance.name if self._vmutils.vm_exists(instance_name): raise exception.InstanceExists(name=instance_name) # Make sure we're starting with a clean slate. 
self._delete_disk_files(instance_name) vm_gen = self.get_image_vm_generation(instance.uuid, image_meta) self._block_dev_man.validate_and_update_bdi( instance, image_meta, vm_gen, block_device_info) root_device = block_device_info['root_disk'] self._create_root_device(context, instance, root_device, vm_gen) self._create_ephemerals(instance, block_device_info['ephemerals']) try: with self.wait_vif_plug_events(instance, network_info): # waiting will occur after the instance is created. self.create_instance(instance, network_info, root_device, block_device_info, vm_gen, image_meta) # This is supported starting from OVS version 2.5 self.plug_vifs(instance, network_info) self._save_device_metadata(context, instance, block_device_info) if configdrive.required_by(instance): configdrive_path = self._create_config_drive(context, instance, injected_files, admin_password, network_info) self.attach_config_drive(instance, configdrive_path, vm_gen) self.set_boot_order(instance.name, vm_gen, block_device_info) # vifs are already plugged in at this point. We waited on the vif # plug event previously when we created the instance. Skip the # plug vifs during power on in this case self.power_on(instance, network_info=network_info, should_plug_vifs=False) except Exception: with excutils.save_and_reraise_exception(): self.destroy(instance, network_info, block_device_info) @contextlib.contextmanager def wait_vif_plug_events(self, instance, network_info): timeout = CONF.vif_plugging_timeout try: # NOTE(claudiub): async calls to bind the neutron ports will be # done when network_info is being accessed. events = self._get_neutron_events(network_info) with self._virtapi.wait_for_instance_event( instance, events, deadline=timeout, error_callback=self._neutron_failed_callback): yield except etimeout.Timeout: # We never heard from Neutron LOG.warning('Timeout waiting for vif plugging callback for ' 'instance.', instance=instance) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() except exception.PortBindingFailed: LOG.warning( "Neutron failed to bind a port to this host. Make sure that " "an L2 agent is alive and registered from this node (neutron " "Open vSwitch agent or Hyper-V agent), or make sure that " "neutron is configured with a mechanism driver that is able " "to bind ports to this host (OVN). If you are using neutron " "Hyper-V agent, make sure that networking-hyperv is installed " "on the neutron controller, and that the neutron-server was " "configured to use the 'hyperv' mechanism_driver.") raise def _neutron_failed_callback(self, event_name, instance): LOG.error('Neutron Reported failure on event %s', event_name, instance=instance) if CONF.vif_plugging_is_fatal: raise exception.VirtualInterfaceCreateException() def _get_neutron_events(self, network_info): # NOTE(danms): We need to collect any VIFs that are currently # down that we expect a down->up event for. Anything that is # already up will not undergo that transition, and for # anything that might be stale (cache-wise) assume it's # already up so we don't block on it. 
if CONF.vif_plugging_timeout: return [('network-vif-plugged', vif['id']) for vif in network_info if vif.get('active') is False] return [] def create_instance(self, instance, network_info, root_device, block_device_info, vm_gen, image_meta): instance_name = instance.name instance_path = os.path.join(CONF.instances_path, instance_name) secure_boot_enabled = self._requires_secure_boot(instance, image_meta, vm_gen) memory_per_numa_node, cpus_per_numa_node = ( self._get_instance_vnuma_config(instance, image_meta)) if memory_per_numa_node: LOG.debug("Instance requires vNUMA topology. Host's NUMA spanning " "has to be disabled in order for the instance to " "benefit from it.", instance=instance) if CONF.hyperv.dynamic_memory_ratio > 1.0: LOG.warning( "Instance vNUMA topology requested, but dynamic memory " "ratio is higher than 1.0 in nova.conf. Ignoring dynamic " "memory ratio option.", instance=instance) dynamic_memory_ratio = 1.0 vnuma_enabled = True else: dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio vnuma_enabled = False if instance.pci_requests.requests: # NOTE(claudiub): if the instance requires PCI devices, its # host shutdown action MUST be shutdown. host_shutdown_action = os_win_const.HOST_SHUTDOWN_ACTION_SHUTDOWN else: host_shutdown_action = None self._vmutils.create_vm(instance_name, vnuma_enabled, vm_gen, instance_path, [instance.uuid]) self._vmutils.update_vm(instance_name, instance.flavor.memory_mb, memory_per_numa_node, instance.flavor.vcpus, cpus_per_numa_node, CONF.hyperv.limit_cpu_features, dynamic_memory_ratio, host_shutdown_action=host_shutdown_action, chassis_asset_tag=version.product_string()) self._configure_remotefx(instance, vm_gen) self._vmutils.create_scsi_controller(instance_name) self._attach_root_device(instance_name, root_device) self._attach_ephemerals(instance_name, block_device_info['ephemerals']) self._volumeops.attach_volumes( block_device_info['block_device_mapping'], instance_name) # For the moment, we use COM port 1 when getting the serial console # log as well as interactive sessions. In the future, the way in which # we consume instance serial ports may become configurable. # # Note that Hyper-V instances will always have 2 COM ports serial_ports = { constants.DEFAULT_SERIAL_CONSOLE_PORT: constants.SERIAL_PORT_TYPE_RW} self._create_vm_com_port_pipes(instance, serial_ports) for vif in network_info: LOG.debug('Creating nic for instance', instance=instance) self._vmutils.create_nic(instance_name, vif['id'], vif['address']) if CONF.hyperv.enable_instance_metrics_collection: self._metricsutils.enable_vm_metrics_collection(instance_name) self._set_instance_disk_qos_specs(instance) if secure_boot_enabled: certificate_required = self._requires_certificate(image_meta) self._vmutils.enable_secure_boot( instance.name, msft_ca_required=certificate_required) self._attach_pci_devices(instance) def _attach_pci_devices(self, instance): for pci_request in instance.pci_requests.requests: spec = pci_request.spec[0] for counter in range(pci_request.count): self._vmutils.add_pci_device(instance.name, spec['vendor_id'], spec['product_id']) def _get_instance_vnuma_config(self, instance, image_meta): """Returns the appropriate NUMA configuration for Hyper-V instances, given the desired instance NUMA topology. :param instance: instance containing the flavor and it's extra_specs, where the NUMA topology is defined. :param image_meta: image's metadata, containing properties related to the instance's NUMA topology. 
:returns: memory amount and number of vCPUs per NUMA node or (None, None), if instance NUMA topology was not requested. :raises exception.InstanceUnacceptable: If the given instance NUMA topology is not possible on Hyper-V, or if CPU pinning is required. """ instance_topology = hardware.numa_get_constraints(instance.flavor, image_meta) if not instance_topology: # instance NUMA topology was not requested. return None, None memory_per_numa_node = instance_topology.cells[0].memory cpus_per_numa_node = len(instance_topology.cells[0].cpuset) # TODO(stephenfin): We can avoid this check entirely if we rely on the # 'supports_pcpus' driver capability (via a trait), but we need to drop # support for the legacy 'vcpu_pin_set' path in the libvirt driver # first if instance_topology.cpu_policy not in ( None, fields.CPUAllocationPolicy.SHARED, ): raise exception.InstanceUnacceptable( reason=_("Hyper-V does not support CPU pinning."), instance_id=instance.uuid) # validate that the requested NUMA topology is not asymetric. # e.g.: it should be like: (X cpus, X cpus, Y cpus), where X == Y. # same with memory. for cell in instance_topology.cells: if len(cell.cpuset) != cpus_per_numa_node: reason = _("Hyper-V does not support NUMA topologies with " "uneven number of processors. (%(a)s != %(b)s)") % { 'a': len(cell.cpuset), 'b': cpus_per_numa_node} raise exception.InstanceUnacceptable(reason=reason, instance_id=instance.uuid) if cell.memory != memory_per_numa_node: reason = _("Hyper-V does not support NUMA topologies with " "uneven amounts of memory. (%(a)s != %(b)s)") % { 'a': cell.memory, 'b': memory_per_numa_node} raise exception.InstanceUnacceptable(reason=reason, instance_id=instance.uuid) return memory_per_numa_node, cpus_per_numa_node def _configure_remotefx(self, instance, vm_gen): extra_specs = instance.flavor.extra_specs remotefx_max_resolution = extra_specs.get( constants.FLAVOR_ESPEC_REMOTEFX_RES) if not remotefx_max_resolution: # RemoteFX not required. 
return if not CONF.hyperv.enable_remotefx: raise exception.InstanceUnacceptable( _("enable_remotefx configuration option needs to be set to " "True in order to use RemoteFX.")) if not self._hostutils.check_server_feature( self._hostutils.FEATURE_RDS_VIRTUALIZATION): raise exception.InstanceUnacceptable( _("The RDS-Virtualization feature must be installed in order " "to use RemoteFX.")) if not self._vmutils.vm_gen_supports_remotefx(vm_gen): raise exception.InstanceUnacceptable( _("RemoteFX is not supported on generation %s virtual " "machines on this version of Windows.") % vm_gen) instance_name = instance.name LOG.debug('Configuring RemoteFX for instance: %s', instance_name) remotefx_monitor_count = int(extra_specs.get( constants.FLAVOR_ESPEC_REMOTEFX_MONITORS) or 1) remotefx_vram = extra_specs.get( constants.FLAVOR_ESPEC_REMOTEFX_VRAM) vram_bytes = int(remotefx_vram) * units.Mi if remotefx_vram else None self._vmutils.enable_remotefx_video_adapter( instance_name, remotefx_monitor_count, remotefx_max_resolution, vram_bytes) def _attach_root_device(self, instance_name, root_dev_info): if root_dev_info['type'] == constants.VOLUME: self._volumeops.attach_volume(root_dev_info['connection_info'], instance_name, disk_bus=root_dev_info['disk_bus']) else: self._attach_drive(instance_name, root_dev_info['path'], root_dev_info['drive_addr'], root_dev_info['ctrl_disk_addr'], root_dev_info['disk_bus'], root_dev_info['type']) def _attach_ephemerals(self, instance_name, ephemerals): for eph in ephemerals: # if an ephemeral doesn't have a path, it might have been removed # during resize. if eph.get('path'): self._attach_drive( instance_name, eph['path'], eph['drive_addr'], eph['ctrl_disk_addr'], eph['disk_bus'], constants.BDI_DEVICE_TYPE_TO_DRIVE_TYPE[ eph['device_type']]) def _attach_drive(self, instance_name, path, drive_addr, ctrl_disk_addr, controller_type, drive_type=constants.DISK): if controller_type == constants.CTRL_TYPE_SCSI: self._vmutils.attach_scsi_drive(instance_name, path, drive_type) else: self._vmutils.attach_ide_drive(instance_name, path, drive_addr, ctrl_disk_addr, drive_type) def get_image_vm_generation(self, instance_id, image_meta): default_vm_gen = self._hostutils.get_default_vm_generation() image_prop_vm = image_meta.properties.get('hw_machine_type', default_vm_gen) if image_prop_vm not in self._hostutils.get_supported_vm_types(): reason = _('Requested VM Generation %s is not supported on ' 'this OS.') % image_prop_vm raise exception.InstanceUnacceptable(instance_id=instance_id, reason=reason) return VM_GENERATIONS[image_prop_vm] def check_vm_image_type(self, instance_id, vm_gen, root_vhd_path): if (vm_gen != constants.VM_GEN_1 and root_vhd_path and self._vhdutils.get_vhd_format( root_vhd_path) == constants.DISK_FORMAT_VHD): reason = _('Requested VM Generation %s, but provided VHD ' 'instead of VHDX.') % vm_gen raise exception.InstanceUnacceptable(instance_id=instance_id, reason=reason) def _requires_certificate(self, image_meta): os_type = image_meta.properties.get('os_type') if os_type == fields.OSType.WINDOWS: return False return True def _requires_secure_boot(self, instance, image_meta, vm_gen): """Checks whether the given instance requires Secure Boot. Secure Boot feature will be enabled by setting the "os_secure_boot" image property or the "os:secure_boot" flavor extra spec to required. 
:raises exception.InstanceUnacceptable: if the given image_meta has no os_type property set, or if the image property value and the flavor extra spec value are conflicting, or if Secure Boot is required, but the instance's VM generation is 1. """ img_secure_boot = image_meta.properties.get('os_secure_boot') flavor_secure_boot = instance.flavor.extra_specs.get( constants.FLAVOR_SPEC_SECURE_BOOT) requires_sb = False conflicting_values = False if flavor_secure_boot == fields.SecureBoot.REQUIRED: requires_sb = True if img_secure_boot == fields.SecureBoot.DISABLED: conflicting_values = True elif img_secure_boot == fields.SecureBoot.REQUIRED: requires_sb = True if flavor_secure_boot == fields.SecureBoot.DISABLED: conflicting_values = True if conflicting_values: reason = _( "Conflicting image metadata property and flavor extra_specs " "values: os_secure_boot (%(image_secure_boot)s) / " "os:secure_boot (%(flavor_secure_boot)s)") % { 'image_secure_boot': img_secure_boot, 'flavor_secure_boot': flavor_secure_boot} raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) if requires_sb: if vm_gen != constants.VM_GEN_2: reason = _('Secure boot requires generation 2 VM.') raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) os_type = image_meta.properties.get('os_type') if not os_type: reason = _('For secure boot, os_type must be specified in ' 'image properties.') raise exception.InstanceUnacceptable(instance_id=instance.uuid, reason=reason) return requires_sb def _create_config_drive(self, context, instance, injected_files, admin_password, network_info, rescue=False): if CONF.config_drive_format != 'iso9660': raise exception.ConfigDriveUnsupportedFormat( format=CONF.config_drive_format) LOG.info('Using config drive for instance', instance=instance) extra_md = {} if admin_password and CONF.hyperv.config_drive_inject_password: extra_md['admin_pass'] = admin_password inst_md = instance_metadata.InstanceMetadata( instance, content=injected_files, extra_md=extra_md, network_info=network_info) configdrive_path_iso = self._pathutils.get_configdrive_path( instance.name, constants.DVD_FORMAT, rescue=rescue) LOG.info('Creating config drive at %(path)s', {'path': configdrive_path_iso}, instance=instance) with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb: try: cdb.make_drive(configdrive_path_iso) except processutils.ProcessExecutionError as e: with excutils.save_and_reraise_exception(): LOG.error('Creating config drive failed with ' 'error: %s', e, instance=instance) if not CONF.hyperv.config_drive_cdrom: configdrive_path = self._pathutils.get_configdrive_path( instance.name, constants.DISK_FORMAT_VHD, rescue=rescue) processutils.execute(CONF.hyperv.qemu_img_cmd, 'convert', '-f', 'raw', '-O', 'vpc', configdrive_path_iso, configdrive_path, attempts=1) self._pathutils.remove(configdrive_path_iso) else: configdrive_path = configdrive_path_iso return configdrive_path def attach_config_drive(self, instance, configdrive_path, vm_gen): configdrive_ext = configdrive_path[(configdrive_path.rfind('.') + 1):] # Do the attach here and if there is a certain file format that isn't # supported in constants.DISK_FORMAT_MAP then bomb out. 
try: drive_type = constants.DISK_FORMAT_MAP[configdrive_ext] controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] self._attach_drive(instance.name, configdrive_path, 1, 0, controller_type, drive_type) except KeyError: raise exception.InvalidDiskFormat(disk_format=configdrive_ext) def _detach_config_drive(self, instance_name, rescue=False, delete=False): configdrive_path = self._pathutils.lookup_configdrive_path( instance_name, rescue=rescue) if configdrive_path: self._vmutils.detach_vm_disk(instance_name, configdrive_path, is_physical=False) if delete: self._pathutils.remove(configdrive_path) @serialconsoleops.instance_synchronized def _delete_disk_files(self, instance_name): # We want to avoid the situation in which serial console workers # are started while we perform this operation, preventing us from # deleting the instance log files (bug #1556189). This can happen # due to delayed instance lifecycle events. # # The unsynchronized method is being used to avoid a deadlock. self._serial_console_ops.stop_console_handler_unsync(instance_name) self._pathutils.get_instance_dir(instance_name, create_dir=False, remove_dir=True) def destroy(self, instance, network_info, block_device_info, destroy_disks=True): instance_name = instance.name LOG.info("Got request to destroy instance", instance=instance) try: if self._vmutils.vm_exists(instance_name): # Stop the VM first. self._vmutils.stop_vm_jobs(instance_name) self.power_off(instance) self._vmutils.destroy_vm(instance_name) elif self._migrutils.planned_vm_exists(instance_name): self._migrutils.destroy_existing_planned_vm(instance_name) else: LOG.debug("Instance not found", instance=instance) # NOTE(claudiub): The vifs should be unplugged and the volumes # should be disconnected even if the VM doesn't exist anymore, # so they are not leaked. self.unplug_vifs(instance, network_info) self._volumeops.disconnect_volumes(block_device_info) if destroy_disks: self._delete_disk_files(instance_name) except Exception: with excutils.save_and_reraise_exception(): LOG.exception('Failed to destroy instance: %s', instance_name) def reboot(self, instance, network_info, reboot_type): """Reboot the specified instance.""" LOG.debug("Rebooting instance", instance=instance) if reboot_type == REBOOT_TYPE_SOFT: if self._soft_shutdown(instance): self.power_on(instance, network_info=network_info) return self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_REBOOT) def _soft_shutdown(self, instance, timeout=CONF.hyperv.wait_soft_reboot_seconds, retry_interval=SHUTDOWN_TIME_INCREMENT): """Perform a soft shutdown on the VM. :return: True if the instance was shutdown within time limit, False otherwise. """ LOG.debug("Performing Soft shutdown on instance", instance=instance) while timeout > 0: # Perform a soft shutdown on the instance. # Wait maximum timeout for the instance to be shutdown. # If it was not shutdown, retry until it succeeds or a maximum of # time waited is equal to timeout. wait_time = min(retry_interval, timeout) try: LOG.debug("Soft shutdown instance, timeout remaining: %d", timeout, instance=instance) self._vmutils.soft_shutdown_vm(instance.name) if self._wait_for_power_off(instance.name, wait_time): LOG.info("Soft shutdown succeeded.", instance=instance) return True except os_win_exc.HyperVException as e: # Exception is raised when trying to shutdown the instance # while it is still booting. 
LOG.debug("Soft shutdown failed: %s", e, instance=instance) time.sleep(wait_time) timeout -= retry_interval LOG.warning("Timed out while waiting for soft shutdown.", instance=instance) return False def pause(self, instance): """Pause VM instance.""" LOG.debug("Pause instance", instance=instance) self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_PAUSED) def unpause(self, instance): """Unpause paused VM instance.""" LOG.debug("Unpause instance", instance=instance) self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED) def suspend(self, instance): """Suspend the specified instance.""" LOG.debug("Suspend instance", instance=instance) self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_SUSPENDED) def resume(self, instance): """Resume the suspended VM instance.""" LOG.debug("Resume instance", instance=instance) self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED) def power_off(self, instance, timeout=0, retry_interval=0): """Power off the specified instance.""" LOG.debug("Power off instance", instance=instance) # We must make sure that the console log workers are stopped, # otherwise we won't be able to delete or move the VM log files. self._serial_console_ops.stop_console_handler(instance.name) if retry_interval <= 0: retry_interval = SHUTDOWN_TIME_INCREMENT try: if timeout and self._soft_shutdown(instance, timeout, retry_interval): return self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_DISABLED) except os_win_exc.HyperVVMNotFoundException: # The manager can call the stop API after receiving instance # power off events. If this is triggered when the instance # is being deleted, it might attempt to power off an unexisting # instance. We'll just pass in this case. LOG.debug("Instance not found. Skipping power off", instance=instance) def power_on(self, instance, block_device_info=None, network_info=None, should_plug_vifs=True): """Power on the specified instance.""" LOG.debug("Power on instance", instance=instance) if block_device_info: self._volumeops.fix_instance_volume_disk_paths(instance.name, block_device_info) if should_plug_vifs: self.plug_vifs(instance, network_info) self._set_vm_state(instance, os_win_const.HYPERV_VM_STATE_ENABLED) def _set_vm_state(self, instance, req_state): instance_name = instance.name try: self._vmutils.set_vm_state(instance_name, req_state) LOG.debug("Successfully changed state of VM %(instance_name)s" " to: %(req_state)s", {'instance_name': instance_name, 'req_state': req_state}) except Exception: with excutils.save_and_reraise_exception(): LOG.error("Failed to change vm state of %(instance_name)s" " to %(req_state)s", {'instance_name': instance_name, 'req_state': req_state}) def _get_vm_state(self, instance_name): summary_info = self._vmutils.get_vm_summary_info(instance_name) return summary_info['EnabledState'] def _wait_for_power_off(self, instance_name, time_limit): """Waiting for a VM to be in a disabled state. :return: True if the instance is shutdown within time_limit, False otherwise. """ desired_vm_states = [os_win_const.HYPERV_VM_STATE_DISABLED] def _check_vm_status(instance_name): if self._get_vm_state(instance_name) in desired_vm_states: raise loopingcall.LoopingCallDone() periodic_call = loopingcall.FixedIntervalLoopingCall(_check_vm_status, instance_name) try: # add a timeout to the periodic call. periodic_call.start(interval=SHUTDOWN_TIME_INCREMENT) etimeout.with_timeout(time_limit, periodic_call.wait) except etimeout.Timeout: # VM did not shutdown in the expected time_limit. 
return False finally: # stop the periodic call, in case of exceptions or Timeout. periodic_call.stop() return True def resume_state_on_host_boot(self, context, instance, network_info, block_device_info=None): """Resume guest state when a host is booted.""" self.power_on(instance, block_device_info, network_info) def _create_vm_com_port_pipes(self, instance, serial_ports): for port_number, port_type in serial_ports.items(): pipe_path = r'\\.\pipe\%s_%s' % (instance.uuid, port_type) self._vmutils.set_vm_serial_port_connection( instance.name, port_number, pipe_path) def copy_vm_dvd_disks(self, vm_name, dest_host): dvd_disk_paths = self._vmutils.get_vm_dvd_disk_paths(vm_name) dest_path = self._pathutils.get_instance_dir( vm_name, remote_server=dest_host) for path in dvd_disk_paths: self._pathutils.copyfile(path, dest_path) def plug_vifs(self, instance, network_info): if network_info: for vif in network_info: self._vif_driver.plug(instance, vif) def unplug_vifs(self, instance, network_info): if network_info: for vif in network_info: self._vif_driver.unplug(instance, vif) def _check_hotplug_available(self, instance): """Check whether attaching an interface is possible for the given instance. :returns: True if attaching / detaching interfaces is possible for the given instance. """ vm_state = self._get_vm_state(instance.name) if vm_state == os_win_const.HYPERV_VM_STATE_DISABLED: # can attach / detach interface to stopped VMs. return True if not self._hostutils.check_min_windows_version(10, 0): # TODO(claudiub): add set log level to error after string freeze. LOG.debug("vNIC hot plugging is supported only in newer " "versions than Windows Hyper-V / Server 2012 R2.") return False if (self._vmutils.get_vm_generation(instance.name) == constants.VM_GEN_1): # TODO(claudiub): add set log level to error after string freeze. LOG.debug("Cannot hot plug vNIC to a first generation VM.", instance=instance) return False return True def attach_interface(self, instance, vif): if not self._check_hotplug_available(instance): raise exception.InterfaceAttachFailed(instance_uuid=instance.uuid) LOG.debug('Attaching vif: %s', vif['id'], instance=instance) self._vmutils.create_nic(instance.name, vif['id'], vif['address']) self._vif_driver.plug(instance, vif) def detach_interface(self, instance, vif): try: if not self._check_hotplug_available(instance): raise exception.InterfaceDetachFailed( instance_uuid=instance.uuid) LOG.debug('Detaching vif: %s', vif['id'], instance=instance) self._vif_driver.unplug(instance, vif) self._vmutils.destroy_nic(instance.name, vif['id']) except os_win_exc.HyperVVMNotFoundException: # TODO(claudiub): add set log level to error after string freeze. LOG.debug("Instance not found during detach interface. It " "might have been destroyed beforehand.", instance=instance) raise exception.InterfaceDetachFailed(instance_uuid=instance.uuid) def rescue_instance(self, context, instance, network_info, image_meta, rescue_password): try: self._rescue_instance(context, instance, network_info, image_meta, rescue_password) except Exception as exc: with excutils.save_and_reraise_exception(): LOG.error("Instance rescue failed. Exception: %(exc)s. 
" "Attempting to unrescue the instance.", {'exc': exc}, instance=instance) self.unrescue_instance(instance) def _rescue_instance(self, context, instance, network_info, image_meta, rescue_password): rescue_image_id = image_meta.id or instance.image_ref rescue_vhd_path = self._create_root_vhd( context, instance, rescue_image_id=rescue_image_id) rescue_vm_gen = self.get_image_vm_generation(instance.uuid, image_meta) vm_gen = self._vmutils.get_vm_generation(instance.name) if rescue_vm_gen != vm_gen: err_msg = _('The requested rescue image requires a different VM ' 'generation than the actual rescued instance. ' 'Rescue image VM generation: %(rescue_vm_gen)s. ' 'Rescued instance VM generation: %(vm_gen)s.') % dict( rescue_vm_gen=rescue_vm_gen, vm_gen=vm_gen) raise exception.ImageUnacceptable(reason=err_msg, image_id=rescue_image_id) root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) if not root_vhd_path: err_msg = _('Instance root disk image could not be found. ' 'Rescuing instances booted from volume is ' 'not supported.') raise exception.InstanceNotRescuable(reason=err_msg, instance_id=instance.uuid) controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] self._vmutils.detach_vm_disk(instance.name, root_vhd_path, is_physical=False) self._attach_drive(instance.name, rescue_vhd_path, 0, self._ROOT_DISK_CTRL_ADDR, controller_type) self._vmutils.attach_scsi_drive(instance.name, root_vhd_path, drive_type=constants.DISK) if configdrive.required_by(instance): self._detach_config_drive(instance.name) rescue_configdrive_path = self._create_config_drive( context, instance, injected_files=None, admin_password=rescue_password, network_info=network_info, rescue=True) self.attach_config_drive(instance, rescue_configdrive_path, vm_gen) self.power_on(instance) def unrescue_instance(self, instance): self.power_off(instance) root_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name) rescue_vhd_path = self._pathutils.lookup_root_vhd_path(instance.name, rescue=True) if (instance.vm_state == vm_states.RESCUED and not (rescue_vhd_path and root_vhd_path)): err_msg = _('Missing instance root and/or rescue image. ' 'The instance cannot be unrescued.') raise exception.InstanceNotRescuable(reason=err_msg, instance_id=instance.uuid) vm_gen = self._vmutils.get_vm_generation(instance.name) controller_type = VM_GENERATIONS_CONTROLLER_TYPES[vm_gen] self._vmutils.detach_vm_disk(instance.name, root_vhd_path, is_physical=False) if rescue_vhd_path: self._vmutils.detach_vm_disk(instance.name, rescue_vhd_path, is_physical=False) fileutils.delete_if_exists(rescue_vhd_path) self._attach_drive(instance.name, root_vhd_path, 0, self._ROOT_DISK_CTRL_ADDR, controller_type) self._detach_config_drive(instance.name, rescue=True, delete=True) # Reattach the configdrive, if exists and not already attached. 
configdrive_path = self._pathutils.lookup_configdrive_path( instance.name) if configdrive_path and not self._vmutils.is_disk_attached( configdrive_path, is_physical=False): self.attach_config_drive(instance, configdrive_path, vm_gen) self.power_on(instance) def _set_instance_disk_qos_specs(self, instance): quota_specs = self._get_scoped_flavor_extra_specs(instance, 'quota') disk_total_bytes_sec = int( quota_specs.get('disk_total_bytes_sec') or 0) disk_total_iops_sec = int( quota_specs.get('disk_total_iops_sec') or self._volumeops.bytes_per_sec_to_iops(disk_total_bytes_sec)) if disk_total_iops_sec: local_disks = self._get_instance_local_disks(instance.name) for disk_path in local_disks: self._vmutils.set_disk_qos_specs(disk_path, disk_total_iops_sec) def _get_instance_local_disks(self, instance_name): instance_path = self._pathutils.get_instance_dir(instance_name) instance_disks = self._vmutils.get_vm_storage_paths(instance_name)[0] local_disks = [disk_path for disk_path in instance_disks if instance_path in disk_path] return local_disks def _get_scoped_flavor_extra_specs(self, instance, scope): extra_specs = instance.flavor.extra_specs or {} filtered_specs = {} for spec, value in extra_specs.items(): if ':' in spec: _scope, key = spec.split(':') if _scope == scope: filtered_specs[key] = value return filtered_specs
Set to soothing music and beautiful visuals of the magnificent foothills of the Sierra Nevada mountains, this program is designed to help viewers tap into the power of the chakras while learning deep relaxation. After explaining the benefits of channeling the chakras, teachers lead viewers through a series of yoga movements to stimulate each chakra. A lecture by Savitri Simpson, author of Chakras for Starters, is also included.
def merge_words(words, desc):
    """
    >>> list(merge_words(["Wow", "Yo", "Yo"], "Wow and Yo Yo"))
    ['Wow', 'Yo Yo']
    """
    size = len(words)
    skipNext = False
    for i, word in enumerate(words):
        if skipNext:
            skipNext = False
            continue
        if i + 1 < size:
            comb = word + " " + words[i + 1]
            if desc.find(comb) != -1:
                yield comb
                skipNext = True
            else:
                yield word
        else:
            yield word


def capitalized(letter):
    return letter.lower() == letter


def get_keywords(text):
    """
    >>> from collections import namedtuple
    >>> event = namedtuple('event', 'description keywords')
    >>> def put(i): print i
    >>> get_keywords("Go and see Vince Vincent")
    ['Go', 'Vince Vincent']
    >>> get_keywords("GO AND SEE VINCE VINCENT")
    []
    """
    words = text.split(" ")
    # Get words longer than one letter
    words = filter(lambda word: len(word) > 1, words)
    # Filter not capitalized words
    words = filter(lambda word: not capitalized(word[0]), words)
    # Remove ALL CAPS words
    words = filter(lambda word: capitalized(word[1]), words)
    # Merge words that are adjacent
    words = list(merge_words(words, text))
    return words
BRADES – The impact of climate change on Small Island Developing States (SIDS), including those in the Caribbean, will be the focus of this weekend's Caribbean Climate 2015, a conference bringing the French and English Caribbean nations together. Montserrat is to be represented by the Hon. Minister of Agriculture Claude Hogan, who will join other CARICOM Ministers of the Environment for the two-day event starting in Fort-de-France, Martinique, on 09 May 2015 and ending in Guadeloupe with the President of the Republic of France.

While SIDS produce a minuscule fraction of global greenhouse gas emissions (GHGs), their location either below or barely above sea level makes them among the most vulnerable to the effects of climate change, such as sea-level rise and extreme weather conditions. Studies show that climate change is most likely to impact tourism, energy, water, agriculture, the human habitat and the economic infrastructure. Another significant threat is linked to the projected impact of climate change on human health, through an increase in the presence of vectors of tropical diseases, such as malaria and dengue, and the prevalence of respiratory illnesses. These diseases will affect the well-being and productivity of the workforce of the sub-region and compromise the economic growth, competitiveness and development potential of the Caribbean Community.

The CARICOM officials intend to send the message that the current response from the international community to the threats posed by the impacts of climate change, including the financial resources made available, is inadequate. "It has been underscored in a number of scientific reports that the global goal of limiting average temperature increase to below 2°C is inadequate for protecting fragile ecosystems in SIDS from the adverse impacts of climate change. In the case of the Caribbean, some ecosystems are already experiencing the negative effects of climate change in that they are approaching the limits of their adaptive capacities. This situation poses major challenges for the livelihood and development of the people in the Caribbean," a CARICOM communiqué noted.
"""Validators""" # pylint: disable=too-few-public-methods from formencode import Schema, validators, ForEach from ..utils.validation import ValidateISODate class PublicKeyValidator(Schema): """Public Key entity Validator""" id = validators.String() active = validators.Bool() date_created = ValidateISODate() date_expires = ValidateISODate() public_key = validators.String() key_type = validators.Int(if_missing=0, if_empty=0) allow_extra_fields = True class DirectoryUserDeviceLinkResponseValidator(Schema): """Directory User Device link response validator""" qrcode = validators.String() # URL code = validators.String(min=7) device_id = validators.String() allow_extra_fields = True class DirectoryGetDeviceResponseValidator(Schema): """Directory get Device response validator""" id = validators.String() name = validators.String() status = validators.Int() type = validators.String() allow_extra_fields = True class DirectoryGetSessionsValidator(Schema): """Directory get Sessions validator""" auth_request = validators.String() date_created = ValidateISODate() service_icon = validators.String() service_id = validators.String() service_name = validators.String() allow_extra_fields = True class DirectoryValidator(Schema): """Directory entity validator""" id = validators.String() service_ids = ForEach(validators.String()) sdk_keys = ForEach(validators.String()) premium = validators.Bool() name = validators.String() android_key = validators.String() ios_certificate_fingerprint = validators.String() active = validators.Bool() denial_context_inquiry_enabled = validators.Bool(if_empty=False, if_missing=False) webhook_url = validators.String() allow_extra_fields = True class DirectoryDeviceLinkCompletionValidator(Schema): """Directory User Device link completion validator""" type = validators.OneOf(['DEVICE_LINK_COMPLETION']) device_id = validators.String() device_public_key = validators.String() device_public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponseValidator(Schema): """Authorization Response entity validator""" auth = validators.String() auth_jwe = validators.String(if_missing=None, if_empty=None) service_user_hash = validators.String() org_user_hash = validators.String() user_push_id = validators.String() public_key_id = validators.String() allow_extra_fields = True class AuthorizationResponsePackageValidator(Schema): """Authorization Response Package entity validator""" service_pins = ForEach() auth_request = validators.String() # UUID response = validators.Bool() device_id = validators.String() allow_extra_fields = True class AuthMethodsValidator(Schema): """Auth methods validator""" method = validators.String() set = validators.Bool(if_empty=None) active = validators.Bool(if_empty=None) allowed = validators.Bool(if_empty=None) supported = validators.Bool(if_empty=None) user_required = validators.Bool(if_empty=None) passed = validators.Bool(if_empty=None) error = validators.Bool(if_empty=None) class GeoFenceValidator(Schema): """ GeoFence Validator, can represent both GeoFence and GeoCircleFence """ name = validators.String(if_missing=None) latitude = validators.Number() longitude = validators.Number() radius = validators.Number() class GeoCircleFenceValidator(GeoFenceValidator): """ GeoFence Validator, can represent ONLY GeoCircleFence """ type = validators.OneOf(["GEO_CIRCLE"]) class TerritoryFenceValidator(Schema): """ TerritoryFence Validator""" name = validators.String(if_missing=None) type = validators.OneOf(["TERRITORY"], if_missing=None) country = 
validators.Regex(r"^[A-Z]{2}$", not_empty=True) administrative_area = validators.Regex(r"^[A-Z]{2}-[A-Z]{2}[A-Z]?$", if_missing=None) postal_code = validators.String(if_missing=None, if_empty=None) @staticmethod def _validate_python(value, _state): if not value["administrative_area"]: del value["administrative_area"] if not value["postal_code"]: del value["postal_code"] class FenceValidator(Schema): """Fence validator""" allow_extra_fields = True type = validators.OneOf(["GEO_CIRCLE", "TERRITORY"], if_missing=None) name = validators.String(if_missing=None) @staticmethod def _validate_python(value, _state): if not value["type"]: del value["type"] GeoFenceValidator().to_python(value) elif value["type"] == "GEO_CIRCLE": GeoCircleFenceValidator().to_python(value) elif value["type"] == "TERRITORY": TerritoryFenceValidator().to_python(value) class AuthPolicyValidator(Schema): """Auth policy validate for auth method insights""" requirement = validators.String(if_missing=None, if_empty=None) amount = validators.Number(if_missing=None) types = ForEach(validators.String(), if_missing=None) geofences = ForEach(FenceValidator(), if_missing=[], if_empty=[]) class PolicyTerritoryValidator(Schema): """Validates Territory fences inside policies""" allow_extra_fields = True country = validators.String(not_empty=True) administrative_area = validators.String(if_missing=None) postal_code = validators.String(if_missing=None, if_empty=None) class PolicyGeoCircleValidator(Schema): """Validates GeoCircle fences inside policies""" allow_extra_fields = True latitude = validators.Number(not_empty=True) longitude = validators.Number(not_empty=True) radius = validators.Number(not_empty=True) class PolicyFenceValidator(Schema): """Validates fence objects in policies""" allow_extra_fields = True type = validators.String(not_empty=True) name = validators.String(if_missing=None, not_empty=True) @staticmethod def _validate_other(value, state): if "type" in value: if value["type"] == "TERRITORY": value.update(PolicyTerritoryValidator().to_python( value, state)) elif value["type"] == "GEO_CIRCLE": value.update(PolicyGeoCircleValidator().to_python( value, state)) return value class ConditionalGeoFenceValidator(Schema): """Validates conditional geofence policies""" allow_extra_fields = True inside = validators.NotEmpty(accept_iterator=True) outside = validators.NotEmpty(accept_iterator=True) fences = ForEach(not_empty=True) @staticmethod def _validate_python(value, state): if 'inside' in value and 'outside' in value: value['inside'] = PolicyBaseValidator().to_python( value['inside'], state) value['outside'] = PolicyBaseValidator().to_python( value['outside'], state) return value class MethodAmountPolicyValidator(Schema): """Validates method amount policies""" allow_extra_fields = True amount = validators.Int(not_empty=True) class FactorsPolicyValidator(Schema): """Validates factors for policies""" allow_extra_fields = True factors = ForEach(validators.OneOf( ["KNOWLEDGE", "INHERENCE", "POSSESSION"]), not_empty=True) class PolicyBaseValidator(Schema): """Base policy validator for legacy and new policies""" allow_extra_fields = True type = validators.String(if_missing="LEGACY") fences = ForEach(PolicyFenceValidator()) @staticmethod def _validate_python(value, state): if value["type"] == "COND_GEO": value.update(ConditionalGeoFenceValidator().to_python( value, state)) elif value["type"] == "METHOD_AMOUNT": value.update(MethodAmountPolicyValidator().to_python(value, state)) elif value["type"] == "FACTORS": 
value.update(FactorsPolicyValidator().to_python(value, state)) elif value["type"] == "LEGACY": if "deny_rooted_jailbroken" in value: del value["deny_rooted_jailbroken"] if "deny_emulator_simulator" in value: del value["deny_emulator_simulator"] del value["fences"] return value class ServiceSecurityPolicyValidator(PolicyBaseValidator): """Service Policy validator""" allow_extra_fields = True deny_rooted_jailbroken = validators.Bool(if_missing=None) deny_emulator_simulator = validators.Bool(if_missing=None) class JWEAuthorizationResponsePackageValidator(Schema): """Authorization Response JWE payload entity validator""" service_pins = ForEach() auth_request = validators.String() # UUID type = validators.String() reason = validators.String() denial_reason = validators.String(if_missing=None, if_empty=None) device_id = validators.String() auth_policy = AuthPolicyValidator(if_missing=None) auth_methods = ForEach(AuthMethodsValidator()) allow_extra_fields = True class AuthorizeValidator(Schema): """Authorize entity validator""" auth_request = validators.String(not_empty=True) push_package = validators.String(if_missing=None, not_empty=True) device_ids = ForEach(validators.String(), if_missing=None) allow_extra_fields = True class AuthorizeSSEValidator(Schema): """Authorize server-sent-event (webhook) validator""" service_user_hash = validators.String() api_time = validators.String() allow_extra_fields = True class ServiceValidator(Schema): """Service entity validation""" id = validators.String() icon = validators.String() name = validators.String() description = validators.String() active = validators.Bool() callback_url = validators.String() allow_extra_fields = True class ServiceTOTPVerificationValidator(Schema): """Service TOTP verification validation""" valid = validators.Bool() allow_extra_fields = True class DirectoryUserTOTPValidator(Schema): """Directory TOTP post validator""" algorithm = validators.String() digits = validators.Int() period = validators.Int() secret = validators.String() allow_extra_fields = True
The profusion of new data sources, along with analytic platforms that allow processing at scale and in real time, has brought machine learning (not a new concept; it started in the 1950s) to Main Street. Along with it has come a rash of misconceptions, misperceptions and, yes, even fear. While we can't tackle the whole problem and provide a comprehensive introduction to machine learning in a single article, we can start by highlighting five common misconceptions that will keep non-quants in the conversation.

Unlike most traditional statistical models, the models created by machine learning algorithms are often nonlinear and can have many thousands (and even billions!) of rules or parameters that define the model. So A plus B does not always equal C. Don't confuse black box processing with blind faith. If the analytic mechanisms, or more specifically the processing pathways, are not clear or easily reproducible, how do you validate results? When it comes to machine learning the answer is deceptively simple. Does the algorithm accurately predict future events or result in desired outcomes? Are the outputs useful? That's it. No more, no less. Machine learning done right can be characterized by the tag line: complicated methods, consumable results. The other nugget here? Machine learning should be integral to analytic discovery, not an adjunct activity.

Machine learning is a tool in the analytics toolbox. Like any tool it must be thoughtfully applied lest it become the proverbial hammer looking for a nail. As machine learning emerged from academia, early adopters often found themselves expending significant time and effort on problems that could have been easily solved with traditional statistical algorithms. Certain problems and data lend themselves to machine learning: in simple terms, problems where accuracy is more important than interpretation, and data that presents problems for traditional analysis techniques. For example, consider object recognition in images. We may not care to understand how the model works; we just care that the model identifies certain characters or objects in new images. Image datasets can be wider than they are deep (because of the high number of pixels in HD images) and can contain many correlated variables (pixels that are close to one another often have very similar values). Wide data and correlated data can present problems for traditional regression analysis.

When it comes to machine learning, a simple algorithm with more data can often beat a complicated algorithm with less data, even when the bigger data set is slightly dirtier. (No, I'm not arguing that data does not need to be processed before being used in machine learning algorithms.) Regardless, a cautionary note is in order here. For the inexperienced data scientist, more complicated might seem better. Or, the higher the accuracy the better. However, for many practical applications, minute improvements in model accuracy will not translate into meaningful operational improvements. More data and features may also unnecessarily complicate the algorithm. There is a big difference between the real world and a Kaggle contest! The balancing act here is between complexity and the ability to consume. When should you call time? See "The Proof Is in the Pudding" above. How can results be applied?
Machine learning is great at determining what to do, but not necessarily so good at defining how (a challenge that dogged early robotics). What is the proper response? For instance, when a pattern emerges that has global health or political ramifications, what is the proper next step? Are results in line with expectations? Are there exceptions to be addressed? Consider Stanford and Google's work in computer vision. While dang good, it was not foolproof: goats got characterized as dogs, a field of tulips as hot air balloons. And, yes, these are inconsequential gaffes compared to more recently publicized mistakes, but you get the gist. Does the model need to be tuned excessively for realistic usage?

The bottom line? While this was just a short introduction to machine learning, one thing we know for sure: it is still a collaboration between man and "machine."

Kimberly Nevala is the Director of Business Strategies for SAS Best Practices, responsible for industry education, key client strategies and market analysis in the areas of business intelligence and analytics, data governance and master data management. She has more than 15 years of experience advising clients on the development and implementation of strategic customer and data management programs and managing mission-critical projects.
from django.conf.urls import url, include

from . import views

app_name = 'portal'

urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
    url(r'^competition/$', views.CompetitionIndex.as_view(), name='competition'),
    url(r'^venue/$', views.VenueIndex.as_view(), name="venues"),
    url(r'^venue/manage/$', views.VenueManagementView.as_view(), name="manage_venue"),
    url(r'^venue/create/$', views.VenueCreate.as_view(), name='create_venue'),
    url(r'^venue/(?P<slug>.+)/', views.VenueDetailView.as_view(), name="venue_detail"),
    url(r'^teams/$', views.TeamListView.as_view(), name='teams'),
    url(r'^teams/(?P<slug>[\w\-]+)/$', views.TeamDetailView.as_view(), name="team_profile"),
    url(r'^teams/(?P<slug>[\w\-]+)/edit/$', views.TeamEditView.as_view(), name="edit_team"),
    url(r'^experts/$', views.ExpertListView.as_view(), name='experts'),
    url(r'^expert/(?P<slug>[\w\.]+)/$', views.ExpertDetailView.as_view(), name='expert_profile'),
    url(r'^students/$', views.StudentListView.as_view(), name="students"),
    url(r'^student/(?P<slug>[\w\.]+)/', views.StudentDetailView.as_view(), name='student_profile'),
    url(r'^session/', include('SessionManagement.urls')),
]
For her debut solo exhibition at Tube Factory Art Space, Laura Ortiz Vega presents a new series of “thread paintings” inspired by the rhetoric surrounding President Trump’s proposed US-Mexico border wall. Vega takes as her departure point the now famous images of the eight border wall samples President Trump browsed in 2017 as they were being tested along the actual border between San Diego and Tijuana. Listening to the speeches Trump has given about the wall, and reading his tweets on the subject, Vega then extracted the eight adjectives the President most frequently used to describe the project. Each word an imposing declaration; each wall sample an impenetrable facade. Seizing the chance to subvert public perception of these messages, Vega presents the adjectives like graffiti on the border wall samples, turning each section of wall into a billboard advertising its own hyperbolically alleged attributes. Vega models her distinctive thread painting method after the traditional craft techniques of the indigenous Huichol people of western Mexico. She first covers a surface with cera de Campeche, a natural beeswax from the Mexican state of Campeche. She then “draws” on that surface with cotton Perlé embroidery thread, using a palette knife to embed the thread into the wax. It is a long, delicate, and sometimes messy process, and takes weeks to finish a single piece. The resulting image-object has a texture reminiscent of a woven textile, yet is inflexible. Vega was born in Mexico City in 1975. She studied Industrial Design at Universidad Iberoamericana in Mexico City, earning her BFA in 2000. Her work has been exhibited extensively, including at the Museo de Arte Popular de la Ciudad de Mexico, Lyons Weir Gallery in New York, The Shooting Gallery in San Francisco, Breeze Block Gallery in Portland, OR, and Galerie Ernst Hilger in Vienna, Austria. It has been featured at Zona MACO, Pulse LA and MIAMI, Houston Fine Art Fair, Art Chicago NEXT, Art Market San Francisco, Art Toronto, London Art Fair, PINTA Art Fair, and Supermarket Art Fair, Sweden. It was selected for the Tequila CENTENARIO Award at Zona MACO and was awarded with an Honorific Mention at the Bienal de Artes Visuales de Yucatán in 2009.
# -*- coding: utf-8 -*-
import re
import md5
import string

from exceptions import Exception


class TxtHash:
    __id = None
    __hw_type = None
    __regex = None
    __is_id = None
    __digs = None
    __regex_str = "([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})[^0-9a-f]?([0-9a-f]{2})"

    def __init__(self, is_id, clientid_or_mac, hw_type):
        self.__id = clientid_or_mac
        self.__hw_type = hw_type
        self.__is_id = is_id
        self.__digs = string.digits + string.lowercase
        self.__regex = re.compile(self.__regex_str, re.IGNORECASE)

    # Return the value for the given hw_type; only Ethernet is supported for now
    def hw_type_to_int(self):
        return {
            'Ethernet': 1,
        }[self.__hw_type]

    def __int2base(self, x, base):
        if x < 0:
            sign = -1
        elif x == 0:
            return '0'
        else:
            sign = 1
        x *= sign
        digits = []
        while x:
            digits.append(self.__digs[x % base])
            x /= base
        if sign < 0:
            digits.append('-')
        digits.reverse()
        return ''.join(digits)

    def __get_prefix(self):
        # "31" indicates the DHCP "Client-Identifier" field was used to compute the hash
        if self.__is_id:
            return "31"
        else:
            return "00"

    def txt(self):
        try:
            mac_itens = []
            # Collect the groups matched by the regular expression
            for item in re.finditer(self.__regex, self.__id):
                for item2 in item.groups():
                    mac_itens.append(item2)
            # Get the value for the given hw_type
            decimals = [self.hw_type_to_int()]
            # Convert each element to decimal
            for item in mac_itens:
                a = int(item, 16)
                decimals.append(a)
            # Compute the MD5
            m = md5.new()
            m.update(bytearray(decimals))
            # Return it together with the prefix
            return self.__get_prefix() + m.hexdigest()
        except Exception:
            raise Exception("Error calculating TXT hash.")
Still Tasty: Answering the age-old question: Does this mayo smell ok?
... that we'd be able to make a real difference for so many small businesses and women-run companies, or that we'd end up with an incredible community of the coolest readers on the planet. (If we do say so ourselves.)
from dynaconf import settings

print("Read from settings.py:", settings.PYTHON_VAR)  # noqa

# BY DEFAULT 'development' is the current env
print("Read from development_settings.py:", settings.PYTHON_DEV_VAR)  # noqa

# If ENV_FOR_DYNACONF=production is in envvars so
# print("Read from production_settings.py:", settings.PYTHON_PROD_VAR)  # noqa

# global_ overrides previous configs
print("Read from global_settings.py:", settings.PYTHON_GLOBAL_VAR)  # noqa

print("Read from settings.yaml:", settings.YAML_VAR)  # noqa
print("Read from settings.yml:", settings.YML_VAR)  # noqa
print("Read from settings.toml:", settings.TOML_VAR)  # noqa
print("Read from settings.tml:", settings.TML_VAR)  # noqa
print("Read from settings.ini:", settings.INI_VAR)  # noqa
print("Read from settings.conf:", settings.CONF_VAR)  # noqa
print("Read from settings.properties:", settings.PROPERTIES_VAR)  # noqa
print("Read from settings.json:", settings.JSON_VAR)  # noqa
print("Read from .env:", settings.ENV_VAR)  # noqa
print("Read from .env:", settings.WORKS)  # noqa

assertions = {
    "YAML_VAR": True,
    "YML_VAR": True,
    "TOML_VAR": True,
    "INI_VAR": "1",
    "CONF_VAR": "1",
    "PROPERTIES_VAR": "1",
    "JSON_VAR": True,
    "ENV_VAR": True,
    "WORKS": "multiple_sources",
}

for key, value in assertions.items():
    found = settings.get(key)
    assert found == getattr(settings, key)
    assert found == value, f"expected: {key}: [{value}] found: [{found}]"
Mrs. Kylie Irvin is a native of Skaneateles Falls, New York and moved to Asheville, N.C. to continue her lifelong journey in music education and performance. Kylie has a diverse musical background in several instruments such as trombone, guitar, voice, and flute. Her musical diversity also spans multiple genres including rock, classical, musical theatre, and jazz. She received her degree in music education with a concentration in bass trombone from the Greatbatch School of Music at Houghton College in Western New York and studied instrumental conducting in graduate school at Houghton where she conducted the Houghton Philharmonia and the wind ensemble. From there she decided to move to Asheville, N.C. to cultivate her part in a larger musical community. Since her move to Asheville, she is grateful to have had many opportunities as a performer, teacher, and observer. She has directed rock band, musical theatre, performed in various ensembles as a vocalist and trombonist, and served others with music therapy. Each new experience has given her insight on how to teach students of all different ages, backgrounds, and abilities. Kylie currently teaches music at Candler and Pisgah Elementary for Buncombe County Schools and teaches private lessons at Asheville Music School. During the summer, she directs music at the Black Mountain Center for the Arts and volunteers for Girls Rock Asheville. She specializes in teaching others to read musical notation using the ‘solfege’ system and ear training. Kylie takes a holistic approach to voice lessons. She urges her students to look at the study of voice as a process and a way of life. Lessons with Kylie are positive and encouraging so that the singer may be comfortable to explore their own unique voice. She enjoys teaching ear training and theory to provide a deeper understanding of musicianship as required by the student. A typical lesson will involve reinforcement of proper vocal and breathing technique, an exploration of music theory, and working through repertoire that was chosen by the student. She is open and experienced in many styles such as classical, jazz, broadway, and rock, and uses classical and non traditional methods to help students to achieve their musical goals.
r"""Solve Stokes equations using a coupled formulation The Stokes equations are in strong form .. math:: -\nabla^2 u - \nabla p &= f \\ \nabla \cdot u &= h \\ u(x, y=\pm 1) &= 0 \\ u(x=\pm 1, y) &= 0 where :math:`f` and :math:`h` are given functions of space. In addition we require :math:`\int p d\ = 0`, which is achieved by fixing the coefficient :math:`\hat{p}_{0, 0} = 0`. We use a tensorproductspace with a composite Legendre for the Dirichlet space and a regular Legendre for the pressure space. To remove all nullspaces we use a P_{N} x P_{N-2} basis, with P_{N-2} for the pressure. """ import os import numpy as np from sympy import symbols, sin, cos from shenfun import * x, y = symbols("x,y", real=True) assert comm.Get_size() == 1, "Two non-periodic directions only have solver implemented for serial" # Some right hand side (manufactured solution) #uex = (cos(4*np.pi*x)+sin(2*np.pi*y))*(1-y**2)*(1-x**2) #uey = (sin(2*np.pi*x)+cos(6*np.pi*y))*(1-y**2)*(1-x**2) uex = (cos(2*np.pi*x)*sin(2*np.pi*y))*(1-y**2)*(1-x**2) uey = (-sin(2*np.pi*x)*cos(2*np.pi*y))*(1-x**2) pe = -0.1*sin(2*x)*sin(4*y) fx = -uex.diff(x, 2) - uex.diff(y, 2) - pe.diff(x, 1) fy = -uey.diff(x, 2) - uey.diff(y, 2) - pe.diff(y, 1) h = uex.diff(x, 1) + uey.diff(y, 1) N = (50, 50) family = 'Chebyshev' #family = 'Legendre' D0X = FunctionSpace(N[0], family, bc=(0, 0), scaled=True) D0Y = FunctionSpace(N[1], family, bc=(-sin(2*np.pi*x)*(1-x**2), -sin(2*np.pi*x)*(1-x**2)), scaled=True) D1Y = FunctionSpace(N[1], family, bc=(0, 0), scaled=True) PX = FunctionSpace(N[0], family) PY = FunctionSpace(N[1], family) TD = TensorProductSpace(comm, (D0X, D0Y)) TD1 = TensorProductSpace(comm, (D0X, D1Y)) Q = TensorProductSpace(comm, (PX, PY), modify_spaces_inplace=True) V = VectorSpace([TD1, TD]) VQ = CompositeSpace([V, Q]) # To get a P_N x P_{N-2} space, just pick the first N-2 items of the pressure basis # Note that this effectively sets P_N and P_{N-1} to zero, but still the basis uses # the same quadrature points as the Dirichlet basis, which is required for the inner # products. 
PX.slice = lambda: slice(0, PX.N-2) PY.slice = lambda: slice(0, PY.N-2) up = TrialFunction(VQ) vq = TestFunction(VQ) u, p = up v, q = vq # Assemble blocks of the complete block matrix if family.lower() == 'legendre': A00 = inner(grad(v), grad(u)) A01 = inner(div(v), p) else: A00 = inner(v, -div(grad(u))) A01 = inner(v, -grad(p)) A10 = inner(q, div(u)) M, BM = BlockMatrices(A00+A01+A10) # Note BM is boundary matrix uh_hat = Function(VQ) # Assemble right hand side fh = Array(VQ, buffer=(fx, fy, h)) f_, h_ = fh fh_hat = Function(VQ) f_hat, h_hat = fh_hat f_hat = inner(v, f_, output_array=f_hat) h_hat = inner(q, h_, output_array=h_hat) # Solve problem uh_hat = M.solve(fh_hat, u=uh_hat, constraints=((2, 0, 0),), BM=BM) # (2, N[0]-1, 0), # (2, N[0]*N[1]-1, 0), # (2, N[0]*N[1]-N[1], 0))) # Constraint for component 2 of mixed space # Move solution to regular Function up = uh_hat.backward() u_, p_ = up # Exact solution ux, uy = Array(V, buffer=(uex, uey)) pe = Array(Q, buffer=pe) # Compute error error = [comm.reduce(np.linalg.norm(ux-u_[0])), comm.reduce(np.linalg.norm(uy-u_[1])), comm.reduce(np.linalg.norm(pe-p_))] if comm.Get_rank() == 0: print('Error u v p') print(' %2.4e %2.4e %2.4e' %(error[0], error[1], error[2])) #assert np.all(abs(np.array(error)) < 1e-7), error if 'pytest' not in os.environ: import matplotlib.pyplot as plt plt.figure() X = TD.local_mesh(True) plt.contourf(X[0], X[1], p_, 100) plt.figure() plt.contourf(X[0], X[1], pe, 100) plt.figure() plt.quiver(X[0], X[1], u_[0], u_[1]) plt.figure() plt.quiver(X[0], X[1], ux, uy) plt.figure() plt.spy(M.diags()) plt.figure() plt.contourf(X[0], X[1], u_[0], 100) #plt.show()
Why Comcast Business in Mead, Washington? Get crystal-clear calling powered by our Gig-speed network. With advanced solutions that can grow with your Mead, Washington business, one-touch conference dialing, an easy-to-use mobile app, and reasonable monthly prices, you can finally go beyond the office — and we can go with you. Stay connected to your business from anywhere. Like, Mead, Washington. Sharp hi-res images let you see what’s happening, day or night.
from thinc.api import Model, noop from .parser_model import ParserStepModel def TransitionModel( tok2vec, lower, upper, resize_output, dropout=0.2, unseen_classes=set() ): """Set up a stepwise transition-based model""" if upper is None: has_upper = False upper = noop() else: has_upper = True # don't define nO for this object, because we can't dynamically change it return Model( name="parser_model", forward=forward, dims={"nI": tok2vec.get_dim("nI") if tok2vec.has_dim("nI") else None}, layers=[tok2vec, lower, upper], refs={"tok2vec": tok2vec, "lower": lower, "upper": upper}, init=init, attrs={ "has_upper": has_upper, "unseen_classes": set(unseen_classes), "resize_output": resize_output, }, ) def forward(model, X, is_train): step_model = ParserStepModel( X, model.layers, unseen_classes=model.attrs["unseen_classes"], train=is_train, has_upper=model.attrs["has_upper"], ) return step_model, step_model.finish_steps def init(model, X=None, Y=None): model.get_ref("tok2vec").initialize(X=X) lower = model.get_ref("lower") lower.initialize() if model.attrs["has_upper"]: statevecs = model.ops.alloc2f(2, lower.get_dim("nO")) model.get_ref("upper").initialize(X=statevecs)
*Note: the photo eye and reflector may differ in appearance from what is received, as DIYGateOpeners.com uses a few sources for our brand, but quality and performance are always the same. The NIR retro-reflective photo eye can be used as a reversing sensor for commercial overhead doors, gates, and parking barriers. Its compact design, mounting hardware and indicators make it easy to install. The water-tight design allows for outdoor installation. *The beam is not actually visible; it is displayed in yellow in the above picture for informational purposes.
#!/usr/bin/env python # -*- coding: utf-8 -*- """ 1-1.py license BSD author chen_ji <wakamori111 at gmail.com> """ import datetime import random import sys class DayLife: """Life in a day.""" def __init__(self, date, life): """Set birth datetime and life.""" self.birthdate = date self.life = life finalyear = self.birthdate.year + self.life finaldate = datetime.datetime(finalyear, self.birthdate.month, self.birthdate.day) self.finaldate = finaldate - datetime.timedelta(days=1) def now(self): """Calculate current time.""" curdate = datetime.datetime.now() maxdays = (self.finaldate - self.birthdate).days curdays = (curdate - self.birthdate).days curtime = datetime.timedelta(days=1) / maxdays curtime = curtime * curdays return datetime.time( (curtime.seconds / 60) / 60, (curtime.seconds / 60) % 60, curtime.seconds % 60) if __name__ == '__main__': # options startyear = 1990 endyear = 2000 life = 80 print startyear, "<= a <=", endyear print "n =", life daycount = (datetime.datetime(endyear, 12, 31) - datetime.datetime(startyear, 1, 1)).days birthdate = datetime.datetime(startyear, 1, 1) + \ datetime.timedelta(days=random.randint(0, daycount)) args = sys.argv if len(args) == 4: year = int(args[1]) month = int(args[2]) date = int(args[3]) birthdate = datetime.datetime(year, month, date) print "birthdate:", birthdate.date() mylife = DayLife(birthdate, life) print "finaldate:", mylife.finaldate.date() print "today:", mylife.now()
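Since the script only prints values for the current wall-clock time, here is a tiny self-contained sketch of the same proportional mapping that DayLife.now() performs, with the elapsed fraction fixed at one half so the expected output is easy to check by hand. It reuses the script's default birth year and life span and does not depend on the class itself.

# The mapping in DayLife.now(): the fraction of a life already lived becomes
# the same fraction of a single 24-hour day. Halfway through life ~ midday.
import datetime

birthdate = datetime.datetime(1990, 1, 1)
finaldate = datetime.datetime(1990 + 80, 1, 1) - datetime.timedelta(days=1)

maxdays = (finaldate - birthdate).days   # whole life span measured in days
curdays = maxdays // 2                   # pretend we are exactly halfway through

seconds_into_day = 86400 * curdays // maxdays
print(datetime.time(seconds_into_day // 3600,
                    (seconds_into_day // 60) % 60,
                    seconds_into_day % 60))   # -> 11:59:58, just shy of midday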
There are a lot of ways to rate how a fundraising campaign is performing. The current amount funded, visits per day, and times shared are a few of them. Of those, sharing is the most overlooked: so much emphasis is put on the total number of dollars raised that we forget about it. Continue reading to find out more about the value of sharing.
__file__ = 'downloadGET_v1' __date__ = '11/12/2015' __author__ = 'ABREZNIC' import arcpy, zipfile, os, shutil, urllib, urllib2, json, glob # http://blogs.esri.com/esri/arcgis/2013/10/10/quick-tips-consuming-feature-services-with-geoprocessing/ district = arcpy.GetParameterAsText(0) username = arcpy.GetParameterAsText(1) password = arcpy.GetParameterAsText(2) output = arcpy.GetParameterAsText(3).replace("\\", os.sep) directory = arcpy.env.scratchFolder + os.sep + district + "_GET" if not os.path.exists(directory): os.makedirs(directory) else: shutil.rmtree(directory) os.makedirs(directory) arcpy.AddMessage("directory created.") baseURL = "http://services.arcgis.com/KTcxiTD9dsQw4r7Z/arcgis/rest/services/GET_Maintenance_AGO/FeatureServer/0/query" arcpy.AddMessage("url created.") if district == "Statewide": where = "1=1" else: where = "" def getObjectIDs(query): params = {'where': query, 'returnIdsOnly': 'true', 'token': token, 'f': 'json'} req = urllib2.Request(baseURL, urllib.urlencode(params)) response = urllib2.urlopen(req) data = json.load(response) array = data["objectIds"] array.sort() arcpy.AddMessage("Object IDs Found") return array def createFC(fs): arcpy.CreateFileGDB_management(directory, "TxDOT_GuardrailEndTreatments") fgdb = directory + os.sep + "TxDOT_GuardrailEndTreatments" arcpy.CopyFeatures_management(fs, fgdb + ".gdb" + os.sep + "GET_" + district + "Dist") newFC = fgdb + ".gdb" + os.sep + "GET_" + district + "Dist" arcpy.AddMessage("feature class created.") return newFC def updatedQuery(low, high, trigger): if low != high: addition = """ AND "OBJECTID" >= """ + str(low) + " AND " + """"OBJECTID" < """ + str(high) if trigger == 1: addition = """ AND "OBJECTID" >= """ + str(low) else: addition = """ AND "OBJECTID" = """ + str(low) newQuery = where + addition return newQuery try: arcpy.AddMessage('\nGenerating Token\n') server = baseURL.split("//")[1].split("/")[0] tokenURL = 'http://' + server + '/arcgis/tokens/?username=' + username + '&password=' + password + '&referer=http%3A%2F%2F' + server + '&f=json' req = urllib2.Request(tokenURL) response = urllib2.urlopen(req) data = json.load(response) token = data['token'] except: token = '' pass fields ='*' objectIDs = getObjectIDs(where) total = len(objectIDs) arcpy.AddMessage("Total: " + str(total)) totalFixed = total - 1 last = objectIDs[-1] low = 0 high = 1000 theFC = "" while low <= total: arcpy.AddMessage(low) min = objectIDs[low] try: max = objectIDs[high] trigger = 0 except: max = objectIDs[totalFixed] trigger = 1 OIDquery = updatedQuery(min, max, trigger) query = "?where={}&outFields={}&returnGeometry=true&f=json&token={}".format(OIDquery, fields, token) fsURL = baseURL + query fs = arcpy.FeatureSet() fs.load(fsURL) arcpy.AddMessage("select completed.") if low == 0: theFC = createFC(fs) else: arcpy.Append_management(fs, theFC, "NO_TEST") low += 1000 high += 1000 arcpy.AddMessage("packing up...") zipper = output if os.path.isfile(zipper): os.remove(zipper) arcpy.AddMessage("zipfile started.") if downloadFormat == "FGDB": newZipper = zipper[:-4] shutil.make_archive(newZipper, "zip", directory) elif downloadFormat == "SHP": zip = zipfile.ZipFile(zipper, 'w', zipfile.ZIP_DEFLATED) for filename in os.listdir(directory): if not filename.endswith('.lock'): zip.write(os.path.join(directory, filename), filename) zip.close() arcpy.AddMessage("zipfile completed.") arcpy.AddMessage("that's all folks!!")
Marriott International’s recent Security Analyst Meeting provided the most in-depth discussion yet on how it will fold in Starwood Hotels & Resorts. Now the third-largest brand in Marriott’s portfolio, Sheraton suffers from “poor consumer perception” in North America, according to global chief commercial officer Stephanie Linnartz. “With our deep operational expertise coupled with clear accountability around renovation cycles, brand standards audits and quality-assurance delivery, we can significantly improve customer perception of the brand.” The company has formed a council of franchisors for feedback around brand positioning, operations and design. Marriott also is reevaluating the brand’s service, food and beverage and quality assurance standards. For luxury, St. Regis is underrepresented globally, with only 38 open properties and 22 in the pipeline, Capuano said. “We can see a significant increase in St. Regis signings, in the range of about 10 deals a year.” And Linnartz said Marriott is revamping the 10-year-old Aloft brand’s guest room design to be more profitable for developers and more user friendly for customers. It’s also launching healthy grab-and-go options for Aloft’s food and beverage program. As for Element, Marriott is pivoting the health and well-being lifestyle brand to an extended-stay product. Capuano said the company’s success with Residence Inn indicates potential to broaden Element’s presence in that space. Though Marriott Rewards and Starwood Preferred Guest have been linked since the merger, Marriott doesn’t expect to complete integration of the platforms until late 2018. Marriott anticipates a renegotiation of terms for its branded credit cards, which could increase loyalty program contributions and reduce hotels’ card-processing costs. The Marriott Rewards Visa through JPMorgan Chase expires in 2018, while the SPG American Express card expires in 2020. Linnartz said that after the merger, the sales organization numbered 750 global corporate accounts that have “meaningful overlap.” On April 1, the company launched its new global sales organization and redeployed sales associates for those accounts. “This streamlined sales force should position us to increase revenue across group, extended-stay and business travel, and we will be ready for the annual special corporate pricing season, which begins in May,” Linnartz said. Legacy Starwood hotels in North America also have migrated to Marriott’s Expedia and Booking.com contract terms, which are more favorable by “200 to 400 basis points,” according to Linnartz. The company expects its scale and distribution strength—only about 10 percent of its bookings come from online travel agencies—to help it negotiate even better terms in the future.
from django.contrib.flatpages.models import FlatPage from django.http import HttpResponseRedirect from django.shortcuts import render_to_response from django.template import RequestContext from django.views.decorators.csrf import csrf_protect from localtv.admin import forms from localtv.decorators import require_site_admin from localtv.models import SiteSettings @require_site_admin @csrf_protect def index(request): headers = [ {'label': 'Page Name'}, {'label': 'URL'}] site_settings = SiteSettings.objects.get_current() flatpages = FlatPage.objects.filter(sites=site_settings.site) formset = forms.FlatPageFormSet(queryset=flatpages) form = forms.FlatPageForm() if request.method == 'GET': return render_to_response('localtv/admin/flatpages.html', {'formset': formset, 'form': form, 'headers': headers}, context_instance=RequestContext(request)) else: if request.POST.get('submit') == 'Add': form = forms.FlatPageForm(request.POST) if form.is_valid(): flatpage = form.save() flatpage.sites.add(site_settings.site) return HttpResponseRedirect(request.path + '?successful') return render_to_response('localtv/admin/flatpages.html', {'formset': formset, 'form': form, 'headers': headers}, context_instance=RequestContext(request)) else: formset = forms.FlatPageFormSet(request.POST, queryset=flatpages) if formset.is_valid(): formset.save() action = request.POST.get('bulk_action') if action == 'delete': for data in formset.cleaned_data: if data['BULK']: data['id'].delete() return HttpResponseRedirect(request.path + '?successful') else: return render_to_response( 'localtv/admin/flatpages.html', {'formset': formset, 'form': form, 'headers': headers}, context_instance=RequestContext(request))
Good Christmas Gifts For Fathers. This picture collection about Good Christmas Gifts For Fathers is available to download. We collected these images online and selected the best for you. The Good Christmas Gifts For Fathers pics and pictures published here were carefully chosen and uploaded by our team after picking the ones that stood out from the rest. So, ultimately, we put together this list of amazing images for your inspiration and information regarding Good Christmas Gifts For Fathers, as part of the [blog] exclusive updates collection. Take your time and find the best Good Christmas Gifts For Fathers pics and pictures posted here that suit your needs, and use them for your own collection and personal use. Regarding image information: the graphic was published by admin and has been tagged by category. You are welcome to leave a review or rating to help us improve our website.
#!/usr/bin/env python import sys, SocketServer class JSONLogServer(SocketServer.BaseRequestHandler): """ Sample UDP server for receiving JSON messages. """ def handle_json(self, data): try: import json msg = json.loads(data) print("parsed json message:") for k in msg.keys(): print(" %s: %s" % (k, msg[k])) print except Exception, e: print("json parsing error: %s" % e) def handle_netstr(self, data): try: import netstring decoder = netstring.Decoder() keys = [ "username", "database", "remotehost", "debug_query_string", "elevel", "funcname", "sqlerrcode", "message", "detail", "hint", "context ", "instance_label", "timestamp" ] pos = 0 for field in decoder.feed(data): if pos < len(keys): k = keys[pos] print(" %s: %s" % (k, field)) pos += 1 except Exception, e: print("netstr parsing error: %s" % e) def handle_syslog(self, data): pass def handle(self): data = self.request[0].strip() print("raw message: %s" % data) if not data: return if data.startswith("{"): self.handle_json(data) elif data[0].isdigit(): self.handle_netstr(data) elif data[0] == '<': self.handle_syslog(data) if __name__ == "__main__": if len(sys.argv) < 2: PORT = 23456 else: PORT = int(sys.argv[1]) HOST = "" print("Listening on %s:%s" % (HOST, PORT)) server = SocketServer.UDPServer((HOST, PORT), JSONLogServer) server.serve_forever()
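The handler dispatches on the first byte of each datagram, so the JSON branch can be exercised with a one-shot UDP client. A minimal sketch, assuming the server above is running locally on its default port (23456); the payload fields are arbitrary examples, not a required schema.

# Minimal client sketch for the server above: sends one JSON datagram to the
# default port so the handle_json() branch fires. Standard library only.
import json
import socket

HOST, PORT = "127.0.0.1", 23456
msg = {"username": "postgres", "message": "duration: 12.3 ms", "elevel": "LOG"}

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(json.dumps(msg).encode("utf-8"), (HOST, PORT))
sock.close()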
This protective leather travel holder is the best way to protect your razor whilst on the move. Available in 4 colours. Width - 6.5cm, Depth - 3.5cm (when closed), Height - 15cm. This protective leather travel holder is the best way to protect your razor and brush whilst on the .. Protects your razor. Top quality brown cowhide. Stylish and practical. This leather pouch from..
import datetime import time from email.utils import formatdate, parsedate from xml.sax.saxutils import escape, quoteattr from django.conf import settings from django.http import Http404, HttpResponse, HttpResponseNotAllowed, HttpResponseNotModified, StreamingHttpResponse from django.shortcuts import redirect from django.views.decorators.cache import cache_control, never_cache from django.views.decorators.csrf import csrf_exempt from django.views.decorators.gzip import gzip_page from django.views.decorators.http import require_POST import accounts.payment_plans as plans import analytics.log as analytics_log from .models import Podcast, PodcastEpisode, PodcastSlugMigration from accounts.models import UserSettings from analytics.analyze import get_request_ip from payments.models import RecurringTip from pinecast.helpers import get_object_or_404, json_response, render, reverse from pinecast.signatures import signer DEFAULT_EPISODE_PREFIX = 'S{season}E{episode} - ' VALID_SOURCES = ['direct', 'rss', 'jsonfeed', 'embed'] @never_cache def listen(req, episode_id): ep = get_object_or_404(PodcastEpisode, id=episode_id) source = req.GET.get('source', 'direct') if source in VALID_SOURCES: listen = analytics_log.get_listen_obj( ep=ep, source=source, req=req, ip=get_request_ip(req), ua=req.META.get('HTTP_USER_AGENT', 'Unknown'), timestamp=datetime.datetime.now(), ) analytics_log.commit_listens([listen]) return redirect(ep.get_raw_url()) def feed(req, podcast_slug): pod, redirect = _get_pod_or_redirect(podcast_slug) if redirect: return redirect episodes = pod.get_episodes(select_related=('audio', 'artwork', 'episodefeedbackprompt')) # Write the log of this to the analytics back-end(s) analytics_log.write_subscription(req, pod, is_private=False) caching_response = _handle_caching(req, pod, episodes) if caching_response: return caching_response return _gen_feed(req, pod, episodes) def feed_private(req, podcast_slug, subscriber): try: sm = PodcastSlugMigration.objects.select_related('podcast').get(migrate_from=podcast_slug) return redirect(reverse('feed_private', podcast_slug=sm.podcast.slug, subscriber=subscriber)) except PodcastSlugMigration.DoesNotExist: pass pod = get_object_or_404(Podcast, slug=podcast_slug) recurring_tip = get_object_or_404(RecurringTip, podcast=pod, tipper__uuid=subscriber, deactivated=False) setattr(recurring_tip, 'podcast', pod) # ✨magic optimization ✨ if not recurring_tip.eligible_to_access_private(): raise Http404() episodes = pod.get_episodes(include_private=True) # Write the log of this to the analytics back-end(s) analytics_log.write_subscription(req, pod, is_private=True) caching_response = _handle_caching(req, pod, episodes) if caching_response: return caching_response return _gen_feed(req, pod, episodes, is_private=True) if settings.FEED_GZIP: feed = gzip_page(feed) feed_private = gzip_page(feed_private) def _get_pod_or_redirect(slug): try: sm = PodcastSlugMigration.objects.select_related('podcast').get(migrate_from=slug) return None, redirect(reverse('feed', podcast_slug=sm.podcast.slug)) except PodcastSlugMigration.DoesNotExist: pass return get_object_or_404(Podcast.objects.select_related('cover_art', 'owner', 'site'), slug=slug), None def _handle_caching(req, pod, episodes): if req.method not in ('GET', 'HEAD'): return HttpResponseNotAllowed(permitted_methods=['GET', 'HEAD']) last_update = max(pod.last_feed_update, pod.last_feed_update, *[e.publish for e in episodes]) last_update = last_update - datetime.timedelta(microseconds=last_update.microsecond) expected_etag = 
last_update.isoformat() match_etag = req.META.get('HTTP_IF_NONE_MATCH') if match_etag: if match_etag.strip('"') == expected_etag: return HttpResponseNotModified() elif settings.DEBUG: print('Expected "{}" to match "{}"'.format(match_etag, expected_etag)) now = datetime.datetime.now() ims = req.META.get('HTTP_IF_MODIFIED_SINCE') if ims: try: ims_parsed = datetime.datetime(*parsedate(ims)[:6]) if ims_parsed >= last_update: return HttpResponseNotModified() elif settings.DEBUG: print(ims_parsed, last_update) except Exception as e: if settings.DEBUG: print(e) return None def _gen_feed(req, pod, episodes, is_private=False): start_time = datetime.datetime.now() items = [] is_demo = UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO channel_explicit_tag = '<itunes:explicit>%s</itunes:explicit>' % ('yes' if pod.is_explicit else 'no') if not isinstance(episodes, list): episodes = list(episodes) pod_is_serial = pod.episode_release_type == 'serial' # `1, 1,` because if there are no episodes, you'll get an `int object is not iterable` error newest_season = max(1, 1, *[x.season for x in episodes if x.season]) episodes_by_season = {} for ep in episodes: if not ep.season: continue season_set = episodes_by_season.setdefault(ep.season, set()) if not ep.season_episode: continue season_set.add(ep.season_episode) episodes_by_season.setdefault(newest_season, set()) # For empty podcasts episodes_without_nums_by_season = { season: sum(1 for e in episodes if (e.season == season or not e.season) and not e.season_episode) for season in episodes_by_season } episode_prefix = pod.serial_ep_prefix_format or DEFAULT_EPISODE_PREFIX # TODO: Make the database do this with `values()`? categories = sorted([c.category for c in pod.podcastcategory_set.all()], key=lambda c: len(c)) category_map = {} for cat in categories: spl = cat.split('/') cursor = category_map for i in spl: cursor.setdefault(i, {}) cursor = cursor[i] def render_cat(c): for k, v in c.items(): if not v: yield '<itunes:category text=%s />' % quoteattr(k) else: yield ( '<itunes:category text={cat}>{inner}</itunes:category>' .format(cat=quoteattr(k), inner='\n'.join(render_cat(v))) ) def generate_item(ep): ep_url = ep.get_url('rss') md_desc = ep.get_html_description(is_demo=is_demo) title = ep.title if pod_is_serial and ep.episode_type == 'full' and ep.season and ep.season_episode: title = episode_prefix.format(season=ep.season, episode=ep.season_episode)[:256] + title season = ep.season if ep.season else newest_season if ep.season_episode: season_episode = str(ep.season_episode) elif ep.episode_type != 'full': season_episode = 1 else: season_episode = episodes_without_nums_by_season[season] + len(episodes_by_season[season]) while season_episode in episodes_by_season[season]: season_episode -= 1 episodes_by_season[season].add(season_episode) episodes_without_nums_by_season[season] -= 1 season = str(season) season_episode = str(season_episode) yield ( '<item>' '<title>{title}</title>' '<description><![CDATA[{desc}]]></description>' '<link>{url}</link>' '<guid isPermaLink="false">{guid}</guid>' '<pubDate>{publish}</pubDate>' '<itunes:author>{author}</itunes:author>' '<itunes:subtitle>{subtitle}</itunes:subtitle>' '<itunes:image href={artwork} />' '<itunes:duration>{duration}</itunes:duration>'.format( title=escape(title), desc=md_desc, url=escape(ep.get_site_url() or ep_url), guid=escape(ep.get_guid()), publish=formatdate(time.mktime(ep.publish.timetuple())), author=escape(pod.author_name), subtitle=escape(ep.subtitle), 
artwork=quoteattr(ep.get_image_url()), duration=escape(ep.formatted_duration()), ) ) if title != ep.title: yield '<itunes:title>%s</itunes:title>' % escape(ep.title) if ep.explicit_override != PodcastEpisode.EXPLICIT_OVERRIDE_CHOICE_NONE: yield '<itunes:explicit>%s</itunes:explicit>' % ( 'yes' if ep.explicit_override == PodcastEpisode.EXPLICIT_OVERRIDE_CHOICE_EXPLICIT else 'clean') else: yield channel_explicit_tag if ep.audio: yield '<enclosure url=%s length=%s type=%s />' % ( quoteattr(ep_url), quoteattr(str(ep.audio.content_size)), quoteattr(ep.audio.content_type) ) if ep.episode_type != 'full': yield '<itunes:episodeType>%s</itunes:episodeType>' % escape(ep.episode_type) if pod_is_serial: yield '<itunes:season>%s</itunes:season>' % escape(season) if pod_is_serial and ep.episode_type == 'full': yield '<itunes:episode>%s</itunes:episode>' % escape(season_episode) if ep.copyright: yield '<dc:copyright>%s</dc:copyright>' % escape(ep.copyright) if ep.license: yield '<dc:rights>%s</dc:rights>' % escape(ep.license) yield '</item>' def generate_content(): yield ( '<?xml version="1.0" encoding="UTF-8"?>\n' '<?xml-stylesheet type="text/xsl" media="screen" href="/static/rss.xsl"?>\n' '<rss xmlns:atom="http://www.w3.org/2005/Atom"\n' ' xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"\n' ' xmlns:dc="http://purl.org/dc/elements/1.1/"\n' ' xmlns:pinecast="https://pinecast.com/rss-dtd/1.0/"\n' ' version="2.0">\n' '<channel>\n' '<title>{title}</title>\n' '<link>{homepage}</link>\n' '<atom:link href="{canonical}" rel="self" type="application/rss+xml" />\n' '<generator>Pinecast (https://pinecast.com)</generator>\n' '{pinecast_site}' '<language>{language}</language>'.format( title=escape(pod.name), homepage=escape(pod.homepage), canonical=escape(pod.canonical_feed_url()), language=escape(pod.language), pinecast_site=( '<pinecast:site>{}</pinecast:site>\n'.format(pod.get_site().get_domain()) if pod.get_site() else '' ), ) ) if pod.copyright: yield '<copyright>%s</copyright>' % escape(pod.copyright) if pod.episode_release_type != 'episodic': yield '<itunes:type>%s</itunes:type>' % escape(pod.episode_release_type) if pod.rss_redirect: yield '<itunes:new-feed-url>%s</itunes:new-feed-url>' % escape(pod.canonical_feed_url()) if pod.subtitle: yield '<itunes:subtitle>%s</itunes:subtitle>' % escape(pod.subtitle) yield ( '<itunes:author>{author}</itunes:author>\n' '<description><![CDATA[{description}]]></description>\n' '<itunes:owner>\n' '<itunes:name>{author_name}</itunes:name>\n' '<itunes:email>{owner_email}</itunes:email>\n' '</itunes:owner>\n' '{channel_explicit_tag}\n' '<itunes:image href={cover_art_attr} />\n' '<image>\n' '<title>{title}</title>\n' '<link>{homepage}</link>\n' '<url>{cover_art}</url>\n' '</image>'.format( author=escape(pod.author_name), description=pod.description, author_name=escape(pod.author_name), owner_email=escape(pod.owner.email), channel_explicit_tag=channel_explicit_tag, cover_art_attr=quoteattr(pod.get_cover_image_url()), cover_art=escape(pod.get_cover_image_url()), title=escape(pod.name), homepage=escape(pod.homepage), ) ) yield '\n'.join(render_cat(category_map)) for ep in episodes: if settings.FEED_STREAMING: yield '\n'.join(generate_item(ep)) else: yield from generate_item(ep) yield '</channel>\n</rss>' if UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO: if len(episodes) > 10: yield '<!-- This feed is truncated because the owner is not a paid customer. -->' else: yield '<!-- This feed will be truncated at 10 items because the owner is not a paid customer. 
-->' end_time = datetime.datetime.now() delta = end_time - start_time yield '<!-- generated in {}s {}us -->'.format(delta.seconds, delta.microseconds) if settings.DEBUG_TOOLBAR: yield '</body>' if pod.rss_redirect: resp = HttpResponse(status=301) resp.setdefault('Location', pod.rss_redirect) return resp user_agent = req.META.get('HTTP_USER_AGENT', '') if settings.DEBUG_TOOLBAR: content_type = 'text/html' else: content_type = 'text/xml' if user_agent.startswith('Mozilla') else 'application/rss+xml' content_type_with_encoding = content_type + '; charset=utf-8' if settings.FEED_STREAMING: resp = StreamingHttpResponse( (c + '\n' for c in generate_content()), content_type=content_type_with_encoding, status=200, ) else: resp = HttpResponse( '\n'.join(generate_content()), content_type=content_type_with_encoding, status=200, ) # Get the estimated last update timestamp last_update = max(pod.last_feed_update, pod.last_feed_update, *[e.publish for e in episodes]) # Shave off the microsecond component last_update = last_update - datetime.timedelta(microseconds=last_update.microsecond) resp.setdefault('ETag', 'W/"{}"'.format(last_update.isoformat())) resp.setdefault('Last-Modified', formatdate(time.mktime(last_update.timetuple()))) resp.setdefault('Access-Control-Allow-Origin', '*') resp.setdefault('Access-Control-Request-Method', 'GET') return resp @gzip_page @json_response(cors=True) def json_feed(req, podcast_slug): pod, redirect = _get_pod_or_redirect(podcast_slug) if redirect: return redirect is_demo = UserSettings.get_from_user(pod.owner).plan == plans.PLAN_DEMO pod_is_serial = pod.episode_release_type == 'serial' episode_prefix = pod.serial_ep_prefix_format or DEFAULT_EPISODE_PREFIX episodes = pod.get_episodes() out = { 'version': 'https://jsonfeed.org/version/1', 'title': pod.name, 'description': pod.description, 'icon': pod.get_cover_image_url(), 'author': {'name': pod.author_name}, 'feed_url': pod.canonical_feed_url(), 'items': [ { 'id': str(ep.id), 'url': ep.get_site_url() or ep.get_url('jsonfeed'), 'title': ( episode_prefix.format(season=ep.season, episode=ep.season_episode) + ep.title if pod_is_serial and ep.episode_type == 'full' and ep.season and ep.season_episode else ep.title ), 'content_html': ep.get_html_description(is_demo=is_demo), 'image': ep.get_image_url(), 'date_published': ep.publish.strftime('%Y-%m-%dT%H:%M:%SZ'), 'attachments': [ { 'url': ep.get_url('jsonfeed'), 'mime_type': ep.audio.content_type, 'size_in_bytes': ep.audio.content_size, 'duration_in_seconds': ep.duration, }, ], } for ep in episodes ] } if pod.homepage: out['home_page_url'] = pod.homepage return out PLAYER_THEMES = set(['minimal', 'thick', 'slim']) @gzip_page @cache_control(public=True, max_age=3600) def player(req, episode_id): ep = get_object_or_404(PodcastEpisode.objects.select_related('audio', 'artwork'), id=episode_id) pod = get_object_or_404(Podcast.objects.select_related('owner'), id=ep.podcast_id) setattr(ep, 'podcast', pod) if ep.check_is_private() and (not req.user or req.user.id != pod.owner): raise Http404() theme = 'minimal' if req.GET.get('theme') in PLAYER_THEMES: theme = req.GET.get('theme') ctx = {'episode': ep} if req.GET.get('card'): ctx['card'] = True resp = render(req, 'player/%s.html' % theme, ctx) # If the user is not a demo user, allow the player to be used outside the app. 
if UserSettings.user_meets_plan(ep.podcast.owner, plans.FEATURE_MIN_PLAYER): resp.xframe_options_exempt = True return resp @gzip_page @cache_control(public=True, max_age=3600) def player_latest(req, podcast_slug): pod = get_object_or_404(Podcast, slug__iexact=podcast_slug) eps = pod.get_episodes() if not eps: raise Http404() url = reverse('player', episode_id=str(eps[0].id)) theme = req.GET.get('theme', 'minimal') return redirect(url + '?theme={}'.format(theme)) @csrf_exempt @require_POST def update_duration(req): try: ep_id = signer.unsign(req.POST.get('ep_id', ''), max_age=3600).decode('utf-8') except Exception as e: return HttpResponse(status=400) ep = get_object_or_404(PodcastEpisode, id=ep_id) try: ep.duration = int(float(req.POST.get('duration', '0'))) except Exception as e: return HttpResponse(status=400) ep.save() return HttpResponse(status=204)
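The _handle_caching helper answers conditional requests with 304 Not Modified when the client echoes the ETag or Last-Modified validators back. A small client-side sketch of that handshake, assuming the requests package is installed and using a placeholder feed URL:

# Sketch of the conditional-GET handshake that _handle_caching() implements.
# The feed URL is a placeholder; `requests` is assumed to be available.
import requests

FEED_URL = "https://example.com/feed/my-podcast"  # hypothetical feed URL

first = requests.get(FEED_URL)
etag = first.headers.get("ETag")
last_modified = first.headers.get("Last-Modified")

# Re-request with the validators; an unchanged feed should come back as a 304
# with an empty body instead of the full RSS document.
second = requests.get(FEED_URL, headers={
    "If-None-Match": etag or "",
    "If-Modified-Since": last_modified or "",
})
print(first.status_code, second.status_code)  # expected: 200 304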
Questions about the app? Visit our Frequently Asked Questions page. Tax and shipping not included in purchase price.
#!/usr/bin/python # -*- coding: iso-8859-15 -*- """ $Id$ This file is part of the xsser project, http://xsser.sourceforge.net. Copyright (c) 2011/2012 psy <root@lordepsylon.net> - <epsylon@riseup.net> xsser is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation version 3 of the License. xsser is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with xsser; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ import urlparse import urllib2 import traceback urllib2.socket.setdefaulttimeout(5.0) from BeautifulSoup import BeautifulSoup DEBUG = 1 class Dorker(object): def __init__(self, engine='bing'): self._engine = engine def dork(self, search): """ Perform a search and return links. Uses -bing- engine by default. (http://en.wikipedia.org/wiki/List_of_search_engines) """ urlpar = None divid = None unpack_func = None css_class = None raw_extract = None html_tok = 'a' paging_arg = None # allow to do paging if self._engine == 'bing' or not self._engine: # works at 20-02-2011 search_url = "http://www.bing.com/search?q=" + urllib2.quote(search) divid = 'results_container' elif self._engine == 'scroogle': search_url = "http://www.scroogle.org/cgi-bin/nbbw.cgi?q=" + urllib2.quote(search) elif self._engine == 'altavista': # works at 20-02-2011 def altavista_func(href): href = href['href'] # http://search.yahoo.com/r/_ylt=A0oG7p45zGBNl0MAuhQPxQt.;_ylu=X3oDMTByMTNuNTZzBHNlYwNzcgRwb3MDMgRjb2xvA2FjMgR2dGlkAw--/SIG=11942um5m/EXP=1298275769/**http%3a//money.cnn.com/ if "**" in href: return {'href':urlparse.unquote(href[href.rfind('**')+2:])} #divid = 'results' -> in other altavista=? def raw_extract(html_data, encoding): results = [] for line in html_data.split("\n"): if "<a class='res'" in line and "http" in line: href = line[line.find("http"):line.rfind("'")] results.append({'href': href}) return results css_class = 'res' #unpack_func = altavista_func -> in otherS? 
#search_url = "http://us.yhs4.search.yahoo.com/yhs/search?fr=altavista&itag=ody&q=" + urllib2.quote(search) search_url = "http://es.altavista.com/web/results?fr=altavista&itag=ody&q=" + urllib2.quote(search) elif self._engine == 'duck': # seems hopeless at 20-02-2011 search_url = "https://duckduckgo.com/?q=" + urllib2.quote(search) elif self._engine == 'baidu': # works at 20-02-2011 #html_tok = 'span' #css_class = 'g' def raw_extract(html_data, encoding): results = [] pos = 0 while pos < len(html_data): pos = html_data.find('span class="g">', pos) if pos == -1: break; href = html_data[pos+15:html_data.find('<', pos)].strip() pos = pos + 1 if not href: continue href = href.split(" ")[0] if not href.startswith('http'): href = 'http://'+href results.append({'href': href}) return results search_url = "http://www.baidu.com/s?wd=" + urllib2.quote(search) elif self._engine == 'yandex': # works at 20-02-2011 def raw_extract(html_data, encoding): results = [] for line in html_data.split("\n"): if 'class="b-serp-url__link"' in line and "http" in line: href = line[line.find("http"):line.find('"', line.find("http")+10)] results.append({'href': href}) return results #css_class = 'b-serp-url__link' search_url = "http://yandex.ru/yandsearch?text=" + urllib2.quote(search) elif self._engine == 'yebol': divid = "Scrollbar-SearchResultsc" search_url = "http://www.yebol.com/a.jsp?x=0&y=0&key=" + urllib2.quote(search) elif self._engine == 'youdao': search_url = "http://www.youdao.com/search?q=" + urllib2.quote(search) #elif self._engine == 'ask': # not works # def raw_extract(html_data, encoding): # results = [] # prevline = "" # for line in html_data.split("\n"): # if 'class="title txt_lg"' in line and "http" in prevline: # href = prevline[prevline.find("http"):prevline.find('"', # prevline.find("http")+10)] # results.append({'href': href}) # prevline = line # return results # search_url = "http://www.ask.com/web?q=" + urllib2.quote(search) elif self._engine == 'google': # works at 11/11/2011 #def raw_extract(html_data, encoding): # results = [] # prevline = "" # for line in html_data.split("\n"): # if 'class="r"' in line and "http" in prevline: # href = prevline[prevline.find("http"):prevline.find('"', # prevline.find("http")+10)] # results.append({'href': href}) # prevline = line # return results search_url = "https://encrypted.google.com/search?hl=en&q=" + urllib2.quote(search) elif self._engine == 'yahoo': # works at 20-02-2011 def raw_extract(html_data, encoding): results = [] for line in html_data.split("\n"): if 'class="yschttl spt"' in line and "http" in line: href = line[line.find("http"):line.find('"', line.find("http")+10)] results.append({'href': href}) return results search_url = "http://search.yahoo.com/search?p=" + urllib2.quote(search) elif self._engine == 'sogou': search_url = "http://www.sogou.com/web?query=" + urllib2.quote(search) elif self._engine == 'rediff': search_url = "http://search1.rediff.com/dirsrch/default.asp?src=web&MT=" + urllib2.quote(search) elif self._engine == 'blekko': search_url = "http://blekko.com/ws/?q=" + urllib2.quote(search) elif self._engine == 'kosmix': # doesnt work properly def raw_extract(html_data, encoding): print html_data results = [] is_next = False for line in html_data.split("\n"): #if 'class="www_result_url"' in line and "http" in line: if '<h4>' in line and "http" in line: href = line[line.find("http"):line.find('"', line.find("http")+10)] results.append({'href': href}) is_next=False if is_next and "http" in line: href = 
line[line.find("http"):line.find('"', line.find("http")+10)] results.append({'href': href}) is_next=False elif '<h4>' in line: is_next=True else: is_next=False return results search_url = "http://www.kosmix.com/topic/lala?q=" + urllib2.quote(search) elif self._engine == 'search': # works at 20-02-2011 def raw_extract(html_data, encoding): results = [] for line in html_data.split("\n"): if 'class="www_result_url"' in line and "http" in line: #if 'class="www_result_title"' in line and "http" in line: href = line[line.find("http"):line.find('"', line.find("http")+10)] results.append({'href': href}) return results search_url = "http://www.search.ch/?q=" + urllib2.quote(search) elif self._engine == 'ifacnet': search_url = "http://www.ifacnet.com/?q=" + urllib2.quote(search) elif self._engine == 'bussines': search_url = "http://www.business.com/search/rslt_default.asp?vt=all&type=web&query=" + urllib2.quote(search) elif self._engine == 'globalspec': search_url = "http://search.globalspec.com/Search?query=" + urllib2.quote(search) elif self._engine == 'taptu': search_url = "http://www.taptu.com/search/lite/results?term=" + urllib2.quote(search) elif self._engine == 'topix': search_url = "http://www.topix.com/search/article?q=" + urllib2.quote(search) elif self._engine == 'hakia': search_url = "http://hakia.com/search?q=" + urllib2.quote(search) elif self._engine == 'leapfish': search_url = "http://www.leapfish.com/web.aspx?q=" + urllib2.quote(search) #elif self._engine == 'webcrawler': # works at 20-02-2011 # urlpar = "rawURL" # search_url = "http://www.webcrawler.com/webcrawler203/ws/results/Web/" + urllib2.quote(search) + "/1/417/TopNavigation/Relevance/iq=true/zoom=off/_iceUrlFlag=7?_IceUrl=true" elif self._engine == 'excite': search_url = "http://msxml.excite.com/excite/ws/results/Web/" + urllib2.quote(search) + "/1/0/0/Relevance/iq=true/zoom=off/_iceUrlFlag=7?_IceUrl=true" elif self._engine == 'yolink': search_url = "http://cloud.yolink.com/search/search?keywords=" + urllib2.quote(search) elif self._engine == 'lycos': search_url = "http://search.lycos.com/?tab=web&query=" + urllib2.quote(search) else: print "\nThis search engine is not allowed. 
Check dork.py file to see a complete list\n" try: self.search_url = search_url url = urllib2.urlopen(urllib2.Request(search_url, headers={'User-Agent': "Googlebot/2.1 (+http://www.google.com/bot.html"})) except urllib2.URLError, e: if DEBUG: traceback.print_exc() raise Exception("Internal error dorking: " + e.message) html_data = url.read() html_data = html_data.replace(">",">\n") html_data = html_data.replace("target=_",'target="_') html_data = html_data.replace('\ >','/>') html_data = html_data.replace('\>','/>') html_data = html_data.replace('"">','">') html_data = html_data.replace('</scr"+"ipt>','</script>') content_type = url.headers['content-type'] try: encoding = content_type.split(";")[1].split("=")[1].strip() except: encoding = 'utf-8' if raw_extract: links = raw_extract(html_data, encoding) else: try: soup = BeautifulSoup(html_data, fromEncoding=encoding) except Exception, e: traceback.print_exc() raise Exception("Internal error dorking:" + e.message) if divid: #print(html_data) soup = soup.find('div', {'id':divid}) if css_class: links = soup.findAll(html_tok, {'class':css_class}) else: links = soup.findAll(html_tok) found_links = [] if unpack_func: links = map(unpack_func, links) links = filter(lambda s: s, links) for link in links: try: href = str(link['href'].encode('utf-8')) except KeyError: # this link has no href pass else: if not href.startswith("/") and not "microsofttranslator" in href and not "bingj" in href and not "live.com" in href and not "scroogle" in href: if urlpar: parsed = urlparse.urlparse(href) q = urlparse.parse_qs(parsed.query) if urlpar in q and q[urlpar]: href = urlparse.unquote(q[urlpar][0]) found_links.append(href) else: found_links.append(href) return found_links if __name__ == '__main__': for a in ['google', 'altavista', 'yahoo', 'baidu', 'bing', 'webcrawler', 'youdao', 'yandex']: dork = Dorker(a) res = dork.dork("lorea") print a,len(res) for b in res: print " *", b
My third novel ‘The Gallery of Vanished Husbands’ tells the life story of Juliet Montague and her emergence from a conservative Jewish upbringing to the heart of ’60s London and its thriving art scene, through a series of portraits of Juliet, each chapter of the novel hinging on a different painting. The research was a treat as it sanctioned hours of padding around the National Portrait Gallery and rifling through the online archives. These are a few of the portraits that helped to inspire the novel. I’m going to blog one each day for the next week. This portrait is on display in Room 6722. I adore Arturo Di Stefano’s painting of the writer Jan Morris. He conveys such a sense of a life lived – you can actually see it happening outside the window in the portrait. It feels like he has painted their conversation almost inadvertently as she tells him about her life, Italy, places she’s visited and loved. We’re peering into an intimate moment and catching a glimpse of the collusion between artist and subject. Di Stefano has not only captured a sense of Morris’s personality but also the experience of painting her – a delicious insight into the artistic process. I also very much like the cat – there’s something so personal and warm about it snoozing there in the corner.
from . import data_reduce import numpy as np from . import readout_classifier #import cvxopt #import cvxpy class tomography: def __init__(self, sz_measurer, pulse_generator, proj_seq, reconstruction_basis={}): self.sz_measurer = sz_measurer #self.adc = adc self.pulse_generator = pulse_generator self.proj_seq = proj_seq self.reconstruction_basis=reconstruction_basis self.adc_reducer = data_reduce.data_reduce(self.sz_measurer.adc) self.adc_reducer.filters['SZ'] = {k:v for k,v in self.sz_measurer.filter_binary.items()} self.adc_reducer.filters['SZ']['filter'] = lambda x: 1-2*self.sz_measurer.filter_binary_func(x) def get_points(self): points = { p:{} for p in self.proj_seq.keys() } points.update({p:{} for p in self.reconstruction_basis.keys()}) return points def get_dtype(self): dtypes = { p:float for p in self.proj_seq.keys() } dtypes.update({ p:float for p in self.reconstruction_basis.keys() }) return dtypes def set_prepare_seq(self, seq): self.prepare_seq = seq def measure(self): meas = {} for p in self.proj_seq.keys(): self.pulse_generator.set_seq(self.prepare_seq+self.proj_seq[p]['pulses']) meas[p] = np.real(np.mean(self.adc_reducer.measure()['SZ'])/2) proj_names = self.proj_seq.keys() basis_axes_names = self.reconstruction_basis.keys() #TODO: fix this norm stuff in accordance with theory basis_vector_norms = np.asarray([np.linalg.norm(self.reconstruction_basis[r]['operator']) for r in basis_axes_names]) if len(self.reconstruction_basis.keys()): reconstruction_matrix = np.real(np.asarray([[np.sum(self.proj_seq[p]['operator']*np.conj(self.reconstruction_basis[r]['operator'])) \ for r in basis_axes_names] \ for p in proj_names])) projections = np.linalg.lstsq(reconstruction_matrix, [meas[p] for p in proj_names])[0]*(basis_vector_norms**2) meas.update({k:v for k,v in zip(basis_axes_names, projections)}) return meas def get_opts(self): opts = { p:{} for p in self.proj_seq.keys()} opts.update ({ p:{} for p in self.reconstruction_basis.keys()}) return opts
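The least-squares step inside measure() can be illustrated without any instruments: given expectation values measured along a few projection operators, recover the coordinates in a reconstruction basis exactly as the class does. The operators below are plain Pauli matrices standing in for the ones normally supplied via proj_seq and reconstruction_basis, and the measurement values are made up.

# Numpy-only illustration of the reconstruction step in tomography.measure().
import numpy as np

sx = np.array([[0, 1], [1, 0]], dtype=complex)
sy = np.array([[0, -1j], [1j, 0]], dtype=complex)
sz = np.array([[1, 0], [0, -1]], dtype=complex)

proj_ops = {'x': sx, 'y': sy, 'z': sz}    # operators that were actually measured
basis_ops = {'X': sx, 'Y': sy, 'Z': sz}   # axes we want coordinates along
meas = {'x': 0.70, 'y': 0.00, 'z': 0.71}  # fake measurement results

basis_norms = np.array([np.linalg.norm(op) for op in basis_ops.values()])
# Overlap matrix <P_p, B_r> = sum_ij P_p[i,j] * conj(B_r[i,j]), as in measure()
A = np.real(np.array([[np.sum(P * np.conj(B)) for B in basis_ops.values()]
                      for P in proj_ops.values()]))
rhs = np.array([meas[k] for k in proj_ops])
coords = np.linalg.lstsq(A, rhs, rcond=None)[0] * basis_norms**2

# For this orthogonal basis the measured values come straight back.
print(dict(zip(basis_ops, np.round(coords, 3))))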
Guthrie Govan recently filmed a demo of the Ditto X2 Looper from TC Electronic using his new signature Charvel guitar. The Aristocrats axeman wasted no time in creating an amazing loop that built and built for just over two minutes.

“Born in Winter” comes off Gojira’s 2012 album L’Enfant Sauvage, and its video has the song playing over spacey imagery that could hypnotize anyone. Check out the clip below and click here for a list of the band’s spring tour dates with Mastodon and Kvelertak.