text
stringlengths
29
850k
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

# Copyright 2017 Eddie Antonio Santos <easantos@ualberta.ca>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Allows one to set the language prior to running any of the scripts:

Usage:
    sensibility [-l LANGUAGE] <command> [<args>]
"""

import os
import sys
from pathlib import Path
from types import SimpleNamespace
from typing import Dict, List, Tuple

from sensibility._paths import REPOSITORY_ROOT

# Directory containing the executable subcommands dispatched by this script.
bin_dir = REPOSITORY_ROOT / 'bin'


def main() -> None:
    """Parse global options, then dispatch to (exec) the requested subcommand."""
    assert bin_dir.is_dir()
    args = parse_args()

    # Start from a copy of the current environment so PATH et al. survive
    # the exec into the subcommand.
    env: Dict[str, str] = {}
    env.update(os.environ)

    # Set the language if defined.
    if args.language is not None:
        env.update(SENSIBILITY_LANGUAGE=args.language)

    if args.subcommand:
        run_subcommand(args.subcommand, env)
    else:
        list_commands()
        sys.exit(-1)


def run_subcommand(command: List[str], env: Dict[str, str]) -> None:
    """
    Replace the current process with the executable named by `command`.

    Does not return on success: os.execve() replaces the process image.
    Exits with status 2 (via usage_error) if the executable does not exist.
    """
    executable, argv = get_bin_and_argv(command)
    if not executable.exists():
        usage_error("Unknown executable:", executable)
    os.execve(str(executable.absolute()), argv, env)


def list_commands() -> None:
    """Print every executable found (recursively) under bin_dir to stderr."""
    print("Please specify a subcommand:\n", file=sys.stderr)
    for entry in bin_dir.rglob('*'):
        if entry.is_dir() or not is_executable(entry):
            continue
        relative = entry.relative_to(bin_dir)
        subcommand = ' '.join(relative.parts)
        print(f"\t{subcommand}", file=sys.stderr)


def get_bin_and_argv(command: List[str]) -> Tuple[Path, List[str]]:
    """
    Returns the absolute path to the binary, AND the argument vector,
    including argv[0] (the command name).
    """
    first_comp, = command[:1]
    # XXX: Only supports one-level subcommands
    if (bin_dir / first_comp).is_dir():
        # BUG FIX: the original indexed command[1] unconditionally and raised
        # IndexError when only the group name was supplied (e.g. `sensibility foo`
        # where bin/foo/ is a directory). Report a usage error instead.
        if len(command) < 2:
            usage_error(f"Missing subcommand for {first_comp!r}")
        return bin_dir / first_comp / command[1], command[1:]
    else:
        return bin_dir / first_comp, command


def is_executable(path: Path) -> bool:
    """Return True if `path` is executable by the current user."""
    # access() is deprecated, but we're using it anyway!
    return os.access(path, os.X_OK)


def parse_args(argv=sys.argv):
    """
    Roll my own parser because argparse will swallow up arguments that don't
    belong to it.

    Returns a SimpleNamespace with .language (str or None) and .subcommand
    (list of str, or None when no positional argument was given).
    """
    argv = argv[1:]
    args = SimpleNamespace()
    args.language = None
    args.subcommand = None

    # Parse options one by one.
    while argv:
        arg = argv.pop(0)
        if arg in ('-l', '--language'):
            # BUG FIX: report a usage error instead of crashing with an
            # IndexError when -l/--language is the very last argument.
            if not argv:
                usage_error(f"Missing argument for {arg}")
            args.language = argv.pop(0)
        elif arg.startswith('--language='):
            _, args.language = arg.split('=', 1)
        elif arg.startswith('-'):
            usage_error(f"Unknown argument {arg!r}")
        else:
            # First positional argument: it and everything after it belong
            # to the subcommand.
            args.subcommand = [arg] + argv[:]
            break
    return args


def usage_error(*args):
    """Print a message to stderr and exit with conventional usage status 2."""
    print(f"{sys.argv[0]}:", *args, file=sys.stderr)
    sys.exit(2)


if __name__ == '__main__':
    main()
Approximately two weeks after we receive your enrollment, CHA will mail you a detailed tour account statement (listing your tour cost and payment deadlines) along with an introductory travel newsletter with important information about obtaining your passport and our optional Travel Protection Plan. Your group leader may also provide travelers access to CHA’s Online Tour Account Center. Using your group number and your unique Tour Account Code, you’ll be able to log-on to your Tour Account Center to check your balance, make tour payments, look up important details about your tour including flight and hotel information, and more. For instructions on how to use your Tour Account Center, please call your group leader. Approximately one month before you leave, your group leader will give you a copy of CHA’s Pre-Departure Guide which contains a variety of useful travel information to prepare you for your trip with advice on packing, foreign currencies, shopping, weather, suggested reading and much more! Along with your travel guide, you will also receive a CHA backpack and luggage tag for use while you travel. Your airline ticket and list of assigned hotels will be given out by your teacher a few days before you leave. Remember, all the details of coordinating your tour with CHA’s staff will be accomplished through your group leader. To keep up-to-date on your tour, be sure to stay in regular contact with your group leader in the months before you travel and attend any meetings held for your group.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Example Airflow DAG that uses Google AutoML services.
"""
import os

from airflow import models
from airflow.providers.google.cloud.hooks.automl import CloudAutoMLHook
from airflow.providers.google.cloud.operators.automl import (
    AutoMLCreateDatasetOperator,
    AutoMLDeleteDatasetOperator,
    AutoMLDeleteModelOperator,
    AutoMLImportDataOperator,
    AutoMLTrainModelOperator,
)
from airflow.utils.dates import days_ago

# Deployment-specific settings, overridable through the environment.
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "your-project-id")
GCP_AUTOML_LOCATION = os.environ.get("GCP_AUTOML_LOCATION", "us-central1")
GCP_AUTOML_VIDEO_BUCKET = os.environ.get(
    "GCP_AUTOML_VIDEO_BUCKET", "gs://automl-video-demo-data/hmdb_split1.csv"
)

# Example values
DATASET_ID = "VCN123455678"

# Example model
MODEL = {
    "display_name": "auto_model_1",
    "dataset_id": DATASET_ID,
    "video_classification_model_metadata": {},
}

# Example dataset
DATASET = {
    "display_name": "test_video_dataset",
    "video_classification_dataset_metadata": {},
}

IMPORT_INPUT_CONFIG = {"gcs_source": {"input_uris": [GCP_AUTOML_VIDEO_BUCKET]}}

default_args = {"start_date": days_ago(1)}

# Helper macro used in templated fields to pull the object id out of
# an AutoML resource name.
extract_object_id = CloudAutoMLHook.extract_object_id

# Example DAG for AutoML Video Intelligence Classification
with models.DAG(
    "example_automl_video",
    default_args=default_args,
    schedule_interval=None,  # Override to match your needs
    user_defined_macros={"extract_object_id": extract_object_id},
    tags=['example'],
) as example_dag:
    # 1. Create an empty video-classification dataset.
    create_dataset_task = AutoMLCreateDatasetOperator(
        task_id="create_dataset_task",
        dataset=DATASET,
        location=GCP_AUTOML_LOCATION,
    )

    # Templated reference to the dataset id XCom-pushed by the task above.
    dataset_id = (
        '{{ task_instance.xcom_pull("create_dataset_task", key="dataset_id") }}'
    )

    # 2. Import the demo CSV into the freshly created dataset.
    import_dataset_task = AutoMLImportDataOperator(
        task_id="import_dataset_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        input_config=IMPORT_INPUT_CONFIG,
    )

    # Point the model spec at the runtime dataset id rather than the example one.
    MODEL["dataset_id"] = dataset_id

    # 3. Train a model on the imported data.
    create_model = AutoMLTrainModelOperator(
        task_id="create_model",
        model=MODEL,
        location=GCP_AUTOML_LOCATION,
    )

    model_id = "{{ task_instance.xcom_pull('create_model', key='model_id') }}"

    # 4./5. Clean up: delete the model, then the dataset.
    delete_model_task = AutoMLDeleteModelOperator(
        task_id="delete_model_task",
        model_id=model_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )

    delete_datasets_task = AutoMLDeleteDatasetOperator(
        task_id="delete_datasets_task",
        dataset_id=dataset_id,
        location=GCP_AUTOML_LOCATION,
        project_id=GCP_PROJECT_ID,
    )

    # Wire the pipeline: create -> import -> train -> delete model -> delete dataset.
    create_dataset_task >> import_dataset_task >> create_model
    create_model >> delete_model_task >> delete_datasets_task
Poetically titled ‘The Land of the Rising Sun’, the endemic beauty and charm of Japan can be found as much in its unique and hugely engaging culture as it can in its spectacular natural beauty. Come to Japan to see the iconic scenes of snow-dusted Mount Fuji, parks flourishing in pink cherry blossom and exquisitely attired geishas shuffling across your path, but stay for a lifestyle that intriguingly blends distinctive age-old traditions with famously modern thinking. Plotting an itinerary for a holiday is a task for any person, but when the destination is a country of bustling mega-metropolises, pockets of peaceful enclaves and a custom truly like no other, be very prepared to leave any potential to-do list on the plane. Underpinned by its wonderfully unique culture, it wouldn’t be an exaggeration to claim that Japan is unlike any other destination in the world with its effortless blend of forward-thinking and tradition. Known as the ‘Land of the Rising Sun’, Japan is headlined by its spectacular capital city of Tokyo, widely regarded as the biggest in the world. No matter how you plan to visit Japan, there is certainly no substitute for allowing yourself to simply experience it. Book a Japan holiday with Travelbag and prepare to immerse yourself in an extraordinary culture like no other. Tokyo is a city where historic customs mingle with cutting-edge technology, a juxtaposition that means you’ll find a delicately small Shinto shrine plotted amidst a cluster of space-age skyscrapers, where stunningly peaceful gardens line up alongside the neon flashes of crowded stores and where a bolshie bout of sumo-wrestling dominates the sports headlines in Japan. In short, visiting Tokyo is as much about stumbling across something unexpected as it is about knowing what to do and where to go. Indeed, without a doubt, every corner in Tokyo has the potential to unravel a new surprise. 
Tokyo is merely the first step on a thrilling journey across Japan, however, and a ride on the pioneering Shinkansen (Bullet Train) has the potential to take you to many of the country’s foremost destinations. Head south to visit the thriving city of Osaka, in many ways a more condensed version of Tokyo and an excellent place to immerse yourself in a city noticeably more relaxed than its counterpart. It’s also a superb base from which to explore nearby Kyoto, a splendid city firmly rooted in Japanese history and an imperative stop on any Japan holiday. Walk for hours amidst the hundreds of exquisite Buddhist temples – including the iconic Golden Temple – and Shinto shrines, immersing oneself in the peaceful ambience of your sublime surroundings. If you’ve ever envisaged Japan to be tranquil temples, idyllic gardens and peaceful ponds, Kyoto is the fulfilment of your fantasy. Japan is replete with experiences every traveller must savour at one point in their life, not least admiring the views across towards the iconic Mt. Fuji, the snow-capped peak pressed against the deep blue of the sky behind it as rolling green landscapes unfurl in the foreground. The city of Hiroshima also represents an important stop on a Japan holiday, its reinvention following the atomic bomb that destroyed it – and signalled the end of World War II – underlined by its poignant and moving Peace Memorial Park. Wherever you go in Japan, there are countless opportunities to try something new, whether that involves residing in a traditional Ryokan guestroom, feasting on delicious local delicacies, from sushi to gyoza to okonomiyaki, sinking into a relaxing mountainside onsen, or having a go at the nation’s favourite partying past-time, karaoke. The weather in Japan is generally mild, which makes it an easy place for tourists and travellers to acclimatise to. 
The country has four distinct seasons: winter, from December to February, and summer between June and August, when the country can get pretty hot and humid. Travelbag has been packaging holidays since 1979. Our package itineraries to Japan combine airfares and hotel deals to deliver cheaper overall rates for your holiday. Use this page to find cheap Japan holiday packages using either the current deals above or the control panel at the top of the page. We’ve included our highlights and recommendations too. Found a cheaper Japan package holiday elsewhere? Contact us and we promise to beat the price. Call our customer services team on 0203 139 7074 or visit us at one of our branches to book your Japan getaway today. Our phone lines are open from 9am until 9pm Monday to Friday, 9am until 6pm on Saturday and 10am until 5pm on Sundays.
""" Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com> This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. RockStor is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from base_console import BaseConsole from share_iscsi_console import ShareIscsiConsole from rest_util import api_call class IscsiConsole(BaseConsole): def __init__(self, prompt): BaseConsole.__init__(self) self.prompt = prompt + ' Iscsi>' self.url = BaseConsole.url + 'sm/services/iscsi/' def do_status(self, args): iscsi_info = api_call(self.url) print iscsi_info def put_wrapper(self, args, command): input_data = {'command': command,} iscsi_info = api_call(self.url, data=input_data, calltype='put') print iscsi_info def do_start(self, args): return self.put_wrapper(args, 'start') def do_stop(self, args): return self.put_wrapper(args, 'stop') def do_share(self, args): input_share = args.split() if (len(input_share) > 0): si_console = ShareIscsiConsole(input_share[0]) if (len(input_share) > 1): si_console.onecmd(' '.join(input_share[1:])) else: si_console.cmdloop()
British Buyers Flock To Last Weeks A Place In The Sun Exhibition In London. As always we like to keep you updated with the latest property news, offers and gossip and we hope that you find this months newsletter informative. For more information about buying or selling a Spanish property do not hesitate to contact us below on our website. A Place In The Sun TV Show Shortlisted For An Award. We’re thrilled to announce that the much-admired Channel 4 TV show, A Place in the Sun, has been nominated for an award at this year’s TV Choice Awards! That’s right, fantastic TV presenters; Jasmine Harman, Jonnie Irwin, Danni Menzies, Laura Hamilton, Ben Hillman and Scarlette Douglas will duel against other TV titles for the crown in the ‘Best Daytime Show’ category later this year. A Place in the Sun debuted on Channel 4 in the year 2000 and has been opening up a world of opportunity to the British public ever since. With episodes of Home or Away, Winter Sun and Summer Sun airing all year round, the show has been inspiring individuals to swap their living rooms for sunnier climes and purchase their dream property abroad. We have appeared on the show and the whole team have always been very friendly and professional we wish them the best of luck. Top Tips On How To Shortlist Locations. 1 If you want to save your time and energy on trying to find your place in Spain then you should engage the services of a reputable Estate Agency such as www.chersun.com. We will be able to provide you recommendations of what are the best locations for you to consider based on your criteria. 2 Looking for a good property to rent out all year then you should consider a golf resort property. The golfers will rent in the winter months and in the summer the families will rent as they like the secure environment that golf resorts provide. Only 30% of buyers play golf which just goes to prove that you do not have to like golf to buy on a golf resort. 
3 If you want to benefit from a cooler climate in the summer head for the coast, the pleasant sea breeze will cool you down compared to inland locations. If you do decide you prefer an inland property ensure you have access to a swimming pool as it can get red hot in the countryside. 4 Visiting in the winter and you want to avoid ghost towns then look at properties close to spanish towns which have all year life. 5 Finally location is the key when buying any property anywhere in the world therefore take your time do your research, visit the different locations and this will help you narrow down your search. Important Tips When You Have Completed The Purchase Of Your Property In Spain. You have now signed for your spanish property and the process has gone smoothly and you now have the keys for your new home in the sun. I have below some tips that may be useful to you after the property purchase. Ensure you have taken out building and contents insurance for the day of the completion as this can be easily overlooked. Get the locks changed so that you know who has the keys. With property in Spain being listed with a number of agents I strongly recommend you change the barrel of the lock so that you can be assured that there are no spare keys being handed around. Employ the services of a good property manager who can look after and inspect the property when you are not in the country.The cost will be approximately 300 euros per year but well worth it. Finally I strongly recommend you arrange a spanish will as this will ensure that your spanish assets such as property, bank account and house contents can be transferred to your benificiaries. We had a client who did not have a will and it took the family 2 years to sort out his estate. Like us On Facebook and Subscribe To Our YouTube Channel To Get The Latest Offers And VIdeo Tours. We have below a small selection of our best buys for this month along with video tours. 
Our latest review from Val and Kenny Morris, who recently purchased on the popular La Torre Golf Resort, Murcia. Chersun Properties have been absolutely brilliant from start to finish in assisting us with purchasing our new property in Spain. Professional, experienced, knowledgeable and easy to do business with. When you’re buying a property abroad you need to put trust in people to help you make a big decision - we put our trust in Paul Cherry / Chersun Properties and they not only delivered but exceeded our expectations. The aftercare service is outstanding and they go out of their way to help in every way they can...nothing is too much trouble. In summary, Chersun Properties delivered the WOW factor and I have had, and will have, no hesitation whatsoever in recommending them. Chersun — We Are Here To Help You.
""" Database dumping of in-memory datastore for QCss3 This module writes to database the content of a memory datastore using adapters. For example, if we want to write an object representing an a load balancer (implementing C{ILoadBalancer} interface), we will use C{IDatabaseWriter(lb).write(txn)} where C{lb} is the load balancer and C{txn} a transaction to use. """ from zope.interface import Interface, implements from twisted.python import components from qcss3.collector.datastore import ILoadBalancer, IVirtualServer, IRealServer, ISorryServer class IDatabaseWriter(Interface): """Interface to write an entity to the database""" def write(txn, id=None): """ Dump the current entity to database using the given transaction. @param txn: transaction to use to dump to the database @param id: unique id to use for the entity (if needed) """ class ActionWriterMixIn: def write_actions(self, txn, actions, lb, vs=None, rs=None): """Write actions to `action' table. @param txn: transaction to use to write actions to database @param actions: actions to write @param lb: loadbalancer @param vs: virtual server @param rs: real server """ txn.execute("DELETE FROM action WHERE lb=%(lb)s AND vs=%(vs)s AND rs=%(rs)s", {'lb': lb, 'vs': vs, 'rs': rs}) for key in actions: txn.execute("INSERT INTO action VALUES " "(%(lb)s, %(vs)s, %(rs)s, %(key)s, %(value)s)", { 'lb': lb, 'vs': vs, 'rs': rs, 'key': key, 'value': actions[key] }) class LoadBalancerWriter(ActionWriterMixIn): implements(IDatabaseWriter) def __init__(self, loadbalancer): self.loadbalancer = loadbalancer def write(self, txn, id=None): """Dump the loadbalancer to the database""" # Remove existing information txn.execute("UPDATE loadbalancer SET deleted=CURRENT_TIMESTAMP " "WHERE name=%(name)s AND deleted='infinity'", {'name': self.loadbalancer.name}) # Insert new information txn.execute("INSERT INTO loadbalancer " "(name, type, description) VALUES " "(%(name)s, %(kind)s, %(description)s)", { 'name': self.loadbalancer.name, 'kind': 
self.loadbalancer.kind, 'description': self.loadbalancer.description }) # Then write virtual servers information virtualservers = self.loadbalancer.virtualservers for virtualserver in virtualservers: IDatabaseWriter( virtualservers[virtualserver]).write(txn, (self.loadbalancer.name, virtualserver)) self.write_actions(txn, self.loadbalancer.actions, self.loadbalancer.name) class VirtualServerWriter(ActionWriterMixIn): implements(IDatabaseWriter) def __init__(self, virtualserver): self.virtualserver = virtualserver def write(self, txn, id=None): """ Dump the virtual server to the database. @param id: (name of loadbalancer, ID of the virtual server) """ lb, vs = id # Remove existing information txn.execute("UPDATE virtualserver SET deleted=CURRENT_TIMESTAMP " "WHERE lb=%(lb)s AND vs=%(vs)s AND deleted='infinity'", {'lb': lb, 'vs': vs}) # Insert new information txn.execute("INSERT INTO virtualserver " "(lb, vs, name, vip, protocol, mode) VALUES " "(%(lb)s, %(vs)s, %(name)s, %(vip)s, %(protocol)s, %(mode)s)", {'lb': lb, 'vs': vs, 'name': self.virtualserver.name, 'vip': self.virtualserver.vip, 'protocol': self.virtualserver.protocol, 'mode': self.virtualserver.mode}) # Insert extra information for key in self.virtualserver.extra: txn.execute("INSERT INTO virtualserver_extra " "(lb, vs, key, value) VALUES " "(%(lb)s, %(vs)s, %(key)s, %(value)s)", { 'lb': lb, 'vs': vs, 'key': key, 'value': self.virtualserver.extra[key] }) # Insert real servers realservers = self.virtualserver.realservers for realserver in realservers: IDatabaseWriter( realservers[realserver]).write(txn, (lb, vs, realserver)) self.write_actions(txn, self.virtualserver.actions, lb, vs) class RealOrSorryServerWriter(ActionWriterMixIn): implements(IDatabaseWriter) def __init__(self, realserver): self.realserver = realserver def write(self, txn, id=None): """ Dump the real/sorry server to the database. 
@param id: (name of load balancer, ID of the virtualserver, ID of the real server) """ lb, vs, rs = id # Remove existing information txn.execute("UPDATE realserver SET deleted=CURRENT_TIMESTAMP " "WHERE lb=%(lb)s AND vs=%(vs)s AND rs=%(rs)s " "AND deleted='infinity'", {'lb': lb, 'vs': vs, 'rs': rs}) # Insert new information weight = None if IRealServer.providedBy(self.realserver): weight = self.realserver.weight txn.execute("INSERT INTO realserver " "(lb, vs, rs, name, rip, port, protocol, weight, rstate, sorry) " "VALUES " "(%(lb)s, %(vs)s, %(rs)s, %(name)s, %(rip)s, " "%(port)s, %(protocol)s, %(weight)s, %(rstate)s, %(sorry)s)", {'lb': lb, 'vs': vs, 'rs': rs, 'name': self.realserver.name, 'rip': self.realserver.rip, 'port': self.realserver.rport, 'protocol': self.realserver.protocol, 'weight': weight, 'rstate': self.realserver.state, 'sorry': ISorryServer.providedBy(self.realserver) }) # Insert extra information for key in self.realserver.extra: txn.execute("INSERT INTO realserver_extra VALUES " "(%(lb)s, %(vs)s, %(rs)s, %(key)s, %(value)s)", { 'lb': lb, 'vs': vs, 'rs': rs, 'key': key, 'value': self.realserver.extra[key] }) self.write_actions(txn, self.realserver.actions, lb, vs, rs) components.registerAdapter( LoadBalancerWriter, ILoadBalancer, IDatabaseWriter) components.registerAdapter( VirtualServerWriter, IVirtualServer, IDatabaseWriter) components.registerAdapter( RealOrSorryServerWriter, IRealServer, IDatabaseWriter) components.registerAdapter( RealOrSorryServerWriter, ISorryServer, IDatabaseWriter)
ANTIQUE EUREKA LONDON RARE ELECTROMAGNETIC CLOCK ENGLAND c. 1910 BRASS UNDER DOME. GREAT EUREKA UNDER GLASS DOME. THIS VERSION OF THE BRITISH EUREKA WAS MADE IN GERMANY, UNDER LICENSE FROM EUREKA. MEASURES 15 INCHES HIGH, AND 8 3/4 INCHES IN DIAMETER AT BASE (UNTESTED). The item “ANTIQUE EUREKA LONDON RARE ELECTROMAGNETIC CLOCK ENGLAND c. 1910 BRASS UNDER DOME” is in sale since Tuesday, October 31, 2017. This item is in the category “Collectibles\Clocks\Antique (Pre-1930)\Shelf, Mantel”. The seller is “otempora!” and is located in Phoenixville, Pennsylvania. This item can be shipped worldwide.
'''
'''
# Module of regular expressions used by a forum scraper: URL/topic-id
# patterns, captcha helpers, and post-page extraction patterns.

# targetregexp = r'\/{forum}\/(\d*)-(\d*)\-[\w|\-]*([vb]-?i*-?r-?t|se(?:x|ks|kas)|eb(?:at|i)|t-?r-?a-?h|(?:-ja|ischu)-(?:m\-|j\-|zh\-|devushk|par(?:en|nja)|hozja)|ots[o|\-]s|rolevit|-sis[\-|e][kc]|v(?:-pop|du(?:i\-|va))|rabyn|droch|[ob]?liz(?:at\-|va[it])|hentai|shlju(?:hu|shk)|kisk[au]-(?:vsja|mokr)|do-orgazm|shali|min-?et|nakaz(?:iva|hi|at)|(?:parni|devushki)-kto-hochet|hoch(?:u|esh)-tak-)[\w|\-]*\-read\.shtml'

# Topic-URL fragments: "<id1>-<id2>-<slug>[-read].(s|z)html".
s_id = r'(\d+)\-(\d+)\-[\w\-]*(?:\-read)?\.[sz]?html'        # captures (id1, id2)
s_topic = r'(\d+-\d+\-[\w|\-]*(?:\-read)?\.[sz]?html)'       # captures the whole topic filename
s_uni = r'((\d+)-(\d+)\-[\w|\-]*(?:\-read)?\.[sz]?html)'     # captures (topic, id1, id2)

# URL prefixes: optional "user." subdomain, domain, optional forum path.
ud_prefix = r'http:\/\/(?:(\w+)\.)?(\w+\.\w+)\/(?:[\w._-]+\/)?'    # captures (user, domain)
udf_prefix = r'http:\/\/(?:(\w+)\.)?(\w+\.\w+)\/(?:([\w._-]+)\/)?' # captures (user, domain, forum)
sub_prefix = r'http:\/\/(?:{0}\.)?{1}\/(?:{2}\/)?'                 # format template: user/domain/forum substituted in
ds_u_prefix = r'http:\/\/(?:(\w+)\.)?{0}\/(?:[\w._-]+\/)?'         # format template: fixed domain, captures user

# Full-URL patterns assembled from the fragments above; the trailing
# comments give the capture-group tuple each one yields.
f_udi = ud_prefix + s_id  # -> (user, domain, (id1, id2))
f_udfi = udf_prefix + s_id  # -> (user, domain, forum, (id1, id2))
f_udft = udf_prefix + s_topic  # -> (user, domain, forum, topic)
f_udfti = udf_prefix + s_uni  # -> (user, domain, forum, topic, (id1, id2))
f_sub_id = sub_prefix + s_id  # -> (id1, id2)
f_sub_topic = sub_prefix + s_topic  # -> (topic)

# Attachment picture URL on the i<N> image hosts (domain substituted via {0}).
picregexp = r'(http\:\/\/i\d+\.{0}\/\d+\/\d+\/\d+\/\d+\/\d+\/Picture\d*\.jpe?g)'
chashregexp = r'value\=\'?(\w+)\'?.*?name\=\'?cahash\'?'  # Regexp for captcha hash.
# Literal Russian text meaning: "Please wait 5 minutes and try to register
# the user again." (runtime match string -- do not translate).
wait5min_register = r'Пожалуйста, подождите 5 минут и попробуйте зарегистрировать пользователя снова.'
wait5min_uni = r'<font color=ff0000>'  # Stupid one, isn't it?
aregexp = r'http:\/\/a(\d)\.{0}\/i\/captcha\/'  # Regexp for auth server.
var_login = r'var user_login = \'(\w*)\';'  # Parse current login from js var.
# Inline [image-...-...] code pointing at a temp upload on the a<N> host;
# presumably matched to strip or rewrite embedded temp images -- TODO confirm.
imgregexp = r'\[image-\w*-\w*-http:\/\/a{0}.{1}\/i\/temp\/\d*\/[\w.]*\]'

# Format templates filled in with (server digit, domain, hash).
captchaurl = 'http://a{0}.{1}/i/captcha/{2}.png'
hashinmail = r'http:\/\/{0}\/p\/validate_user_email\.cgi\?p(?:=|&#61;)(\w+)'
show_link_options = r"showLinksOptions\(this,\s*?'\w+?',\s*?'(\d+?)',\s*?\d,\s*?\d,\s*?\d\)"
img_codes = r'(\[image-original-none-[\w:\.\/]+?\])'
# NOTE(review): "dеobfuscate" below contains a Cyrillic 'е' (U+0435), not a
# Latin 'e' -- verify this homoglyph is intentional (matching obfuscated
# pages) and not a typo, since it changes what the regexp matches.
deobfuscate_html = r'<script.*?>.*?dеobfuscate_html\s*?\(.*?\).*?<\/script>'
r302_found = r'302 Found'
r502_bad_gateway = r'502 Bad Gateway'


class getposts:
    # Patterns for pulling posts out of a thread page's inline JavaScript.
    addcomment = r"AddComment\s*?\(\s*?[\'\"](.+?)[\'\"]\s*?\)\s*?;"      # captures the comment payload
    setlastcomment = r"setLastComment\s*?\(\s*?[\'\"]?(\d+?)[\'\"]?\s*?\)\s*?;"  # captures the last-comment id
    cookie = r"document.cookie\s*?=\s*?[\'\"](.*)[\'\"]\s*?;"             # captures the cookie assignment
    runchecker = r"runChecker\s*?\(\s*?\)\s*?;"                           # presence check only, no captures
Membership entitles you to a subscription to our journal published three times a year, our e-newsletter sent six times a year, access to our online membership directory and job listings, members’ discount to our national conference and other events, and the intangible benefits of networking with colleagues. Memberships may be paid online or by invoice. For those who wish to have their church or organization pay for their membership, an invoice can be requested when filling out the application. This will be immediately emailed to your inbox, which you can then submit to your institution for payment. Memberships paid by invoice will not be activated until payment is received. Memberships paid online are activated immediately. Online payments are processed through PayPal where you can use a credit card or your PayPal account. We communicate primarily through email, so please be sure your email program will accept emails from admin@uccma.org and mailings@uccma.org. After submitting your application you will immediately receive several emails, so be sure to check your inbox. List your organization in the Institution field (for institutional memberships only). In the primary address fields list the postal address to which you wish the journal to be mailed. Fill in other required fields as listed. These instructions will remain here on the next page. One Year Membership - $50.00 (USD) Subscription period: 1 year No automatically recurring payments A membership for one individual. Two Year Membership - $90.00 (USD) Subscription period: 2 years No automatically recurring payments A membership for one individual. Institutional Membership - $75.00 (USD) Bundle (up to 6 members) Subscription period: 1 year No automatically recurring payments A membership level for up to six people representing one church, UCC conference office or other organization. Each member of the group will receive full website access, member discounts to events and all emailed newsletters and notices. 
One person serves as the administrator for the group and manages the membership (i.e. applies for membership, receives renewal notices, adds or removes group members, etc.). One journal subscription will be mailed in the name of the group administrator to the primary address listed.
# coding = utf-8
# Chunk of a Herschel/PACS (HIPE) spectroscopy pipeline script. It relies on
# variables defined earlier in the full script (slicedCubes, slicedFrames,
# obs, calTree, copyCube, camera, sink, LOGGER, ...) and on HIPE built-in
# tasks (specFlatFieldRange, wavelengthGrid, drizzle, ...).
# NOTE(review): block structure was reconstructed from a whitespace-mangled
# source; indentation of the mapping branches should be confirmed against
# the original ipipe script.
# specflatfielding range
# excludeleak = false
# range = [198.0, 203.0]
print "Processing second case"
lineSpec = isLineSpec(slicedCubes)    # line- vs range-spectroscopy flag; presumably used later -- TODO confirm
shortRange = isShortRange(obs)        # short-range flag; presumably used later -- TODO confirm
# Spectral flat-fielding over the fixed 198-203 micron range, leaks included.
slicedFrames = specFlatFieldRange(slicedFrames, useSplinesModel=True, excludeLeaks=False, calTree=calTree, copy=copyCube, selectedRange=[198.0, 203.0], wlIntervalKnots={1:2.0, 2:3.0, 3:2.0})
copyCube = False
maskNotFF = True
# Convert calibrated frames into PACS cubes and stamp RA/Dec metadata.
slicedCubes = specFrames2PacsCube(slicedFrames)
slicedCubes = centerRaDecMetaData(slicedCubes)
# Building the wavelength grids for each slice
# Used cal file: wavelengthGrid
upsample = getUpsample(obs)
waveGrid = wavelengthGrid(slicedCubes, oversample = 2, upsample = upsample, calTree = calTree)
# Active masks
slicedCubes = activateMasks(slicedCubes, String1d(["GLITCH", "UNCLEANCHOP", "SATURATION", "GRATMOVE", "BADFITPIX", "BADPIXELS"]), exclusive = True, copy = copyCube)
# Flag the remaining outliers (sigma-clipping in wavelength domain),
# with default parameters here
slicedCubes = specFlagOutliers(slicedCubes, waveGrid)
# Rebin all cubes on consistent wavelength grids
masksForRebinning = String1d(["OUTOFBAND", "GLITCH", "UNCLEANCHOP", "SATURATION", "GRATMOVE", "BADFITPIX", "OUTLIERS", "BADPIXELS"])
masksForRebinning.append("NOTFFED")
slicedCubes = activateMasks(slicedCubes, masksForRebinning, exclusive = True)
slicedRebinnedCubes = specWaveRebin(slicedCubes, waveGrid)
print slicedRebinnedCubes.refs.size()
# Only continue if there is at least one slice leftover after red-leak filtering
if slicedRebinnedCubes.refs.size() > 0:
    # Select only the slices in the PACS cube which are also in the rebinned cube
    slicedCubes = selectSlices(slicedCubes, refContext = slicedRebinnedCubes)
    # Combine the nod-A & nod-B rebinned cubes.
    # All cubes at the same raster position are averaged.
    # This is the final science-grade product for spatially undersampled
    # rasters and single pointings
    slicedRebinnedCubes = specAddNodCubes(slicedRebinnedCubes)
    # compute ra/dec meta keywords
    slicedRebinnedCubes = centerRaDecMetaData(slicedRebinnedCubes)
    # convert the cubes to a table
    slicedTable = pacsSpecCubeToTable(slicedRebinnedCubes)
    # Compute equidistant wavelength grid for equidistant regridding
    equidistantWaveGrid = wavelengthGrid(slicedCubes, oversample = 2, upsample = upsample, calTree = calTree, regularGrid = True, fracMinBinSize = 0.35)
    # determine mapping algorithm and parameters
    driz, pixelSize, interpolatePixelSize, oversampleSpace, upsampleSpace, pixFrac, source, mapType = determineMappingAlgorithm(slicedRebinnedCubes,camera)
    # Mosaic, per wavelength range, all raster pointings into a single cube.
    # Pre-initialise all optional products to None so updatePacsObservation
    # below can always reference them.
    slicedDrizzledCubes = None
    slicedDrizzledEquidistantCubes = None
    slicedInterpolatedCubes = None
    slicedInterpolatedEquidistantCubes = None
    slicedProjectedEquidistantCubes = None
    if driz:
        # Drizzle branch: build dedicated wavelength/spatial grids, drizzle,
        # and derive the projected cubes from the drizzled output grid.
        oversampleWave = 2
        upsampleWave = upsample
        waveGridForDrizzle = wavelengthGrid(slicedCubes, oversample = oversampleWave, upsample = upsampleWave, calTree = calTree)
        equidistantWaveGridForDrizzle = wavelengthGrid(slicedCubes, oversample = oversampleWave, upsample = upsampleWave, calTree = calTree, regularGrid = True, fracMinBinSize = 0.35)
        spaceGrid = spatialGrid(slicedCubes, wavelengthGrid = waveGridForDrizzle, oversample = oversampleSpace, upsample = upsampleSpace, pixfrac = pixFrac, calTree = calTree)
        slicedDrizzledCubes = drizzle(slicedCubes, wavelengthGrid = waveGridForDrizzle, spatialGrid = spaceGrid)[0]
        slicedDrizzledCubes = centerRaDecMetaData(slicedDrizzledCubes)
        sink.saveWhenMemoryShort(slicedDrizzledCubes)
        slicedDrizzledEquidistantCubes = specRegridWavelength(slicedDrizzledCubes, equidistantWaveGridForDrizzle)
        sink.saveWhenMemoryShort(slicedDrizzledEquidistantCubes)
        slicedProjectedCubes = specProject(slicedRebinnedCubes, cubeWithOutputGrid = slicedDrizzledCubes)
        del spaceGrid, waveGridForDrizzle, equidistantWaveGridForDrizzle
        del oversampleWave, upsampleWave
    else:
        # Non-drizzle branch: simple projection, plus interpolated and/or
        # equidistant products depending on the mapping type.
        slicedProjectedCubes = specProject(slicedRebinnedCubes, outputPixelsize = pixelSize)
        if mapType != "oversampled":
            slicedInterpolatedCubes = specInterpolate(slicedRebinnedCubes, outputPixelsize = interpolatePixelSize)
            slicedInterpolatedCubes = centerRaDecMetaData(slicedInterpolatedCubes)
        if (mapType=="nyquist" or mapType=="oversampled"):
            slicedProjectedEquidistantCubes = specRegridWavelength(slicedProjectedCubes, equidistantWaveGrid)
        else:
            slicedInterpolatedEquidistantCubes = specRegridWavelength(slicedInterpolatedCubes, equidistantWaveGrid)
    slicedProjectedCubes = centerRaDecMetaData(slicedProjectedCubes)
    sink.saveWhenMemoryShort(slicedProjectedCubes)
    # do a pointsource extraction for the pointed observations only
    # TODO: should this apply the rangeSpec or the lineSpec variant of the
    # processing? (question preserved from the original author)
    spectra1d = None
    if source=='point':
        if isRangeSpec(obs):
            c1_2nd, c9_2nd, c129_2nd = extractCentralSpectrum(slicedRebinnedCubes, smoothing = 'filter', width = 50, preFilterWidth = 15, calTree = calTree)
        else:
            c1_2nd, c9_2nd, c129_2nd = extractCentralSpectrum(slicedRebinnedCubes, smoothing = 'median', calTree = calTree)
        spectra1d = fillPacsCentralSpectra(slicedRebinnedCubes, ptSrcSpec = c1_2nd, ptSrc3x3Spec = c9_2nd)
        # del c1_2nd, c9_2nd, c129_2nd
    slicedRebinnedCubes.meta.set("sanitycheck",StringParameter("test2"))
    # update the level 2 of the ObservationContext
    obs = updatePacsObservation(obs, 2.0, [slicedCubes, slicedRebinnedCubes, slicedProjectedCubes, slicedDrizzledCubes, slicedTable, slicedInterpolatedCubes, slicedDrizzledEquidistantCubes, slicedInterpolatedEquidistantCubes, slicedProjectedEquidistantCubes])
    # remove variables to cleanup memory
    del slicedTable, equidistantWaveGrid, driz, pixelSize, interpolatePixelSize
    del oversampleSpace, upsampleSpace, pixFrac, source, mapType
    del slicedDrizzledCubes, slicedDrizzledEquidistantCubes
    del slicedInterpolatedCubes, slicedInterpolatedEquidistantCubes
    del slicedProjectedCubes, slicedProjectedEquidistantCubes
else:
    LOGGER.warning("No slices left anymore after filtering red-leak and out-of-band slices.")
# Delete some variables (memory clean-up)
del slicedFrames
del maskNotFF, upsample, waveGrid, masksForRebinning, slicedRebinnedCubes
# restore default sink state
restoreOldSinkState()
There are a few ways to type emoji and emoticons in Outlook if you want to insert a smiley face in an email or otherwise add some personality to your messages. On desktop versions of Microsoft Outlook, certain emoticons are automatically replaced with symbols when you type them using your keyboard. On web versions of Outlook, you can add emoji using a menu. Traditionally, you can type certain emoticons, meaning strings of text that represent an emotion, and they will be replaced by face images in Outlook. For example, you can type the string ":-)" to insert a smiley face in Outlook and ":-(" to insert a frowning face. This is handled through the AutoCorrect options, so if you want to disable or change how these features work, you can configure it through Outlook's AutoCorrect menu. You can also insert additional symbols, including emoticons and other characters such as hearts, by clicking the "Insert" tab in Microsoft Outlook, then clicking the "Symbol" button. Click "More Symbols" if you don't see the symbol you want, and browse various fonts for interesting characters. Until recently, the AutoCorrect smileys were inserted using Microsoft's Wingdings font, so they wouldn't show up correctly on systems that didn't have this font installed. More recently, the company has switched to using standardized emoji that should show up on most modern computers and smart phones. If you're using Outlook or Hotmail on the web, you can insert emoji using a built-in menu. While you're composing an email, click to place the cursor where you want the emoji to appear. Then, click the "Emoji" button in the bottom toolbar, below the message. Scroll through or search to find the emoji you want, then click on it to insert it into the message. If you're using Outlook or any other program on an Apple macOS computer, you can insert emoji and other symbols through a built-in menu accessible from most apps. 
While you're typing, place the cursor where you want to place the emoji, then click the "Edit" menu. From there, click "Emoji & Symbols." Scroll through the list or use the search box to find the emoji you want and click to insert it into your email or document. If you're ever using an unfamiliar system and aren't sure how to insert an emoji into an email or document, you can try searching for the emoji online and using your operating system's copy and paste functionality to insert it.
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import time
import functools
from logging import debug

from .resources import Resources

# Using a global to hold the delayed events for now.
# Also, delayed events are currently executed through the delayed_event_trigger
# rather than a separate event thread.
DELAYED_EVENTS = []

# TODO:
# - make engine queries threadsafe
# - ensure that only queries are used to interact with the model
# - write a class that will run its own thread for managing
# - Move event handling from engine into the event manager class
# - remove all uses of delayed_event_trigger


def update_trigger(func):
    """Decorator to trigger an update before given method is called.

    Intended for instance methods: when the first positional argument
    (normally ``self``) has an ``update`` attribute, ``update()`` is called
    before the wrapped method runs.
    """
    @functools.wraps(func)
    def new_function(*args, **kwargs):
        # args[0] is the bound instance when decorating a method; the
        # hasattr guard lets plain functions pass through untouched.
        if len(args) > 0 and hasattr(args[0], 'update'):
            args[0].update()
        return func(*args, **kwargs)
    return new_function


def delayed_event_trigger(func):
    """Decorator to trigger delayed events before calling a method.

    The trigger callable is looked up as an attribute (``CALLABLE``) on the
    decorator function itself, so the engine can install or remove it at
    runtime without the decorated code importing the engine.
    """
    @functools.wraps(func)
    def new_function(*args, **kwargs):
        if hasattr(delayed_event_trigger, 'CALLABLE'):
            debug('Performing Delayed Actions...')
            delayed_event_trigger.CALLABLE()
        return func(*args, **kwargs)
    return new_function


def calculate_update_increments(last_update, new_time=None):
    """Determine the number of update increments between last_update and now.

    Returns the elapsed time, in seconds, between ``last_update`` and
    ``new_time`` (``time.time()`` is used when ``new_time`` is falsy).
    """
    if not new_time:
        new_t = time.time()
    else:
        new_t = new_time
    return new_t - last_update


class ResourceUpdater(object):
    """Helper class to handle updating a Planet's resources based on income.

    Attributes "new_time", "difference", "resources" will be unset until
    the update() method is called.
    """
    def __init__(self, last_update, resources, rates, max_resources=None):
        self.last_update = last_update
        self.original_resources = resources
        self.rates = rates
        # Optional per-resource cap; when set, updated values are clamped.
        self.max_resources = max_resources
        self.new_time, self.difference, self.resources = [None, None, None]

    def update(self):
        """Calculate the new value of resources for planet.

        Returns a tuple of (updated resources, update timestamp); both are
        also stored on the instance.
        """
        self.new_time = time.time()
        increments = calculate_update_increments(self.last_update,
                                                 new_time=self.new_time)
        self.difference = Resources()
        # Work on a copy so original_resources stays untouched.
        self.resources = self.original_resources.copy()
        for res in self.original_resources:
            self.difference[res] = self.rates[res] * increments
            if self.max_resources:
                # Clamp to the storage cap for this resource.
                new_val = min(self.resources[res] + self.difference[res],
                              self.max_resources[res])
            else:
                new_val = self.resources[res] + self.difference[res]
            self.resources[res] = new_val
        return self.resources, self.new_time


class DelayedEvent(object):
    """Perform an action after some delay.

    :descriptor: A string describing the event.
    :delay: A number representing the number of seconds to delay.
    :action: A callable to be executed after the delay.

    When triggered, if the period of delay has passed, the provided action
    callable will be executed. If the event triggered it will return True
    otherwise it will return None.

    When triggered the attribute "triggered" will change from False to True
    unless an exception was thrown by the action callable. Once the
    "triggered" attribute is set to True the event cannot be re-triggered.

    When triggering events, the trigger time can be passed in as the keyword
    "_time" otherwise time.time() will be used.
    """
    def __init__(self, descriptor, delay, action, *args, **kwargs):
        self.descriptor = descriptor
        self.delay = delay
        self.action = action
        # Extra positional/keyword arguments are forwarded to the action.
        self.args = args
        self.kwargs = kwargs
        # Absolute wall-clock time after which the event may fire.
        self.trigger_time = time.time() + delay
        self.triggered = False

    def is_delay_over(self, _time=None):
        """Return True when the delay period has elapsed at ``_time``."""
        if not _time:
            _time = time.time()
        return _time >= self.trigger_time

    def __call__(self, _time=None):
        """Trigger the event; return True if the action ran, else None."""
        if not self.is_delay_over(_time):
            return
        if not self.triggered:
            debug('Triggering event "{}"...'.format(self.descriptor))
            self.action(*self.args, **self.kwargs)
            # Only reached if the action did not raise.
            self.triggered = True
            return True
30-40 mmHg No Silicone Border; ideal for those who are allergic to Silicone or those who simply prefer to use adhesive lotion to help keep their stockings up. If you are looking for Thigh Highs with Silicone Border, please see the appropriate pages. Thigh High without Silicone Border and Thigh High with Waist Attachment available in: Jobst Relief, Juzo Dynamic Varin, Juzo Soft, Mediven Plus, Sigvaris 500 Natural Rubber, Sigvaris 860 Select Comfort, Sigvaris 960 Value Care. Open Toe or Closed Toe, Regular, Short, or Petite Length Options.
""" Support for inheritance of fields down an XBlock hierarchy. """ from datetime import datetime from pytz import UTC from xmodule.partitions.partitions import UserPartition from xblock.fields import Scope, Boolean, String, Float, XBlockMixin, Dict, Integer, List from xblock.runtime import KeyValueStore, KvsFieldData from xmodule.fields import Date, Timedelta class UserPartitionList(List): """Special List class for listing UserPartitions""" def from_json(self, values): return [UserPartition.from_json(v) for v in values] def to_json(self, values): return [user_partition.to_json() for user_partition in values] class InheritanceMixin(XBlockMixin): """Field definitions for inheritable fields.""" graded = Boolean( help="Whether this module contributes to the final course grade", scope=Scope.settings, default=False, ) start = Date( help="Start time when this module is visible", default=datetime(2030, 1, 1, tzinfo=UTC), scope=Scope.settings ) due = Date( help="Date that this problem is due by", scope=Scope.settings, ) extended_due = Date( help="Date that this problem is due by for a particular student. 
This " "can be set by an instructor, and will override the global due " "date if it is set to a date that is later than the global due " "date.", default=None, scope=Scope.user_state, ) course_edit_method = String( help="Method with which this course is edited.", default="Studio", scope=Scope.settings ) giturl = String( help="url root for course data git repository", scope=Scope.settings, ) xqa_key = String(help="DO NOT USE", scope=Scope.settings) annotation_storage_url = String(help="Location of Annotation backend", scope=Scope.settings, default="http://your_annotation_storage.com", display_name="Url for Annotation Storage") annotation_token_secret = String(help="Secret string for annotation storage", scope=Scope.settings, default="xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", display_name="Secret Token String for Annotation") graceperiod = Timedelta( help="Amount of time after the due date that submissions will be accepted", scope=Scope.settings, ) showanswer = String( help="When to show the problem answer to the student", scope=Scope.settings, default="finished", ) rerandomize = String( help="When to rerandomize the problem", scope=Scope.settings, default="never", ) days_early_for_beta = Float( help="Number of days early to show content to beta users", scope=Scope.settings, default=None, ) static_asset_path = String( help="Path to use for static assets - overrides Studio c4x://", scope=Scope.settings, default='', ) text_customization = Dict( help="String customization substitutions for particular locations", scope=Scope.settings, ) use_latex_compiler = Boolean( help="Enable LaTeX templates?", default=False, scope=Scope.settings ) max_attempts = Integer( display_name="Maximum Attempts", help=("Defines the number of times a student can try to answer this problem. 
" "If the value is not set, infinite attempts are allowed."), values={"min": 0}, scope=Scope.settings ) matlab_api_key = String( display_name="Matlab API key", help="Enter the API key provided by MathWorks for accessing the MATLAB Hosted Service. " "This key is granted for exclusive use by this course for the specified duration. " "Please do not share the API key with other courses and notify MathWorks immediately " "if you believe the key is exposed or compromised. To obtain a key for your course, " "or to report and issue, please contact moocsupport@mathworks.com", scope=Scope.settings ) # This is should be scoped to content, but since it's defined in the policy # file, it is currently scoped to settings. user_partitions = UserPartitionList( help="The list of group configurations for partitioning students in content experiments.", default=[], scope=Scope.settings ) def compute_inherited_metadata(descriptor): """Given a descriptor, traverse all of its descendants and do metadata inheritance. Should be called on a CourseDescriptor after importing a course. NOTE: This means that there is no such thing as lazy loading at the moment--this accesses all the children.""" if descriptor.has_children: parent_metadata = descriptor.xblock_kvs.inherited_settings.copy() # add any of descriptor's explicitly set fields to the inheriting list for field in InheritanceMixin.fields.values(): if field.is_set_on(descriptor): # inherited_settings values are json repr parent_metadata[field.name] = field.read_json(descriptor) for child in descriptor.get_children(): inherit_metadata(child, parent_metadata) compute_inherited_metadata(child) def inherit_metadata(descriptor, inherited_data): """ Updates this module with metadata inherited from a containing module. 
Only metadata specified in self.inheritable_metadata will be inherited `inherited_data`: A dictionary mapping field names to the values that they should inherit """ try: descriptor.xblock_kvs.inherited_settings = inherited_data except AttributeError: # the kvs doesn't have inherited_settings probably b/c it's an error module pass def own_metadata(module): """ Return a dictionary that contains only non-inherited field keys, mapped to their serialized values """ return module.get_explicitly_set_fields_by_scope(Scope.settings) class InheritingFieldData(KvsFieldData): """A `FieldData` implementation that can inherit value from parents to children.""" def __init__(self, inheritable_names, **kwargs): """ `inheritable_names` is a list of names that can be inherited from parents. """ super(InheritingFieldData, self).__init__(**kwargs) self.inheritable_names = set(inheritable_names) def default(self, block, name): """ The default for an inheritable name is found on a parent. """ if name in self.inheritable_names and block.parent is not None: parent = block.get_parent() if parent: return getattr(parent, name) super(InheritingFieldData, self).default(block, name) def inheriting_field_data(kvs): """Create an InheritanceFieldData that inherits the names in InheritanceMixin.""" return InheritingFieldData( inheritable_names=InheritanceMixin.fields.keys(), kvs=kvs, ) class InheritanceKeyValueStore(KeyValueStore): """ Common superclass for kvs's which know about inheritance of settings. Offers simple dict-based storage of fields and lookup of inherited values. 
Note: inherited_settings is a dict of key to json values (internal xblock field repr) """ def __init__(self, initial_values=None, inherited_settings=None): super(InheritanceKeyValueStore, self).__init__() self.inherited_settings = inherited_settings or {} self._fields = initial_values or {} def get(self, key): return self._fields[key.field_name] def set(self, key, value): # xml backed courses are read-only, but they do have some computed fields self._fields[key.field_name] = value def delete(self, key): del self._fields[key.field_name] def has(self, key): return key.field_name in self._fields def default(self, key): """ Check to see if the default should be from inheritance rather than from the field's global default """ return self.inherited_settings[key.field_name]
Using our free SEO "Keyword Suggest" keyword analyzer, you can run a detailed keyword analysis of "nikon scope commercials". In this section you can find synonyms for the phrase "nikon scope commercials", similar queries, and a gallery of images showing the full range of possible uses for this phrase (Expressions). You can then use this information to build your website or blog, or to start an advertising campaign. The information is updated once a month.
# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import os
import shlex

from .._base_provider import Provider
from ._instance_info import InstanceInfo
from ._multipass_command import MultipassCommand


class Multipass(Provider):
    """A multipass provider for snapcraft to execute its lifecycle."""

    def _run(self, command) -> None:
        """Run an arbitrary command inside the instance."""
        self._multipass_cmd.execute(instance_name=self.instance_name, command=command)

    def _launch(self) -> None:
        """Launch the backing VM, pinned to the Ubuntu 16.04 image."""
        self._multipass_cmd.launch(instance_name=self.instance_name, image="16.04")

    def _mount(self, *, mountpoint: str, dev_or_path: str) -> None:
        """Mount the host path ``dev_or_path`` at ``mountpoint`` in the instance."""
        target = "{}:{}".format(self.instance_name, mountpoint)
        self._multipass_cmd.mount(source=dev_or_path, target=target)

    def _mount_snaps_directory(self) -> None:
        """Expose the host's installed-snaps directory inside the instance."""
        # https://github.com/snapcore/snapd/blob/master/dirs/dirs.go
        # CoreLibExecDir
        path = os.path.join(os.path.sep, "var", "lib", "snapd", "snaps")
        self._mount(mountpoint=self._SNAPS_MOUNTPOINT, dev_or_path=path)

    def _push_file(self, *, source: str, destination: str) -> None:
        """Copy a single host file into the instance at ``destination``."""
        destination = "{}:{}".format(self.instance_name, destination)
        self._multipass_cmd.copy_files(source=source, destination=destination)

    def __init__(self, *, project, echoer) -> None:
        super().__init__(project=project, echoer=echoer)
        self._multipass_cmd = MultipassCommand()
        # Populated by create(); stays None until the instance is launched.
        self._instance_info = None  # type: InstanceInfo

    def create(self) -> None:
        """Create the multipass instance and setup the build environment."""
        self.launch_instance()
        self._instance_info = self._get_instance_info()
        self.setup_snapcraft()

    def destroy(self) -> None:
        """Destroy the instance, trying to stop it first."""
        if self._instance_info is None:
            # create() never completed, so there is nothing to tear down.
            return

        if not self._instance_info.is_stopped():
            self._multipass_cmd.stop(instance_name=self.instance_name)

        self._multipass_cmd.delete(instance_name=self.instance_name)

    def provision_project(self, tarball: str) -> None:
        """Provision the multipass instance with the project to work with."""
        # TODO add instance check.

        # Step 0, sanitize the input
        tarball = shlex.quote(tarball)

        # First create a working directory
        self._multipass_cmd.execute(
            command=["mkdir", self.project_dir], instance_name=self.instance_name
        )

        # Then copy the tarball over
        # NOTE(review): the tarball is copied to the same path inside the
        # instance as on the host, so the extract step can reuse ``tarball``
        # verbatim -- confirm this path is always writable in the instance.
        destination = "{}:{}".format(self.instance_name, tarball)
        self._multipass_cmd.copy_files(source=tarball, destination=destination)

        # Finally extract it into project_dir.
        extract_cmd = ["tar", "-xvf", tarball, "-C", self.project_dir]
        self._multipass_cmd.execute(
            command=extract_cmd, instance_name=self.instance_name
        )

    def build_project(self) -> None:
        """Run snapcraft inside the instance to build the snap."""
        # TODO add instance check.

        # Use the full path as /snap/bin is not in PATH.
        snapcraft_cmd = "cd {}; /snap/bin/snapcraft snap --output {}".format(
            self.project_dir, self.snap_filename
        )
        self._multipass_cmd.execute(
            command=["sh", "-c", snapcraft_cmd], instance_name=self.instance_name
        )

    def retrieve_snap(self) -> str:
        """Copy the built snap back to the host and return its filename."""
        # TODO add instance check.

        source = "{}:{}/{}".format(
            self.instance_name, self.project_dir, self.snap_filename
        )
        self._multipass_cmd.copy_files(source=source, destination=self.snap_filename)
        return self.snap_filename

    def _get_instance_info(self):
        """Query multipass for instance metadata and parse the JSON payload."""
        instance_info_raw = self._multipass_cmd.info(
            instance_name=self.instance_name, output_format="json"
        )
        return InstanceInfo.from_json(
            instance_name=self.instance_name, json_info=instance_info_raw.decode()
        )
The long-awaited NBL season under the new FUBA presidency of Nasser Sserunjoji kicked off on Friday the 12th at the Lugogo MTN Arena. In the Women's category, defending champions Kcca Leopards did not have the kind of opener they would have wished for when JKL Lady Dolphins' Jamila Nansinkombi unleashed her beast mode. Lady Dolphins went on to defeat Kcca Leopards 70-49, with Jamila Nansinkombi reaching the 20-point mark on the evening as well as running away with player of the game. In the final fixture of the evening, defending champions City Oilers claimed a 69-66 victory over Betway Power in the men's opener. Oilers, who came from 10 points down at half time, proved they are still the team to beat if anyone has any intentions for the league, as Ivan Muhwezi showed his value, playing a key role in the comeback that later gave Oilers a 3-point victory despite their head coach Mandy Juruni not being on the touchline. Muhwezi, who scooped player of the game, managed to score 16 points on the evening, one behind Power's Syrus Kiviri, who had a great debut scoring 17 points — the highest of the evening — though that could not earn his side the victory.
# coding: utf-8 from __future__ import unicode_literals import os from py.io import TerminalWriter from box_manage_users.tc_client import TCClient from box_manage_users.util import setup_logging class Script(object): """ Script base class. Configures logging and outputs duck ascii art. """ _title = 'Base Script' _message = 'Instructions' _verbose_log_filename = os.path.join('logs', 'verbose.log') _failure_log_filename = os.path.join('logs', 'failure.log') _overview_log_filename = os.path.join('logs', 'overview.log') def __init__(self): self._logger = setup_logging(name='console') self._tw = TerminalWriter() self._tw.sep('#', self._title, green=True, bold=True) self._client = TCClient() self._logger.info( 'Great! Let\'s get this going!\n' ' _ _ _\n' ' >(.)__ <(.)__ =(.)__\n' ' (___/ (___/ (___/ \n' ) self._verbose_logger = setup_logging(self._verbose_log_filename, debug=True) self._fail_logger = setup_logging(self._failure_log_filename, name='failures') self._overview_logger = setup_logging(self._overview_log_filename, name='overview') def run(self): """ Runs the script. Intended to be overridden by base classes. """ self._tw.sep('#', 'Process Complete!', green=True, bold=True) def get_user_id_from_email_address(self, email): """ Given an email address, find the user in the enterprise that has that email address and return that user's id. :param email: User's email address for which to retrieve the user ID. :return: The user ID of the user with the given email address. """ user = self._client.get_user_by_email(email) if user is None: self._fail_logger.warning('No user with login %s. Could not deprovision.', email) self._overview_logger.warning('No user with login %s. Could not deprovision.', email) return None return user.id
Sorry! No routes available from Kaila devi at this moment. Click here to go to Home. Kaila Devi is a village Panchayat located in the Karauli district of Rajasthan. It is situated in between Karauli (26 km from Kaila Devi) and Jaipur (146 km from Kaila Devi). There are good budget hotels available in this region such as Hotel Sapphire (Tariff – Rs. 880), Neel Samudra Guest House (Tariff – Rs. 750), Lucky Lodge (Tariff – Rs. 563) and others. The official language of Kaila Devi is Hindi. The important festival celebrated in this region is Kaila Devi Annual Fair in the month of March – April. Around 20, 00,000 devotees from Uttar Pradesh, Madhya Pradesh, and Rajasthan will gather here to get the blessings of Goddess Kaila Devi. Tourist places to be visited nearby Kaila Devi are Ranthambore Sanctuary, Nakkash ki Devi & Gomti Dham, Shri Mahavirji Temple, Mehandipur Balaji Temple and Barbasin Temple. Jaipur International Airport is the nearest airport at a distance of 121 km. The nearest railway station to Kaila Devi is Gangapur city which is at a distance of 22 km. This place connects to major cities and towns such as Karauli (23 km), Gangapur City (34 km), Hindaun City (53 km), Jaipur (170 km), Bharatpur (90 km), Mathura (220 km), Agra (225 km) and Delhi (325 km). Visit this place with the help of ticketgoose.com, India’s leading online bus ticketing portal.
import numpy as np

from map import map_pts_wts
from gaussian_quad import gaussxw


def piessens(N, x0, nonsingular_N=-1):
    """
    Quadrature points and weights for integrating a function with form
    f(x) / (x - x0) on the interval [-1, 1].

    Uses the 2N point gauss rule derived in Piessens (1970).  Almost
    certainly suboptimal, but it's very simple and it works.  Exact for
    polynomials of order 4N.

    :param N: order of the singular (principal value) rule.
    :param x0: location of the singularity; must lie in (-1, 1).
    :param nonsingular_N: order of the plain Gauss rule used on the
        non-singular subinterval; defaults to ``N`` when left at -1.
    :returns: tuple ``(x, w)`` of quadrature nodes and weights.
    """
    if nonsingular_N == -1:
        nonsingular_N = N
    # NOTE: the original code contained no-op self-assignments
    # (``N = N`` etc.); they have been removed.

    # Split the interval into two sections. One is properly integrable.
    # The other is symmetric about the singularity point and must be
    # computed as a Cauchy principal value integral.
    if x0 < 0.0:
        pv_start = -1.0
        pv_length = 2 * (x0 + 1)
        proper_length = 2.0 - pv_length
        proper_start = pv_start + pv_length
    else:
        pv_length = 2 * (-x0 + 1)
        pv_start = 1.0 - pv_length
        proper_start = -1.0
        proper_length = 2.0 - pv_length

    # Plain Gauss rule on the subinterval without the singularity.
    gx, gw = gaussxw(nonsingular_N)
    x, w = map_pts_wts(gx, gw, proper_start, proper_start + proper_length)

    # Get the points for the singular part using Piessens' method.
    x_sing, w_sing = piessen_method(N, pv_start, pv_length, x0)

    # Finished!  Concatenate both sub-rules.
    x = np.append(x, x_sing)
    w = np.append(w, w_sing)
    return x, w


def piessen_method(N, pv_start, pv_length, x0, add_singularity=True):
    """
    Map Piessens' [-1, 1] nodes onto the principal-value subinterval.

    :param add_singularity: when True, the (x - x0) factor is folded into
        the weights so the returned rule is applied to f(x) directly rather
        than to f(x) / (x - x0).
    """
    x_base, w_base = piessen_neg_one_to_one_nodes(N)
    # Convert to the interval from [pv_start, pv_start + pv_length]
    x = (pv_length / 2) * x_base + \
        (2 * pv_start + pv_length) / 2.0

    # No need to scale the weights because the linear factor in the 1/r
    # exactly cancels the jacobian.
    w = w_base

    # If the caller does not factor the 1 / (x - x0) term out of the
    # integrand, account for it here by folding it into the weights.
    if add_singularity:
        w *= x - x0
    return x, w


def piessen_neg_one_to_one_nodes(N):
    """Piessen nodes and weights for [-1, 1].

    Raises when ``N`` is odd: the construction needs an even order.
    """
    if N % 2 == 1:
        raise Exception("Piessens method requires an even quadrature " +
                        "order")

    gx, gw = gaussxw(2 * N)
    x = gx
    w = gw / gx
    return x, w
Here you can prepare for the Current Affairs Online Test. Click the button for a 100% free full Current Affairs practice test. Current affairs is mainly a type of broadcast journalism that investigates and discusses recent news stories; it might include news that is still happening. Mostly the discussion revolves around why, where and how a story happened. The subject of current affairs is a bit different from normal news programmes, which report news stories as quickly as they can, because those news programmes do not involve as much investigation as current affairs programmes do. Furthermore, it is different from the magazine show format, where events are discussed instantly. Mainly, current affairs involves the study of matters that are happening in the country or issues that have already happened. This page contains an online test of current affairs. Here we have uploaded a large collection of questions related to current affairs that will assist you in improving your knowledge and skills. You can take this test as many times as you want.
#!/usr/bin/env python3
"""Simulate ship-navigation instructions read from stdin (AoC-style, part 1)."""
import sys
import re


def extract(s):
    """Return every (possibly negative) integer found in *s*, in order."""
    return [int(token) for token in re.findall(r'-?\d+', s)]


# Unit vectors on a screen-style grid (north decreases y).
UP = (0, -1)
DOWN = (0, 1)
LEFT = (-1, 0)
RIGHT = (1, 0)

DIRS = {
    'E': RIGHT,
    'W': LEFT,
    'N': UP,
    'S': DOWN
}

# Compass order used for 90-degree rotations.
ROT = 'NESW'


def add(v1, v2):
    """Component-wise sum of two equal-length vectors."""
    return tuple(map(sum, zip(v1, v2)))


def mul(k, v2):
    """Scale vector *v2* by the scalar *k*."""
    return tuple(k * component for component in v2)


def main(args):
    instructions = [line.strip() for line in sys.stdin]

    position = (0, 0)
    facing = 'E'
    for instruction in instructions:
        action, amount = instruction[0], int(instruction[1:])
        if action in DIRS:
            # Translate in a fixed compass direction; facing is unchanged.
            position = add(position, mul(amount, DIRS[action]))
        elif action in 'LR':
            # Rotate the facing in 90-degree steps around the N-E-S-W cycle.
            quarter_turns = amount // 90
            if action == 'L':
                quarter_turns = -quarter_turns
            facing = ROT[(ROT.index(facing) + quarter_turns) % 4]
        elif action == 'F':
            # Move forward along the current facing.
            position = add(position, mul(amount, DIRS[facing]))

    print(position)
    print(abs(position[0]) + abs(position[1]))


if __name__ == '__main__':
    sys.exit(main(sys.argv))
The full-service practice, McLaren Engineering Group, welcomed local officials and other visitors on Thursday to the new space at 530 Chestnut Ridge Road, the newest among its 10 offices nationwide. Located off the Garden State Parkway, the location currently houses a staff of 130, but has capacity for more than 250 professionals. The debut comes six months after McLaren announced its plans to grow in the state and take space in Woodcliff Lake, under a 12-year lease with Keystone Property Group. The firm's other work in New Jersey includes mixed-use developments at 197 and 207 Van Vorst in Jersey City, the Pulaski Skyway bridge in Kearny, a recreational pier in Weehawken and automated signage at Newark Liberty International Airport. Its team has also worked on wharf reconstruction at Port Newark-Elizabeth and the Babbio Center at Stevens Institute of Technology in Hoboken, while working to improve over 300 miles of coastline in the Port of New York and New Jersey, according to a news release. McLaren said it planned to hire several dozen professionals in Woodcliff Lake in the coming months, including engineers for entry-level, project management and senior leadership roles. The firm is also looking to add administrative professionals in accounting, information technology, human resources and marketing.
""" .. code-block:: javascript import { Image } from 'react-native' A Layer Group with Bitmap as child or a Shape Layer with an Image as a fill can be tagged as **Image**. The name of the layer will be the name of the image source generated. """ from sketch_components.utils import combine_styles, small_camel_case from .commons import StyleSheet from .component import Component from ..props import Prop, Props, PropTypes, EdgeInsetsPropType, \ StyleSheetPropType, ImageSourcePropType, \ ImageStylePropTypes class Image(Component): def __init__(self, source_name=None, source_path=None, props=None, parent=None, layer=None): super(self.__class__, self).__init__(parent=parent, layer=layer) self.name = 'Image' self.path = 'react-native' self.is_default = False self.props = Props({ 'style': StyleSheetPropType(ImageStylePropTypes), 'source': ImageSourcePropType, 'defaultSource': PropTypes.oneOfType([PropTypes.shape({ 'uri': PropTypes.string, 'width': PropTypes.number, 'height': PropTypes.number, 'scale': PropTypes.number}), PropTypes.number]), 'accessible': PropTypes.bool, 'accessibilityLabel': PropTypes.node, 'blurRadius': PropTypes.number, 'capInsets': EdgeInsetsPropType, 'resizeMethod': PropTypes.oneOf(['auto', 'resize', 'scale']), 'resizeMode': PropTypes.oneOf( ['cover', 'contain', 'stretch', 'repeat', 'center']), 'testID': PropTypes.string, 'onLayout': PropTypes.func, 'onLoadStart': PropTypes.func, 'onProgress': PropTypes.func, 'onError': PropTypes.func, 'onPartialLoad': PropTypes.func, 'onLoad': PropTypes.func, 'onLoadEnd': PropTypes.func }) self.update_props(props) self.is_self_closing = True self.source_name = source_name self.source_path = source_path def update_dependencies(self): for child in self.children: self.dependencies.add(child.import_statement()) if not child.is_exportable_component(): child.update_dependencies() self.dependencies.update(child.dependencies) from mako.template import Template if self.source_name and self.source_path: image_source_dependency 
= Template( """import ${name} from '${path}'""").render( name=self.source_name, path=self.source_path) self.dependencies.add(image_source_dependency) @classmethod def create_component(cls, sketch_layer, parent=None): if sketch_layer.component: props = sketch_layer.component.get_react_native_props() else: props = dict() style = props.get('style', Prop(dict())).value source_path = '' source_name = '' if sketch_layer.name is not None and sketch_layer.image is not None: source_name = small_camel_case(sketch_layer.name) sketch_layer.image.set_image_source(source_name) source_path = sketch_layer.image.get_image_source() elif sketch_layer.is_layer_group(): image_layer = None for layer in sketch_layer.layers: if layer.is_image_layer(): image_layer = layer if (image_layer is not None and image_layer.name is not None and image_layer.image is not None): source_name = small_camel_case(image_layer.name) image_layer.image.set_image_source(source_name) source_path = image_layer.image.get_image_source() elif sketch_layer.is_shape_group(): if (sketch_layer.style and sketch_layer.style.fills and sketch_layer.style.fills[0].image): source_name = small_camel_case(sketch_layer.name) sketch_layer.style.fills[0].image.set_image_source(source_name) source_path = sketch_layer.style.fills[ 0].image.get_image_source() if props.get('source', None) is None and source_name != '': props['source'] = Prop(source_name, is_literal=True) elif props.get('source') is not None: source_path = '' source_name = '' props.update( {'style': StyleSheet( combine_styles(sketch_layer.get_css_view_styles(), style), name=sketch_layer.name)}) component = Image(source_name=source_name, source_path=source_path, props=props, parent=parent, layer=sketch_layer) for layer in sketch_layer.layers: if (layer.is_shape_group() and layer.hasClippingMask or layer.is_rectangle_shape() or layer.is_oval_shape()): child = layer.get_react_component(pass_styles=True, parent=component) if child: component.add_child(child) # TODO: 
Remove after bug in react native is fixed if (component.props['style'].styles['borderRadius']) is not None and ( component.props['style'].styles['borderRadius'] > 0): component.props['style'].styles['backgroundColor'] = None return component
Everyone’s lymphoma story is unique and special. By simply sharing your story, you can impact countless lives. Stories of Hope is LRF’s ongoing effort to give those affected by lymphoma a voice. To read other stories of hope, please click on the links on the right hand side of the page. To submit your own story of hope, click here.
"""Views, both for UI presentation and for api calls. UI views have no prefix, api calls are prefixed with 'api' """ from functools import wraps from django.shortcuts import render from django.http import (HttpResponse, HttpResponseNotAllowed, HttpResponseRedirect, HttpResponseForbidden, HttpResponseNotFound) from django.contrib.auth import logout as django_logout from django.contrib.auth.views import login as django_login from django.contrib.auth.decorators import login_required from django.views.decorators.csrf import csrf_exempt import json import db from django.core.exceptions import ObjectDoesNotExist def api_call(methods=None, needs_login=False): """Enforces valid http method has been used, if `needs_login` - validates the user is authenticated, converts KeyError into Http404. :param methods: valid http methods ('GET', 'POST', 'PUT', 'DELETE') :param needs_login: if authenticated user session needs to be present :returns: decorated view """ if not methods: methods = ['GET'] def decorator(f): @wraps(f) def wrapper(request, *args, **kwds): if needs_login and not request.user.is_authenticated(): return HttpResponseForbidden('Unauthorized/timed out user') if request.method not in methods: return HttpResponseNotAllowed(methods) res = None try: res = f(request, *args, **kwds) except (KeyError, ObjectDoesNotExist): pass if not res: return HttpResponseNotFound('Resource not found') return HttpResponse( json.dumps(res, indent=4), mimetype='application/json') # do not need csfr for REST api...? 
return csrf_exempt(wrapper) return decorator ##### html views def logout(request): django_logout(request) return HttpResponseRedirect('/') def blog_explorer(request): return render( request, 'spudblog/blog_explorer.html', {'blogs': db.get_blogs()}) @login_required def my_blogs(request): return render( request, 'spudblog/my_blogs.html', {'blogs': db.get_blogs(request.user.id), 'user_name': request.user.username}) ##### API @api_call() def all(request): """Debug api call, lists all users, their blogs and posts.""" return db.all_as_json() @api_call() def full_blog(request, blog_id): """Gets full blog, with title, id, posts.""" blog_id = long(blog_id) return db.get_full_blog(blog_id).as_json() @api_call(methods=['POST', 'PUT', 'DELETE'], needs_login=True) def blog(request, blog_id): """CRUD operations on blog, ie. create, update and delete (no fetch, that's done within :func:`views.full-blog`).""" if request.method == 'POST': blog = json.loads(request.body) return db.create_blog(request.user.id, blog).as_json() elif request.method == 'PUT': blog_id = long(blog_id) blog = json.loads(request.body) blog['id'] = blog_id # whether id's set or not... return db.update_blog(blog).as_json() elif request.method == 'DELETE': blog_id = long(blog_id) return db.del_blog(blog_id) @api_call(methods=['POST', 'PUT', 'DELETE'], needs_login=True) def post(request, post_id): """CRUD operations on post, ie. create, update and delete (no fetch, that's done within :func:`views.full-blog`.""" if request.method == 'POST': post = json.loads(request.body) blog_id = post.pop('blog_id') return db.create_post(blog_id, post).as_json() elif request.method == 'PUT': post_id = long(post_id) post = json.loads(request.body) return db.update_post(post).as_json() elif request.method == 'DELETE': post_id = long(post_id) return db.del_post(post_id)
Thomas S. Zetah, 78, of Motley died February 14, 2018, at his home, with his family around him. Services were held Feb. 19 at St. Michaels Catholic Church in Motley with Father Joe Korf officiating. Interment was at Motley Public Cemetery. Tom was born May 14, 1939 in Staples to Sylvester and Veronica (Schmidt) Zetah. He grew up on the family farm and graduated from Motley High School in 1957. He was united in marriage to Karen Anderson of Pillager on October 20, 1960 at St. Michaels Catholic Church. They lived on the family farm in Motley until they retired from farming in 1996, and moved to their home on the river in Pillager. Tom and his two brothers, Jerry and Donny started and operated the KOA Campground from 1972-1988. He owned and operated S&N Hardware in Motley from 1979-2012, later renaming the store Tom’s Hardware Hank. Tom enjoyed hunting, fishing, gardening, predicting the weather, garage sales and spending time with his children and grandchildren. Tom is survived by his wife of 57 years; one daughter, Sue (Wade) Cantleberry of Motley; five sons, Gary (Vicki) of Tulsa, OK, Terry (Sherri), Scott (Cindy), Mark all of Motley and David (Josetta) of Monticello; 13 grandchildren; four great-grandchildren; one brother, Donald of Brainerd and three sisters, Barbara Nichols of Fridley, Rebecca Thielen of Minneapolis and Theresa Krueger of Maple Grove. He was preceded in death by his parents; two brothers, Jerry and Francis; two brothers-in-law, Bryan and Mike.
#!/usr/bin/env python3 # # python script to convert the handwritten chapter .texi files, which include # the generated files for each function, to DocBook XML # # all we care about is the content of the refentries, so all this needs to do is # convert the @include of the makedoc generated .def files to xi:include of the # makedocbook generated .xml files. # from __future__ import print_function import sys import re def main(): first_node = True print ('<?xml version="1.0" encoding="UTF-8"?>') print ('<!DOCTYPE chapter PUBLIC "-//OASIS//DTD DocBook V4.5//EN" "http://www.oasis-open.org/docbook/xml/4.5/docbookx.dtd">') for l in sys.stdin.readlines(): l = l.rstrip() # transform @file{foo} to <filename>foo</filename> l = re.sub("@file{(.*?)}", "<filename>\\1</filename>", l) if l.startswith("@node"): l = l.replace("@node", "", 1) l = l.strip() l = l.lower() if first_node: print ('<chapter id="%s" xmlns:xi="http://www.w3.org/2001/XInclude">' % l.replace(' ', '_')) first_node = False elif l.startswith("@chapter "): l = l.replace("@chapter ", "", 1) print ('<title>%s</title>' % l) elif l.startswith("@include "): l = l.replace("@include ", "", 1) l = l.replace(".def", ".xml", 1) print ('<xi:include href="%s"/>' % l.strip()) print ('</chapter>') if __name__ == "__main__" : main()
Just because the weather is bad doesn’t mean the fishing has to be! We talked last week about how to find feeding inshore fish in bad weather, and we’re continuing the conversation with these tips on how to fish during windy days. With just a few simple tricks, you can use the wind to your advantage and have a great day out on the water. Watch the video below for the secrets on how to leverage the wind, plus see how I did this on a recent trip to catch a nice redfish. Plan your trip with the wind. If the wind is coming from the north, start at the northernmost spot and drift down south to your other spots. Position your boat or kayak perpendicular to the wind, which will slow your drift down. Have a drift sock or drift anchor ready to slow your drift down even more if necessary. Use a push pole or trolling motor to guide your drift towards your target areas. Don’t forget to fish during the drifts between your target areas. That’s when I caught this nice red! Have any questions about fishing in the wind? And if you want more detailed videos where we share our exact spots and strategies, check out our SUPER-Community here. Tony, howbout’cha?! i recently found myself challenged by windy conditions while fishing pre and post cold front. 1st 2 days being sunny and breezy from south/southeast and shifting midday from the north/northwest for the following 2, I found myself adjusting to the shift quite challenging. But even in 30mph winds we were able to find redfish while wading the heaviest gusts of the day. Safety being of utmost concern when wind does pick up maybe you can elaborate the conditions that signal red flag warnings and such and how it relates to craft size and capability to handle such conditions as well as how to safely identify a return route to dock if you do get caught up in drastic weather changes such that create dangerous chop to cross. I.e. hug the shallow shoreline on the leeward side upon your return. 
I think too many of us underestimate the wind in our haste to go fishing and the drastic conditions it can create, especially for an under-experienced boater. Food for thought, man. Good stuff, Tony………on days like that, it’s sure nice to employ 2 vehicles and float from one to the other. A question for Tony not related to your good video of fishing in the wind. I am looking for a T-bar kayak carrier that fits into the receiver hitch on a pickup truck, carrying the kayak in the bed & extending out the back. I know you podcasted a video last year — what manufacturer did you use? I use the T-Bone bed extender made by Boonedox USA. It’s a bit pricey but it’s much lighter than the cheaper brands. The bar that extends from the hitch is also curved, which helps when backing down or going up slopes to avoid scraping the ground. I have a pretty low truck too.
# # Copyright (c) 2016 nexB Inc. and others. All rights reserved. # http://nexb.com and https://github.com/nexB/scancode-toolkit/ # The ScanCode software is licensed under the Apache License version 2.0. # Data generated with ScanCode require an acknowledgment. # ScanCode is a trademark of nexB Inc. # # You may not use this software except in compliance with the License. # You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed # under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR # CONDITIONS OF ANY KIND, either express or implied. See the License for the # specific language governing permissions and limitations under the License. # # When you publish or redistribute any data created with ScanCode or any ScanCode # derivative work, you must accompany this data with the following acknowledgment: # # Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, either express or implied. No content created from # ScanCode should be considered or used as legal advice. Consult an Attorney # for any legal advice. # ScanCode is a free software code scanning tool from nexB Inc. and others. # Visit https://github.com/nexB/scancode-toolkit/ for support and download. # -*- coding: UTF-8 -*- from __future__ import print_function from __future__ import absolute_import from __future__ import unicode_literals import codecs import copy import sqlite3 from collections import OrderedDict from operator import itemgetter from os.path import abspath from os.path import basename from os.path import dirname from os.path import exists from os.path import expanduser from os.path import isfile from os.path import join import os import simplejson as json import time from commoncode import fileutils """ Format scans outputs. 
""" def get_template(templates_dir, template_name='template.html'): # @ReservedAssignment """ Given a template directory, load and return the template file in the template_name file found in that directory. """ from jinja2 import Environment, FileSystemLoader env = Environment(loader=FileSystemLoader(templates_dir)) template = env.get_template(template_name) return template def get_template_dir(format): # @ReservedAssignment """ Given a format string return the corresponding standard template directory. """ return join(dirname(__file__), 'templates', format) def as_html_app(scanned_path, output_file): """ Return an HTML string built from a list of results and the html-app template. """ template = get_template(get_template_dir('html-app')) _, assets_dir = get_html_app_files_dirs(output_file) return template.render(assets_dir=assets_dir, scanned_path=scanned_path) def get_html_app_help(output_filename): """ Return an HTML string containing html-app help page with a reference back to the main app """ template = get_template(get_template_dir('html-app'), template_name='help_template.html') return template.render(main_app=output_filename) class HtmlAppAssetCopyWarning(Exception): pass class HtmlAppAssetCopyError(Exception): pass def is_stdout(output_file): return output_file.name == '<stdout>' def get_html_app_files_dirs(output_file): """ Return a tuple of (parent_dir, dir_name) directory named after the `output_file` file object file_base_name (stripped from extension) and a `_files` suffix Return empty strings if output is to stdout. """ if is_stdout(output_file): return '', '' file_name = output_file.name parent_dir = dirname(file_name) dir_name = fileutils.file_base_name(file_name) + '_files' return parent_dir, dir_name def create_html_app_assets(results, output_file): """ Given an html-app output_file, create the corresponding `_files` directory and copy the assets to this directory. The target directory is deleted if it exists. 
Raise HtmlAppAssetCopyWarning if the output_file is <stdout> or HtmlAppAssetCopyError if the copy was not possible. """ try: if is_stdout(output_file): raise HtmlAppAssetCopyWarning() assets_dir = join(get_template_dir('html-app'), 'assets') # delete old assets tgt_dirs = get_html_app_files_dirs(output_file) target_dir = join(*tgt_dirs) if exists(target_dir): fileutils.delete(target_dir) # copy assets fileutils.copytree(assets_dir, target_dir) # write json data root_path, assets_dir = get_html_app_files_dirs(output_file) with codecs.open(join(root_path, assets_dir, 'data.json'), 'wb', encoding='utf-8') as f: f.write('data=') json.dump(results, f, iterable_as_array=True) # create help file with codecs.open(join(root_path, assets_dir, 'help.html'), 'wb', encoding='utf-8') as f: f.write(get_html_app_help(basename(output_file.name))) except HtmlAppAssetCopyWarning, w: raise w except Exception, e: raise HtmlAppAssetCopyError(e) def isNetdis(liceses,conn): netdisliceses = [] netdisLi = [] cursor = conn.execute("SELECT NAME from NETDISTRIBUTIONLICENSE") for row in cursor: netdisLi.append(row[0]) for licese in liceses: if(licese in netdisLi): netdisliceses.append(licese) return netdisliceses sameLi = [] def isSameLi(liceses,conn): sameLi = [] cursor = conn.execute("SELECT NAME from sameLicense") for row in cursor: sameLi.append(row[0]) sameliceses = [] for licese in liceses: if(licese in sameLi): sameliceses.append(licese) return sameliceses openLi = [] def isOpensource(liceses,conn): openLi = [] cursor = conn.execute("SELECT NAME from OPENSOURCELICENSE") for row in cursor: openLi.append(row[0]) openliceses = [] for licese in liceses: if(licese in openLi): openliceses.append(licese) return openliceses notPatLi = [] def isNotPatent(liceses,conn): notPatLi = [] cursor = conn.execute("SELECT NAME from NOTPATENTLICENSE") for row in cursor: notPatLi.append(row[0]) notPatliceses = [] for licese in liceses: if(licese in notPatLi): notPatliceses.append(licese) return notPatliceses 
def _license_names(conn, table):
    """Return the list of license NAMEs stored in `table` of the
    license-properties sqlite database behind `conn`.

    `table` is always one of the fixed internal table names below, never
    user input.
    """
    return [row[0] for row in conn.execute("SELECT NAME from %s" % table)]


def isModified(liceses, conn):
    """Return the subset of `liceses` listed in the ModIFYLICENSE table,
    preserving input order."""
    ModLi = _license_names(conn, 'ModIFYLICENSE')
    return [licese for licese in liceses if licese in ModLi]


def isTrademark(liceses, conn):
    """Return the subset of `liceses` listed in the TRADEMARKLICENSE table,
    preserving input order."""
    TMLi = _license_names(conn, 'TRADEMARKLICENSE')
    return [licese for licese in liceses if licese in TMLi]


def mayConflict(liceses, conn):
    """For each license of `liceses` found in the sameLicense table, list the
    other sameLicense entries it may conflict with.

    :returns: list of {'licenses': name, 'maycf': [other names]} dicts.

    Bug fix: this used to read the module-level `sameLi` list, which is never
    populated (isSameLi() shadows it with a local), so the function always
    returned []. The table is now loaded through `conn` directly.
    """
    same = _license_names(conn, 'sameLicense')
    maycficeses = []
    for licese in liceses:
        if licese in same:
            others = [li for li in same if li != licese]
            maycficeses.append({'licenses': licese, 'maycf': others})
    return maycficeses


def isConflict(liceses, conn):
    """Return pairs [a, b] of licenses from `liceses` that are both in the
    sameLicense table, except when both are GPL-family licenses.

    Bug fix: same dead module-global `sameLi` issue as mayConflict(); the
    table is now loaded through `conn` directly.
    """
    same = _license_names(conn, 'sameLicense')
    confliceses = []
    for i in range(len(liceses)):
        for j in range(i + 1, len(liceses)):
            if ((liceses[i] in same) and (liceses[j] in same)
                    and not (('gpl' in liceses[i]) and ('gpl' in liceses[j]))):
                confliceses.append([liceses[i], liceses[j]])
    return confliceses


def printre(liceselist, loacllist):
    """For each license name, collect the sorted paths where it was found.

    :param liceselist: license names.
    :param loacllist: list of single-entry {license: path} dicts.
    :returns: list of {'licenses': name, 'loacal': [paths]} dicts.
    """
    templist = []
    for item in liceselist:
        # Fixed: dict.has_key() (Python 2 only) replaced with `in`.
        templocal1 = [local[item] for local in loacllist if item in local]
        templocal1.sort()
        templist.append({'licenses': item, 'loacal': templocal1})
    return templist


def countlicense(liceselist, loacllist):
    """Count how many {license: path} entries mention each license.

    :returns: list of {'licenses': name, 'count': n} dicts.
    """
    templist = []
    for item in liceselist:
        tempcount = sum(1 for local in loacllist if item in local)
        templist.append({'licenses': item, 'count': tempcount})
    return templist


def printconf(liceselist, loacllist):
    """Expand conflicting license pairs with the paths of each side.

    :param liceselist: list of [license1, license2] pairs (see isConflict).
    :returns: list of {'licenses1', 'loacal1', 'licenses2', 'loacal2'} dicts.
    """
    templist = []
    for item in liceselist:
        templocal1 = []
        templocal2 = []
        for local in loacllist:
            if item[0] in local:
                templocal1.append(local[item[0]])
            if item[1] in local:
                templocal2.append(local[item[1]])
        templist.append({'licenses1': item[0], 'loacal1': templocal1,
                         'licenses2': item[1], 'loacal2': templocal2})
    return templist


def as_template(scanned_files, files_count, output_file, template='html',):
    """
    Return an string built from a list of results and the provided template.
    The template defaults to the standard HTML template format or can point
    to the path of a custom template file.
    """
    from licensedcode.models import get_licenses
    # License-properties database used by the is*/mayConflict helpers above.
    conn = sqlite3.connect('data.db')
    if template == 'html':
        template = get_template(get_template_dir('html'))
    else:
        # load a custom template
        tpath = fileutils.as_posixpath(abspath(expanduser(template)))
        assert isfile(tpath)
        tdir = fileutils.parent_directory(tpath)
        tfile = fileutils.file_name(tpath)
        template = get_template(tdir, tfile)

    converted = OrderedDict()
    converted_infos = OrderedDict()
    converted_packages = OrderedDict()
    licenses = {}

    LICENSES = 'licenses'
    COPYRIGHTS = 'copyrights'
    PACKAGES = 'packages'
    URLS = 'urls'
    EMAILS = 'emails'
    liceses1 = []       # distinct license keys seen across the scan
    licessloacl = []    # one {license_key: path} dict per detection

    # Create a flattened data dict keyed by path
    for scanned_file in scanned_files:
        path = scanned_file['path']
        results = []
        if COPYRIGHTS in scanned_file:
            for entry in scanned_file[COPYRIGHTS]:
                results.append({
                    'start': entry['start_line'],
                    'end': entry['end_line'],
                    'what': 'copyright',
                    # NOTE: we display one statement per line.
                    'value': '\n'.join(entry['statements']),
                })
        if LICENSES in scanned_file:
            for entry in scanned_file[LICENSES]:
                results.append({
                    'start': entry['start_line'],
                    'end': entry['end_line'],
                    'what': 'license',
                    'value': entry['key'],
                })
                if entry['key'] not in licenses:
                    licenses[entry['key']] = entry
                    entry['object'] = get_licenses().get(entry['key'])
        if results:
            converted[path] = sorted(results, key=itemgetter('start'))
            for k in converted[path]:
                if (k['what'] == 'license'):
                    licessloacl.append({k['value']: path})
                    if (not (k['value'] in liceses1)):
                        liceses1.append(k['value'])

        # this is klunky: we need to drop templates entirely
        converted_infos[path] = OrderedDict()
        for name, value in scanned_file.items():
            if name in (LICENSES, PACKAGES, COPYRIGHTS, EMAILS, URLS):
                continue
            converted_infos[path][name] = value

        if PACKAGES in scanned_file:
            converted_packages[path] = scanned_file[PACKAGES]

    licenses = OrderedDict(sorted(licenses.items()))

    # Output file names look like <name>-YYYY-MM-...; split name/time apart.
    ISOTIMEFORMAT = '-%Y-%m-'
    scantime = time.strftime(ISOTIMEFORMAT, time.localtime())
    filename = os.path.basename(output_file.name).rsplit(scantime, 1)[0]
    files = {
        'filename': filename,
        'filecount': files_count,
        'scantime': os.path.basename(
            output_file.name).rsplit(filename, 1)[1][1:-5],
        'license_length': len(licenses),
        'license_count': countlicense(licenses.keys(), licessloacl),
        'isSameLi': printre(isSameLi(liceses1, conn), licessloacl),
        'isNetdis': printre(isNetdis(liceses1, conn), licessloacl),
        'isOpensource': printre(isOpensource(liceses1, conn), licessloacl),
        'isNotPatent': printre(isNotPatent(liceses1, conn), licessloacl),
        'isModified': printre(isModified(liceses1, conn), licessloacl),
        'isTrademark': printre(isTrademark(liceses1, conn), licessloacl),
        'isConflict': printconf(isConflict(liceses1, conn), licessloacl),
        'mayConflict': mayConflict(liceses1, conn),
        'license_copyright': converted,
        'infos': converted_infos,
        'packages': converted_packages
    }
    return template.generate(files=files, licenses=licenses)
​​STATE CONFERENCE UPDATE: The deadline to reserve your hotel room is MAY 4, 2019, and we are currently at or near our room block capacity. We are working with hotel management to secure additional rooms at a nearby sister venue, and will ensure that anyone registered for the conference prior to the May 4, 2019 deadline can reserve a room. Please continue to call the Hilton phone number listed to book your accommodations. Thank you! Use code CPZ to receive the group rate of $179 per night, plus half off resort fees. STEP 1: Please complete and submit the registration form below before proceeding to Step 2. Submit a registration form for each conference attendee. When adding more than one attendee, you must hit the back button to refresh the registration form. STEP 2: Once the registration forms are submitted for each conference attendee, you may proceed with purchasing base registrations, mobile tours, and/or extra tickets. PLEASE NOTE: TO MAKE PAYMENTS, THE WEBSITE MUST BE OPENED WITH CHROME OR FIREFOX BROWSER. Opening website with Internet Explorer will cause processing failures. STEP 4: (Optional) Add extra tickets for the really fun events. This program schedule is a work in progress and subject to change. Author Tim Dorsey is Thursday's Lunch Speaker! Your full conference registration includes this event, but you can purchase extra lunch tickets for your guests HERE! Thursday evening's reception is a "can't miss" event! Come network with your planning community at the authentic Conch Republic Seafood Company. It doesn't get more Key West than this! Your full conference registration includes this event, but you can purchase extra event tickets HERE.
# -*- coding: utf-8 -*-
# South schema migration (auto-generated): replaces MashLog.status with
# MashLog.active_mashing_step_state.
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Deleting field 'MashLog.status'
        db.delete_column('brew_mashlog', 'status')

        # Adding field 'MashLog.active_mashing_step_state'
        db.add_column('brew_mashlog', 'active_mashing_step_state',
                      self.gf('django.db.models.fields.CharField')(default=None, max_length=1),
                      keep_default=False)

    def backwards(self, orm):
        # User chose to not deal with backwards NULL issues for 'MashLog.status'
        raise RuntimeError("Cannot reverse this migration. 'MashLog.status' and its values cannot be restored.")

        # NOTE: unreachable on purpose -- the raise above makes this
        # migration irreversible; kept for documentation.
        # Deleting field 'MashLog.active_mashing_step_state'
        db.delete_column('brew_mashlog', 'active_mashing_step_state')

    # Frozen ORM snapshot used by South at migration time; do not edit.
    models = {
        'brew.batch': {
            'Meta': {'object_name': 'Batch'},
            'brewing_date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mashing_scheme': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingScheme']"}),
            'number': ('django.db.models.fields.IntegerField', [], {'max_length': '3'})
        },
        'brew.mashingscheme': {
            'Meta': {'object_name': 'MashingScheme'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'brew.mashingstep': {
            'Meta': {'object_name': 'MashingStep'},
            'degrees': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mashing_scheme': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingScheme']"}),
            'minutes': ('django.db.models.fields.CharField', [], {'max_length': '3'})
        },
        'brew.mashlog': {
            'Meta': {'object_name': 'MashLog'},
            'active_mashing_step': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.MashingStep']"}),
            'active_mashing_step_state': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
            'batch': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['brew.Batch']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'degrees': ('django.db.models.fields.FloatField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'brew.variable': {
            'Meta': {'object_name': 'Variable'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '127'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        }
    }

    complete_apps = ['brew']
Set up a Bright Bingo Account through the image above to get a 200% welcome bonus on your first deposit. This Bright Bingo welcome bonus gives you a big buffer and the ability to leverage your bets. Bright Bingo is ranked as one of the Best Online Casinos by Askgamblers, with tons of Different Games and Super Fast Withdrawals. This HUGE Brightbingo.com bonus is only for a limited time ~ Don’t miss it! How can I get my Bright Bingo casino bonus?
#coding: utf-8
import os
#import random

from django.db import models
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.db.models import Max
from django.utils.translation import ugettext_lazy as _

from generic_images.signals import image_saved, image_deleted
from generic_images.managers import AttachedImageManager
from generic_utils.models import GenericModelBase


class BaseImageModel(models.Model):
    ''' Simple abstract Model class with image field.

        .. attribute:: image

            ``models.ImageField``
    '''

    def get_upload_path(self, filename):
        ''' Override this to customize upload path '''
        raise NotImplementedError

    def _upload_path_wrapper(self, filename):
        # Indirection so subclasses can override get_upload_path: the
        # `upload_to` callable is bound at class-definition time.
        return self.get_upload_path(filename)

    image = models.ImageField(_('Image'), upload_to=_upload_path_wrapper)

    class Meta:
        abstract = True


class ReplaceOldImageModel(BaseImageModel):
    ''' Abstract Model class with image field.
        If the file for image is re-uploaded, old file is deleted.
    '''

    def _replace_old_image(self):
        ''' Override this in subclass if you don't want image replacing or
            want to customize image replacing
        '''
        try:
            old_obj = self.__class__.objects.get(pk=self.pk)
            if old_obj.image.path != self.image.path:
                path = old_obj.image.path
                default_storage.delete(path)
        except self.__class__.DoesNotExist:
            pass

    def save(self, *args, **kwargs):
        # Only a re-save of an existing row can replace an older file.
        if self.pk:
            self._replace_old_image()
        super(ReplaceOldImageModel, self).save(*args, **kwargs)

    class Meta:
        abstract = True


class AbstractAttachedImage(ReplaceOldImageModel, GenericModelBase):
    ''' Abstract Image model that can be attached to any other Django model
        using generic relations.

        .. attribute:: is_main

            BooleanField. Whether the image is the main image for object.
            This field is set to False automatically for all images attached
            to same object if image with is_main=True is saved to ensure that
            there is only 1 main image for object.

        .. attribute:: order

            IntegerField to support ordered image sets.
            On creation it is set to max(id)+1.
    '''

    user = models.ForeignKey(User, blank=True, null=True,
                             verbose_name=_('User'))
    '''A ForeignKey to associated user, for example user who uploaded image.
    Can be empty.'''

    caption = models.TextField(_('Caption'), null=True, blank=True)
    'TextField caption for image'

    is_main = models.BooleanField(_('Main image'), default=False)

    order = models.IntegerField(_('Order'), default=0)

    objects = AttachedImageManager()
    '''Default manager of
    :class:`~generic_images.managers.AttachedImageManager` type.'''

    def next(self):
        ''' Returns next image for same content_object and None if image
            is the last.
        '''
        # NOTE(review): "next" = next in the descending `-order` display
        # ordering (see AttachedImage.Meta below) -- confirm intent.
        try:
            return self.__class__.objects.for_model(self.content_object,
                                                    self.content_type).\
                        filter(order__lt=self.order).order_by('-order')[0]
        except IndexError:
            return None

    def previous(self):
        ''' Returns previous image for same content_object and None if image
            is the first.
        '''
        try:
            return self.__class__.objects.for_model(self.content_object,
                                                    self.content_type).\
                        filter(order__gt=self.order).order_by('order')[0]
        except IndexError:
            return None

    def get_order_in_album(self, reversed_ordering=True):
        ''' Returns image order number. It is calculated as (number+1) of
            images attached to the same content_object whose order is greater
            (if 'reverse_ordering' is True) or lesser (if 'reverse_ordering'
            is False) than image's order.
        '''
        lookup = 'order__gt' if reversed_ordering else 'order__lt'
        return self.__class__.objects.\
                    for_model(self.content_object, self.content_type).\
                    filter(**{lookup: self.order}).count() + 1

    def _get_next_pk(self):
        # NOTE(review): max(pk)+1 computed app-side; concurrent saves may
        # race and produce duplicate order/file names -- confirm acceptable.
        max_pk = self.__class__.objects.aggregate(m=Max('pk'))['m'] or 0
        return max_pk+1

#    def put_as_last(self):
#        """ Sets order to max(order)+1 for self.content_object
#        """
#        last = self.__class__.objects.exclude(id=self.id).\
#                    filter(
#                        object_id = self.object_id,
#                        content_type = self.content_type,
#                   ).aggregate(max_order=Max('order'))['max_order'] or 0
#        self.order = last+1

    def get_file_name(self, filename):
        ''' Returns file name (without path and extenstion)
            for uploaded image. Default is 'max(pk)+1'.
            Override this in subclass or assign another functions per-instance
            if you want different file names (ex: random string).
        '''
#        alphabet = "1234567890abcdefghijklmnopqrstuvwxyz"
#        #        1e25 variants
#        return ''.join([random.choice(alphabet) for i in xrange(16)])

        # anyway _get_next_pk is needed for setting `order` field
        return str(self._get_next_pk())

    def get_upload_path(self, filename):
        ''' Override this in proxy subclass to customize upload path.
            Default upload path is
            :file:`/media/images/<user.id>/<filename>.<ext>`
            or :file:`/media/images/common/<filename>.<ext>` if user is not
            set.

            ``<filename>`` is returned by
            :meth:`~generic_images.models.AbstractAttachedImage.get_file_name`
            method. By default it is probable id of new image (it is
            predicted as it is unknown at this stage).
        '''
        user_folder = str(self.user.pk) if self.user else 'common'
        root, ext = os.path.splitext(filename)
        return os.path.join('media', 'images', user_folder,
                            self.get_file_name(filename) + ext)

    def save(self, *args, **kwargs):
        send_signal = getattr(self, 'send_signal', True)
        if self.is_main:
            # Demote every sibling so only one image stays "main".
            related_images = self.__class__.objects.filter(
                content_type=self.content_type,
                object_id=self.object_id
            )
            related_images.update(is_main=False)

        if not self.pk:  # object is created
            if not self.order:  # order is not set
                self.order = self._get_next_pk()  # let it be max(pk)+1

        super(AbstractAttachedImage, self).save(*args, **kwargs)

        if send_signal:
            image_saved.send(sender=self.content_type.model_class(),
                             instance=self)

    def delete(self, *args, **kwargs):
        send_signal = getattr(self, 'send_signal', True)
        super(AbstractAttachedImage, self).delete(*args, **kwargs)
        if send_signal:
            image_deleted.send(sender=self.content_type.model_class(),
                               instance=self)

    def __unicode__(self):
        # NOTE(review): the bare `except:` below silently falls back when
        # content_object/user access fails; consider narrowing.
        try:
            if self.user:
                return u"AttachedImage #%d for [%s] by [%s]" % (
                    self.pk, self.content_object, self.user)
            else:
                return u"AttachedImage #%d for [%s]" % (
                    self.pk, self.content_object,)
        except:
            try:
                return u"AttachedImage #%d" % (self.pk)
            except TypeError:
                # pk is None for unsaved instances.
                return u"new AttachedImage"

    class Meta:
        abstract = True


class AttachedImage(AbstractAttachedImage):
    ''' Image model that can be attached to any other Django model using
        generic relations. It is simply non-abstract subclass of
        :class:`~generic_images.models.AbstractAttachedImage`
    '''
    class Meta:
        ordering = ['-order']
Obviously Bob was talking about Texas real estate and development when mentioning changin’ and growing waters around us. While we don’t necessarily have growing waters, the developments and communities in West Tarrant and Parker County sure are a changin’. In Part One of this recent series of Fort Worth growth, we looked at Walsh and how this new 7,200 acre master planned community will alter the housing landscape forever. Part Two focused on Walsh and how it is being developed for today and tomorrow with forward-thinking amenities and infrastructure.
import cx_Oracle
import getpass  # gets password without echoing
import random
import datetime


class Patient_Info_Update():
    """Console workflow for creating and editing rows of the Oracle `patient` table.

    Driven entirely by interactive `input()` prompts. `main()` opens the DB
    connection, runs the menu loop (`getInputs`), applies the collected changes
    (`executeStatement`) and closes the connection.

    NOTE(review): every query in this class builds SQL by string concatenation
    of user-entered text — this is vulnerable to SQL injection. Should be
    migrated to cx_Oracle bind variables (e.g. `curs.execute(sql, [value])`).
    """

    def __init__(self):
        # No state until main() is called with credentials.
        pass

    def main(self, credentials):
        """Entry point. `credentials` is a (username, password) pair.

        Connects to the course Oracle instance, runs the interactive menu,
        executes the chosen insert/update, then closes the connection.
        Always returns 1 (the caller apparently only checks for completion).
        """
        self.con = cx_Oracle.connect(credentials[0] + '/' + \
            credentials[1] + '@gwynne.cs.ualberta.ca:1521/CRS')
        state = self.getInputs()
        if state == 0:  # user picked "Return to main menu"
            return 1
        self.executeStatement(state)
        self.con.close()
        return 1

    def printOptions(self):
        """Print the top-level menu choices."""
        print()
        print("[1] Enter new Patient")
        print("[2] Edit Existing Patient")
        print("[3] Return to main menu.")

    def getInputs(self):
        """Run the menu loop and collect patient data.

        Returns 1 when a new patient was entered (insert), 2 when an existing
        patient was edited (update), 0 when the user chose to return to the
        main menu. Sets self.name/address/birth/phone and the *_update flags
        consumed by executeStatement().
        """
        while 1:
            # Reset per-iteration state: which fields were edited this pass.
            self.name_update = False
            self.address_update = False
            self.birth_update = False
            self.phone_update = False
            self.printOptions()
            ans = input("Enter a choice: ")
            if ans == "1":
                # --- New patient: gather every field, then confirm. ---
                self.HCN = self.getUniqueHCN()
                self.printSeparator()
                self.name = self.getName()
                go = True
                self.printSeparator()
                # getAddress/getBirthDate return (value, retry_flag); loop until valid.
                while go:
                    self.address, go = self.getAddress()
                go = True
                self.printSeparator()
                while go:
                    self.birth, go = self.getBirthDate()
                self.printSeparator()
                self.phone = self.getPhone()
                self.printSeparator()
                print("Patient Name: " + self.name)
                print("Patient Address: " + self.address)
                print("Patient Birth Date: " + self.birth)
                print("Patient Phone Number: " + self.phone)
                print()
                while 1:
                    conf = input("Confirm information (y/n): ")
                    if conf == "y":
                        print("Information confirmed.")
                        return 1
                    elif conf == "n":
                        print("Information not confirmed, returning to start.")
                        break
                    else:
                        print("Invalid choice, pick 'y' or 'n'")
            elif ans == "2":
                # --- Edit existing patient: pick one, then field-by-field menu. ---
                go = True
                self.printSeparator()
                while go:
                    self.patient, go = self.getPatient()
                not_done = True
                while not_done:
                    curs = self.con.cursor()
                    # NOTE(review): SQL built by concatenation — injection risk.
                    curs.execute("select * from patient where health_care_no=" + str(self.patient))
                    rows = curs.fetchall()
                    print()
                    for row in rows:
                        list1 = []
                        counter = 0
                        for x in row:
                            # Column index 3 is the birth date; trim the
                            # " HH:MM:SS" suffix off its string form for display.
                            if counter == 3:
                                if x is not None:
                                    x = (x.strftime("%Y-%m-%d %H:%M:%S"))
                                    x = x[:-9]
                            counter += 1
                            list1.append(x)
                        print("Current Information: " + str(tuple(list1)))
                    print("[1] Update patient name.")
                    print("[2] Update patient address.")
                    print("[3] Update patient birth date.")
                    print("[4] Update patient phone number.")
                    print("[5] Return to menu.")
                    check = input("Enter an option: ")
                    if check == "1":
                        self.printSeparator()
                        self.name = self.getName()
                        self.name_update = True
                        # NOTE(review): `ask` is read once before the loop; an
                        # answer other than y/n prints "Invalid input." forever.
                        ask = input("Update another value? (y/n): ")
                        while 1:
                            if ask == "y":
                                break
                            elif ask == "n":
                                not_done = False
                                break
                            else:
                                print("Invalid input. ")
                                print()
                    elif check == "2":
                        go = True
                        self.printSeparator()
                        while go:
                            self.address, go = self.getAddress()
                        self.address_update = True
                        ask = input("Update another value? (y/n): ")
                        while 1:
                            if ask == "y":
                                break
                            elif ask == "n":
                                not_done = False
                                break
                            else:
                                print("Invalid input. ")
                                print()
                    elif check == "3":
                        go = True
                        self.printSeparator()
                        while go:
                            self.birth, go = self.getBirthDate()
                        self.birth_update = True
                        ask = input("Update another value? (y/n): ")
                        while 1:
                            if ask == "y":
                                break
                            elif ask == "n":
                                not_done = False
                                break
                            else:
                                print("Invalid input. ")
                                print()
                    elif check == "4":
                        self.printSeparator()
                        self.phone = self.getPhone()
                        self.phone_update = True
                        ask = input("Update another value? (y/n): ")
                        while 1:
                            if ask == "y":
                                break
                            elif ask == "n":
                                not_done = False
                                break
                            else:
                                print("Invalid input. ")
                                print()
                    elif check == "5":
                        break
                    else:
                        print("Invalid input.")
                        print()
                # Show a summary of only the fields that were edited.
                self.printSeparator()
                if self.name_update:
                    print("Patient Name: " + self.name)
                if self.address_update:
                    print("Patient Address: " + self.address)
                if self.birth_update:
                    print("Patient Birth Date: " + self.birth)
                if self.phone_update:
                    print("Patient Phone Number: " + self.phone)
                print()
                while 1:
                    conf = input("Confirm updates (y/n): ")
                    if conf == "y":
                        print("Information confirmed.")
                        return 2
                    elif conf == "n":
                        print("Information not confirmed, returning to start.")
                        break
                    else:
                        print("Invalid choice, pick 'y' or 'n'")
            elif ans == "3":
                return 0
            else:
                print("Invalid choice.")

    # NOTE(review): defined without `self` and shadows the builtin `input`;
    # appears unused by the rest of the class, and `eval` on user text is
    # unsafe. Candidate for removal or rewrite with int()/try-except.
    def input_check(input):
        """Return the menu number 1-5 parsed from `input`, or 0 if invalid."""
        try:
            check = eval(input)
            if check not in [1, 2, 3, 4, 5]:
                return 0
            else:
                return check
        except:
            return 0

    def getPhone(self):
        """Prompt until a confirmed 10-digit phone number is entered; return it."""
        ans = True
        while ans:
            print()
            phone = input("Input Patient Phone Number (10-digits): ")
            if phone.isdigit() and len(phone) == 10:
                reply = input("Confirm patient number :: " + phone + " :: (y/n): ")
                if reply == "y":
                    ans = False
                elif reply == "n":
                    print("Phone incorrect, returning to start.")
                else:
                    print("Invalid input, returning to start.")
            else:
                print("Invalid input. Enter phone as a single number without spaces or dashes.")
                print()
        return phone

    def getName(self):
        """Prompt until a name is entered and confirmed; return it."""
        ans = True
        while ans:
            print()
            name = input("Input Patient Name: ")
            reply = input("Confirm patient name :: " + name + " :: (y/n): ")
            if reply == "y":
                ans = False
            elif reply == "n":
                print("Name incorrect, enter again.")
            else:
                print("Invalid input, enter again.")
        return name

    def getAddress(self):
        """Prompt for an address.

        Returns (address, False) on success, or (False, True) to signal the
        caller to retry (quote characters or >200 chars are rejected — the
        quote ban exists because queries are built by string concatenation).
        """
        not_allowed = [chr(34), chr(39)]  # '"' and "'"
        ans = True
        while ans:
            print()
            address = input("Enter Address: ")
            reply = input("Confirm patient address :: " + address + " :: (y/n): ")
            if reply == "y":
                for each in address:
                    if each in not_allowed:
                        print("Apostrophe and Quotation characters are disallowed.")
                        return False, True
                if len(address) > 200:
                    print("Address entry exceeds character limit of 200.")
                    return False, True
                else:
                    return address, False
            elif reply == "n":
                print("Address incorrect, enter again.")
            else:
                print("Invalid input, enter again.")

    def getBirthDate(self):
        """Prompt for a birth date in "yyyy/mm/dd" form.

        Returns (date_string, False) on success, or (False, True) to signal
        retry. Validity is checked by constructing a datetime.datetime.
        """
        ans = True
        while ans:
            print()
            string = input('Enter Birth Date "yyyy/mm/dd": ')
            if len(string) != 10:
                print("Invalid input.")
                return False, True
            else:
                year = string[0:4]
                month = string[5:7]
                day = string[8:]
                correctDate = None
                if self.isNumber(year) and self.isNumber(month) and self.isNumber(day) and string[4] == "/" and string[7] == "/":
                    try:
                        # datetime raises ValueError for impossible dates.
                        newDate = datetime.datetime(int(year), int(month), int(day))
                        correctDate = True
                    except ValueError:
                        correctDate = False
                if correctDate:
                    reply = input("Confirm patient birth date :: " + string + " :: (y/n): ")
                    if reply == "y":
                        return string, False
                    elif reply == "n":
                        print("Birth date incorrect, enter again.")
                    else:
                        print("Invalid input, enter again.")
                else:
                    print("Invalid date.")
                    return False, True

    def goodNumber(self, string, case):
        """True if `string` is an existing id for the given table.

        case "D" checks doctor.employee_no, "T" checks test_record.test_id,
        anything else checks patient.health_care_no.
        NOTE(review): concatenated SQL — injection risk.
        """
        if case == "D":
            curs = self.con.cursor()
            curs.execute("select * from doctor where employee_no like'" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        elif case == "T":
            curs = self.con.cursor()
            curs.execute("select * from test_record where test_id like '" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        else:
            curs = self.con.cursor()
            curs.execute("select * from patient where health_care_no like'" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True

    def isReal(self, string, case):
        """True if `string` names an existing row for the given entity.

        case "D": doctor (by patient name), "T": test_type, "L": medical_lab,
        "R": test_record, anything else: patient (by name).
        NOTE(review): concatenated SQL — injection risk.
        """
        if case == "D":
            curs = self.con.cursor()
            curs.execute("select * from doctor d, patient p where d.health_care_no=p.health_care_no and p.name like'" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        elif case == "T":
            curs = self.con.cursor()
            curs.execute("select * from test_type where test_name like'" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        elif case == "L":
            curs = self.con.cursor()
            curs.execute("select * from medical_lab where lab_name like '" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        elif case == "R":
            curs = self.con.cursor()
            curs.execute("select * from test_record where test_id like '" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True
        else:
            curs = self.con.cursor()
            curs.execute("select * from patient where name like'" + string + "'")
            rows = curs.fetchall()
            if len(rows) == 0:
                return False
            else:
                return True

    def isNumber(self, string):
        """True if `string` consists only of digits."""
        return string.isdigit()

    # returns the patient_no on success
    def getPatient(self):
        """List all patients, then prompt for one by name or number.

        Returns (health_care_no, False) on success, or (False, True) to
        signal the caller to retry.
        """
        curs = self.con.cursor()
        curs.execute("select name,health_care_no from patient p")
        rows = curs.fetchall()
        for row in rows:
            print(row)
        string = input('Enter Patient name or number: ')
        if self.isNumber(string):
            if self.goodNumber(string, "P"):
                return int(string), False
            else:
                print("Invalid health care number.")
                print()
                return False, True
        else:
            if self.isReal(string, "P"):
                return self.getPatientNumber(string), False
            else:
                print(string, "is not a real patient, try again")
                return False, True

    def getPatientNumber(self, string):
        """Resolve a patient name to a health_care_no.

        If several patients share the name, list them and prompt for the id.
        NOTE(review): the multi-match branch returns the id as a str while the
        single-match branch returns the raw DB value — confirm callers accept both.
        """
        curs = self.con.cursor()
        curs.execute("select * from patient p where p.name like '" + string + "'")
        rows = curs.fetchall()
        tmp = []
        if len(rows) > 1:
            while 1:
                print()
                print("Health Care Number | Name | Address | Date of Birth | Phone number")
                for row in rows:
                    print(row)
                    tmp.append(str(row[0]))
                pick = input("Enter ID of correct patient: ")
                if pick in tmp:
                    return pick
                else:
                    print("Incorrect value, enter valid ID of correct patient.")
        else:
            return rows[0][0]

    def printSeparator(self):
        """Print a blank-line / dashes / blank-line visual separator."""
        print("")
        print("-----------------------")
        print("")

    def getUniqueHCN(self):
        """Pick a random health-care number not already present in `patient`.

        NOTE(review): range is 0..1000, so this loops forever once the table
        holds ~1000 patients — confirm intended scale.
        """
        curs = self.con.cursor()
        curs.execute("select health_care_no from patient")
        rows = curs.fetchall()
        while (True):
            health_care_no = random.randint(0, 10**3)
            if all(health_care_no != row[0] for row in rows):
                return health_care_no

    def executeStatement(self, state):
        """Apply the collected data: state 1 = insert new patient, 2 = updates.

        Relies on the *_update flags and field attributes set by getInputs().
        Commits at the end. NOTE(review): concatenated SQL throughout, and the
        bare `except:` clauses hide the real Oracle error.
        """
        print("******EXECUTING STATEMENT******")
        curs = self.con.cursor()
        if state == 1:
            try:
                curs.execute("insert into patient values (" + str(self.HCN) + ", '" + str(self.name) + "', '" + str(self.address) + "', TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD'), '" + str(self.phone) + "')")
            except:
                self.printSeparator()
                print("SQL Database Violation. Remember, Name and Address are a unique key.")
        elif state == 2:
            # Pre-check the (name, address) unique key only when both change.
            if self.name_update and self.address_update:
                curs.execute("select name, address from patient")
                rows = curs.fetchall()
                for row in rows:
                    if row[0] == self.name and row[1] == self.address:
                        self.printSeparator()
                        print("SQL Database Violation. Name and Address are a unique key.")
                        self.printSeparator()
                        return 0
            if self.name_update:
                try:
                    curs.execute("update patient set name='" + str(self.name) + "' where health_care_no=" + str(self.patient))
                except:
                    self.printSeparator()
                    print("SQL Database Violation. Remember, Name and Address are a unique key.")
                    self.printSeparator()
            if self.address_update:
                try:
                    curs.execute("update patient set address='" + str(self.address) + "' where health_care_no=" + str(self.patient))
                except:
                    self.printSeparator()
                    print("SQL Database Violation. Remember, Name and Address are a unique key.")
                    self.printSeparator()
            if self.birth_update:
                curs.execute("update patient set birth_day=TO_DATE('" + str(self.birth) + "', 'YYYY-MM-DD') where health_care_no=" + str(self.patient))
            if self.phone_update:
                curs.execute("update patient set phone='" + str(self.phone) + "' where health_care_no=" + str(self.patient))
        self.printSeparator()
        self.con.commit()
Sure, VR/AR/360 production would be fun to integrate in a marketing campaign, but it's too costly, right? Actually, it doesn't have to be. The post VR/AR/360 Production: Making the Case Inside an Organization appeared first on Onlinevideo.net - Online Video Marketing Strategies, News, and Tips.
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@

# webapp2 request handlers for the "flex" solution dashboard:
# the main dashboard page, logout for multi-location services, and the
# terms-and-conditions consent flow. (Python 2 code: uses iteritems/long.)

import json
import os

import jinja2
import webapp2
from babel import dates, Locale
from jinja2 import StrictUndefined, Undefined

from mcfw.rpc import serialize_complex_value
from rogerthat.bizz import channel
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.communities.models import AppFeatures
from rogerthat.bizz.registration import get_headers_for_consent
from rogerthat.bizz.session import set_service_identity
from rogerthat.consts import DEBUG
from rogerthat.dal.profile import get_service_profile
from rogerthat.models import ServiceIdentity
from rogerthat.pages.legal import get_version_content, DOC_TERMS_SERVICE, get_current_document_version
from rogerthat.pages.login import SessionHandler
from rogerthat.rpc import users
from rogerthat.translations import DEFAULT_LANGUAGE
from rogerthat.utils.channel import send_message_to_session
from shop.bizz import get_organization_types, update_customer_consents
from shop.dal import get_customer
from solutions import translate, translations, COMMON_JS_KEYS
from solutions.common.bizz import OrganizationType, SolutionModule
from solutions.common.bizz.functionalities import get_functionalities
from solutions.common.bizz.settings import get_service_info
from solutions.common.consts import UNITS, UNIT_SYMBOLS, UNIT_PIECE, UNIT_LITER, UNIT_KG, UNIT_GRAM, UNIT_HOUR, \
    UNIT_MINUTE, ORDER_TYPE_SIMPLE, ORDER_TYPE_ADVANCED, UNIT_PLATTER, UNIT_SESSION, UNIT_PERSON, UNIT_DAY, CURRENCIES
from solutions.common.dal import get_solution_settings, get_restaurant_menu, get_solution_email_settings, \
    get_solution_settings_or_identity_settings
from solutions.common.models import SolutionQR, SolutionServiceConsent
from solutions.common.models.properties import MenuItemTO
from solutions.common.to import SolutionEmailSettingsTO
from solutions.flex import SOLUTION_FLEX
from solutions.jinja_extensions import TranslateExtension

# Jinja2 environment over this module's templates plus the shared "common"
# templates. StrictUndefined in DEBUG makes missing variables fail loudly.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader([os.path.join(os.path.dirname(__file__), 'templates'),
                                    os.path.join(os.path.dirname(__file__), '..', 'common', 'templates')]),
    extensions=[TranslateExtension],
    undefined=StrictUndefined if DEBUG else Undefined)

# Client-side (JS) template names that every dashboard always receives.
DEFAULT_JS_TEMPLATES = [
    'inbox_messages',
    'inbox_detail_messages',
    'inbox_send_message_to_services',
    'qanda_question_table',
    'qanda_question_modules',
    'qanda_question_detail',
    'settings/settings_branding',
    'settings/settings_branding_preview',
    'settings/app_user_roles',
    'settings/app_user_admins',
    'settings/app_user_add_roles',
    'settings/try_publish_changes',
    'functionalities/functionality',
]

# Extra JS templates to ship per enabled solution module.
MODULES_JS_TEMPLATE_MAPPING = {
    SolutionModule.AGENDA: [
        'events_add',
        'events_add_dates',
        'events',
        'events_events',
        'events_settings',
        'events_calendar_settings',
        'events_uitcalendar_settings'
    ],
    SolutionModule.APPOINTMENT: [
        'timeframe_template'
    ],
    SolutionModule.CITY_APP: [
        'services/service',
        'services/service_form',
        'services/modules_list',
        'services/service_search',
        'services/service_export',
        'settings/app_settings',
        'settings/paddle'
    ],
    SolutionModule.DISCUSSION_GROUPS: [
        'discussion_groups/discussion_groups_list',
        'discussion_groups/discussion_groups_put'
    ],
    SolutionModule.GROUP_PURCHASE: [
        'group_purchase',
        'group_purchase_subscriptions'
    ],
    SolutionModule.LOYALTY: [
        'loyalty_slides',
        'loyalty_slide_add',
        'loyalty_tablets',
        'loyalty_tablet_modal',
        'loyalty_scans',
        'loyalty_scans_redeem_stamps_modal',
        'loyalty_lottery_add_modal',
        'loyalty_customer_visits_detail_modal',
        'loyalty_customer_visits_detail',
        'loyalty_customer_visit',
        'loyalty_lottery_history',
        'loyalty_export'
    ],
    SolutionModule.MENU: [
        'menu',
        'menu_additem',
        'menu_editdescription',
        'menu_edit_image',
        'menu_import'
    ],
    SolutionModule.ORDER: [
        'order',
        'order_list',
        'pause_orders_modal',
        'timeframe_template',
        'menu',
        'menu_import',
        'menu_additem',
        'menu_editdescription',
        'menu_edit_image',
        'payments',
        'payconiq_nl',
    ],
    SolutionModule.PHARMACY_ORDER: [
        'pharmacy_order',
        'pharmacy_order_list'
    ],
    SolutionModule.RESTAURANT_RESERVATION: [
        'reservation_addshift',
        'reservation_addtable',
        'reservation_broken_reservations',
        'reservation_delete_table_confirmation',
        'reservation_editreservation',
        'reservation_edittables',
        'reservation_no_shift_found',
        'reservation_shiftcontents',
        'reservation_tablecontents',
        'reservation_update_reservation_tables',
        'reservations'
    ],
    SolutionModule.REPAIR: [
        'repair_order'
    ],
    SolutionModule.SANDWICH_BAR: [
        'sandwiches_order_inbox_detail',
        'sandwiches_list_item'
    ],
    SolutionModule.STATIC_CONTENT: [
        'static_content/static_content_select_icon',
        'static_content/static_content'
    ],
    SolutionModule.HIDDEN_CITY_WIDE_LOTTERY: [
        'loyalty_lottery_add_modal',
        'loyalty_customer_visits_detail_modal',
        'loyalty_customer_visits_detail',
        'loyalty_customer_visit',
        'loyalty_lottery_history',
        'loyalty_slides',
        'loyalty_slide_add'
    ],
}


class FlexHomeHandler(webapp2.RequestHandler):
    """Renders the flex dashboard (index.html), or the location chooser
    (locations.html) for services with multiple identities."""

    def _get_location_templates(self, service_user, language):
        """Render the JS templates for the location-chooser page as a JSON
        object mapping template name -> rendered HTML."""
        tmpl_params = {'language': language,
                       'debug': DEBUG,
                       'service_user_email': service_user}
        templates = {}
        templates_to_get = {'location'}
        for tmpl in templates_to_get:
            templates[tmpl] = JINJA_ENVIRONMENT.get_template(tmpl + '.html').render(tmpl_params)
        templates = json.dumps(templates)
        return templates

    def _get_templates(self, lang, currency, modules):
        # type: (str, str, list[str]) -> str
        """Render the default JS templates plus those required by the enabled
        modules; returns a JSON object mapping template name -> HTML."""
        tmpl_params = {
            'language': lang or DEFAULT_LANGUAGE,
            'debug': DEBUG,
            'currency': currency,
        }
        templates = {}
        templates_to_get = set(DEFAULT_JS_TEMPLATES)
        for module in modules:
            for tmpl in MODULES_JS_TEMPLATE_MAPPING.get(module, []):
                templates_to_get.add(tmpl)
        for tmpl in templates_to_get:
            templates[tmpl] = JINJA_ENVIRONMENT.get_template(tmpl + '.html').render(tmpl_params)
        templates = json.dumps(templates)
        return templates

    def _get_qr_codes(self, sln_settings, service_identity):
        """QR codes for this identity, or [] when the QR module is disabled."""
        if SolutionModule.QR_CODES in sln_settings.modules:
            return SolutionQR.list_by_user(sln_settings.service_user, service_identity, sln_settings.solution)
        else:
            return []

    def _get_days(self, language):
        """Localized (weekday_index, capitalized name) pairs."""
        return [(k, v.capitalize()) for k, v in dates.get_day_names('wide', locale=language).items()]

    def _get_months(self, language, width):
        """Localized capitalized month names; `width` is a babel width
        such as 'wide' or 'abbreviated'."""
        return [v.capitalize() for _, v in dates.get_month_names(width, locale=language).items()]

    def _get_day_str(self, language, day):
        """Localized capitalized name for weekday index `day` (0 = Monday in babel)."""
        return dates.get_day_names('wide', locale=language)[day].capitalize()

    def _get_week_days(self, language):
        """Localized day names ordered Sunday-first (babel index 6 = Sunday)."""
        return [self._get_day_str(language, day) for day in [6, 0, 1, 2, 3, 4, 5]]

    def get(self):
        """Serve the dashboard.

        Redirects to /ourcityapp when not logged in or not a flex service,
        to the location chooser for multi-identity services without a chosen
        identity, and to /terms when the terms-of-service version is outdated.
        Otherwise renders index.html with all template/translation/constant
        parameters the client-side dashboard needs.
        """
        service_user = users.get_current_user()
        if not service_user:
            self.redirect("/ourcityapp")
            return
        sln_settings = get_solution_settings(service_user)
        if not sln_settings or sln_settings.solution != SOLUTION_FLEX:
            self.redirect("/ourcityapp")
            return
        session_ = users.get_current_session()
        lang = sln_settings.main_language or DEFAULT_LANGUAGE
        # Full translation table for the UI, plus JS-specific key aliases.
        all_translations = {key: translate(lang, key) for key in translations[DEFAULT_LANGUAGE]}
        for other_key, key in COMMON_JS_KEYS.iteritems():
            all_translations[other_key] = all_translations[key]
        service_identity = session_.service_identity if session_.service_identity else ServiceIdentity.DEFAULT
        service_info = get_service_info(service_user, service_identity)
        if sln_settings.identities:
            # Multi-location service: without a chosen identity, show the
            # location chooser instead of the dashboard.
            if not session_.service_identity:
                jinja_template = JINJA_ENVIRONMENT.get_template('locations.html')
                params = {'language': lang,
                          'debug': DEBUG,
                          'templates': self._get_location_templates(service_user, lang),
                          'service_name': service_info.name,
                          'service_user_email': service_user.email().encode("utf-8"),
                          'currency': service_info.currency,
                          'translations': json.dumps(all_translations)}
                channel.append_firebase_params(params)
                self.response.out.write(jinja_template.render(params))
                return
        elif session_.service_identity:
            # Single-location service with a stale identity in the session: clear it.
            session_ = set_service_identity(session_, None)
        # Dont require terms of use for:
        # - shop users (admins)
        # - cities logging in on other services their dashboard (layout_only)
        # - cirklo-only customers
        must_check_tos = not session_.layout_only and not session_.shop and not sln_settings.ciklo_vouchers_only()
        service_profile = get_service_profile(service_user)
        if must_check_tos:
            lastest_tos_version = get_current_document_version(DOC_TERMS_SERVICE)
            if service_profile.tos_version != lastest_tos_version:
                self.redirect('/terms')
                return
        sln_i_settings = get_solution_settings_or_identity_settings(sln_settings, service_identity)
        customer = get_customer(service_user)
        jinja_template = JINJA_ENVIRONMENT.get_template('index.html')
        days = self._get_days(lang)
        # Bitmask flag per weekday (2**index) paired with its localized name.
        day_flags = [(pow(2, day_num), day_name) for day_num, day_name in days]
        months = self._get_months(lang, 'wide')
        months_short = self._get_months(lang, 'abbreviated')
        week_days = self._get_week_days(lang)
        loyalty_version = self.request.get("loyalty")
        community = get_community(service_profile.community_id)
        locale = Locale.parse(lang)
        currency_symbols = {currency: locale.currency_symbols.get(currency, currency) for currency in CURRENCIES}
        # Constants mirrored to the client both as template values and JSON.
        consts = {'UNIT_PIECE': UNIT_PIECE,
                  'UNIT_LITER': UNIT_LITER,
                  'UNIT_KG': UNIT_KG,
                  'UNIT_GRAM': UNIT_GRAM,
                  'UNIT_HOUR': UNIT_HOUR,
                  'UNIT_MINUTE': UNIT_MINUTE,
                  'UNIT_DAY': UNIT_DAY,
                  'UNIT_PERSON': UNIT_PERSON,
                  'UNIT_SESSION': UNIT_SESSION,
                  'UNIT_PLATTER': UNIT_PLATTER,
                  'ORDER_TYPE_SIMPLE': ORDER_TYPE_SIMPLE,
                  'ORDER_TYPE_ADVANCED': ORDER_TYPE_ADVANCED,
                  'ORDER_ITEM_VISIBLE_IN_MENU': MenuItemTO.VISIBLE_IN_MENU,
                  'ORDER_ITEM_VISIBLE_IN_ORDER': MenuItemTO.VISIBLE_IN_ORDER,
                  'ORGANIZATION_TYPES': {
                      'CITY': OrganizationType.CITY,
                      'EMERGENCY': OrganizationType.EMERGENCY,
                      'PROFIT': OrganizationType.PROFIT,
                      'NON_PROFIT': OrganizationType.NON_PROFIT,
                  },
                  'CURRENCY_SYMBOLS': currency_symbols
                  }
        functionality_modules = functionality_info = None
        if community.signup_enabled:
            functionality_modules, functionality_info = map(json.dumps, get_functionalities(
                lang, sln_settings.modules, sln_settings.get_activated_modules(), community))
        is_city = service_user == community.main_service_user
        news_review_enabled = AppFeatures.NEWS_REVIEW in community.features
        # Initial client-side route depends on what this customer can use.
        default_router_location = u'#/functionalities'
        if sln_settings.ciklo_vouchers_only():
            default_router_location = u'#/vouchers'
        elif not functionality_modules:
            default_router_location = u'#/news'
        organization_types = get_organization_types(customer, community.default_app, lang, include_all=True)
        currency = service_info.currency
        params = {'language': lang,
                  'sln_settings': sln_settings,
                  'sln_i_settings': sln_i_settings,
                  'hidden_by_city': sln_settings.hidden_by_city,
                  'debug': DEBUG,
                  'templates': self._get_templates(lang, currency, sln_settings.modules),
                  'service_name': service_info.name,
                  'service_user_email': service_user.email().encode("utf-8"),
                  'service_identity': service_identity,
                  'qr_codes': self._get_qr_codes(sln_settings, service_identity),
                  'SolutionModule': SolutionModule,
                  'days': days,
                  'day_flags': day_flags,
                  'months': months,
                  'months_short': months_short,
                  'week_days': week_days,
                  'customer': customer,
                  'loyalty': True if loyalty_version else False,
                  'functionality_modules': functionality_modules,
                  'functionality_info': functionality_info,
                  'email_settings': json.dumps(serialize_complex_value(
                      SolutionEmailSettingsTO.fromModel(get_solution_email_settings(), service_user),
                      SolutionEmailSettingsTO, False)),
                  'currency': currency,
                  'is_layout_user': session_.layout_only if session_ else False,
                  'UNITS': json.dumps(UNITS),
                  'UNIT_SYMBOLS': json.dumps(UNIT_SYMBOLS),
                  'CONSTS': consts,
                  'CONSTS_JSON': json.dumps(consts),
                  'modules': json.dumps(sln_settings.modules),
                  'provisioned_modules': json.dumps(sln_settings.provisioned_modules),
                  'translations': json.dumps(all_translations),
                  'organization_types': organization_types,
                  'organization_types_json': json.dumps(dict(organization_types)),
                  'is_city': is_city,
                  'news_review_enabled': news_review_enabled,
                  'can_edit_paddle': is_city and session_.shop,
                  'default_router_location': default_router_location
                  }
        if SolutionModule.BULK_INVITE in sln_settings.modules:
            params['bulk_invite_message'] = translate(lang, "settings-bulk-invite-message", app_name=community.name)
        params['menu'] = get_restaurant_menu(service_user) if SolutionModule.MENU in sln_settings.modules else None
        channel.append_firebase_params(params)
        self.response.out.write(jinja_template.render(params))


class FlexLogoutHandler(SessionHandler):
    """Logs out of the chosen location of a multi-identity flex service:
    clears the identity from the session rather than ending the session."""

    def get(self):
        service_user = users.get_current_user()
        sln_settings = get_solution_settings(service_user)
        # Only multi-identity flex services use this flow; everyone else gets
        # a full session stop.
        if not sln_settings or sln_settings.solution != SOLUTION_FLEX or not sln_settings.identities:
            self.stop_session()
            return self.redirect('/ourcityapp')
        session_ = users.get_current_session()
        if session_.service_identity:
            session_ = set_service_identity(session_, None)
        # Tell the open dashboard (via the channel) that the location changed.
        send_message_to_session(service_user, session_, u"solutions.common.locations.update", si=None)
        self.redirect('/ourcityapp')


class TermsAndConditionsHandler(SessionHandler):
    """Shows the current terms of service (GET) and records acceptance plus
    optional marketing consents (POST)."""

    def get(self):
        service_user = users.get_current_user()
        if not service_user:
            self.redirect('/ourcityapp')
            return
        sln_settings = get_solution_settings(service_user)
        if not sln_settings:
            self.stop_session()
            return self.redirect('/ourcityapp')
        lang = sln_settings.main_language
        version = get_current_document_version(DOC_TERMS_SERVICE)
        params = {
            'tac': get_version_content(lang, DOC_TERMS_SERVICE, version),
            'tac_version': version,
            # Consent checkboxes only make sense when a shop customer exists.
            'language': lang,
            'show_email_checkboxes': get_customer(service_user) is not None,
        }
        jinja_template = JINJA_ENVIRONMENT.get_template('terms.html')
        self.response.out.write(jinja_template.render(params))

    def post(self):
        service_user = users.get_current_user()
        if not service_user:
            self.redirect('/ourcityapp')
            return
        sln_settings = get_solution_settings(service_user)
        if not sln_settings:
            self.stop_session()
            return self.redirect('/ourcityapp')
        # Fall back to the current version when the form did not post one.
        version = long(self.request.get('version')) or get_current_document_version(DOC_TERMS_SERVICE)
        customer = get_customer(service_user)
        if customer:
            context = u'User terms'
            update_customer_consents(customer.user_email, {
                SolutionServiceConsent.TYPE_NEWSLETTER: self.request.get(
                    SolutionServiceConsent.TYPE_NEWSLETTER) == 'on',
                SolutionServiceConsent.TYPE_EMAIL_MARKETING: self.request.get(
                    SolutionServiceConsent.TYPE_EMAIL_MARKETING) == 'on'
            }, get_headers_for_consent(self.request), context)
        service_profile = get_service_profile(service_user)
        service_profile.tos_version = version
        service_profile.put()
        self.redirect('/')
Rank patches are essential to completing your uniform. Sewn to military specifications, the TG rank patch is Army-approved for the Scorpion Operational Camouflage Pattern (OCP). Sewn with regulation black thread, it will match your ACU while simply and effectively communicating the proper insignia. Because it secures to your uniform with a hook Velcro field, no alteration is required and it’s sure to remain firmly in place.
# NOTE(review): This is a Python 2 script (print statements, xrange); it will not
# run under Python 3 without porting. Heavy computation (network training) runs at
# module import time.
#
# Usage:
#   python thisprog.py threshold numofnetworks
#
# Randomly initializes `numofnetworks` small neural networks and trains each until
# its error on a (regenerated) training set drops below a threshold selected from
# sys.argv[1]. It then tries to interpolate between trained networks along a
# "string of beads" in weight space while keeping the error below threshold, and
# tabulates the number of connected components found this way.
import numpy as np
import sys

# Target function: a quadratic a*x^2 + b*x + c evaluated at x.
def func(x,a,b,c):
    return x*x*a + x*b + c

# Returns ([x], [func(x,a,b,c)]) for a single uniform random x in [0, 1).
# NOTE(review): despite the name/comment above, only ONE random x is drawn.
def generatecandidate3(a,b,c):
    candidate = [np.random.random() for x in xrange(1)]
    candidatesolutions = [func(x,a,b,c) for x in candidate]
    return candidate, candidatesolutions

import copy

# Learning rate and layer widths (hidden_dim2 is only used by commented-out code).
alpha,hidden_dim,hidden_dim2 = (.001,12,4)

# Threshold picked from a 101-point grid by the first CLI argument (mod 100).
threshrange = np.linspace(.03,.1,101)
thresh = threshrange[int(sys.argv[1])%100]

# One entry per trained network: [synapse_0, synapse_1].
synapses = []

# Testing starting in the same place
#synapse0 = 2*np.random.random((1,hidden_dim)) - 1
#synapse1 = 2*np.random.random((hidden_dim,hidden_dim2)) - 1
#synapse2 = 2*np.random.random((hidden_dim2,1)) - 1

# Train sys.argv[2] networks (1 -> hidden_dim -> 1, sigmoid activations) by plain
# batch gradient descent until mean absolute error < thresh.
for i in xrange(int(sys.argv[2])):
    synapse_0 = 2*np.random.random((1,hidden_dim)) - 1
    synapse_1 = 2*np.random.random((hidden_dim,1)) - 1
    #synapse_2 = 2*np.random.random((hidden_dim2,1)) - 1
    #synapse_0 = copy.deepcopy(synapse0)
    #synapse_1 = copy.deepcopy(synapse1)
    #synapse_2 = copy.deepcopy(synapse2)
    #remove the comment to get random initialization
    stopcond = True
    while stopcond:
        # Fresh random training batch each outer pass.
        X = []
        y = []
        for i in xrange(10000):  # NOTE(review): shadows the outer loop variable i
            a,b = generatecandidate3(.5,.25,.1)
            X.append(a)
            y.append(b)
        X= np.array(X)
        y=np.array(y)
        j = 0
        while stopcond:
            # Forward pass: two sigmoid layers.
            layer_1 = 1/(1+np.exp(-(np.dot(X,synapse_0))))
            layer_2 = 1/(1+np.exp(-(np.dot(layer_1,synapse_1))))
            # Backward pass: deltas use the sigmoid derivative s*(1-s).
            layer_2_delta = (layer_2- y)*(layer_2*(1-layer_2))
            layer_1_delta = layer_2_delta.dot(synapse_1.T) * (layer_1 * (1-layer_1))
            synapse_1 -= (alpha * layer_1.T.dot(layer_2_delta))
            synapse_0 -= (alpha * X.T.dot(layer_1_delta))
            # how much did we miss the target value?
            layer_2_error = layer_2 - y
            if (j%50) == 0:
                print "Error after "+str(j)+" iterations:" + str(np.mean(np.abs(layer_2_error)))
            if np.mean(np.abs(layer_2_error)) < thresh:
                stopcond = False
            j+=1
    synapses.append([synapse_0,synapse_1])#,synapse_2])

# Idea: Take two networks as input. Construct a string connecting the two networks
# with "beads" along the string. Stochastically (Monte Carlo / simulated annealing)
# wiggle the beads until the max error along the beads is minimized.

from random import gauss
import copy

# Returns a unit vector of the given dimension, direction uniform on the sphere.
def make_rand_vector(dims):
    vec = [gauss(0, 1) for i in range(dims)]
    mag = sum(x**2 for x in vec) ** .5
    return [x/mag for x in vec]

#Definition for test set:
'''X = []
y = []
for i in xrange(100):
    j = i/100.
    a,b = [[j],[func(j,.5,.25,.1)]]
    X.append(a)
    y.append(b)
X= np.array(X)
y=np.array(y)'''

# Linear interpolation between two weight matrices; t in [0, 1].
def synapse_interpolate(synapse1, synapse2, t):
    return (synapse2-synapse1)*t + synapse1

X = []
y = []

# Builds a fresh random test set of 1000 points.
# NOTE(review): the X, y parameters are immediately shadowed and never read.
def GenTest(X, y):
    X = []
    y = []
    for i in xrange(1000):
        a,b = generatecandidate3(.5,.25,.1)
        X.append(a)
        y.append(b)
    return np.array(X), np.array(y)

X, y = GenTest(X,y)

class WeightString:
    """Container for a "string" of interpolated weight sets ("beads") strung
    between two trained networks w1 and w2.

    AllBeads holds [w1, bead_1, ..., bead_n, w2]; ConvergedList tracks which
    entries have been trained to convergence (endpoints start as True).
    """

    def __init__(self, w1, w2, numbeads, threshold, springk):
        self.w1 = w1
        self.w2 = w2
        self.beads = []
        self.velocity = []
        self.threshold = threshold
        self.springk = springk
        # Evenly space the beads along the straight line between w1 and w2.
        for n in xrange(numbeads):
            beaddata = []
            for k in xrange(len(self.w1)):
                beaddata.append(synapse_interpolate(self.w1[k],self.w2[k], (n + 1.)/(numbeads+1.)))
            self.beads.append(beaddata)
        self.velocity = copy.deepcopy(self.beads)
        # NOTE(review): this loop is a no-op — it rebinds the local v, it does not
        # zero the arrays stored in self.velocity.
        for b in self.velocity:
            for v in b:
                v = 0.*v
        self.InitialEnergy = self.SpringEnergy()
        self.AllBeads = copy.deepcopy(self.beads)
        self.AllBeads.insert(0,self.w1)
        self.AllBeads.append(self.w2)
        self.ConvergedList = [False for f in xrange(len(self.AllBeads))]
        self.ConvergedList[0] = True
        self.ConvergedList[-1] = True

    def SpringNorm(self, order):
        """Sum of `order`-norm distances between consecutive entries of AllBeads
        (i.e. total path length of the string, endpoints included)."""
        total = 0.
        #Energy between mobile beads
        for i,b in enumerate(self.AllBeads):
            if i < len(self.AllBeads)-1:
                subtotal = 0.
                for j in xrange(len(b)):
                    subtotal += np.linalg.norm(np.subtract(self.AllBeads[i][j],self.AllBeads[i+1][j]),ord=order)#/len(self.beads[0][j])
                total+=subtotal
        return total#/len(self.beads)

    def SpringEnergy(self):
        """Average per-bead 2-norm "spring" energy over the mobile beads,
        including the links to the pinned endpoints w1 and w2."""
        total = 0.
        #Energy between the pinned, immobile weight and the first bead
        subtotal = 0.
        for j in xrange(len(self.beads[0])):
            subtotal += np.linalg.norm(np.subtract(self.w1[j],self.beads[0][j]),ord=2)/len(self.beads[0][j])
        total+=subtotal
        #Energy between mobile beads
        for i,b in enumerate(self.beads):
            if i < len(self.beads)-1:
                subtotal = 0.
                for j in xrange(len(b)):
                    subtotal += np.linalg.norm(np.subtract(self.beads[i][j],self.beads[i+1][j]),ord=2)/len(self.beads[0][j])
                total+=subtotal
        #Energy between pinned, immobile final weights, and the last bead
        subtotal = 0.
        for j in xrange(len(self.beads[-1])):
            subtotal += np.linalg.norm(np.subtract(self.w2[j],self.beads[-1][j]),ord=2)/len(self.beads[0][j])
        total+=subtotal
        return total/len(self.beads)

    def SGDBead(self, bead, X, y):
        """One gradient-descent step (lr .001) on AllBeads[bead]; returns the
        mean absolute error BEFORE the update (computed from the forward pass)."""
        layers = []
        l1 = 1/(1+np.exp(-(np.dot(X,self.AllBeads[bead][0]))))
        layers.append(l1)
        for i,b in enumerate(self.AllBeads[bead][1:]):
            l = 1/(1+np.exp(-(np.dot(layers[-1],b))))
            layers.append(l)
        layersdelta = []
        l3 = (layers[-1] - y)*(layers[-1]*(1-layers[-1])) #+ (1./regparam)*OldSpringEnergy*np.ones(np.shape(y))
        layersdelta.append(l3)
        for i,l in enumerate(layers[:-1]):
            ldelta = layersdelta[-1].dot(self.AllBeads[bead][-1-i].T) * (layers[:-1][-1-i]) * (1- (layers[:-1][-1-i]))
            layersdelta.append(ldelta)
        for i in xrange(len(layers)-1):
            # NOTE(review): -i-1 is always negative here, so the else branch (the
            # input-layer update via X) is unreachable — the first weight matrix
            # appears never to be updated by this loop. Verify intended.
            if -i-1 != 0:
                self.AllBeads[bead][-i-1] -= .001*layers[-i-2].T.dot(layersdelta[i])
            else:
                self.AllBeads[bead][0] -= .001*X.T.dot(layersdelta[-1])
        finalerror = (layers[-1] - y)
        return np.mean(np.abs(finalerror))

    #monte carlo update step
    def UpdateBead(self, temperature, bead, X, y):
        """Gradient step (lr .1) on beads[bead], then Metropolis-style
        accept/reject against the change in spring energy at `temperature`.
        Always returns True."""
        regparam = 100.
        OldSpringEnergy = self.SpringEnergy()
        OldMax = [EvalNet(b,X)-y for b in self.beads]
        OldMaxError = max([np.mean(np.abs(om)) for om in OldMax])
        oe = OldSpringEnergy/100000. + OldMaxError
        oldweight = copy.deepcopy(self.beads[bead])
        layers = []
        l1 = 1/(1+np.exp(-(np.dot(X,self.beads[bead][0]))))
        layers.append(l1)
        for i,b in enumerate(self.beads[bead][1:]):
            l = 1/(1+np.exp(-(np.dot(layers[-1],b))))
            layers.append(l)
        layersdelta = []
        l3 = (layers[-1] - y)*(layers[-1]*(1-layers[-1])) #+ (1./regparam)*OldSpringEnergy*np.ones(np.shape(y))
        layersdelta.append(l3)
        for i,l in enumerate(layers[:-1]):
            ldelta = layersdelta[-1].dot(self.beads[bead][-1-i].T) * (layers[:-1][-1-i]) * (1- (layers[:-1][-1-i]))
            layersdelta.append(ldelta)
        for i in xrange(len(layers)-1):
            # NOTE(review): same unreachable-else pattern as in SGDBead.
            if -i-1 != 0:
                self.beads[bead][-i-1] -= .1*layers[-i-2].T.dot(layersdelta[i])
            else:
                self.beads[bead][0] -= .1*X.T.dot(layersdelta[-1])
            #The code below regularizes the network so that they stay near each other in weight space
            '''if bead == 0:
                self.beads[bead][-i-1] -= (np.subtract(self.beads[bead][-i-1],self.w1[-i-1]) + np.subtract(self.beads[bead+1][-i-1],self.beads[bead][-i-1]))/regparam
            if bead == len(self.beads)-1:
                self.beads[bead][-i-1] -= (np.subtract(self.w2[-i-1],self.beads[bead][-i-1]) + np.subtract(self.beads[bead][-i-1],self.beads[bead-1][-i-1]))/regparam
            if (bead > 0 and bead < len(self.beads)-1):
                self.beads[bead][-i-1] -= (np.subtract(self.beads[bead+1][-i-1],self.beads[bead][-i-1]) + \
                    np.subtract(self.beads[bead][-i-1],self.beads[bead-1][-i-1]))/regparam'''
        # how much did we miss the target value?
        NewSpringEnergy = self.SpringEnergy()
        finalerror = (layers[-1] - y) #(1./regparam)*NewSpringEnergy*np.ones(np.shape(y))
        NewMaxError = np.mean(np.abs(finalerror))
        ne = NewSpringEnergy/100000. + NewMaxError
        myrand = np.random.rand()
        # Reject (restore old weights) when both the spring energy and the max
        # error got worse, or probabilistically via the Boltzmann factor.
        if NewSpringEnergy > OldSpringEnergy:
            if NewMaxError > OldMaxError:
                self.beads[bead]=oldweight
            else:
                if myrand > np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature):
                    self.beads[bead]=oldweight
        return True

    def UpdateKinetic(self, dt, k):
        """One step of spring dynamics: move beads by their velocity, then update
        velocities from spring forces to neighbors (endpoints pinned)."""
        for bead in xrange(len(self.beads)):
            for i in xrange(len(self.beads[bead])):
                self.beads[bead][i] += dt*self.velocity[bead][i]
        for bead in xrange(len(self.beads)):
            for i in xrange(len(self.beads[bead])):
                if bead == 0:
                    self.velocity[bead][i] += -dt*k*(np.subtract(self.beads[bead][i],self.w1[i]) + np.subtract(self.beads[bead+1][i],self.beads[bead][i]))
                if bead == len(self.beads)-1:
                    self.velocity[bead][i] += -dt*k*(np.subtract(self.w2[i],self.beads[bead][i]) + np.subtract(self.beads[bead][i],self.beads[bead-1][i]))
                if (bead > 0 and bead < len(self.beads)-1):
                    self.velocity[bead][i] += -dt*k*(np.subtract(self.beads[bead+1][i],self.beads[bead][i]) + \
                        np.subtract(self.beads[bead][i],self.beads[bead-1][i]))

    #monte carlo update step
    def UpdateBeadPureKinetic(self, temperature, bead):
        """Random-perturbation Metropolis step on beads[bead]: add a small random
        unit-direction kick to each weight matrix, accept/reject on spring energy
        alone. Always returns True."""
        OldSpringEnergy = self.SpringEnergy()
        oldweight = copy.deepcopy(self.beads[bead])
        randupdates = []
        for i,syn in enumerate(self.beads[bead]):
            #create random perturbation to weight matrix with correct shape
            addtobead = np.reshape(make_rand_vector(syn.size),syn.shape)
            #add it to this particular bead
            self.beads[bead][i]+=.1*addtobead
        NewSpringEnergy = self.SpringEnergy()
        myrand = np.random.rand()
        if NewSpringEnergy > OldSpringEnergy:
            if myrand > np.exp(-(NewSpringEnergy - OldSpringEnergy)/temperature):
                self.beads[bead]=oldweight
        return True

# NOTE(review): this instance is overwritten inside the connectivity loop below.
test = WeightString(synapses[0],synapses[1],5,1,1)

# Forward pass of a 2-layer network; returns the output layer.
def EvalNet(net, X):
    layer_1 = 1/(1+np.exp(-(np.dot(X,net[0]))))
    layer_2 = 1/(1+np.exp(-(np.dot(layer_1,net[1]))))
    return layer_2

# Mean absolute error of a bead (2-layer weight set) on (X, y).
def BeadError(X, y, bead):
    X= np.array(X)
    y=np.array(y)
    layer_1 = 1/(1+np.exp(-(np.dot(X,bead[0]))))
    layer_2 = 1/(1+np.exp(-(np.dot(layer_1,bead[1]))))
    # how much did we miss the target value?
    layer_2_error = layer_2 - y
    return np.mean(np.abs(layer_2_error))

# Max mean-absolute-error over 100 evenly spaced interpolants between two beads.
# Optionally writes the error profile to "f<name>.out".
def InterpBeadError(X, y, bead1, bead2, write = False, name = "00"):
    '''X = []
    y = []
    for i in xrange(1000):
        a,b = generatecandidate3(.5,.25,.1)
        X.append(a)
        y.append(b)'''
    X= np.array(X)
    y=np.array(y)
    errors = []
    for tt in xrange(100):
        #Should make this architecture independent at some point
        t = tt/100.
        layer_1 = 1/(1+np.exp(-(np.dot(X,synapse_interpolate(bead1[0],bead2[0],t)))))
        layer_2 = 1/(1+np.exp(-(np.dot(layer_1,synapse_interpolate(bead1[1],bead2[1],t)))))
        # how much did we miss the target value?
        layer_2_error = layer_2 - y
        errors.append(np.mean(np.abs(layer_2_error)))
    if write == True:
        with open("f" + str(name) + ".out",'w+') as f:
            for e in errors:
                f.write(str(e) + "\n")
    return max(errors)

# Connectivity search: for every pair of trained networks, try to build a low-error
# path between them by training beads and subdividing high-error segments.
# connecteddict is a crude union-find mapping network index -> component label.
results = []
connecteddict = {}
for i1 in xrange(len(synapses)):
    connecteddict[i1] = 'not connected'

for i1 in xrange(len(synapses)):
    for i2 in xrange(len(synapses)):
        if i2 > i1 and ((connecteddict[i1] != connecteddict[i2]) or (connecteddict[i1] == 'not connected' or connecteddict[i2] == 'not connected')) :
            test = WeightString(synapses[i1],synapses[i2],1,1,1)
            training_threshold = thresh
            depth = 0
            d_max = 10
            #Check error between beads
            #Alg: for each bead at depth i, SGD until converged.
            #For beads with max error along path too large, add another bead between them, repeat
            while (depth < d_max):
                X, y = GenTest(X,y)
                counter = 0
                # Train every not-yet-converged bead (up to 40000 total steps).
                for i,c in enumerate(test.ConvergedList):
                    if c == False:
                        error = BeadError(X, y, test.AllBeads[i])
                        while error > .5 * training_threshold and counter < 40000:
                            counter += 1
                            error = test.SGDBead(i, X, y)
                            if counter%5000==0:
                                print counter
                                print error
                        test.ConvergedList[i] = True
                interperrors = []
                for b in xrange(len(test.AllBeads)-1):
                    e = InterpBeadError(X,y,test.AllBeads[b],test.AllBeads[b+1])
                    interperrors.append(e)
                if max(interperrors) < training_threshold:
                    # Path found: sentinel depth value marks success.
                    depth = 2*d_max
                else:
                    #Interperrors stores the maximum error on the path between beads
                    #shift index to account for added beads
                    shift = 0
                    for i, ie in enumerate(interperrors):
                        if ie > training_threshold:
                            beaddata = []
                            for k in xrange(len(test.w1)):
                                beaddata.append(synapse_interpolate(test.AllBeads[i+shift][k],test.AllBeads[i+shift+1][k], .5))
                            test.AllBeads.insert(i+shift+1,beaddata)
                            test.ConvergedList.insert(i+shift+1, False)
                            shift+=1
                    depth += 1
            if depth == 2*d_max:
                results.append([i1,i2,test.SpringNorm(2),"Connected"])
                # Merge the two components in connecteddict.
                if connecteddict[i1] == 'not connected' and connecteddict[i2] == 'not connected':
                    connecteddict[i1] = i1
                    connecteddict[i2] = i1
                if connecteddict[i1] == 'not connected':
                    connecteddict[i1] = connecteddict[i2]
                else:
                    if connecteddict[i2] == 'not connected':
                        connecteddict[i2] = connecteddict[i1]
                    else:
                        if connecteddict[i1] != 'not connected' and connecteddict[i2] != 'not connected':
                            hold = connecteddict[i2]
                            connecteddict[i2] = connecteddict[i1]
                            for h in xrange(len(synapses)):
                                if connecteddict[h] == hold:
                                    connecteddict[h] = connecteddict[i1]
            else:
                results.append([i1,i2,test.SpringNorm(2),"Disconnected"])

# Tally distinct component labels; each still-'not connected' network counts as
# its own component.
uniquecomps = []
totalcomps = 0
for i in xrange(len(synapses)):
    if not (connecteddict[i] in uniquecomps):
        uniquecomps.append(connecteddict[i])
    if connecteddict[i] == 'not connected':
        totalcomps += 1

notconoffset = 0
if 'not connected' in uniquecomps:
    notconoffset = -1

print "Thresh: " + str(thresh)
print "Comps: " + str(len(uniquecomps) + notconoffset + totalcomps)

# Report the mean and spread of the path lengths of connected pairs.
connsum = []
for r in results:
    if r[3] == "Connected":
        connsum.append(r[2])

print "***"
print np.average(connsum)
print np.std(connsum)
In the statement following the last [. . . ] meeting of the year the FOMC noted that some further measured policy firming was likely to be necessary. ecb.europa.eu Dans la déclaration qui a suivi la [. . . ] dernière réunion de l'année, le FOMC a indiqué que de nouveaux resserrements modérés [. . . ] de la politique monétaire seraient vraisemblablement nécessaires. ecb.europa.eu I solemnly declare upon my honour and conscience that I will speak [. . . ] the truth, the whole truth and nothing but [. . . ] the truth and that my statement will be in accordance [. We are excited to announce a new vision and strategy for New Life Christian Ministries Singles' Ministry! New Life's Singles' Ministry is on the rise! The single life can be unique and exciting. At times you may enjoy the freedom and independence, and at other times, you may long just to be in the company of others. Come on out and enjoy the fellowship, build relationships with people who share common life experiences and share the journey together. New Life's single adult ministry exists to connect single adults with the New Life family. The guru of all gurus in carburetor function was without a doubt an obscure Dutch-Swiss dude named Danny. OK, his full name was Daniel Bernoulli and it's possible you may have heard of him. Besides being an über-smart mathematician/statistician/physicist/author, he put into words the theory of why carburetors work. Of course, unless you are also an über-smart physicist, you might not get all the little details and decipher the squiggly lines in his writing. That means that when it's time to bolt a brand-new, shiny fuel mixer on your bitchin 383 with double hump heads and three-quarter race cam, you're better off talking with the tech guys at the company you bought it from in order to get it running to the best of its ability. This is a sample personal statement written by our professional writer.
Please note that our website is scanned by various anti-plagiarism software, so do not attempt to copy/paste this personal statement. You will get caught and your university career will be over before it has begun! To get a custom-written personal statement, just complete the order form and we will write an original personal statement, based specifically on the information you give us, which will never be published or resold. This is my application to the University of Greenwich, to study the MBA course.
# -*- coding: utf-8 -*-
"""
grigri.tools
~~~~~~~~~~~~~~~~~~

Miscellaneous functions for dealing with type-checking.
"""

from datetime import datetime, date
from math import pi, sin, cos, atan2, sqrt, floor, ceil

import pandas as pd
from dateutil.parser import parse


def is_null(*args):
    """
    Returns the first non-null value. Similar to T-SQL COALESCE() function.

    Returns ``None`` when called with no arguments; if every argument is
    null, returns the last argument.

    >>> is_null(None, float('nan'), 'hello')
    'hello'
    """
    if not args:
        # Guard: previously crashed with IndexError on args[-1].
        return None
    for k in args:
        if not pd.isnull(k):
            return k
    # if everything is null then return the last argument
    return args[-1]


def is_numeric(n):
    """
    Tests if an object is interpretable as a number.

    >>> is_numeric('1')
    True
    """
    try:
        float(n)
        return True
    except (ValueError, TypeError):
        return False


def is_date(dt, strict=True):
    """
    Tests if an object is interpretable as a datetime object.

    :param dt: object to test as a datetime
    :param strict: If set to `False` will also try to interpret strings as
                   dates.
    """
    if isinstance(dt, (datetime, date)):
        return True

    if not strict:
        try:
            if dt not in (' ', '-', ''):
                parse(dt)
                return True
        # TypeError covers non-string input (e.g. an int): dateutil's parse()
        # raises it, and previously it escaped instead of returning False.
        except (AttributeError, ValueError, TypeError):
            pass

    return False


def is_empty(data):
    """
    Checks if an object, particularly :class:`Series` and :class:`DataFrame`,
    contains any values.

    .. note::
        ``DataFrame`` objects have an ``empty`` attribute, but ``Series``
        don't. This function allows you to check both data structures using
        only one function.
    """
    try:
        return not bool(data)
    except ValueError:
        # pandas objects refuse bool(); fall through to their own checks.
        pass

    try:
        return data.empty
    # Series objects do not have an empty method, so check
    # if there are any values
    except AttributeError:
        if data.tolist():
            return False
        return True


def percent_change(before, after):
    """
    Return percent change increase or decrease between two numbers.

    Returns NaN when `before` is zero.

    >>> percent_change(100, 110)
    0.1
    """
    try:
        return (1. * after - before) / before
    except ZeroDivisionError:
        return float('nan')


def find_column_name(frame, column_name):
    """
    Searches for the desired column in a DataFrame and returns its name.

    This situation arises when you pull data from a data source (e.g. SQL)
    and you know the column name is installationid, but case-sensitivity may
    be an issue.

    :param frame: DataFrame to search.
    :param column_name: Column name to look up case-insensitively.
    :raises KeyError: if no case-insensitive match exists.
    """
    column_match = column_name.lower()

    for col in frame.columns:
        if col.lower() == column_match:
            return col

    raise KeyError("Cannot find column in DataFrame: %s" % column_name)


def split_sequence(data, n):
    """
    Splits a Series or DataFrame (or any list-like object) into chunks.

    :param data: List-like data-structure (list, DataFrame, Series,...) to
                 be split.
    :param n: Number of chunks to split `data` into. Chunks will be as equal
              size as possible.

    .. warning::
        Data is not sorted by ``split_sequence`` and will be split as given.
        You must pre-sort if necessary.
    """
    L = len(data)
    split_size = int(ceil(1. * L / n))

    return (data[i:i + split_size] for i in range(0, L, split_size))


def ditto(frames, meth, *args, **kwargs):
    """
    Applies the same method to one or more DataFrames. Any args or kwargs
    necessary to call `meth` should also be passed in.

    :param frames: List of DataFrames.
    :param meth: Name of DataFrame method to call on each DataFrame in
                 `frames`.
    """
    return [getattr(frame, meth)(*args, **kwargs) for frame in frames]
This is a one-stop business. They've got a good variety of groceries you forgot at the big store as well as a full gas station. They also can make to order sandwiches for you. We stop here sometimes after getting gas @ Ruby's Arco. We can get sandwiches to go, a soda or snacks, and a lotto ticket or two. Their sandwiches are excellent. We're always finding something we need @ Joe's. From hiking and fishing gear to a part for the boat to snowmobile pants to .... And we can always stop in at the TicketMaster Outlet and pick up tickets for the game. They've got lots of things there. My car has been worked on here for a number of different things. The staff is nice to deal with and does good work. If your warranty is up like mine, you need someone you can trust. If you're looking for a housewarming gift, you can likely find it here. They have lots of inventory - many items in popular decorating themes. Even if you're not looking for a theme item, they have candle holders, clocks, frames, etc. I buy most of my husband's work jeans here. They're 2nds and discounted, but fit and wear as well as the others. This way I don't feel bad if he only wears them for a month or so and rips them or spills something on them that won't come out - I can just toss 'em and I still got my money's worth. You can always find something cute here - and it'll go on sale, too. I can spend more than my allowance on cute kids' clothes at this store. At the end of the season, they sometimes put their clearance on an additional 25% off. That's when you see the best bargains. My husband is a jeans and t-shirts kind of guy. But when he needs to dress it up a little, I've always found great items here. Their cost is really reasonable and I can always find something - They even have suits for under $100.
#-------------------------------------------------------------------------------
# coding=utf8
# Name:        Module 1 (originally titled in Chinese)
# Purpose:     Build a keyword-feature spreadsheet for CCTV Weibo posts:
#              for each post, segment its text with jieba, look each token up
#              in a precomputed keyword table (min/max/mean popularity), and
#              write the min/max/median keyword's statistics plus post time and
#              like+repost counts to a new workbook.
#
# Author:      zhx
#
# Created:     10/05/2016
# Copyright:   (c) zhx 2016
# Licence:     <your licence>
#-------------------------------------------------------------------------------
# NOTE(review): Python 2 script (print statement, xrange, dict.has_key).
import openpyxl
import jieba

# Posts with like+repost above this count are "popular".
threshold = 2140
# NOTE(review): this module-level flag is written inside main() via a local
# rebinding and is never read afterwards — it has no effect.
popular = 0

def main():
    # Inputs: post data and the keyword statistics table; output workbook.
    cctv_data = openpyxl.load_workbook("cctv.xlsx")
    cctv_keywords = openpyxl.load_workbook("cctv_keywords.xlsx")
    cctv_new = openpyxl.Workbook()
    new_sheet = cctv_new.active
    sheet1 = cctv_keywords["Sheet"]
    sheet2 = cctv_data["Sheet"]
    # keyword -> [min, max, mean] popularity values.
    # NOTE(review): row counts (36003, 4749) are hard-coded to these workbooks.
    words = {}
    for r in xrange(1,36003):
        word = sheet1.cell(row=r,column=1).value
        word_min = sheet1.cell(row=r,column=2).value
        word_max = sheet1.cell(row=r,column=3).value
        word_mean = sheet1.cell(row=r,column=4).value
        words[word] = [word_min,word_max,word_mean]
    for r in xrange(2,4749):
        print r
        content = sheet2.cell(row=r,column=3).value
        time = sheet2.cell(row=r,column=11).value
        like = sheet2.cell(row=r,column=5).value
        repost = sheet2.cell(row=r,column=6).value
        # The sheet uses the literal labels "赞"/"转发" when a count is absent.
        if like == '赞':
            like = '0'
        if repost =='转发':
            repost = '0'
        like_repost = int(like)+int(repost)
        if like_repost>threshold:
            popular =1
        else:
            popular =0
        # Convert the timestamp to minutes-since-midnight.
        # NOTE(review): slicing [1:3]/[4:] assumes a fixed string layout with a
        # leading character before HH and a separator before MM — TODO confirm
        # against the workbook's actual time format.
        hour = int(time[1:3])
        minute =int (time[4:])
        time = hour*60 + minute
        new_sheet.cell(row=r,column=10).value = time
        new_sheet.cell(row=r,column=11).value = like_repost
        # Posts without text get only time/engagement columns.
        if content ==None:
            continue
        print r
        # Full-mode segmentation, then split the joined string back to tokens.
        seg_list = jieba.cut(content, cut_all = True)
        wordsplite = ' '.join(seg_list)
        wordsplite = wordsplite.split(' ')
        # Track the known keywords with the smallest/largest mean popularity,
        # and collect all mean values to find the median keyword.
        maxlike = 0
        max_word =''
        min_word =''
        mean_word=''
        minlike = 9999999
        tmplist = []
        tmpdic ={}
        for w in wordsplite:
            if words.has_key(w):
                tmpdic[w] =int(words[w][2])
                tmplist.append(int(words[w][2]))
                likes = int(words[w][2])
                if likes<minlike:
                    minlike = likes
                    min_word = w
                if likes>maxlike:
                    maxlike = likes
                    max_word = w
            else:
                continue
        if len(tmplist)!=0:
            # Median of the collected mean-popularity values; pick any keyword
            # whose value equals it.
            tmplist.sort()
            mean = tmplist[int(len(tmplist)/2)]
            for w in tmpdic:
                if tmpdic[w]==mean:
                    mean_word =w
        # Columns 1-3: min keyword's [min, max, mean]; 4-6: max keyword's;
        # 7-9: median keyword's.
        if min_word!='':
            new_sheet.cell(row=r,column=1).value = words[min_word][0]
            new_sheet.cell(row=r,column=2).value = words[min_word][1]
            new_sheet.cell(row=r,column=3).value = words[min_word][2]
        if max_word!='':
            new_sheet.cell(row=r,column=4).value = words[max_word][0]
            new_sheet.cell(row=r,column=5).value = words[max_word][1]
            new_sheet.cell(row=r,column=6).value = words[max_word][2]
        if mean_word!='':
            new_sheet.cell(row=r,column=7).value = words[mean_word][0]
            new_sheet.cell(row=r,column=8).value = words[mean_word][1]
            new_sheet.cell(row=r,column=9).value = words[mean_word][2]
    cctv_new.save("train_feature_keyword_reg.xlsx")

main()
The line of communication is forever open with our brand-new activity feed. When a new task falls onto our plate, it can get lost without the proper organization. Our latest dashboard will let you know what’s being done to drive tons of traffic straight to your business. Depending on the type of marketing services you get, which we hope is all of them, you’ll have the ability to assign functions to its respective manager. You’ll know – even before they do – who are performing the task. When a task is completed, you’ll see who finished it, what they did, and when they finished. You won’t have to wonder about the status of work because you’ll have an all-access pass. If you’ve never had the opportunity to benefit from marketing, our categories give you a brief description of the task. Of course, we’ll give you all the details, but having specific groups works best for your business’s needs.
# encoding: utf-8

import json

import requests

BASE_URL = 'https://api.pushed.co'
API_VERSION = '1'
PUSH = 'push'
OAUTH = 'oauth'
ACCESS_TOKEN = 'oauth/access_token'
USER_AGENT = 'python-pushed/0.1.4'


class Pushed(object):
    '''Pushed.co API client class.

    Param: app_key    -> A Pushed.co application key
           app_secret -> The secret authorizing the application key
    '''

    def __init__(self, app_key, app_secret):
        self.app_key, self.app_secret = app_key, app_secret

    def push_app(self, content, content_url=None):
        '''Push a notification to a Pushed application.

        Param: content -> content of Pushed notification message
               content_url (optional) -> enrich message with URL

        Returns Shipment ID as string
        '''
        parameters = {
            'app_key': self.app_key,
            'app_secret': self.app_secret
        }
        return self._push(content, 'app', parameters, content_url)

    def push_channel(self, content, channel, content_url=None):
        '''Push a notification to a Pushed channel.

        Param: content -> content of Pushed notification message
               channel -> string identifying a Pushed channel
               content_url (optional) -> enrich message with URL

        Returns Shipment ID as string
        '''
        parameters = {
            'app_key': self.app_key,
            'app_secret': self.app_secret,
            'target_alias': channel
        }
        return self._push(content, 'channel', parameters, content_url)

    def push_user(self, content, access_token, content_url=None):
        '''Push a notification to a specific pushed user.

        Param: content -> content of Pushed notification message
               access_token -> OAuth access token
               content_url (optional) -> enrich message with URL

        Returns Shipment ID as string
        '''
        parameters = {
            'app_key': self.app_key,
            'app_secret': self.app_secret,
            'access_token': access_token
        }
        return self._push(content, 'user', parameters, content_url)

    def push_pushed_id(self, content, pushed_id, content_url=None):
        '''Push a notification to a specific pushed user by Pushed ID.

        Param: content -> content of Pushed notification message
               pushed_id -> user's pushed ID
               content_url (optional) -> enrich message with URL

        Returns Shipment ID as string
        '''
        parameters = {
            'app_key': self.app_key,
            'app_secret': self.app_secret,
            'pushed_id': pushed_id,
            'target_alias': 'Nothing'  # Required, but seems unused
        }
        return self._push(content, 'pushed_id', parameters, content_url)

    def _push(self, content, target_type, parameters=None, content_url=None):
        '''Send a push request to the API and return the Shipment ID.

        Param: content -> content of Pushed notification message
               target_type -> one of 'app', 'channel', 'user', 'pushed_id'
               parameters (optional) -> extra credential/target fields
               content_url (optional) -> enrich message with URL

        Raises PushedAPIError on a non-2xx response.

        Fixed: previously this used a mutable default argument
        (``parameters={}``) and then mutated it in place — a shared-state bug
        that also clobbered the caller's dict. It now defaults to None and
        works on a copy.
        '''
        payload = {} if parameters is None else dict(parameters)
        payload.update(
            {
                'content': content,
                'target_type': target_type
            }
        )
        if content_url is not None:
            payload.update(
                {
                    'content_type': 'url',
                    'content_extra': content_url
                }
            )
        push_uri = "/".join([BASE_URL, API_VERSION, PUSH])
        success, response = self._request(push_uri, payload)
        if success:
            return response['response']['data']['shipment']
        raise PushedAPIError(
            response['error']['type'],
            response['error']['message']
        )

    def access_token(self, code):
        '''Exchange a temporary OAuth2 code for an access token.

        Param: code -> temporary OAuth2 code from a Pushed callback

        Returns access token as string
        '''
        parameters = {"code": code}
        access_uri = "/".join([BASE_URL, API_VERSION, ACCESS_TOKEN])
        # RFC non-compliant response prevents use of standard OAuth modules
        success, response = self._request(access_uri, parameters)
        if success:
            return response['response']['data']['access_token']
        raise PushedAPIError(
            response['error']['type'],
            response['error']['message']
        )

    def _request(self, url, parameters):
        '''POST `parameters` as JSON to `url`.

        Returns (success, decoded_json) where success is True for any 2xx
        HTTP status.
        '''
        headers = {
            'Content-Type': 'application/json',
            'User-Agent': USER_AGENT
        }
        r = requests.post(url, data=json.dumps(parameters), headers=headers)
        return (
            str(r.status_code).startswith('2'),
            r.json()
        )

    def authorization_link(self, redirect_uri):
        '''Construct OAuth2 authorization link.

        Params: redirect_uri -> URI for receiving callback with token

        Returns authorization URL as string

        Fixed: redirect_uri is now percent-encoded as OAuth2 requires;
        previously a redirect URI containing '&' or '?' produced a broken
        query string.
        '''
        try:
            from urllib.parse import quote  # Python 3
        except ImportError:  # pragma: no cover
            from urllib import quote  # Python 2 fallback
        args = '?client_id=%s&redirect_uri=%s' % (
            self.app_key,
            quote(redirect_uri, safe='')
        )
        uri = "/".join([BASE_URL, API_VERSION, OAUTH, args])
        return uri


class PushedAPIError(Exception):
    '''Raise when an API request does not return a success status'''
YouTube Creators are digital citizens who create content for the YouTube platform. From vlogs to “how-to’s” to “listicles”, anyone who makes videos for YouTube can be considered a YouTube Creator and can make money from video creation, too. YouTube Studio is the channel hub for YouTube Creators, allowing its users to easily manage settings, content, and analytics. Earlier last year, YouTube unveiled changes to the dashboard, offering creators more insight into visitors to their channel and content. In another update, YouTube also improved live streaming options, making it easier for creators to go live from any device. Last week, the largest video streaming website unveiled new policies that its creators must follow when uploading content. One noticeable change is an added section for videos relating to dangerous pranks. YouTube has previously tackled pranks in the harmful and dangerous content category of its overall policies. Following a series of disturbing Bird Box challenge videos and Tide Pod eating challenges, YouTube felt the need to take a tougher stance. YouTube has issued a warning to YouTube creators that videos cheering on dangerous challenges and pranks are in violation of its community guidelines and will result in strikes. Recurrent transgressions could lead to account termination. “We’ve updated our external guidelines to make it clear that we prohibit challenges presenting a risk of serious danger or death, and pranks that make victims believe they’re in serious physical danger, or cause children to experience severe emotional distress,” the execs at YouTube stated. Given how dangerous some of the prank acts are, one would assume that such acts would result in a channel being terminated. Unfortunately that does not always happen on YouTube unless they are repeat offenses (3 or more incidents) in a short period of time. YouTube’s policy is to hand out a strike and remove the video, but that strike(s) disappears after 90 days.
While a creator may lose some privileges in those 90 days, including the ability to live stream, they could technically return to normalcy once the strike(s) are removed. New to YouTube Community Guidelines are custom thumbnail violations. YouTube will issue YouTube Creators a strike for uploading custom thumbnails that purposely violate their policies and try to con their viewers. Examples of thumbnails that could earn you a strike include ones that contain adult material and graphic violence. YouTube will also issue strikes to YouTube creators who add external links to sites that purposely violate policies. These include links to adult content, malware, or spam. YouTube is giving users a “grace period” to make changes to content if needed and/or remove videos that are in violation of community guidelines. If content that violates YouTube’s community guidelines related to custom thumbnails, external links, challenges, and pranks is removed within the next two months, the channel will not be penalized. YouTube personnel are highly trained. They review all possible violations before issuing a warning or termination. If you feel that your video does not violate the Community Guidelines and was removed in error, you can appeal the strike(s) by following the instructions. Sign in to YouTube from your computer. Click your account icon > Creator Studio. In the left menu, click Channel > Status and features. Go to the Community Guidelines Status section. It is very rare for YouTube to reverse its decision. If your appeal is denied, you will have to wait 3 months for the strike to expire. If you earn 3 strikes within 3 months, your account will be terminated. If you have a YouTube channel, we encourage you to browse through the community guidelines and make sure that your videos are in compliance with them. When in doubt, delete the videos. yourDMAC is committed to your success in content marketing (e.g., blogging).
Our online digital marketing course sets up all professionals, whether novice or experienced, for success and helps them develop winning strategies that transform their business. Alternatively, we offer customized corporate training workshops to senior executives and their staff. We work collaboratively with all businesses, both big and small, to understand their corporate vision, goals and objectives and help develop winning strategies that will lead them to success in the least amount of time. Contact us today and see how we can put your digital marketing career on the right track. 0 responses on "YouTube Tightens Rules around Pranks and Dangerous Challenges | Social Media Marketing Training"
### Likelihood storage for bayleaf
### Author: David Schlueter
### Vanderbilt University Department of Biostatistics
### July 10, 2017

# Fix: `theano.tensor` was imported twice (before and after numpy); the
# duplicate import has been removed.
import theano.tensor as tt
import numpy as np

from pymc3.distributions import Continuous, draw_values, generate_samples, Bound, transforms


## Base class from pymc3
class PositiveContinuous(Continuous):
    """Base class for positive continuous distributions.

    Applies a log transform by default so samplers operate on an
    unconstrained scale.
    """
    def __init__(self, transform=transforms.log, *args, **kwargs):
        super(PositiveContinuous, self).__init__(
            transform=transform, *args, **kwargs)

################################################################################
###################### Univariate Parametric Models ############################
################################################################################

class Exponential_Censored(PositiveContinuous):
    """
    Exponential censored log-likelihood.

    For an observed event (event == 1) the contribution is the log density
    log(rate) - rate * t; for a right-censored observation (event == 0) it is
    the log survival function -rate * t, where rate = exp(indep).

    Parameters
    ----------
    alpha : float
        For exponential model, set = 1 . (Accepted to keep a uniform
        constructor signature across models; not used by this likelihood.)
    indep : tensor
        Linear predictor (X'beta); exponentiated inside ``logp``.
    """
    def __init__(self, alpha, indep, *args, **kwargs):
        super(Exponential_Censored, self).__init__(*args, **kwargs)
        self.indep = indep = tt.as_tensor_variable(indep)

    def logp(self, value, event):
        indep = self.indep
        # rate = exp(X'beta)
        indep = tt.exp(indep)
        # event * log h(t) - H(t), with h(t) = rate and H(t) = rate * t
        return event * tt.log(indep) - indep * value

class Weibull_Censored(PositiveContinuous):
    """
    Weibull censored log-likelihood.

    Parameters
    ----------
    alpha : float
        Shape parameter (alpha > 0).
    indep : tensor
        Linear predictor (log of the scale parameter); exponentiated
        inside ``logp``.
    """
    def __init__(self, alpha, indep, *args, **kwargs):
        super(Weibull_Censored, self).__init__(*args, **kwargs)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.indep = indep = tt.as_tensor_variable(indep)

    def logp(self, value, event):
        indep = self.indep
        alpha = self.alpha
        # lam = exp(X'beta)
        indep = tt.exp(indep)
        # event * log h(t) - H(t), with h(t) = alpha*lam*t^(alpha-1)
        # and H(t) = lam*t^alpha
        return event*(tt.log(alpha) + tt.log(indep) + (alpha-1)*tt.log(value))- (indep * value**alpha)

## CoxPH w/ weibull baseline hazard
class WeibullPH(PositiveContinuous):
    """
    Cox PH censored log-likelihood with weibull baseline hazard.

    Parameters
    ----------
    alpha : float
        Shape parameter (alpha > 0).
    lam : tensor
        Baseline-hazard scale parameter.
    indep : tensor
        Linear predictor (X'beta); exponentiated inside ``logp``.
    """
    def __init__(self, alpha, lam, indep, *args, **kwargs):
        super(WeibullPH, self).__init__(*args, **kwargs)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.lam = lam = tt.as_tensor_variable(lam)
        self.indep = indep = tt.as_tensor_variable(indep)

    # Weibull survival likelihood, accounting for censoring
    def logp(self, value, event):
        indep = self.indep
        alpha = self.alpha
        lam = self.lam
        # exp(X'beta): proportional-hazards multiplier on the baseline hazard
        indep = tt.exp(indep)
        # event * log h(t|X) - H(t|X), with h(t|X) = alpha*lam*t^(alpha-1)*exp(X'beta)
        # and H(t|X) = lam*t^alpha*exp(X'beta)
        return event*(tt.log(alpha) + tt.log(lam) + tt.log(indep) + (alpha-1)*tt.log(value)) - (lam*indep * value**alpha)

#class ExtremeValue_Censored(PositiveContinuous):
#    """
#    Extreme Value censored log-likelihood.
#    .. math::
#    ======== ====================================================
#    ======== ====================================================
#    Parameters
##    ----------
#    alpha : float
#        Shape parameter (alpha > 0).
#    """
#    def __init__(self, alpha, indep, *args, **kwargs):
#        super(ExtremeValue_Censored, self).__init__(*args, **kwargs)
#        self.alpha = alpha = tt.as_tensor_variable(alpha)
#        self.indep = indep = tt.as_tensor_variable(indep)
#    Extreme Value survival likelihood, accounting for censoring
#    def logp(self, value, event):
#        indep = self.indep
#        alpha = self.alpha
#        return event*(tt.log(alpha)+(alpha*value)+indep) - tt.exp(indep+alpha*value)

#### TO ADD: Gamma, Log-Normal

################################################################################
###################### Univariate Semi-Parametric Models ############################
################################################################################
#### To Add, Piecewise exponential

###############################################################################
###################### Multivariate Parametric Models ############################
################################################################################
#### To Add, Gamma frailty with Weibull Baseline hazard

###############################################################################
###################### Multivariate Parametric Models ##########################
################################################################################
############################# Copula Likelihoods ###############################

class Clayton_Censored(PositiveContinuous):
    """
    Bivariate Clayton-copula censored log-likelihood with Weibull margins.

    ## we will modify this to include flexible specification of the baseline
    ## hazard; for now though we will just assume a weibull form in each
    ## dimension

    Parameters
    ----------
    alpha : tensor
        Copula association parameter.
    indep_1, indep_2 : tensor
        Linear predictors (X'beta) for each margin.
    rho_1, lam_1, rho_2, lam_2 : tensor
        Weibull shape (rho) and scale (lam) of each baseline hazard.
    """
    def __init__(self, alpha, indep_1, indep_2, rho_1, lam_1, rho_2, lam_2,
                 *args, **kwargs):
        super(Clayton_Censored, self).__init__(*args, **kwargs)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        self.indep_1 = indep_1 = tt.as_tensor_variable(indep_1)
        self.lam_1 = lam_1 = tt.as_tensor_variable(lam_1)
        self.rho_1 = rho_1 = tt.as_tensor_variable(rho_1)
        self.indep_2 = indep_2 = tt.as_tensor_variable(indep_2)
        self.lam_2 = lam_2 = tt.as_tensor_variable(lam_2)
        self.rho_2 = rho_2 = tt.as_tensor_variable(rho_2)

    def logp(self, time_1, time_2, delta_1, delta_2):
        """
        time_1: array
            time in the first dimension.
        time_2: array
            time in the second dimension.
        delta_1: array
            event indicator in the first dimension.
        delta_2: array
            event indicator in the second dimension.
        """
        ## define local instances of the globally initiated variables
        alpha = self.alpha
        indep_1 = self.indep_1
        lam_1 = self.lam_1
        rho_1 = self.rho_1
        indep_2 = self.indep_2
        lam_2 = self.lam_2
        rho_2 = self.rho_2
        ### Now define survival quantities
        ### Baseline quantities
        # H(t) = lam*t^{rho}
        base_cum_hazard_1 = lam_1*time_1**(rho_1)
        base_cum_hazard_2 = lam_2*time_2**(rho_2)
        # h(t) = lam*rho*t^{rho-1}
        base_hazard_1 = lam_1*rho_1*time_1**(rho_1-1)
        base_hazard_2 = lam_2*rho_2*time_2**(rho_2-1)
        # h(t|X) = h(t)*exp(X'β)
        conditional_hazard_1 = base_hazard_1 * tt.exp(indep_1)
        conditional_hazard_2 = base_hazard_2 * tt.exp(indep_2)
        # H(t|X) = H(t)*exp(X'β)
        conditional_cum_hazard_1 = base_cum_hazard_1 * tt.exp(indep_1)
        conditional_cum_hazard_2 = base_cum_hazard_2 * tt.exp(indep_2)
        # S(t|X) = exp(-H(t|X))
        surv_1 = tt.exp(-conditional_cum_hazard_1)
        surv_2 = tt.exp(-conditional_cum_hazard_2)
        ## f(t|X) = S(t|X)*h(t|X)
        density_1 = conditional_hazard_1 * surv_1
        density_2 = conditional_hazard_2 * surv_2
        ### Copula derivatives:
        # log C(u,v), log dC/du, log dC/dv and log d2C/(du dv) of the
        # Clayton copula, evaluated at u = S1(t1|X), v = S2(t2|X).
        log_clayton_copula = (-alpha)**(-1)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d_clayton_copula_s1 = -(alpha+1)*tt.log(surv_1)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d_clayton_copula_s2 = -(alpha+1)*tt.log(surv_2)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d2_clayton_copula_s1_s2 = tt.log(alpha+1)+(-(alpha+1))*tt.log(surv_1*surv_2)-((2*alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        ### different parts of log likelihood, one per censoring pattern:
        # both observed / only 1 observed / only 2 observed / both censored
        first = delta_1*delta_2*(log_d2_clayton_copula_s1_s2+tt.log(density_1)+tt.log(density_2))
        second = delta_1*(1-delta_2)*(log_d_clayton_copula_s1+tt.log(density_1))
        third = delta_2*(1-delta_1)*(log_d_clayton_copula_s2+tt.log(density_2))
        fourth = (1-delta_1)*(1-delta_2)*log_clayton_copula
        return first + second + third + fourth

class Clayton_Censored_Trans(PositiveContinuous):
    """
    Bivariate Clayton-copula censored log-likelihood with transformed
    (Weibull-baseline) margins.

    ## we will modify this to include flexible specification of the baseline
    ## hazard; for now though we will just assume a weibull form in each
    ## dimension

    Parameters
    ----------
    alpha : tensor
        Copula association parameter.
    indep_1, indep_2 : tensor
        Linear predictors (X'beta) for each margin.
    rho_1, lam_1, rho_2, lam_2 : tensor
        Weibull shape (rho) and scale (lam) of each baseline hazard.
    r_1, r_2 : tensor
        Transformation parameters for each margin (see ``logp``).
    """
    def __init__(self, alpha, indep_1, indep_2, rho_1, lam_1, rho_2, lam_2,
                 r_1, r_2, *args, **kwargs):
        super(Clayton_Censored_Trans, self).__init__(*args, **kwargs)
        self.alpha = alpha = tt.as_tensor_variable(alpha)
        ## Parameters for first dimension
        self.indep_1 = indep_1 = tt.as_tensor_variable(indep_1)
        self.lam_1 = lam_1 = tt.as_tensor_variable(lam_1)
        self.rho_1 = rho_1 = tt.as_tensor_variable(rho_1)
        self.r_1 = r_1 = tt.as_tensor_variable(r_1)
        ## Parameters for second dimension
        self.indep_2 = indep_2 = tt.as_tensor_variable(indep_2)
        self.lam_2 = lam_2 = tt.as_tensor_variable(lam_2)
        self.rho_2 = rho_2 = tt.as_tensor_variable(rho_2)
        self.r_2 = r_2 = tt.as_tensor_variable(r_2)

    def logp(self, time_1, time_2, delta_1, delta_2):
        """
        time_1: array
            time in the first dimension.
        time_2: array
            time in the second dimension.
        delta_1: array
            event indicator in the first dimension.
        delta_2: array
            event indicator in the second dimension.
        """
        ## define local instances of the globally initiated variables
        alpha = self.alpha
        indep_1 = self.indep_1
        lam_1 = self.lam_1
        rho_1 = self.rho_1
        r_1 = self.r_1
        indep_2 = self.indep_2
        lam_2 = self.lam_2
        rho_2 = self.rho_2
        r_2 = self.r_2
        ### Now define survival quantities
        ### Baseline quantities
        # H(t) = lam*t^{rho}
        base_cum_hazard_1 = lam_1*time_1**(rho_1)
        base_cum_hazard_2 = lam_2*time_2**(rho_2)
        # h(t) = lam*rho*t^{rho-1}
        base_hazard_1 = lam_1*rho_1*time_1**(rho_1-1)
        base_hazard_2 = lam_2*rho_2*time_2**(rho_2-1)
        # h(t|X) = h(t)*exp(X'β)
        #conditional_hazard_1 = base_hazard_1 * tt.exp(indep_1)
        #conditional_hazard_2 = base_hazard_2 * tt.exp(indep_2)
        # Transformed cumulative hazard:
        # H(t|X) = log(1+r*H(t)*exp(X'β))/r
        conditional_cum_hazard_1 = tt.log(1 + r_1 * base_cum_hazard_1 * tt.exp(indep_1))/r_1
        conditional_cum_hazard_2 = tt.log(1 + r_2 * base_cum_hazard_2 * tt.exp(indep_2))/r_2
        # S(t|X) = exp(-H(t|X))
        surv_1 = tt.exp(-conditional_cum_hazard_1)
        surv_2 = tt.exp(-conditional_cum_hazard_2)
        ## f(t|X) = S(t|X)*h(t|X) under the transformation model
        density_1 = base_hazard_1*tt.exp(indep_1)*(1+r_1*base_cum_hazard_1*tt.exp(indep_1))**-(1+r_1**(-1))
        density_2 = base_hazard_2*tt.exp(indep_2)*(1+r_2*base_cum_hazard_2*tt.exp(indep_2))**-(1+r_2**(-1))
        ### Copula derivatives (same Clayton expressions as above):
        log_clayton_copula = (-alpha)**(-1)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d_clayton_copula_s1 = -(alpha+1)*tt.log(surv_1)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d_clayton_copula_s2 = -(alpha+1)*tt.log(surv_2)-((alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        log_d2_clayton_copula_s1_s2 = tt.log(alpha+1)+(-(alpha+1))*tt.log(surv_1*surv_2)-((2*alpha+1)/alpha)*tt.log(surv_1**(-alpha)+surv_2**(-alpha)-1)
        ### different parts of log likelihood, one per censoring pattern
        first = delta_1*delta_2*(log_d2_clayton_copula_s1_s2+tt.log(density_1)+tt.log(density_2))
        second = delta_1*(1-delta_2)*(log_d_clayton_copula_s1+tt.log(density_1))
        third = delta_2*(1-delta_1)*(log_d_clayton_copula_s2+tt.log(density_2))
        fourth = (1-delta_1)*(1-delta_2)*log_clayton_copula
        return first + second + third + fourth

#class ExtremeValue_Censored(PositiveContinuous):
The December 2015 issue of TT&C is online! Head 'em up! Move 'em out!
import sys
from typing import Optional, Tuple

if sys.version_info >= (3, 8):
    from typing import Literal
else:
    from typing_extensions import Literal  # pragma: no cover

from ._loop import loop_last
from .console import Console, ConsoleOptions, RenderableType, RenderResult
from .control import Control
from .segment import ControlType, Segment
from .style import StyleType
from .text import Text

# How to handle content taller than the available console height.
VerticalOverflowMethod = Literal["crop", "ellipsis", "visible"]


class LiveRender:
    """Creates a renderable that may be updated.

    Args:
        renderable (RenderableType): Any renderable object.
        style (StyleType, optional): An optional style to apply to the renderable. Defaults to "".
        vertical_overflow (VerticalOverflowMethod, optional): How to treat
            content taller than the console. Defaults to "ellipsis".
    """

    def __init__(
        self,
        renderable: RenderableType,
        style: StyleType = "",
        vertical_overflow: VerticalOverflowMethod = "ellipsis",
    ) -> None:
        self.renderable = renderable
        self.style = style
        self.vertical_overflow = vertical_overflow
        # (width, height) of the most recent render; None before first render.
        self._shape: Optional[Tuple[int, int]] = None

    def set_renderable(self, renderable: RenderableType) -> None:
        """Set a new renderable.

        Args:
            renderable (RenderableType): Any renderable object, including str.
        """
        self.renderable = renderable

    def position_cursor(self) -> Control:
        """Get control codes to move cursor to beginning of live render.

        Returns:
            Control: A control instance that may be printed.
        """
        if self._shape is not None:
            _, height = self._shape
            # Return to column 0 and erase the current line, then for each of
            # the remaining height-1 lines: move up one row and erase it.
            return Control(
                ControlType.CARRIAGE_RETURN,
                (ControlType.ERASE_IN_LINE, 2),
                *(
                    (
                        (ControlType.CURSOR_UP, 1),
                        (ControlType.ERASE_IN_LINE, 2),
                    )
                    * (height - 1)
                )
            )
        # Nothing has been rendered yet; no cursor movement needed.
        return Control()

    def restore_cursor(self) -> Control:
        """Get control codes to clear the render and restore the cursor to its previous position.

        Returns:
            Control: A Control instance that may be printed.
        """
        if self._shape is not None:
            _, height = self._shape
            # One (cursor-up, erase-line) pair per previously rendered line.
            return Control(
                ControlType.CARRIAGE_RETURN,
                *((ControlType.CURSOR_UP, 1), (ControlType.ERASE_IN_LINE, 2)) * height
            )
        return Control()

    def __rich_console__(
        self, console: Console, options: ConsoleOptions
    ) -> RenderResult:
        # Render the wrapped renderable into segment lines, then apply the
        # configured vertical-overflow treatment if it is too tall.
        renderable = self.renderable
        _Segment = Segment  # local alias for repeated lookups below
        style = console.get_style(self.style)
        lines = console.render_lines(renderable, options, style=style, pad=False)

        shape = _Segment.get_shape(lines)
        _, height = shape
        if height > options.size.height:
            if self.vertical_overflow == "crop":
                # Drop everything below the visible area.
                lines = lines[: options.size.height]
                shape = _Segment.get_shape(lines)
            elif self.vertical_overflow == "ellipsis":
                # Keep height-1 lines and append a centered "..." marker line.
                lines = lines[: (options.size.height - 1)]
                overflow_text = Text(
                    "...",
                    overflow="crop",
                    justify="center",
                    end="",
                    style="live.ellipsis",
                )
                lines.append(list(console.render(overflow_text)))
                shape = _Segment.get_shape(lines)
        # Remember the final shape so position_cursor/restore_cursor can
        # move back over exactly what was drawn.
        self._shape = shape

        for last, line in loop_last(lines):
            yield from line
            if not last:
                yield _Segment.line()
Certifying practitioners in the safety profession. Protecting what matters most. BCSP is the leader in high-quality credentialing for safety, health, and environmental practitioners. Any organization, such as a union, construction firm, project owner, or government agency, can join the STS and STSC Sponsorship Program.
# -*- coding: utf-8 -*- # Generated by Django 1.9.1 on 2016-01-23 19:51 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('public', '0003_auto_20150518_1613'), ] operations = [ migrations.CreateModel( name='Message', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('content', models.TextField(max_length=5000, verbose_name=b'\xe5\x86\x85\xe5\xae\xb9')), ('unread', models.BooleanField(default=True, verbose_name=b'\xe6\x9c\xaa\xe8\xaf\xbb')), ('is_spam', models.BooleanField(default=False, verbose_name=b'\xe5\x9e\x83\xe5\x9c\xbe')), ('trashed', models.BooleanField(default=False, verbose_name=b'\xe5\x9b\x9e\xe6\x94\xb6\xe7\xab\x99')), ('created_at', models.DateTimeField(auto_now_add=True)), ], ), migrations.AlterField( model_name='user', name='groups', field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'), ), migrations.AlterField( model_name='user', name='last_login', field=models.DateTimeField(blank=True, null=True, verbose_name='last login'), ), migrations.AddField( model_name='message', name='come', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='message', name='reply', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='public.Message'), ), migrations.AddField( model_name='message', name='to', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL), ), ]
Only the members of the group, followers of the fanpage or friends of an account see the posts (news, announcements, photos) that are there. Merely because Facebook has 600 million users does not mean that all can see everything. Facebook entries do not show up in the main search engines (Google, Bing, Yahoo, Goodsearch) either. So very few really have access to what the posts are doing on Facebook. On the other hand, the VFW WebCom Networks free websites that are provided to each and every Dept, district or post across the VFW are accessible to every internet user around the world. There are lots of veterans, troopers and others that use search engines, and if your post is using their VFW WebCom website they will find you. I have fanpages set up for my dept, district and post, and when I post a new story to one of my VFW WebCom websites I take a link to that story and post the link to the Facebook fanpages, bringing the followers to the VFW WebCom website. Anyone with other thoughts on this? And yes, I try to post — I need to get in the habit of doing it! But we are still hitting some stumbling blocks here in FLA. This group stuff has got me confused — can't tell who owns what. Sad!
#!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# UNDONE: Need to locate installed Python on Windows

"""Common 'wrapper' script used to invoke an 'external' Python script.

This module is intended to be invoked using Splunk's internal Python stack
and uses the subprocess module to execute another Python script using the
platform's installed Python stack."""

from os import path
from subprocess import Popen, STDOUT
import sys


def extern(fname):
    """Invoke the given 'external' python script, forwarding this
    process's command-line arguments to it."""
    run([fname] + sys.argv[1:])


def run(argv):
    """Run the platform Python interpreter on `argv` (script + arguments),
    merging the child's stderr into stdout, and block until it exits.

    NOTE(review): the interpreter path is hard-coded to /usr/bin/python and
    the child runs with an empty environment (env={}) — both look deliberate
    (isolating from Splunk's internal stack, per the module docstring), so
    they are preserved; confirm before changing.
    """
    process = Popen(["/usr/bin/python"] + argv, env={}, stderr=STDOUT)
    # Fix: communicate() already waits for the child to terminate, so the
    # previous trailing process.wait() call was redundant and has been removed.
    process.communicate()


if __name__ == "__main__":
    run(sys.argv[1:])
Tenk deg moderne design som ikke tar noe fra komforten, storslått interir og en service som forutser, men aldri antar. Enkelte hotell får du kun booket til vanlig pris, så hvis en aktiv m rabattkode ikke.. BoardShop rabattkoder i September 2018 : Legg til e-posten din og få tilsendt BoardShop rabattkoder helt gratis! Translation all your personal information is completely safe. And we&apos;ll do it without the elitist attitude so common in.. for an answer to this question now. Our team is checking Strength Shop USA&apos;s customer service pages and FAQs and other online sources for an answer. Plus, add 3 or more items to your cart to qualify for free shipping. Ground shipping varies by location or is free with orders of 5,000 or more. This question has 0 votes by Our team of shopping analysts is out looking for the answer to this question now. This promotion was activated 3 weeks ago and it expires on Friday, September 14, 2018. Rogue Fitness discount code. Added 8 months ago Ongoing deal Coupon details More FlagHouse promo codes Strength Shop USA is offering a 10 off promo code on their website. This promotion was activated 2 months ago and it expires on Sunday, September 16, 2018. For a limited time, you can 5 off your next buy at Fitness Direct. We also have dedicated websites for Canada, EU, and Australia too. 60Off sale, up to 60 Off Hot Deals. Once you apply the promo code, your savings should be applied, and you can complete checkout. Without a factory, workers wind up unemployed or in service industry jobs for minimum wage, with less money to spend, they are more likely to purchase cheap, imported goods from large retail box stores. Have you shopped at Rogue Fitness?
#
# Copyright (c) 2013 Pavol Rusnak
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#

import binascii
import hashlib
import hmac
import os
import sys
import unicodedata

from pbkdf2 import PBKDF2

# PBKDF2-HMAC-SHA512 iteration count used when stretching a mnemonic into a seed.
PBKDF2_ROUNDS = 2048


class Mnemonic(object):
    """BIP-39 style mnemonic encoder/decoder backed by a 2048-word wordlist.

    NOTE(review): the byte-escape '\\xe3\\x80\\x80' used below is the UTF-8
    byte sequence of U+3000 (ideographic space), which only matches Japanese
    input on Python 2 byte strings — this module appears to target
    Python 2; confirm before running under Python 3.
    """

    def __init__(self, language):
        # 2048 words -> each word encodes exactly 11 bits.
        self.radix = 2048
        with open('%s/%s.txt' % (self._get_directory(), language), 'r') as f:
            self.wordlist = [w.strip() for w in f.readlines()]
        if len(self.wordlist) != self.radix:
            raise Exception('Wordlist should contain %d words, but it contains %d words.' % (self.radix, len(self.wordlist)))

    @classmethod
    def _get_directory(cls):
        # Wordlist files live in a 'wordlist' directory next to this module.
        return os.path.join(os.path.dirname(__file__), 'wordlist')

    @classmethod
    def list_languages(cls):
        # One language per '<name>.txt' file in the wordlist directory.
        return [ f.split('.')[0] for f in os.listdir(cls._get_directory()) if f.endswith('.txt') ]

    @classmethod
    def normalize_string(cls, txt):
        # Accept byte strings (decoded as UTF-8) or text strings and return
        # the NFKD-normalized text. The conditional isinstance checks select
        # the appropriate types on both Python 2 and Python 3.
        if isinstance(txt, str if sys.version < '3' else bytes):
            utxt = txt.decode('utf8')
        elif isinstance(txt, unicode if sys.version < '3' else str):
            utxt = txt
        else:
            raise Exception("String value expected")
        return unicodedata.normalize('NFKD', utxt)

    @classmethod
    def detect_language(cls, code):
        # Heuristic: look the FIRST word of the mnemonic up in every known
        # wordlist and return the first language that contains it.
        # NOTE(review): only the first word is consulted — presumably the
        # official wordlists are disjoint enough for this; confirm.
        first = code.split(' ')[0]
        languages = cls.list_languages()
        for lang in languages:
            mnemo = cls(lang)
            if first in mnemo.wordlist:
                return lang
        raise Exception("Language not detected")

    def generate(self, strength = 128):
        # `strength` is the entropy size in bits; BIP-39 requires a multiple of 32.
        if strength % 32 > 0:
            raise Exception('Strength should be divisible by 32, but it is not (%d).' % strength)
        return self.to_mnemonic(os.urandom(strength // 8))

    def to_mnemonic(self, data):
        """Encode raw entropy bytes as a mnemonic sentence."""
        if len(data) % 4 > 0:
            raise Exception('Data length in bits should be divisible by 32, but it is not (%d bytes = %d bits).' % (len(data), len(data) * 8))
        # Checksum: the first (bits/32) bits of SHA-256(data) are appended
        # to the entropy bit string.
        h = hashlib.sha256(data).hexdigest()
        b = bin(int(binascii.hexlify(data), 16))[2:].zfill(len(data) * 8) + \
            bin(int(h, 16))[2:].zfill(256)[:len(data) * 8 // 32]
        # Split the combined bit string into 11-bit groups; each group
        # indexes one word in the wordlist.
        result = []
        for i in range(len(b) // 11):
            idx = int(b[i * 11:(i + 1) * 11], 2)
            result.append(self.wordlist[idx])
        if self.detect_language(' '.join(result)) == 'japanese':
            # Japanese must be joined by ideographic space.
            result_phrase = '\xe3\x80\x80'.join(result)
        else:
            result_phrase = ' '.join(result)
        return result_phrase

    def check(self, mnemonic):
        """Return True when the mnemonic's embedded checksum is valid."""
        if self.detect_language(mnemonic.replace('\xe3\x80\x80', ' ')) == 'japanese':
            mnemonic = mnemonic.replace('\xe3\x80\x80', ' ')  # Japanese will likely input with ideographic space.
        mnemonic = mnemonic.split(' ')
        # Valid sentences have a multiple of 3 words (32 entropy bits + 1
        # checksum bit per 3 words of 11 bits).
        if len(mnemonic) % 3 > 0:
            return False
        try:
            # Map every word back to its 11-bit binary index; unknown words
            # raise ValueError from list.index and fail the check.
            idx = map(lambda x: bin(self.wordlist.index(x))[2:].zfill(11), mnemonic)
            b = ''.join(idx)
        except:
            return False
        l = len(b)
        d = b[:l // 33 * 32]  # entropy bits (32/33 of the total)
        h = b[-l // 33:]      # checksum bits (1/33 of the total)
        # Rebuild the entropy bytes and recompute the expected checksum.
        nd = binascii.unhexlify(hex(int(d, 2))[2:].rstrip('L').zfill(l // 33 * 8))
        nh = bin(int(hashlib.sha256(nd).hexdigest(), 16))[2:].zfill(256)[:l // 33]
        return h == nh

    @classmethod
    def to_seed(cls, mnemonic, passphrase = ''):
        # BIP-39 seed derivation: 64 bytes of PBKDF2-HMAC-SHA512 with the
        # salt 'mnemonic' + passphrase.
        mnemonic = cls.normalize_string(mnemonic)
        passphrase = cls.normalize_string(passphrase)
        return PBKDF2(mnemonic, u'mnemonic' + passphrase, iterations=PBKDF2_ROUNDS, macmodule=hmac, digestmodule=hashlib.sha512).read(64)
ChemIDplus - 14116-04-2 - XNNKPNJEMYLRGN-UHFFFAOYSA-N - 1,3-Propanediol, 2-methyl-2-(((5,6,7,8-tetrahydro-2-naphthyl)oxy)methyl)- - Similar structures search, synonyms, formulas, resource links, and other chemical information. mouse LD50 intraperitoneal 865mg/kg (865mg/kg) Annales Pharmaceutiques Francaises. Vol. 24, Pg. 429, 1966.
import cv2
from Perceptron1 import predict
import numpy as np
import subsampling
import Blur


def generarImagenesVideo(video, tamx, tamy):
    """Read `video` frame by frame, classify every 5th frame with the
    perceptron and overlay the predicted label on the displayed frame.

    video : path of the video file to analyse.
    tamx, tamy : intended frame size.
        NOTE(review): currently unused — the 64x64 size is hard-coded in the
        preprocessing call below; confirm whether they should be threaded through.
    """
    cap = cv2.VideoCapture(video)
    i = 1
    while True:
        ret, frame = cap.read()
        # Fix: cap.read() returns (False, None) once the stream is exhausted
        # (or the file cannot be decoded); without this guard the loop spun
        # forever and cvtColor(None, ...) raised an error at end of video.
        if not ret:
            break
        i += 1
        # Probar cambiando el tiempo (only every 5th frame is classified)
        if i % 5 == 0:
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            smallImage = Blur.aplicarDiferenciasGaussianas(gray, 64, 64, 9, 9, 4, 9, 9, 8)
            # ==== matriz a vector: flatten and prepend the bias term ====
            vector = np.resize(smallImage, (1, smallImage.size))
            vectorImage = np.concatenate(([[1]], vector), axis=1)
            pred = predict(vectorImage)
            # NOTE(review): assumes `pred` is 1-D so tolist()/index() locate
            # the winning neuron — confirm against Perceptron1.predict.
            lista = pred.tolist()
            peak = pred.max()
            neuron = lista.index(peak) + 1  # neuron activado y buscado en la lista
            rojo = 0
            azul = 0
            verde = 0
            # Only label the frame when the winning activation is confident.
            if peak > 0.7:
                if neuron == 1:
                    titulo = "FUEGO"
                    rojo = 255
                else:
                    if neuron == 2:
                        titulo = "ALERTA"
                        verde = 255
                    else:
                        titulo = ""
                        azul = 255
            else:
                titulo = " "
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(frame, titulo, (19, 90), font, 2, (azul, verde, rojo), 5)
            # cv2.imshow("nitidaGris", gray)
            cv2.imshow("original", frame)
            k = cv2.waitKey(90)
            # NOTE(review): waitKey returns the pressed key code; 90 is 'Z'.
            # If the intent was "exit on Esc" this should be k == 27 — confirm.
            if k == 90:
                break
    cap.release()  # free the capture handle before tearing down the windows
    cv2.destroyAllWindows()


def pintarFuego(frame, font):
    cv2.putText(frame, "FUEGO", (19, 90), font, 2, (0, 0, 255), 5)


def pintarAlerta(frame, font):
    cv2.putText(frame, "ALERTA", (19, 90), font, 2, (0, 255, 0), 5)


def pintarAnomalia(frame, font):
    cv2.putText(frame, "", (19, 90), font, 2, (255, 0, 0), 5)


# Renamed from `dir` to stop shadowing the built-in dir().
video_path = 'fuego.mp4'
generarImagenesVideo(video_path, 64, 64)
I'm impressed with the way garbage is dealt with here. All recyclable packaging and containers are collected free of charge every other week and there's a weekly collection of 'real' garbage, which costs $2 per bag. You buy little blue labels in advance and stick them on the bags when you put them out. As the recycling is so comprehensive, we find we're only generating one bag of rubbish a fortnight - so we spend $52 a year (£26) for this service, which seems reasonable. I've been composting all the vegetable waste we produce in two plastic compost bins that were here when we arrived, but it seems this isn't that common a practice here. Recently the County advertised in the local paper, asking people to apply for a rotating composter as part of a County-wide trial. I've always fancied one of those, so I applied and was thrilled to find out the other week that I had been selected to take part, along with 19 other households around the County (75 applications had been received in all). Last night there was a meeting about the trial and I got to collect my new toy. There were some interesting talks, including one from Doug Parker, who farms on an organically-certified farm in South Marysburgh. It was great to listen to someone who is so passionate about compost (which, by the way, is mainly pronounced to rhyme with 'post' around here). He had even brought some of his compost along for everyone to examine. Then two representatives of the manufacturers described the features of the composter. It's supposed to be pest-proof (which is good, as we had a rat living in our compost bin in Sale for a while), although they had heard of one installation in Vermont which was regularly played with by a bear. This morning Mike and I set up the composter and I placed it next to the back door (this involved shovelling out a big snow drift). It's made by Sun-Mar, who started out making composting toilets and this is built on the same principles. 
The rotating drum speeds up the composting process, so that compost can be extracted from the centre in as little as two to four weeks. Although it might have to get a bit warmer outside before that's achievable, I suspect. The retail price of one of these is $250. I've paid $50 to take part in the trial, but I think $30 of that is refundable at the end of it, so I'll get my composter for $20. Bargain!
import sys
import time
import string

# urwid is optional: the curses frontend is simply disabled when it is missing.
CURSES_ON = True
try:
    import urwid
except ImportError:
    CURSES_ON = False

import srddl.core.frontend_loader as scf
import srddl.core.helpers as sch
import srddl.data as sd

# Crude debug log: the UI owns the terminal, so debug output goes to a file.
LOGS = open('/tmp/curses_logs.txt', 'w')


def _write(*args):
    """Append a space-joined representation of *args* to the debug log."""
    LOGS.write(' '.join(str(s) for s in args) + '\n')
    LOGS.flush()


class Curses(scf.Frontend):
    """Frontend entry point; enabled only on a TTY with urwid available."""

    class Meta:
        name = 'curses'
        help = 'awesome curses ui!'
        enabled = CURSES_ON and sys.stdin.isatty()

    def process(self, args):
        cw = CursesMainWindow()
        cw.main()


class KeyBinding(sch.NamedRecord):
    """Record associating a list of human-readable key names with a callback."""

    class Meta:
        fields = ['keys', 'function']


# Translation table between human-readable key names and urwid key names.
_KEYS = [('Ctrl', 'ctrl'), ('Alt', 'meta'), ('+', ' ')]


def _verbose_to_urwid_keys(keys):
    """Convert verbose key names (e.g. 'Ctrl+x') to urwid ones ('ctrl x')."""
    def sub(key):
        for verb, uverb in _KEYS:
            key = key.replace(verb, uverb)
        return key
    return [sub(key) for key in keys]


def _urwid_to_verbose(key):
    """Convert an urwid key name back to its verbose, displayable form."""
    for verb, uverb in _KEYS:
        key = key.replace(uverb, verb)
    return key


if CURSES_ON:
    class StatusBar(urwid.AttrMap):
        """One-line footer that shows permanent and transient messages."""

        def __init__(self, mw):
            self.wtext = urwid.Text('')
            super().__init__(self.wtext, 'footer')
            self.msgs, self.mw = [], mw
            self.add_text('Welcome!')

        def add_text(self, txt, timeout=0):
            """Display *txt*; a positive *timeout* makes it expire after
            that many seconds (an alarm triggers the cleanup)."""
            if 0 < timeout:
                self.msgs.append((txt, time.time() + timeout))
                self.mw.loop.set_alarm_in(timeout, self._reload_text)
            else:
                self.msgs.append((txt, 0))
            self._set_text(txt)

        def _reload_text(self, obj, user_data):
            # Drop every expired message, then show the most recent survivor.
            count, t0 = 0, time.time()
            for it in range(len(self.msgs)):
                idx = it - count  # compensate for entries already deleted
                if self.msgs[idx][1] and self.msgs[idx][1] < t0:
                    del self.msgs[idx]
                    count += 1
            if count:
                self._set_text(self.msgs[-1][0])

        def _set_text(self, markup):
            if isinstance(markup, str):
                markup = [markup]
            self.wtext.set_text([' '] + markup)

    class StatusBarAsker(urwid.Edit, metaclass=urwid.signals.MetaSignals):
        """Edit widget used to prompt the user inside the status bar."""

        signals = ['ask_done']

        def __init__(self, *args, **kwargs):
            # Optional per-character validator: rejected keypresses are dropped.
            self.validator = kwargs.pop('validator', None)
            super().__init__(*args, **kwargs)

        def keypress(self, size, key):
            if key == 'enter':
                urwid.emit_signal(self, 'ask_done', self.get_edit_text())
            elif key == 'esc':
                # Cancelled: emit None so the caller can skip its callback.
                urwid.emit_signal(self, 'ask_done', None)
            elif len(key) != 1 or self.validator is None or self.validator(key):
                super().keypress(size, key)

    class HexView(urwid.ListWalker):
        """ListWalker rendering one hex-dump line per (line, column) position."""

        def __init__(self, data):
            self.focus = (0, 0)
            self.view = sd.DataView(data)

        def __getitem__(self, position):
            line, _ = position
            _write('position =', position)
            if 0 <= line < self.view.max_lines():
                addr, data = list(self.view(line, 1).items())[0]
                # Widgets for columns: the address cell, then the data cells.
                widgets = [('pack', urwid.Text([('addr', addr)]))]
                data = [[('pack', urwid.Text(b)) for b in d] for d in data['data']]
                widgets.extend([urwid.Columns(d, dividechars=1) for d in data])
                return urwid.Columns(widgets, dividechars=2, min_width=len(addr))
            raise IndexError

        def next_position(self, position):
            if position[0] < self.view.max_lines():
                return (position[0] + 1, position[1])
            raise IndexError

        def prev_position(self, position):
            if position[0] != 0:
                return (position[0] - 1, position[1])
            raise IndexError

    class CursesMainWindow:
        """Top-level window: hex-dump body, status-bar footer, main loop."""

        def __init__(self):
            # Non-UI data.
            self.data = sd.FileData('/bin/ls')

            # Palette of colors.
            self.palette = [
                ('footer', 'black', 'light gray'),
                ('addr', 'white', 'black'),
            ]

            self.loop = None

            # Build main view.
            ## Body
            self.body = urwid.ListBox(HexView(self.data))
            ## Footer
            self.status_bar = StatusBar(self)
            ## Main view
            self.view = urwid.Frame(self.body, footer=self.status_bar)

            # Main loop
            self.loop = urwid.MainLoop(self.view, palette=self.palette,
                                       unhandled_input=self.unhandled_input)

        def unhandled_input(self, key):
            """Dispatch any key the widgets did not consume to a key binding."""
            def exit_program(key):
                '''quit the program'''
                raise urwid.ExitMainLoop()

            def goto_offset(key):
                def validator(key):
                    return key in string.hexdigits
                def done(offset):
                    _write('offset select =', int(offset, 16))
                self.ask('Go to offset 0x', done, validator=validator)

            KEYBINDINGS = [
                ('General features:', 1, [
                    KeyBinding(['q', 'Q'], exit_program),
                ]),
                ('Move around:', 1, [
                    KeyBinding(['g'], goto_offset),
                ]),
            ]
            for _, _, bindings in KEYBINDINGS:
                for binding in bindings:
                    if key in _verbose_to_urwid_keys(binding.keys):
                        binding.function(key)
                        return True
            # Typo fixed in the user-visible message ('Unknwon' -> 'Unknown').
            txt = 'Unknown key binding \'{}\', try \'h\' to see them all.'
            self.status_bar.add_text(txt.format(_urwid_to_verbose(key)), timeout=2)

        def ask(self, prompt, callback, validator=None):
            """Temporarily replace the footer with an input prompt; *callback*
            receives the entered text (it is not called on cancel)."""
            edit = StatusBarAsker(' ' + prompt, validator=validator)
            def ask_done(content):
                # BUG FIX: urwid.disconnect_signal takes (obj, name, callback);
                # the original passed `self` as a spurious first argument.
                urwid.disconnect_signal(edit, 'ask_done', ask_done)
                self.view.set_focus('body')
                self.view.set_footer(self.status_bar)
                if content is not None:
                    callback(content)
            self.view.set_footer(urwid.AttrMap(edit, 'footer'))
            self.view.set_focus('footer')
            urwid.connect_signal(edit, 'ask_done', ask_done)

        def main(self):
            """Run the urwid main loop until the program exits."""
            self.loop.run()
Many people shop for the lowest priced modular home they can find and they think that it is like shopping for a new car and just compare “feature-to-feature” for pricing. It is paramount to research the “brand” for good quality and that the modular manufacturer has a good reputation in the modular home industry. Unfortunately when people do shop around and when they do find the lowest priced modular home it is usually of inferior quality. In most cases when people choose a modular home solely by the cheapest price they can find, it can be the most expensive and problematic decision they can make in the long run! The “you get what you pay for rule” usually applies here. Also the builder will often use poor quality contractors and brands or even worse there may be no builder involved at all and it is up to the customer to do everything themselves! Pro’s Edge builds turnkey modular homes in counties in Eastern Massachusetts and Southern New Hampshire. Scope of Work: Make sure each proposal lists all tasks required to complete your home. If one proposal has a substantially lower price, it probably does not include all the tasks. If you sign a contract that doesn’t include every task, the builder will come back to you for more money after he begins construction of your home. Building Specifications: Look closely at how the estimate proposes to complete each task. A builder can offer a much lower price by selecting a less expensive set of building specifications or by not listing any specifications at all for some tasks. Exclusions: Ask each builder to document in writing which tasks are not included in his proposal. The most complete estimates include these “exclusions” so you aren’t left guessing what you could be responsible for. Allowances: We offer a “Full Contract” price that we will stand behind. We do not work from “Allowances” which other builders may use that could be much lower than actual costs. 
When comparison shopping with builders’ proposals, always use an “apples-to-apples” comparison. Since there are hundreds of details involved in building a home, it can be very difficult to compare the levels of detail. It is paramount that you choose a good, reputable builder, because you do not want to make a decision that you may later regret.
import re import sys import requests from bs4 import BeautifulSoup URL = 'https://wiki.eveonline.com/en/wiki' STEELPLATEMASS = 2812500 MWDMASS = 50000000 def fetch_stat(statname, shipname): r = requests.get(URL + '/' + shipname) soup = BeautifulSoup(r.text) paragraphs = soup.find_all('p') p = '{0}\s+([\d\s]+)(.*)$'.format(statname) for paragraph in paragraphs: if statname in paragraph: return paragraph def parse_mass(massstr): massstr = massstr.encode('utf-8') p = '([\d,]+)' mass = re.search(p, massstr).groups()[0].replace(',','') return mass def main(): ship = sys.argv[1] m = fetch_stat('Mass', ship) shipmass = int(parse_mass(m)) print '---' + ship + '---' h = fetch_stat('Low Slots', ship) lowslots = int(parse_mass(h)) print '\tShip Mass: {0}'.format(shipmass) print '\tLow Slots: {0}'.format(lowslots) platemass = STEELPLATEMASS * lowslots print '\tPlate Mass: {0}'.format(platemass) total = shipmass + platemass + MWDMASS print '\tTotal Mass(ship+plate+MWD): {0}'.format(total) print '\tMass With Higgs Anchor: {0}'.format(total*2) if __name__ == '__main__': main()
These high performance shadows are perfectly paired in striking combinations to highlight, contour and create an alluring eye. Ideal for creating a subtle two-tone look, or adding depth and drama to an individual eyeshadow, allow your eyes to pop with a splash of colour, from daring purple tones, to simmering bronze. Using the Crease Smudge Brush, define the crease with the darker of the two shades, and apply the lighter shade to the entire lid. Use dry for a soft look, or wet for a dramatic effect. Lauroyl Lysine: An amino acid derived from coconut oil, known for its skin conditioning properties. Caprylyl Glycol: This humectant has moisturising characteristics, optimising performance and application.
# coding=utf-8 """Author: Krzysztof Trzepla Copyright (C) 2015 ACK CYFRONET AGH This software is released under the MIT license cited in 'LICENSE.txt' Brings up a S3 storage. """ from boto.s3.connection import S3Connection, OrdinaryCallingFormat from . import common, docker def _node_up(image, buckets, name, uid): hostname = common.format_hostname([name, 's3'], uid) container = docker.run( image=image, hostname=hostname, name=hostname, detach=True) settings = docker.inspect(container) ip = settings['NetworkSettings']['IPAddress'] port = 4569 host_name = '{0}:{1}'.format(ip, port) access_key = 'AccessKey' secret_key = 'SecretKey' for bucket in buckets: connection = S3Connection(access_key, secret_key, host=ip, port=port, is_secure=False, calling_format=OrdinaryCallingFormat()) connection.create_bucket(bucket) return { 'docker_ids': [container], 'host_name': host_name, 'access_key': access_key, 'secret_key': secret_key, } def up(image, buckets, name, uid): return _node_up(image, buckets, name, uid)
Are you looking to send a holiday gift hamper to Bulgaria? We are happy to help! How long does it take to deliver a hamper to Bulgaria? Is weekend delivery available in Bulgaria? We can deliver your gift in Bulgaria on Sunday with advance notice. Gift delivery in Bulgaria is available on all other days of the week. How much does it cost to deliver a hamper to Bulgaria? While the actual cost of delivery varies by item weight and delivery location, we have made it easy for our customers to shop for gifts to Bulgaria. With AceOfHeartsGiftBaskets.com the delivery fee is always included in the price of all of our gifts in our Bulgaria catalog, so you can expect no surprises.
""" Firebird database backend for Django. Requires KInterbasDB 3.3+: http://www.firebirdsql.org/index.php?op=devel&sub=python """ import re import sys import base64 try: import kinterbasdb as Database except ImportError, e: from django.core.exceptions import ImproperlyConfigured raise ImproperlyConfigured("Unable to load KInterbasDB module: %s" % e) from django.db import utils from django.db.backends import * from django.db.backends.signals import connection_created from firebird.creation import DatabaseCreation from firebird.introspection import DatabaseIntrospection from firebird.client import DatabaseClient from django.conf import settings import django.utils.encoding as utils_encoding import kinterbasdb.typeconv_datetime_stdlib as typeconv_datetime import kinterbasdb.typeconv_fixed_decimal as typeconv_fixeddecimal import kinterbasdb.typeconv_text_unicode as typeconv_textunicode DatabaseError = Database.DatabaseError IntegrityError = Database.IntegrityError OperationalError = Database.OperationalError class CursorWrapper(object): """ A thin wrapper around kinterbasdb cursor class so that we can catch particular exception instances and reraise them with the right types. Django uses "format" style placeholders, but firebird uses "qmark" style. This fixes it -- but note that if you want to use a literal "%s" in a query, you'll need to use "%%s". We need to do some data translation too. 
See: http://kinterbasdb.sourceforge.net/dist_docs/usage.html for Dynamic Type Translation """ def __init__(self, cursor): self.cursor = cursor def __getattr__(self, attr): if attr in self.__dict__: return self.__dict__[attr] else: return getattr(self.cursor, attr) def __iter__(self): return iter(self.cursor) def execute(self, query, args=None): # This is a workaround for KInterbasDB locks if query.find('DROP') != -1: # self.cursor.close() # someday will recreate cursor here pass try: #print query, args if not args: args = () return self.cursor.execute(query) else: query = self.convert_query(query, len(args)) return self.cursor.execute(query, args) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2] except Database.DatabaseError, e: raise utils.DatabaseError, utils.DatabaseError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2] def executemany(self, query, args): try: #print query, args if not args: args = () return self.cursor.executemany(query) else: query = self.convert_query(query, len(args[0])) return self.cursor.executemany(query, args) except Database.IntegrityError, e: raise utils.IntegrityError, utils.IntegrityError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2] except Database.DatabaseError, e: raise utils.DatabaseError, utils.DatabaseError(*tuple(e)+('sql: '+query,)+args), sys.exc_info()[2] def convert_query(self, query, num_params): return query % tuple("?" * num_params) def fetchone(self): return self.cursor.fetchone() def fetchmany(self, size=None): return self.cursor.fetchmany(size) def fetchall(self): return self.cursor.fetchall() class DatabaseFeatures(BaseDatabaseFeatures): """ This class defines bd-specific features. 
- can_return_id_from_insert return insert id right in SELECT statements as described at http://firebirdfaq.org/faq243/ for Firebird 2+ """ can_return_id_from_insert = False class DatabaseOperations(BaseDatabaseOperations): """ This class encapsulates all backend-specific differences, such as the way a backend performs ordering or calculates the ID of a recently-inserted row. """ compiler_module = 'firebird.compiler' def __init__(self, connection, dialect=3): super(DatabaseOperations, self).__init__(connection) self.dialect = dialect self._cache = None self._engine_version = None self.FB_CHARSET_CODE = 3 #UNICODE_FSS def autoinc_sql(self, table, column): # To simulate auto-incrementing primary keys in Firebird, we have to create a generator and a trigger. gn_name = self.quote_name(self.get_generator_name(table)) tr_name = self.quote_name(self.get_trigger_name(table)) tbl_name = self.quote_name(table) col_name = self.quote_name(column) generator_sql = """CREATE GENERATOR %(gn_name)s""" % locals() trigger_sql = """ CREATE TRIGGER %(tr_name)s FOR %(tbl_name)s BEFORE INSERT AS BEGIN IF (NEW.%(col_name)s IS NULL) THEN NEW.%(col_name)s = GEN_ID(%(gn_name)s, 1); END""" % locals() return generator_sql, trigger_sql def date_extract_sql(self, lookup_type, field_name): # Firebird uses WEEKDAY keyword. 
lkp_type = lookup_type if lkp_type == 'week_day': lkp_type = 'weekday' return "EXTRACT(%s FROM %s)" % (lkp_type.upper(), field_name) def date_trunc_sql(self, lookup_type, field_name): if lookup_type == 'year': sql = "EXTRACT(year FROM %s)||'-01-01 00:00:00'" % field_name elif lookup_type == 'month': sql = "EXTRACT(year FROM %s)||'-'||EXTRACT(month FROM %s)||'-01 00:00:00'" % (field_name, field_name) elif lookup_type == 'day': sql = "EXTRACT(year FROM %s)||'-'||EXTRACT(month FROM %s)||'-'||EXTRACT(day FROM %s)||' 00:00:00'" % (field_name, field_name, field_name) return "CAST(%s AS TIMESTAMP)" % sql def lookup_cast(self, lookup_type): if lookup_type in ('iexact', 'istartswith', 'iendswith'): return "UPPER(%s)" return "%s" def fulltext_search_sql(self, field_name): # We use varchar for TextFields so this is possible # Look at http://www.volny.cz/iprenosil/interbase/ip_ib_strings.htm return '%%s CONTAINING %s' % self.quote_name(field_name) def return_insert_id(self): return 'RETURNING %s', () def last_insert_id(self, cursor, table_name, pk_name): # Method used for Firebird prior 2. 
Method is unreliable, but nothing else could be done cursor.execute('SELECT GEN_ID(%s, 0) FROM rdb$database' % (self.get_generator_name(table_name),)) return cursor.fetchone()[0] def max_name_length(self): return 31 def convert_values(self, value, field): return super(DatabaseOperations, self).convert_values(value, field) def query_class(self, DefaultQueryClass): return query.query_class(DefaultQueryClass) def quote_name(self, name): # Dialect differences as described in http://mc-computing.com/databases/Firebird/SQL_Dialect.html if self.dialect==1: name = name.upper() else: if not name.startswith('"') and not name.endswith('"'): name = '"%s"' % util.truncate_name(name, self.max_name_length()) # Handle RDB$DB_KEY calls if name.find('RDB$DB_KEY') > -1: name = name.strip('"') return name def get_generator_name(self, table_name): return '%s_GN' % util.truncate_name(table_name, self.max_name_length() - 3).upper() def get_trigger_name(self, table_name): return '%s_TR' % util.truncate_name(table_name, self.max_name_length() - 3).upper() def year_lookup_bounds(self, value): first = '%s-01-01' second = self.conv_in_date('%s-12-31 23:59:59.999999' % value) return [first % value, second] def conv_in_ascii(self, text): if text is not None: # Handle binary data from RDB$DB_KEY calls if text.startswith('base64'): return base64.b64decode(text.lstrip('base64')) return utils_encoding.smart_str(text, 'ascii') def conv_in_blob(self, text): return typeconv_textunicode.unicode_conv_in((utils_encoding.smart_unicode(text), self.FB_CHARSET_CODE)) def conv_in_fixed(self, (val, scale)): if val is not None: if isinstance(val, basestring): val = decimal.Decimal(val) # fixed_conv_in_precise produces weird numbers # return typeconv_fixeddecimal.fixed_conv_in_precise((val, scale)) return int(val.to_integral()) def conv_in_timestamp(self, timestamp): if isinstance(timestamp, basestring): # Replaces 6 digits microseconds to 4 digits allowed in Firebird timestamp = timestamp[:24] return 
typeconv_datetime.timestamp_conv_in(timestamp) def conv_in_time(self, value): import datetime if isinstance(value, datetime.datetime): value = datetime.time(value.hour, value.minute, value.second, value.microsecond) return typeconv_datetime.time_conv_in(value) def conv_in_date(self, value): if isinstance(value, basestring): if self.dialect==1: # Replaces 6 digits microseconds to 4 digits allowed in Firebird dialect 1 value = value[:24] else: # Time portion is not stored in dialect 3 value = value[:10] return typeconv_datetime.date_conv_in(value) def conv_in_unicode(self, text): if text[0] is not None: return typeconv_textunicode.unicode_conv_in((utils_encoding.smart_unicode(text[0]), self.FB_CHARSET_CODE)) def conv_out_ascii(self, text): if text is not None: # Handle binary data from RDB$DB_KEY calls if "\0" in text: return 'base64'+base64.b64encode(text) return utils_encoding.smart_unicode(text, strings_only=True) def conv_out_blob(self, text): return typeconv_textunicode.unicode_conv_out((text, self.FB_CHARSET_CODE)) class DatabaseWrapper(BaseDatabaseWrapper): """ Represents a database connection. 
""" operators = { 'exact': '= %s', 'iexact': '= UPPER(%s)', 'contains': "LIKE %s ESCAPE'\\'", 'icontains': 'CONTAINING %s', #case is ignored 'gt': '> %s', 'gte': '>= %s', 'lt': '< %s', 'lte': '<= %s', 'startswith': 'STARTING WITH %s', #looks to be faster then LIKE 'endswith': "LIKE %s ESCAPE'\\'", 'istartswith': 'STARTING WITH UPPER(%s)', 'iendswith': "LIKE UPPER(%s) ESCAPE'\\'", } def __init__(self, *args, **kwargs): super(DatabaseWrapper, self).__init__(*args, **kwargs) settings_dict = self.settings_dict self.settings = { 'charset': 'UNICODE_FSS', 'dialect': 3, } if settings_dict['HOST']: self.settings['host'] = settings_dict['HOST'] if settings_dict['NAME']: self.settings['database'] = settings_dict['NAME'] if settings_dict['USER']: self.settings['user'] = settings_dict['USER'] if settings_dict['PASSWORD']: self.settings['password'] = settings_dict['PASSWORD'] self.settings.update(settings_dict['OPTIONS']) self.dialect = self.settings['dialect'] if 'init_params' in self.settings: Database.init(**self.settings['init_params']) self.server_version = None self.features = DatabaseFeatures(self) self.ops = DatabaseOperations(self, dialect=self.dialect) self.client = DatabaseClient(self) self.creation = DatabaseCreation(self) self.introspection = DatabaseIntrospection(self) self.validation = BaseDatabaseValidation(self) def _cursor(self): new_connection = False if self.connection is None: new_connection = True self.connection = Database.connect(**self.settings) connection_created.send(sender=self.__class__) cursor = self.connection.cursor() if new_connection: if self.connection.charset == 'UTF8': self.ops.FB_CHARSET_CODE = 4 # UTF-8 with Firebird 2.0+ self.connection.set_type_trans_in({ 'DATE': self.ops.conv_in_date, 'TIME': self.ops.conv_in_time, 'TIMESTAMP': self.ops.conv_in_timestamp, 'FIXED': self.ops.conv_in_fixed, 'TEXT': self.ops.conv_in_ascii, 'TEXT_UNICODE': self.ops.conv_in_unicode, 'BLOB': self.ops.conv_in_blob }) self.connection.set_type_trans_out({ 'DATE': 
typeconv_datetime.date_conv_out, 'TIME': typeconv_datetime.time_conv_out, 'TIMESTAMP': typeconv_datetime.timestamp_conv_out, 'FIXED': typeconv_fixeddecimal.fixed_conv_out_precise, 'TEXT': self.ops.conv_out_ascii, 'TEXT_UNICODE': typeconv_textunicode.unicode_conv_out, 'BLOB': self.ops.conv_out_blob }) version = re.search(r'\s(\d{1,2})\.(\d{1,2})', self.connection.server_version) self.server_version = tuple([int(x) for x in version.groups()]) # feature for Firebird version 2 and above if self.server_version[0] >=2: self.features.can_return_id_from_insert = True return CursorWrapper(cursor) def get_server_version(self): return self.server_version
The System.Security.Authentication.ExtendedProtection.Configuration namespace provides support for configuration of authentication using extended protection for applications. The System.Security.Authentication.ExtendedProtection.Configuration.ExtendedProtectionPolicyElement class represents a configuration element for an System.Security.Authentication.ExtendedProtection.ExtendedProtectionPolicy. The System.Security.Authentication.ExtendedProtection.Configuration.ServiceNameElement class represents a configuration element for a service name used in a System.Security.Authentication.ExtendedProtection.Configuration.ServiceNameElementCollection. The System.Security.Authentication.ExtendedProtection.ServiceNameCollection class is a collection of service principal names that represent a configuration element for an System.Security.Authentication.ExtendedProtection.ExtendedProtectionPolicy.
import pyglet
from pyglet.window import key

import cocos
from cocos import actions, layer, sprite, scene
from cocos.director import director
import cocos.euclid as eu
import cocos.collision_model as cm

import math

import paho.mqtt.client as mqtt
import json

# NOTE: Python 2 script (print statements in on_marker below).
MAP_SIZE = (600, 600)
VELOCITY_MAX = 400
VELOCITY_INERTIA = 3 # smaller means more inertia
VELOCITY_BRAKE_VS_SPEED = 3
VELOCITY_IMPACT_ON_TURNING = 0.0025
TURNING_SPEED = 3
VELOCITY_DECLINE = 0.995 # not touching controls means the velocity will go to zero


class CollidableSprite(cocos.sprite.Sprite):
    """Sprite registered in the global collision manager via a circle shape."""

    def __init__(self, image, cx, cy, radius):
        super(CollidableSprite, self).__init__(image)
        self.position = (cx, cy)
        # NOTE(review): the `radius` parameter is ignored; the circle
        # radius is hard-coded to 25 here and below -- confirm intent.
        self.cshape = cm.CircleShape(eu.Vector2(cx, cy), 25)

    def update_in_collision_manager(self):
        # Re-insert with a shape rebuilt at the sprite's current position;
        # relies on the module-global `collision_manager` set up in main().
        collision_manager.remove_tricky(self)
        self.cshape = cm.CircleShape(eu.Vector2(self.position[0], self.position[1]), 25)
        collision_manager.add(self)

    def maybe_impact(self):
        # Bounce: reverse the velocity when anything is within 1 unit.
        if collision_manager.any_near(self, 1):
            self.velocity = (- self.velocity[0], - self.velocity[1])
            #self.velocity = (0, 0)
        # check if out of map
        self.position = (max(0, min(self.position[0], MAP_SIZE[0])), \
                         max(0, min(self.position[1], MAP_SIZE[1])))

# How to handle collisions
#mapcollider = mapcolliders.RectMapCollider("bounce")

# Car Actions class
class Car(actions.Move):
    def step(self, dt):
        """Per-frame update: read the keyboard, integrate speed, steer."""
        super(Car, self).step(dt)
        rl = keyboard[key.RIGHT] - keyboard[key.LEFT]
        speed_or_brake = keyboard[key.UP] - keyboard[key.DOWN]
        radians = self.target.rotation * math.pi / 180
        # Update the speed from the perspective of the car
        try:
            # Braking (opposing current direction of travel) is weighted
            # more heavily than accelerating.
            speed_or_brake = keyboard[key.UP] - VELOCITY_BRAKE_VS_SPEED * keyboard[key.DOWN] \
                if self.target.speed > 0 else \
                VELOCITY_BRAKE_VS_SPEED * keyboard[key.UP] - keyboard[key.DOWN]
            self.target.speed = VELOCITY_DECLINE * (min(VELOCITY_INERTIA * speed_or_brake + self.target.speed, VELOCITY_MAX))
        except AttributeError:
            # First frame: derive speed from the initial velocity vector.
            self.target.speed = math.sqrt(self.target.velocity[0]**2 + self.target.velocity[1]**2)
        velocity_x = self.target.speed * math.sin(radians)
        velocity_y = self.target.speed * math.cos(radians)
        self.target.velocity = (velocity_x, velocity_y)
        # turn the car
        rl = TURNING_SPEED * rl * VELOCITY_IMPACT_ON_TURNING * abs(self.target.speed)
        rl = rl if self.target.speed > 0 else - rl
        action = actions.interval_actions.RotateBy(rl, 0)
        self.target.do(action)
        self.target.update_in_collision_manager()
        self.target.maybe_impact()


class Mqtt_layer(layer.Layer):
    """Layer that spawns obstacles from MQTT messages."""

    def __init__(self, collision_mgr):
        super(Mqtt_layer, self).__init__()
        self.collision_mgr = collision_mgr

        # MQTT part
        def on_marker(client, userdata, msg):
            print("marker: '" + str(msg.payload))
            payload = json.loads(msg.payload)
            print payload["position"][0]
            print payload["position"][1]
            # create an obstacle and add to layer
            # obstacle3 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
            # player_layer.add(obstacle3)
            # obstacle3.velocity = (0, 0)
            # collision_manager.add(obstacle3)

        def on_connect(client, userdata, flags, rc):
            print("Connected with result code " + str(rc))
            # client.message_callback_add("ares/video/markers", on_marker)
            # Subscribing in on_connect() means that if we lose the connection and
            # reconnect then subscriptions will be renewed.
            client.subscribe("ares/video/markers")
            client.subscribe("ares/video/edges")
            client.subscribe("ares/video/objects")
            client.subscribe("ares/mgt/features/add")
            client.subscribe("ares/mgt/features/remove")

        # The callback for when a PUBLISH message is received from the server which is not handled in other handlers
        def on_message(client, userdata, msg):
            print("Received message '" + str(msg.payload) + "' on topic '" \
                  + msg.topic + "' with QoS " + str(msg.qos))
            payload = json.loads(msg.payload)
            x = payload["position"][0]
            y = payload["position"][1]
            # create an obstacle and add to layer
            obstacle3 = CollidableSprite('sprites/obstacle.png', x, y, 0)
            self.add(obstacle3)
            # obstacle3.velocity = (0, 0)
            self.collision_mgr.add(obstacle3)

        self.client = mqtt.Client()
        self.client.on_connect = on_connect
        self.client.on_message = on_message
        self.client.connect("localhost", 1883, 60)
        # Blocking call that processes network traffic, dispatches callbacks and
        # handles reconnecting.
        # Other loop*() functions are available that give a threaded interface and a
        # manual interface.

    def draw(self):
        # Pump the MQTT client once per frame from the render callback.
        self.client.loop(0)


# Main class
def main():
    # `keyboard` and `collision_manager` are globals shared with Car and
    # CollidableSprite above.
    global keyboard
    global collision_manager
    collision_manager = cm.CollisionManagerBruteForce()

    director.init(width=MAP_SIZE[0], height=MAP_SIZE[1], autoscale=True, resizable=True)

    # Create a layer
    player_layer = Mqtt_layer(collision_manager)

    # create an obstacle and add to layer
    obstacle1 = CollidableSprite('sprites/obstacle.png', 200, 200, 0)
    player_layer.add(obstacle1)
    obstacle1.velocity = (0, 0)
    collision_manager.add(obstacle1)

    # create an obstacle and add to layer
    obstacle2 = CollidableSprite('sprites/obstacle.png', 320, 240, 0)
    player_layer.add(obstacle2)
    obstacle2.velocity = (0, 0)
    collision_manager.add(obstacle2)

    # create an obstacle and add to layer
    obstacle4 = CollidableSprite('sprites/obstacle.png', 490, 490, 0)
    player_layer.add(obstacle4)
    obstacle4.velocity = (0, 0)
    collision_manager.add(obstacle4)

    # create the car and add to layer
    car = CollidableSprite('sprites/Black_viper.png', 100, 100, 10)
    action = actions.interval_actions.ScaleBy(0.25, 0)
    car.do(action)
    player_layer.add(car)
    car.velocity = (0, 0)

    # Set the sprite's movement class.
    car.do(Car())

    # Create a scene and set its initial layer.
    main_scene = scene.Scene(player_layer)

    # collisions
    collision_manager.add(car)

    # Attach a KeyStateHandler to the keyboard object.
    keyboard = key.KeyStateHandler()
    director.window.push_handlers(keyboard)

    # Play the scene in the window.
    director.run(main_scene)


if __name__ == '__main__':
    main()
Home » SEO Reviews » Need information about Google Page Creator and ad sense.? Need information about Google Page Creator and ad sense.? Here goes some replies1) keyword tool for google https://adwords.google.com/select/KeywordToolExternal2) http://www.seochat .com / seo-tools / keyword-google-suggestions / word suggestion clésGénérateur keywords 3) Typo Générateurhttps :/ / adwords.google.com / select / TrafficEstimatorSandboxPour build profitable Adsense empire, you need quality content that is frequently updated and web traffic in full croissance.Vous can do several things to increase the fluidity of trafic.Lorsque you make your own website doing research on how to optimize your site you get a good ranking with SEO. To do this, integrating intelligent words clés.Vous need to get your web address in as many places as possible on the net. To do this, article submission, link exchange and classifieds gratuites.Voici a complete list of Small http://answers.yahoo.com/question/?qid=20080114075709AAtlrLP Here is a link to a teenager which has large volume of web traffic based on its content quality. http://www.fastcompany.com/magazine/118/girl-power.htmlIci is an article from adsense program succèsPar Jefferson Graham, USA TODAYLOS ANGELES – Jerry Alonzy thought it would work in his 70 years at moins.Comme a handyman to thank you independent weather near Hartford, Conn., He always had a decent income rarely augmenté.Puis he found Google (GOOG), and his life has changed. Alonzy, 57, is now $ 120,000 per year in places Google ads on their website Handyman natural, and he could not be happier. “I put in two, maybe three hours a day on the site, and pay checks, “he said.” What more? “In return for placing its ads on websites and blogs, Google pays Web publishers each time one of their ads are clicked. these clicks help keep Alonzy and his wife live comfortably and talking about moving to Hawaii. 
“All I need is a laptop and a high-speed internet connection, and I can live anywhere.”
#!/usr/bin/env python3
"""REST-driver test runner: loads a YAML config and test suite, then runs
the suite's testcases as HTTP requests, logging results."""

# Dependent packages:
import json
import logging as log
import os
import requests
import yaml
import sys

from restdriver import cli

__author__ = 'kgraham'
__version__ = '0.0.0-0'


def init_defaults():
    """Return the default file locations (config, suite, results, log)."""
    cwd = os.getcwd()
    root = os.path.abspath('/')
    defaults = dict(config_file=os.path.join(cwd, 'environments.yaml'),
                    suite_file=os.path.join(cwd, 'suites', 'default.tcs'),
                    results_file=os.path.join(cwd, 'results', 'default.rslt'),
                    log_file=os.path.join(root, 'var', 'log', 'restdriver', 'restdriver.log'))
    return defaults


def load_config_file(config):
    """Merge the YAML config file named by config['config_file'] into
    *config* (in place) and return it.  Exits with status 1 on a missing
    or invalid file."""
    try:
        with open(config['config_file'], 'r') as yamlfile:
            # safe_load: config files must not be able to construct
            # arbitrary Python objects (yaml.load without a Loader is
            # unsafe and deprecated).
            yamlconfig = yaml.safe_load(yamlfile)
        config.update(yamlconfig)
    except FileNotFoundError:
        log.error("Error: Config file %s not found" % config['config_file'])
        sys.exit(1)
    except yaml.YAMLError as err:
        if hasattr(err, 'problem_mark'):
            mark = err.problem_mark
            log.error("Error: Invalid config file %s" % config['config_file'])
            log.error("Error: Location (%s line %d)"
                      % (config['config_filename'], mark.line + 1))
        else:
            # Fixed: the message string had no placeholder, so passing
            # `err` as a lazy %-argument raised a logging format error.
            log.error("Error: %s", err)
        sys.exit(1)

    # Log the configuration
    log.info('------------------------------------------')
    log.info(" Config file   : %s" % config['config_file'])
    log.info(" Log file      : %s" % config['log_file'])
    log.info(" Results file  : %s" % config['results_file'])
    log.info(" Test suite    : %s" % config['suite_file'])
    log.debug(" Configuration : %s" % config)
    log.info('------------------------------------------')
    return config


def initialize():
    """Parse CLI options, set up logging, normalize all paths, and return
    the combined configuration dict (CLI options + config file)."""
    defaults = init_defaults()
    parser = cli.get_parser(defaults)
    options, args = parser.parse_args()

    # Setup log
    log_level = getattr(log, options.debug_level.upper())
    debug_enabled = (log_level == log.DEBUG)
    try:
        log.basicConfig(format='%(asctime)s %(levelname)-5s [(%(thread)s) %(module)s:%(lineno)s] %(message)s',
                        filemode='w', level=log_level, filename=options.log_file)
        # Echo log output to the console as well as the file.
        log.getLogger().addHandler(log.StreamHandler())
        log.info('------------------------------------------')
        log.info('Starting %s' % __file__)
    except (FileNotFoundError, IOError) as err:
        print("Failed to open log file '%s', check permissions" % options.log_file, file=sys.stderr)
        print(err, file=sys.stderr)
        sys.exit(1)

    # Normalize file names to absolute paths
    config_dir, config_filename, config_file = normalize_filename(options.config_file)
    log_dir, log_filename, log_file = normalize_filename(options.log_file)
    results_dir, results_filename, results_file = normalize_filename(options.results_file)
    suite_dir, suite_filename, suite_file = normalize_filename(options.suite_file)

    # Save initial configuration
    config = dict(config_file=config_file, config_filepath=config_dir, config_filename=config_filename,
                  log_file=log_file, log_filepath=log_dir, log_filename=log_filename,
                  results_file=results_file, results_filepath=results_dir, results_filename=results_filename,
                  suite_file=suite_file, suite_filepath=suite_dir, suite_filename=suite_filename,
                  log_level=log_level, debug_enabled=debug_enabled)

    # Add configuration from config file
    config = load_config_file(config)
    return config


def normalize_filename(file_path):
    """Split *file_path* and return (absolute_dir, basename, absolute_path)."""
    file_dir, file_name = os.path.split(file_path)
    file_dir = os.path.abspath(file_dir)
    absolute_filename = os.path.join(file_dir, file_name)
    return file_dir, file_name, absolute_filename


def load_suite_templates(suite, config):
    """Return the suite's 'message_t' mapping, or {} if absent."""
    message_templates = {}
    if 'message_t' in suite:
        message_templates = suite['message_t']
    return message_templates


def load_suite_testcases(suite, config):
    """Return the suite's 'testcase' mapping, or {} if absent."""
    testcases = {}
    if 'testcase' in suite:
        testcases = suite['testcase']
    return testcases


def load_suite(suite_file, config):
    """Load the YAML test suite *suite_file* and return
    (message_templates, testcases).  Exits with status 1 on error."""
    try:
        with open(suite_file, 'r') as yamlfile:
            # safe_load: suite files are data, not arbitrary Python objects.
            suite = yaml.safe_load(yamlfile)
            message_templates = load_suite_templates(suite, config)
            log.debug("Message Templates:\n%s" % json.dumps(message_templates, indent=2))
            testcases = load_suite_testcases(suite, config)
            log.debug("Test Cases:\n%s" % json.dumps(testcases, indent=2))
    except FileNotFoundError:
        # Fixed: these error paths previously reported the *config* file
        # name although it is the *suite* file that failed to load.
        log.error("Error: Suite file %s not found" % suite_file)
        sys.exit(1)
    except yaml.YAMLError as err:
        if hasattr(err, 'problem_mark'):
            mark = err.problem_mark
            log.error("Error: Invalid suite file %s" % suite_file)
            log.error("Error: Location (%s line %d)" % (suite_file, mark.line + 1))
        else:
            log.error("Error: %s", err)
        sys.exit(1)
    return message_templates, testcases


def run_command(command):
    """Execute a single testcase command.  Currently only 'send' with
    method GET is implemented."""
    for cmd_name, cmd_data in command.items():
        print('%s' % type(cmd_data))
        print('%s' % cmd_data)
        if cmd_name == 'send':
            method = cmd_data['method']
            url = cmd_data['baseurl'] + '/' + cmd_data['resource']
            if method.upper() == "GET":
                response = requests.get(url)
                headers = ""
                for key, val in response.headers.items():
                    headers += "%s: %s\n" % (key, val)
                # if response.headers['Content-Type'] == 'application/json':
                content_string = json.dumps(response.json(), indent=2)
                print("RESPONSE %s %s\n%s\n%s" % (response.status_code, response.reason,
                                                  headers, content_string))
        # NOTE(review): only the first entry of *command* is processed
        # (preserved from the original) — confirm this is intentional.
        break


def run_testcase(testcase, message_templates):
    """Run every command of every named testcase in *testcase*."""
    for tc_name, tc_data in testcase.items():
        print("1>>> %s" % tc_name)
        print("1>>> %s" % tc_data)
        for cmd in tc_data:
            run_command(cmd)


def run_suite(config):
    """Load the configured suite and run all of its testcases."""
    message_templates, testcases = load_suite(config['suite_file'], config)
    for testcase in testcases:
        print("2>>> %s" % testcase)
        run_testcase(testcase, message_templates)


def main():
    config = initialize()
    run_suite(config)


if __name__ == '__main__':
    main()
Discovery Education’s latest article is a deep dive into St. Vrain Valley Schools, a Colorado school district with a powerful story of reinvention. Read the introduction below and then download the full article for free. St. Vrain Valley Schools in Colorado is on a roll. In just under a decade, the district of 32,000 students has transformed itself through a variety of initiatives to provide students with a hands-on education that sets them up for success well beyond the halls of their schools. “That’s what makes the system work. What you see as a result is a systematic gain. It’s not limited to one school—it’s districtwide,” he adds. This is just an excerpt. Download the full article for free! Wow, amazing! Great post Frank!
# query.py
# Implements Query.
#
# Copyright (C) 2012-2016 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
# Public License for more details.  You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.  Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#

from __future__ import absolute_import
from __future__ import unicode_literals
import hawkey
from hawkey import Query
from dnf.i18n import ucd
from dnf.pycomp import basestring


def _by_provides(sack, patterns, ignore_case=False, get_query=False):
    """Query *sack* for packages whose 'provides' matches any glob in
    *patterns* (a string or list of strings).

    Returns the hawkey query object itself when *get_query* is true,
    otherwise the list of matching packages.
    """
    if isinstance(patterns, basestring):
        patterns = [patterns]
    flags = [hawkey.ICASE] if ignore_case else []
    query = sack.query()
    query.filterm(*flags, provides__glob=patterns)
    return query if get_query else query.run()


def _per_nevra_dict(pkg_list):
    """Map each package's NEVRA string (as unicode) to the package object."""
    return {ucd(pkg): pkg for pkg in pkg_list}
When I get off the train I wrap the lanyard my keys are on round my wrist twice and I position a key between each knuckle. A month or so in. I think it’s tacky to be honest but he’s getting on my nerves and I know he’ll pay so I order everything. He tells me his guilty pleasure is watching Eastenders, on his phone, in his lunch break. I find this surprisingly sweet. I’m angry at him, but God he’s beautiful. How great it’s going to be. He never calls me back. I got a text in the morning, like are you in – I ended up in your area last night. I literally couldn’t believe it. You’re stunning too I say. Keep saying. It was kind of weird though. Well, his friend came in. Yeah no, he kind of got involved. And all the lights in the world turn off. And all the volume goes up. And all the vices get tighter. And the heat becomes unbearable. And we both just sit there in silence. And she’s got a bruise on her wrist. And I sit there and make a wish. Please please let this not be what I think it is. Tutku Barbaros is a writer and an artist. She’s one third of the loud mouthed Plunge Theatre with whom she’s on a mission to combat mental health issues through body positive theatre. She also works with ADF focusing on raising the profile of BAME arts practitioners. She writes across forms, follow her on Twitter!
import json
import logging
import os
import platform
from datetime import datetime

from gensim.models import word2vec

from searcher.models import Search
from task_manager.models import Task
from texta.settings import ERROR_LOGGER, INFO_LOGGER, MODELS_DIR
from utils.datasets import Datasets
from utils.es_manager import ES_Manager

# On Windows there is no fork(), so a Thread stands in for a Process.
if platform.system() == 'Windows':
    from threading import Thread as Process
else:
    from multiprocessing import Process


class LanguageModel:
    """Trains a gensim word2vec model from Elasticsearch documents,
    driven by the parameters stored on a task_manager Task row, and
    persists the result under MODELS_DIR."""

    def __init__(self):
        # id: primary key of the Task this model belongs to.
        self.id = None
        # model: the trained gensim Word2Vec instance (set by train()).
        self.model = None
        # model_name: filename stem used by save() ('model_<task id>').
        self.model_name = None

    def train(self, task_id):
        """Train synchronously for Task *task_id*; always returns True.

        Training used to run in a separate Process but was inlined
        (see the comment below about Apache wsgi).
        """
        self.id = task_id

        # Process(target=self._training_worker).start()
        self._training_worker()  # Apache wsgi multiprocessing problem
        # self._training_worker()

        return True

    def _training_worker(self):
        """Run the full training pipeline and record success/failure on
        the Task row.  Progress is reported via ShowProgress."""
        logging.getLogger(INFO_LOGGER).info(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_started', 'data': {'task_id': self.id}}))
        num_passes = 5
        # Number of word2vec passes + one pass to vocabulary building
        total_passes = num_passes + 1
        show_progress = ShowProgress(self.id, multiplier=total_passes)
        show_progress.update_view(0)
        # Placeholder instance; overwritten by the real training run below.
        model = word2vec.Word2Vec()

        task_params = json.loads(Task.objects.get(pk=self.id).parameters)

        try:
            sentences = EsIterator(task_params, callback_progress=show_progress)
            model = word2vec.Word2Vec(
                sentences,
                min_count=int(task_params['min_freq']),
                size=int(task_params['num_dimensions']),
                workers=int(task_params['num_workers']),
                iter=int(num_passes),
                max_vocab_size=int(task_params['max_vocab']) if task_params['max_vocab'] else None
            )

            self.model = model
            self.save()

            # declare the job done
            logging.getLogger(INFO_LOGGER).info(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_completed', 'data': {'task_id': self.id}}))
            r = Task.objects.get(pk=self.id)
            r.time_completed = datetime.now()
            r.status = 'Completed'
            r.result = json.dumps({"model_type": "word2vec", "lexicon_size": len(self.model.wv.vocab)})
            r.save()
        except Exception as e:
            logging.getLogger(ERROR_LOGGER).error(json.dumps({'process': 'CREATE MODEL', 'event': 'model_training_failed', 'data': {'task_id': self.id}}), exc_info=True)
            print('--- Error: {0}'.format(e))
            # declare the job as failed
            r = Task.objects.get(pk=self.id)
            r.time_completed = datetime.now()
            r.status = 'Failed'
            r.save()
        print('done')

    def delete(self):
        # NOTE(review): not implemented — saved model files are never
        # removed from MODELS_DIR.
        pass

    def save(self):
        """Persist the trained model to MODELS_DIR as 'model_<task id>'.

        Returns True on success; on failure logs the error and
        implicitly returns None.
        """
        try:
            model_name = 'model_' + str(self.id)
            self.model_name = model_name
            output_model_file = os.path.join(MODELS_DIR, model_name)
            self.model.save(output_model_file)
            return True
        except Exception as e:
            model_name = 'model_' + str(self.id)
            filepath = os.path.join(MODELS_DIR, model_name)
            logging.getLogger(ERROR_LOGGER).error('Failed to save model pickle to filesystem.', exc_info=True, extra={'filepath': filepath, 'modelname': model_name})


class ShowProgress(object):
    """ Show model training progress """

    def __init__(self, task_pk, multiplier=None):
        # multiplier scales the expected total (one unit per training pass).
        self.n_total = None
        self.n_count = 0
        self.task_pk = task_pk
        self.multiplier = multiplier

    def set_total(self, total):
        """Record the expected number of progress units."""
        self.n_total = total
        if self.multiplier:
            self.n_total = self.multiplier * total

    def update(self, amount):
        """Advance progress by *amount* units and refresh the Task status."""
        if amount == 0:
            return
        self.n_count += amount
        percentage = (100.0 * self.n_count) / self.n_total
        self.update_view(percentage)

    def update_view(self, percentage):
        """Write 'Running [xx %]' onto the Task row."""
        r = Task.objects.get(pk=self.task_pk)
        r.status = 'Running [{0:3.0f} %]'.format(percentage)
        r.save()


class EsIteratorError(Exception):
    """ EsIterator Exception """
    pass


class EsIterator(object):
    """ ElasticSearch Iterator """

    def __init__(self, parameters, callback_progress=None):
        # parameters: the task's parameter dict; 'field' is a JSON string
        # whose 'path' names the (possibly nested) document field to read.
        ds = Datasets().activate_dataset_by_id(parameters['dataset'])
        query = self._parse_query(parameters)

        self.field = json.loads(parameters['field'])['path']
        self.es_m = ds.build_manager(ES_Manager)
        self.es_m.load_combined_query(query)

        self.callback_progress = callback_progress

        if self.callback_progress:
            total_elements = self.get_total_documents()
            callback_progress.set_total(total_elements)

    @staticmethod
    def _parse_query(parameters):
        """Return the ES query dict: match-all for 'all_docs', otherwise
        the stored query of the referenced Search row."""
        search = parameters['search']
        # select search
        if search == 'all_docs':
            query = {"main": {"query": {"bool": {"minimum_should_match": 0, "must": [], "must_not": [], "should": []}}}}
        else:
            query = json.loads(Search.objects.get(pk=int(search)).query)
        return query

    def __iter__(self):
        """Scroll through all matching documents, yielding one list of
        lower-cased words per line of the configured field.

        NOTE(review): the hits of the *initial* scroll response are never
        yielded — the loop immediately re-scrolls before reading hits.
        Confirm whether the first batch is intentionally skipped.
        """
        self.es_m.set_query_parameter('size', 500)
        response = self.es_m.scroll()

        scroll_id = response['_scroll_id']
        l = response['hits']['total']

        while l > 0:
            response = self.es_m.scroll(scroll_id=scroll_id)
            l = len(response['hits']['hits'])
            scroll_id = response['_scroll_id']

            # Check errors in the database request
            if (response['_shards']['total'] > 0 and response['_shards']['successful'] == 0) or response['timed_out']:
                msg = 'Elasticsearch failed to retrieve documents: ' \
                      '*** Shards: {0} *** Timeout: {1} *** Took: {2}'.format(response['_shards'], response['timed_out'], response['took'])
                raise EsIteratorError(msg)

            for hit in response['hits']['hits']:
                try:
                    # Take into account nested fields encoded as: 'field.sub_field'
                    decoded_text = hit['_source']
                    for k in self.field.split('.'):
                        decoded_text = decoded_text[k]
                    sentences = decoded_text.split('\n')
                    for sentence in sentences:
                        yield [word.strip().lower() for word in sentence.split(' ')]
                except KeyError:
                    # If the field is missing from the document
                    logging.getLogger(ERROR_LOGGER).error('Key does not exist.', exc_info=True, extra={'hit': hit, 'scroll_response': response})

            if self.callback_progress:
                self.callback_progress.update(l)

    def get_total_documents(self):
        """Total number of documents matching the loaded query."""
        return self.es_m.get_total_documents()
Peggy Liu, Chairperson and co-founder of JUCCCE, is the 2010 Hillary Laureate. An internationally recognized expert on China's energy landscape. JUCCCE is a non-profit organization dedicated to changing the way China creates and uses energy, because a green China is the key to a healthy world. JUCCCE is well-known for its effectiveness in carrying out system changing programs and for fostering international collaboration with China. Peggy was honored as a Time Magazine Hero of the Environment in 2008, an energy adviser to the Clinton Global Initiative in 2008, a World Economic Forum Young Global Leader in 2009, the Hillary Laureate of 2010 for climate change leadership, a Forbes "Women to Watch in Asia" in 2010, a Huffington Post "Greatest Person of the Day" in 2011, and a member of the World Economic Forum’s Global Agenda Council on New Energy Architecture 2011. In Chinese press, she has been recognized as a green leader on covers and in features such as Global Times (“Green Goddess”), Beijing Tatler (“Green Miracle”), Psychologies (“10 Green Handkerchiefs award”), L’Officiel, Elle, Good Housekeeping, Rui Li, Southern People Weekly, Shanghai Daily, The Bund, 21st Century Herald, China Daily, QQ, Sohu.com. She is the winner of The Economist debate "Why China is doing more than the US in climate change leadership" (November 24, 2009) ; author of an op-ed in Huffington Post, "3 Win-Win Areas for US-China Collaboration" (2009); author of an op-ed in 21st Century Herald, World Economic Forum supplement, "Why COP15 Negotiations are Built on Shaky Ground" (Sept 2009). She is an advisor to the Katerva Challenge for innovative climate change solutions, and an executive advisor to Marks & Spencer on sustainable retailing. In 2007, Peggy organized the MIT Forum on the Future of Energy in China from which JUCCCE was formed. This forum was the first public dialogue between US and Chinese government officials on clean energy in China. 
Prior to JUCCCE she was a venture capitalist in Shanghai. In the 1990's Peggy was on the cover of "Red Herring Hits" as an Internet pioneer in Silicon Valley heading Channel A, one of the earliest e-commerce companies. She was General Manager of Consumer Applications at Zaplet, a dynamic email communications platform (funded by KPCB and acquired by MetricStream). She was also VP of Marketing at SeeUthere.com (now owned by Starcite). As an independent advisor, she consulted ecommerce companies on strategy, fundraising, and business partnerships. Previously, Peggy also served as a management consultant at McKinsey, a software product manager for Internet Chameleon, Symantec C++ and Norton OEM products, and computer programmer. She is a graduate of Massachusetts Institute of Technology in Electrical Engineering and Computer Science and completed a program in Global Leadership and Public Policy for the 21st Century at the Harvard Kennedy School of Government. Peggy is Chinese-American and resides in Shanghai with her husband and two sons, 6 and 8. She has extensive non-profit management experience in a wide range of sectors and has a passion for making the world a better place. Peggy‘s Laureate Award was bestowed by Hillary Governor Rajendra Pachauri and ED Mark Prain in Beijing on June 30, as part of the 2010 JUCCCE China Energy Forum. She was then welcomed down to New Zealand to lead the 3rd Annual Hillary Symposium in October – The Greening of China – New Zealand‘s Response/Opportunity. In 2012 Peggy was also awarded the first Hillary Step Prize for her "China Dream" programme - see link below. She was also appointed to the Hillary Summit advisory board at this time.
""" The sister file of similar_lines_a, another file designed to have lines of similarity when compared to its sister file As with the sister file, we use lorm-ipsum to generate 'random' code. """ # Copyright (c) 2020 Frank Harrison <frank@doublethefish.com> class Nulla: tortor = "ultrices quis porta in" sagittis = "ut tellus" def pulvinar(self, blandit, metus): egestas = [mauris for mauris in zip(blandit, metus)] neque = (egestas, blandit) def similar_function_5_lines(self, similar): # line same #1 some_var = 10 # line same #2 someother_var *= 300 # line same #3 fusce = "sit" # line same #4 amet = "tortor" # line same #5 iaculis = "dolor" # line diff return some_var, someother_var, fusce, amet, iaculis, iaculis # line diff def tortor(self): ultrices = 2 quis = ultricies * "porta" return ultricies, quis class Commodo: def similar_function_3_lines(self, tellus): # line same #1 agittis = 10 # line same #2 tellus *= 300 # line same #3 laoreet = "commodo " # line diff return agittis, tellus, laoreet # line diff
Home › 145-Sit by a tree. . .
# -*- coding: utf-8 -*-

import transaction
import argparse
from pprint import pprint
import datetime
import sys
import os

import pysite.lib
import pysite.cli
import pysite.authmgr.const
import pysite.vmailmgr.manager as vmailmanager
from pysite.exc import PySiteError


class PySiteSasscCli(pysite.cli.Cli):
    """Command-line front-end that compiles a site's Sass sources."""

    def __init__(self):
        super().__init__()

    def compile(self, site):
        """Compile the Sass sources of *site* (a site name under the
        configured sites_dir) and print the compiler response."""
        site_dir = os.path.join(self._rc.g('sites_dir'), site)
        rc = pysite.lib.load_site_config(site_dir, 'rc.yaml')
        resp = pysite.lib.compile_sass(site_dir, rc)
        resp.print()


def main(argv=sys.argv):
    """Parse *argv*, initialise the CLI and compile the named site.

    Fix: *argv* was accepted but ignored — parse_args() always read
    sys.argv, so callers passing an explicit argument vector (e.g. from
    tests) had it silently dropped.  We now honour the parameter;
    behaviour with the default argument is unchanged.
    """
    cli = PySiteSasscCli()
    # Main parser
    parser = argparse.ArgumentParser(description="""PySite-Sassc command-line interface.""",
        epilog="""
        Samples:

        pysite-sassc -c production.ini www.default.local
        """)
    parser.add_argument('-c', '--config', required=True,
        help="""Path to INI file with configuration,
        e.g. 'production.ini'""")
    parser.add_argument('-l', '--locale', help="""Set the desired locale.
        If omitted and output goes directly to console, we automatically use
        the console's locale.""")
    parser.add_argument('site',
        help="Name of a site, e.g. 'www.default.local'")

    # Parse args and run command (skip argv[0], the program name)
    args = parser.parse_args(argv[1:])
    pysite.lib.init_cli_locale(args.locale, print_info=True)
    cli.init_app(args)
    cli.compile(args.site)
    print("Done.", file=sys.stderr)
Marble, coral, and yellow turquoise beads on a 7 1/2" bracelet
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-01-25 11:34
"""Schema migration for the 'options' app: adds URI-related and comment
fields to Option/OptionSet and refreshes help texts and model options.

Auto-generated — do not hand-edit the operations; create a follow-up
migration for further schema changes.
"""
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('options', '0005_refactoring'),
    ]

    operations = [
        # OptionSets are now ordered by their URI.
        migrations.AlterModelOptions(
            name='optionset',
            options={'ordering': ('uri',), 'verbose_name': 'OptionSet', 'verbose_name_plural': 'OptionSets'},
        ),
        # New nullable fields on Option: comment, uri, uri_prefix.
        migrations.AddField(
            model_name='option',
            name='comment',
            field=models.TextField(blank=True, help_text='Additional information about this option.', null=True, verbose_name='Comment'),
        ),
        migrations.AddField(
            model_name='option',
            name='uri',
            field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this option (auto-generated).', max_length=640, null=True, verbose_name='URI'),
        ),
        migrations.AddField(
            model_name='option',
            name='uri_prefix',
            field=models.URLField(blank=True, help_text='The prefix for the URI of this option.', max_length=256, null=True, verbose_name='URI Prefix'),
        ),
        # Same three fields on OptionSet.
        migrations.AddField(
            model_name='optionset',
            name='comment',
            field=models.TextField(blank=True, help_text='Additional information about this option set.', null=True, verbose_name='Comment'),
        ),
        migrations.AddField(
            model_name='optionset',
            name='uri',
            field=models.URLField(blank=True, help_text='The Uniform Resource Identifier of this option set (auto-generated).', max_length=640, null=True, verbose_name='URI'),
        ),
        migrations.AddField(
            model_name='optionset',
            name='uri_prefix',
            field=models.URLField(blank=True, help_text='The prefix for the URI of this option set.', max_length=256, null=True, verbose_name='URI Prefix'),
        ),
        # Existing fields altered (mostly help_text/verbose_name refreshes).
        migrations.AlterField(
            model_name='option',
            name='additional_input',
            field=models.BooleanField(default=False, help_text='Designates whether an additional input is possible for this option.', verbose_name='Additional input'),
        ),
        migrations.AlterField(
            model_name='option',
            name='key',
            field=models.SlugField(blank=True, help_text='The internal identifier of this option. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key'),
        ),
        migrations.AlterField(
            model_name='option',
            name='optionset',
            field=models.ForeignKey(blank=True, help_text='The option set this option belongs to.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='options', to='options.OptionSet', verbose_name='Option set'),
        ),
        migrations.AlterField(
            model_name='option',
            name='order',
            field=models.IntegerField(default=0, help_text='The position of this option in lists.', verbose_name='Order'),
        ),
        migrations.AlterField(
            model_name='option',
            name='text_de',
            field=models.CharField(help_text='The German text displayed for this option.', max_length=256, verbose_name='Text (de)'),
        ),
        migrations.AlterField(
            model_name='option',
            name='text_en',
            field=models.CharField(help_text='The English text displayed for this option.', max_length=256, verbose_name='Text (en)'),
        ),
        migrations.AlterField(
            model_name='optionset',
            name='conditions',
            field=models.ManyToManyField(blank=True, help_text='The list of conditions evaluated for this option set.', to='conditions.Condition', verbose_name='Conditions'),
        ),
        migrations.AlterField(
            model_name='optionset',
            name='key',
            field=models.SlugField(blank=True, help_text='The internal identifier of this option set. The URI will be generated from this key.', max_length=128, null=True, verbose_name='Key'),
        ),
        migrations.AlterField(
            model_name='optionset',
            name='order',
            field=models.IntegerField(default=0, help_text='The position of this option set in lists.', verbose_name='Order'),
        ),
    ]
What can we say… what an amazing night at the Hull Daily Mail Business Awards at the Hilton DoubleTree. We were very proud to be shortlisted in two categories – Community Involvement and Apprentice of the Year and over the moon to walk away with one of the awards. We love all the work we do for the Cobus Foundation, the charitable arm of Cobus, and all staff get involved whether it be fun runs, tough mudders, cycle rides or helping renovate buildings so to be recognised at these prestigious awards is a humbling accolade. We are elated that our Sales Advisor Jack was a finalist in the Apprentice of the Year. He’s had a great year and is fully deserving of this nomination.
from decimal import Decimal
import os
import re

import cairoplot
from django.http import HttpResponse
from django.shortcuts import get_object_or_404

from bitfund.core.settings_split.project import (ARGB_DONUT_CHART_PLEDGES,
                                                 ARGB_DONUT_CHART_REDONATIONS,
                                                 ARGB_DONUT_CHART_OTHER_SOURCES,
                                                 ARGB_DONUT_CHART_BACKGROUND,
                                                 TOTAL_DEGREES,
                                                 CHART_RADIUS_LIST,
                                                 CHART_IMAGE_TYPE,
                                                 CHART_PARAMS,
                                                 MINIMAL_DEFAULT_PLEDGES_DEGREES,
                                                 MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES,
                                                 MINIMAL_DEFAULT_REDONATIONS_DEGREES,
                                                 CHART_INNER_RADIUS,
                                                 CHART_PLEDGES_RGB,
                                                 CHART_REDONATIONS_RGB,
                                                 CHART_OTHER_SOURCES_RGB,
                                                 CHART_BACKGROUND_RGB,
                                                 CHART_PLEDGES_STYLE,
                                                 CHART_PLEDGES_ALPHA)
from bitfund.core.settings_split.server import MEDIA_ROOT
from bitfund.project.decorators import disallow_not_public_unless_maintainer
from bitfund.project.models import Project, ProjectGoal, ProjectNeed
from bitfund.project.template_helpers import _get_chart_relative_filename, hex_to_rgb, is_number, _parse_request_chart_params


def _chart_dimensions(entity, chart_size):
    """Return (width, height) in pixels for a chart of the given entity type.

    @param entity: key into CHART_PARAMS ('project', 'need' or 'goal')
    @param chart_size: named size present in CHART_PARAMS[entity], or a
                       numeric string used directly as a square side length
    """
    if chart_size in CHART_PARAMS[entity]:
        return (CHART_PARAMS[entity][chart_size]['w'],
                CHART_PARAMS[entity][chart_size]['h'])
    side = int(chart_size)
    return (side, side)


def _serve_chart_file(chart_abspathname):
    """Serve a chart image file from disk as an HTTP response.

    Fixes two defects present in the original inline code: the image is
    binary data and must be read in 'rb' mode ('r' text mode corrupts the
    payload on platforms with newline translation), and the file handle is
    now closed deterministically via a context manager.
    """
    response = HttpResponse(mimetype='image/'+CHART_IMAGE_TYPE)
    response['Content-Length'] = os.path.getsize(chart_abspathname)
    with open(chart_abspathname, 'rb') as chart_file:
        response.write(chart_file.read())
    return response


@disallow_not_public_unless_maintainer
def chart_image_project(request, project_key):
    """Render and serve the monthly-budget donut chart for a whole project.

    Segments: '1' pledges, '2' redonations, '3' other sources,
    '4' remainder (unfunded part of the budget).
    """
    project = get_object_or_404(Project, key=project_key)

    (chart_size, pledges_rgbas, redonations_rgbas,
     other_sources_rgbas, background_rgbas) = _parse_request_chart_params(request)
    chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]

    chart_image_width, chart_image_height = _chart_dimensions('project', chart_size)

    chart_relpathname = _get_chart_relative_filename(project_key, chart_size)
    chart_abspathname = MEDIA_ROOT + chart_relpathname

    project_monthly_budget = project.getTotalMonthlyBudget()
    pledges_needs_total_sum, pledges_goals_total_sum = project.getTotalMonthlyPledges()
    redonations_total_sum = project.getTotalMonthlyRedonations()
    other_sources_needs_total_sum, other_sources_goals_total_sum = project.getTotalMonthlyOtherSources()
    other_sources_total_sum = other_sources_needs_total_sum + other_sources_goals_total_sum

    # Donut chart arcs, in degrees. Each successive segment is capped by the
    # arc still unclaimed, so the three can never exceed TOTAL_DEGREES.
    if project_monthly_budget > 0:
        pledges_degrees = min(TOTAL_DEGREES,
                              round(TOTAL_DEGREES * (pledges_needs_total_sum / project_monthly_budget)))
        redonations_degrees = min((TOTAL_DEGREES - pledges_degrees),
                                  round(TOTAL_DEGREES * (redonations_total_sum / project_monthly_budget)))
        other_sources_degrees = min((TOTAL_DEGREES - pledges_degrees - redonations_degrees),
                                    round(TOTAL_DEGREES * (other_sources_total_sum / project_monthly_budget)))
    else:
        # No budget defined: show a full circle for the first non-zero source.
        pledges_degrees = 0
        redonations_degrees = 0
        other_sources_degrees = 0
        if pledges_needs_total_sum > 0:
            pledges_degrees = TOTAL_DEGREES
        elif redonations_total_sum > 0:
            redonations_degrees = TOTAL_DEGREES
        elif other_sources_total_sum > 0:
            other_sources_degrees = TOTAL_DEGREES

    # Nothing to show at all: draw the minimal placeholder slivers.
    # NOTE(review): the first operand tests pledges_needs_total_sum while the
    # other two test *_degrees -- possibly intended to be pledges_degrees;
    # preserved as-is to avoid a behavior change.
    if pledges_needs_total_sum == 0 and redonations_degrees == 0 and other_sources_degrees == 0:
        pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
        redonations_degrees = MINIMAL_DEFAULT_REDONATIONS_DEGREES
        other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES

    chart_data = {'1': pledges_degrees,
                  '2': redonations_degrees,
                  '3': other_sources_degrees,
                  '4': max(0, (TOTAL_DEGREES - (pledges_degrees + redonations_degrees + other_sources_degrees))),
                  }

    cairoplot.donut_plot(name=chart_abspathname,
                         data=chart_data,
                         width=chart_image_width,
                         height=chart_image_height,
                         background='transparent',
                         inner_radius=CHART_INNER_RADIUS,
                         radius_list=CHART_RADIUS_LIST,
                         colors=chart_colors)

    return _serve_chart_file(chart_abspathname)


def chart_image_need(request, project_key, need_id):
    """Render and serve the funding donut chart for a single project need."""
    need = get_object_or_404(ProjectNeed, pk=need_id)

    (chart_size, pledges_rgbas, redonations_rgbas,
     other_sources_rgbas, background_rgbas) = _parse_request_chart_params(request)
    chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]

    chart_image_width, chart_image_height = _chart_dimensions('need', chart_size)

    chart_relpathname = _get_chart_relative_filename(project_key, chart_size, need_id=need_id)
    chart_abspathname = MEDIA_ROOT + chart_relpathname

    # Arcs are quantized to whole degrees; each one is capped by the arc
    # remaining after the previous segments.
    pledges_degrees = min(TOTAL_DEGREES,
                          Decimal(TOTAL_DEGREES * ((need.getPledgesMonthlyTotal()) / need.amount)).quantize(Decimal('1')))
    redonations_degrees = min((TOTAL_DEGREES - pledges_degrees),
                              Decimal(TOTAL_DEGREES * ((need.getRedonationsMonthlyTotal()) / need.amount)).quantize(Decimal('1')))
    other_sources_degrees = min((TOTAL_DEGREES - (pledges_degrees + redonations_degrees)),
                                Decimal(TOTAL_DEGREES * ((need.getOtherSourcesMonthlyTotal()) / need.amount)).quantize(Decimal('1')))

    # Nothing to show: draw the minimal placeholder slivers.
    if pledges_degrees == 0 and redonations_degrees == 0 and other_sources_degrees == 0:
        pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
        redonations_degrees = MINIMAL_DEFAULT_REDONATIONS_DEGREES
        other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES

    chart_data = {'1': pledges_degrees,
                  '2': redonations_degrees,
                  '3': other_sources_degrees,
                  '4': max(0, (TOTAL_DEGREES - (pledges_degrees + other_sources_degrees + redonations_degrees))),
                  }

    cairoplot.donut_plot(name=chart_abspathname,
                         data=chart_data,
                         width=chart_image_width,
                         height=chart_image_height,
                         background='transparent',
                         inner_radius=CHART_INNER_RADIUS,
                         colors=chart_colors,
                         radius_list=CHART_RADIUS_LIST)

    return _serve_chart_file(chart_abspathname)


def chart_image_goal(request, project_key, goal_key):
    """Render and serve the funding donut chart for a single project goal.

    Goals never receive redonations, so segment '2' is always empty.
    """
    project = get_object_or_404(Project, key=project_key)
    goal = get_object_or_404(ProjectGoal, project_id=project.id, key=goal_key)

    (chart_size, pledges_rgbas, redonations_rgbas,
     other_sources_rgbas, background_rgbas) = _parse_request_chart_params(request)
    chart_colors = [pledges_rgbas, redonations_rgbas, other_sources_rgbas, background_rgbas]

    chart_image_width, chart_image_height = _chart_dimensions('goal', chart_size)

    chart_relpathname = _get_chart_relative_filename(project_key, chart_size, goal_id=goal.id)
    chart_abspathname = MEDIA_ROOT + chart_relpathname

    if goal.amount > 0:
        pledges_degrees = min(TOTAL_DEGREES,
                              Decimal(TOTAL_DEGREES * ((goal.getTotalPledges()) / goal.amount)).quantize(Decimal('1')))
        other_sources_degrees = min((TOTAL_DEGREES - pledges_degrees),
                                    Decimal(TOTAL_DEGREES * ((goal.getTotalOtherSources()) / goal.amount)).quantize(Decimal('1')))
    else:
        pledges_degrees = 0
        other_sources_degrees = 0

    # Nothing to show: draw the minimal placeholder slivers.
    if pledges_degrees == 0 and other_sources_degrees == 0:
        pledges_degrees = MINIMAL_DEFAULT_PLEDGES_DEGREES
        other_sources_degrees = MINIMAL_DEFAULT_OTHER_SOURCES_DEGREES

    chart_data = {'1': pledges_degrees,
                  '2': 0,  # redonations never apply to goals
                  '3': other_sources_degrees,
                  '4': max(0, (TOTAL_DEGREES - (pledges_degrees + other_sources_degrees))),
                  }

    cairoplot.donut_plot(name=chart_abspathname,
                         data=chart_data,
                         width=chart_image_width,
                         height=chart_image_height,
                         background='transparent',
                         inner_radius=CHART_INNER_RADIUS,
                         colors=chart_colors,
                         radius_list=CHART_RADIUS_LIST)

    return _serve_chart_file(chart_abspathname)
A recent article bashing consumption of animal protein published in the New York Times by Dr Dean Ornish is causing a frenzy. Similar studies crop up every so often, the latest indicating that red meat is high in Neu5Gc, a tumor-forming sugar that is linked to chronic inflammation and an increased risk of cancer. Dr Ornish is most famous for his vegan and anti-stress lifestyle approach to reversing coronary artery disease, a discovery previously considered physiologically impossible. With that said, let’s carry on and dissect this study. Who is a typical risk factor for cardiovascular disease? Men aged 55 and up who are overweight, sedentary smokers with high dietary intakes of saturated fat, trans-fats, and salt and low intake of fruits, vegetables, and fish, although whether all these associations are causal is disputed. So this means the typical cardiovascular disease patient is an overweight male (possibly a smoker) who likely has a high intake of processed meats and foods, damaged fats, and low intake of disease preventive vegetables. Take this profile and put him on Ornish’s vegan diet (which is basically a plant-based detox plan), with meditation and stress relief, and yes, you will see improvement. So then claims about animal proteins and fats (the main foods of choice among males 55 and older at risk for CVD?) are then selected as a causative factor in heart disease risk. Yes, eating processed meats, factory farmed (CAFO) meats with altered fatty acids ratios, and few vegetables and fruits when one is already overweight and sedentary will increase disease risk, no doubt. Are these studies accounting for food quality? Do the same risk factors appear in people eating plant based diets with organic animal proteins? Those who eat good quality animal proteins and exercise and/or meditate? Ornish’s article is quite misleading. 
He states in his article that Americans actually consumed 67 percent more added fat, 39 percent more sugar, and 41 percent more meat in 2000 than they had in 1950 and 24.5 percent more calories than they had in 1970, according to the Agricultural Department. He implies that added fats are coming in the form of animal protein, though the literature he cites states that THREE TIMES the added fats are in the form of canola and “salad oil” (vegetable oil), which are the very oils he promotes (including fat-free non-dairy salad dressings, non-stick cooking spray, fat-free margarine spreads. Because those are all real foods.) Vegetable oils are a source of rancid inflammatory fats due to their unstable PUFAs. The article also cites the use of twice as much shortening, a known source for disease-causing trans fats. Also interesting is the fact that the increase in calories Ornish cites comes from grains, not meat. Ornish’s interpretation of the data is misleading. This article does an excellent job explaining the flaws in his argument. That said, most of my clients come to me wildly confused about what to eat for disease prevention. There is one item no expert will dispute: EAT MORE PLANTS. We know and can all agree that a plant-based diet filled with different types of mostly vegetables and some fruits (maybe legumes if they work for you) has to make up the majority of your diet for good health. Period. One half or more of your plate at 2, preferably 3, meals daily. If you are eating more animal protein than plant matter LONG TERM, then yes, that may be a disease risk factor, especially if that protein is not organic. If you ate only kale that would be a disease risk factor too. If you are eating large quantities of dairy, especially conventional, hormone-and toxin-rich dairy products, and especially if you have a casein sensitivity, you may be at increased risk for disease. 
Consumption of conventionally raised animal products puts you at an increased risk for cancer and heart disease. Why? These animal products have been altered to produce a by-product with a skewed fatty acid ratio: one that contains hormones, antibiotics, and toxins that increase inflammation in our bodies. And inflammation causes disease. Balance is key here: Not too much sugar. Not too much booze. Not too much red meat. Not veggies only; you need protein and healthy, stable fats to help you absorb the nutrients in the veggies, and for tissue repair and hormone production. How much of each of these macronutrients (protein, carb, fat) you need depends on your unique physiology, your activity level, and your health status. If you are under stress or trying to get pregnant or are pregnant, eat more (organic) protein. If you have major risk factors for cancer and heart disease, you may be better off on a Mediterranean type diet plan with minimal red meats and mostly fish, legumes, and vegetables. THERE IS NO ONE DIET THAT IS RIGHT FOR EVERYONE. But always eat both cooked and raw plant foods daily. Include organically raised animal protein, as it’s the CAFO meats that increase disease risk. Get regular blood work done to make sure all your inflammatory markers are normal. Is Meat Killing the Planet? Which brings us to the environment: Ornish polishes off his article by stating Livestock production causes more disruption of the climate than all forms of transportation combined. And because it takes as much as 10 times more grain to produce the same amount of calories through livestock as through direct grain consumption, eating a plant-based diet could free up resources for the hungry. That all sounds well and good, but it’s not that simple. Factually it is correct that cows in particular eat up (so to speak) a lot of resources in terms of food and water. But there’s another way to look at the same information. 
If you stop eating beef, you can’t replace a kilogram of it, which has 2,280 calories, with a kilogram of broccoli, at 340 calories. You have to replace it with 6.7 kilograms of broccoli. Calories are the great equalizer, so it makes sense to use them as the basis of the calculation (source). Emissions accounted to produce enough low calorie plant matter (broccoli and tomatoes and almonds especially) to feed the world don’t much make up for beef production. There’s a reason we have an ecosystem. In an ideal world on a biodynamic farm, chickens for example help with pest control and produce poop that can be used as fertilizer for plants that can be grown to feed people and the animals that feed people. All parts of the animal should be used, from hoof to hide to organ meats (some of the most nutrient dense). But we are so far skewed with CAFO operations and pesticide-spewing agri-businesses that both pollute surrounding groundwater and air quality. Highly recommend this excellent article for more research on the topic. So, no. Vegetarianism will not solve the world’s problems. What’s the answer then? Eat less red meat if you are overindulging, sure. Our fast food culture loves burgers and steaks in which ruminant animals are fed grain (not their natural diet, making them sick, so that they require antibiotics which you, in turn, ingest) to alter their flesh so that it produces an inflammatory meat. I personally eat beef and lamb once or twice monthly at most, simply because I feel better eating lighter proteins like poultry and seafood. Always choose wild fish and organic and local meat wherever possible so you’re supporting the sustainable models of meat production, the ones that also take animal welfare into account (but don’t forget, vegetables know when you’re about to eat them too). Regardless (sorry, veggies), eat more vegetables. Eat organic vegetables. Eat less sugar and less dairy. Get fresh air, good sleep, clean water, plenty of sunshine. 
Grow your own food. Those are the pillars of health.
#!/usr/bin/env python
# helphelp.py
# utility helper functions

import re
import shutil
import os
import subprocess


def chginfile(strFilePath, strFrom, strTo):
    """Replace every occurrence of the literal string strFrom with strTo
    in file strFilePath (in place).

    Fixes vs. the original:
      * strFrom is escaped with re.escape() so it is treated as a plain
        string (the original compiled it as a regex, breaking on inputs
        containing metacharacters such as '(' or '.').
      * the stray os.remove("temp.txt") after shutil.move() is gone --
        the temp file has already been moved, so removing it raised
        FileNotFoundError on every call.
      * file handles are closed deterministically via 'with'.
    """
    patt = re.compile(re.escape(strFrom))
    with open(strFilePath, "r") as fin, open("temp.txt", "w") as fout:
        for line in fin:
            fout.write(patt.sub(strTo, line))
    # Atomically-ish replace the original with the rewritten copy.
    shutil.move("temp.txt", strFilePath)


def chginfile_re(strFilePath, regexFrom, strTo):
    """Replace every match of the regex regexFrom with strTo in file
    strFilePath (in place).

    Example: chginfile_re(fp, r"\\d\\.\\d", "1.2")
    See http://www.rexegg.com/regex-quickstart.html for a regex cheatsheet.

    Same bug fixes as chginfile(): no os.remove() after the move, and
    handles closed via 'with'.
    """
    patt = re.compile(regexFrom)
    with open(strFilePath, "r") as fin, open("temp.txt", "w") as fout:
        for line in fin:
            fout.write(patt.sub(strTo, line))
    shutil.move("temp.txt", strFilePath)


def cmdcall(commandString):
    """Run commandString as if entered in the CLI (naive whitespace split).

    Deprecated: prefer calling subprocess.call()/run() directly (e.g. with
    the cwd argument when directory changes are needed). Note the naive
    split() breaks on quoted arguments containing spaces.
    """
    subprocess.call(commandString.split())


def filestuff(origfilepath):
    """Example of rewriting a file line-by-line in pure Python, replacing
    a shelled-out 'sed' call.

    The original version did not even parse (unescaped quotes in the
    docstring, a missing quote in open(), and an undefined 'newvalue');
    this is a minimal working equivalent. Insert per-line transformations
    where indicated.
    """
    with open(origfilepath, "r") as fin:
        lines = fin.readlines()
    # Apply clever per-line processing here; identity transform by default.
    processed = [line for line in lines]
    with open(origfilepath, "w") as fout:
        fout.writelines(processed)
This stag's head is fully assembled. The main colour of this item is copper. It is made from resin. Delivery of this item takes 1-3 working days; express delivery is available at an additional charge.
# Copyright (c) 2005, XenSource Ltd.

# xend device controllers for blktap1/blktap2 virtual block devices.
# Python 2 code (note the 'except Exception, e' syntax below).

import string, re, os

from xen.xend.server.blkif import BlkifController
from xen.xend.XendLogging import log
from xen.util.xpopen import xPopen3

# Module-level cursors used when allocating phantom loopback devices for
# HVM guests (see BlktapController.getDeviceDetails).
phantomDev = 0;
phantomId = 0;

# Disk type tokens understood by the blktap1 userspace driver.
blktap1_disk_types = [
    'aio',
    'sync',
    'vmdk',
    'ram',
    'qcow',
    'qcow2',
    'ioemu',
    ]

# Disk type tokens understood by the blktap2 userspace driver.
blktap2_disk_types = [
    'aio',
    'ram',
    'qcow',
    'vhd',
    'remus',
    ]

blktap_disk_types = blktap1_disk_types + blktap2_disk_types

def doexec(args, inputtext=None):
    """Execute a subprocess, then return its return code, stdout and stderr.

    @param args: argument vector for the child process
    @param inputtext: optional data written to the child's stdin
    @returns: (rc, stdout-file-object, stderr-file-object); the caller is
              responsible for reading/closing the two file objects
    """
    proc = xPopen3(args, True)
    if inputtext != None:
        proc.tochild.write(inputtext)
    stdout = proc.fromchild
    stderr = proc.childerr
    rc = proc.wait()
    return (rc,stdout,stderr)

# blktap1 device controller
class BlktapController(BlkifController):
    def __init__(self, vm):
        BlkifController.__init__(self, vm)

    def frontendRoot(self):
        """@see DevController#frontendRoot"""

        return "%s/device/vbd" % self.vm.getDomainPath()

    def getDeviceDetails(self, config):
        """Compute xenstore details for a blktap1 device.

        For HVM guests a 'phantom' loopback vbd is also created in dom0 and
        linked from the frontend entry so qemu can access the image.
        """
        (devid, back, front) = BlkifController.getDeviceDetails(self, config)
        phantomDevid = 0
        wrapped = False

        try:
            imagetype = self.vm.info['image']['type']
        except:
            imagetype = ""

        if imagetype == 'hvm':
            tdevname = back['dev']
            # Candidate device-name letters for the phantom vbd.
            # NOTE(review): 'k' is skipped in this list -- presumably
            # deliberate, but worth confirming against the original tree.
            index = ['c', 'd', 'e', 'f', 'g', 'h', 'i', \
                     'j', 'l', 'm', 'n', 'o', 'p']
            while True:
                # Advance the module-level (letter, number) cursor until a
                # /dev/xvd<letter><number> name that does not yet exist is
                # found; os.stat raising means the name is free.
                global phantomDev
                global phantomId
                import os, stat

                phantomId = phantomId + 1
                if phantomId == 16:
                    if index[phantomDev] == index[-1]:
                        if wrapped:
                            # NOTE(review): VmError is not imported in this
                            # module, so raising it here would itself fail
                            # with NameError -- confirm upstream.
                            raise VmError(" No loopback block \
                                devices are available. ")
                        wrapped = True
                        phantomDev = 0
                    else:
                        phantomDev = phantomDev + 1
                    phantomId = 1
                devname = 'xvd%s%d' % (index[phantomDev], phantomId)
                try:
                    info = os.stat('/dev/%s' % devname)
                except:
                    break

            vbd = { 'mode': 'w', 'device': devname }
            fn = 'tap:%s' % back['params']

            # recurse ... by creating the vbd, then fallthrough
            # and finish creating the original device

            from xen.xend import XendDomain
            dom0 = XendDomain.instance().privilegedDomain()
            phantomDevid = dom0.create_phantom_vbd_with_vdi(vbd, fn)
            # we need to wait for this device at a higher level
            # the vbd that gets created will have a link to us
            # and will let them do it there

        # add a hook to point to the phantom device,
        # root path is always the same (dom0 tap)
        if phantomDevid != 0:
            front['phantom_vbd'] = '/local/domain/0/backend/tap/0/%s' \
                                   % str(phantomDevid)

        return (devid, back, front)

class Blktap2Controller(BlktapController):
    def __init__(self, vm):
        BlktapController.__init__(self, vm)

    def backendPath(self, backdom, devid):
        # blktap2 devices are served by blkback, so their backend lives
        # under 'vbd'; plain blktap1 devices live under 'tap'.
        if self.deviceClass == 'tap2':
            deviceClass = 'vbd'
        else:
            deviceClass = 'tap'
        return "%s/backend/%s/%s/%d" % (backdom.getDomainPath(),
                                        deviceClass,
                                        self.vm.getDomid(), devid)

    def getDeviceDetails(self, config):
        (devid, back, front) = BlktapController.getDeviceDetails(self, config)
        if self.deviceClass == 'tap2':
            # since blktap2 uses blkback as a backend the 'params' field
            # contains the path to the blktap2 device
            # (/dev/xen/blktap-2/tapdev*). As well, we need to store the
            # params used to create the blktap2 device
            # (tap:tapdisk:<driver>:/<image-path>)
            tapdisk_uname = config.get('tapdisk_uname', '')
            (_, tapdisk_params) = string.split(tapdisk_uname, ':', 1)
            back['tapdisk-params'] = tapdisk_params

        return (devid, back, front)

    def getDeviceConfiguration(self, devid, transaction = None):
        # this is a blktap2 device, so we need to overwrite the 'params'
        # field with the actual blktap2 parameters. (the vbd parameters are
        # of little use to us)
        config = BlktapController.getDeviceConfiguration(self, devid, transaction)
        if transaction is None:
            tapdisk_params = self.readBackend(devid, 'tapdisk-params')
        else:
            tapdisk_params = self.readBackendTxn(transaction, devid, 'tapdisk-params')
        if tapdisk_params:
            config['uname'] = 'tap:' + tapdisk_params

        return config

    def createDevice(self, config):
        """Create a blktap2 device, falling back to the deprecated blktap1
        path when the disk type or tap-ctl availability requires it."""
        uname = config.get('uname', '')
        try:
            # Expected forms: 'tap:tapdisk:<type>:<file>' or
            # 'tap:ioemu:<type>:<file>'; the except branch handles the short
            # 'tap:<type>:<file>' form.
            (typ, subtyp, params, file) = string.split(uname, ':', 3)
            if subtyp not in ('tapdisk', 'ioemu'):
                raise ValueError('invalid subtype')
        except:
            (typ, params, file) = string.split(uname, ':', 2)
            subtyp = 'tapdisk'

        # NOTE(review): ('tap') is just the string 'tap', so this is a
        # substring test, not tuple membership; it matches 'tap' exactly but
        # would also match 'ta' or '' -- confirm intent upstream.
        if typ in ('tap'):
            if subtyp in ('tapdisk', 'ioemu'):
                if params not in blktap2_disk_types or \
                        TapdiskController.check():
                    # pass this device off to BlktapController
                    log.warn('WARNING: using deprecated blktap module')
                    self.deviceClass = 'tap'
                    devid = BlktapController.createDevice(self, config)
                    self.deviceClass = 'tap2'
                    return devid

        device = TapdiskController.create(params, file)

        # modify the configuration to create a blkback for the underlying
        # blktap2 device. Note: we need to preserve the original tapdisk
        # uname (it is used during save/restore and for managed domains).
        config.update({'tapdisk_uname' : uname})
        config.update({'uname' : 'phy:' + device.rstrip()})

        devid = BlkifController.createDevice(self, config)
        config.update({'uname' : uname})
        config.pop('tapdisk_uname')
        return devid

    # This function is called from a thread when the
    # domain is detached from the disk.
    def finishDeviceCleanup(self, backpath, path):
        """Perform any device specific cleanup

        @backpath backend xenstore path.
        @path frontend device path
        """

        # Figure out what we're going to wait on.
        self.waitForBackend_destroy(backpath)

        TapdiskController.destroy(path)

class TapdiskException(Exception):
    pass

class TapdiskController(object):
    '''class which encapsulates all tapdisk control operations'''

    TAP_CTL = 'tap-ctl'
    TAP_DEV = '/dev/xen/blktap-2/tapdev'

    class Tapdisk(object):
        # Plain record describing one running tapdisk instance as reported
        # by 'tap-ctl list'.
        def __init__(self, pid=None, minor=-1, state=None,
                     dtype='', image=None, device=None):
            self.pid = pid
            self.minor = minor
            self.state = state
            self.dtype = dtype
            self.image = image
            self.device = device

        def __str__(self):
            return 'image=%s pid=%s minor=%s state=%s type=%s device=%s' \
                % (self.image, self.pid, self.minor, self.state, self.dtype,
                   self.device)

    @staticmethod
    def exc(*args):
        """Run 'tap-ctl <args...>' and return its stripped stdout;
        raise TapdiskException on a non-zero exit code."""
        rc, stdout, stderr = doexec([TapdiskController.TAP_CTL] + list(args))
        out, err = stdout.read().strip(), stderr.read().strip()
        stdout.close()
        stderr.close()
        if rc:
            raise TapdiskException('%s failed (%s %s %s)' % \
                                   (args, rc, out, err))
        return out

    @staticmethod
    def check():
        """Return 0 when 'tap-ctl check' succeeds (blktap2 usable), -1 otherwise."""
        try:
            TapdiskController.exc('check')
            return 0
        except Exception, e:
            log.warn("tapdisk2 check failed: %s" % e)
            return -1

    @staticmethod
    def list():
        """Parse 'tap-ctl list' output into a list of Tapdisk records."""
        tapdisks = []

        _list = TapdiskController.exc('list')
        if not _list: return []

        for line in _list.split('\n'):
            tapdisk = TapdiskController.Tapdisk()

            # Each line is a series of 'key=value' pairs.
            for pair in line.split():
                key, value = pair.split('=')
                if key == 'pid':
                    tapdisk.pid = value
                elif key == 'minor':
                    tapdisk.minor = int(value)
                    if tapdisk.minor >= 0:
                        tapdisk.device = '%s%s' % \
                            (TapdiskController.TAP_DEV, tapdisk.minor)
                elif key == 'state':
                    tapdisk.state = value
                elif key == 'args' and value.find(':') != -1:
                    tapdisk.dtype, tapdisk.image = value.split(':')

            tapdisks.append(tapdisk)

        return tapdisks

    @staticmethod
    def fromDevice(device):
        """Return the Tapdisk record backing the given /dev path, or None."""
        if device.startswith(TapdiskController.TAP_DEV):
            minor = os.minor(os.stat(device).st_rdev)
            tapdisks = filter(lambda x: x.minor == minor,
                              TapdiskController.list())
            if len(tapdisks) == 1:
                return tapdisks[0]
        return None

    @staticmethod
    def create(dtype, image):
        """Create a tapdisk for '<dtype>:<image>'; returns the device path
        printed by tap-ctl."""
        return TapdiskController.exc('create', '-a%s:%s' % (dtype, image))

    @staticmethod
    def destroy(device):
        """Tear down the tapdisk behind 'device'; free the minor when no
        process is attached."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk:
            if tapdisk.pid:
                TapdiskController.exc('destroy',
                                      '-p%s' % tapdisk.pid,
                                      '-m%s' % tapdisk.minor)
            else:
                TapdiskController.exc('free', '-m%s' % tapdisk.minor)

    @staticmethod
    def pause(device):
        """Pause the tapdisk behind 'device' (no-op if none is running)."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('pause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)

    @staticmethod
    def unpause(device):
        """Resume a paused tapdisk behind 'device' (no-op if none is running)."""
        tapdisk = TapdiskController.fromDevice(device)
        if tapdisk and tapdisk.pid:
            TapdiskController.exc('unpause',
                                  '-p%s' % tapdisk.pid,
                                  '-m%s' % tapdisk.minor)
Low Alcohol Wine: THE HEALTHY NEW TREND? More and more European winemakers are starting to look into the idea of making "light" wines -- with 3% to 6% less alcohol content than traditional varieties. As you can imagine, the wine snobs are turning up their noses at them, but is that fair? Experts say that such low-alcohol wines taste the same as higher "octane" wines, yet might be healthier for us -- all the fun of a glass of wine, but with less alcohol. Thanks for sharing the information regarding the healthy new trend of wine. I hope this low alcohol wine will not affect to both society and user. Wine is good for health and such wine which is having less amount of alcohol is surely good for health. Government should encourage such great steps. Alcohol can be both a tonic and a poison for the body.Drinking wine is a very common trend for youth.. Low alcohol wine effects less on liver. nice post !!Thank you its no doubt automated spam is a huge problem clouding blog posts. Its important to separate well meaning commenter’s with a website from those who did not even look at the material offered. I need to do this on a golf blog I own this will be a big help. I love the blog engine platform. I like concept of your post. Very creative post. Best of luck and waiting for some new ideas. I am looking for such useful information and I found here a lot of it. Nice article and exactly what I need. Most of all I like the style. Great job, thanks. Wow, this was a top quality post. In theory I'd like to write like this too, taking time and real effort to make a good article. The small amount of wine consumption is not that much harmful and if the wine is without alcohol or low alcohol will consume in small amount will not produce any effect.Thanks for the post. Good post !!This really is my very first time i visit here. I discovered so numerous fascinating stuff in your weblog particularly its discussion. 
From the plenty of comments on your content articles, I guess I am not the only one having all the enjoyment right here! keep up the great work. I often heard that alcohol is bad for health but your post make me think something different. Thank you, I've just been searching for info approximately this subject for ages and yours is the best I've found out so far. But, what concerning the bottom line? Are you certain concerning the source?
# -*- coding: utf-8 -*-

# Printable beneficiary ID card layout (front and back) rendered with
# ReportLab via the S3PDFCardLayout framework.

import os

from reportlab.lib.colors import HexColor
from reportlab.platypus import Paragraph
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_CENTER

from gluon import current

from s3.codecs.card import S3PDFCardLayout
from s3 import s3_format_fullname, s3_str

# Fonts we use in this layout
NORMAL = "Helvetica"
BOLD = "Helvetica-Bold"

# =============================================================================
class IDCardLayout(S3PDFCardLayout):
    """
        Layout for printable beneficiary ID cards
    """

    # -------------------------------------------------------------------------
    @classmethod
    def fields(cls, resource):
        """
            The layout-specific list of fields to look up from the resource

            @param resource: the resource
            @returns: list of field selectors
        """

        return ["id",
                "pe_id",
                "pe_label",
                "first_name",
                "middle_name",
                "last_name",
                "case.organisation_id$root_organisation",
                ]

    # -------------------------------------------------------------------------
    @classmethod
    def lookup(cls, resource, items):
        """
            Look up layout-specific common data for all cards

            @param resource: the resource
            @param items: the items
            @returns: a dict with common data (localized root-organisation
                      names keyed by org ID, and profile-picture file paths
                      keyed by pe_id)
        """

        db = current.db
        s3db = current.s3db

        # Fallback upload folder when the image field defines none
        defaultpath = os.path.join(current.request.folder, 'uploads')

        # Get all root organisations
        root_orgs = set(item["_row"]["org_organisation.root_organisation"]
                        for item in items)

        # Get localized root organisation names
        ctable = s3db.br_case
        represent = ctable.organisation_id.represent
        if represent.bulk:
            root_org_names = represent.bulk(list(root_orgs), show_link=False)
        else:
            root_org_names = None

        # Get all PE IDs
        pe_ids = set(item["_row"]["pr_person.pe_id"] for item in items)

        # Look up all profile pictures
        itable = s3db.pr_image
        query = (itable.pe_id.belongs(pe_ids)) & \
                (itable.profile == True) & \
                (itable.deleted == False)
        rows = db(query).select(itable.pe_id, itable.image)

        field = itable.image
        path = field.uploadfolder if field.uploadfolder else defaultpath
        pictures = {row.pe_id: os.path.join(path, row.image)
                    for row in rows if row.image}

        return {"pictures": pictures,
                "root_org_names": root_org_names,
                }

    # -------------------------------------------------------------------------
    def draw(self):
        """
            Draw the card (one side)

            Instance attributes (NB draw-function should not modify them):
            - self.canv...............the canvas (provides the drawing methods)
            - self.resource...........the resource
            - self.item...............the data item (dict)
            - self.labels.............the field labels (dict)
            - self.backside...........this instance should render the backside
                                      of a card
            - self.multiple...........there are multiple cards per page
            - self.width..............the width of the card (in points)
            - self.height.............the height of the card (in points)

            NB Canvas coordinates are relative to the lower left corner of the
               card's frame, drawing must not overshoot self.width/self.height
        """

        T = current.T

        c = self.canv
        w = self.width
        #h = self.height

        common = self.common
        blue = HexColor(0x27548F)

        item = self.item
        raw = item["_row"]

        root_org = raw["org_organisation.root_organisation"]

        # Get the localized root org name
        # NOTE(review): root_org_name is only bound inside this branch; if
        # common lacks "root_org_names", the 'if root_org_name' test below
        # would raise NameError -- presumably lookup() always supplies the
        # key; confirm.
        org_names = common.get("root_org_names")
        if org_names:
            root_org_name = org_names.get(root_org)

        #draw_field = self.draw_field
        draw_value = self.draw_value
        draw_label = self.draw_label

        code = raw["pr_person.pe_label"]

        if not self.backside:

            # Horizontal alignments (in points from the left frame edge)
            LEFT = w / 4 - 5
            CENTER = w / 2 - 5
            RIGHT = w * 3 / 4 - 5

            # Vertical alignments (in points from the bottom frame edge)
            TOP = 200
            #LOWER = [76, 58, 40]
            BOTTOM = 16

            # Organisation name
            if root_org_name:
                draw_value(LEFT, TOP, root_org_name,
                           width = 55,
                           height = 55,
                           size = 10,
                           valign = "middle",
                           )

            # Get the profile picture
            pictures = common.get("pictures")
            if pictures:
                picture = pictures.get(raw["pr_person.pe_id"])
                if picture:
                    self.draw_image(picture, RIGHT, TOP,
                                    width = 60,
                                    height = 55,
                                    valign = "middle",
                                    halign = "center",
                                    )

            # Center fields in reverse order so that vertical positions
            # can be adjusted for very long and hence wrapping strings
            y = 98

            # ID
            ah = draw_value(CENTER, y, code, height=24, size=8)
            draw_label(CENTER, y, None, T("ID Number"))

            # Name (shifted up by the actual height of the ID element)
            y += ah + 12
            name = s3_format_fullname(fname = raw["pr_person.first_name"],
                                      mname = raw["pr_person.middle_name"],
                                      lname = raw["pr_person.last_name"],
                                      truncate = False,
                                      )
            draw_value(CENTER, y, name, height=24, size=10)
            draw_label(CENTER, y, None, T("Name"))

            # Barcode
            if code:
                self.draw_barcode(s3_str(code), CENTER, BOTTOM,
                                  height = 12,
                                  halign = "center",
                                  maxwidth = w - 15,
                                  )

            # Graphics (solid footer bar and right-hand stripe)
            c.setFillColor(blue)
            c.rect(0, 0, w, 12, fill=1, stroke=0)
            c.rect(w - 12, 0, 12, 154, fill=1, stroke=0)

            # Add a cutting line with multiple cards per page
            if self.multiple:
                c.setDash(1, 2)
                self.draw_outline()
        else:

            # Horizontal alignments
            CENTER = w / 2

            # Vertical alignments
            TOP = 200
            MIDDLE = 85
            BOTTOM = 16

            # QR Code (encodes the ID plus the name parts for scanning)
            if code:
                identity = "%s//%s:%s:%s" % (code,
                                             raw["pr_person.first_name"] or "",
                                             raw["pr_person.middle_name"] or "",
                                             raw["pr_person.last_name"] or "",
                                             )
                self.draw_qrcode(identity, CENTER, MIDDLE,
                                 size=60, halign="center", valign="center")

            # Barcode
            if code:
                self.draw_barcode(s3_str(code), CENTER, BOTTOM,
                                  height = 12,
                                  halign = "center",
                                  maxwidth = w - 15
                                  )

            # Graphics (solid footer bar)
            c.setFillColor(blue)
            c.rect(0, 0, w, 10, fill=1, stroke=0)

    # -------------------------------------------------------------------------
    def draw_field(self, x, y, colname, size=7, bold=True):
        """
            Helper function to draw a centered field value of self.item above
            position (x, y)

            @param x: drawing position
            @param y: drawing position
            @param colname: the column name of the field to look up the value
            @param size: the font size (points)
            @param bold: use bold font
        """

        c = self.canv

        font = BOLD if bold else NORMAL

        value = self.item.get(colname)
        if value:
            c.setFont(font, size)
            c.drawCentredString(x, y, s3_str(value))

    # -------------------------------------------------------------------------
    def draw_value(self, x, y, value, width=120, height=40, size=7,
                   bold=True, valign=None):
        """
            Helper function to draw a centered text above position (x, y);
            allows the text to wrap if it would otherwise exceed the given
            width

            @param x: drawing position
            @param y: drawing position
            @param value: the text to render
            @param width: the maximum available width (points)
            @param height: the maximum available height (points)
            @param size: the font size (points)
            @param bold: use bold font
            @param valign: vertical alignment ("top"|"middle"|"bottom"),
                           default "bottom"

            @returns: the actual height of the text element drawn
        """

        # Preserve line breaks by replacing them with <br/> tags
        value = s3_str(value).strip("\n").replace('\n','<br />\n')

        stylesheet = getSampleStyleSheet()
        style = stylesheet["Normal"]
        style.fontName = BOLD if bold else NORMAL
        style.fontSize = size
        style.leading = size + 2
        style.splitLongWords = False
        style.alignment = TA_CENTER

        para = Paragraph(value, style)
        aw, ah = para.wrap(width, height)

        # Shrink the font (down to a 4pt floor) until the paragraph fits
        # into the available width x height box
        while((ah > height or aw > width) and style.fontSize > 4):
            # Reduce font size to make fit
            style.fontSize -= 1
            style.leading = style.fontSize + 2
            para = Paragraph(value, style)
            aw, ah = para.wrap(width, height)

        if valign == "top":
            vshift = ah
        elif valign == "middle":
            vshift = ah / 2.0
        else:
            vshift = 0

        para.drawOn(self.canv, x - para.width / 2, y - vshift)

        return ah

    # -------------------------------------------------------------------------
    def draw_label(self, x, y, colname, default=""):
        """
            Helper function to draw a centered label below position (x, y)

            @param x: drawing position
            @param y: drawing position
            @param colname: the column name of the field to look up the label
            @param default: the default label (if label cannot be looked up),
                            pass colname=None to enforce using the default
        """

        if colname:
            label = self.labels.get(colname, default)
        else:
            label = default

        c = self.canv
        c.setFont(NORMAL, 5)
        c.drawCentredString(x, y - 6, s3_str(label))

# END =========================================================================
Q1: What are the service intervals for Yamaha Motorcycles & Scooters? Q2: What are the service intervals for Yamaha Outboards? Q3: What are the service intervals for Yamaha WaveRunners? Q4: What are the service intervals for Yamaha ATVs? Q5: What are the service intervals for Yamaha Generators? The intervals above for Motorcycles & Scooters relate to road-going machines; for competition and off-road machines, please refer to the Owner's Handbook. The ATV intervals are applicable to both Utility and Leisure vehicles.
#!/usr/bin/env python
# vim: set expandtab tabstop=4 shiftwidth=4:
#
# Copyright (c) 2012, Christopher J. Kucera
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the PyInvEdit team nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VINCENT VOLLERS OR CJ KUCERA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from pymclevel import nbt

# This file primarily contains classes which represent the actual
# on-disk data for a given savefile, without our abstractions on
# top of it.

class EnchantmentSlot(object):
    """
    Holds information about a particular enchantment inside a
    particular inventory slot
    """

    def __init__(self, nbtobj=None, num=None, lvl=None):
        """
        Initializes a new object.  Either pass in 'nbtobj' or
        both 'num' and 'lvl'

        Any tags found on the NBT object other than 'id' and 'lvl'
        are preserved verbatim in self.extratags so no data is lost
        on a round-trip.
        """
        if nbtobj is None:
            self.num = num
            self.lvl = lvl
            self.extratags = {}
        else:
            self.num = nbtobj['id'].value
            self.lvl = nbtobj['lvl'].value
            self.extratags = {}
            for tagname in nbtobj:
                if tagname not in ['id', 'lvl']:
                    self.extratags[tagname] = nbtobj[tagname]

    def copy(self):
        """
        Returns a fresh object with our data
        """
        newench = EnchantmentSlot(num=self.num, lvl=self.lvl)
        # Copy the dict itself so that mutating the copy's extratags does
        # not silently alter the original's (the old code shared the dict)
        newench.extratags = dict(self.extratags)
        return newench

    def export_nbt(self):
        """
        Exports ourself as an NBT object
        """
        nbtobj = nbt.TAG_Compound()
        nbtobj['id'] = nbt.TAG_Short(self.num)
        nbtobj['lvl'] = nbt.TAG_Short(self.lvl)
        for tagname, tagval in self.extratags.iteritems():
            nbtobj[tagname] = tagval
        return nbtobj

    def has_extra_info(self):
        """
        Returns whether or not we have any extra information
        """
        return (len(self.extratags) > 0)

class InventorySlot(object):
    """
    Holds information about a particular inventory slot.  We make an effort
    to never lose any data that we don't explicitly understand, and so
    you'll see two extra dicts in here with the names extratags and
    extratagtags.  The first holds extra tag information stored right
    at the "Slot" level of the NBT structure.  Before we enabled explicit
    support for enchantments, this is the variable which held and saved
    enchantment information.  Since adding in Enchantments explicitly,
    extratagtags is used to store extra tag information found alongside
    enchantments.  The enchantments themselves are found in an "ench" tag
    which itself lives inside a tag helpfully labeled "tag," hence the
    odd naming of "extratagtags."  Alas!
    """

    def __init__(self, nbtobj=None, other=None, num=None, damage=None, count=None, slot=None):
        """
        Initializes a new object.  There are a few different valid ways of doing so:

        1) Pass in only nbtobj, as loaded from level.dat.  Everything will be
           populated from that one object.  Used on initial loads.

        2) Pass in other and slot, which is another InventorySlot object from
           which to copy all of our data.

        3) Only pass in "slot" - this will create an empty object.

        4) Pass in num, damage, count, and slot.
        """
        if nbtobj is None:
            if other is None:
                # Case 3 or 4: build from scalar arguments (possibly all None)
                self.slot = slot
                self.num = num
                self.damage = damage
                self.count = count
                self.extratags = {}
                self.extratagtags = {}
                self.enchantments = []
            else:
                # Case 2: copy-construct from another InventorySlot
                self.slot = other.slot
                self.num = other.num
                self.damage = other.damage
                self.count = other.count
                self.extratags = other.extratags
                self.extratagtags = other.extratagtags
                self.enchantments = []
                for ench in other.enchantments:
                    self.enchantments.append(ench.copy())
        else:
            # Case 1: populate from the NBT compound
            self.num = nbtobj['id'].value
            self.damage = nbtobj['Damage'].value
            self.count = nbtobj['Count'].value
            self.slot = nbtobj['Slot'].value
            self.enchantments = []
            self.extratagtags = {}
            if 'tag' in nbtobj:
                if 'ench' in nbtobj['tag']:
                    for enchtag in nbtobj['tag']['ench']:
                        self.enchantments.append(EnchantmentSlot(nbtobj=enchtag))
                for tagname in nbtobj['tag']:
                    if tagname not in ['ench']:
                        # Bugfix: was "extratagtags[tagname] = ..." (missing
                        # self.), which raised NameError for any item carrying
                        # extra tags inside its "tag" compound
                        self.extratagtags[tagname] = nbtobj['tag'][tagname]
            self.extratags = {}
            for tagname in nbtobj:
                if tagname not in ['id', 'Damage', 'Count', 'Slot', 'tag']:
                    self.extratags[tagname] = nbtobj[tagname]

        # Check to see if we're supposed to override the "slot" value
        if slot is not None:
            self.slot = slot

        # Doublecheck that we have some vars
        if self.extratags is None:
            self.extratags = {}
        if self.extratagtags is None:
            self.extratagtags = {}
        if self.enchantments is None:
            self.enchantments = []

    def __cmp__(self, other):
        """
        Comparator object for sorting
        """
        return cmp(self.num, other.num)

    def export_nbt(self):
        """
        Exports ourself as an NBT object
        """
        item_nbt = nbt.TAG_Compound()
        item_nbt['Count'] = nbt.TAG_Byte(self.count)
        item_nbt['Slot'] = nbt.TAG_Byte(self.slot)
        item_nbt['id'] = nbt.TAG_Short(self.num)
        item_nbt['Damage'] = nbt.TAG_Short(self.damage)
        for tagname, tagval in self.extratags.iteritems():
            item_nbt[tagname] = tagval
        if len(self.enchantments) > 0 or len(self.extratagtags) > 0:
            tag_nbt = nbt.TAG_Compound()
            if len(self.enchantments) > 0:
                ench_tag = nbt.TAG_List()
                for ench in self.enchantments:
                    ench_tag.append(ench.export_nbt())
                tag_nbt['ench'] = ench_tag
            for tagname, tagval in self.extratagtags.iteritems():
                tag_nbt[tagname] = tagval
            item_nbt['tag'] = tag_nbt
        return item_nbt

    def has_extra_info(self):
        """
        Returns whether or not we have any extra info in our tags
        """
        if len(self.extratags) > 0:
            return True
        if len(self.extratagtags) > 0:
            return True
        for ench in self.enchantments:
            if ench.has_extra_info():
                return True
        return False

class Inventory(object):
    """
    Holds information about our inventory as a whole
    """

    def __init__(self, data):
        """
        Loads in memory from the given NBT Object
        """
        self.inventory = {}
        for item in data:
            self._import_item(item)

    def _import_item(self, item):
        """
        Imports an item from the given NBT Object
        """
        slot = item['Slot'].value
        self.inventory[slot] = InventorySlot(nbtobj=item)

    def get_items(self):
        """
        Gets a list of all items in this inventory set
        """
        return self.inventory.values()
CHCB’s Psychiatry Program includes a team of highly-skilled, compassionate licensed Psychiatrists and Psychiatric Nurse Practitioners, offering psychiatric assessments and ongoing psychiatric care. New patient to CHCB? If you have not established care yet with CHCB, please call to schedule an appointment with a primary care physician to inquire about psychiatry. Psychiatry services are made through internal referral only. Existing CHCB patient? If you currently have a primary care physician at CHCB, inquire at your next visit to be referred to psychiatry services. Already have a CHCB Psychiatric Provider? Each of our Psychiatry Providers has their own walk-in clinic once per week. Please call 802-864-6309 for more information.
__author__ = 'elsabakiu, dmorina, neilthemathguy, megha, asmita'

from crowdsourcing import models
from rest_framework import serializers
from crowdsourcing.serializers.template import TemplateItemSerializer
from crowdsourcing.serializers.dynamic import DynamicFieldsModelSerializer
from rest_framework.exceptions import ValidationError
from django.db import transaction


class SkillSerializer(serializers.ModelSerializer):
    """Serializer for Skill objects, with soft-delete support."""

    class Meta:
        model = models.Skill
        fields = ('name', 'description', 'verified', 'deleted', 'created_timestamp', 'last_updated', 'id')
        read_only_fields = ('created_timestamp', 'last_updated')

    def create(self, validated_data):
        """Create a new, non-deleted Skill from the validated data."""
        skill = models.Skill.objects.create(deleted=False, **validated_data)
        return skill

    def update(self, instance, validated_data):
        """Update name/description; other submitted fields are ignored."""
        instance.name = validated_data.get('name', instance.name)
        instance.description = validated_data.get('description', instance.description)
        # TODO(megha.agarwal): Define method to verify the skill added
        instance.verified = True
        instance.save()
        return instance

    def delete(self, instance):
        """Soft-delete: flag the instance rather than removing the row."""
        instance.deleted = True
        instance.save()
        return instance


class WorkerSerializer(DynamicFieldsModelSerializer):
    """
    Serializer for Worker objects, augmented with several computed
    (read-only) summary fields about the worker's task history.
    """

    num_tasks = serializers.SerializerMethodField()
    task_status_det = serializers.SerializerMethodField()
    task_category_det = serializers.SerializerMethodField()
    task_price_time = serializers.SerializerMethodField()
    total_balance = serializers.SerializerMethodField()

    class Meta:
        model = models.Worker
        fields = ('profile', 'skills', 'num_tasks', 'task_status_det', 'task_category_det',
                  'task_price_time', 'id','total_balance')
        read_only_fields = ('num_tasks', 'task_status_det', 'task_category_det',
                            'task_price_time','total_balance')

    def create(self, validated_data):
        """Create a new Worker from the validated data."""
        worker = models.Worker.objects.create(**validated_data)
        return worker

    def delete(self, instance):
        """Soft-delete: flag the instance rather than removing the row."""
        instance.deleted = True
        instance.save()
        return instance

    # Returns number of tasks the worker has/had worked on
    def get_num_tasks(self, instance):
        # response_data = models.Worker.objects.filter(taskworker__worker = instance).count()
        response_data = models.TaskWorker.objects.filter(worker=instance).count()
        return response_data

    # Returns tasks grouped by task status that the worker has undertaken
    # Also returns the number of tasks within each task status
    # NOTE: returns a 2-tuple (serialized by DRF as a 2-element list)
    def get_task_status_det(self, instance):
        task_status = dict()
        number_task_per_status = dict()
        task_set = models.TaskWorker.objects.filter(worker=instance)

        # e.g. task_status = {'Accepted': ['Task1', 'Task2', 'Task3']}
        for task_worker in task_set:
            key = task_worker.task.module.status
            value = task_worker.task.module.description
            task_status.setdefault(key, [])
            task_status[key].append(value)

        # e.g. number_task_per_status = ['Accepted' : 3]
        for key_status in task_status:
            number_task_per_status[key_status] = len(task_status[key_status])

        return task_status, number_task_per_status

    # Returns the task grouped by Category that the worker has undertaken
    # Also returns the number of tasks within each category
    # NOTE: returns a 2-tuple (serialized by DRF as a 2-element list)
    def get_task_category_det(self, instance):
        task_categories = dict()
        number_task_per_category = dict()
        task_set = models.TaskWorker.objects.filter(worker=instance)

        # e.g. task_categories = {'Image': ['Task1', 'Task2', 'Task3']}
        for task_worker in task_set:
            key = task_worker.task.module.categories.name
            value = task_worker.task.module.description
            task_categories.setdefault(key, [])
            task_categories[key].append(value)

        # e.g. number_task_per_category = ['Image' : 3]
        for key_category in task_categories:
            number_task_per_category[key_category] = len(task_categories[key_category])

        return task_categories, number_task_per_category

    # Returns the number of hours spent by a worker on the task and corresponding price
    def get_task_price_time(self, instance):
        task_det = []
        task_set = models.TaskWorker.objects.filter(worker=instance)

        # e.g. task_det = [{description: 'Task1', price: '50$', time_spent_in_hrs: '2', deadline: '2015-06-11'}]
        for task_worker in task_set:
            task_info = dict()
            deadline = task_worker.task.module.project.end_date
            # TODO(megha.agarwal): Refine duration spent on a task
            date1 = task_worker.task.created_timestamp
            date2 = task_worker.task.last_updated
            time_spent = (((date2 - date1).total_seconds()) / 3600)
            task_info['description'] = task_worker.task.module.description
            task_info['deadline'] = deadline
            task_info['price'] = task_worker.task.price
            task_info['time_spent_in_hrs'] = time_spent
            task_det.append(task_info)
        return task_det

    # Sum of prices of this worker's accepted (status == 2) task results
    def get_total_balance(self,instance):
        acceptedresults = models.TaskWorkerResult.objects.all().filter(status = 2,task_worker__worker = instance)
        balance = 0
        for eachresult in acceptedresults:
            balance = balance + eachresult.task_worker.task.price
        return balance


class WorkerSkillSerializer(serializers.ModelSerializer):
    """Serializer for the Worker<->Skill association."""

    class Meta:
        model = models.WorkerSkill
        fields = ('worker', 'skill', 'level', 'verified', 'created_timestamp', 'last_updated')
        read_only_fields = ('worker', 'created_timestamp', 'last_updated', 'verified')

    def create(self, **kwargs):
        """Idempotently associate the given worker with the validated skill."""
        worker_skill = models.WorkerSkill.objects.get_or_create(worker=kwargs['worker'], **self.validated_data)
        return worker_skill


class TaskWorkerResultSerializer (serializers.ModelSerializer):
    """Serializer for a single result a worker submitted for a task item."""

    #task_worker = TaskWorkerSerializer()
    template_item = TemplateItemSerializer()

    class Meta:
        model = models.TaskWorkerResult
        fields = ('id', 'template_item', 'result', 'status', 'created_timestamp', 'last_updated')
        read_only_fields = ('template_item', 'created_timestamp', 'last_updated')


class TaskWorkerSerializer (serializers.ModelSerializer):
    """
    Serializer for task assignments.  create() atomically claims the first
    unassigned task of the requested module for the worker.
    """

    module = serializers.ModelField(model_field=models.Task()._meta.get_field('module'), write_only=True)
    task_worker_results = TaskWorkerResultSerializer(many=True, read_only=True)
    worker_alias = serializers.SerializerMethodField()

    class Meta:
        model = models.TaskWorker
        fields = ('task', 'worker', 'created_timestamp', 'last_updated', 'module',
                  'task_worker_results', 'worker_alias')
        read_only_fields = ('task', 'worker', 'created_timestamp', 'last_updated')

    def create(self, **kwargs):
        """
        Claim a task: lock (select_for_update) the first task of the module
        that is not finished (status <= 2) and not already assigned to this
        worker, create the assignment, and mark the task in-progress.

        Raises ValidationError when no eligible task remains.
        """
        module = self.validated_data.pop('module')
        module_instance = models.Module.objects.get(id=module)
        repetition = module_instance.repetition
        with transaction.atomic():
            tasks = models.Task.objects.select_for_update(nowait=False).filter(module=module).exclude(status__gt=2).exclude(task_workers__worker=kwargs['worker']).first()
            if tasks:
                task_worker = models.TaskWorker.objects.create(worker=kwargs['worker'], task=tasks)
                tasks.status = 2
                tasks.save()
                return task_worker
            else:
                raise ValidationError('No tasks left for this module')

    def get_worker_alias(self, obj):
        """Expose the worker's display alias from the related profile."""
        return obj.worker.profile.worker_alias


class WorkerModuleApplicationSerializer(serializers.ModelSerializer):
    """Serializer for a worker's application to work on a module."""

    class Meta:
        model = models.WorkerModuleApplication
        fields = ('worker', 'module', 'status', 'created_timestamp', 'last_updated')
        read_only_fields = ('worker', 'module', 'created_timestamp', 'last_updated')
Human Rights Watch (HRW) has accused the Bahraini regime of systematically targeting religious leaders of the country’s Shi’a community and violating the clerics’ right to freedom of expression and gathering. RNA – Human Rights Watch (HRW) has accused the Bahraini regime of systematically targeting religious leaders of the country’s Shi’a community and violating the clerics’ right to freedom of expression and gathering. In a statement, the New York-based rights organization said credible local sources estimate that Bahraini authorities have questioned or brought charges against at least 56 Shi’a clerics since June. “Now that the Bahraini authorities have begun to run out of human rights defenders and political activists to jail, silence, or exile, they are moving on to the Shi’a community’s religious leaders,” said Joe Stork, HRW’s deputy Middle East director. Stork further noted that Manama seems to be “willfully and recklessly fanning the flames of sectarianism, while simultaneously taking moderate voices out of play” in the tiny Persian Gulf state. HRW also referred to the cases of several Bahraini clerics who have been targeted in the government’s heavy-handed crackdown, among them Shaykh Ali Humaydan, Shaykh Isa Qasim, Shaykh Ali Salman and Shaykh Maytham al-Salman. Last week, Shaykh Humaydan, the imam of the al-Zahra Mosque in the northern town of Hamad, was handed down a one-year prison sentence on charges of “organizing illegal assemblies” in the village of Diraz. Over the past few weeks, Diraz has been the scene of peaceful demonstrations outside the home of Shaykh Isa Qasim, the spiritual leader of al-Wefaq National Islamic Society, Bahrain’s main opposition group which has recently been dissolved by the Manama regime. Shaykh Qasim has rejected the accusation. Back in May, a court increased from four to nine years the prison term for Shaykh Ali Salman, al-Wefaq’s secretary general. 
The cleric was arrested in December 2014 on charges of attempting to overthrow the Bahraini regime and collaborating with foreign powers; allegations rejected by Salman. Earlier this month, another Shi’a cleric, Shaykh Maytham al-Salman, faced police questioning on charges of taking part in an illegal gathering. Salman said he was kept in interview rooms for 26 hours without sleep and ordered to remove his religious attire with authorities seeking to “insult and intimidate” him. “These prosecutions and interrogations of clerics are the latest stage of a systematic campaign to nullify dissent and protest in Bahrain,” Stork said.
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.template import Template, Context
from django.contrib.auth import get_user_model
from django.contrib.auth.models import AnonymousUser

from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page, PAGE_TEMPLATE_VAR
from wagtail.tests.testapp.models import BusinessIndex, BusinessChild


class TestUserbarTag(TestCase):
    """Tests for the {% wagtailuserbar %} template tag."""

    def setUp(self):
        self.user = get_user_model().objects.create_superuser(
            username='test',
            email='test@email.com',
            password='password'
        )
        self.homepage = Page.objects.get(id=2)

    def dummy_request(self, user=None):
        """Build a minimal GET request; anonymous unless a user is given."""
        request = RequestFactory().get('/')
        request.user = user or AnonymousUser()
        return request

    def test_userbar_tag(self):
        """The userbar embed code renders for a logged-in superuser."""
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            PAGE_TEMPLATE_VAR: self.homepage,
            'request': self.dummy_request(self.user),
        }))

        self.assertIn("<!-- Wagtail user bar embed code -->", content)

    def test_userbar_tag_self(self):
        """
        Ensure the userbar renders with `self` instead of `PAGE_TEMPLATE_VAR`
        """
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            'self': self.homepage,
            'request': self.dummy_request(self.user),
        }))

        self.assertIn("<!-- Wagtail user bar embed code -->", content)

    def test_userbar_tag_anonymous_user(self):
        """The userbar is suppressed entirely for anonymous visitors."""
        template = Template("{% load wagtailuserbar %}{% wagtailuserbar %}")
        content = template.render(Context({
            PAGE_TEMPLATE_VAR: self.homepage,
            'request': self.dummy_request(),
        }))

        # Make sure nothing was rendered
        self.assertEqual(content, '')


class TestUserbarFrontend(TestCase, WagtailTestUtils):
    """Tests for the userbar frontend view."""

    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(id=2)

    def test_userbar_frontend(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')

    def test_userbar_frontend_anonymous_user_cannot_see(self):
        # Logout
        self.client.logout()

        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.homepage.id, )))

        # Check that the user received a forbidden message
        self.assertEqual(response.status_code, 403)


class TestUserbarAddLink(TestCase, WagtailTestUtils):
    """Tests that the 'Add' child-page link respects subpage rules."""

    fixtures = ['test.json']

    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(url_path='/home/')
        self.event_index = Page.objects.get(url_path='/home/events/')

        self.business_index = BusinessIndex(title='Business', slug='business', live=True)
        self.homepage.add_child(instance=self.business_index)

        self.business_child = BusinessChild(title='Business Child', slug='child', live=True)
        self.business_index.add_child(instance=self.business_child)

    def test_page_allowing_subpages(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.event_index.id, )))

        # page allows subpages, so the 'add page' button should show
        expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.event_index.id, ))
        expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' \
            % expected_url
        self.assertContains(response, expected_link)

    def test_page_disallowing_subpages(self):
        response = self.client.get(reverse('wagtailadmin_userbar_frontend', args=(self.business_child.id, )))

        # page disallows subpages, so the 'add page' button shouldn't show
        expected_url = reverse('wagtailadmin_pages:add_subpage', args=(self.business_index.id, ))
        expected_link = '<a href="%s" target="_parent" class="action icon icon-plus" title="Add a child page">Add</a>' \
            % expected_url
        self.assertNotContains(response, expected_link)


class TestUserbarModeration(TestCase, WagtailTestUtils):
    """Tests for the userbar moderation view (approve/reject a revision)."""

    def setUp(self):
        self.login()
        self.homepage = Page.objects.get(id=2)
        self.homepage.save_revision()
        self.revision = self.homepage.get_latest_revision()

    def test_userbar_moderation(self):
        response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))

        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'wagtailadmin/userbar/base.html')

    def test_userbar_moderation_anonymous_user_cannot_see(self):
        # Logout
        self.client.logout()

        response = self.client.get(reverse('wagtailadmin_userbar_moderation', args=(self.revision.id, )))

        # Check that the user received a forbidden message
        self.assertEqual(response.status_code, 403)
For the release of the new Fairphone 2, industrial designer Alan Nguyen teamed up with 3D Hubs to create the world’s first 3D printed phone accessories that are made from wood. All of the designs in the ongoing collection can be downloaded and 3D printed in local neighbourhoods. Because of the locality and the material used, the process is as sustainable as can be. Both of the currently available accessories – the Fairphone 2 Nightstand and the Fairphone 2 Amplifier – were designed using 3DSMax and 3D printed using an Ultimaker 3D printer. The Nightstand was inspired by the simplicity of the Fairphone 2’s clean aesthetic and the Amplifier was inspired by conch shells due to their naturally acoustic structure.
# coding=utf-8
__author__ = 'Michael Nickey'

"""
REQUEST:
On a Host that is connected to a LAN, you have a log-file that contains a list of users who have logged onto some of
the machines on the network, in the past 24 hrs. Write a script that searches for computers on the network that are
currently online, and then sends a text-file to appropriate users on the online computers. At the end of the run, the
script should mark in the log file, computers to which the file has been transmitted. In the log file, it should also
add computers that have been discovered in the current traversal, which were not listed originally.

Please specify any assumptions you make and explain how you’d test your code.

Assumptions:
    The log file is in csv form.
    The log file contains a date/time stamp username user-email address computer-id online status (online vs offline)
    script will be in the same directory as the logfile or the logfile will be copied to the same directory
"""

# Imports
import csv
import datetime
import logging

# SET GLOBAL VARIABLES
# This is the time delta. To change this time, change the number to the days that you want to search the log for.
DAY = datetime.timedelta(days=4)
# Format of the dates that are being compared
# NOTE(review): FORMAT is defined but never used; dates are parsed manually
# in csv_dict_reader by splitting on "-"
FORMAT = "%Y-%m-%d"
# Create a date variable for the current date
TODAY = datetime.date.today()
# Set the log output file, and the log level
# NOTE(review): output.txt is both the logging sink and the email attachment;
# log records may still be buffered when the file is attached — verify
logging.basicConfig(filename="output.txt", level=logging.DEBUG)

#----------------------------------------------------------------------
def csv_dict_reader(file_obj):
    """
    Read a CSV file using csv.DictReader

    :param file_obj: This is the logfile that is to be read. The log file needs to be in CSV format
    :return: info from the CSV log file of those users that have a time delta greater than what is set in DAY. In
    this example the time delta is set to 4 days.
    """
    reader = csv.DictReader(file_obj, delimiter=',')
    for line in reader:
        # Parse the "Date" column ("YYYY-MM-DD") into a datetime.date
        line["Date"] = datetime.date(*(int(x) for x in line["Date"].split("-")))
        if (TODAY - (line["Date"] )) < DAY:
            # Log the computers that have been accessed within the DAY window
            # (currently 4 days, despite the "past 24 hrs" in the brief)
            logging.info("{} -- User: {} at {} accessed {}".format(TODAY, line["Username"],
                                                                   line["User Email"], line["Computer ID"]))
            print line["Username"], line["User Email"]
            # One email is sent per matching log line
            send_the_Mail(line["User Email"])

def send_the_Mail(recipient):
    """
    This function takes in recipient and will send the email to that email address with an attachment.

    :param recipient: the email of the person to get the text file attachment
    """
    # Import the needed email libraries
    from email.mime.text import MIMEText
    from email.mime.application import MIMEApplication
    from email.mime.multipart import MIMEMultipart
    from smtplib import SMTP

    # Set the server and the message details
    send_from = 'mnickey@gmail.com'
    send_to = recipient
    subject = "Computer Access text file"

    # Create the multipart
    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = send_from
    msg['To'] = send_to
    # msg preable for those that do not have a email reader
    msg.preamble = 'Multipart message.\n'

    # Text part of the message
    part = MIMEText("This is an automated message. No need to reply... it won't be answered anyway :) ")
    msg.attach(part)

    # The attachment part of the message
    part = MIMEApplication(open("output.txt", "rb").read())
    part.add_header('Content-Disposition', 'attachment', filename="output.txt")
    msg.attach(part)

    # Create an instance of a SMTP server
    # NOTE(review): port is passed as a string; smtplib tolerates it, but an
    # int (587) is conventional
    smtp = SMTP(host='smtp.gmail.com', port='587')
    # Start the server
    smtp.ehlo()
    smtp.starttls()
    # NOTE(review): placeholder credentials — must be replaced before use;
    # consider reading them from the environment rather than source code
    smtp.login('ENTER YOUR EMAIL LOGIN USERNAME', 'ENTER YOUR PASSWORD HERE')
    # Send the email
    smtp.sendmail(msg['From'], msg['To'], msg.as_string() )
    smtp.quit()

if __name__ == "__main__":
    with open("ComputerLog.csv",) as f_obj:
        csv_dict_reader(f_obj)
My first – and last, so far – appearance on the ABC's Media Watch program was in February 2007. It had been a particularly hot summer. John Howard discovered climate change. And in capital cities across the country, would-be tenants jostled for position at crowded open inspections. On a quiet news day, just after New Year's, we splashed the paper with a story of mine with the headline "Through the roof: rents to soar 20%". It was, necessarily, a condensed summary of a longer story about how generous new tax breaks on superannuation taxes were tipped to encourage investors to dump investment properties and plough the money into super instead. The Treasurer predicted as much. The head of the Real Estate Institute of NSW told me landlords would look to increase rents by "as much as 20 per cent" – a figure so fantastical sounding it attracted the attention of Media Watch's producers, who ran an omnibus story critical in general of the media's reporting of the national rental squeeze, particularly by media outlets with a financial interest in spruiking property. Friends chastised me in the pub for giving their landlords ammunition to lift rents. Turns out, however, the prediction wasn't too far off in magnitude, if perhaps a little premature. According to rental bond figures analysed by the NSW Tenants' Union, Sydney rents rose 12.9 per cent in 2007, before the annual pace of rental price growth peaked at 15.15 per cent over the year ended the September quarter 2008. And it wasn't just Sydney. The surge in rents was a national affair which followed an unusually weak period of rental growth in the early 2000s after the concessions on capital gains tax sparked a surge in investment properties. But by 2007, the new supply had worn off, population growth was surging and interest rates rose to near double digits. According to the rental component of the consumer price index, rents nationally rose by 8.4 per cent in 2008. 
And to prove I am not in the pocket of the real estate lobby, here it is: "Through the floor: rents to slump in 2017". I won't be so bold as to offer a numerical prediction. Once burnt, twice shy. But the slump in rents is upon us. Rents are rising at their slowest annual pace in nearly two decades, according to last week's Reserve Bank statement on monetary policy, which also notes the value of new building approvals has reached a record share of GDP. Perth is the epicentre of landlord pain, with slow population growth and the end of the mining boom dampening demand for rental properties. In other capital cities, particularly in inner Melbourne and Brisbane, booming apartment construction has sparked concern about oversupply and rising settlement failures on off-the-plan developments. According to the Reserve: "Further increases in housing supply over coming years is expected to result in a protracted period of low rent inflation." Indeed, weak rent increases have been a major driver of the recent surprising weakness in general price inflation. According to the Bureau of Statistics, which surveys a national sample of real estate agents about rent increases, rents increased nationally by just 0.7 per cent over the year to the September quarter. The NSW Tenants' Union has begun producing a quarterly "Rent tracker" report, based on a survey of rental bonds lodged with Housing NSW which has been running for 26 years. It finds the average value of rental bonds lodged in Greater Sydney rose 5 per cent last financial year and just 3 per cent in the rest of NSW. As with all things property, there are a range of data sources for renters looking to compare their rent. On the lower side of estimates, a "Rental Review" by Core Logic RP Data as at August 31 estimated median weekly rents had fallen half a per cent over the previous year, to be 1.4 per cent lower than their peak in May 2015. 
Surveys of advertised rents by Domain Group/Australian Property Monitors, owned by Fairfax, and SQM Research paint a similar picture of weak rental growth. "Renters are now in a much better position to negotiate," according to Core Logic's head of research, Cameron Kusher: "As long as wages growth continues to stagnate, coupled with historically high levels of new dwelling construction and slowing population growth, landlords won't have much scope to increase rents." Rents are still at historic highs, but the heat is out of the market. Now is the time to haggle before signing your next lease. And if your landlord comes knocking for a rent rise, feel free to show them this article.
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
"""Tests for the ``conda-kapsel activate`` command."""
from __future__ import absolute_import, print_function

# shlex.quote is Python 3; pipes.quote is the Python 2 equivalent.
try:
    from shlex import quote
except ImportError:
    from pipes import quote

import platform

from conda_kapsel.commands.main import _parse_args_and_run_subcommand
from conda_kapsel.commands.activate import activate, main
from conda_kapsel.commands.prepare_with_mode import UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT
from conda_kapsel.internal.test.tmpfile_utils import with_directory_contents_completing_project_file
from conda_kapsel.project_file import DEFAULT_PROJECT_FILENAME
from conda_kapsel.local_state_file import DEFAULT_LOCAL_STATE_FILENAME
from conda_kapsel.test.project_utils import project_dir_disable_dedicated_env


class Args(object):
    """Minimal stand-in for the argparse namespace that ``main`` consumes."""

    def __init__(self, **kwargs):
        # Defaults mirror the CLI defaults; any kwarg overrides them.
        self.directory = "."
        self.env_spec = None
        self.mode = UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT
        self.command = None
        for key in kwargs:
            setattr(self, key, kwargs[key])


def _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch):
    # Pretend every socket probe succeeds, and capture the arguments of the
    # most recent call so tests can assert which host/port was probed.
    can_connect_args = dict()

    def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
        can_connect_args['host'] = host
        can_connect_args['port'] = port
        can_connect_args['timeout_seconds'] = timeout_seconds
        return True

    monkeypatch.setattr("conda_kapsel.plugins.network_util.can_connect_to_socket", mock_can_connect_to_socket)

    return can_connect_args


def test_activate(monkeypatch):
    # activate() on a project requiring Redis should emit exactly the
    # PROJECT_DIR and REDIS_URL export lines.
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)

    def activate_redis_url(dirname):
        project_dir_disable_dedicated_env(dirname)
        result = activate(dirname, UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT, conda_environment=None, command_name=None)

        assert can_connect_args['port'] == 6379

        assert result is not None
        if platform.system() == 'Windows':
            # Windows adds PATH exports we deliberately ignore here.
            result = [line for line in result if not line.startswith("export PATH")]
            print("activate changed PATH on Windows and ideally it would not.")
        if len(result) > 2:
            # Debug aid: dump state before the assertion below fails.
            import os
            print("os.environ=" + repr(os.environ))
            print("result=" + repr(result))
        assert ['export PROJECT_DIR=' + quote(dirname), 'export REDIS_URL=redis://localhost:6379'] == result

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, activate_redis_url)


def test_activate_quoting(monkeypatch):
    # Variable values with shell metacharacters must be shell-quoted in
    # the emitted export lines.
    def activate_foo(dirname):
        project_dir_disable_dedicated_env(dirname)
        result = activate(dirname, UI_MODE_TEXT_ASSUME_YES_DEVELOPMENT, conda_environment=None, command_name=None)
        assert result is not None
        if platform.system() == 'Windows':
            result = [line for line in result if not line.startswith("export PATH")]
            print("activate changed PATH on Windows and ideally it would not.")
        assert ["export FOO='$! boo'", 'export PROJECT_DIR=' + quote(dirname)] == result

    with_directory_contents_completing_project_file(
        {
            DEFAULT_PROJECT_FILENAME: """
variables:
  FOO: {}
""",
            DEFAULT_LOCAL_STATE_FILENAME: """
variables:
  FOO: $! boo
"""
        }, activate_foo)


def test_main(monkeypatch, capsys):
    # Full CLI entry point: exports should land on stdout, nothing on stderr.
    def mock_conda_create(prefix, pkgs, channels):
        # Guard: activation must not create a conda environment.
        raise RuntimeError("this test should not create an environment in %s with pkgs %r" % (prefix, pkgs))

    monkeypatch.setattr('conda_kapsel.internal.conda_api.create', mock_conda_create)

    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)

    def main_redis_url(dirname):
        project_dir_disable_dedicated_env(dirname)
        main(Args(directory=dirname))

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, main_redis_url)

    assert can_connect_args['port'] == 6379

    out, err = capsys.readouterr()
    assert "export REDIS_URL=redis://localhost:6379\n" in out
    assert "" == err


def test_main_dirname_not_provided_use_pwd(monkeypatch, capsys):
    # With no --directory flag the CLI should resolve "." to the cwd.
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)

    def main_redis_url(dirname):
        from os.path import abspath as real_abspath

        def mock_abspath(path):
            # Redirect "." to the temp project dir; leave other paths alone.
            if path == ".":
                return dirname
            else:
                return real_abspath(path)

        monkeypatch.setattr('os.path.abspath', mock_abspath)

        project_dir_disable_dedicated_env(dirname)
        code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate'])
        assert code == 0

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, main_redis_url)

    assert can_connect_args['port'] == 6379

    out, err = capsys.readouterr()
    assert "export PROJECT_DIR" in out
    assert "export REDIS_URL=redis://localhost:6379\n" in out
    assert "" == err


def test_main_dirname_provided_use_it(monkeypatch, capsys):
    # An explicit --directory argument should be honored.
    can_connect_args = _monkeypatch_can_connect_to_socket_to_succeed(monkeypatch)

    def main_redis_url(dirname):
        project_dir_disable_dedicated_env(dirname)
        code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate', '--directory', dirname])
        assert code == 0

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, main_redis_url)

    assert can_connect_args['port'] == 6379

    out, err = capsys.readouterr()
    assert "export PROJECT_DIR" in out
    assert "export REDIS_URL=redis://localhost:6379\n" in out
    assert "" == err


def test_main_bad_command_provided(capsys):
    # An unknown --command name should exit nonzero with an error message.
    def check(dirname):
        project_dir_disable_dedicated_env(dirname)
        code = _parse_args_and_run_subcommand(['conda-kapsel', 'activate', '--directory', dirname, '--command', 'nope'])
        assert code == 1

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, check)

    out, err = capsys.readouterr()
    assert err.startswith("Command name 'nope' is not in")


def _monkeypatch_can_connect_to_socket_to_fail_to_find_redis(monkeypatch):
    # Simulate: default Redis port is down, every other port is occupied.
    def mock_can_connect_to_socket(host, port, timeout_seconds=0.5):
        if port == 6379:
            return False  # default Redis not there
        else:
            return True  # can't start a custom Redis here

    monkeypatch.setattr("conda_kapsel.plugins.network_util.can_connect_to_socket", mock_can_connect_to_socket)


def test_main_fails_to_redis(monkeypatch, capsys):
    # When no Redis can be found or started, main should fail with code 1
    # and explain the missing requirement on stderr.
    _monkeypatch_can_connect_to_socket_to_fail_to_find_redis(monkeypatch)

    def main_redis_url(dirname):
        project_dir_disable_dedicated_env(dirname)
        code = main(Args(directory=dirname))
        assert 1 == code

    with_directory_contents_completing_project_file(
        {DEFAULT_PROJECT_FILENAME: """
services:
  REDIS_URL: redis
"""}, main_redis_url)

    out, err = capsys.readouterr()
    assert "missing requirement" in err
    assert "All ports from 6380 to 6449 were in use" in err
Out this week: Frankenstein in Baghdad by Ahmed Saadawi; The Sky Is Yours by Chandler Klang Smith; Brass by Xhenet Aliu; Eternal Life by Dara Horn; and Winter by Karl Ove Knausgaard. Want to learn more about upcoming titles? Go read our most recent book preview. Want to help The Millions keep churning out great books coverage? Sign up to be a member today. Recommended Weekend Podcast: Helen DeWitt talks with Anne Strainchamps about her novel Lightning Rods, which we at The Millions loved a lot. Rigoberto González on Teaching the M.F.A. “I was enrolled in a writing program to imagine a cultured life, not just to dream about the rewards of being a writer.” Rigoberto González for Publishers Weekly on why he attended and later returned to teach at an M.F.A. program. “I have the impression that the shelves of new releases in US bookstores are becoming more globalized. They’re still not as international as those in bookstores in Rome or Paris or Mexico City or Buenos Aires, where there is a much higher percentage of books in translation. But I think works in translation are becoming much more visible.” Mexican author Álvaro Enrigue contends that trends in publishing mean we’ll enjoy ever-increasing bounties of translated work. See also: translator Alison Anderson on “Ferrante Fever” and what a great translation adds to the original work.