text
stringlengths
29
850k
""" MySQL Database class handler """ # -*- coding: utf-8 -*- # emma # # Copyright (C) 2006 Florian Schmidt (flo@fastflo.de) # 2014 Nickolay Karnaukhov (mr.electronick@gmail.com) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from emmalib.providers.mysql.MySqlTable import MySqlTable class MySqlDb(object): """ Class wraps MySQL Database logic """ def __init__(self, host, name=None): self.handle = host.handle self.host = host self.charset = self.host.charset if name is not None: self.name = name self.expanded = False self.status_headers = [] self.tables = {} else: # print "unpickling tables!", self.handle for name, table in self.tables.iteritems(): table.handle = self.handle # self.id = id def refresh(self): """ :return: """ self.host.select_database(self) if self.host.is_at_least_version("4.1.1"): self.host.query("show variables like 'character_set_database'") result = self.handle.store_result() row = result.fetch_row() self.charset = row[0][1] # print "using database charset %r" % self.charset else: self.charset = self.host.charset # print "using server charset %r for this database" % self.charset if not self.host.query("show table status"): return new_tables = [] result = self.handle.store_result() self.status_headers = [] for row in result.describe(): self.status_headers.append(row[0]) old = 
dict(zip(self.tables.keys(), range(len(self.tables)))) for row in result.fetch_row(0): if not row[0] in old: # print "new table", row[0] self.tables[row[0]] = MySqlTable(self, row, result.describe()) new_tables.append(row[0]) else: # print "known table", row[0] # todo update self.tables[row[0]] with row! del old[row[0]] for table in old: # print "destroy table", table del self.tables[table] return new_tables def query(self, query, check_use=True, append_to_log=True): """ :param query: :param check_use: :param append_to_log: :return: """ self.host.select_database(self) return self.host.query(query, check_use, append_to_log) def get_escaped_name(self): """ :@return : str """ return self.name.replace('&', '&amp;').replace('<', '&lt;')
Brown County State Park, near Nashville, IN. 1/2 hour before sunset Lake Ogle BCSP, facing east. Lake Ogle Brown County State Park, near Nashville, IN. Sunrise Brown County State Park, near Nashville, IN. Sugar Maple Leaves (Acer saccharum).
#!/usr/bin/env python import sys from concurrent.futures import ThreadPoolExecutor from tornado import web, wsgi from tornado.concurrent import run_on_executor from tornado.web import url from tornado.ioloop import IOLoop from tornado.options import options, define from wsgi import application as django_app from django.conf import settings define('port', type=int, default=8000) define('host', type=str, default='127.0.0.1') options.parse_command_line() APP_SETTINGS = { 'static_path': settings.STATIC_ROOT, 'debug': settings.DEBUG, 'gzip': True} class ThreadMixin(object): executor = ThreadPoolExecutor(max_workers=4) class FallbackHandler(ThreadMixin, web.FallbackHandler): @run_on_executor def prepare(self): self.fallback(self.request) self._finished = True application = web.Application([ url(r'/media/(.*)', web.StaticFileHandler, {'path': settings.MEDIA_ROOT}), url(r'.*', FallbackHandler, {'fallback': wsgi.WSGIContainer(django_app)})], **APP_SETTINGS) def main(): if APP_SETTINGS['debug']: sys.stdout.write('Host: {}\n'.format(options.host)) sys.stdout.write('Port: {}\n\n'.format(options.port)) application.listen(options.port, options.host) try: IOLoop.instance().start() except KeyboardInterrupt: IOLoop.instance().stop() if __name__ == '__main__': main()
Madden NFL Mobile is a great American football video sport based on the Countrywide Rugby League, published by simply EA Sports. Madden NATIONAL FOOTBALL LEAGUE Mobile is basically a mobile version of Madden Ultimate Team (MUT), which usually has the functions associated with players and cards. Inside order to get these kinds of, players can participate within \"live broadcasting activities\". It allows players to enjoy a full 16 video games, extending to the Super Bowl. As a totally free value-added game, players can spend money to purchase luggage and \"bundles\", including some number of bags and surprise hats. Games can be based on levels, in addition to some functions like video games are limited before achieving some level. Another pattern is labeled \"alliance\".Should you cherished this short article as well as you want to obtain more information with regards to /www.mmorog.net/nfl-coins" target="_blank" rel="nofollow">https /www.mmorog.net/nfl-coins i implore you to pay a visit to our own web-page. Right now there is a new plan called Zero Chill within MUT, which has an From Position (OOP) participant. OOP Master is Bo Jackson, and you could choose about three versions (95 OVR Crime, 93 OVR Defense or 91 OVR Punter/Kicker) that will can can be found in your collection at the same time. You can view OOP Master Bo Jackson\'s complete score below. Madden NFL 19 is a game based about American football. The game was developed by EA sports activities. The game was well received by different critics.If you liked this short article and you would like to obtain additional facts pertaining to /www.mmorog.net/nfl-coins" target="_blank" rel="nofollow">https /www.mmorog.net/nfl-coins kindly browse through our own webpage.Our store provides insane 19 and MUT 19 for those gamers. You could buy NFL 19 money at the cheapest value. Note that the application of against the law leveling and coin services may terminate the account! 
We could guarantee 100% safety in our mobile Madden cash. You just need to to select the proper number and then quickly complete our automated peruse process. Ninety % of cheap, crazy mobile coin orders can be designed in 10 minutes.
'''
Created on Apr 12, 2015

@author: Akshat
'''
import pandas as pd


class ResultCollator(object):
    """
    Collects per-voting-rule (distance, p-value) results from a list of
    worker objects into a DataFrame, appends them to a CSV file, and can
    plot the collated distances.
    """

    # Maps a worker attribute holding a (distance, p-value) pair to the
    # column names used in the collated DataFrame / CSV.
    # NOTE: 'plurality_dpval' reproduces the original column name
    # (likely a typo for 'plurality_pval') for output compatibility.
    _RESULT_COLUMNS = (
        ('distance_stv', 'stv_dist', 'stv_pval'),
        ('distance_irv', 'irv_dist', 'irv_pval'),
        ('distance_plu', 'plurality_dist', 'plurality_dpval'),
        ('distance_pluatL', 'plurality_at_large_dist', 'plurality_at_large_pval'),
    )

    # Collated results; populated by collate().
    distance_df = None

    def __init__(self, worker_list, output_directory):
        '''
        Constructor

        :param worker_list: objects that may carry distance_stv,
            distance_irv, distance_plu and/or distance_pluatL attributes,
            each a (distance, p-value) pair
        :param output_directory: directory where result_distance.csv and
            plots are written
        '''
        self.output_directory = output_directory
        self.worker_list = worker_list

    def collate(self):
        """Build one row per worker and append them to result_distance.csv."""
        rows = []
        for worker in self.worker_list:
            row = {}
            for attr, dist_col, pval_col in self._RESULT_COLUMNS:
                # Workers only carry the attributes for the rules they ran.
                if hasattr(worker, attr):
                    result = getattr(worker, attr)
                    row[dist_col] = result[0]
                    row[pval_col] = result[1]
            rows.append(row)

        df = pd.DataFrame(rows)
        self.distance_df = df
        ocsv_path = self.output_directory + '/result_distance.csv'
        # Append without header so repeated runs accumulate into one CSV.
        with open(ocsv_path, 'a') as f:
            df.to_csv(f, header=False, index=False)

    def graph_results(self):
        """Scatter-plot the STV distances collected by collate()."""
        # matplotlib is imported lazily so that collate() is usable in
        # environments without a plotting backend.
        import matplotlib.pyplot as plt

        fig = plt.figure()
        ax = fig.add_subplot(111)
        # ax.set_xlim([0,10])
        # ax.set_ylim([0,10])
        ax.set_title('All Percentiles')
        ax.set_xlabel("index")
        ax.set_ylabel("Price($)")
        # BUG FIX: the DataFrame built by collate() has no 'stv' column;
        # the STV distance values live in 'stv_dist'.
        ax.scatter(self.distance_df.index.values, self.distance_df['stv_dist'],
                   edgecolors='none', s=5, color='red',
                   label='50 percentile', alpha=0.5)
        ax.set_ylim(80)
        ax.set_xlim(0)
        ax.legend(loc=0, scatterpoints=1)
        fig.set_size_inches(15, 5)
        # NOTE(review): original path concatenation kept as-is (no '/'
        # separator), matching the previous output file name.
        fig.savefig(self.output_directory + 'distance' + '_ALL.png',
                    bbox_inches='tight')
Oceanic islands located in Pingyang County. BirdLife International (2019) Important Bird Areas factsheet: Nanji Islands Nature Reserve. Downloaded from http://www.birdlife.org on 21/04/2019.
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.

import pandas as pd
import numpy as np
from matplotlib import pyplot as plt

from GridCal.Engine.basic_structures import Logger
from GridCal.Engine.Devices.bus import Bus
from GridCal.Engine.Devices.enumerations import BranchType
from GridCal.Engine.Devices.underground_line import UndergroundLineType
from GridCal.Engine.Devices.editable_device import EditableDevice, DeviceType, GCProp
from GridCal.Engine.Devices.tower import Tower


def firing_angles_to_reactive_limits(P, alphamin, alphamax):
    """
    Compute converter reactive power limits from its firing angle limits.

    :param P: active power (scalar or array)
    :param alphamin: minimum firing angle in radians
    :param alphamax: maximum firing angle in radians
    :return: (Qmin, Qmax)
    """
    # minimum reactive power calculated under assumption of no overlap angle
    # i.e. power factor equals to tan(alpha)
    Qmin = P * np.tan(alphamin)

    # maximum reactive power calculated when overlap angle reaches max
    # value (60 deg). I.e.
    # cos(phi) = 1/2*(cos(alpha)+cos(delta))
    # Q = P*tan(phi)
    phi = np.arccos(0.5 * (np.cos(alphamax) + np.cos(np.deg2rad(60))))
    Qmax = P * np.tan(phi)

    return Qmin, Qmax


class HvdcLine(EditableDevice):
    """
    HVDC line model: a point-to-point DC link between two AC
    :ref:`buses<Bus>`, controlled by an active power set point.

    The link transfers `Pset` MW between `bus_from` and `bus_to`
    (direction given by the sign of `Pset`), losing
    ``loss_factor * Pset`` at the receiving end. The converter firing
    angle limits at each side determine the reactive capability, see
    :func:`firing_angles_to_reactive_limits`.
    """

    def __init__(self, bus_from: Bus = None, bus_to: Bus = None, name='HVDC Line', idtag=None, active=True,
                 rate=1.0, Pset=0.0, loss_factor=0.0, Vset_f=1.0, Vset_t=1.0, length=1.0, mttf=0.0, mttr=0.0,
                 overload_cost=1000.0, min_firing_angle_f=-1.0, max_firing_angle_f=1.0, min_firing_angle_t=-1.0,
                 max_firing_angle_t=1.0, active_prof=np.ones(0, dtype=bool), rate_prof=np.zeros(0),
                 Pset_prof=np.zeros(0), Vset_f_prof=np.ones(0), Vset_t_prof=np.ones(0),
                 overload_cost_prof=np.zeros(0), contingency_factor=1.0):
        """
        HVDC Line model
        :param bus_from: Bus from
        :param bus_to: Bus to
        :param idtag: id tag of the line
        :param name: name of the line
        :param active: Is the line active?
        :param rate: Line rate in MVA
        :param Pset: Active power set point
        :param loss_factor: Losses factor (p.u.)
        :param Vset_f: Voltage set point at the "from" side
        :param Vset_t: Voltage set point at the "to" side
        :param min_firing_angle_f: minimum firing angle at the "from" side
        :param max_firing_angle_f: maximum firing angle at the "from" side
        :param min_firing_angle_t: minimum firing angle at the "to" side
        :param max_firing_angle_t: maximum firing angle at the "to" side
        :param overload_cost: cost of a line overload in EUR/MW
        :param mttf: Mean time to failure in hours
        :param mttr: Mean time to recovery in hours
        :param length: line length in km
        :param active_prof: profile of active states (bool)
        :param rate_prof: Profile of ratings in MVA
        :param Pset_prof: Active power set points profile
        :param Vset_f_prof: Voltage set points at the "from" side profile
        :param Vset_t_prof: Voltage set points at the "to" side profile
        :param overload_cost_prof: Profile of overload costs in EUR/MW
        :param contingency_factor: rating multiplier applied in contingencies
        """
        EditableDevice.__init__(self,
                                name=name,
                                idtag=idtag,
                                active=active,
                                device_type=DeviceType.HVDCLineDevice,
                                editable_headers={'name': GCProp('', str, 'Name of the line.'),
                                                  'idtag': GCProp('', str, 'Unique ID'),
                                                  'bus_from': GCProp('', DeviceType.BusDevice,
                                                                     'Name of the bus at the "from" side of the line.'),
                                                  'bus_to': GCProp('', DeviceType.BusDevice,
                                                                   'Name of the bus at the "to" side of the line.'),
                                                  'active': GCProp('', bool, 'Is the line active?'),
                                                  'rate': GCProp('MVA', float, 'Thermal rating power of the line.'),
                                                  'contingency_factor': GCProp('p.u.', float,
                                                                               'Rating multiplier for contingencies.'),
                                                  'Pset': GCProp('MW', float, 'Set power flow.'),
                                                  'loss_factor': GCProp('p.u.', float,
                                                                        'Losses factor.\n'
                                                                        'The losses are computed as losses=Pfset x Ploss'),
                                                  'Vset_f': GCProp('p.u.', float, 'Set voltage at the from side'),
                                                  'Vset_t': GCProp('p.u.', float, 'Set voltage at the to side'),
                                                  'min_firing_angle_f': GCProp('rad', float,
                                                                               'minimum firing angle at the '
                                                                               '"from" side.'),
                                                  'max_firing_angle_f': GCProp('rad', float,
                                                                               'maximum firing angle at the '
                                                                               '"from" side.'),
                                                  'min_firing_angle_t': GCProp('rad', float,
                                                                               'minimum firing angle at the '
                                                                               '"to" side.'),
                                                  'max_firing_angle_t': GCProp('rad', float,
                                                                               'maximum firing angle at the '
                                                                               '"to" side.'),
                                                  'mttf': GCProp('h', float, 'Mean time to failure, '
                                                                             'used in reliability studies.'),
                                                  'mttr': GCProp('h', float, 'Mean time to recovery, '
                                                                             'used in reliability studies.'),
                                                  'length': GCProp('km', float, 'Length of the branch '
                                                                                '(not used for calculation)'),
                                                  'overload_cost': GCProp('e/MWh', float,
                                                                          'Cost of overloads. Used in OPF.'),
                                                  },
                                non_editable_attributes=['bus_from', 'bus_to', 'idtag'],
                                properties_with_profile={'active': 'active_prof',
                                                         'rate': 'rate_prof',
                                                         'Pset': 'Pset_prof',
                                                         'Vset_f': 'Vset_f_prof',
                                                         'Vset_t': 'Vset_t_prof',
                                                         'overload_cost': 'overload_cost_prof'})

        # connectivity
        self.bus_from = bus_from
        self.bus_to = bus_to

        # List of measurements
        self.measurements = list()

        # line length in km
        self.length = length

        self.Pset = Pset

        self.loss_factor = loss_factor

        self.mttf = mttf
        self.mttr = mttr

        self.overload_cost = overload_cost

        self.Vset_f = Vset_f
        self.Vset_t = Vset_t

        # converter / inverter firing angles
        self.min_firing_angle_f = min_firing_angle_f
        self.max_firing_angle_f = max_firing_angle_f
        self.min_firing_angle_t = min_firing_angle_t
        self.max_firing_angle_t = max_firing_angle_t

        # reactive limits at each end derived from the firing angle limits
        self.Qmin_f, self.Qmax_f = firing_angles_to_reactive_limits(self.Pset,
                                                                    self.min_firing_angle_f,
                                                                    self.max_firing_angle_f)

        self.Qmin_t, self.Qmax_t = firing_angles_to_reactive_limits(self.Pset,
                                                                    self.min_firing_angle_t,
                                                                    self.max_firing_angle_t)

        self.overload_cost_prof = overload_cost_prof

        self.Pset_prof = Pset_prof
        self.active_prof = active_prof
        self.Vset_f_prof = Vset_f_prof
        self.Vset_t_prof = Vset_t_prof

        # branch rating in MVA
        self.rate = rate
        self.contingency_factor = contingency_factor
        self.rate_prof = rate_prof

    def get_from_and_to_power(self):
        """
        Get the power set at both ends accounting for meaningful losses
        :return: power from, power to
        """
        # A == 1 when the flow goes from->to, 0 otherwise; losses are
        # always subtracted at the receiving end.
        A = int(self.Pset > 0)
        B = 1 - A

        Pf = - self.Pset * A + self.Pset * (1 - self.loss_factor) * B
        Pt = self.Pset * A * (1 - self.loss_factor) - self.Pset * B

        return Pf, Pt

    def get_from_and_to_power_profiles(self):
        """
        Get the power set at both ends accounting for meaningful losses
        :return: power from, power to
        """
        # Vectorized version of get_from_and_to_power over Pset_prof.
        A = (self.Pset_prof > 0).astype(int)
        B = 1 - A

        Pf = - self.Pset_prof * A + self.Pset_prof * (1 - self.loss_factor) * B
        Pt = self.Pset_prof * A * (1 - self.loss_factor) - self.Pset_prof * B

        return Pf, Pt

    def copy(self, bus_dict=None):
        """
        Returns a copy of the branch
        :param bus_dict: optional {original bus: replacement bus} mapping
                         used to re-wire the copy onto other bus objects
        @return: A new with the same content as this
        """
        if bus_dict is None:
            f = self.bus_from
            t = self.bus_to
        else:
            f = bus_dict[self.bus_from]
            t = bus_dict[self.bus_to]

        # BUG FIX: Pset and contingency_factor were previously not passed
        # to the constructor, so copies silently lost the active power set
        # point and the contingency rating multiplier.
        b = HvdcLine(bus_from=f,
                     bus_to=t,
                     name=self.name,
                     idtag=self.idtag,
                     rate=self.rate,
                     active=self.active,
                     Pset=self.Pset,
                     loss_factor=self.loss_factor,
                     Vset_f=self.Vset_f,
                     Vset_t=self.Vset_t,
                     length=self.length,
                     mttf=self.mttf,
                     mttr=self.mttr,
                     overload_cost=self.overload_cost,
                     min_firing_angle_f=self.min_firing_angle_f,
                     max_firing_angle_f=self.max_firing_angle_f,
                     min_firing_angle_t=self.min_firing_angle_t,
                     max_firing_angle_t=self.max_firing_angle_t,
                     contingency_factor=self.contingency_factor,
                     active_prof=self.active_prof,
                     rate_prof=self.rate_prof,
                     Pset_prof=self.Pset_prof,
                     Vset_f_prof=self.Vset_f_prof,
                     Vset_t_prof=self.Vset_t_prof,
                     overload_cost_prof=self.overload_cost_prof)

        b.measurements = self.measurements

        # copy the profile array itself, not just the reference
        b.active_prof = self.active_prof.copy()

        return b

    def get_save_data(self):
        """
        Return the data that matches the edit_headers
        :return: list of serializable values, one per editable header
        """
        data = list()
        for name, properties in self.editable_headers.items():
            obj = getattr(self, name)

            if properties.tpe == DeviceType.BusDevice:
                # buses are stored by their id tag
                obj = obj.idtag

            elif properties.tpe not in [str, float, int, bool]:
                obj = str(obj)

            data.append(obj)
        return data

    def get_properties_dict(self):
        """
        Get json dictionary
        :return: dict with the device properties
        """
        # NOTE(review): self.code is not set in __init__; presumably it is
        # provided by EditableDevice — confirm against the base class.
        d = {'id': self.idtag,
             'type': 'hvdc',
             'phases': 'ps',
             'name': self.name,
             'name_code': self.code,
             'bus_from': self.bus_from.idtag,
             'bus_to': self.bus_to.idtag,
             'active': self.active,
             'rate': self.rate,
             'r': 0,
             'length': self.length,
             'loss_factor': self.loss_factor,
             'vset_from': self.Vset_f,
             'vset_to': self.Vset_t,
             'Pset': self.Pset,
             'min_firing_angle_f': self.min_firing_angle_f,
             'max_firing_angle_f': self.max_firing_angle_f,
             'min_firing_angle_t': self.min_firing_angle_t,
             'max_firing_angle_t': self.max_firing_angle_t,
             'overload_cost': self.overload_cost,
             'base_temperature': 20,
             'operational_temperature': 20,
             'alpha': 0.00330,
             'locations': []
             }

        return d

    def get_profiles_dict(self):
        """
        Get the profiles as a json-serializable dictionary
        :return: dict of plain lists, one entry per profile
        """
        if self.active_prof is not None:
            active_prof = self.active_prof.tolist()
            rate_prof = self.rate_prof.tolist()
            pset_prof = self.Pset_prof.tolist()
            # BUG FIX: the voltage set-point profiles were returned as raw
            # numpy arrays while every other profile was converted with
            # tolist(), which breaks JSON serialization of the result.
            vset_prof_f = self.Vset_f_prof.tolist()
            vset_prof_t = self.Vset_t_prof.tolist()
            cost_prof = self.overload_cost_prof.tolist()
        else:
            active_prof = list()
            rate_prof = list()
            pset_prof = list()
            cost_prof = list()
            vset_prof_f = list()
            vset_prof_t = list()

        return {'id': self.idtag,
                'active': active_prof,
                'rate': rate_prof,
                'Pset': pset_prof,
                'vset_from': vset_prof_f,
                'vset_to': vset_prof_t,
                'overload_cost': cost_prof}

    def get_units_dict(self):
        """
        Get units of the values
        """
        return {'rate': 'MW',
                'length': 'km',
                'loss_factor': '%',
                'vset_f': 'p.u.',
                'vset_t': 'p.u.',
                'pset': 'MW',
                'min_firing_angle_f': 'radians',
                'max_firing_angle_f': 'radians',
                'min_firing_angle_t': 'radians',
                'max_firing_angle_t': 'radians',
                'overload_cost': '€/MWh'}

    def plot_profiles(self, time_series=None, my_index=0, show_fig=True):
        """
        Plot the time series results of this object
        :param time_series: TimeSeries Instance
        :param my_index: index of this object in the simulation
        :param show_fig: Show the figure?
        """
        if time_series is not None:
            fig = plt.figure(figsize=(12, 8))

            ax_1 = fig.add_subplot(211)
            ax_2 = fig.add_subplot(212, sharex=ax_1)

            x = time_series.results.time

            # loading in % of the rating (1e-9 avoids division by zero)
            y = self.Pset_prof / (self.rate_prof + 1e-9) * 100.0
            df = pd.DataFrame(data=y, index=x, columns=[self.name])
            ax_1.set_title('Loading', fontsize=14)
            ax_1.set_ylabel('Loading [%]', fontsize=11)
            df.plot(ax=ax_1)

            # losses
            y = self.Pset_prof * self.loss_factor
            df = pd.DataFrame(data=y, index=x, columns=[self.name])
            ax_2.set_title('Losses', fontsize=14)
            ax_2.set_ylabel('Losses [MVA]', fontsize=11)
            df.plot(ax=ax_2)

            plt.legend()
            fig.suptitle(self.name, fontsize=20)

            if show_fig:
                plt.show()

    def get_coordinates(self):
        """
        Get the branch defining coordinates
        """
        return [self.bus_from.get_coordinates(), self.bus_to.get_coordinates()]
We have used FiDA & Co. services for three important and critical projects, and I have been particularly impressed with the professional team and the exceptionally high-quality services we have received through their commitment to a unique service offering. FiDA & Co.'s solution is a really great advanced digital monitoring tool that we have used to effectively monitor our ongoing outdoor campaigns, along with a great network that was geographically well observed. We have been operating it practically all the time, and it really matched our technological objective. The great team has always been committed to quickly submitting distinguished reports on time. Dalida Mostafa - ORANGE Egypt – Manager, Corporate and digital communication. Integrity: We believe integrity and sincerity are core values that can’t be bought or measured, yet are essential to providing real service. Sustainability: Sustainable long-term client relationships and satisfaction are an essential part of who we are.
"""Views for creating, editing, retrieving and listing annotated documents.

NOTE: this module uses Python 2 syntax (``except Exception, e``) and the
deprecated ``django.utils.simplejson`` / ``django.core.urlresolvers``.
"""
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils import simplejson
from django.views.decorators.csrf import csrf_exempt
from mongoengine import ValidationError

from annotate.models import AnnotatedDoc
from annotate.forms import AnnotatedDocumentCreationForm


def index(request):
    """Render the landing page."""
    ctx = {}
    return render_to_response(
        "annotate/index.html", RequestContext(request, ctx))


def new(request):
    """Show the creation form; on valid POST, create a doc and redirect to edit."""
    form_class = AnnotatedDocumentCreationForm
    form = form_class()
    if request.method == "POST":
        form = form_class(request.POST)
        if form.is_valid():
            content = form.cleaned_data["content"]
            title = form.cleaned_data['title']
            # Paragraphs are delimited by blank lines in the submitted text.
            content = content.split("\r\n\r\n")
            # Each paragraph becomes a segment with an (initially empty) annotation.
            text = [{'anot':segment, 'content':''} for segment in content]
            # Seed the first paragraph with usage instructions.
            text[0].update({'content': 'Annotate any paragraph by clicking the pencil\n icon to the right of the reference text.'})
            doc = AnnotatedDoc(text=text, title=title)
            doc.save()
            return HttpResponseRedirect(reverse("edit_annotateddoc", args=[doc.id]))
    ctx = {
        'form': form
    }
    return render_to_response(
        "annotate/new.html", RequestContext(request, ctx))


@csrf_exempt  # FIXME: to be removed soon
def post(request):
    """Save posted JSON text, updating the doc identified by doc_hash if it exists."""
    if not request.POST or not "doc" in request.POST:
        response = {"error": "Invalid request"}
    else:
        try:
            text = simplejson.loads(request.POST["doc"])
            doc_hash = request.POST.get("doc_hash")
        except ValueError:
            response = {"error": "Invalid document"}
        else:
            try:
                # Update the existing document in place...
                doc = AnnotatedDoc.objects.get(id=doc_hash)
                doc.text = text
            except AnnotatedDoc.DoesNotExist:
                # ...or create a fresh one when the hash is unknown/invalid.
                doc = AnnotatedDoc(text=text)
            except ValidationError:
                doc = AnnotatedDoc(text=text)
            except Exception, e:
                # NOTE(review): if this branch is taken, `doc` is unbound and
                # the `finally` below raises NameError instead of this message.
                raise Exception, "UH OH: %s" % e
            finally:
                doc.save()
            response = {"success": True, "doc": str(doc.id)}
    return HttpResponse(
        simplejson.dumps(response), content_type="application/json")


def get(request, doc_hash):
    """Return the text of the document identified by doc_hash as JSON."""
    if not request.GET or not doc_hash:
        # NOTE(review): this error response is always overwritten by the
        # try/except/else below — the invalid-request case never reaches
        # the caller.
        response = {"error": "Invalid request"}
    try:
        doc = AnnotatedDoc.objects.get(id=doc_hash)
    except (ValidationError, AnnotatedDoc.DoesNotExist):
        response = {"error": "Does not exist"}
    else:
        response = {"success": True, "text": doc.text}
    return HttpResponse(
        simplejson.dumps(response), content_type="application/json")


def edit(request, doc_hash):
    """Render the document in editable mode."""
    return view(request, doc_hash, editable=True)


def view(request, doc_hash, editable=False):
    """Render the document; 404 when the hash is invalid or unknown."""
    try:
        doc = AnnotatedDoc.objects.get(id=doc_hash)
    except (ValidationError, AnnotatedDoc.DoesNotExist):
        raise Http404
    ctx = {'doc_hash': doc_hash,
           'doc': doc,
           'editable': editable,
           }
    return render_to_response(
        "annotate/view.html", RequestContext(request, ctx))


def list(request):
    """List all annotated documents.

    NOTE(review): the view name shadows the builtin ``list`` within this module.
    """
    docs = AnnotatedDoc.objects.all()
    ctx = {
        "docs" : docs
    }
    return render_to_response(
        "annotate/list.html", RequestContext(request, ctx))
Our Epson 711XXL Magenta (T711XXL320) ink cartridges come with the same money-back guarantee as all our other cartridges. We believe that this inkjet cartridge matches or exceeds the quality of OEM cartridges. Some cartridges, we have found, exceed the print output of the original inkjet cartridges. Save money on your printing while maintaining the same print standards.
# coding=utf-8

"""Scan directories as nodes."""

import os
from glob import glob

from ..nodes import get, validate_link
from ..utility import abspath, boolenize


def directory(node_list, root, whitelist, followlinks=False):
    """Scan nodes from all directories under the directory 'root'.

    If one directory has properties of both of 'run' and 'project',
    type of the directory is set to 'run'.

    Parameters
    ----------
    node_list : list
        Already known nodes. Directories whose path is already present
        are skipped; newly scanned nodes are appended to a new list.
    root : str
        Scan directories recursively under the directory `root`.
    whitelist : list of str or str
        Run node has one or more file or directory which satisfies
        run_node_dir/`whitelist`. And project nodes satisfy
        project_dir/run_node_dir/`whitelist`.
        str can be specified by wildcard.
    followlinks : {'False', 'True'}, optional
        Whether scan in symbolic link. Be aware that setting this to
        True may lead to infinite recursion.

    Returns
    -------
    node_list : list
        The input nodes followed by the newly scanned nodes, with
        parent/child links validated for the new entries.

    Examples
    --------
    >>> # Initialize node_list.
    >>> node_list = directory([], "scandir_path", ["data/hoge*", "*foo*"])
    >>> # Rescan an existing node_list with another whitelist.
    >>> node_list = directory(node_list, "scandir_path", ["*.conf"])
    """
    root = abspath(root)
    followlinks = boolenize(followlinks)
    if isinstance(whitelist, str):
        whitelist = [whitelist]

    scan_nodelist = []
    for path, dirs, _files in os.walk(root, followlinks=followlinks):
        dirs.sort()  # deterministic scan order
        node_type = None
        parents = []
        children = []
        # Skip directories that are already registered as nodes.
        if get(node_list, path) is not None:
            continue
        # A directory whose sub-directory matches the whitelist is a project.
        for child in dirs:
            for white in whitelist:
                if glob(os.path.join(path, child, white)):
                    node_type = "project"
                    children.append(os.path.join(path, child))
                    break
        # A directory that itself matches the whitelist is a run; this
        # takes precedence over "project" (see docstring).
        for white in whitelist:
            if glob(os.path.join(path, white)):
                node_type = "run"
                parents.append(os.path.dirname(path))
                break
        if not node_type:
            continue
        scan_nodelist.append({"path": path,
                              "parents": parents,
                              "children": children,
                              "type": node_type,
                              "name": os.path.basename(path),
                              })

    origin_len = len(node_list)
    node_list = node_list + scan_nodelist
    # Validate links only for the freshly appended nodes.
    for node in node_list[origin_len:]:
        validate_link(node_list, node)
    return node_list


def register(pipe_dics):
    """Register the scanner as the 'scan_directory' pipe."""
    pipe_dics["scan_directory"] = {
        "func": directory,
        "args": [("root", {"help": "path of root directory"}),
                 ("whitelist", {"help": "whitelist of file which exists in run directory",
                                "nargs": "+",
                                }),
                 ],
        "kwds": [("followlinks", {"help": "whether scan in symbolic link"})],
        "desc": "Scan nodes from all directories under the directory 'root'.",
    }
will be on the perimeter. King has multiple receivers they can go to on any down, East English will look to their defesive backs to try to stop the highly praised passing game of the Crusaders. This game will also be the first Detroit Public School game of the week. The season has just started, and at this time I would like to mention some players that I think will become better known around the Metro-Detroit area by the end of the season. This list is in no praticular order. King Running Back Raymond Mitchell dives into the end-zone. Detroit MLK scores a touchdown in overtime. Southfield players celebrate comeback victory over Martin Luther King. Final score Southfield 21 King 20. Cass Tech- If you could choose one word to describe Cass Tech, depth would be the word to use. Offense, defense special teams, it is amazing to believe that one team could have so much talent. The Technicians will be led on both offense and defense this year by Jourdan Lewis. The 5’10” Lewis, who has already committed to Michigan, will be used more has a wide receive this year. Which means quarterback Jayru campbell will have another weapon for his disposal on an already talent rich offense. Henry Ford- The Trojans were the surprise team of the PSL last year, making it all the way to the Division 2 championship game. Though they lose quarterback Victor Edge, the Trojans will have to win games in the trenches this year, and will be led by senior running back Jamall Sims. The Trojans will have to improve upon a defense that surrendered 49 points to Farmington last year in the playoffs. Cody- Look for Cody to improve upon on their sub-par season last year. Coach Calvin Norman will have no issues with speed this year. WR/DB Eddie Fulker and teammate Kenyon Bell ran a reported 10.5 seconds in a 100 meter dash. Western- The Cowboys will be going through a transition this year. Western is bringing in one of the most respected coaches in Detroit to their program: Andre Harlan. 
Harlan had impressive couple of years at Detroit Southwestern and will look to continue his winning tradition at Western. Following coach Harlan to Western will be sophomore running back sensation Romello Ross. Ross will be a major addition to an offense that struggled last year scoring only 109 points. Renaissance- Renaissance will look to make their first trip to the state playoffs since 2007. Coach Tyrice Grice will have a problem with experience this season. He has only 9 seniors on the squad this year. This young team will have to come together quickly as they open up the season against the always tough and well coached Farmington Hill Harrison. Northwestern- The Colts had a rough time scoring last season, which was shown by them being tied for second to last in the PSL in scoring. Northwestern should be huge on the interior with OG/DT Balaal Hollings. The running game for Northwestern, led by a pair 5’8″ running backs, should have a nice holes to run through. Mumford- Mumford had the leagues worst defense last year, giving up a combined 330 points. The Mustangs will have a good amount of their starters returning, which should help them improve upon the issue they had defensively. Mumford has a good leader in Donshell English who was the head coach at Southeastern when William Gholston was tearing up the Public School League. East English- East English is a combination of the old Finney High School and Crockett Tech. Most of the players will come from Crockett, a team that made it to the regional final last season, losing a close game to Marine City. Leading East English will be the dynamic duo of Desmond King and Khalid Hill. King, a RB/DB, will be counted on to hold down the secondary and create big plays in the running game. Hill is a TE/DE and has already committed to Michigan. Losing Quarterback Brian Blackburn will be a big loss, no doubt about it, but East English has enough skill players with starting experience to make it by. 
Douglass- The ‘Canes won the PSL Division 2 championship last year in large part due to an explosive offense that scored over 3oo points. Leading that attack will be 5’ 10″ athlete Demetrius Stinson, who acquired over 1,700 all-purpose yards while around 1,500 of those were on the ground. Fellow senior Kory Peterson will have to throw the ball consistently to open up the ground game for Stinson. Douglass should be solid on defense, particularly in secondary with Stinson roaming the backfield with Corner Omari McCauley. King- The Crusaders will have to replace Dennis Norfleet, who was the heart and soul of the team for the last couple of years. Instead of running the ball this year as effectively as they have done years past, King will have one of the most experienced wide receiver depth charts in the city. Mycial Allen and fellow Senior Devaun Williams should be an exciting Quarterback/Wide Receiver combination. Southeastern- Southeastern is coming off a 5-4 season in which they were held to under 6 points 3 times. Trying to help the defense will be LB Kenyatta Singleton. 6 starters will return on offense and 5 return on defense. Pershing- The Doughboys are coming off an impressive 7-3 season last year. Pershing has hired a new coach, Charles Spann, this year, and he will have the pleasure of utilizing the highly experienced Laron Kidd. Tommy Pearson is also expected to have an accomplished season this year. Denby- The Tar’s had a rough time scoring last season, being shut out 3 times. The defense was not that much better, giving up more than 40 points five times. They have hired Peter Karras, son of former Lion Alex Karras, to coordinate the defense which should help improve the Tars chances of staying in the game. Osborn- Will have only 12 seniors on the team this year. On the positive side, there are not a lot of players on the team, so the seniors will have a chance to play. Glancing at the size of the players, one thing that stood out was their height. 
Only three players will be above 6’3″. Central- Had a tough time scoring last year. The most points scored by the Trailblazers was 18. The defensive side of the ball will be led by seniors Calvin Hall and Terrance Barksdale. Head Coach Robert Hunt believes his team will have a multitude of senior leadership entering the 2012 campaign. The season is fast approaching, and I would like to take this time to share with you my opinions about the top Detroit Public School League Football games for this fall. There are some city championship notable games coming up in the next few weeks, but lets open up with a game worthy of a state final at Ford Field. What better way to start off a season than with two defending state champions battling it out in the new Detroit Prep Kickoff Classic. There should be plenty of scouts on hand for these two teams. It seems that at every position, Cass Tech has a future Division One recruit. Whether it be Jourdan Lewis at Corner, Damon Webb at receiver, Jayru Campbell at quarterback, Deon Drake at linebacker, just to name a few. Don’t count out Brother Rice in this game, especially with experienced running back Brian Walker pounding the rock in Coach Fracassa’s offense. This year, the Detroit Public School League will introduce a new format for fall football. The new format will consist of two divisions, with one division carrying 8 teams and the other division being composed of 7 teams. The East Division will feature Central, East English, King, Osborn, Denby, Pershing, Southeastern and Douglass. Teams that will compete in the West Division include Mumford, Renaissance, Cody, Cass Tech, Ford, Northwestern, and Western. The new format will introduce both positive and negative outcomes. Negatives that the new format will present include the loss of old rivalries due to the fact cross-over games will only be played if teams do not qualify for the playoffs. 
Thus, traditional rivals, such as Martin Luther King and Cass Tech, will not be guaranteed to compete against one another. At the same time though, positives from the new format will include just one city championship game, which will hopefully be held at Ford Field as it was last year. Another positive with the new format will be less cost for travel. Schools will be closer together giving students and faculty a chance to attend road games which has been a problem lately for the PSL. Although the new format is not perfect, the new set-up will hopefully bring added attention to every game being played in the PSL every given week instead of one division receiving all the exposure. Don’t forget today is the first day of high school football in the state of Michigan, and two PSL teams will be in action. Henry Ford will play Pershing at 4:00 and Mumford will take on the Yellow Jackets of Detroit Country Day. Both games will be at Renaissance as part of the PSL Kick-Off Showcase Classic. My name is Steven and this is a blog created to display and showcase the talent within the Detroit Public School Football League. Over the past few years, I have personally seen talented athletes dominate the field of play in high school but receive no recognition at the College level. Their athletic talents demonstrated on the field might take them places where they could continue their athletic careers and become leaders of tomorrow. Please feel free to voice your opinions on who should be given consideration to be part of the conversation on this blog. My ultimate goal is to get athletes noticed by top-tier programs locally and nationally. Lets hope that a Detroit Public School League team will follow the great season Cass Tech had last year and bring a state championship to the city of Detroit.
"""Apply a set of rolling filter to a data cubes This copy read complex filter dataset. """ import argparse import os from glob import glob from multiprocessing import Pool, Array, current_process import numpy as np import xarray as xr from opstats.foreground import apply_filter def do_mask(i): process = current_process().pid print('... P{:d}: applying filter {:s}' .format(process, filter_files[i].split('/')[-1])) filter_ds = xr.open_dataset(filter_files[i]) filter_array = filter_ds['real'].values + (1j * filter_ds['imag'].values) data_channels = filter_ds.attrs['frequency_channels'] image_channel = int(np.floor(filter_ds['real'].shape[0] / 2)) # Figure out FFT and filter normalization # FFT normalization factor x = filter_ds.attrs['x'] y = filter_ds.attrs['y'] f = filter_ds.attrs['f'] dx = x[1] - x[0] dy = y[1] - y[0] df = f[1] - f[0] u = filter_ds.attrs['u'] v = filter_ds.attrs['v'] e = filter_ds.attrs['e'] du = u[1] - u[0] dv = v[1] - v[0] de = e[1] - e[0] fft_norm = dx * dy * df ifft_norm = du * dv * de * filter_array.size # Filter normalization factor filter_volume = np.sum(filter_array.size * du * dv * de) filter_integral = np.sum(np.abs(filter_array) ** 2 * du * dv * de) filter_norm = np.sqrt(filter_volume / filter_integral) # Apply filter filtered_data = apply_filter( data_array[data_channels], filter_array, fft_multiplier=fft_norm, ifft_multiplier=ifft_norm, output_multiplier=filter_norm, apply_window_func=True, invert_filter=False ).real # Select and store the center channel of the filtered data array filtered_data_array[data_channels[image_channel]] = \ filtered_data[image_channel] if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('data_cube', type=str) parser.add_argument('filter_directory', type=str, help='filter directory containing *.nc filter files.') parser.add_argument('output_file', type=str) parser.add_argument('--nprocs', type=int, default=4) args = parser.parse_args() print('Data cube: 
{:s}'.format(args.data_cube)) print('Filter directory: {:s}'.format(args.filter_directory)) # Read input data cube data_da = xr.open_dataarray(args.data_cube) data_array = data_da.values # Create shared memory array to store filtered data cube filtered_data_array_base = Array('d', data_array.size) filtered_data_array = np.frombuffer(filtered_data_array_base.get_obj()) filtered_data_array.shape = data_array.shape # Read in list of filter files filter_files = glob('{:s}/*.nc'.format(args.filter_directory)) filter_files.sort() nbins = len(filter_files) # Attributes for output files # Temporary read in the first filter to retrive filter information ds0 = xr.open_dataset(filter_files[0]) filter_bandwidth = ds0.attrs['filter_bandwidth'] image_bandwidth = ds0.attrs['channel_bandwidth'] print('Filter bandwidth: {:.1f} Hz'.format(filter_bandwidth)) print('Image bandwidth: {:.1f} Hz'.format(image_bandwidth)) ds0.close() output_attrs = data_da.attrs extra_attrs = {'filter_type': 'wedge', 'extra_filter_shift': 'None', 'filter_bandwidth': filter_bandwidth, 'image_bandwidth': image_bandwidth} for key, value in extra_attrs.items(): output_attrs[key] = value # Check output directory output_dir = args.output_file.rsplit('/', 1)[0] os.makedirs(output_dir, exist_ok=True) # Start processing pool pool = Pool(args.nprocs) pool.map(do_mask, range(nbins)) pool.close() pool.join() # Save output da = xr.DataArray(filtered_data_array, dims=['f', 'y', 'x'], coords={'x': data_da.x.values, 'y': data_da.y.values, 'f': data_da.f.values}, attrs=output_attrs) da.to_netcdf(args.output_file) print('Saving out put to {:s}'.format(args.output_file))
Glamour, Exclusive and More Sensation Party. Optimization can be up to more than 12 different design view. Editable colors & Background. Quick and Easy to edit. Editable text layers.
from nca47.db import api as db_api from nca47.db.sqlalchemy.models import GPoolInfo as GPoolModel from nca47.objects import base from nca47.objects import fields as object_fields class DnsGPool(base.Nca47Object): VERSION = '1.0' fields = { 'tenant_id': object_fields.StringField(), 'name': object_fields.StringField(), 'enable': object_fields.StringField(), 'pass_': object_fields.StringField(), 'ttl': object_fields.StringField(), 'max_addr_ret': object_fields.StringField(), 'cname': object_fields.StringField(), 'first_algorithm': object_fields.StringField(), 'second_algorithm': object_fields.StringField(), 'fallback_ip': object_fields.StringField(), 'hms': object_fields.ListOfStringsField(), 'gmember_list': object_fields.ListOfStringsField(), 'warning': object_fields.StringField(), 'gpool_id': object_fields.StringField(), } def __init__(self, context=None, **kwargs): self.db_api = db_api.get_instance() super(DnsGPool, self).__init__(context=None, **kwargs) @staticmethod def __from_db_object(dns_gpool, db_dns_gpool): """ :param dns_syngroup: :param db_dns_syngroup: :return: """ for field in dns_gpool.fields: dns_gpool[field] = db_dns_gpool dns_gpool.obj_reset_changes() return dns_gpool def create(self, context, values): gpool = self.db_api.create(GPoolModel, values) return gpool def update(self, context, id, values): gpool = self.db_api.update_object(GPoolModel, id, values) return gpool def get_object(self, context, **values): gpool = self.db_api.get_object(GPoolModel, **values) return gpool # def get_objects(self, context, **values): # gpool = self.db_api.get_objects(GPoolModel, **values) # return gpool def delete(self, context, id): gpool = self.db_api.delete_object(GPoolModel, id) return gpool def get_objects(self, context, str_sql): gpool = self.db_api.get_all_object(GPoolModel, str_sql) return gpool
“How do you innovate in an industry that is centuries old? You find new approaches to doing critical work in radically different ways.” Vinodh Swaminathan, Head of KPMG’s Cognitive Labs and Principal, Intelligent Automation, Cognitive and AI. The power and potential of intelligent automation create new ways of thinking about work at the intersection of cloud, data, cognitive and other automation technologies. KPMG is applying the power of Artificial Intelligence (AI) and cognitive systems to audit, tax, and advisory areas. With technologies like IBM Watson, KPMG is innovating to further empower our employees to work smarter, gain insights faster, sustain execution of high quality audits, and further enhance the value of the services we deliver to tax and advisory clients. KPMG has been recently featured in an IBM Watson ad campaign about transformation journeys. In complex areas like audit, tax, and advisory, KPMG seeks to implement a variety of innovative tools, including cognitive automation to make our work even more impactful and valuable. Click here for more information on KPMG’s work with Watson.
import gi gi.require_version('Gtk', '3.0') from gi.repository import Gtk class PyApp(Gtk.Window): def __init__(self): super(PyApp, self).__init__() self.set_title('Menu') self.set_position(Gtk.WindowPosition.CENTER_ALWAYS) self.set_size_request(250, 150) accel_group = Gtk.AccelGroup() self.add_accel_group(accel_group) vbox = Gtk.VBox() menubar = Gtk.MenuBar() vbox.pack_start(menubar, False, False, 0) self.label = Gtk.Label(label='Activate a menu item') vbox.pack_start(self.label, True, True, 0) menu_file = Gtk.Menu() item_file = Gtk.MenuItem.new_with_mnemonic('_File') item_file.set_submenu(menu_file) menubar.append(item_file) item_new = Gtk.MenuItem.new_with_mnemonic('_New') key, mod = Gtk.accelerator_parse('<Ctrl>N') item_new.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_new.connect('activate', self._activate_cb, 'New') menu_file.append(item_new) item_open = Gtk.MenuItem.new_with_mnemonic('_Open') key, mod = Gtk.accelerator_parse('<Ctrl>O') item_open.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_open.connect('activate', self._activate_cb, 'Open') menu_file.append(item_open) menu_recents = Gtk.Menu() item_recents = Gtk.MenuItem.new_with_mnemonic('Open _recents') item_recents.set_submenu(menu_recents) menu_file.append(item_recents) for recent_file in range(1, 6): item_recent = Gtk.MenuItem.new_with_mnemonic('_%d: Recent file %d' % (recent_file, recent_file)) item_recent.connect('activate', self._activate_cb, 'Recent file %d' % recent_file) menu_recents.append(item_recent) separator = Gtk.SeparatorMenuItem() menu_file.append(separator) item_exit = Gtk.MenuItem.new_with_mnemonic('_Quit') key, mod = Gtk.accelerator_parse('<Ctrl>Q') item_exit.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_exit.connect('activate', self._activate_cb, 'Quit') menu_file.append(item_exit) menu_edit = Gtk.Menu() item_edit = Gtk.MenuItem.new_with_mnemonic('_Edit') 
item_edit.set_submenu(menu_edit) menubar.append(item_edit) item_undo = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_UNDO, None) key, mod = Gtk.accelerator_parse('<Ctrl>Z') item_undo.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_undo.connect('activate', self._activate_cb, 'Undo') menu_edit.append(item_undo) item_redo = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_REDO, None) key, mod = Gtk.accelerator_parse('<Ctrl><Shift>Z') item_redo.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_redo.connect('activate', self._activate_cb, 'Redo') menu_edit.append(item_redo) separator = Gtk.SeparatorMenuItem() menu_edit.append(separator) item_copy = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_COPY, None) key, mod = Gtk.accelerator_parse('<Ctrl>C') item_copy.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_copy.connect('activate', self._activate_cb, 'Copy') menu_edit.append(item_copy) item_cut = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_CUT, None) key, mod = Gtk.accelerator_parse('<Ctrl>X') item_cut.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_cut.connect('activate', self._activate_cb, 'Cut') menu_edit.append(item_cut) item_paste = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_PASTE, None) key, mod = Gtk.accelerator_parse('<Ctrl>V') item_paste.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_paste.connect('activate', self._activate_cb, 'Paste') menu_edit.append(item_paste) separator = Gtk.SeparatorMenuItem() menu_edit.append(separator) label = 'Vertical page' item_vertical = Gtk.RadioMenuItem(label=label) item_vertical.set_active(True) item_vertical.connect('toggled', self._toggled_cb, label) menu_edit.append(item_vertical) label = 'Horizontal page' item_horizontal = Gtk.RadioMenuItem.new_with_label((item_vertical,), label) item_horizontal.connect('toggled', self._toggled_cb, label) menu_edit.append(item_horizontal) menu_view = 
Gtk.Menu() item_view = Gtk.MenuItem.new_with_mnemonic('_View') item_view.set_submenu(menu_view) menubar.append(item_view) item_hides = Gtk.CheckMenuItem.new_with_mnemonic('_Hidden files') key, mod = Gtk.accelerator_parse('<Ctrl>H') item_hides.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_hides.connect('toggled', self._toggled_cb, 'Hidden files', True) menu_view.append(item_hides) menu_help = Gtk.Menu() item_help = Gtk.MenuItem(label='Ayuda') item_help.set_submenu(menu_help) menubar.append(item_help) item_about = Gtk.ImageMenuItem.new_from_stock(Gtk.STOCK_ABOUT, None) item_about.add_accelerator('activate', accel_group, key, mod, Gtk.AccelFlags.VISIBLE) item_about.connect('activate', self._activate_cb, 'About') menu_help.append(item_about) self.add(vbox) self.show_all() self.connect('destroy', Gtk.main_quit) def _activate_cb(self, item, label): self.label.set_text('You activated %s item' % label) def _toggled_cb(self, item, label, no_active=False): if item.get_active(): self.label.set_text('You activated %s item' % label) elif not item.get_active() and no_active: self.label.set_text('You deactivate %s item' % label) PyApp() Gtk.main()
David has more than eight years of experience providing accounting and assurance service to clients across a wide variety of industries. He has served as the auditor in-charge of governmental, manufacturing and distribution, real estate and not-for-profit audits. He has been involved in assessing entity, audit and business risks. David focuses on providing recommendations to his clients on how to improve their internal controls and processes. He also provides clients with timely audit deliverables, including financial statement disclosure and various other assurance activities. David and his wife, Kelly, have been married for 21 years. They have two children, Alex and Emily Lauren. In his free time, David enjoys fishing, travel baseball, and watching sports.
#!/usr/bin/env python # -*- coding: utf-8 -*- # vim: set et ai sta sts=2 sw=2 ts=2 tw=0: """ SaLT functions: - getSaLTVersion - isSaLTVersionAtLeast - isSaLTLiveEnv - isSaLTLiveCloneEnv - getSaLTLiveMountPoint - getSaLTRootDir - getSaLTIdentFile - getSaLTBaseDir - listSaLTModules - installSaLTModule """ from __future__ import print_function, unicode_literals, absolute_import from .freesize import * import os import glob import re import subprocess from threading import Thread from time import sleep def getSaLTVersion(): """ Returns the SaLT version if run in a SaLT Live environment """ _checkLive() return open('/mnt/salt/salt-version', 'r').read().strip() def isSaLTVersionAtLeast(version): """ Returns True if the SaLT version is at least 'version'. """ v = getSaLTVersion() def vercmp(v1, v2): def _makelist(v): lst = [int(x) for x in re.sub(r'[a-z]', '', v.lower()).split('.')] while lst[-1] == 0: lst.pop() return lst return _makelist(v1).__ge__(_makelist(v2)) return vercmp(version, v) def isSaLTLiveEnv(): """ Returns True if it is executed in a SaLT Live environment, False otherwise """ return os.path.isfile('/mnt/salt/salt-version') and os.path.isfile('/mnt/salt/tmp/distro_infos') def _checkLive(): if not isSaLTLiveEnv(): raise Exception('Not in SaLT Live environment.') def isSaLTLiveCloneEnv(): """ Returns True if it is executed in a SaLT LiveClone environment, False otherwise """ if not isSaLTLiveEnv(): return False else: moduledir = '{0}/{1}/{2}/modules'.format(getSaLTLiveMountPoint(), getSaLTBaseDir(), getSaLTRootDir()) return os.path.isfile(moduledir + '/01-clone.salt') def getSaLTLiveMountPoint(): """ Returns the SaLT source mount point path. It could be the mount point of the optical drive or the USB stick for example. 
""" _checkLive() try: # format: # mountpoint:device ret = open('/mnt/salt/tmp/distro_infos', 'r').read().splitlines()[0].split(':', 1)[0] except: ret = None return "/mnt/salt{0}".format(ret) def getSaLTRootDir(): """ Returns the SaLT ROOT_DIR, which is the directory containing SaLT modules. This is not the full path but a relative path to BASEDIR. """ _checkLive() ret = None for line in open('/mnt/salt/etc/salt.cfg', 'r').read().splitlines(): if line.startswith('ROOT_DIR='): ret = line.split('=', 1)[1] break return ret def getSaLTIdentFile(): """ Returns the SaLT IDENT_FILE, which is the file located at the root of a filesystem containing some SaLT information for this Live session. This is not the full path but a relative path to the mount point. """ _checkLive() ret = None for line in open('/mnt/salt/etc/salt.cfg', 'r').read().splitlines(): if line.startswith('IDENT_FILE='): ret = line.split('=', 1)[1] break return ret def getSaLTBaseDir(): """ Returns the SaLT BASEDIR, which is the directory containing all files for this Live session. This is not a full path but a relative path to the mount point. """ _checkLive() mountpoint = getSaLTLiveMountPoint() identfile = getSaLTIdentFile() ret = None if mountpoint and identfile: for line in open('{0}/{1}'.format(mountpoint, identfile), 'r').read().splitlines(): if line.startswith('basedir='): ret = line.split('=', 1)[1] break if ret is not None and len(ret) == 0: ret = '.' # for not having empty path. GNU is ok having a path like a/b//c/d but it's preferable to have a/b/./c/d if possible return ret def listSaLTModules(): """ Returns the list of SaLT modules for this Live session. """ _checkLive() moduledir = '{0}/{1}/{2}/modules'.format(getSaLTLiveMountPoint(), getSaLTBaseDir(), getSaLTRootDir()) return sorted(map(lambda(x): re.sub(r'.*/([^/]+).salt$', r'\1', x), glob.glob('{0}/*.salt'.format(moduledir)))) def getSaLTModulePath(moduleName): """ Get the module full path. 
""" return '/mnt/salt/mnt/modules/{0}'.format(moduleName) def installSaLTModule(moduleName, moduleSize, targetMountPoint, callback, callback_args=(), interval = 10, completeCallback = None): """ Install the module 'moduleName' from this Live session into the targetMountPoint. 'moduleSize' is the uncompressed size of the module expressed in bytes. The 'callback' function will be called each 'interval' seconds with the pourcentage (0 ≤ x ≤ 1) of progression (based on used size of target partition) as first argument and all value of callback_args as next arguments The 'completeCallback' function will be called after the completion of installation. """ _checkLive() src = getSaLTModulePath(moduleName) if not os.path.isdir(src): raise IOError("The module '{0}' does not exists".format(moduleName)) if not os.path.isdir(targetMountPoint): raise IOError("The target mount point '{0}' does not exists".format(targetMountPoint)) def get_used_size(p): return getSizes(p, False)['used'] class ExecCopyTask: def _run(self, *args, **kwargs): cmd = args[0] self._p = subprocess.Popen(cmd) self._p.wait() def start(self, cmd): self._t = Thread(target=self._run, args=(cmd,)) self._t.start() def is_running(self): return self._t and self._t.is_alive() def stop(self): if self._p: self._p.kill() self._p = None init_size = get_used_size(targetMountPoint) actual_size = init_size t = ExecCopyTask() t.start(['cp', '--preserve', '-r', '-f', '--remove-destination', '{0}/.'.format(src), targetMountPoint + '/']) while t.is_running(): for x in range(interval): sleep(1) if not t.is_running(): break if t.is_running(): actual_size = get_used_size(targetMountPoint) diff_size = float(actual_size - init_size) if diff_size < 0: # is this possible? diff_size = 0 p = diff_size / moduleSize if p > 1: p = 1 if not callback(p, *callback_args): t.stop() if completeCallback: completeCallback()
AUTO.... All vehicles receive a mechanical inspection. Finance is available to approved purchasers.The vehicle can be found North of the River. We have over 80 vehicles in stock. Ute's , 4x4's, commercials & passenger cars. TRADE IN'S WELCOME.
# SPDX-License-Identifier: Apache-2.0 # # Copyright (C) 2015, ARM Limited and contributors. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Trace Parser Module """ import numpy as np import os import pandas as pd import sys import trappy import json import warnings import operator import logging from analysis_register import AnalysisRegister from collections import namedtuple from devlib.utils.misc import memoized from trappy.utils import listify, handle_duplicate_index NON_IDLE_STATE = -1 ResidencyTime = namedtuple('ResidencyTime', ['total', 'active']) ResidencyData = namedtuple('ResidencyData', ['label', 'residency']) class Trace(object): """ The Trace object is the LISA trace events parser. :param platform: a dictionary containing information about the target platform :type platform: dict :param data_dir: folder containing all trace data :type data_dir: str :param events: events to be parsed (everything in the trace by default) :type events: list(str) :param window: time window to consider when parsing the trace :type window: tuple(int, int) :param normalize_time: normalize trace time stamps :type normalize_time: bool :param trace_format: format of the trace. 
Possible values are: - FTrace - SysTrace :type trace_format: str :param plots_dir: directory where to save plots :type plots_dir: str :param plots_prefix: prefix for plots file names :type plots_prefix: str """ def __init__(self, platform, data_dir, events, window=(0, None), normalize_time=True, trace_format='FTrace', plots_dir=None, plots_prefix=''): # The platform used to run the experiments self.platform = platform # TRAPpy Trace object self.ftrace = None # Trace format self.trace_format = trace_format # The time window used to limit trace parsing to self.window = window # Dynamically registered TRAPpy events self.trappy_cls = {} # Maximum timespan for all collected events self.time_range = 0 # Time the system was overutilzied self.overutilized_time = 0 self.overutilized_prc = 0 # List of events required by user self.events = [] # List of events available in the parsed trace self.available_events = [] # Cluster frequency coherency flag self.freq_coherency = True # Folder containing all trace data self.data_dir = None # Setup logging self._log = logging.getLogger('Trace') # Folder containing trace if not os.path.isdir(data_dir): self.data_dir = os.path.dirname(data_dir) else: self.data_dir = data_dir # By deafult, use the trace dir to save plots self.plots_dir = plots_dir if self.plots_dir is None: self.plots_dir = self.data_dir self.plots_prefix = plots_prefix self.__registerTraceEvents(events) self.__parseTrace(data_dir, window, normalize_time, trace_format) self.__computeTimeSpan() # Minimum and Maximum x_time to use for all plots self.x_min = 0 self.x_max = self.time_range # Reset x axis time range to full scale t_min = self.window[0] t_max = self.window[1] self.setXTimeRange(t_min, t_max) self.data_frame = TraceData() self._registerDataFrameGetters(self) self.analysis = AnalysisRegister(self) def _registerDataFrameGetters(self, module): """ Internal utility function that looks up getter functions with a "_dfg_" prefix in their name and bounds them to the 
specified module. :param module: module to which the function is added :type module: class """ self._log.debug('Registering [%s] local data frames', module) for func in dir(module): if not func.startswith('_dfg_'): continue dfg_name = func.replace('_dfg_', '') dfg_func = getattr(module, func) self._log.debug(' %s', dfg_name) setattr(self.data_frame, dfg_name, dfg_func) def setXTimeRange(self, t_min=None, t_max=None): """ Set x axis time range to the specified values. :param t_min: lower bound :type t_min: int or float :param t_max: upper bound :type t_max: int or float """ if t_min is None: self.x_min = 0 else: self.x_min = t_min if t_max is None: self.x_max = self.time_range else: self.x_max = t_max self._log.debug('Set plots time range to (%.6f, %.6f)[s]', self.x_min, self.x_max) def __registerTraceEvents(self, events): """ Save a copy of the parsed events. :param events: single event name or list of events names :type events: str or list(str) """ if isinstance(events, basestring): self.events = events.split(' ') elif isinstance(events, list): self.events = events else: raise ValueError('Events must be a string or a list of strings') # Register devlib fake cpu_frequency events if 'cpu_frequency' in events: self.events.append('cpu_frequency_devlib') def __parseTrace(self, path, window, normalize_time, trace_format): """ Internal method in charge of performing the actual parsing of the trace. :param path: path to the trace folder (or trace file) :type path: str :param window: time window to consider when parsing the trace :type window: tuple(int, int) :param normalize_time: normalize trace time stamps :type normalize_time: bool :param trace_format: format of the trace. 
        Possible values are:
        - FTrace
        - SysTrace

        :type trace_format: str
        """
        self._log.debug('Loading [sched] events from trace in [%s]...', path)
        self._log.debug('Parsing events: %s', self.events)
        # Pick the TRAPpy parser class; an '.html' path always means a
        # SysTrace capture regardless of the requested format
        if trace_format.upper() == 'SYSTRACE' or path.endswith('html'):
            self._log.debug('Parsing SysTrace format...')
            trace_class = trappy.SysTrace
            self.trace_format = 'SysTrace'
        elif trace_format.upper() == 'FTRACE':
            self._log.debug('Parsing FTrace format...')
            trace_class = trappy.FTrace
            self.trace_format = 'FTrace'
        else:
            raise ValueError("Unknown trace format {}".format(trace_format))

        self.ftrace = trace_class(path, scope="custom", events=self.events,
                                  window=window, normalize_time=normalize_time)

        # Load Functions profiling data
        has_function_stats = self._loadFunctionsStats(path)

        # Check for events available on the parsed trace
        self.__checkAvailableEvents()
        if len(self.available_events) == 0:
            if has_function_stats:
                self._log.info('Trace contains only functions stats')
                return
            raise ValueError('The trace does not contain useful events '
                             'nor function stats')

        # Index PIDs and Task names
        self.__loadTasksNames()

        # Setup internal data reference to interesting events/dataframes
        self._sanitize_SchedLoadAvgCpu()
        self._sanitize_SchedLoadAvgTask()
        self._sanitize_SchedCpuCapacity()
        self._sanitize_SchedBoostCpu()
        self._sanitize_SchedBoostTask()
        self._sanitize_SchedEnergyDiff()
        self._sanitize_SchedOverutilized()
        self._sanitize_CpuFrequency()

        # Compute plot window
        if not normalize_time:
            start = self.window[0]
            if self.window[1]:
                duration = min(self.ftrace.get_duration(), self.window[1])
            else:
                duration = self.ftrace.get_duration()
            self.window = (self.ftrace.basetime + start,
                           self.ftrace.basetime + duration)

    def __checkAvailableEvents(self, key=""):
        """
        Internal method used to build a list of available events.

        An event is "available" when TRAPpy actually parsed at least one
        occurrence of it (non-empty data frame).

        :param key: key to be used for TRAPpy filtering
        :type key: str
        """
        for val in self.ftrace.get_filters(key):
            obj = getattr(self.ftrace, val)
            if len(obj.data_frame):
                self.available_events.append(val)
        self._log.debug('Events found on trace:')
        for evt in self.available_events:
            self._log.debug(' - %s', evt)

    def __loadTasksNames(self):
        """
        Try to load tasks names using one of the supported events.

        Prefers 'sched_switch' (always present on scheduler traces) and
        falls back to 'sched_load_avg_task'; logs a warning if neither
        event is available.
        """
        def load(event, name_key, pid_key):
            df = self._dfg_trace_event(event)
            self._scanTasks(df, name_key=name_key, pid_key=pid_key)

        if 'sched_switch' in self.available_events:
            load('sched_switch', 'prev_comm', 'prev_pid')
            return
        if 'sched_load_avg_task' in self.available_events:
            load('sched_load_avg_task', 'comm', 'pid')
            return
        self._log.warning('Failed to load tasks names from trace events')

    def hasEvents(self, dataset):
        """
        Returns True if the specified event is present in the parsed trace,
        False otherwise.

        :param dataset: trace event name or list of trace events
        :type dataset: str or list(str)
        """
        if dataset in self.available_events:
            return True
        return False

    def __computeTimeSpan(self):
        """
        Compute time axis range, considering all the parsed events.

        Sets self.time_range and, when 'sched_overutilized' is available,
        self.overutilized_time / self.overutilized_prc as well.
        """
        # NOTE: sys.maxint is Python 2 only (this module targets Python 2)
        ts = sys.maxint
        te = 0

        for events in self.available_events:
            df = self._dfg_trace_event(events)
            if len(df) == 0:
                continue
            if (df.index[0]) < ts:
                ts = df.index[0]
            if (df.index[-1]) > te:
                te = df.index[-1]
        self.time_range = te - ts
        self._log.debug('Collected events spans a %.3f [s] time interval',
                        self.time_range)

        # Build a stat on trace overutilization
        if self.hasEvents('sched_overutilized'):
            df = self._dfg_trace_event('sched_overutilized')
            self.overutilized_time = df[df.overutilized == 1].len.sum()
            self.overutilized_prc = 100. * self.overutilized_time / self.time_range
            self._log.debug('Overutilized time: %.6f [s] (%.3f%% of trace time)',
                            self.overutilized_time, self.overutilized_prc)

    def _scanTasks(self, df, name_key='comm', pid_key='pid'):
        """
        Extract tasks names and PIDs from the input data frame.

        The data frame should contain a task name column and PID column.
        Keeps only the *last* name seen for each PID (see getTaskByName).

        :param df: data frame containing trace events from which tasks names
            and PIDs will be extracted
        :type df: :mod:`pandas.DataFrame`

        :param name_key: The name of the dataframe columns containing task
            names
        :type name_key: str

        :param pid_key: The name of the dataframe columns containing task PIDs
        :type pid_key: str
        """
        df = df[[name_key, pid_key]]
        self._tasks_by_pid = (df.drop_duplicates(subset=pid_key, keep='last')
                              .rename(columns={
                                  pid_key : 'PID',
                                  name_key : 'TaskName'})
                              .set_index('PID').sort_index())

    def getTaskByName(self, name):
        """
        Get the PIDs of all tasks with the specified name.

        The same PID can have different task names, mainly because once a
        task is generated it inherits the parent name and then its name is
        updated to represent what the task really is.

        This API works under the assumption that a task name is updated at
        most one time and it always considers the name a task had the last
        time it has been scheduled for execution in the current trace.

        :param name: task name
        :type name: str

        :return: a list of PID for tasks which name matches the required one,
                 the last time they ran in the current trace
        """
        return (self._tasks_by_pid[self._tasks_by_pid.TaskName == name]
                .index.tolist())

    def getTaskByPid(self, pid):
        """
        Get the name of the task with the specified PID.

        Symmetric to getTaskByName: reports the name the task had the last
        time it was scheduled in the current trace.

        :param pid: task PID
        :type pid: int

        :return: the name of the task which PID matches the required one,
                 the last time they ran in the current trace, or None if the
                 PID is unknown
        """
        try:
            # NOTE: .ix is deprecated in modern pandas — works with the
            # pandas version this project pins
            return self._tasks_by_pid.ix[pid].values[0]
        except KeyError:
            return None

    def getTasks(self):
        """
        Get a dictionary of all the tasks in the Trace.

        :return: a dictionary which maps each PID to the corresponding task
                 name
        """
        return self._tasks_by_pid.TaskName.to_dict()

###############################################################################
# DataFrame Getter Methods
###############################################################################

    def df(self, event):
        """
        Get a dataframe containing all occurrences of the specified trace
        event in the parsed trace.

        .. deprecated:: use Trace::data_frame.trace_event(event_name) instead.

        :param event: Trace event name
        :type event: str
        """
        warnings.simplefilter('always', DeprecationWarning) # turn off filter
        warnings.warn("\n\tUse of Trace::df() is deprecated and will be soon removed."
                      "\n\tUse Trace::data_frame.trace_event(event_name) instead.",
                      category=DeprecationWarning)
        warnings.simplefilter('default', DeprecationWarning) # reset filter
        return self._dfg_trace_event(event)

    def _dfg_trace_event(self, event):
        """
        Get a dataframe containing all occurrences of the specified trace
        event in the parsed trace.

        :param event: Trace event name
        :type event: str

        :raises ValueError: if the trace is not loaded yet or the event is
            not supported
        """
        if self.data_dir is None:
            raise ValueError("trace data not (yet) loaded")
        if self.ftrace and hasattr(self.ftrace, event):
            return getattr(self.ftrace, event).data_frame
        raise ValueError('Event [{}] not supported. '
                         'Supported events are: {}'
                         .format(event, self.available_events))

    def _dfg_functions_stats(self, functions=None):
        """
        Get a DataFrame of specified kernel functions profile data

        For each profiled function a DataFrame is returned which reports stats
        on kernel functions execution time. The reported stats are per-CPU and
        includes: number of times the function has been executed (hits),
        average execution time (avg), overall execution time (time) and samples
        variance (s_2).
        By default returns a DataFrame of all the functions profiled.

        :param functions: the name of the function or a list of function names
                          to report
        :type functions: str or list(str)
        """
        if not hasattr(self, '_functions_stats_df'):
            return None
        df = self._functions_stats_df
        if not functions:
            return df
        return df.loc[df.index.get_level_values(1).isin(listify(functions))]

###############################################################################
# Trace Events Sanitize Methods
###############################################################################

    def _sanitize_SchedCpuCapacity(self):
        """
        Add more columns to cpu_capacity data frame if the energy model is
        available.
        """
        if not self.hasEvents('cpu_capacity') \
           or 'nrg_model' not in self.platform:
            return
        df = self._dfg_trace_event('cpu_capacity')

        # Add column with LITTLE and big CPUs max capacities
        nrg_model = self.platform['nrg_model']
        max_lcap = nrg_model['little']['cpu']['cap_max']
        max_bcap = nrg_model['big']['cpu']['cap_max']
        df['max_capacity'] = np.select(
            [df.cpu.isin(self.platform['clusters']['little'])],
            [max_lcap], max_bcap)
        # Add LITTLE and big CPUs "tipping point" threshold (80% of the
        # maximum capacity)
        tip_lcap = 0.8 * max_lcap
        tip_bcap = 0.8 * max_bcap
        df['tip_capacity'] = np.select(
            [df.cpu.isin(self.platform['clusters']['little'])],
            [tip_lcap], tip_bcap)

    def _sanitize_SchedLoadAvgCpu(self):
        """
        If necessary, rename certain signal names from v5.0 to v5.1 format.
        """
        if not self.hasEvents('sched_load_avg_cpu'):
            return
        df = self._dfg_trace_event('sched_load_avg_cpu')
        if 'utilization' in df:
            df.rename(columns={'utilization': 'util_avg'}, inplace=True)
            df.rename(columns={'load': 'load_avg'}, inplace=True)

    def _sanitize_SchedLoadAvgTask(self):
        """
        If necessary, rename certain signal names from v5.0 to v5.1 format.
        Also adds 'cluster' and 'min_cluster_cap' helper columns.
        """
        if not self.hasEvents('sched_load_avg_task'):
            return
        df = self._dfg_trace_event('sched_load_avg_task')
        if 'utilization' in df:
            df.rename(columns={'utilization': 'util_avg'}, inplace=True)
            df.rename(columns={'load': 'load_avg'}, inplace=True)
            df.rename(columns={'avg_period': 'period_contrib'}, inplace=True)
            df.rename(columns={'runnable_avg_sum': 'load_sum'}, inplace=True)
            df.rename(columns={'running_avg_sum': 'util_sum'}, inplace=True)
        df['cluster'] = np.select(
            [df.cpu.isin(self.platform['clusters']['little'])],
            ['LITTLE'], 'big')
        # Add a column which represents the max capacity of the smallest
        # cluster which can accommodate the task utilization
        little_cap = self.platform['nrg_model']['little']['cpu']['cap_max']
        big_cap = self.platform['nrg_model']['big']['cpu']['cap_max']
        df['min_cluster_cap'] = df.util_avg.map(
            lambda util_avg: big_cap if util_avg > little_cap else little_cap
        )

    def _sanitize_SchedBoostCpu(self):
        """
        Add a boosted utilization signal as the sum of utilization and margin.

        Also, if necessary, rename certain signal names from v5.0 to v5.1
        format.
        """
        if not self.hasEvents('sched_boost_cpu'):
            return
        df = self._dfg_trace_event('sched_boost_cpu')
        if 'usage' in df:
            df.rename(columns={'usage': 'util'}, inplace=True)
        df['boosted_util'] = df['util'] + df['margin']

    def _sanitize_SchedBoostTask(self):
        """
        Add a boosted utilization signal as the sum of utilization and margin.

        Also, if necessary, rename certain signal names from v5.0 to v5.1
        format.
        """
        if not self.hasEvents('sched_boost_task'):
            return
        df = self._dfg_trace_event('sched_boost_task')
        if 'utilization' in df:
            # Convert signal names to the v5.1 format
            df.rename(columns={'utilization': 'util'}, inplace=True)
        df['boosted_util'] = df['util'] + df['margin']

    def _sanitize_SchedEnergyDiff(self):
        """
        If a energy model is provided, some signals are added to the
        sched_energy_diff trace event data frame.

        Also convert between existing field name formats for
        sched_energy_diff
        """
        if not self.hasEvents('sched_energy_diff') \
           or 'nrg_model' not in self.platform:
            return
        nrg_model = self.platform['nrg_model']
        em_lcluster = nrg_model['little']['cluster']
        em_bcluster = nrg_model['big']['cluster']
        em_lcpu = nrg_model['little']['cpu']
        em_bcpu = nrg_model['big']['cpu']
        lcpus = len(self.platform['clusters']['little'])
        bcpus = len(self.platform['clusters']['big'])
        SCHED_LOAD_SCALE = 1024

        # Maximum system energy: all CPUs plus both cluster-level costs
        power_max = em_lcpu['nrg_max'] * lcpus + em_bcpu['nrg_max'] * bcpus + \
            em_lcluster['nrg_max'] + em_bcluster['nrg_max']
        self._log.debug(
            "Maximum estimated system energy: {0:d}".format(power_max))

        df = self._dfg_trace_event('sched_energy_diff')

        translations = {'nrg_d' : 'nrg_diff',
                        'utl_d' : 'usage_delta',
                        'payoff' : 'nrg_payoff'
        }
        df.rename(columns=translations, inplace=True)

        df['nrg_diff_pct'] = SCHED_LOAD_SCALE * df.nrg_diff / power_max

        # Tag columns by usage_delta
        ccol = df.usage_delta
        df['usage_delta_group'] = np.select(
            [ccol < 150, ccol < 400, ccol < 600],
            ['< 150', '< 400', '< 600'], '>= 600')

        # Tag columns by nrg_payoff
        ccol = df.nrg_payoff
        df['nrg_payoff_group'] = np.select(
            [ccol > 2e9, ccol > 0, ccol > -2e9],
            ['Optimal Accept', 'SchedTune Accept', 'SchedTune Reject'],
            'Suboptimal Reject')

    def _sanitize_SchedOverutilized(self):
        """ Add a column with overutilized status duration. """
        if not self.hasEvents('sched_overutilized'):
            return
        df = self._dfg_trace_event('sched_overutilized')
        # 'len' is the time until the next state change
        df['start'] = df.index
        df['len'] = (df.start - df.start.shift()).fillna(0).shift(-1)
        df.drop('start', axis=1, inplace=True)

    def _chunker(self, seq, size):
        """
        Given a data frame or a series, generate a sequence of chunks of the
        given size.

        :param seq: data to be split into chunks
        :type seq: :mod:`pandas.Series` or :mod:`pandas.DataFrame`

        :param size: size of each chunk
        :type size: int
        """
        return (seq.iloc[pos:pos + size] for pos in range(0, len(seq), size))

    def _sanitize_CpuFrequency(self):
        """
        Verify that all platform reported clusters are frequency coherent
        (i.e. frequency scaling is performed at a cluster level).
        """
        if not self.hasEvents('cpu_frequency_devlib'):
            return

        devlib_freq = self._dfg_trace_event('cpu_frequency_devlib')
        devlib_freq.rename(columns={'cpu_id':'cpu'}, inplace=True)
        devlib_freq.rename(columns={'state':'frequency'}, inplace=True)

        df = self._dfg_trace_event('cpu_frequency')
        clusters = self.platform['clusters']

        # devlib always introduces fake cpu_frequency events; in case the
        # OS has not generated cpu_frequency events these are the only
        # frequency events to report
        if len(df) == 0:
            # Register devlib injected events as 'cpu_frequency' events
            setattr(self.ftrace.cpu_frequency, 'data_frame', devlib_freq)
            df = devlib_freq
            self.available_events.append('cpu_frequency')

        # make sure fake cpu_frequency events are never interleaved with
        # OS generated events
        else:
            if len(devlib_freq) > 0:

                # Frequencies injection is done on a per-cluster basis.
                # This is based on the assumption that clusters are
                # frequency coherent.
                # For each cluster we inject devlib events only if
                # these events do not overlap with os-generated ones.

                # Inject "initial" devlib frequencies
                os_df = df
                dl_df = devlib_freq.iloc[:self.platform['cpus_count']]
                for _,c in self.platform['clusters'].iteritems():
                    dl_freqs = dl_df[dl_df.cpu.isin(c)]
                    os_freqs = os_df[os_df.cpu.isin(c)]
                    self._log.debug("First freqs for %s:\n%s", c, dl_freqs)
                    # All devlib events "before" os-generated events
                    self._log.debug("Min os freq @: %s", os_freqs.index.min())
                    if os_freqs.empty or \
                       os_freqs.index.min() > dl_freqs.index.max():
                        self._log.debug("Insert devlib freqs for %s", c)
                        df = pd.concat([dl_freqs, df])

                # Inject "final" devlib frequencies
                os_df = df
                dl_df = devlib_freq.iloc[self.platform['cpus_count']:]
                for _,c in self.platform['clusters'].iteritems():
                    dl_freqs = dl_df[dl_df.cpu.isin(c)]
                    os_freqs = os_df[os_df.cpu.isin(c)]
                    self._log.debug("Last freqs for %s:\n%s", c, dl_freqs)
                    # All devlib events "after" os-generated events
                    self._log.debug("Max os freq @: %s", os_freqs.index.max())
                    if os_freqs.empty or \
                       os_freqs.index.max() < dl_freqs.index.min():
                        self._log.debug("Append devlib freqs for %s", c)
                        df = pd.concat([df, dl_freqs])

                df.sort_index(inplace=True)

            setattr(self.ftrace.cpu_frequency, 'data_frame', df)

        # Frequency Coherency Check: within a cluster, every chunk of
        # len(cpus) consecutive events must report the same frequency
        for _, cpus in clusters.iteritems():
            cluster_df = df[df.cpu.isin(cpus)]
            for chunk in self._chunker(cluster_df, len(cpus)):
                f = chunk.iloc[0].frequency
                if any(chunk.frequency != f):
                    self._log.warning('Cluster Frequency is not coherent! '
                                      'Failure in [cpu_frequency] events at:')
                    self._log.warning(chunk)
                    self.freq_coherency = False
                    return
        self._log.info('Platform clusters verified to be Frequency coherent')

###############################################################################
# Utility Methods
###############################################################################

    def integrate_square_wave(self, sq_wave):
        """
        Compute the integral of a square wave time series.

        :param sq_wave: square wave assuming only 1.0 and 0.0 values
        :type sq_wave: :mod:`pandas.Series`
        """
        sq_wave.iloc[-1] = 0.0
        # Compact signal to obtain only 1-0-1-0 sequences
        comp_sig = sq_wave.loc[sq_wave.shift() != sq_wave]
        # First value for computing the difference must be a 1
        if comp_sig.iloc[0] == 0.0:
            return sum(comp_sig.iloc[2::2].index - comp_sig.iloc[1:-1:2].index)
        else:
            return sum(comp_sig.iloc[1::2].index - comp_sig.iloc[:-1:2].index)

    def _loadFunctionsStats(self, path='trace.stats'):
        """
        Read functions profiling file and build a data frame containing all
        relevant data.

        :param path: path to the functions profiling trace file
        :type path: str

        :return: True if at least one function stats entry was loaded
        """
        if os.path.isdir(path):
            path = os.path.join(path, 'trace.stats')
        # Derive the .stats file name from a trace file name
        if (path.endswith('dat') or
            path.endswith('txt') or
            path.endswith('html')):
            pre, ext = os.path.splitext(path)
            path = pre + '.stats'
        if not os.path.isfile(path):
            return False

        # Opening functions profiling JSON data file
        self._log.debug('Loading functions profiling data from [%s]...', path)
        with open(os.path.join(path), 'r') as fh:
            trace_stats = json.load(fh)

        # Build DataFrame of function stats, one frame per CPU
        frames = {}
        for cpu, data in trace_stats.iteritems():
            frames[int(cpu)] = pd.DataFrame.from_dict(data, orient='index')

        # Build and keep track of the DataFrame
        self._functions_stats_df = pd.concat(frames.values(),
                                             keys=frames.keys())

        return len(self._functions_stats_df) > 0

    @memoized
    def getCPUActiveSignal(self, cpu):
        """
        Build a square wave representing the active (i.e. non-idle) CPU time,
        i.e.:

          cpu_active[t] == 1 if the CPU is reported to be non-idle by cpuidle
          at time t
          cpu_active[t] == 0 otherwise

        :param cpu: CPU ID
        :type cpu: int

        :returns: A :mod:`pandas.Series` or ``None`` if the trace contains no
                  "cpu_idle" events
        """
        if not self.hasEvents('cpu_idle'):
            self._log.warning('Events [cpu_idle] not found, '
                              'cannot compute CPU active signal!')
            return None

        idle_df = self._dfg_trace_event('cpu_idle')
        cpu_df = idle_df[idle_df.cpu_id == cpu]

        cpu_active = cpu_df.state.apply(
            lambda s: 1 if s == NON_IDLE_STATE else 0
        )

        start_time = 0.0
        if not self.ftrace.normalized_time:
            start_time = self.ftrace.basetime

        # Make sure the signal is defined from the start of the trace:
        # prepend the complement of the first seen state
        if cpu_active.empty:
            cpu_active = pd.Series([0], index=[start_time])
        elif cpu_active.index[0] != start_time:
            entry_0 = pd.Series(cpu_active.iloc[0] ^ 1, index=[start_time])
            cpu_active = pd.concat([entry_0, cpu_active])

        # Fix sequences of wakeup/sleep events reported with the same index
        return handle_duplicate_index(cpu_active)

    @memoized
    def getClusterActiveSignal(self, cluster):
        """
        Build a square wave representing the active (i.e. non-idle) cluster
        time, i.e.:

          cluster_active[t] == 1 if at least one CPU is reported to be
          non-idle by CPUFreq at time t
          cluster_active[t] == 0 otherwise

        :param cluster: list of CPU IDs belonging to a cluster
        :type cluster: list(int)

        :returns: A :mod:`pandas.Series` or ``None`` if the trace contains no
                  "cpu_idle" events
        """
        if not self.hasEvents('cpu_idle'):
            self._log.warning('Events [cpu_idle] not found, '
                              'cannot compute cluster active signal!')
            return None

        active = self.getCPUActiveSignal(cluster[0]).to_frame(name=cluster[0])
        for cpu in cluster[1:]:
            active = active.join(
                self.getCPUActiveSignal(cpu).to_frame(name=cpu),
                how='outer'
            )

        active.fillna(method='ffill', inplace=True)

        # Cluster active is the OR between the actives on each CPU
        # belonging to that specific cluster
        cluster_active = reduce(
            operator.or_,
            [cpu_active.astype(int) for _, cpu_active in active.iteritems()]
        )

        return cluster_active


class TraceData:
    """ A DataFrame collector exposed to Trace's clients """
    pass

# vim :set tabstop=4 shiftwidth=4 expandtab
This is the complete set from Phil Circle's performance at Chicago's Uncommon Ground Lakeview on October 28th, 2018. Following Phil was his friend Dave Arcari, on tour from Scotland. This audio was pulled from a Facebook live stream provided by a member of the audience. The show is currently being edited and split into tracks for a live release.
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.http import require_POST

from ..question.models import Question, Answer
from .models import Follow
from .forms import QuestionForm
from .helpers import get_total_likes


# @login_required
# def my_profile(request):
#     unanswered_questions = Question.objects.filter(
#         asked_to=request.user,
#         answer=None
#     ).select_related('asked_by').order_by('-created')
#     asked_questions = Question.objects.filter(
#         asked_by=request.user
#     ).select_related('asked_to').order_by('-created')
#
#     context = {
#         'unanswered_questions': unanswered_questions,
#         'asked_questions': asked_questions,
#         'total_likes': get_total_likes(request.user)
#     }
#     return render(request, 'askfm/my_profile.html', context)


def homepage(request):
    """Redirect authenticated users to their own profile; otherwise show
    the landing page with a random sample of users to explore."""
    if request.user.is_authenticated():
        return redirect(
            reverse('askfm:user_profile', args=(request.user.username, ))
        )
    random_users = User.objects.order_by('?')[:20]
    context = {
        'random_users': random_users
    }
    return render(request, 'askfm/homepage.html', context)


def user_profile(request, username):
    """Show a user's profile (answered + asked questions) and handle the
    "ask a question" form submitted from that page.

    Anonymous visitors may view the profile but are redirected to the
    login page (with a ?next= back-link) if they try to ask a question.
    """
    user = get_object_or_404(User, username=username)
    answered_questions = Question.objects.exclude(answer=None).filter(
        asked_to=user).select_related('answer').order_by('-created')
    asked_questions = Question.objects.filter(
        asked_by=user).select_related('answer').order_by('-created')

    if request.method == 'POST':
        if not request.user.is_authenticated():
            # BUGFIX: messages.error() requires the request as its first
            # argument; it was previously called with the message only,
            # which raised a TypeError at runtime.
            messages.error(request, 'You must login first!')
            return redirect(
                reverse('auth:login') + '?next=/{}/'.format(username))
        form = QuestionForm(request.POST)
        if form.is_valid():
            q = Question(
                asked_by=request.user,
                asked_to=get_object_or_404(User, username=username),
                text=form.cleaned_data['question_text'],
                anonymous=form.cleaned_data.get('anonymous', False)
            )
            q.save()
            messages.success(request, 'Your question has been submitted!')
            return redirect(reverse('askfm:user_profile', args=(username,)))
    else:
        form = QuestionForm()

    context = {
        'username': username,
        'answered_questions': answered_questions,
        'asked_questions': asked_questions,
        'form': form,
        'total_likes': get_total_likes(user)
    }
    return render(request, 'askfm/user_profile.html', context)


@login_required
@require_POST
def answer(request):
    """Create an Answer for one of the logged-in user's own questions.

    Expects 'question-id' and 'answer-text' in the POST data; only the
    user the question was asked to may answer it (enforced by the
    asked_to filter in the lookup).
    """
    question_id = request.POST.get('question-id')
    answer_text = request.POST.get('answer-text')
    if question_id and answer_text:
        question = get_object_or_404(
            Question,
            id=question_id,
            asked_to=request.user
        )
        # Renamed local (was 'answer') to avoid shadowing this view function
        new_answer = Answer.objects.create(text=answer_text, question=question)
        messages.success(request, 'Answer submitted successfully!')
    else:
        messages.error(request, 'Something went wrong.', extra_tags='danger')
    return redirect(
        reverse('askfm:user_profile', args=(request.user.username, ))
    )


@login_required
def friends(request, username):
    """List the users the logged-in user follows.

    Users may only view their own friends page; anyone else is sent back
    to their own profile.
    """
    if request.user.username != username:
        return redirect(
            reverse('askfm:user_profile', args=(request.user.username, ))
        )
    following = User.objects.filter(following__followed_by=request.user)
    context = {
        'following': following
    }
    return render(request, 'askfm/friends.html', context=context)
Founded by Tesla Motors co-founder Ian Wright, Wrightspeed is bringing range-extended electric vehicle powertrains to the heavyweight transportation industry, where maintenance expenses are costly and fuel economy is paramount. This Silicon Valley engineering company is electrifying delivery trucks, buses and refuse trucks, saving fleet operators hefty margins on maintenance costs while cutting fuel consumption (by as much as 67%) and CO2 emissions (by up to 63%). Razorfrog’s design team collaborated with renowned SF photographer Josh Hittleman to capture the essence of the future that Wrightspeed has envisioned. Utilizing sleek typography, sharp visual assets and a commanding palette, the new Wrightspeed website is anything but ordinary. Their responsive site features a slew of videos, technical diagrams, news and press releases to inform prospective investors, the media and the general public of their revolutionary technology. We’re proud to have collaborated with Wrightspeed to usher in a new era of electric transportation. Be sure to watch Ian Wright’s TED talk to learn more about The Route Powertrain’s uncompromised power.
#
# DBus structure for comps definitions.
#
# Copyright (C) 2021  Red Hat, Inc.  All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
from dasbus.structure import DBusData
from dasbus.typing import *  # pylint: disable=wildcard-import

__all__ = ["CompsGroupData", "CompsEnvironmentData"]


class CompsGroupData(DBusData):
    """Comps group data.

    DBus-serializable description of a package group: a unique id plus
    translated, user-visible name and description strings.
    """

    def __init__(self):
        self._id = ""
        self._name = ""
        self._description = ""

    @property
    def id(self) -> Str:
        """Unique identifier of the group.

        :return: a string
        """
        return self._id

    @id.setter
    def id(self, value):
        """Set the group identifier."""
        self._id = value

    @property
    def name(self) -> Str:
        """Translated name of the group.

        :return: a translated string
        """
        return self._name

    @name.setter
    def name(self, value):
        """Set the translated group name."""
        self._name = value

    @property
    def description(self) -> Str:
        """Translated description of the group.

        :return: a translated string
        """
        return self._description

    @description.setter
    def description(self, value):
        """Set the translated group description."""
        self._description = value


class CompsEnvironmentData(DBusData):
    """Comps environment data.

    DBus-serializable description of an installation environment:
    identification strings plus the optional/default/visible group id
    lists that drive the group-selection UI.
    """

    def __init__(self):
        self._id = ""
        self._name = ""
        self._description = ""
        self._optional_groups = []
        self._default_groups = []
        self._visible_groups = []

    @property
    def id(self) -> Str:
        """Unique identifier of the environment.

        :return: a string
        """
        return self._id

    @id.setter
    def id(self, value):
        """Set the environment identifier."""
        self._id = value

    @property
    def name(self) -> Str:
        """Translated name of the environment.

        :return: a translated string
        """
        return self._name

    @name.setter
    def name(self, value):
        """Set the translated environment name."""
        self._name = value

    @property
    def description(self) -> Str:
        """Translated description of the environment.

        :return: a translated string
        """
        return self._description

    @description.setter
    def description(self, value):
        """Set the translated environment description."""
        self._description = value

    @property
    def optional_groups(self) -> List[Str]:
        """List of optional groups.

        These groups don't have to be installed for
        successful installation of the environment.

        :return: a list of group identifiers
        """
        return self._optional_groups

    @optional_groups.setter
    def optional_groups(self, value):
        """Set the list of optional group ids."""
        self._optional_groups = value

    @property
    def default_groups(self) -> List[Str]:
        """List of default optional groups.

        These groups don't have to be installed for
        successful installation of the environment,
        but they will be pre-selected by default.

        :return: a list of group identifiers
        """
        return self._default_groups

    @default_groups.setter
    def default_groups(self, value):
        """Set the list of default optional group ids."""
        self._default_groups = value

    @property
    def visible_groups(self) -> List[Str]:
        """List of user-visible groups.

        These groups are not defined by the environment,
        but they supplement the list of optional groups
        that can be selected by users.

        :return: a list of group identifiers
        """
        return self._visible_groups

    @visible_groups.setter
    def visible_groups(self, value):
        """Set the list of user-visible group ids."""
        self._visible_groups = value

    def get_available_groups(self) -> List[Str]:
        """Get a list of groups available for the user selection.

        Concatenates the three group lists, de-duplicating while
        preserving first-seen order (dict.fromkeys keeps insertion order).

        :return: a list of group identifiers
        """
        return list(dict.fromkeys(
            self.optional_groups
            + self.default_groups
            + self.visible_groups
        ))
In Tajikistan, Kuktosh Rudaki won against Pandzhsher Dzhaloliddin Rumi on their home pitch. The score was 3-2 when the whistle blew, so Kuktosh Rudaki could pat themselves on the back and add three points to their tally. In the table, Kuktosh Rudaki hold spot three, and Pandzhsher Dzhaloliddin Rumi spot seven.
""" ############################################################################################# # Name: StateAFTN.py # # Author: Daniel Lemay # # Date: 2006-06-06 # # Description: Keep the AFTN state in this object # ############################################################################################# """ import os, sys, pickle sys.path.insert(1,sys.path[0] + '/..') sys.path.insert(1,sys.path[0] + '/../importedLibs') class StateAFTN: def __init__(self): self.CSN = None self.waitedTID = None self.lastAckReceived = None self.waitingForAck = None def fill(self, messageManager): self.CSN = messageManager.CSN self.waitedTID = messageManager.waitedTID self.lastAckReceived = messageManager.lastAckReceived self.waitingForAck = messageManager.waitingForAck def clear(self): self.CSN = None self.waitedTID = None self.lastAckReceived = None self.waitingForAck = None def infos(self): return """ CSN = %s waitedTID = %s lastAckReceived = %s waitingForAck = %s """ % (self.CSN, self.waitedTID, self.lastAckReceived, self.waitingForAck) if __name__ == "__main__": state = StateAFTN() print(state.infos())
Congrats to Yo Gotti (real name Mario Mims). The 36-year-old rapper is closing out the Hot 100’s top 10, with his hit single “Rake It Up,” featuring Nicki Minaj, surging 24-10. After the arrival of its official video on Aug. 21, the track blasts 12-5 on Streaming Songs (28.1 million, up 45 percent, good for the Hot 100’s top streaming gain). It also jumps 42-26 on Digital Song Sales (17,000, up 24 percent), while lifting 35-33 on Radio Songs (36 million, up 10 percent). Yo Gotti earns his first Hot 100 top 10, after previously peaking at a No. 13 high with “Down in the DM,” also featuring Minaj, in March 2016. With her 14th Hot 100 top 10, Minaj extends her lead for the most among female rappers, pushing further past runner-up Missy Elliott, with nine. Watch Yo Gotti’s “Rake It Up” ft. Nicki Minaj video below.
import six

from .node import Node
from .utils import parse_date


@six.python_2_unicode_compatible
class Person(Node):
    """Represents a Person on CrunchBase"""

    # Relationship names the CrunchBase API may attach to a person node.
    KNOWN_RELATIONSHIPS = [
        "primary_affiliation",
        "primary_location",
        "primary_image",
        "websites",
        "degrees",
        "jobs",
        "advisory_roles",
        "founded_companies",
        "investments",
        "memberships",
        "images",
        "videos",
        "news",
    ]

    # Scalar properties the CrunchBase API may report for a person node.
    KNOWN_PROPERTIES = [
        "permalink",
        "api_path",
        "web_path",
        "last_name",
        "first_name",
        "also_known_as",
        "bio",
        "role_investor",
        "born_on",
        "born_on_trust_code",
        "is_deceased",
        "died_on",
        "died_on_trust_code",
        "created_at",
        "updated_at",
    ]

    def _coerce_values(self):
        """A delegate method to handle parsing all data and converting
        them into python values
        """
        # Date-valued fields arrive as strings; convert the ones that are
        # present and non-empty into datetime values.
        for field in ('born_on', 'died_on'):
            raw = getattr(self, field, None)
            if raw:
                setattr(self, field, parse_date(raw))

    def __str__(self):
        """Human-readable identity: full name plus permalink."""
        return u'{first} {last} ({permalink})'.format(
            first=self.first_name,
            last=self.last_name,
            permalink=self.permalink,
        )

    def __repr__(self):
        return self.__str__()
How to pour and measure fudge in a mixing bowl - Make Fudge - Calico Cottage Inc. Learn how to pour and measure so you always end up with perfectly textured and deliciously flavored fudge. If you see skin on the fudge in the kettle, run the kettle for a few seconds to blend in the skin prior to pouring. Please note that the kettle has been positioned sideways so that the camera can better capture the instructional steps. Follow your Setup Guide and Fudge Manual for proper positioning and use of the kettle. As you lift the kettle back to its upright position, the remaining fudge that had been flowing from the lip will raise the level of the poured fudge in the mixing bowl to the desired amount. Use the measurement markings on the bowl’s side to guide you.
# -*- coding: utf-8 -*- # Generated by Django 1.11.10 on 2018-05-30 08:32 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("delft3dworker", "0089_template_shortname"), ] operations = [ migrations.AddField( model_name="workflow", name="action_log", field=models.TextField(blank=True, default=""), ), migrations.AddField( model_name="workflow", name="cluster_log", field=models.TextField(blank=True, default=""), ), migrations.AddField( model_name="workflow", name="yaml", field=models.FileField(default="", upload_to="workflows/"), ), migrations.AlterField( model_name="scene", name="phase", field=models.PositiveSmallIntegerField( choices=[ (0, "New"), (6, "Idle: waiting for user input"), (11, "Starting workflow"), (12, "Running workflow"), (13, "Removing workflow"), (500, "Finished"), (501, "Failed"), ], default=0, ), ), migrations.AlterField( model_name="template", name="shortname", field=models.CharField(default="gt", max_length=256), ), migrations.AlterField( model_name="template", name="yaml_template", field=models.FileField(default="", upload_to="workflow_templates/"), ), ]
The Hispanic community might be the fastest-growing population in Texas, but it is highly underrepresented in local and state politics, according to a new report. Hispanics make up more than 38 percent of the population in the Lone Star State — yet only about 10 percent of Texas mayors and county judges are Hispanic. The Austin American-Statesman reported more than 1.3 million Hispanics across Texas live in cities or counties that have no Hispanic representation on their city council and county commissioners courts. The disparities remain high even when accounting for non-citizens. Lydia Camarillo, vice president of the Southwest Voter Registration Education Project, said that while some areas of the state — notably South Texas — have seen sharp rises in the number of Latinos elected to local office, the Statesman's findings show "there is still disparity in your face" across Texas. In county government, Latino representation has largely stagnated during the past two decades. In 1994, Latinos made up 10 percent of county commissioner positions; today, the percentage has inched up to 13 percent — even though the state's Hispanic population nearly doubled during that time. Statewide election experts and Hispanic officeholders in some of the state's most underrepresented regions say the disparity defies easy explanation. They point out several factors: Texas laws that have made registering to vote more difficult; redistricting efforts designed to dilute Hispanic influence; and a virtual abandonment by statewide political parties. And even in districts with favorable demographics, Hispanics often turn out to vote in small numbers. While the most glaring disparities are clustered in a largely rural swath of West Texas, through the High Plains region and into the Panhandle, the newspaper's analysis found similar patterns across the state. Medina County, just outside San Antonio, has a 50 percent Latino population but no Hispanic county commissioners.
Odessa, where 63 percent of city residents are Hispanic, has just one Hispanic city council member. In Central Texas, while Hispanics in Guadalupe and Gonzales counties make up about a third of eligible voters, neither county has a Latino commissioner. The most underrepresented areas also tend to be heavily Republican, which observers say also limits the participation of Texas Hispanics, who more often vote Democratic. In Medina County, County Judge Chris Schuchart said he believes the lack of Hispanic elected officials is more attributable to party than to ethnicity. "The county votes Republican, and ... we generally have very few Democrats on the local ballot," he said. Amado Morales is the lone Hispanic member of the commissioners court in Floyd County in northwestern Texas. Hispanics make up 53 percent of the population in the county. The 64-year-old pumpkin farmer got involved in local politics more than 35 years ago. Thanks in part to a lawsuit that forced officials to move from an at-large election system to single-member districts, he was elected to the Floydada City Council in 1980. After the Voting Rights Act was amended in 1975 to explicitly cover Latinos, advocates and lawyers filed hundreds of lawsuits throughout the state challenging voting procedures. But once elected, Morales recalled, the cotton processors whom he did business with said his political ambitions were threatening to cost them customers in the conservative county. He said, "They told me, 'We need to get rid of this guy.'" After five years as a council member, Morales moved to the school board, and then ultimately to the commissioners court, where he won election after three tries, including a disputed count that he sued over. "They don't want to share power," he said. In the Panhandle city of Amarillo, Mercy Murguia was appointed in 2011 to fill an unexpired term on the Potter County Commissioners Court when she was 32. She has since won re-election twice.
Since joining the commissioners court, she has sought to expand the Latino vote in the Panhandle. "We know apathy is a big reason — we're not naive — but we also know that many Hispanics lack a basic understanding of where to vote, whether or not they're registered and so forth." Murguia has also helped other Hispanics run for the school board. "The little things — just finding out how to run for school board, where to get the forms (to declare candidacy) — was difficult."
#!/usr/bin/env python
"""
Copyright (c) 2015 Jonas Krehl <Jonas.Krehl@triebenberg.de>

Permission to use, copy, modify, and/or distribute this software for any purpose
with or without fee is hereby granted, provided that the above copyright notice
and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
"""

import numpy

# CUDA support is optional: only available when the cuda backend of reikna
# can be imported (i.e. pycuda is installed and working).
try:
    import reikna.cluda.cuda
    CUDA = True
except ImportError:
    CUDA = False

import reikna.cluda.ocl


class AbstractArray:
    """Front-end that dispatches array construction/conversion between
    compute back-ends ("modes": numpy, cuda, opencl).

    ``AbstractArray(array, mode)`` returns an instance of the concrete
    subclass registered for ``mode``, converting from another back-end
    if ``array`` is already an AbstractArray.
    """

    # Registry: mode name -> concrete AbstractArray subclass.
    # Populated below as each backend class is defined.
    _modes = {}

    def __new__(cls, array, mode="numpy", *args, **kwargs):
        assert mode in cls._modes
        if isinstance(array, AbstractArray):
            # Already wrapped: convert between back-ends if necessary.
            return array._as(mode, *args, **kwargs)
        return cls._modes[mode].from_ndarray(array, *args, **kwargs)

    @classmethod
    def from_ndarray(cls, array, *args, **kwargs):
        """Create a back-end array from a numpy.ndarray (abstract).

        BUG FIX: was ``raise NotImplemented`` — ``NotImplemented`` is not an
        exception, so that line raised ``TypeError`` in Python 3 instead of
        signalling "abstract method".
        """
        raise NotImplementedError

    def to_ndarray(self):
        """Return the data as a plain numpy.ndarray (abstract)."""
        raise NotImplementedError

    def to_same_mode(self, *args, **kwargs):
        """Hook for same-mode conversion; default is a no-op."""
        return self

    def _as(self, mode, *args, **kwargs):
        """Convert this array to ``mode``, round-tripping through numpy
        when the target back-end differs."""
        assert mode in self._modes
        if mode == self.mode:
            return self.to_same_mode(*args, **kwargs)
        return self._modes[mode].from_ndarray(self.to_ndarray(), *args, **kwargs)


class AbstractArray_Numpy(numpy.ndarray, AbstractArray):
    """Host-memory backend: a thin view on numpy.ndarray."""

    mode = "numpy"

    @classmethod
    def from_ndarray(cls, array):
        # A view shares the buffer with the source array (no copy).
        return array.view(__class__)

    def to_ndarray(self):
        return self.view(numpy.ndarray)


AbstractArray._modes['numpy'] = AbstractArray_Numpy

if CUDA:
    class AbstractArray_CUDA(reikna.cluda.cuda.Array, AbstractArray):
        """CUDA backend via reikna's cluda abstraction."""

        mode = "cuda"

        def __init__(self, *args, **kwargs):
            # Instances are produced by Thread.to_device() and then re-tagged;
            # nothing to initialise here.
            pass

        @staticmethod
        def get_thread(thread=None):
            """Validate ``thread`` or create a fresh CUDA thread."""
            if isinstance(thread, reikna.cluda.cuda.Thread):
                return thread
            elif isinstance(thread, reikna.cluda.ocl.Thread):
                raise TypeError("Thread of wrong CLUDA Backend given")
            else:
                return reikna.cluda.cuda.Thread.create()

        @classmethod
        def from_ndarray(cls, array, thread=None):
            # Delegate thread validation/creation to get_thread() instead of
            # duplicating the same isinstance cascade here.
            self = __class__.get_thread(thread).to_device(array)
            self.__class__ = __class__
            return self

        def to_ndarray(self):
            return self.get()

        def to_same_mode(self, thread=None):
            # Re-upload only when a different, explicit thread is requested.
            if self.thread == thread or thread is None:
                return self
            return self.from_ndarray(self.to_ndarray(), thread)

    AbstractArray._modes['cuda'] = AbstractArray_CUDA


class AbstractArray_OpenCL(reikna.cluda.ocl.Array, AbstractArray):
    """OpenCL backend via reikna's cluda abstraction."""

    mode = "opencl"

    def __init__(self, *args, **kwargs):
        # See AbstractArray_CUDA.__init__.
        pass

    @staticmethod
    def get_thread(thread=None):
        """Validate ``thread`` or create a fresh OpenCL thread."""
        if isinstance(thread, reikna.cluda.ocl.Thread):
            return thread
        elif isinstance(thread, reikna.cluda.cuda.Thread):
            raise TypeError("Thread of wrong CLUDA Backend given")
        else:
            return reikna.cluda.ocl.Thread.create()

    @classmethod
    def from_ndarray(cls, array, thread=None):
        self = __class__.get_thread(thread).to_device(array)
        self.__class__ = __class__
        return self

    def to_ndarray(self):
        return self.get()

    def to_same_mode(self, thread=None):
        if self.thread == thread or thread is None:
            return self
        return self.from_ndarray(self.to_ndarray(), thread)


AbstractArray._modes['opencl'] = AbstractArray_OpenCL
Mini Hidden CCTV Microphone for CCTV System (CM501C) - Chinasky Electronics Co., Ltd. Clear voice, low noise, and a fashionable appearance suit it for various occasions. This microphone is typically used as an add-on to CCTV cameras that do not have built-in audio. Cables can be used with these microphones to easily run both the audio and power centrally back to the DVR. It is compatible with standalone DVRs and PC-based DVRs, such as GeoVision.
"""Engines are functions that are used by the Do-ers. Keyword Args: experiments, farms, barn Returns: farms, barn """ import time import logging import pandas as pd from cellpy import dbreader from cellpy.parameters.internal_settings import get_headers_journal from cellpy.utils.batch_tools import batch_helpers as helper # logger = logging.getLogger(__name__) SELECTED_SUMMARIES = [ "discharge_capacity", "charge_capacity", "coulombic_efficiency", "cumulated_coulombic_efficiency", "ir_discharge", "ir_charge", "end_voltage_discharge", "end_voltage_charge", "charge_c_rate", "discharge_c_rate", ] hdr_journal = get_headers_journal() def cycles_engine(**kwargs): """engine to extract cycles""" logging.debug("cycles_engine::Not finished yet (sorry).") # raise NotImplementedError experiments = kwargs["experiments"] farms = [] barn = "raw_dir" # Its a murder in the red barn - murder in the red barn for experiment in experiments: farms.append([]) if experiment.all_in_memory: logging.debug("all in memory") for key in experiment.cell_data_frames: logging.debug(f"extracting cycles from {key}") # extract cycles here and send it to the farm else: logging.debug("dont have it in memory - need to lookup in the files") for key in experiment.cell_data_frames: logging.debug(f"looking up cellpyfile for {key}") # extract cycles here and send it to the farm return farms, barn def raw_data_engine(**kwargs): """engine to extract raw data""" logging.debug("cycles_engine") raise NotImplementedError experiments = kwargs["experiments"] farms = [] barn = "raw_dir" for experiment in experiments: farms.append([]) return farms, barn def summary_engine(**kwargs): """engine to extract summary data""" logging.debug("summary_engine") # farms = kwargs["farms"] farms = [] experiments = kwargs["experiments"] for experiment in experiments: if experiment.selected_summaries is None: selected_summaries = SELECTED_SUMMARIES else: selected_summaries = experiment.selected_summaries if experiment.summary_frames is 
None: logging.debug("No summary frames found") logging.debug("Re-loading") experiment.summary_frames = _load_summaries(experiment) farm = helper.join_summaries(experiment.summary_frames, selected_summaries) farms.append(farm) barn = "batch_dir" return farms, barn def _load_summaries(experiment): summary_frames = {} for label in experiment.cell_names: # TODO: replace this with direct lookup from hdf5? summary_frames[label] = experiment.data[label].cell.summary return summary_frames def dq_dv_engine(**kwargs): """engine that performs incremental analysis of the cycle-data""" farms = None barn = "raw_dir" return farms, barn def simple_db_engine(reader=None, srnos=None, **kwargs): """engine that gets values from the simple excel 'db'""" # This is not really a proper Do-er engine. But not sure where to put it. if reader is None: reader = dbreader.Reader() logging.debug("No reader provided. Creating one myself.") info_dict = dict() info_dict[hdr_journal["filename"]] = [reader.get_cell_name(srno) for srno in srnos] info_dict[hdr_journal["mass"]] = [reader.get_mass(srno) for srno in srnos] info_dict[hdr_journal["total_mass"]] = [ reader.get_total_mass(srno) for srno in srnos ] info_dict[hdr_journal["loading"]] = [reader.get_loading(srno) for srno in srnos] info_dict[hdr_journal["nom_cap"]] = [reader.get_nom_cap(srno) for srno in srnos] info_dict[hdr_journal["experiment"]] = [ reader.get_experiment_type(srno) for srno in srnos ] info_dict[hdr_journal["fixed"]] = [ reader.inspect_hd5f_fixed(srno) for srno in srnos ] info_dict[hdr_journal["label"]] = [reader.get_label(srno) for srno in srnos] info_dict[hdr_journal["cell_type"]] = [reader.get_cell_type(srno) for srno in srnos] info_dict[hdr_journal["instrument"]] = [ reader.get_instrument(srno) for srno in srnos ] info_dict[hdr_journal["raw_file_names"]] = [] info_dict[hdr_journal["cellpy_file_name"]] = [] info_dict[hdr_journal["comment"]] = [reader.get_comment(srno) for srno in srnos] logging.debug(f"created info-dict from 
{reader.db_file}:") # logging.debug(info_dict) for key in list(info_dict.keys()): logging.debug("%s: %s" % (key, str(info_dict[key]))) _groups = [reader.get_group(srno) for srno in srnos] logging.debug(">\ngroups: %s" % str(_groups)) groups = helper.fix_groups(_groups) info_dict[hdr_journal["group"]] = groups my_timer_start = time.time() filename_cache = [] info_dict = helper.find_files(info_dict, filename_cache, **kwargs) my_timer_end = time.time() if (my_timer_end - my_timer_start) > 5.0: logging.critical( "The function _find_files was very slow. " "Save your journal so you don't have to run it again! " "You can load it again using the from_journal(journal_name) method." ) info_df = pd.DataFrame(info_dict) info_df = info_df.sort_values([hdr_journal.group, hdr_journal.filename]) info_df = helper.make_unique_groups(info_df) info_df[hdr_journal.label] = info_df[hdr_journal.filename].apply( helper.create_labels ) # TODO: check if drop=False works [#index] info_df.set_index(hdr_journal["filename"], inplace=True) # edit this to allow for # non-nummeric index-names (for tab completion and python-box) return info_df
Looking for an exciting career opportunity? CTR is now hiring drivers and more! Why Is CTR A Great Place to Work? Open A SERVICE REQUEST TODAY! CTR is a Full Service Waste Disposal Company serving Austin and the surrounding areas since 1981. Since the beginning we have focused on one simple goal: customer satisfaction. Whether you’re a Fortune 500 company or a new start-up business, CTR’s professional sales staff are here to assist you and your company’s needs with a free consultation. CTR offers commercial waste services for your business. Collection services are on a scheduled basis from 1-6 days per week. Schedule delivery, pay your bill, and get help in just a few clicks. Roll-off and compactor equipment are designed for large quantities of waste removal. CTR roll-off containers are delivered right to your location and then picked up to empty at a later date. CTR provides waste collection services to residents across Central Texas. Our services include garbage, recycling, and brush and bulky item removal. Pick-up services are provided once a week. Recycling is provided exclusively by our wholly owned subsidiary, Wilco Recycling. Our recycling facility is state of the art and was designed to serve residents in Austin and surrounding counties. Central Texas Refuse, Inc. (CTR) is excited to announce that We Have Acquired Wilco Recycling.
""" WordWrapping """ import os, sys, string, time, getopt import re def WordWrap(text, cols=70, detect_paragraphs = 0, is_header = 0): text = string.replace(text,"\r\n", "\n") # remove CRLF def nlrepl(matchobj): if matchobj.group(1) != ' ' and matchobj.group(2) != ' ': repl_with = ' ' else: repl_with = '' return matchobj.group(1) + repl_with + matchobj.group(2) if detect_paragraphs: text = re.sub("([^\n])\n([^\n])",nlrepl,text) body = [] i = 0 j = 0 ltext = len(text) while i<ltext: if i+cols < ltext: r = string.find(text, "\n", i, i+cols) j = r if r == -1: j = string.rfind(text, " ", i, i+cols) if j == -1: r = string.find(text, "\n", i+cols) if r == -1: r = ltext j = string.find(text, " ", i+cols) if j == -1: j = ltext j = min(j, r) else: j = ltext body.append(string.strip(text[i:j])) i = j+1 if is_header: body = string.join(body, "\n ") else: body = string.join(body, "\n") return body
” Over the past 4 years, I have used your massage, chiropractic, and acupuncture services. I can honestly say that as a client of Yonge Finch Chiropractic and Health Centre, I have always received excellent and timely treatment and been pleasantly greeted on a first name basis by all staff. I suffered from terrible shoulder pain due to daily computer use and felt hopeless in my search for relief. After hearing about the benefits of acupuncture from a friend, I made an appointment for treatment with Shant Filo R.Ac.. One hour with Shant and I became the president of his fan club! I’ve recommended multiple co-workers who have also joined the club. Recently John came by my office and sat individually with myself and all my co-workers to provide consultations on our work space set ups. These consults were a huge hit and truly appreciated. There’s nothing worse than incessant back, shoulder and neck pain that just seems to have no end in sight and there’s nothing better than knowing where to go to get help … and that is for sure Yonge Finch Chiropractic and Health Centre. ” – Peta M.
# Decoder for a 433 MHz temperature/humidity sensor captured with an
# rtl-sdr stick.  Reads raw 16-bit samples from /tmp/rtl.dat, converts
# the pulse-pause pattern into a bit string and decodes temperature and
# humidity from it.
import struct

f = open("/tmp/rtl.dat", "rb")
#i = 0 #for debugging only
p = 0 #pause counter
wasHigh = 0 #set when signal was high
r = "" #the sent bit string
threshold = 1500 #needs to be set between high and low, depends on gain of sdr-stick
#samplemax = 0 #for debugging only
resultArray = [] #Stores the 12 packages that are send
try:
    s = f.read(1) #16 bits are one sample
    s += f.read(1)
    while s:
        sample = struct.unpack('<H', s)[0] #samples are in little endian
        #print(sample) #debugging
        #if (sample > samplemax and sample < 5000):
        #    samplemax = sample
        if (sample > threshold):
            #print(sample)
            wasHigh = 1
            # A high edge terminates the preceding pause; classify the
            # pause length (in samples) into a 0-bit, a 1-bit or an
            # inter-package gap.  Pauses outside these windows are noise
            # and are silently dropped.
            if (p != 0):
                if (p >= 27 and p <= 31): #short pause -> 0
                    r = r + "0"
                if (p >= 56 and p <= 62): #medium pause -> 1
                    r += "1"
                if (p > 100): #long pause -> transmission of one package ended. The package is send 12 times
                    resultArray.append(r)
                    r = ""
                #print(p)
                p = 0
        # Count pause length only after the first high pulse was seen.
        if (sample < threshold and (wasHigh == 1 or p != 0)):
            wasHigh = 0
            p += 1
        #i += 1
        s = f.read(1)
        s+= f.read(1)
finally:
    # Flush the (possibly partial) last package.
    resultArray.append(r)

#Check for transmission/decoding error - this assumes there is max 1 error in the first 3 transmissions
# NOTE(review): this indexing assumes at least 2 (and in the error case 3)
# complete packages were captured; a too-short recording raises IndexError.
if (resultArray[0] == resultArray[1]):
    #print(resultArray[0]) #resulting bitstring that was transmitted
    data = resultArray[0]
else:
    #print(resultArray[2])
    data = resultArray[2]

# Last 8 bits: relative humidity as plain binary.
humidity = int(data[-8:], 2)
print("Humidity:", humidity)

# Middle bits: temperature in tenths of a degree, 12-bit two's complement
# (bit layout presumably sensor-specific — confirm against the protocol).
temp = int(data[12:-12], 2)
if (temp & 0x800 != 0):
    # Negative value: undo two's complement to get the magnitude.
    temp = ~temp
    temp = temp & 0xFFF
    temp += 1
    temp = temp/10.0
    print("Temperature:", "-" + str(temp))
else:
    temp = temp/10.0
    print("Temperature:", temp)
f.close()
At Seung-Ni Martial Arts & Fitness in Grand Rapids’ Kentwood neighborhood, we are excited and privileged to be part of this great community. We also look forward to bringing you a positive and fun environment in which you can achieve your goals and chase your dreams. Headed by Master Joshua Cox, this location offers instruction in Taekwondo, Brazilian Jiu-Jitsu and Aerobic Kickboxing. We are dedicated to bringing you the best in martial arts instruction and will walk with you on every step of your journey. We offer martial arts instruction for every age group as well as a praised fitness program that combines resistance training and aerobic exercise that will help you look good and feel great.
"""Demo of pandas cumulative aggregations (cumsum, cumprod, cummax,
cummin) on DataFrames and Series, including axis selection and NaN
handling via ``skipna``.  The expected output of each statement is shown
in the comment directly below it (observed with pandas 1.0.5)."""
import pandas as pd

print(pd.__version__)
# 1.0.5

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]}, index=['X', 'Y', 'Z'])
print(df)
#    A  B
# X  1  4
# Y  2  5
# Z  3  6

# Cumulative sum down the columns (default axis=0).
print(df.cumsum())
#    A   B
# X  1   4
# Y  3   9
# Z  6  15

# Cumulative sum across each row.
print(df.cumsum(axis=1))
#    A  B
# X  1  5
# Y  2  7
# Z  3  9

print(df.cumprod())
#    A    B
# X  1    4
# Y  2   20
# Z  6  120

print(df.cumprod(axis=1))
#    A   B
# X  1   4
# Y  2  10
# Z  3  18

# Selecting one column yields a Series; the same methods apply.
print(df['B'])
# X    4
# Y    5
# Z    6
# Name: B, dtype: int64

print(type(df['B']))
# <class 'pandas.core.series.Series'>

print(df['B'].cumsum())
# X     4
# Y     9
# Z    15
# Name: B, dtype: int64

print(df['B'].cumprod())
# X      4
# Y     20
# Z    120
# Name: B, dtype: int64

# NaN handling: by default (skipna=True) NaN cells stay NaN but do not
# poison the running total; with skipna=False everything after the first
# NaN becomes NaN.
df_nan = pd.DataFrame({'A': [1, 2, 3], 'B': [4, float('nan'), 6]},
                      index=['X', 'Y', 'Z'])
print(df_nan)
#    A    B
# X  1  4.0
# Y  2  NaN
# Z  3  6.0

print(df_nan.cumsum())
#    A     B
# X  1   4.0
# Y  3   NaN
# Z  6  10.0

print(float('nan') + 4)
# nan

print(df_nan.cumsum(skipna=False))
#    A    B
# X  1  4.0
# Y  3  NaN
# Z  6  NaN

print(df_nan.cumprod())
#    A     B
# X  1   4.0
# Y  2   NaN
# Z  6  24.0

print(df_nan.cumprod(skipna=False))
#    A    B
# X  1  4.0
# Y  2  NaN
# Z  6  NaN

# Running maxima / minima.
df2 = pd.DataFrame({'A': [1, 4, 2], 'B': [6, 3, 5]}, index=['X', 'Y', 'Z'])
print(df2)
#    A  B
# X  1  6
# Y  4  3
# Z  2  5

print(df2.cummax())
#    A  B
# X  1  6
# Y  4  6
# Z  4  6

print(df2.cummax(axis=1))
#    A  B
# X  1  6
# Y  4  4
# Z  2  5

print(df2.cummin())
#    A  B
# X  1  6
# Y  1  3
# Z  1  3

print(df2.cummin(axis=1))
#    A  B
# X  1  1
# Y  4  3
# Z  2  2

df2_nan = pd.DataFrame({'A': [1, 4, 2], 'B': [6, float('nan'), 5]},
                       index=['X', 'Y', 'Z'])
print(df2_nan)
#    A    B
# X  1  6.0
# Y  4  NaN
# Z  2  5.0

print(df2_nan.cummax())
#    A    B
# X  1  6.0
# Y  4  NaN
# Z  4  6.0

print(df2_nan.cummax(skipna=False))
#    A    B
# X  1  6.0
# Y  4  NaN
# Z  4  NaN

print(df2_nan.cummin())
#    A    B
# X  1  6.0
# Y  1  NaN
# Z  1  5.0

print(df2_nan.cummin(skipna=False))
#    A    B
# X  1  6.0
# Y  1  NaN
# Z  1  NaN
Woodlands Academy of the Sacred Heart is seeking an Admission Associate to work in the Office of Admission & Affordability. This is a full-time, 12-month position; some evenings and weekends are required. The Admission Associate works as part of a team in a busy and evolving admissions office. This position is a mix of administrative/office support and customer service. The Admission Associate will report to the Director of Admission and Affordability and will join a highly collaborative, innovative, and experienced faculty. Must be able to drive and have a car.
#!/usr/bin/env python # -*- coding:utf-8 -*- __author__ = 'ghost' import urlparse import tornado.web from tornado.httputil import HTTPServerRequest from .auth.models import User class BaseAuthHandler(tornado.web.RequestHandler): def get_current_user(self): cookie = self.get_secure_cookie('user') if cookie: self._current_user = User.getone(pk=int(cookie)) return self._current_user self.clear_cookie('user') return def extract_params(request): if not isinstance(request, HTTPServerRequest): request = request.request parse_url = urlparse.urlparse(request.uri) path, params, query, fragment = parse_url.path, parse_url.params, parse_url.query, parse_url.fragment uri = urlparse.urlunparse((request.protocol, request.host, path, params, query, fragment)) http_method = request.method headers = request.headers if 'wsgi.input' in headers: del headers['wsgi.input'] if 'wsgi.errors' in headers: del headers['wsgi.errors'] if 'HTTP_AUTHORIZATION' in headers: headers['Authorization'] = headers['HTTP_AUTHORIZATION'] body = request.body return uri, http_method, body, headers
We accept discover, mastercard and visa for payment. Our preferred brands include best buy, ecowater, hot spot, hot spring, hotspring portable spas, solana and tiger river. Our specialties include bottled water delivery, chemical & supplies, cooler rentals & sales, free water analysis, hot spring portable spas, pool filters & pumps, pool opening & closing, service & repair, ultra-violet purification, water conditioners and world's best selling brand. We can assist you with bottled water, bottled water delivery, chemicals, chemicals & supplies, chlorine removal, comprehensive, straightforward warranty , cooler rentals & sales, delivery services, estimates, filters, free water analysis, great tasting water, hotspring portable spas, installation, maintenance, parts & supplies, patio furniture, pumps, purified drinking water, rentals, repairs, sauna, ultra-violet purification and water coolers. We've been in business since 1957. Quality water treatment, uv & r.o. purification. If you have used the services of Ecowater Systems we'd love to hear from you! Use the form below to review and comment on your experience.
# -*- coding: utf-8 -*-
"""
Library of functions for meteorology.

Meteorological function names
=============================

- cp_calc: Calculate specific heat
- Delta_calc: Calculate slope of vapour pressure curve
- es_calc: Calculate saturation vapour pressures
- ea_calc: Calculate actual vapour pressures
- gamma_calc: Calculate psychrometric constant
- L_calc: Calculate latent heat of vapourisation
- pottemp: Calculate potential temperature (1000 hPa reference pressure)
- rho_calc: Calculate air density
- sun_NR: Maximum sunshine duration [h] and extraterrestrial radiation [J/day]
- vpd_calc: Calculate vapour pressure deficits
- windvec: Calculate average wind direction and speed

Module requires and imports the math, numpy and pandas modules.

Units convention: saturation vapour pressure (es_calc) is returned in kPa;
all other vapour pressures (ea_calc, vpd_calc) are returned in Pa, which is
what cp_calc, gamma_calc and rho_calc expect alongside air pressure in Pa.

NOTE: saturation vapour pressure now uses the Arden Buck equations, so the
doctest outputs below are indicative and may differ from the exact values
in the last digits.

Function descriptions
=====================

"""

import math

import numpy as np
import pandas as pd
import scipy  # kept only for backwards compatibility with existing importers


def _arraytest(*args):
    """
    Convert list/tuple input parameters to numpy arrays, while leaving
    single values (and existing arrays/Series) intact.

    Parameters:
        args (array, list, tuple, int, float): Input values for functions.

    Returns:
        rargs (array, int, float): Valid single value or array function input.

    Examples
    --------
    >>> _arraytest(12.76)
    12.76
    >>> _arraytest('This is a string')
    'This is a string'
    """
    # np.array replaces the removed scipy.array alias (same behaviour).
    rargs = [np.array(a) if isinstance(a, (list, tuple)) else a for a in args]
    if len(rargs) == 1:
        return rargs[0]  # no unpacking if single value, return value i/o list
    return rargs


def cp_calc(airtemp=np.array([]), rh=np.array([]), airpress=np.array([])):
    """
    Function to calculate the specific heat of air:

    .. math::
        c_p = 0.24 \\cdot 4185.5 \\cdot \\left(1 + 0.8 \\cdot
        \\frac{0.622 \\cdot e_a}{p - e_a}\\right)

    where ea is the actual vapour pressure calculated from the relative
    humidity and p is the ambient air pressure.

    Parameters:
        - airtemp: (array of) air temperature [Celsius].
        - rh: (array of) relative humidity data [%].
        - airpress: (array of) air pressure data [Pa].

    Returns:
        cp: array of saturated c_p values [J kg-1 K-1].

    References
    ----------
    R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop
    Evaporation - Guidelines for computing crop water requirements,
    FAO - Food and Agriculture Organization of the United Nations.
    Irrigation and drainage paper 56, Chapter 3. Rome, Italy.
    (http://www.fao.org/docrep/x0490e/x0490e07.htm)

    Examples
    --------
    >>> cp_calc(25,60,101300)
    1014.0749457208065
    >>> t = [10, 20, 30]
    >>> rh = [10, 20, 30]
    >>> airpress = [100000, 101000, 102000]
    >>> cp_calc(t,rh,airpress)
    array([ 1005.13411289,  1006.84399787,  1010.83623841])
    """
    # Test input array/value
    airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
    # calculate vapour pressures [Pa]
    eact = ea_calc(airtemp, rh)
    # Calculate cp
    cp = 0.24 * 4185.5 * (1 + 0.8 * (0.622 * eact / (airpress - eact)))
    return cp  # in J/kg/K


def Delta_calc(airtemp=np.array([])):
    """
    Function to calculate the slope of the temperature - vapour pressure
    curve (Delta) from air temperature T:

    .. math::
        \\Delta = 1000 \\cdot \\frac{e_s \\cdot 4098}{(T + 237.3)^2}

    where es is the saturated vapour pressure at temperature T.

    Parameters:
        - airtemp: (array of) air temperature [Celsius].

    Returns:
        - Delta: (array of) slope of saturated vapour curve [Pa K-1].

    References
    ----------
    Technical regulations 49, World Meteorological Organisation, 1984.
    Appendix A. 1-Ap-A-3.

    Examples
    --------
    >>> round(Delta_calc(30.0), 2)
    243.48
    """
    # Test input array/value
    airtemp = _arraytest(airtemp)
    # calculate saturation vapour pressure at temperature
    es = es_calc(airtemp)  # in kPa
    # Calculate Delta (factor 1000 converts kPa-based slope to Pa/K)
    Delta = es * 4098.0 / ((airtemp + 237.3) ** 2) * 1000
    return Delta  # in Pa/K


def ea_calc(airtemp=np.array([]), rh=np.array([])):
    """
    Function to calculate actual vapour pressure from relative humidity:

    .. math::
        e_a = \\frac{rh \\cdot e_s}{100}

    where es is the saturated vapour pressure at temperature T.

    Parameters:
        - airtemp: array of measured air temperatures [Celsius].
        - rh: Relative humidity [%].

    Returns:
        - ea: array of actual vapour pressure [Pa].

    Examples
    --------
    >>> round(ea_calc(25, 60), 2)
    1901.12
    """
    # Test input array/value
    airtemp, rh = _arraytest(airtemp, rh)
    # Calculate saturation vapour pressures.
    # BUG FIX: the previous factor 10 converted kPa to hPa although this
    # function (and all its consumers: cp_calc, gamma_calc, rho_calc)
    # documents and requires Pa; kPa -> Pa is a factor 1000.
    es = es_calc(airtemp) * 1000.0  # kPa convert to Pa
    # Calculate actual vapour pressure
    eact = rh / 100.0 * es
    return eact  # in Pa


def es_calc(airtemp):
    """
    Function to calculate saturated vapour pressure from temperature.
    Uses the Arden-Buck equations.

    Accepts scalars, lists/tuples, numpy arrays and pandas Series.
    (The previous version only worked for Series input because it read
    ``airtemp.index``, which broke the documented scalar examples.)

    Parameters:
        - airtemp : (data-type) measured air temperature [Celsius].

    Returns:
        - es : saturated vapour pressure [kPa]; a float for scalar input,
          a pandas Series for Series input, otherwise a numpy array.

    References
    ----------
    https://en.wikipedia.org/wiki/Arden_Buck_equation
    Buck, A. L. (1981), "New equations for computing vapor pressure and
    enhancement factor", J. Appl. Meteorol., 20: 1527-1532
    Buck (1996), Buck Research CR-1A User's Manual, Appendix 1. (PDF)

    Examples
    --------
    >>> round(es_calc(30.0), 6)
    4.245126
    """
    series_input = isinstance(airtemp, pd.Series)
    if series_input:
        temps = pd.to_numeric(airtemp, errors="coerce").to_numpy(dtype=float)
    else:
        temps = np.asarray(
            pd.to_numeric(np.atleast_1d(airtemp), errors="coerce"), dtype=float
        )
    # Buck (1996): separate fits over liquid water (T > 0 C) and over ice.
    over_water = 6.1121 * np.exp(
        (18.678 - temps / 234.5) * (temps / (257.14 + temps))
    )
    over_ice = 6.1115 * np.exp(
        (23.036 - temps / 333.7) * (temps / (279.82 + temps))
    )
    es = np.where(temps > 0.0, over_water, over_ice) / 10.0  # hPa -> kPa
    if series_input:
        return pd.Series(es, index=airtemp.index)
    if np.isscalar(airtemp):
        return float(es[0])
    return es  # in kPa


def gamma_calc(airtemp=np.array([]), rh=np.array([]), airpress=np.array([])):
    """
    Function to calculate the psychrometric constant gamma.

    .. math::
        \\gamma = \\frac{c_p \\cdot p}{0.622 \\cdot \\lambda}

    where p is the air pressure and lambda the latent heat of
    vapourisation.  (The formula uses 0.622, the ratio of the molecular
    weights of water vapour and dry air, matching the code below.)

    Parameters:
        - airtemp: array of measured air temperature [Celsius].
        - rh: array of relative humidity values [%].
        - airpress: array of air pressure data [Pa].

    Returns:
        - gamma: array of psychrometric constant values [Pa K-1].

    References
    ----------
    J. Bringfelt. Test of a forest evapotranspiration model. Meteorology
    and Climatology Reports 52, SMHI, Norrköping, Sweden, 1986.

    Examples
    --------
    >>> gamma_calc(10,50,101300)
    66.26343318657227
    """
    # Test input array/value
    airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
    # Calculate cp and Lambda values
    cp = cp_calc(airtemp, rh, airpress)
    L = L_calc(airtemp)
    # Calculate gamma
    gamma = cp * airpress / (0.622 * L)
    return gamma  # in Pa/K


def L_calc(airtemp=np.array([])):
    """
    Function to calculate the latent heat of vapourisation from air
    temperature.

    Parameters:
        - airtemp: (array of) air temperature [Celsius].

    Returns:
        - L: (array of) lambda [J kg-1 K-1].

    References
    ----------
    J. Bringfelt. Test of a forest evapotranspiration model. Meteorology
    and Climatology Reports 52, SMHI, Norrköping, Sweden, 1986.

    Examples
    --------
    >>> L_calc(25)
    2440883.8804625
    >>> t=[10, 20, 30]
    >>> L_calc(t)
    array([ 2476387.3842125,  2452718.3817125,  2429049.3792125])
    """
    # Test input array/value
    airtemp = _arraytest(airtemp)
    # Calculate lambda (linear fit in absolute temperature)
    L = 4185.5 * (751.78 - 0.5655 * (airtemp + 273.15))
    return L  # in J/kg


def pottemp(airtemp=np.array([]), rh=np.array([]), airpress=np.array([])):
    """
    Function to calculate the potential temperature air, theta, from air
    temperatures, relative humidity and air pressure. Reference pressure
    1000 hPa.

    Parameters:
        - airtemp: (array of) air temperature data [Celsius].
        - rh: (array of) relative humidity data [%].
        - airpress: (array of) air pressure data [Pa].

    Returns:
        - theta: (array of) potential air temperature data [Celsius].

    Examples
    --------
    >>> t = [5, 10, 20]
    >>> rh = [45, 65, 89]
    >>> airpress = [101300, 102000, 99800]
    >>> pottemp(t,rh,airpress)
    array([  3.97741582,   8.40874555,  20.16596828])
    >>> pottemp(5,45,101300)
    3.977415823848844
    """
    # Test input array/value
    airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
    # Determine cp
    cp = cp_calc(airtemp, rh, airpress)
    # Determine theta (Poisson equation, R_d = 287 J kg-1 K-1)
    theta = (airtemp + 273.15) * pow((100000.0 / airpress), (287.0 / cp)) \
        - 273.15
    return theta  # in degrees celsius


def rho_calc(airtemp=np.array([]), rh=np.array([]), airpress=np.array([])):
    """
    Function to calculate the density of air, rho, from air temperatures,
    relative humidity and air pressure.

    .. math::
        \\rho = 1.201 \\cdot \\frac{290.0 \\cdot
        (p - 0.378 \\cdot e_a)}{1000 \\cdot (T + 273.15)} / 100

    Parameters:
        - airtemp: (array of) air temperature data [Celsius].
        - rh: (array of) relative humidity data [%].
        - airpress: (array of) air pressure data [Pa].

    Returns:
        - rho: (array of) air density data [kg m-3].

    Examples
    --------
    >>> t = [10, 20, 30]
    >>> rh = [10, 20, 30]
    >>> airpress = [100000, 101000, 102000]
    >>> rho_calc(t,rh,airpress)
    array([ 1.22948419,  1.19787662,  1.16635358])
    >>> rho_calc(10,50,101300)
    1.2431927125520903
    """
    # Test input array/value
    airtemp, rh, airpress = _arraytest(airtemp, rh, airpress)
    # Calculate actual vapour pressure [Pa]
    eact = ea_calc(airtemp, rh)
    # Calculate density of air rho
    rho = (
        1.201
        * (290.0 * (airpress - 0.378 * eact))
        / (1000.0 * (airtemp + 273.15))
        / 100.0
    )
    return rho  # in kg/m3


def sun_NR(doy=np.array([]), lat=float):
    """
    Function to calculate the maximum sunshine duration [h] and incoming
    radiation [MJ/day] at the top of the atmosphere from day of year and
    latitude.

    Parameters:
        - doy: (array of) day of year.
        - lat: latitude in decimal degrees, negative for southern
          hemisphere.

    Returns:
        - N: (float, array) maximum sunshine hours [h].
        - Rext: (float, array) extraterrestrial radiation [J day-1].

    Notes
    -----
    Only valid for latitudes between 0 and 67 degrees (i.e. tropics and
    temperate zone).

    References
    ----------
    R.G. Allen, L.S. Pereira, D. Raes and M. Smith (1998). Crop
    Evaporation - Guidelines for computing crop water requirements,
    FAO - Food and Agriculture Organization of the United Nations.
    Irrigation and drainage paper 56, Chapter 3. Rome, Italy.
    (http://www.fao.org/docrep/x0490e/x0490e07.htm)

    Examples
    --------
    >>> sun_NR(50,60)
    (9.1631820597268163, 9346987.824773483)
    """
    # Test input array/value
    doy, lat = _arraytest(doy, lat)
    # Set solar constant [W/m2]
    S = 1367.0  # [W/m2]
    # Print warning if latitude is above 67 degrees
    # (fixed the stray ")" that was embedded in this message)
    if abs(lat) > 67.0:
        print("WARNING: Latitude outside range of application (0-67 degrees).\n")
    # Convert latitude [degrees] to radians
    latrad = lat * math.pi / 180.0
    # calculate solar declination dt [radians]
    dt = 0.409 * np.sin(2 * math.pi / 365 * doy - 1.39)
    # calculate sunset hour angle [radians]
    ws = np.arccos(-np.tan(latrad) * np.tan(dt))
    # Calculate sunshine duration N [h]
    N = 24 / math.pi * ws
    # Calculate day angle j [radians]
    j = 2 * math.pi / 365.25 * doy
    # Calculate relative distance to sun
    dr = 1.0 + 0.03344 * np.cos(j - 0.048869)
    # Calculate Rext
    Rext = (
        S * 86400 / math.pi * dr
        * (
            ws * np.sin(latrad) * np.sin(dt)
            + np.sin(ws) * np.cos(latrad) * np.cos(dt)
        )
    )
    return N, Rext


def vpd_calc(airtemp=np.array([]), rh=np.array([])):
    """
    Function to calculate vapour pressure deficit.

    Parameters:
        - airtemp: measured air temperatures [Celsius].
        - rh: (array of) relative humidity [%].

    Returns:
        - vpd: (array of) vapour pressure deficits [Pa].

    Examples
    --------
    >>> round(vpd_calc(30, 60), 2)
    1698.05
    """
    # Test input array/value
    airtemp, rh = _arraytest(airtemp, rh)
    # Calculate saturation vapour pressure in Pa (see ea_calc for the
    # kPa -> Pa fix) and the actual vapour pressure [Pa]
    es = es_calc(airtemp) * 1000.0  # kPa convert to Pa
    eact = ea_calc(airtemp, rh)
    # Calculate vapour pressure deficit
    vpd = es - eact
    return vpd  # in Pa


def windvec(u=np.array([]), D=np.array([])):
    """
    Function to calculate the wind vector from time series of wind speed
    and direction.

    Parameters:
        - u: array of wind speeds [m s-1].
        - D: array of wind directions [degrees from North].

    Returns:
        - uv: Vector wind speed [m s-1].
        - Dv: Vector wind direction [degrees from North].

    Examples
    --------
    >>> u = np.array([[ 3.],[7.5],[2.1]])
    >>> D = np.array([[340],[356],[2]])
    >>> uv, Dv = windvec(u,D)
    >>> round(uv, 6)
    4.162354
    """
    # Test input array/value
    u, D = _arraytest(u, D)
    ve = 0.0  # define east component of wind speed
    vn = 0.0  # define north component of wind speed
    D = D * math.pi / 180.0  # convert wind direction degrees to radians
    for i in range(0, len(u)):
        ve = ve + u[i] * np.sin(D[i])  # calculate sum east speed components
        vn = vn + u[i] * np.cos(D[i])  # calculate sum north speed components
    ve = -ve / len(u)  # determine average east speed component
    vn = -vn / len(u)  # determine average north speed component
    # .item() yields a plain float (replaces the deprecated implicit
    # array-to-scalar conversion that math.sqrt performed)
    uv = np.sqrt(ve * ve + vn * vn).item()
    # Calculate wind speed vector direction
    vdir = np.arctan2(ve, vn)
    vdir = vdir * 180.0 / math.pi  # Convert radians to degrees
    if vdir < 180:
        Dv = vdir + 180.0
    else:
        if vdir > 180.0:
            Dv = vdir - 180
        else:
            Dv = vdir
    return uv, Dv  # uv in m/s, Dv in degrees from North


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print("Ran all tests...")
An extension to the Bronx Museum planned to be built with a technique similar to the Madras Museum. Like the project in Madras the aim of this extension was to create space to present self-help methods for the poor, but now situated in the big city in the modern world. An extension to the Bronx Museum planned to be built with a technique similar to the Madras Museum. For the dome structures, instead of split bamboo, round iron of 12 cm diameter was foreseen. The final model for a transformable low-cost museum: a plan for a flexible lay-out of different dome structures. Like the project in Madras the aim of this extension was to create space to present self-help methods for the poor, but now situated in the big city in the modern world. Model for a transformable low-cost museum: a plan for a flexible lay-out of different dome structures.
#  ============================================================================
#
#  Copyright (C) 2007-2010 Conceptive Engineering bvba. All rights reserved.
#  www.conceptive.be / project-camelot@conceptive.be
#
#  This file is part of the Camelot Library.
#
#  This file may be used under the terms of the GNU General Public
#  License version 2.0 as published by the Free Software Foundation
#  and appearing in the file license.txt included in the packaging of
#  this file.  Please review this information to ensure GNU
#  General Public Licensing requirements will be met.
#
#  If you are unsure which license is appropriate for your use, please
#  visit www.python-camelot.com or contact project-camelot@conceptive.be
#
#  This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
#  WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
#  For use of this library in commercial applications, please contact
#  project-camelot@conceptive.be
#
#  ============================================================================

"""Actions box"""

import logging

logger = logging.getLogger('controls.actionsbox')

from PyQt4 import QtGui

from camelot.core.utils import ugettext as _


class ActionsBox(QtGui.QGroupBox):
    """A box containing actions to be applied to a view"""

    def __init__(self, parent, *args, **kwargs):
        """Create the group box titled 'Actions'; the extra positional and
        keyword arguments are stored and later handed to each action's
        ``render`` call."""
        QtGui.QGroupBox.__init__(self, _('Actions'), parent)
        logger.debug('create actions box')
        self.args = args
        self.kwargs = kwargs

    def setActions(self, actions):
        """Render every action into a widget, lay them out vertically and
        return the list of rendered widgets."""
        logger.debug('setting actions')
        # keep the action objects alive so they can continue receiving signals
        self.actions = actions
        rendered = []
        layout = QtGui.QVBoxLayout()
        for act in actions:
            widget = act.render(self, *self.args)
            layout.addWidget(widget)
            rendered.append(widget)
        self.setLayout(layout)
        return rendered
You can’t send Easter eggs - so send a fresh bouquet of Easter flowers instead! Searle's Gardens & Florals has the best and brightest flowers for Easter. Send Easter flowers to Truro, NS or nationwide for a joyful Easter celebration.
""" """ from functions import FunctionSkin from skin import Skin class Context(object): """ Context is a namespace that manages the stack of skins. """ _skin = None @classmethod def skins(cls): s = cls._skin while s: s = s.parent_skin yield s @classmethod def get_skin(cls, function=False): """ See if this skin will do. If not create an overlay skin and return it. If you want a specific skin type use 'set_skin' and then this. - function: Specify that you will need the skin for storing functions. Will overlay a new one. """ need_skin = function and not isinstance(cls._skin, FunctionSkin) or \ cls._skin is None if need_skin: cls.set_skin(function and FunctionSkin() or Skin()) return cls._skin @classmethod def set_skin(cls, skin, child=True): """ Add a layer overlay to skin. - skin: skin to replace with - child: False ignores all the so-far structure and replaces. """ if child: skin.set_parent(cls._skin) cls._skin = skin return cls._skin @classmethod def register_function(cls, fn, name=None, domain=None, skin=None, append=True, mapping=True, **kw): """ Register a function under domain. - name: Give a name to the function. Fallback to function name - domain: Skin domain. Fallback to name - skin: Use specific skin (not appended) - append: the domain is a collection. Append to it. - mapping: the domain is a mapping, ignore if not appending """ s = skin or cls.get_skin(function=True) name = name or fn.__name__ domain = domain or name # XXX: this might not be the place to interpret append == None # as append == True if append or append is None: if mapping: s.append(domain, (name, fn), coll_type=dict) else: s.append(domain, fn, coll_type=list) else: s.set(domain or name, fn)
Glucosamine is naturally found in the joints of the body but rarely found in natural food sources. It is a building block of cartilage and joint tissue. Directions: Take one to two caplets daily, preferably with meals. Do not exceed stated dose. No Artificial Flavours or Sweeteners. No Preservatives. No added Sugar or Salt. No Starch, No Corn, No Milk, No Lactose, No Soya, No Gluten, No Wheat, No Yeast, No Fish, No Porcine. Glucosamine Sulphate 2KCl (Crustaceans), Bulking Agent (Microcrystalline Cellulose), Anti-Caking Agents (Magnesium Stearate, Stearic Acid), Glazing Agents (Hydroxypropyl Methylcellulose, Glycerine, Carnauba Wax), Colour (Titanium Dioxide). Glucosamine Sulphate is really helping my joints, could not be without them now. Have been taking this for a while, but had a break and noticed a difference and again when I restarted taking them. Hopefully won't be having a break again! excellent product. suitable for joint problems. It' really effective! My husband and I have always found the product to be very beneficial and the H&B service excellent. Worth every dirham and have told all of my friends about them too!
""" Properties O(1) extra space O(n lg(n)) time Not stable: (original ordering is lost during the heap creation, which comes first) Not really adaptive: (doesn't takes advantage of existing order in its input) """ """ Heap sort is simple to implement, performs an O(n lg(n)) in-place sort, but is not stable as any information about the ordering of the items in the original sequence was lost during the heap creation stage, which came first. Heapsort is not stable because operations on the heap can change the relative order of equal items. The first loop, the O(n) "heapify" phase, puts the array into heap order. The second loop, the O(n lg(n)) "sortdown" phase, repeatedly extracts the maximum and restores heap order. The sink function is written recursively for clarity. Thus, as shown, the code requires O(lg(n)) space for the recursive call stack. However, the tail recursion in sink() is easily converted to iteration, which yields the O(1) space bound. Both phases are slightly adaptive, though not in any particularly useful manner. In the nearly sorted case, the heapify phase destroys the original order. In the reversed case, the heapify phase is as fast as possible since the array starts in heap order, but then the sortdown phase is typical. In the few unique keys case, there is some speedup but not as much as in shell sort or 3-way quicksort. """ import sys import math import cProfile def swap(aList, iIndex, jIndex): """ Given a `list` and two indices, it swaps the contents. """ aList[iIndex], aList[jIndex] = aList[jIndex], aList[iIndex] def get_left_child(iIndex): """ Given an index it returns it's left child's index. """ return 2 * iIndex + 1 def get_right_child(iIndex): """ Given an index it returns it's right child's index. """ return 2 * iIndex + 2 def get_parent(iIndex): """ Given an index, it returns the index of it's parent. 
""" return int(math.floor((iIndex - 1) / 2)) def heapify(aList, iEnd, iIndex): """ Given a list, and its size and an index, this function ensures all items are in descending order (children < parent) """ iLeft = get_left_child(iIndex) iRight = get_right_child(iIndex) iLargest = iIndex if iLeft < iEnd and aList[iLeft] > aList[iLargest]: iLargest = iLeft if iRight < iEnd and aList[iRight] > aList[iLargest]: iLargest = iRight if iLargest != iIndex: swap(aList, iIndex, iLargest) heapify(aList, iEnd, iLargest) def build_heap(aList): """ Given a list, it builds a heap using the list. """ iEnd = len(aList) iStart = iEnd / 2 - 1 # Root of a tree is @ size/2-1 for iIndex in range(iStart, -1, -1): heapify(aList, iEnd, iIndex) def heap_sort(aList): """ Given a unsorted list of integers other comparable types, it rearrange the integers in natural order in place in an ascending order. Example: Input: [8,5,3,1,7,6,0,9,4,2,5] Output: [0,1,2,3,4,5,5,6,7,8,9] """ iEnd = len(aList) build_heap(aList) for iIndex in range(iEnd-1, 0, -1): swap(aList, iIndex, 0) heapify(aList, iIndex, 0) return aList def run_test(): """ Test function. """ print "---------------------" aList = [8, 5, 3, 1, 9, 6, 0, 7, 4, 2, 5] aList = heap_sort(aList) print "aList Sorted. Ascending = {}\n".format(aList) if __name__ == "__main__": """ Run the code and profile it. """ cProfile.run('run_test()')
We are super excited to announce our first ever Yoga Retreat to Costa Rica in April 2019 being led by our very own Carrie Godesky and Jennifer Vafakos! A retreat specially designed to honor ritual and celebration. Our Yoga Retreat will be held at Danyasa Eco-Retreat in Dominical, Costa Rica. Danyasa offers an ecologically friendly retreat space. Join us in this sanctuary for exploration and transformation through yoga, surf, play, movement, healing arts, relaxation, adventure, community and a connection to nature like you’ve not had before. Inlet Yoga strives to provide top notch teacher trainings, workshops, private sessions, classes, tours, events and as well as excellence in service in our boutique and our accommodations! Our goal lies in exploring myriad ways to better ourselves… giving us more tools to move through the world, living artfully. Nestled at the base of rainforested mountains on the bank of the Baru River in Dominical, Costa Rica, Danyasa Eco-Retreat provides a sanctuary for living artfully in nature. We are a boutique yoga retreat center with a focus on providing you with everything you need to explore both the wonders of Costa Rica’s abundant nature as well as the wonders of your inner landscape. We invite you to join us for relaxation, play, adventure, creativity and transformation by way of classes, workshops, retreats, trainings and adventure tours. Our commitment to you is to always maintain a level of excellence and an intention to inspire. Transportation to and from San Jose Airport (SJO) coordinated for retreat attendees in two groups. Eco-friendly soaps and detergents however no shampoo or conditioner they recommend bringing eco-friendly products. The currency is Colon. Exchange rate averages between 530 to 550 colones to US dollar. US dollar is generally accepted everywhere. If you do pay in dollars, you will receive change in colons. Recommended to bring denominations of $20 or less. Water: No need to buy plastic bottles of water. 
Just bring your favorite water bottle and refill at filtered water stations. All water on property goes through 2 filters before reaching the room and kitchen. Credit cards: Danyasa takes both cc and cash (USD works just as easily as local currency) on site. There is a 13% fee for credit cards at Danyasa, so cash is definitely preferable for booking excursions or buying anything in our amazing onsite boutique. In town, USD is accepted everywhere, and a few of the larger restaurants accept ccs. There is an ATM in town as well. Passport: Required and must have at least 6 months validity remaining to enter country. No visa required if traveling from US or CAD. Electrical power: Same outlets at US so no need for adaptors. Weather: April is the green season. Rain does not begin until late afternoon or evening. For questions or to book please contact: info@inletyoga.com.
#!/usr/bin/env python3

# Copyright (C) 2014 Gabriel F. Araujo

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

import unittest
import unittest.mock as mk
from os import path

import setupam.speaker


class SpeakerTest(unittest.TestCase):
    """Tests for the plain Speaker container."""

    def setUp(self):
        # Speaker only stores the source path; no filesystem access happens.
        self.src_path = '/home/user/source'
        self.speaker = setupam.speaker.Speaker(self.src_path)

    # TODO Will include tests for speaker class when it's extended.


class ResourceTest(unittest.TestCase):
    """Shared scaffolding for SpeakerBuilder resource tests.

    NOTE(review): subclasses are expected to supply ``module_to_patch``,
    ``class_to_patch``, ``builder_args``, ``method_under_test`` and
    ``assertion_values`` as class attributes -- confirm against the
    concrete test cases.
    """

    def setUp(self):
        target = '{}.{}'.format(
            self.module_to_patch.__name__, self.class_to_patch.__name__)
        patcher = mk.patch(target)
        self.addCleanup(patcher.stop)
        self.mock = patcher.start()
        positional, keywords = self.builder_args
        self.builder = setupam.speaker.SpeakerBuilder(*positional, **keywords)

    def check_call(self, args, kwargs, expected_calls):
        """Invoke the method under test and verify the mocked calls."""
        method = getattr(self.builder, self.method_under_test.__name__)
        method(*args, **kwargs)
        self.mock.assert_has_calls(expected_calls)

    def check_all_calls(self):
        """Run check_call for every prepared assertion case."""
        for case in self.assertion_values:
            self.check_call(**case)

    @staticmethod
    def create_calls(*args, **kwargs):
        """Build the expected mock-call list for a populate() invocation."""
        # Tuples are joined into filesystem paths; other values pass through.
        joined = [path.join(*item) if isinstance(item, tuple) else item
                  for item in args]
        return [mk.call(), mk.call().populate(*joined, **kwargs)]
This offer is especially for you! This beautiful business was built from scratch and is thriving in its seventh year. Beauty services are by far the most profitable part, and the salon has built up a stable customer base. By purchasing this salon you receive income from three trades — barber-stylist, manicurist and solarium — plus sales of cosmetics, hair-care and body-care products. Also at your disposal: a waiting room and a utility room. The sale of the interior is a necessary measure, owing to a move away from Tyumen. The rental fee will be more than acceptable for you, and it includes payment for water and electricity consumption. Starting a new business is never easy; purchasing one that already exists and is established on the market is profitable. Become the new owner.
from django.shortcuts import render
from django.http import HttpResponse
import os, threading, csv, tempfile
from collections import OrderedDict


def index(request):
    """Render the landing page of the FreeCAD drawing app."""
    return render(request, 'drawing_freecad/index.html')


# Ordered form-field names -> submitted values. Order matters: the values are
# written positionally to some.csv, which the FreeCAD macro reads back.
lists = OrderedDict([
    ('stories', ''), ('dep_of_foun', ''), ('plinth_lev', ''),
    ('cclear_height', ''), ('dep_slab', ''), ('rep_span_len', ''),
    ('rep_span_wid', ''), ('col_type', ''), ('len_col', ''),
    ('wid_col', ''), ('radius_col', ''), ('dep_beam', ''), ('wid_beam', ''),
])

lis = ['stories', 'dep_of_foun', 'plinth_lev', 'cclear_height', 'dep_slab',
       'rep_span_len', 'rep_span_wid', 'col_type', 'len_col', 'wid_col',
       'radius_col', 'dep_beam', 'wid_beam']


def specs(request):
    """Collect the building specs from POST data, persist them to some.csv
    and regenerate the FreeCAD project file via the drawing macro.

    On any failure the form is re-rendered with an error message.
    """
    try:
        global lists
        for var in lists.keys():
            lists[var] = request.POST.get(var)
        print(lists)
        # Write the values positionally, space-delimited, for the macro.
        # (Fixed: the file handle was previously never closed on error.)
        with open('drawing_freecad/some.csv', 'w') as f:
            writer = csv.writer(f, delimiter=' ')
            writer.writerow([lists[key] for key in lists.keys()])
        os.system('rm project.fcstd')
        os.system('cd drawing_freecad/FreeCAD_macros && freecadcmd drawing.py')
        return render(request, 'drawing_freecad/specs.html', {'lists': lists})
    except Exception:
        # (Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt.)
        return render(request, 'drawing_freecad/specs.html',
                      {'message': 'please fill again'})


def download(request):
    """Regenerate the drawings archive via FreeCAD and stream it back as an
    attachment named drawings.zip."""
    os.system('cd drawing_freecad/drawings/svg_pdf && rm -f *')
    os.system('cd drawing_freecad/drawings && rm -f drawings.zip')
    os.system('cp drawing_freecad/project.fcstd ./drawing_freecad/drawings/svg_pdf/')
    os.system('cd drawing_freecad/FreeCAD_macros && freecadcmd savepdf.FCMacro')
    command = "./drawing_freecad/drawings/drawings.zip"
    # (Fixed: the zip was opened in text mode and the handle never closed;
    # binary mode is required for a correct payload.)
    with open(command, 'rb') as f:
        payload = f.read()
    response = HttpResponse(payload, content_type='application/zip')
    response['Content-Disposition'] = 'attachment; filename="drawings.zip"'
    return response


# Ordered drawing-option names -> submitted values (see `lists` above).
draw_list = OrderedDict([
    ('x_dir', ''), ('y_dir', ''), ('z_dir', ''),
    ('hid_lines', ''), ('scale_size', ''), ('rotation', ''),
])


def drawing(request):
    """Collect the drawing options from POST data and re-render the form.

    NOTE(review): the template path 'web_app/drawing.html' differs from the
    'drawing_freecad/...' prefix used elsewhere -- confirm it is intentional.
    """
    global draw_list
    for key in draw_list.keys():
        draw_list[key] = request.POST.get(key)
    print(draw_list)
    return render(request, 'web_app/drawing.html', {'draw_list': draw_list})
On this website we present many images of Couchtisch Bei Otto that we have collected from various sites for inspiration, and of course what we recommend are the most excellent images for Couchtisch Bei Otto. If you like an image on our website, please do not hesitate to visit again and draw inspiration from all of the houses pictured here. And if you want to see more images, we recommend the gallery below. You can use each picture as a reference image for your own Couchtisch Bei Otto. Thank you for viewing the gallery of Couchtisch Bei Otto; we would be very happy if you came back.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import sqlalchemy as sa

from relengapi.blueprints.tokenauth import types
from relengapi.lib import db
from relengapi.lib.permissions import p


class Token(db.declarative_base('relengapi')):
    """A persisted authentication token and the permissions it grants."""

    __tablename__ = 'auth_tokens'

    def __init__(self, permissions=None, **kwargs):
        # Permissions are stored serialized as a comma-separated string.
        if permissions is not None:
            kwargs['_permissions'] = ','.join(str(perm) for perm in permissions)
        super(Token, self).__init__(**kwargs)

    id = sa.Column(sa.Integer, primary_key=True)
    typ = sa.Column(sa.String(4), nullable=False)
    description = sa.Column(sa.Text, nullable=False)
    user = sa.Column(sa.Text, nullable=True)
    disabled = sa.Column(sa.Boolean, nullable=False)
    _permissions = sa.Column(sa.Text, nullable=False)

    def to_jsontoken(self):
        """Convert this row into its JsonToken API representation."""
        json_tok = types.JsonToken(
            id=self.id,
            typ=self.typ,
            description=self.description,
            permissions=[str(perm) for perm in self.permissions],
            disabled=self.disabled)
        if self.user:
            json_tok.user = self.user
        return json_tok

    @property
    def permissions(self):
        """The resolved permission objects this token grants."""
        resolved = (p.get(name) for name in self._permissions.split(','))
        # silently ignore any nonexistent permissions; this allows us to
        # remove unused permissions without causing tokens permitting those
        # permissions to fail completely
        return [perm for perm in resolved if perm]
H 2016/04/24 Philadelphia Flyers 0 - Washington Capitals 1 L !! 1st Round !! A 2016/04/22 Philadelphia Flyers 2 - Washington Capitals 0 W !! 1st Round !! H 2016/04/20 Philadelphia Flyers 2 - Washington Capitals 1 W !! 1st Round !! H 2016/04/18 Philadelphia Flyers 1 - Washington Capitals 6 L !! 1st Round !! A 2016/04/16 Philadelphia Flyers 1 - Washington Capitals 4 L !! 1st Round !! A 2016/04/14 Philadelphia Flyers 0 - Washington Capitals 2 L !! 1st Round !! A 2008/04/22 Philadelphia Flyers 3 - Washington Capitals 2 (OT) W !! 1st Round !! H 2008/04/21 Philadelphia Flyers 2 - Washington Capitals 4 L !! 1st Round !! A 2008/04/19 Philadelphia Flyers 2 - Washington Capitals 3 L !! 1st Round !! H 2008/04/17 Philadelphia Flyers 4 - Washington Capitals 3 (OT) W !! 1st Round !! H 2008/04/15 Philadelphia Flyers 6 - Washington Capitals 3 W !! 1st Round !! A 2008/04/13 Philadelphia Flyers 2 - Washington Capitals 0 W !! 1st Round !! A 2008/04/11 Philadelphia Flyers 4 - Washington Capitals 5 L !! 1st Round !! H 1989/04/13 Philadelphia Flyers 4 - Washington Capitals 3 W !! 1st Round !! A 1989/04/11 Philadelphia Flyers 8 - Washington Capitals 5 W !! 1st Round !! H 1989/04/09 Philadelphia Flyers 5 - Washington Capitals 2 W !! 1st Round !! H 1989/04/08 Philadelphia Flyers 3 - Washington Capitals 4 (OT) L !! 1st Round !! A 1989/04/06 Philadelphia Flyers 3 - Washington Capitals 2 W !! 1st Round !! A 1989/04/05 Philadelphia Flyers 2 - Washington Capitals 3 L !! 1st Round !! A 1988/04/16 Philadelphia Flyers 4 - Washington Capitals 5 (OT) L !! 1st Round !! H 1988/04/14 Philadelphia Flyers 2 - Washington Capitals 7 L !! 1st Round !! A 1988/04/12 Philadelphia Flyers 2 - Washington Capitals 5 L !! 1st Round !! H 1988/04/10 Philadelphia Flyers 5 - Washington Capitals 4 (OT) W !! 1st Round !! H 1988/04/09 Philadelphia Flyers 4 - Washington Capitals 3 W !! 1st Round !! A 1988/04/07 Philadelphia Flyers 4 - Washington Capitals 5 L !! 1st Round !! 
A 1988/04/06 Philadelphia Flyers 4 - Washington Capitals 2 W !! 1st Round !! H 1984/04/07 Philadelphia Flyers 1 - Washington Capitals 5 L !! 1st Round !! A 1984/04/05 Philadelphia Flyers 2 - Washington Capitals 6 L !! 1st Round !! A 1984/04/04 Philadelphia Flyers 2 - Washington Capitals 4 L !! 1st Round !!
#!/usr/bin/env python
"""Command-line utility for Django administrative tasks."""
from django.core import management
from optparse import OptionParser
import os, sys

# Maps each action name to the management function implementing it.
ACTION_MAPPING = {
    'adminindex': management.get_admin_index,
    'createsuperuser': management.createsuperuser,
    'createcachetable': management.createcachetable,
    # 'dbcheck': management.database_check,
    'init': management.init,
    'inspectdb': management.inspectdb,
    'install': management.install,
    'installperms': management.installperms,
    'runserver': management.runserver,
    'sql': management.get_sql_create,
    'sqlall': management.get_sql_all,
    'sqlclear': management.get_sql_delete,
    'sqlindexes': management.get_sql_indexes,
    'sqlinitialdata': management.get_sql_initial_data,
    'sqlreset': management.get_sql_reset,
    'sqlsequencereset': management.get_sql_sequence_reset,
    'startapp': management.startapp,
    'startproject': management.startproject,
    'validate': management.validate,
}

# Actions whose SQL output must not be wrapped in a BEGIN/COMMIT transaction.
NO_SQL_TRANSACTION = ('adminindex', 'createcachetable', 'dbcheck', 'install', 'installperms', 'sqlindexes')


def get_usage():
    """
    Returns a usage string. Doesn't do the options stuff, because optparse
    takes care of that.
    """
    usage = ["usage: %prog action [options]\nactions:"]
    # (Tidied: was keys() into a list followed by an in-place sort.)
    for a in sorted(ACTION_MAPPING):
        func = ACTION_MAPPING[a]
        usage.append(" %s %s -- %s" % (a, func.args, getattr(func, 'help_doc', func.__doc__)))
    return '\n'.join(usage)


class DjangoOptionParser(OptionParser):
    """OptionParser that prints its help to stderr and exits non-zero."""
    def print_usage_and_exit(self):
        self.print_help(sys.stderr)
        sys.exit(1)


def print_error(msg, cmd):
    """Print an error message for command `cmd` to stderr and exit(1)."""
    sys.stderr.write('Error: %s\nRun "%s --help" for help.\n' % (msg, cmd))
    sys.exit(1)


def main():
    """Parse the command line and dispatch to the requested action."""
    # Parse the command-line arguments. optparse handles the dirty work.
    parser = DjangoOptionParser(get_usage())
    parser.add_option('--settings',
        help='Python path to settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.')
    parser.add_option('--pythonpath',
        help='Lets you manually add a directory the Python path, e.g. "/home/djangoprojects/myproject".')
    options, args = parser.parse_args()

    # Take care of options.
    if options.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
    if options.pythonpath:
        sys.path.insert(0, options.pythonpath)

    # Run the appropriate action. Unfortunately, optparse can't handle
    # positional arguments, so this has to parse/validate them.
    try:
        action = args[0]
    except IndexError:
        parser.print_usage_and_exit()
    # (Fixed: dict.has_key() is deprecated/removed; use the `in` operator.)
    if action not in ACTION_MAPPING:
        print_error("Your action, %r, was invalid." % action, sys.argv[0])

    # switch to english, because django-admin creates database content
    # like permissions, and those shouldn't contain any translations.
    # But only do this if we should have a working settings file.
    if action not in ('startproject', 'startapp'):
        from django.utils import translation
        translation.activate('en-us')

    if action in ('createsuperuser', 'init', 'validate'):
        ACTION_MAPPING[action]()
    elif action == 'inspectdb':
        try:
            param = args[1]
        except IndexError:
            parser.print_usage_and_exit()
        try:
            # (Fixed: py2-only print statements; these forms work on 2 and 3.)
            for line in ACTION_MAPPING[action](param):
                print(line)
        except NotImplementedError:
            sys.stderr.write("Error: %r isn't supported for the currently selected database backend.\n" % action)
            sys.exit(1)
    elif action == 'createcachetable':
        try:
            ACTION_MAPPING[action](args[1])
        except IndexError:
            parser.print_usage_and_exit()
    elif action in ('startapp', 'startproject'):
        try:
            name = args[1]
        except IndexError:
            parser.print_usage_and_exit()
        ACTION_MAPPING[action](name, os.getcwd())
    elif action == 'runserver':
        if len(args) < 2:
            addr = ''
            port = '8000'
        else:
            try:
                addr, port = args[1].split(':')
            except ValueError:
                addr, port = '', args[1]
        ACTION_MAPPING[action](addr, port)
    else:
        from django.core import meta
        if action == 'dbcheck':
            mod_list = meta.get_all_installed_modules()
        else:
            try:
                mod_list = [meta.get_app(app_label) for app_label in args[1:]]
            # (Fixed: `except ImportError, e` is py2-only syntax.)
            except ImportError as e:
                sys.stderr.write("Error: %s. Are you sure your INSTALLED_APPS setting is correct?\n" % e)
                sys.exit(1)
        if not mod_list:
            parser.print_usage_and_exit()
        if action not in NO_SQL_TRANSACTION:
            print("BEGIN;")
        for mod in mod_list:
            output = ACTION_MAPPING[action](mod)
            if output:
                print('\n'.join(output))
        if action not in NO_SQL_TRANSACTION:
            print("COMMIT;")


if __name__ == "__main__":
    main()
business processes inside your SAP system. The platform acts as your control center for directing processes, so information can be automatically matched against SAP master data or transactional data. Discrepancies between pieces of information are caught early and corrected before transactions are finalized in SAP. ReadSoft Process Director resides inside SAP, so you have the choice of working with the familiar SAP environment or using an intuitive web interface.
""" Mesh: Operators: Cahn Hilliard ============================== This example is based on the example in the FiPy_ library. Please see their documentation for more information about the Cahn-Hilliard equation. The "Cahn-Hilliard" equation separates a field \\\\( \\\\phi \\\\) into 0 and 1 with smooth transitions. .. math:: \\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \left( \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi \\right) Where \\\\( f \\\\) is the energy function \\\\( f = ( a^2 / 2 )\\\\phi^2(1 - \\\\phi)^2 \\\\) which drives \\\\( \\\\phi \\\\) towards either 0 or 1, this competes with the term \\\\(\\\\epsilon^2 \\\\nabla^2 \\\\phi \\\\) which is a diffusion term that creates smooth changes in \\\\( \\\\phi \\\\). The equation can be factored: .. math:: \\frac{\partial \phi}{\partial t} = \\nabla \cdot D \\nabla \psi \\\\ \psi = \\frac{\partial^2 f}{\partial \phi^2} (\phi - \phi^{\\text{old}}) + \\frac{\partial f}{\partial \phi} - \epsilon^2 \\nabla^2 \phi Here we will need the derivatives of \\\\( f \\\\): .. math:: \\frac{\partial f}{\partial \phi} = (a^2/2)2\phi(1-\phi)(1-2\phi) \\frac{\partial^2 f}{\partial \phi^2} = (a^2/2)2[1-6\phi(1-\phi)] The implementation below uses backwards Euler in time with an exponentially increasing time step. The initial \\\\( \\\\phi \\\\) is a normally distributed field with a standard deviation of 0.1 and mean of 0.5. The grid is 60x60 and takes a few seconds to solve ~130 times. The results are seen below, and you can see the field separating as the time increases. .. 
_FiPy: http://www.ctcms.nist.gov/fipy/examples/cahnHilliard/generated/examples.cahnHilliard.mesh2DCoupled.html """ from __future__ import print_function from SimPEG import Mesh, Utils, Solver import numpy as np import matplotlib.pyplot as plt def run(plotIt=True, n=60): np.random.seed(5) # Here we are going to rearrange the equations: # (phi_ - phi)/dt = A*(d2fdphi2*(phi_ - phi) + dfdphi - L*phi_) # (phi_ - phi)/dt = A*(d2fdphi2*phi_ - d2fdphi2*phi + dfdphi - L*phi_) # (phi_ - phi)/dt = A*d2fdphi2*phi_ + A*( - d2fdphi2*phi + dfdphi - L*phi_) # phi_ - phi = dt*A*d2fdphi2*phi_ + dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) # phi_ - dt*A*d2fdphi2 * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi # (I - dt*A*d2fdphi2) * phi_ = dt*A*(- d2fdphi2*phi + dfdphi - L*phi_) + phi # (I - dt*A*d2fdphi2) * phi_ = dt*A*dfdphi - dt*A*d2fdphi2*phi - dt*A*L*phi_ + phi # (dt*A*d2fdphi2 - I) * phi_ = dt*A*d2fdphi2*phi + dt*A*L*phi_ - phi - dt*A*dfdphi # (dt*A*d2fdphi2 - I - dt*A*L) * phi_ = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi h = [(0.25, n)] M = Mesh.TensorMesh([h, h]) # Constants D = a = epsilon = 1. I = Utils.speye(M.nC) # Operators A = D * M.faceDiv * M.cellGrad L = epsilon**2 * M.faceDiv * M.cellGrad duration = 75 elapsed = 0. 
dexp = -5 phi = np.random.normal(loc=0.5, scale=0.01, size=M.nC) ii, jj = 0, 0 PHIS = [] capture = np.logspace(-1, np.log10(duration), 8) while elapsed < duration: dt = min(100, np.exp(dexp)) elapsed += dt dexp += 0.05 dfdphi = a**2 * 2 * phi * (1 - phi) * (1 - 2 * phi) d2fdphi2 = Utils.sdiag(a**2 * 2 * (1 - 6 * phi * (1 - phi))) MAT = (dt*A*d2fdphi2 - I - dt*A*L) rhs = (dt*A*d2fdphi2 - I)*phi - dt*A*dfdphi phi = Solver(MAT)*rhs if elapsed > capture[jj]: PHIS += [(elapsed, phi.copy())] jj += 1 if ii % 10 == 0: print(ii, elapsed) ii += 1 if plotIt: fig, axes = plt.subplots(2, 4, figsize=(14, 6)) axes = np.array(axes).flatten().tolist() for ii, ax in zip(np.linspace(0, len(PHIS)-1, len(axes)), axes): ii = int(ii) M.plotImage(PHIS[ii][1], ax=ax) ax.axis('off') ax.set_title('Elapsed Time: {0:4.1f}'.format(PHIS[ii][0])) if __name__ == '__main__': run() plt.show()
"Thanks again for all of your help getting us the ladder. Our guys love it!" "This product is cutting edge technology brought to the law enforcement tactical arena. The potential versatility through accessorizing the base model makes this a multi tool must have for all tactical teams. The Syracuse Police Department Emergency Response Team is honored to assist in helping to bring this product to our brother tactical operators." "The ladder is extremely light and easily transported to locations as needed. Training was completed transporting the ladder in its storage bag and then assembling it at a jump off point. One person can easily carry the ladder and have hands free to provide cover or carry other equipment. Assembly was quick, quiet and simple. Training was also completed transporting the ladder assembled. Again one person was able to easily carry the ladder, but his hands were not free for other tasks. The Carbon Fiber ladder is a very light weight and sturdy tool. It is versatile and quiet making it usefull for tactical situations." "The ladder is rated for 350 lbs but can easily hold well over that. For one test we placed the assembled ladder horizontally between two weight benches. We then placed a total of five operators, over 1000 lbs, on the ladder. The ladder did visibly bow, but it held together without breaking. Careful examination afterward, particularly at the joints, did not reveal any stress cracks or other signs of failure anywhere. Even though this is not the type of load application this ladder is designed to withstand it gave us an indication of just how strong it is. I feel very confident in stating that in a near vertical position this ladder would be able to hold as many operators as could physically fit on it. One of the best features of this ladder, aside from its lightweight design, is the fact that it can be used in individual sections or combinations of sections. This can be advantageous when conducting window entries and bus assaults."
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import pytest

from ansible.cli.doc import DocCLI, RoleMixin


# Maps raw doc-markup input to the exact terminal rendering tty_ify() must emit.
TTY_IFY_DATA = {
    # No substitutions
    'no-op': 'no-op',
    'no-op Z(test)': 'no-op Z(test)',
    # Simple cases of all substitutions
    'I(italic)': "`italic'",
    'B(bold)': '*bold*',
    'M(ansible.builtin.module)': '[ansible.builtin.module]',
    'U(https://docs.ansible.com)': 'https://docs.ansible.com',
    'L(the user guide,https://docs.ansible.com/user-guide.html)': 'the user guide <https://docs.ansible.com/user-guide.html>',
    'R(the user guide,user-guide)': 'the user guide',
    'C(/usr/bin/file)': "`/usr/bin/file'",
    'HORIZONTALLINE': '\n{0}\n'.format('-' * 13),
    # Multiple substitutions
    'The M(ansible.builtin.yum) module B(MUST) be given the C(package) parameter. See the R(looping docs,using-loops) for more info':
        "The [ansible.builtin.yum] module *MUST* be given the `package' parameter. See the looping docs for more info",
    # Problem cases
    'IBM(International Business Machines)': 'IBM(International Business Machines)',
    'L(the user guide, https://docs.ansible.com/)': 'the user guide <https://docs.ansible.com/>',
    'R(the user guide, user-guide)': 'the user guide',
    # de-rsty refs and anchors
    'yolo :ref:`my boy` does stuff': 'yolo website for `my boy` does stuff',
    '.. seealso:: Something amazing': 'See website for: Something amazing',
    '.. seealso:: Troublesome multiline\n Stuff goes htere': 'See website for: Troublesome multiline\n Stuff goes htere',
    '.. note:: boring stuff': 'Note: boring stuff',
}


@pytest.mark.parametrize('text, expected', sorted(TTY_IFY_DATA.items()))
def test_ttyify(text, expected):
    """Each markup sample must render exactly as expected for terminal output."""
    assert DocCLI.tty_ify(text) == expected


def test_rolemixin__build_summary():
    """_build_summary() keeps only the short_description of each entry point."""
    mixin = RoleMixin()
    argspec = {
        'main': {'short_description': 'main short description'},
        'alternate': {'short_description': 'alternate short description'},
    }
    fqcn, summary = mixin._build_summary('test_role', 'test.units', argspec)
    assert fqcn == 'test.units.test_role'
    assert summary == {
        'collection': 'test.units',
        'entry_points': {
            'main': 'main short description',
            'alternate': 'alternate short description',
        },
    }


def test_rolemixin__build_summary_empty_argspec():
    """An empty argspec yields an empty entry_points mapping, not an error."""
    mixin = RoleMixin()
    fqcn, summary = mixin._build_summary('test_role', 'test.units', {})
    assert fqcn == 'test.units.test_role'
    assert summary == {'collection': 'test.units', 'entry_points': {}}


def test_rolemixin__build_doc():
    """_build_doc() keeps only the entry point named by the filter."""
    mixin = RoleMixin()
    argspec = {
        'main': {'short_description': 'main short description'},
        'alternate': {'short_description': 'alternate short description'},
    }
    fqcn, doc = mixin._build_doc('test_role', '/a/b/c', 'test.units', argspec, 'main')
    assert fqcn == 'test.units.test_role'
    assert doc == {
        'path': '/a/b/c',
        'collection': 'test.units',
        'entry_points': {'main': argspec['main']},
    }


def test_rolemixin__build_doc_no_filter_match():
    """A filter that matches no entry point makes _build_doc() return None."""
    mixin = RoleMixin()
    argspec = {
        'main': {'short_description': 'main short description'},
        'alternate': {'short_description': 'alternate short description'},
    }
    fqcn, doc = mixin._build_doc('test_role', '/a/b/c', 'test.units', argspec, 'doesNotExist')
    assert fqcn == 'test.units.test_role'
    assert doc is None
Claudia and Garrett did everything right – so they thought: They went to school on student loans, purchased cars with loans, and got a mortgage for a house when all they really wanted to do was travel. What started as a silly idea somehow took hold in their brains: tiny living. Claudia and Garrett have taken what may seem like drastic measures to fight lifestyle inflation and pay off debt. After spending thousands to remodel their kitchen, they sold the “big house” to downsize. What else have they done? Listen to the interview to find out.
import mechanize
import cookielib
from bs4 import BeautifulSoup
from socket import *


def getInfo(ipaddr, userAgent, proxz, hostname):
    # Query RIPE's web search form for `ipaddr` (GRS sources only) and print
    # the first two attributes of every result attribute block.
    # `userAgent`: list of (header, value) pairs for mechanize;
    # `proxz`: proxy dict (currently unused — set_proxies call is commented out);
    # `hostname`: reverse-DNS name printed alongside the address.
    WEBFORM_NAME = 'search'
    browser = mechanize.Browser()
    # RIPE's robots.txt would otherwise block this automated query.
    browser.set_handle_robots(False)
    browser.set_handle_equiv(True)
    browser.set_handle_referer(True)
    browser.set_handle_redirect(True)
    browser.addheaders = userAgent
    # browser.set_proxies(proxz)
    # Keep cookies across the form fetch and submit.
    cookie_jar = cookielib.LWPCookieJar()
    browser.set_cookiejar(cookie_jar)
    page = browser.open('https://apps.db.ripe.net/search/query.html')
    for form in browser.forms():
        if form.name == WEBFORM_NAME:
            browser.select_form(WEBFORM_NAME)
            browser.form['search:queryString'] = ipaddr
            browser.form['search:sources'] = ['GRS']
            submission = browser.submit().read()
            parsed_submission = BeautifulSoup(submission, 'html.parser')
            print ipaddr, '/',hostname
            for mainIndex in parsed_submission.find_all('ul', {'class': 'attrblock'}):
                for i, li in enumerate(mainIndex.findAll('li')):
                    # Only the first two attributes of each block are printed.
                    if i in range(0, 2):
                        print '[+] ', li.text
            print '\n ########## \n'


import struct
import os

# NOTE(review): requires root; switches eth0 to promiscuous mode as a side effect
# and never restores it.
os.popen("sudo ifconfig eth0 promisc")

userAgent = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:40.0) Gecko/20100101 Firefox/40.1')]
proxz = {}
source_ips = []  # IPs already looked up, to avoid duplicate queries

# Raw packet socket capturing IPv4 frames (EtherType 0x0800). Linux-only.
s = socket(PF_PACKET, SOCK_RAW, ntohs(0x0800))
LOCALHOST=['192.168.0.1','127.0.0.1']

while 1:
    pkt = s.recvfrom(2048)
    # Fixed-offset slicing: 14-byte Ethernet header, then 20-byte IP header,
    # then 20 bytes of TCP header. Assumes no IP options (IHL == 5).
    eth_head = pkt[0][:14]
    ip_head = pkt[0][14:34]
    tcp_head = pkt[0][34:54]
    # NOTE(review): the last 2 bytes of the Ethernet header are the EtherType,
    # not a sequence number — the name below is misleading.
    dest_mac, source_mac, seq_number = struct.unpack("!6s6s2s", eth_head)
    # Skip the first 12 IP-header bytes; keep the source/destination addresses.
    neco, source_ip, dest_ip = struct.unpack("!12s4s4s", ip_head)
    # Ports, then bytes 4-12 skipped, TCP flags byte, remainder skipped.
    source_port, dest_port, neco2, flag, neco3 = struct.unpack("!HH9ss6s", tcp_head)
    source_ip = inet_ntoa(source_ip)
    if not source_ip in source_ips and source_ip not in LOCALHOST:
        source_ips.append(source_ip)
        try:
            hostname = gethostbyaddr(source_ip)[0]
        except:
            # Reverse lookup failed (socket.herror etc.); use a placeholder.
            hostname = "err reaching hostname"
        # Don't look up the traffic generated by our own RIPE queries.
        if hostname != 'apps.db.ripe.net':
            getInfo(source_ip, userAgent, proxz, hostname)
Calling all wine lovers! Enjoy a guided tour of the Urban Winery and be taken through six wines from three different wine regions of NSW, complemented with a charcuterie and cheese platter. You'll then have the opportunity to create your own unique blend alongside the head winemaker. Maximum number of guests is 50. Closed-toe shoes must be worn. This experience is located in Sydney's Moore Park. Urban Winery Sydney is located at Building 121, Entertainment Quarter, 122 Lang Road, Moore Park 2021. Customers are able to change participants. Vouchers will be sent via email within 1 business day of order placement. RedBalloon vouchers are not available for delivery in the post, or for click & collect. RedBalloon experience vouchers are non-refundable.
#!/usr/bin/env python
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc.  All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Script to generate a list of all modules to use in autosummary.

This script creates a ReStructured Text file for each public module in the
protobuf Python package. The script also updates the table of contents in
``docs/index.rst`` to point to these module references.

To build the docs with Sphinx:

1. Install the needed packages (``sphinx``, ``sphinxcontrib-napoleon`` for
   Google-style docstring support). I've created a conda environment file to
   make this easier:

.. code:: bash

   conda env create -f python/docs/environment.yml

2. (Optional) Generate reference docs files and regenerate index:

.. code:: bash

   cd python/docs
   python generate_docs.py

3. Run Sphinx.

.. code:: bash

   make html
"""

import pathlib
import re

# Directory layout: this file lives in python/docs/, the package sources in
# python/google/protobuf/.
DOCS_DIR = pathlib.Path(__file__).parent.resolve()
PYTHON_DIR = DOCS_DIR.parent
SOURCE_DIR = PYTHON_DIR / "google" / "protobuf"
SOURCE_POSIX = SOURCE_DIR.as_posix()

# Modules which are always included:
INCLUDED_MODULES = (
    "google.protobuf.internal.containers",
)

# Packages to ignore, including all modules (unless in INCLUDED_MODULES):
IGNORED_PACKAGES = (
    "compiler",
    "docs",
    "internal",
    "pyext",
    "util",
)

# Ignored module stems in all packages (unless in INCLUDED_MODULES):
IGNORED_MODULES = (
    "any_test_pb2",
    "api_pb2",
    "unittest",
    "source_context_pb2",
    "test_messages_proto3_pb2",
    "test_messages_proto2",
)

# Matches the generated region of index.rst so it can be replaced wholesale.
TOC_REGEX = re.compile(
    r"\.\. START REFTOC.*\.\. END REFTOC\.\n",
    flags=re.DOTALL,
)
TOC_TEMPLATE = """.. START REFTOC, generated by generate_docs.py.
.. toctree::

   {toctree}

.. END REFTOC.
"""

AUTOMODULE_TEMPLATE = """.. DO NOT EDIT, generated by generate_docs.py.

.. ifconfig:: build_env == 'readthedocs'

   .. warning::

      You are reading the documentation for the `latest committed changes
      <https://github.com/protocolbuffers/protobuf/tree/master/python>`_ of
      the `Protocol Buffers package for Python
      <https://developers.google.com/protocol-buffers/docs/pythontutorial>`_.
      Some features may not yet be released. Read the documentation for the
      latest released package at `googleapis.dev
      <https://googleapis.dev/python/protobuf/latest/>`_.

{module}
{underline}

.. automodule:: {module}
   :members:
   :inherited-members:
   :undoc-members:
"""


def find_modules():
    """Return the dotted names of all modules/packages to document.

    Packages (``__init__.py``) are reported by their package name; ordinary
    modules by ``package.module``.
    """
    modules = []
    for module_path in SOURCE_DIR.glob("**/*.py"):
        # Determine the (dotted) relative package and module names.
        package_path = module_path.parent.relative_to(PYTHON_DIR)
        package_name = package_path.as_posix().replace("/", ".")
        module_name = package_name + "." + module_path.stem
        # (A previous revision special-cased module_path.parent == SOURCE_DIR,
        # but it compared a relative path against an absolute one, so the
        # branch could never run; every module gets its fully dotted name.)

        # Filter: first, accept anything in the whitelist; then, reject
        # anything at package level, then module name level.
        if any(include == module_name for include in INCLUDED_MODULES):
            pass
        elif any(ignored in package_name for ignored in IGNORED_PACKAGES):
            continue
        elif any(ignored in module_path.stem for ignored in IGNORED_MODULES):
            continue

        if module_path.name == "__init__.py":
            modules.append(package_name)
        else:
            modules.append(module_name)

    return modules


def write_automodule(module):
    """Write the ``.rst`` autodoc stub for ``module`` under DOCS_DIR.

    The output path mirrors the dotted module name, e.g.
    ``google.protobuf.message`` -> ``docs/google/protobuf/message.rst``.
    """
    contents = AUTOMODULE_TEMPLATE.format(module=module, underline="=" * len(module))
    automodule_path = DOCS_DIR.joinpath(*module.split(".")).with_suffix(".rst")
    # exist_ok replaces the previous try/except FileExistsError dance.
    automodule_path.parent.mkdir(parents=True, exist_ok=True)
    with open(automodule_path, "w") as automodule_file:
        automodule_file.write(contents)


def replace_toc(modules):
    """Regenerate the table-of-contents block inside ``docs/index.rst``.

    Only the region between the START/END REFTOC markers is rewritten;
    everything else in index.rst is preserved.
    """
    toctree = [module.replace(".", "/") for module in modules]
    with open(DOCS_DIR / "index.rst", "r") as index_file:
        index_contents = index_file.read()
    toc = TOC_TEMPLATE.format(toctree="\n   ".join(toctree))
    index_contents = re.sub(TOC_REGEX, toc, index_contents)
    with open(DOCS_DIR / "index.rst", "w") as index_file:
        index_file.write(index_contents)


def main():
    """Generate one stub per discovered module, then refresh the index TOC."""
    modules = sorted(find_modules())
    for module in modules:
        print("Generating reference for {}".format(module))
        write_automodule(module)
    print("Generating index.rst")
    replace_toc(modules)


if __name__ == "__main__":
    main()
The Lifeline System acquires instrument readings from a wide variety of devices regardless of connection type (serial, TTL, Bluetooth, IrDA, Ethernet, USB - Handheld, Mobile or Fixed) and relays this information over a secure WiFi network along with GPS location information using the Lifeline Interoperable Network Communicator (LINC). The Lifeline Gateway resolves distance problems by routing the WiFi LINC data to an Internet connection through redundant cellular providers and/or through an Ethernet-connected satellite modem. Live instrument readings are viewed by remote Subject Matter Experts.
#!/usr/bin/python
import shutil
import os, errno
import sys

# Sites blocked by the filter. `search_string` is the token used to recognise
# (and later strip) a hosts-file line; `site_address` is the hostname that gets
# redirected to 127.0.0.1.
SITES_TO_IGNORE = {
    'facebook': {'search_string': 'facebook', 'site_address': 'www.facebook.com'},
    'twitter': {'search_string': 'twitter', 'site_address': 'twitter.com'},
    'g+': {'search_string': 'plus', 'site_address': 'plus.google.com'},
    'gmail': {'search_string': 'mail.google', 'site_address': 'mail.google.com'},
    'flickr': {'search_string': 'flickr', 'site_address': 'www.flickr.com'},
    'flickr_2': {'search_string': 'flickr', 'site_address': 'flickr.com'},
    'repubblica': {'search_string': 'repubblica', 'site_address': 'repubblica.it'},
    'repubblica_2': {'search_string': 'repubblica', 'site_address': 'www.repubblica.it'},
}

ACCEPTED_ARGUMENTS = {
    'act': 'activate the blacklist filter',
    'deact': 'deactivate the blacklist filter',
}

# to be changed
host_file = '/etc/hosts'
backup_host_file = '/etc/hosts.bak'


def edit_file_host(command, source_file=host_file, backup_file=backup_host_file):
    """Activate ('act') or deactivate ('deact') the hosts-file blacklist.

    A backup of source_file is written to backup_file before any change.
    Raises IndexError for any command not in ACCEPTED_ARGUMENTS.
    """
    _check_valid_argument(command)
    _remove_file_if_exists(backup_file)
    _make_backup_copy(source_file, backup_file)
    if command == "act":
        _enable_host_filter(source_file)
        print("blacklist activated")
    else:
        _disable_host_filter(source_file)
        print("blacklist deactivated")


def _check_valid_argument(arg):
    """Raise IndexError unless arg is a recognised command."""
    if arg not in ACCEPTED_ARGUMENTS:
        _allowed_args = ACCEPTED_ARGUMENTS.keys()
        raise IndexError('{} is not a valid argument. Allowed values are: {}'.format(arg, _allowed_args))


def _enable_host_filter(file_path):
    """Append one 127.0.0.1 redirect per blacklisted site to file_path."""
    lines = _get_file_lines(file_path)
    with open(file_path, 'w') as f:
        f.writelines(lines)
        f.writelines(_append_noisy_sites(SITES_TO_IGNORE))


def _append_noisy_sites(sites_dict):
    """Return the '127.0.0.1 <address>' lines for every site in sites_dict."""
    ignoring_site_list = []
    ignoring_site_string = '127.0.0.1'
    for k in sites_dict:
        ignoring_site_list.append('{} {}\n'.format(ignoring_site_string, sites_dict[k]['site_address']))
    return ignoring_site_list


def _disable_host_filter(file_path):
    """Rewrite file_path with all blacklist lines removed."""
    lines = _get_file_lines(file_path)
    cleaned_file_lines = _remove_noisy_sites(lines, SITES_TO_IGNORE)
    with open(file_path, 'w') as f:
        f.writelines(cleaned_file_lines)


def _remove_noisy_sites(lines, sites_dict):
    """Return lines with every entry matching a search_string dropped.

    BUG FIX: previously this consulted the global SITES_TO_IGNORE and
    silently ignored its sites_dict parameter.
    """
    searchable_sites = [site['search_string'] for site in sites_dict.values()]
    return [line for line in lines if not any(token in line for token in searchable_sites)]


def _get_file_lines(file_path):
    """Read and return all lines of file_path."""
    with open(file_path, 'r') as f:
        return f.readlines()


def _remove_file_if_exists(file_path):
    """Delete file_path, ignoring only a missing-file error.

    (Parameter renamed from `file`, which shadowed the builtin.)
    """
    try:
        os.remove(file_path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def _make_backup_copy(source_file, backup_file):
    """Copy source_file (with metadata) to backup_file."""
    shutil.copy2(source_file, backup_file)


if __name__ == '__main__':
    # Read argv only when run as a script: the module-level `sys.argv[1]`
    # previously crashed any attempt to import this file.
    edit_file_host(sys.argv[1])
Another in the series of palette knife oil paintings, this piece depicts cleomes, also known as spider flowers. With their bursting blooms, they bring to mind the colorful explosion of fireworks and flashing sparklers. The puffs of blue in the background enhance the dynamic composition while the color of the flowers echoes in the antique marbles below.
# astests_pyb.py
# Tests for AS_GPS module. Emulates a GPS unit using a UART loopback.
# Run on a Pyboard with X1 and X2 linked
# Tests for AS_GPS module (asynchronous GPS device driver)
# Based on tests for MicropyGPS by Michael Calvin McCoy
# https://github.com/inmcm/micropyGPS
# Copyright (c) 2018-2020 Peter Hinch
# Released under the MIT License (MIT) - see LICENSE file
# Ported to uasyncio V3 OK.

from .as_GPS import *
from machine import UART
import uasyncio as asyncio


def callback(gps, _, arg):
    # Fix callback passed to AS_GPS: fires when the driver records a fix.
    print('Fix callback. Time:', gps.utc, arg)


async def run_tests():
    # With X1 and X2 linked, every sentence written via swriter is read back
    # by sreader and parsed by the driver as if it came from a real GPS unit.
    uart = UART(4, 9600, read_buf_len=200)
    swriter = asyncio.StreamWriter(uart, {})
    sreader = asyncio.StreamReader(uart)
    sentence_count = 0
    # Canned NMEA sentences (with valid checksums) for each sentence type.
    test_RMC = ['$GPRMC,180041.896,A,3749.1851,N,08338.7891,W,001.9,154.9,240911,,,A*7A\n',
                '$GPRMC,180049.896,A,3749.1808,N,08338.7869,W,001.8,156.3,240911,,,A*70\n',
                '$GPRMC,092751.000,A,5321.6802,N,00630.3371,W,0.06,31.66,280511,,,A*45\n']
    test_VTG = ['$GPVTG,232.9,T,,M,002.3,N,004.3,K,A*01\n']
    test_GGA = ['$GPGGA,180050.896,3749.1802,N,08338.7865,W,1,07,1.1,397.4,M,-32.5,M,,0000*6C\n']
    test_GSA = ['$GPGSA,A,3,07,11,28,24,26,08,17,,,,,,2.0,1.1,1.7*37\n',
                '$GPGSA,A,3,07,02,26,27,09,04,15,,,,,,1.8,1.0,1.5*33\n']
    test_GSV = ['$GPGSV,3,1,12,28,72,355,39,01,52,063,33,17,51,272,44,08,46,184,38*74\n',
                '$GPGSV,3,2,12,24,42,058,33,11,34,053,33,07,20,171,40,20,15,116,*71\n',
                '$GPGSV,3,3,12,04,12,204,34,27,11,324,35,32,11,089,,26,10,264,40*7B\n',
                '$GPGSV,3,1,11,03,03,111,00,04,15,270,00,06,01,010,00,13,06,292,00*74\n',
                '$GPGSV,3,2,11,14,25,170,00,16,57,208,39,18,67,296,40,19,40,246,00*74\n',
                '$GPGSV,3,3,11,22,42,067,42,24,14,311,43,27,05,244,00,,,,*4D\n',
                '$GPGSV,4,1,14,22,81,349,25,14,64,296,22,18,54,114,21,51,40,212,*7D\n',
                '$GPGSV,4,2,14,24,30,047,22,04,22,312,26,31,22,204,,12,19,088,23*72\n',
                '$GPGSV,4,3,14,25,17,127,18,21,16,175,,11,09,315,16,19,05,273,*72\n',
                '$GPGSV,4,4,14,32,05,303,,15,02,073,*7A\n']
    test_GLL = ['$GPGLL,3711.0942,N,08671.4472,W,000812.000,A,A*46\n',
                '$GPGLL,4916.45,N,12311.12,W,225444,A,*1D\n',
                '$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n',
                '$GPGLL,4250.5589,S,14718.5084,E,092204.999,A*2D\n',]
    # '$GPGLL,0000.0000,N,00000.0000,E,235947.000,V*2D\n',  # Will ignore this one
    my_gps = AS_GPS(sreader, fix_cb=callback, fix_cb_args=(42,))
    sentence = ''
    # RMC: position, speed, course, date. data_received(date=True) resolves
    # once a date-bearing sentence has been parsed.
    for sentence in test_RMC:
        sentence_count += 1
        await swriter.awrite(sentence)
        await my_gps.data_received(date=True)
        print('Longitude:', my_gps.longitude())
        print('Latitude', my_gps.latitude())
        print('UTC Time:', my_gps.utc)
        print('Speed:', my_gps.speed())
        print('Date Stamp:', my_gps.date)
        print('Course', my_gps.course)
        print('Data is Valid:', my_gps._valid)
        print('Compass Direction:', my_gps.compass_direction())
        print('')
    # GLL: geographic position only.
    for sentence in test_GLL:
        sentence_count += 1
        await swriter.awrite(sentence)
        await my_gps.data_received(position=True)
        print('Longitude:', my_gps.longitude())
        print('Latitude', my_gps.latitude())
        print('UTC Time:', my_gps.utc)
        print('Data is Valid:', my_gps._valid)
        print('')
    # VTG: track and ground speed.
    for sentence in test_VTG:
        print('Test VTG', sentence)
        sentence_count += 1
        await swriter.awrite(sentence)
        await asyncio.sleep_ms(200)  # Can't wait for course because of position check
        print('Speed:', my_gps.speed())
        print('Course', my_gps.course)
        print('Compass Direction:', my_gps.compass_direction())
        print('')
    # GGA: fix data including altitude and satellites in use.
    for sentence in test_GGA:
        sentence_count += 1
        await swriter.awrite(sentence)
        await my_gps.data_received(position=True)
        print('Longitude', my_gps.longitude())
        print('Latitude', my_gps.latitude())
        print('UTC Time:', my_gps.utc)
#        print('Fix Status:', my_gps.fix_stat)
        print('Altitude:', my_gps.altitude)
        print('Height Above Geoid:', my_gps.geoid_height)
        print('Horizontal Dilution of Precision:', my_gps.hdop)
        print('Satellites in Use by Receiver:', my_gps.satellites_in_use)
        print('')
    # GSA: active satellites and dilution-of-precision figures.
    for sentence in test_GSA:
        sentence_count += 1
        await swriter.awrite(sentence)
        await asyncio.sleep_ms(200)
        print('Satellites Used', my_gps.satellites_used)
        print('Horizontal Dilution of Precision:', my_gps.hdop)
        print('Vertical Dilution of Precision:', my_gps.vdop)
        print('Position Dilution of Precision:', my_gps.pdop)
        print('')
    # GSV: satellites in view; data is complete only when all sentences of a
    # multi-sentence group have been received.
    for sentence in test_GSV:
        sentence_count += 1
        await swriter.awrite(sentence)
        await asyncio.sleep_ms(200)
        print('SV Sentences Parsed', my_gps._last_sv_sentence)
        print('SV Sentences in Total', my_gps._total_sv_sentences)
        print('# of Satellites in View:', my_gps.satellites_in_view)
        data_valid = my_gps._total_sv_sentences > 0 and my_gps._total_sv_sentences == my_gps._last_sv_sentence
        print('Is Satellite Data Valid?:', data_valid)
        if data_valid:
            print('Satellite Data:', my_gps._satellite_data)
            print('Satellites Visible:', list(my_gps._satellite_data.keys()))
        print('')
    # Exercise each string-formatting helper once.
    print("Pretty Print Examples:")
    print('Latitude (degs):', my_gps.latitude_string(DD))
    print('Longitude (degs):', my_gps.longitude_string(DD))
    print('Latitude (dms):', my_gps.latitude_string(DMS))
    print('Longitude (dms):', my_gps.longitude_string(DMS))
    print('Latitude (kml):', my_gps.latitude_string(KML))
    print('Longitude (kml):', my_gps.longitude_string(KML))
    print('Latitude (degs, mins):', my_gps.latitude_string())
    print('Longitude (degs, mins):', my_gps.longitude_string())
    print('Speed:', my_gps.speed_string(KPH), 'or',
          my_gps.speed_string(MPH), 'or', my_gps.speed_string(KNOT))
    print('Date (Long Format):', my_gps.date_string(LONG))
    print('Date (Short D/M/Y Format):', my_gps.date_string(DMY))
    print('Date (Short M/D/Y Format):', my_gps.date_string(MDY))
    print('Time:', my_gps.time_string())
    print()
    # Summary statistics accumulated by the driver.
    print('### Final Results ###')
    print('Sentences Attempted:', sentence_count)
    print('Sentences Found:', my_gps.clean_sentences)
    print('Sentences Parsed:', my_gps.parsed_sentences)
    print('Unsupported sentences:', my_gps.unsupported_sentences)
    print('CRC_Fails:', my_gps.crc_fails)

asyncio.run(run_tests())
Once upon a time, those two words came easily. They were honored without any qualms, fear, or the suspicious “yeah, right” that currently runs through your mind. It’s not that you aren’t committed to getting the trust back. You both still want a life together. After the infidelity, the disclosure, and the trauma of it all, there is still a lot of love to save. And that’s also really, really hard. Because rebuilding trust after infidelity is not easy. Can you do it? Of course, if you want to. But to be successful at rebuilding trust, there are things you’ve gotta do. This is work. But don’t get discouraged. It’s worth doing. Start with a good look inside. Are you committed to your relationship? Be sure. Rebuilding trust will be a bumpy road. To get anywhere, you’ll need to stay on it. Exclusivity is a must on this journey. There cannot be a hint of drifting or wavering when building trust in a committed relationship. That is firmly in the past. Trust after infidelity is only built when you and your partner believe the other is all in. Choose your relationship and honor that choice every day. Begin building trust with dedicated help, support, and counseling. Like it or not, broken trust is a messy thing. Especially when betrayal comes by way of cheating and sneaking and lies. There are shards and splinters of fractured trust pricking your relationship deeply. Some in places you expect, and a lot you don’t. Time and energy are precious. Don’t waste them on draining, ineffective attempts to work it out alone. You don’t have to figure this out on your own. You need nonjudgmental people around to hold you both up when you get tired of the pain. You need sure, steady counsel to guide you when you’re weary of the painstaking process of repairing your love. And you’ll likely come to rely on the tools and lessons of therapy to teach you how to really heal. Don’t prolong the pain. Call a therapist who can help get you talking and trusting again. Call for help. 
Keep generating trust by opening yourself up fully, once you feel safe. Hiding from each other does not support a trusting relationship. You'll need to find ways to be transparent and honest, even when your partner isn't actively seeking answers. Open your emotions and thoughts to your partner. Practically speaking, you might need to open your smartphones or laptops too, let them know where you are, and be reliable and set expectations: "I'm in Oakland hunny see you in 10 minutes" so your partner is reassured that the honesty he or she is witnessing is at work in public and private. Is that overkill? Not really. Trust and transparency may require an extra measure of effort to demonstrate your trustworthiness. Part of the consequences for going outside your marriage is now being very available. Demonstrate a willingness to communicate without holding anything back. In the interest of moving forward, it is not too much to expect that passwords are provided, and all answers are straightforward and forthcoming. Sincerity and consistency help rehabilitate trust and intimacy between you. Following infidelity, your primary goal is to be more than just a couple surviving or recovering. To solidify your new and improved relationship, seek to forgive. As you work with your therapist, accept that mutual trust will include self-examination and realizations about how you both participated in the breakdown of your relationship. Releasing each other from shame and blame to do the repair work is freedom to successfully rebuild trust authentically. Ultimately, sincere forgiveness supports trust, accountability, and a return to true partnership. When you rebuild trust well, you will have built a stronger, surer love. Love that is shored up with reliable and healthy relationship habits that will serve you well from here on out. Recommit, recognize the need for transparency, reestablish respect and communication, and release the past.
With help and support, you can do it all for the sake of a solid second chance and the future together you always wanted.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# BUG FIX: `reduce` was used below without being imported — that only works
# on Python 2 where it is a builtin. functools.reduce exists on Python 2.6+
# as well, so this import is compatible with both major versions.
from functools import reduce

from rest_framework import serializers
from rest_framework.reverse import reverse

from rfdocs.models import (RFLibrary, RFKeyword, RFLibraryVersion, RFTag)


class DynamicFieldsModelSerializer(serializers.ModelSerializer):
    """
    A ModelSerializer which takes an additional `fields` argument that
    controls which fields should be displayed.

    When serializing an instance, clients may additionally request fields at
    runtime via repeated `field` query parameters; only names that are
    actually declared on the serializer are honoured.
    """
    def __init__(self, *args, **kwargs):
        default_fields = kwargs.pop('fields', [])
        super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)
        existing = set(self.fields.keys())
        if self.instance:
            if 'request' in self.context:
                requested_fields = self.context['request'].GET.getlist('field', [])
                default_fields.extend([f for f in requested_fields if f in existing])
        allowed = set(default_fields)
        if allowed:
            # An empty `allowed` set means "no restriction": keep everything.
            for field_name in existing - allowed:
                self.fields.pop(field_name)


class ConsecutiveHyperlinkedField(serializers.HyperlinkedIdentityField):
    """
    Inheritor of serializers.HyperlinkedIdentityField serializer that allows
    to define a tuple of lookup fields, where each field can be a
    dot-notated string resolved attribute-by-attribute on the serialized
    object (e.g. 'version.library.slug').
    """
    def __init__(self, *args, **kwargs):
        self.lookup_fields = kwargs.pop('lookup_fields', None)
        super(ConsecutiveHyperlinkedField, self).__init__(*args, **kwargs)

    @staticmethod
    def getattr_consecutive(obj, dot_notated_string):
        """Allows dot-notated strings to be passed to `getattr`."""
        return reduce(getattr, dot_notated_string.split('.'), obj)

    def get_url(self, obj, view_name, request, url_format):
        """Resolve each lookup field on `obj` and reverse the named view."""
        args = ()
        if self.lookup_fields:
            args = (self.getattr_consecutive(obj, arg) for arg in self.lookup_fields)
        return reverse(view_name, args=args, request=request, format=url_format)


class RFKeywordSerializer(serializers.HyperlinkedModelSerializer, DynamicFieldsModelSerializer):
    """Serializer for keywords, with links to their version and library."""
    version = ConsecutiveHyperlinkedField(
        lookup_fields=('version.library.slug', 'version.slug',),
        view_name='rflibraryversion_detail_api',
    )
    library = ConsecutiveHyperlinkedField(
        lookup_fields=('version.library.slug',),
        view_name='rflibrary_detail_api',
    )
    url = ConsecutiveHyperlinkedField(
        lookup_fields=('version.library.slug', 'version.slug', 'name',),
        view_name='rfkeyword_detail_api',
    )

    class Meta:
        model = RFKeyword
        fields = ('name', 'url', 'version', 'arguments', 'documentation', 'library')


class RFLibraryVersionSerializer(serializers.HyperlinkedModelSerializer, DynamicFieldsModelSerializer):
    """Serializer for library versions; nested keyword fields are selectable
    at request time via repeated `keyword_field` query parameters."""
    def __init__(self, *args, **kwargs):
        super(RFLibraryVersionSerializer, self).__init__(*args, **kwargs)
        if 'request' in self.context:
            requested_fields = self.context['request'].GET.getlist('keyword_field', [])
            allowed = set(RFKeywordSerializer.Meta.fields).intersection(set(requested_fields))
            if allowed:
                self.fields['keywords'] = RFKeywordSerializer(
                    fields=list(allowed),
                    many=True,
                    context={'request': self.context['request']}
                )

    library = serializers.StringRelatedField()
    library_url = ConsecutiveHyperlinkedField(
        lookup_fields=('library.slug', ),
        view_name='rflibrary_detail_api'
    )
    url = ConsecutiveHyperlinkedField(
        lookup_fields=('library.slug', 'slug'),
        view_name='rflibraryversion_detail_api',
    )
    keywords = RFKeywordSerializer(
        many=True,
        fields=['name', 'url', 'arguments']
    )

    class Meta:
        model = RFLibraryVersion
        fields = ['name', 'library', 'library_url', 'slug', 'url', 'source_url',
                  'keywords', 'status', 'date_added', 'date_modified', 'date_deprecate']


class RFLibrarySerializer(serializers.HyperlinkedModelSerializer, DynamicFieldsModelSerializer):
    """Serializer for libraries; nested version fields are selectable at
    request time via repeated `version_field` query parameters."""
    def __init__(self, *args, **kwargs):
        super(RFLibrarySerializer, self).__init__(*args, **kwargs)
        if 'request' in self.context:
            requested_fields = self.context['request'].GET.getlist('version_field', [])
            allowed = set(RFLibraryVersionSerializer.Meta.fields).intersection(set(requested_fields))
            if allowed:
                self.fields['versions'] = RFLibraryVersionSerializer(
                    fields=list(allowed),
                    many=True,
                    context={'request': self.context['request']}
                )

    url = serializers.HyperlinkedIdentityField(
        view_name='rflibrary_detail_api',
        lookup_field='slug'
    )
    versions = RFLibraryVersionSerializer(
        fields=['name', 'url'],
        many=True
    )

    class Meta:
        model = RFLibrary
        fields = ('name', 'slug', 'url', 'versions')
        lookup_field = 'slug'


class RFTagSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for tags with their associated versions."""
    url = serializers.HyperlinkedIdentityField(
        lookup_field='slug',
        view_name='rftag_detail_api',
    )
    # NOTE(review): unlike the other nested serializers this one is not
    # declared with many=True — confirm against the RFTag.versions relation.
    versions = RFLibraryVersionSerializer(fields=('name', 'url'))

    class Meta:
        model = RFTag
        fields = ('name', 'slug', 'url', 'versions')
Perhaps you’ve seen the video of an athlete moving through the agility ladder with his feet hardly visible because they are moving so fast. This remains the common mainstream perception of agility training. Does this, however, actually make athletes more agile on the field?
# -*- coding: utf-8 -*-

# Form implementation generated from reading ui file 'ui_fiscal_icnfabrir.ui'
#
# Created: Mon Nov 24 22:25:54 2014
#      by: pyside-uic 0.2.15 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!

from PySide import QtCore, QtGui
from pydaruma.pydaruma import iCNFAbrir_ECF_Daruma
from scripts.fiscal.retornofiscal import tratarRetornoFiscal


class Ui_ui_FISCAL_iCNFAbrir(QtGui.QWidget):
    """Fixed-size (263x123) form driving the Daruma ECF call iCNFAbrir_ECF_Daruma.

    The widget tree in setupUi/retranslateUi was generated by pyside-uic from
    'ui_fiscal_icnfabrir.ui'; only the two click handlers contain hand-written
    behavior (send the form values to the printer, or close the window).
    """

    def __init__(self):
        super(Ui_ui_FISCAL_iCNFAbrir, self).__init__()
        self.setupUi(self)
        # pushButton is the "Abrir CNF" (send) button, pushButton_2 is "Cancelar".
        self.pushButton.clicked.connect(self.on_pushButtonEnviar_clicked)
        self.pushButton_2.clicked.connect(self.on_pushButtonCancelar_clicked)

    def on_pushButtonEnviar_clicked(self):
        """Collect the form fields and call iCNFAbrir_ECF_Daruma, routing the
        returned status code through the shared tratarRetornoFiscal handler."""
        # Variables receiving the values typed into the UI.
        StrCPF = self.lineEditCPF.text()
        StrNome = self.lineEditNome.text()
        StrEndereco = self.lineEditEndereco.text()

        # Invoke the printer method and hand its return code to the handler.
        tratarRetornoFiscal(iCNFAbrir_ECF_Daruma(StrCPF,StrNome,StrEndereco), self)

    def on_pushButtonCancelar_clicked(self):
        """Close the window without talking to the printer."""
        self.close()

    def setupUi(self, ui_FISCAL_iCNFAbrir):
        """pyside-uic generated: build the widget tree and layouts."""
        ui_FISCAL_iCNFAbrir.setObjectName("ui_FISCAL_iCNFAbrir")
        ui_FISCAL_iCNFAbrir.resize(263, 123)
        # Minimum size equals maximum size: the dialog is not resizable.
        ui_FISCAL_iCNFAbrir.setMinimumSize(QtCore.QSize(263, 123))
        ui_FISCAL_iCNFAbrir.setMaximumSize(QtCore.QSize(263, 123))
        self.verticalLayout = QtGui.QVBoxLayout(ui_FISCAL_iCNFAbrir)
        self.verticalLayout.setObjectName("verticalLayout")
        # Grid with one label / line-edit row per field (nome, endereco, cpf).
        self.gridLayout = QtGui.QGridLayout()
        self.gridLayout.setObjectName("gridLayout")
        self.label_nome = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
        self.label_nome.setObjectName("label_nome")
        self.gridLayout.addWidget(self.label_nome, 0, 0, 1, 1)
        self.lineEditNome = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.lineEditNome.sizePolicy().hasHeightForWidth())
        self.lineEditNome.setSizePolicy(sizePolicy)
        self.lineEditNome.setMaxLength(255)
        # Generated artifact: cursor position recorded by Qt Designer.
        self.lineEditNome.setCursorPosition(27)
        self.lineEditNome.setObjectName("lineEditNome")
        self.gridLayout.addWidget(self.lineEditNome, 0, 1, 1, 1)
        self.label_endereco = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
        self.label_endereco.setObjectName("label_endereco")
        self.gridLayout.addWidget(self.label_endereco, 1, 0, 1, 1)
        self.lineEditEndereco = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
        self.lineEditEndereco.setObjectName("lineEditEndereco")
        self.gridLayout.addWidget(self.lineEditEndereco, 1, 1, 1, 1)
        self.label_cpf = QtGui.QLabel(ui_FISCAL_iCNFAbrir)
        self.label_cpf.setObjectName("label_cpf")
        self.gridLayout.addWidget(self.label_cpf, 2, 0, 1, 1)
        self.lineEditCPF = QtGui.QLineEdit(ui_FISCAL_iCNFAbrir)
        self.lineEditCPF.setObjectName("lineEditCPF")
        self.gridLayout.addWidget(self.lineEditCPF, 2, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        # Button row, centred by expanding spacers on both sides.
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem)
        self.pushButton = QtGui.QPushButton(ui_FISCAL_iCNFAbrir)
        self.pushButton.setObjectName("pushButton")
        self.horizontalLayout.addWidget(self.pushButton)
        self.pushButton_2 = QtGui.QPushButton(ui_FISCAL_iCNFAbrir)
        self.pushButton_2.setObjectName("pushButton_2")
        self.horizontalLayout.addWidget(self.pushButton_2)
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem1)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.retranslateUi(ui_FISCAL_iCNFAbrir)
        QtCore.QMetaObject.connectSlotsByName(ui_FISCAL_iCNFAbrir)

    def retranslateUi(self, ui_FISCAL_iCNFAbrir):
        """pyside-uic generated: set the window title, labels and default
        field values (all wrapped as translatable strings)."""
        ui_FISCAL_iCNFAbrir.setWindowTitle(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Método iCNFAbrir_ECF_Daruma", None, QtGui.QApplication.UnicodeUTF8))
        self.label_nome.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Nome:", None, QtGui.QApplication.UnicodeUTF8))
        self.lineEditNome.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Daruma Developers Community", None, QtGui.QApplication.UnicodeUTF8))
        self.label_endereco.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Endereço:", None, QtGui.QApplication.UnicodeUTF8))
        self.lineEditEndereco.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Shishima Hifumi", None, QtGui.QApplication.UnicodeUTF8))
        self.label_cpf.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "CPF:", None, QtGui.QApplication.UnicodeUTF8))
        self.lineEditCPF.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "111.111.111-11", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Abrir CNF", None, QtGui.QApplication.UnicodeUTF8))
        self.pushButton_2.setText(QtGui.QApplication.translate("ui_FISCAL_iCNFAbrir", "Cancelar", None, QtGui.QApplication.UnicodeUTF8))
Rising Dragon Hotel is located on a premier shopping street, with arts and crafts stores along the same block and throughout the surrounding neighborhood. Our hotel is a brand new six-floor building with 35 rooms, renovated in June 2009. Ideal for leisure travelers, whether staying briefly in Hanoi or on an extended visit, we offer an on-site restaurant and full travel services! Our hotel boasts large smoking and non-smoking rooms, some of which have beautiful balconies overlooking the Old Quarter and large bathrooms with bathtubs. Each deluxe and standard room in our safe and quiet building has air conditioning and hot water.
#!/usr/bin/env python3

"""Entry point for mail_notify: watch a maildir and report new mail.

Configuration is layered: built-in defaults (the ``default_config`` file
next to this script), then an optional user config file (``--config``),
then the parsed command-line arguments themselves.
"""

import argparse
import importlib.machinery
import importlib.util
import logging
import sys
import os

import watcher
import config


def _load_config_module(name, path):
    """Load a Python source file at *path* as a module object.

    Replacement for the deprecated ``imp.load_source`` (the ``imp`` module
    was removed in Python 3.12).  An explicit SourceFileLoader is used so
    that files without a ``.py`` extension (e.g. ``default_config``) load
    exactly as ``imp.load_source`` did.
    """
    loader = importlib.machinery.SourceFileLoader(name, path)
    spec = importlib.util.spec_from_loader(name, loader)
    module = importlib.util.module_from_spec(spec)
    loader.exec_module(module)
    return module


def main(argv):
    """Parse *argv*, set up logging and layered config, then start watching.

    Arguments:
        argv: command-line arguments, without the program name.
    """
    parser = argparse.ArgumentParser('mail_notify')
    parser.add_argument('maildir')
    parser.add_argument('--config', '-c', help="configuration file")
    parser.add_argument('--verbose', '-v', help="make me verbose",
                        action='store_true')
    parser.add_argument('--debug', help="make me very verbose",
                        action='store_true')
    args = parser.parse_args(argv)

    # --debug wins over --verbose: it lowers the threshold all the way and
    # switches to a timestamped log format.
    logger_level = logging.WARNING
    logger_format = '%(levelname)s: %(message)s'
    if args.verbose:
        logger_level = logging.INFO
    if args.debug:
        logger_level = logging.NOTSET
        logger_format = '%(asctime)-15s: %(name)s: %(levelname)s: %(message)s'
    logging.basicConfig(format=logger_format, level=logger_level)

    # Layer the configuration: shipped defaults, then the user config file,
    # then the command-line arguments (highest priority).
    basedir = os.path.dirname(__file__)
    config.override(
        _load_config_module(
            'config', os.path.join(basedir, 'default_config')).__dict__)
    if args.config:
        config.override(_load_config_module('config', args.config).__dict__)
    config.override(args.__dict__)

    watcher.watch_maildir(args.maildir)


if __name__ == '__main__':
    main(sys.argv[1:])
Sunday will be fun in Rose Park! Please come – all are welcome. The Eggstravaganza is for the young and those that enjoy watching them have a great time. Lots of craft action for the kids, plus coffee, pastries and a Tai Chi demo for the adults. Background music by Danny our very own RPN DJ! Shannon Jones and Team returns this year with cake/pie for the Cake Walk and is generously supporting our craft stations.
""" Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Ambari Agent """ import sys import re from resource_management.libraries.functions.version import compare_versions from resource_management import * import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. 
CURL_CONNECTION_TIMEOUT = '5'  # seconds, passed straight to "curl --connect-timeout"


class ServiceCheck(Script):
  """YARN smoke test: run the distributed-shell example application and then
  verify through the ResourceManager REST API that it finished successfully."""

  def service_check(self, env):
    """Submit a distributed-shell job and validate its final state.

    Raises an Exception when the tracking URL / application id cannot be
    extracted, when no ResourceManager returns a parsable application
    report, or when the application did not end FINISHED/SUCCEEDED.
    """
    import params
    env.set_params(params)

    # IOP >= 4.0 ships the jar under /usr/iop/current; older stacks keep a
    # versioned jar under /usr/lib/hadoop-yarn (hence the glob).
    if params.stack_version != "" and compare_versions(params.stack_version, '4.0') >= 0:
      path_to_distributed_shell_jar = "/usr/iop/current/hadoop-yarn-client/hadoop-yarn-applications-distributedshell.jar"
    else:
      path_to_distributed_shell_jar = "/usr/lib/hadoop-yarn/hadoop-yarn-applications-distributedshell*.jar"

    yarn_distrubuted_shell_check_cmd = format("yarn org.apache.hadoop.yarn.applications.distributedshell.Client "
                                              "-shell_command ls -num_containers {number_of_nm} -jar {path_to_distributed_shell_jar}")

    # On secure clusters, authenticate as the smoke user before submitting.
    if params.security_enabled:
      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser_principal};")
      smoke_cmd = format("{kinit_cmd} {yarn_distrubuted_shell_check_cmd}")
    else:
      smoke_cmd = yarn_distrubuted_shell_check_cmd

    return_code, out = shell.checked_call(smoke_cmd,
                                          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                                          user=params.smokeuser,
                                          )

    # Fail with a clear message instead of an AttributeError on m.group()
    # when the client output does not contain the tracking URL.
    m = re.search(r"appTrackingUrl=(.*),\s", out)
    if m is None:
      raise Exception("Could not find appTrackingUrl in distributed shell output: " + out)
    app_url = m.group(1)

    # The application id is one of the path segments of the tracking URL.
    application_name = None
    splitted_app_url = str(app_url).split('/')
    for item in splitted_app_url:
      if "application" in item:
        application_name = item
    if application_name is None:
      raise Exception("Could not find the application id in tracking url " + app_url)

    # Ask each ResourceManager (active and standby in HA setups) about the
    # application until one returns a parsable application report.
    json_response_received = False
    for rm_host in params.rm_hosts:
      info_app_url = "http://" + rm_host + ":" + params.rm_port + "/ws/v1/cluster/apps/" + application_name
      get_app_info_cmd = "curl --negotiate -u : -sL --connect-timeout " + CURL_CONNECTION_TIMEOUT + " " + info_app_url

      return_code, stdout = shell.checked_call(get_app_info_cmd,
                                               path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
                                               user=params.smokeuser,
                                               )

      try:
        json_response = json.loads(stdout)
        app_state = json_response['app']['state']
        app_final_status = json_response['app']['finalStatus']
      except (ValueError, KeyError, TypeError):
        # Not a valid application report (e.g. a standby RM or an error
        # page answered); try the next ResourceManager.
        continue

      json_response_received = True
      # Validate outside the try block so this failure is actually raised;
      # previously it was swallowed by a broad "except Exception: pass",
      # which made the whole state check a no-op.
      if app_state != "FINISHED" or app_final_status != "SUCCEEDED":
        raise Exception("Application " + app_url + " state/status is not valid. Should be FINISHED/SUCCEEDED.")

    if not json_response_received:
      raise Exception("Could not get json response from YARN API")


if __name__ == "__main__":
  ServiceCheck().execute()
Funny Football News: Has Klopp found the missing piece to the jigsaw? Do you know one of the Christmas traditions in our household? A good old-fashioned jigsaw puzzle after Christmas lunch. But not a new puzzle, oh no. An old puzzle. Maybe a puzzle bought from a charity shop. Maybe a puzzle that has been passed down through the generations. Never, ever a new puzzle still freshly wrapped. And you know what else is a Christmas tradition? Getting to the end of the sodding puzzle and realising that some of the pieces are missing. That is why I am quite jealous of Jurgen Klopp right now if I am to believe what Liverpool defender Dejan Lovren is saying. I know, that could well be my first mistake in today’s column; listening to a man who declared himself the best defender in the world at the World Cup when, frankly, he’s not even the best defender at his club. The Croatian has declared that King Kloppo has ‘found the pieces to solve the puzzle’ which is merely an ambition in this house. Of course, Lovren is passing comment on the fact that Liverpool now lead Man City by six whole points at the top of the Premier League and not the finishing touches of a nice wintery scene outside a country pub. Whilst I commend the positive attitude of the Liverpool players, believing they can win the title – and why shouldn’t they? – I really would suggest that the best course of action is to keep those thoughts inside your head until you’ve lifted the trophy. It would be different, of course, if you were Liverpool of the early 80s where titles arrived like presents under the tree or Manchester United under Ferguson where they may as well just have given you the trophy at Christmas. Liverpool have not won the top flight since 1990, so maybe it would be better just to keep quiet, not say anything that could be used as motivation by the chasing pack and focus on not bloody well bottling it again? Peppy G is a worried man, understandably. 
He has spoken of the need for a ‘new dynamic’ in the team having lost two games on the bounce. What Pep really means, of course, is a couple of new signings so expect him to go back on his word about nobody joining in January as he searches the planet for someone capable of being the new Fernandinho and anyone that can do a better job than Fabian Delph at left back. Paul Pogba is said to be ‘improving’ but ‘can do more’ according to his midfield minder, Nemanja Matic. Much has been made, as expected, of Pogba suddenly remembering how to play football since the departure of the man who made him ‘a better player and person’. But Matic wants more, presumably so he doesn’t have to keep doing the work of one-and-a-half players in the middle of the park. Arsenal manager Unai Emery is facing a charge of ‘improper conduct’ following his side’s draw with Brighton. No, the improper conduct was not continuing to replace attacking players with defensive midfielders but kicking a bottle in frustration which hit a Brighton fan. Don’t let the fact that Emery immediately apologised and shook hands with the fan, or the fact that said fan seemed to accept it graciously, count for anything. Oh, no. Throw the book at him FA because, after all, you need to be seen to be tackling the big issues of the day, right? Southampton’s revival seems to have come to an abrupt halt, losing 2-1 at home to West Ham United. That Felipe Anderson, he is fast becoming West Ham’s new Dimitri Payet. By which, of course, I mean he’ll be really good this season before buggering off to a bigger club having gone on strike to force the move through. Cenk Tosun has been assured he still has a future at Everton by Marco Silva. That future is sitting on the bench watching either Dominic Calvert-Lewin or Richarlison play up front. Things are so bad for Mesut Ozil that he looks like he is about to meet the same fate as a young English player at Chelsea. The dreaded loan is about to come his way and Ozil might well be off to Italy and Inter.
That’s not like Inter to throw stupid money at a bad situation and make it worse, is it? There’s also another surprise name being linked to Serie A and it’s not Aaron Ramsey or Cesc Fabregas for once. Roma are having a more than average season and have decided the only way to improve things is to sort out a better situation in the attacking left back berth. As a result, their gaze has landed upon Crystal Palace’s Patrick van Aarnholt. Obviously. £25m they reckon. Fulham are being investigated having paid Nice around £20m for Maxime Le Marchand and Jean Michael Seri in the summer. There’s talk of financial fuckwittery, though if I was Fulham I’d be going back to Nice and double checking that they did send over the Jean Michael Seri that was on the verge of a move to Barcelona because nobody is convinced it is the same player. The latest round of ‘they love me, they love me not’ for Jose Mourinho and Real Madrid says that Madrid are now NOT interested in taking back their former lover. All that can really mean is that Florentino Perez’s eye has been caught by a much younger, more attractive potential target and he is now beginning the process of wooing them. Watch out United, you have competition for the Poch after all.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import datetime


class Migration(migrations.Migration):
    """Auto-generated schema migration for the 'almoxarifado' (warehouse) app.

    Creates the material-list models (requested, purchased and delivered
    material lines/lists tied to a commercial contract) and then wires the
    line models to their list and product foreign keys.  Generated code:
    do not edit by hand once this migration has been applied.
    """

    # Cross-app dependencies: the FKs below target rh.Funcionario,
    # estoque.Produto and comercial.ContratoFechado.
    dependencies = [
        ('rh', '0001_initial'),
        ('estoque', '0005_auto_20141001_0953'),
        ('comercial', '0007_auto_20141006_1852'),
        ('almoxarifado', '0003_auto_20140917_0843'),
    ]

    operations = [
        # Line of a contract material list: how much was requested and how
        # much has already been fulfilled.
        # NOTE(review): 'default=datetime.datetime.now' together with
        # auto_now_add/auto_now is redundant (Django ignores 'default' when
        # auto_now* is set) and is naive-datetime based — confirm intent.
        migrations.CreateModel(
            name='LinhaListaMaterial',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade_requisitada', models.DecimalField(max_digits=10, decimal_places=2)),
                ('quantidade_ja_atendida', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Line of a purchase list: quantity to buy of one product.
        migrations.CreateModel(
            name='LinhaListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Line of a delivery list: quantity delivered of one product.
        migrations.CreateModel(
            name='LinhaListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('quantidade', models.DecimalField(max_digits=10, decimal_places=2)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Purchase list header, optionally tied to a contract.
        migrations.CreateModel(
            name='ListaMaterialCompra',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Contract material list header: one-to-one with the contract.
        migrations.CreateModel(
            name='ListaMaterialDoContrato',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ativa', models.BooleanField(default=True)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.OneToOneField(null=True, blank=True, to='comercial.ContratoFechado')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # Delivery list header: records who delivered and who received.
        migrations.CreateModel(
            name='ListaMaterialEntregue',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('entregue', models.BooleanField(default=False)),
                ('criado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Criado', auto_now_add=True)),
                ('atualizado', models.DateTimeField(default=datetime.datetime.now, verbose_name=b'Atualizado', auto_now=True)),
                ('contrato', models.ForeignKey(blank=True, to='comercial.ContratoFechado', null=True)),
                ('entregue_para', models.ForeignKey(related_name=b'entregue_para_set', to='rh.Funcionario')),
                ('entregue_por', models.ForeignKey(related_name=b'entregue_por_set', to='rh.Funcionario')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        # FK wiring: added after the CreateModel operations so the target
        # models already exist.
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialEntregue'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialentregue',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialCompra'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterialcompra',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='lista',
            field=models.ForeignKey(to='almoxarifado.ListaMaterialDoContrato'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='linhalistamaterial',
            name='produto',
            field=models.ForeignKey(to='estoque.Produto'),
            preserve_default=True,
        ),
    ]
According to Econsultancy’s State of Search Marketing Report 2013, 74 percent of companies and 82 percent of agencies surveyed said social media is either somewhat or highly integrated into their SEO strategy. However, two months after collecting the data for this report, Google’s Matt Cutts stated that social signals, such as Facebook ‘likes’ and Twitter followers, do not actually affect search rankings. This threw digital marketers off-balance; most assumed social signals were factored into Google’s search algorithm and would therefore improve their SEO. So how does social media impact your organization’s SEO? Consider these factors to help your company’s SEO-focused marketers better understand the relationship between social and SEO. While social signals may not help as much as previously thought, links published to your organization’s social media account may be a different story. Many marketers say links to your website through digital platforms can have a major impact on your rankings, as it makes your site more credible and hosts more keywords for SEO to pick up on. Google has contradicted itself on its incorporation of the number of times a link has been tweeted or liked into their search rank algorithm, but Bing stated that it considers this data for SEO. Google has confirmed it crawls social websites for data just like any other site, so links published to social platforms should be viewed as credible backlinks and positively influence your company’s page rank. Google, Bing and Yahoo are no longer the only places your audience goes to look for information. They also use social media platforms to find what they are looking for. For example, if a user is looking for digital marketing-related topics through Twitter, it is possible they will come upon one of your organization’s articles about digital marketing. Businesses that post inspiring visuals can also benefit and make their content more visible through platforms like Pinterest and Instagram. 
If a user is interested in your company, they are also likely to go through your social media channels and see what your presence is like on each one. Be sure they will have something inspiring and attention-grabbing to find and keep them on your site. It can only help boost your SEO. While Google may not consider social signals in their SEO, Bing continues to look into it when organizing their search rankings. Bing looks to the social authority of each site, such as how many people follow your business, how many individuals and organizations you follow and how fans engage with your posts. These aspects add weight when Bing goes to rank your organization’s site. Remember that Bing is the second most-utilized search engine and is more transparent about their algorithm than Google, so don’t forget Bing when creating an SEO strategy for your business.
# This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2018 Palo Alto Networks techbizdev, <techbizdev@paloaltonetworks.com> # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
from __future__ import absolute_import, division, print_function
__metaclass__ = type

# Error template for the pandevice / PAN-OS minimum version checks below.
_MIN_VERSION_ERROR = '{0} version ({1}) < minimum version ({2})'

# pandevice is optional at import time; get_pandevice_parent() fails the
# module cleanly when it is missing.
HAS_PANDEVICE = True
try:
    import pandevice
    from pandevice.base import PanDevice
    from pandevice.firewall import Firewall
    from pandevice.panorama import DeviceGroup, Template, TemplateStack
    from pandevice.policies import PreRulebase, PostRulebase, Rulebase
    from pandevice.device import Vsys
    from pandevice.errors import PanDeviceError
except ImportError:
    HAS_PANDEVICE = False


def _vstr(val):
    # Render a 3-tuple version such as (8, 1, 0) as "8.1.0" for messages.
    return '{0}.{1}.{2}'.format(*val)


class ConnectionHelper(object):
    """Holds the generated argument spec and builds the pandevice object
    tree for a module run.  Instances are produced by get_connection()."""

    def __init__(self, min_pandevice_version, min_panos_version,
                 panorama_error, firewall_error):
        """Performs connection initialization and determines params."""
        # Params for AnsibleModule.
        self.argument_spec = {}
        self.required_one_of = []

        # Params for pandevice tree construction.  Each holds the *name* of
        # the module param to read (or None when that axis is unused); they
        # are filled in by get_connection().
        self.vsys = None
        self.device_group = None
        self.vsys_dg = None
        self.rulebase = None
        self.template = None
        self.template_stack = None
        self.vsys_importable = None
        self.min_pandevice_version = min_pandevice_version
        self.min_panos_version = min_panos_version
        self.panorama_error = panorama_error
        self.firewall_error = firewall_error

        # The PAN-OS device.
        self.device = None

    def get_pandevice_parent(self, module):
        """Builds the pandevice object tree, returning the parent object.

        If pandevice is not installed, then module.fail_json() will be
        invoked.

        Arguments:
            * module(AnsibleModule): the ansible module.

        Returns:
            * The parent pandevice object based on the spec given to
              get_connection().
        """
        # Sanity check.
        if not HAS_PANDEVICE:
            module.fail_json(msg='Missing required library "pandevice".')

        # Verify pandevice minimum version.
        # NOTE(review): assumes a purely numeric dotted version string;
        # a pre-release suffix would raise ValueError here — confirm.
        if self.min_pandevice_version is not None:
            pdv = tuple(int(x) for x in pandevice.__version__.split('.'))
            if pdv < self.min_pandevice_version:
                module.fail_json(msg=_MIN_VERSION_ERROR.format(
                    'pandevice', pandevice.__version__,
                    _vstr(self.min_pandevice_version)))

        # Gather authentication info, preferring the "provider" dict over
        # the deprecated classic top-level params.
        pan_device_auth, serial_number = None, None
        if module.params['provider'] and module.params['provider']['ip_address']:
            pan_device_auth = (
                module.params['provider']['ip_address'],
                module.params['provider']['username'],
                module.params['provider']['password'],
                module.params['provider']['api_key'],
                module.params['provider']['port'],
            )
            serial_number = module.params['provider']['serial_number']
        elif module.params.get('ip_address', None) is not None:
            pan_device_auth = (
                module.params['ip_address'],
                module.params['username'],
                module.params['password'],
                module.params['api_key'],
                module.params['port'],
            )
            msg = 'Classic provider params are deprecated; use "provider" instead'
            module.deprecate(msg, '2.12')
        else:
            module.fail_json(msg='Provider params are required.')

        # Create the connection object.
        try:
            self.device = PanDevice.create_from_device(*pan_device_auth)
        except PanDeviceError as e:
            module.fail_json(msg='Failed connection: {0}'.format(e))

        # Verify PAN-OS minimum version.
        if self.min_panos_version is not None:
            if self.device._version_info < self.min_panos_version:
                module.fail_json(msg=_MIN_VERSION_ERROR.format(
                    'PAN-OS', _vstr(self.device._version_info),
                    _vstr(self.min_panos_version)))

        # Optional: Firewall via Panorama connectivity specified.
        if hasattr(self.device, 'refresh_devices') and serial_number:
            fw = Firewall(serial=serial_number)
            self.device.add(fw)
            self.device = fw

        # "parent" is re-rooted step by step as each configured axis
        # (template stack, template, vsys, device group, rulebase) is
        # resolved; the final object is what callers attach config to.
        parent = self.device
        not_found = '{0} "{1}" is not present.'
        pano_mia_param = 'Param "{0}" is required for Panorama but not specified.'
        ts_error = 'Specify either the template or the template stack{0}.'
        # Presence of refresh_devices is used to distinguish Panorama from
        # a firewall connection.
        if hasattr(self.device, 'refresh_devices'):
            # Panorama connection.
            # Error if Panorama is not supported.
            if self.panorama_error is not None:
                module.fail_json(msg=self.panorama_error)

            # Spec: template stack.
            tmpl_required = False
            added_template = False
            if self.template_stack is not None:
                name = module.params[self.template_stack]
                if name is not None:
                    stacks = TemplateStack.refreshall(parent, name_only=True)
                    # for/else: the else branch runs only when no stack
                    # matched (no break).
                    for ts in stacks:
                        if ts.name == name:
                            parent = ts
                            added_template = True
                            break
                    else:
                        module.fail_json(msg=not_found.format(
                            'Template stack', name,
                        ))
                elif self.template is not None:
                    # No stack given but templates are supported: one of
                    # the two must end up specified.
                    tmpl_required = True
                else:
                    module.fail_json(msg=pano_mia_param.format(self.template_stack))

            # Spec: template.
            if self.template is not None:
                name = module.params[self.template]
                if name is not None:
                    if added_template:
                        # Template and template stack are mutually exclusive.
                        module.fail_json(msg=ts_error.format(', not both'))
                    templates = Template.refreshall(parent, name_only=True)
                    for t in templates:
                        if t.name == name:
                            parent = t
                            break
                    else:
                        module.fail_json(msg=not_found.format(
                            'Template', name,
                        ))
                elif tmpl_required:
                    module.fail_json(msg=ts_error.format(''))
            else:
                module.fail_json(msg=pano_mia_param.format(self.template))

            # Spec: vsys importable.
            vsys_name = self.vsys_importable or self.vsys
            if vsys_name is not None:
                name = module.params[vsys_name]
                if name not in (None, 'shared'):
                    vo = Vsys(name)
                    parent.add(vo)
                    parent = vo

            # Spec: vsys_dg or device_group.
            dg_name = self.vsys_dg or self.device_group
            if dg_name is not None:
                name = module.params[dg_name]
                if name not in (None, 'shared'):
                    groups = DeviceGroup.refreshall(parent, name_only=True)
                    for dg in groups:
                        if dg.name == name:
                            parent = dg
                            break
                    else:
                        module.fail_json(msg=not_found.format(
                            'Device group', name,
                        ))

            # Spec: rulebase.  On Panorama a missing value defaults to the
            # pre-rulebase.
            if self.rulebase is not None:
                if module.params[self.rulebase] in (None, 'pre-rulebase'):
                    rb = PreRulebase()
                    parent.add(rb)
                    parent = rb
                elif module.params[self.rulebase] == 'rulebase':
                    rb = Rulebase()
                    parent.add(rb)
                    parent = rb
                elif module.params[self.rulebase] == 'post-rulebase':
                    rb = PostRulebase()
                    parent.add(rb)
                    parent = rb
                else:
                    module.fail_json(msg=not_found.format(
                        'Rulebase', module.params[self.rulebase]))
        else:
            # Firewall connection.
            # Error if firewalls are not supported.
            if self.firewall_error is not None:
                module.fail_json(msg=self.firewall_error)

            # Spec: vsys or vsys_dg or vsys_importable.
            vsys_name = self.vsys_dg or self.vsys or self.vsys_importable
            if vsys_name is not None:
                parent.vsys = module.params[vsys_name]

            # Spec: rulebase.  Firewalls only have the one rulebase.
            if self.rulebase is not None:
                rb = Rulebase()
                parent.add(rb)
                parent = rb

        # Done.
        return parent


def get_connection(vsys=None, device_group=None,
                   vsys_dg=None, vsys_importable=None,
                   rulebase=None, template=None, template_stack=None,
                   with_classic_provider_spec=False, with_state=True,
                   argument_spec=None, required_one_of=None,
                   min_pandevice_version=None, min_panos_version=None,
                   panorama_error=None, firewall_error=None):
    """Returns a helper object that handles pandevice object tree init.

    The `vsys`, `device_group`, `vsys_dg`, `vsys_importable`, `rulebase`,
    `template`, and `template_stack` params can be any of the following
    types:

        * None - do not include this in the spec
        * True - use the default param name
        * string - use this string for the param name

    The `min_pandevice_version` and `min_panos_version` args expect a 3
    element tuple of ints.  For example, `(0, 6, 0)` or `(8, 1, 0)`.

    If you are including template support (by defining either `template`
    and/or `template_stack`), and the thing the module is enabling the
    management of is an "importable", you should define either
    `vsys_importable` (whose default value is None) or `vsys` (whose
    default value is 'vsys1').

    Arguments:
        vsys: The vsys (default: 'vsys1').
        device_group: Panorama only - The device group (default: 'shared').
        vsys_dg: The param name if vsys and device_group are a shared param.
        vsys_importable: Either this or `vsys` should be specified.  For:
            - Interfaces
            - VLANs
            - Virtual Wires
            - Virtual Routers
        rulebase: This is a policy of some sort.
        template: Panorama - The template name.
        template_stack: Panorama - The template stack name.
        with_classic_provider_spec(bool): Include the ip_address, username,
            password, api_key, and port params in the base spec, and make
            the "provider" param optional.
        with_state(bool): Include the standard 'state' param.
        argument_spec(dict): The argument spec to mixin with the
            generated spec based on the given parameters.
        required_one_of(list): List of lists to extend into required_one_of.
        min_pandevice_version(tuple): Minimum pandevice version allowed.
        min_panos_version(tuple): Minimum PAN-OS version allowed.
        panorama_error(str): The error message if the device is Panorama.
        firewall_error(str): The error message if the device is a firewall.

    Returns:
        ConnectionHelper
    """
    helper = ConnectionHelper(
        min_pandevice_version, min_panos_version,
        panorama_error, firewall_error)
    req = []
    # Base spec: the "provider" dict is the preferred way to authenticate.
    spec = {
        'provider': {
            'required': True,
            'type': 'dict',
            'required_one_of': [['password', 'api_key'], ],
            'options': {
                'ip_address': {'required': True},
                'username': {'default': 'admin'},
                'password': {'no_log': True},
                'api_key': {'no_log': True},
                'port': {'default': 443, 'type': 'int'},
                'serial_number': {'no_log': True},
            },
        },
    }

    if with_classic_provider_spec:
        # Relax the provider requirements and mirror its options as
        # deprecated top-level params; enforce "one of" at the top level.
        spec['provider']['required'] = False
        spec['provider']['options']['ip_address']['required'] = False
        del(spec['provider']['required_one_of'])
        spec.update({
            'ip_address': {'required': False},
            'username': {'default': 'admin'},
            'password': {'no_log': True},
            'api_key': {'no_log': True},
            'port': {'default': 443, 'type': 'int'},
        })
        req.extend([
            ['provider', 'ip_address'],
            ['provider', 'password', 'api_key'],
        ])

    if with_state:
        spec['state'] = {
            'default': 'present',
            'choices': ['present', 'absent'],
        }

    # vsys_dg supersedes separate vsys / device_group params.
    if vsys_dg is not None:
        if isinstance(vsys_dg, bool):
            param = 'vsys_dg'
        else:
            param = vsys_dg
        spec[param] = {}
        helper.vsys_dg = param
    else:
        if vsys is not None:
            if isinstance(vsys, bool):
                param = 'vsys'
            else:
                param = vsys
            spec[param] = {'default': 'vsys1'}
            helper.vsys = param
        if device_group is not None:
            if isinstance(device_group, bool):
                param = 'device_group'
            else:
                param = device_group
            spec[param] = {'default': 'shared'}
            helper.device_group = param

    if vsys_importable is not None:
        if vsys is not None:
            raise KeyError('Define "vsys" or "vsys_importable", not both.')
        if isinstance(vsys_importable, bool):
            param = 'vsys'
        else:
            param = vsys_importable
        spec[param] = {}
        helper.vsys_importable = param

    if rulebase is not None:
        if isinstance(rulebase, bool):
            param = 'rulebase'
        else:
            param = rulebase
        spec[param] = {
            'default': None,
            'choices': ['pre-rulebase', 'rulebase', 'post-rulebase'],
        }
        helper.rulebase = param

    if template is not None:
        if isinstance(template, bool):
            param = 'template'
        else:
            param = template
        spec[param] = {}
        helper.template = param

    if template_stack is not None:
        if isinstance(template_stack, bool):
            param = 'template_stack'
        else:
            param = template_stack
        spec[param] = {}
        helper.template_stack = param

    # Mix in the module's own spec, refusing clashes with helper params.
    if argument_spec is not None:
        for k in argument_spec.keys():
            if k in spec:
                raise KeyError('{0}: key used by connection helper.'.format(k))
            spec[k] = argument_spec[k]

    if required_one_of is not None:
        req.extend(required_one_of)

    # Done.
    helper.argument_spec = spec
    helper.required_one_of = req

    return helper
As a unique investment opportunity, Variable Annuity attracts some charges. You need to know more about them before you think of investing. Several insurance companies in the US have come into the business. There’s every need for you to be properly enlightened before you sign any kind of annuity contract with any of the insurance companies. In most cases the charges reduce the value of your account in the Variable Annuity plan. They can also reduce the value of your investment returns. You need to know more about such charges. Let’s examine them now. This is the fee you pay when you want to withdraw money from a Variable annuity within a certain period of time. In most cases, the charge comes after purchase payment has been made up to 6 to 10 years. Your insurance company issues surrender charge as part of commission they require. It’s also known as sales charge. It’s normally used to pay a commission to your financial professional for selling the Variable annuity to you. The Surrender Charge is simply a percentage of the amount you withdraw. It normally comes down as the years roll by. The declining period is usually known as “surrender period”. This is a kind of charge that amounts to a certain percentage of your account value. Your insurance company uses the charge to compensate itself for the insurance risks it passed through while the annuity lasts. The gain made from this charge is also used in paying your insurer’s cost of selling the annuity. This may include the commission paid to your financial professional for selling the annuity to you. This can also be known as handling fees. Your insurer deducts the fees to cover the period of record-keeping while the Variable annuity lasts. The fees also cover any other expenses made during the period of administration. In most cases, the charge may be a flat maintenance fee for your account. It’s not much. It ranges as from $25 to $30 per annum. 
The above charges are the basic fees you’ll always come in contact with when you go for a Variable annuity. However, there may be other underlying fund charges which may spring up. They are part of the charges that come with some mutual funds which you may be interested in. Such fees are usually deducted indirectly. There are also other special charges that come with various features of Variable annuity. Some of the charges may be attached to death benefits, guaranteed minimum benefit, long-term care insurance and so on. They are likely to attract some fees based on the prevailing condition that may apply. In any case, there’s always the need to work with a reliable insurance company when searching for variable annuities. Some of the insurance companies in the US that offer annuity packages do have hidden fees which you may never know at the initial stages. You have to take your time to make proper inquiries before you agree to sign any contract deal. If you’re confused, always make sure you consult a reliable financial adviser.
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-04-17 03:26 from __future__ import unicode_literals from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('cms', '0015_operasprimas_video'), ] operations = [ migrations.AlterModelOptions( name='cortometrajes', options={'ordering': ['order'], 'verbose_name': 'Cortometraje', 'verbose_name_plural': 'Cortometrajes'}, ), migrations.AlterModelOptions( name='operasprimasentries', options={'ordering': ['order'], 'verbose_name': 'Opera Prima', 'verbose_name_plural': 'Operas Primas'}, ), migrations.RemoveField( model_name='operasprimas', name='second_description', ), migrations.AddField( model_name='cortometrajes', name='order', field=models.PositiveIntegerField(db_index=True, default=0, editable=False), ), migrations.AddField( model_name='cortometrajes', name='slug', field=models.CharField(default=django.utils.timezone.now, editable=False, max_length=200), preserve_default=False, ), migrations.AddField( model_name='operasprimasentries', name='order', field=models.PositiveIntegerField(db_index=True, default=0, editable=False), ), migrations.AddField( model_name='operasprimasentries', name='slug', field=models.CharField(default=1, editable=False, max_length=200), preserve_default=False, ), ]
Posted by CitysearchUser 12da9510280328 55 on January 17, 2008. Brought to you by rateitall. Rbi Baseball Academy is located at 97 Green St in Foxboro and has been in the baseball batting cage business since 2008.
import threading  # for running the move search concurrently
import logging

from alfabeta import *

######################################################################
# Computer player


class Racunalnik():
    """Computer player.

    Delegates move selection to a search algorithm running in a background
    thread so the GUI stays responsive, and polls for the result.
    """

    def __init__(self, gui, algoritem):
        self.gui = gui
        self.algoritem = algoritem  # algorithm that computes the move
        self.mislec = None  # worker thread that is "thinking"

    def igraj(self):
        """Start computing the move returned by the algorithm."""
        # Spawn a thread and hand it a *copy* of the game so the search
        # cannot corrupt the GUI's live game state.
        self.mislec = threading.Thread(
            target=lambda: self.algoritem.izracunaj_potezo(self.gui.igra.kopija()))
        self.mislec.start()
        # Begin polling for the computed move.
        self.gui.plosca.after(100, self.preveri_potezo)

    def preveri_potezo(self):
        """Check every 100 ms whether the algorithm has found a move."""
        poteza = self.algoritem.poteza
        if poteza is not None:
            # self.algoritem.poteza is a pair (i, j); povleci_potezo takes
            # i and j separately, hence the *poteza unpacking.
            self.gui.povleci_potezo(*poteza)
            # The worker thread is no longer active, so "forget" it.
            self.mislec = None
        else:
            # No move yet; check again in 100 ms.
            self.gui.plosca.after(100, self.preveri_potezo)

    def prekini(self):
        """Abort the computer's thinking (called by the GUI when needed)."""
        if self.mislec:
            # Tell the algorithm it must stop thinking ...
            self.algoritem.prekini()
            # ... then wait for the thread to terminate.
            self.mislec.join()
            self.mislec = None

    def klik(self, i, j):
        """Handle a user click while the computer is on the move.

        The computer ignores the user's clicks.
        """
        pass
Offenburg – Manual drying is tedious and time-consuming – and a matter of the past thanks to HOBART’s new TOP-DRY drying function. At the INTERNORGA, the leading trade show for the catering trade from 15 to 19 March, the world market leader for commercial warewashing presents its innovative undercounter dishwasher with the innovative TOP-DRY drying technology. Unique in the commercial sector, this functionality ensures that dishes and glasses are ready for re-use immediately after washing, without any additional drying or polishing. This saves the staff a lot of time and effort, in particular during the peak hours. Using hydro-thermal energy storage, the TOP-DRY technology converts the moist air to hot drying air after the end of rinsing and returns it to the washing chamber. Thus, the moisture can no longer deposit on the wash ware. The HOBART glasswashers and dishwashers provide another three innovative highlights for clean, ecological and smart warewashing. The machine feature VAPOSTOP² prevents steam from escaping not only during the washing and drying process but also when the dishwasher door is opened. This avoids discomfort for the guests sitting close to the bar and negative effects on the room climate. Besides, the new machines are particularly silent which makes them ideal for use at the bar counter. The VISIOTRONIC-TOUCH features a colour touch display as well as the time-proven single-button operation, which is another essential selling point. Since time pressure and stress are often part of everyday work in the catering trade, it is essential that the dishwasher eases the workload of the staff as much as possible: High operator comfort is required to avoid faulty operation and to keep the time needed for instruction as short as possible. At the push of a button, the HOBART WASHSMART app gives comprehensive information on the status of the new glasswashers and dishwashers. 
The end customer is for example informed well in advance of pending maintenance dates and can see the system information or chemicals consumption. With the help of this data, the user can avoid standstill, re-order consumables directly via the app, or give the service technician some advance information before an on-site call. With the help of the troubleshooting guide comprised in the HOBART app, users can remedy minor faults themselves. If the problem cannot be rectified, the user can contact the factory customer service or a qualified service partner directly via the HOBART app. Specialised dealers can now use a new partner portal which offers a variety of helpful functions. Subject to the end customer’s consent, HOBART dealers can link with the customer’s dishwashers using the online platform. This means that the dealer connects to the end customer’s machines via the Internet, allowing the dealer for example to identify faults before a repair becomes necessary. Furthermore, the HOBART partners receive an overview of the machine status, information on the current settings and the hygiene protocol, and an overview of the service and consumables orders. HOBART customers benefit in multiple ways from the further development of the HOBART rack-type dishwashers, such as enhanced efficiency and economy, easier handling, and improved drying. Other advantages are the automatic self-cleaning programme and the innovative RACK-FLOW sensor system, which doubles the rack volume the machine can yield in continuous one-man operation. Besides being extremely reliable and providing perfect washing results, the new dishwashers are also intelligent. The smart machines detect for example empty sections or different types of wash ware and automatically adjust to the varying conditions. This saves operating costs and valuable resources.
from bitmovin.utils import Serializable
from . import AbstractFilter


class UnsharpFilter(AbstractFilter, Serializable):
    """Unsharp (sharpen) video filter resource.

    Attribute names are intentionally camelCase: the Serializable base class
    serializes instance attributes directly into the API's JSON fields.
    """

    def __init__(self, name=None, luma_matrix_horizontal_size=None, luma_matrix_vertical_size=None,
                 luma_effect_strength=None, chroma_matrix_horizontal_size=None,
                 chroma_matrix_vertical_size=None, chroma_effect_strength=None, id_=None, custom_data=None,
                 description=None):
        super().__init__(id_=id_, custom_data=custom_data, name=name, description=description)
        self.lumaMatrixHorizontalSize = luma_matrix_horizontal_size
        self.lumaMatrixVerticalSize = luma_matrix_vertical_size
        self.lumaEffectStrength = luma_effect_strength
        self.chromaMatrixHorizontalSize = chroma_matrix_horizontal_size
        self.chromaMatrixVerticalSize = chroma_matrix_vertical_size
        self.chromaEffectStrength = chroma_effect_strength

    def serialize(self):
        # All JSON fields live in camelCase attributes picked up by the
        # Serializable base class; nothing extra to add here.
        serialized = super().serialize()
        return serialized

    @classmethod
    def parse_from_json_object(cls, json_object):
        """Build a filter instance from an API JSON object.

        Raises KeyError if the mandatory 'id' field is missing; all other
        fields default to None.
        """
        id_ = json_object['id']
        name = json_object.get('name')
        description = json_object.get('description')
        luma_matrix_horizontal_size = json_object.get('lumaMatrixHorizontalSize')
        luma_matrix_vertical_size = json_object.get('lumaMatrixVerticalSize')
        luma_effect_strength = json_object.get('lumaEffectStrength')
        chroma_matrix_horizontal_size = json_object.get('chromaMatrixHorizontalSize')
        chroma_matrix_vertical_size = json_object.get('chromaMatrixVerticalSize')
        chroma_effect_strength = json_object.get('chromaEffectStrength')

        # Fix: use cls(...) rather than hard-coding UnsharpFilter so that
        # subclasses calling this classmethod are parsed into their own type.
        unsharp_filter = cls(
            name=name, id_=id_, description=description,
            luma_matrix_horizontal_size=luma_matrix_horizontal_size,
            luma_matrix_vertical_size=luma_matrix_vertical_size,
            luma_effect_strength=luma_effect_strength,
            chroma_matrix_horizontal_size=chroma_matrix_horizontal_size,
            chroma_matrix_vertical_size=chroma_matrix_vertical_size,
            chroma_effect_strength=chroma_effect_strength
        )

        return unsharp_filter
After going to see the excellent Radical Bloomsbury show at Brighton Museum & Art Gallery last week I was moved to take a trip to Firle, East Sussex to visit Charleston, the country home of artists Vanessa Bell and Duncan Grant which became a meeting place for their friends – the writers, painters and intellectuals known as the Bloomsbury group. Charleston is an 18th century farmhouse with a walled garden set at the foot of the South Downs. Each room inside is full of paintings, textiles and ceramics including work by Renoir, Picasso and Delacroix as well as furniture and objects from the Omega Workshops. However, I was most interested in seeing the hand-painted embellishments by Bell and Grant who (inspired by Italian fresco painting and the Post-Impressionists) decorated the walls, wonky doors and furniture. The beautiful stencilled dark grey walls in the dining room were particularly impressive, as was the richly painted large circular Omega dining table. I also loved the patched heavy curtains made of several long pieces of sewn-together contrasting fabrics, and the fact that the makeshift curtain poles were dusty with even a cobweb or two. Another delight was Quentin Bell’s witty ceramic colander lampshades. I travelled there by train and bike using the Charleston website’s bike route guide (I took the shortest route from Berwick station) though I’d recommend avoiding cycling along the A27 and taking one of the more circuitous off-road routes if you can. Pedalling along the country roads with cars and the odd lorry whizzing past me at high speed I took courage from imagining Virginia Woolf out on her bike, probably cycling along the same roads to and from the house all those years ago.
# This is a software index handler that gives a score based on the # number of mentions in SlideShare. It uses the SlideShare API: # http://www.slideshare.net/developers/documentation # # Inputs: # - Identifier (String) # # Outputs: # - score (Number) # - description (String) # # Notes: this handler treats the software identifier as a string, # even if it is a URL represented as a string. The behaviour of the # SlideShare API for this has not been tested import time, urllib, urllib2 from hashlib import sha1 from bs4 import BeautifulSoup SEARCH_URL = 'https://www.slideshare.net/api/2/search_slideshows' MATCH_STRING = 'TotalResults' class slideshare_handler: def get_score(self, identifier, key, secret, **kwargs): """ Return the number of mentions in SlideShare and a descriptor Needs an API key, which can be obtained here: http://www.slideshare.net/developers/applyforapi """ ts = int(time.time()) strts = str(ts) params = { 'api_key' : key, 'ts' : strts, 'hash' : sha1(secret+strts).hexdigest(), 'q' : identifier, } params.update(kwargs) response = urllib2.urlopen(SEARCH_URL + '?' + urllib.urlencode(params)) soup = BeautifulSoup(response, 'xml') return soup.find(MATCH_STRING).string def get_description(self): return 'Score based on number of mentions of software identifier in SlideShare'
No matter where I look, it seems as though everyone is coming down with some sort of illness. While the traditional chicken noodle soup is often the first food-related fix for those under the weather, it can get old fast. Luckily, there's another option with the same healthy chicken broth, filling protein and warming properties. Enter this Greek Egg Lemon Soup. Not only is super simple to make, but it's full of vitamin C to help you move past your cold. Give it a try tonight, and let me know your thoughts. 1. In a large saucepan, bring the broth to a boil. 2. Add the orzo, and cook until tender or about 8 minutes. Season with the salt and pepper and remove from heat. 3. While the orzo cooks, in a medium bowl, whisk together the eggs and lemon juice until smooth. 4. Ladle about 1 cup of the hot broth into the egg-and-lemon mixture, then whisk to combine. 5. Add the mixture back to the saucepan. Stir until the soup becomes opaque and thickens as the eggs cook. Gently reheat soup on medium low but do not allow to boil. Add more salt and pepper, if desired, serve and enjoy. With each new year come new resolutions, and you're thinking of ways to get your body in top shape for all of the adventures on which you are about to embark. Luckily, a few small hacks can help you meet your goals and live a little healthier this week. Class Description: Round out your CTF training with our 30-minute treadmill workout focusing on speed, incline and endurance. The class is designed for maximum benefits in a short format with a focus on increasing overall running performance.These purposefully structured intervals will help you achieve your running goals more quickly than just logging the miles. Open to all fitness levels; these classes are scalable for novice and marathoners alike. Hold on, guys. I need to catch my breath. CrossTown Fitness* just added a new class to their roster: Total Body Run. This class has a short 30-minute format focusing on speed, incline and endurance. 
Instead of tacking on miles, this class’s structured intervals aim to help participants achieve running goals faster. You better believe I tried this out for myself. Now I am not a huge fan of treadmills, and will generally choose an outdoor run over an indoor run because dreadmills are so boring! No amount of music, TV, podcasts, or audiobooks have ever been able to make me enjoy my time on this contraption. But, given the short, 30-minute format of this new offering, I went in with an open mind. After a few climbs we went into speed intervals where we pushed ourselves to hold a harder effort pace for 60-90 seconds before coming back down to that easy pace and doing it all over again. I actually pushed myself here and was surprised at how fast I was running without maxing my heart rate. The time passed quickly, and for once I never begged, "please let this end". Have you ever taken a group treadmill class? I hadn’t before, and was pleasantly surprised to find out I didn’t mind it nearly as much as I thought I would! *I attended a free Total Body Run media preview class at CrossTown Fitness. All opinions are my own, as always. Valentine's Day is just around the corner. I don't know about you, but personally, I find the nice dinner at a fancy restaurant plan EXTREMELY played out. Why not switch it up this year? Though personally, I am incredibly and undoubtedly single, I'm spreading the love with some of my favorite active winter date ideas for you to do in Chicago. As per my previous statement, even if you don't have a hot date this year, these work wonderfully for a Galentine's Day fiesta. Now for this idea, you have options. My favorite places to ice skate in the city are Lincoln Park Zoo and Millennium Park, both of which are perfect for Valentine's Day. As an added bonus, they are both super affordable and offer skate rentals. Could this get any easier?
Provided the temperature is below 45 degrees, Kaiser Tiger turns their massive outdoor patio into the very festive ice curling rink that many have come to know and love. Grab some beers and get your 'curl on'! While this isn't something you can find in Chicago proper, there are plenty of options to found with a short drive. No car? There are plenty of companies and meetups that schedule day trips and transportation to local slopes. Find a list of nearby resorts here. But Jess, viewing art isn't a particularly active pursuit. If you've never been to the Art Institute before, let it be known that this museum is MASSIVE. Whether you want to see just a few or all of their many exhibits, you are guaranteed to spend most of your visit walking. Just be sure to wear a pair of comfortable shoes. Your feet will thank you. Did I miss any of your favorite winter date ideas? Share the love, and let me know in the comments below! Kindness: an underrated virtue that has the potential to positively change both your life and the lives of those around you. Today, I turned to my coworker and asked, “Do you ever feel like we collectively become less kind as we age?” When I say hello to the people I have not previously known, they often act like I don’t exist. Even people I am well acquainted with constantly fail to show up when we have plans together without taking the time to inform me their intentions have changed. And it’s not just my personal world that feels crueler. Especially with this past presidential election, rudeness seems to have flourished in the world. One of the unfortunate benefits of social media is that it allows people to say things to each other that they would never say face to face, so anyone can go on rants to either be ignored or, worse, praised. While I can’t prove that the world is a meaner place, I can do my best to combat the crudeness. How? 
In the words of many wise people, by “killing them with kindness.” I’m trying to be a kinder and caring person, and I encourage you to do the same. Because why not go ahead and make someone’s day? Say please and thank you. Often. Never underestimate the power of a simple thank-you. Let pregnant and elderly people have your seat on the bus or train. It's really that simple. Open doors. Hold the door open for someone carrying a bunch of groceries or a suitcase. Empty the dishwasher and take out the trash. Why not be a better roommate? Don’t leave your dirty dishes in the sink. Put them in the dishwasher you just emptied. Treat your team to coffee in the morning. Surprise your team with their favorite coffee orders waiting on their desk when they arrive in the morning. Pick up after yourself. If you’re at a restaurant or café that doesn’t have a busser, put your dirty dishes in the designated area. Throw away your trash. Be a decent human being. Don’t leave your shopping cart in the middle of the parking lot. Wear or use a gift in the presence of the gifter. It will make their day knowing you appreciate their kind gift. Send an old friend flowers. If they live in another city, brighten their day with a bouquet. Give away the books you’ve read. Go through your bookshelves. Pull out any books you no longer want, and donate them to a local library or charity. Respond to emails, texts, and phone calls. Don’t ignore your friends and family. When someone says hello, say hi back. It’s just plain rude not to. Recycle and compost as much as you can. We should be kind to the earth, too! Be an active and thoughtful listener. When you ask people how they are, listen intently to their response. Leave a generous tip to someone who has done a great job. Give more compliments. A simple “I love your dress” can go a long way. Buy a sandwich for the homeless person in front of the grocery store. Put change in a meter that’s about to expire. Forget about a debt that someone owes you. 
Ask the person who was supposed to pay you back to do the same to someone else in the future. Stop complaining. Your positivity will encourage other people to do the same. Be present. A kind person is authentic and aware of how their actions can affect people. Be a positive impact rather than a negative one. Pay it forward—share this list with the people you care about! How are you going to show kindness today?
""" Computes normal modes for ethane. """ from os import mkdir from os.path import exists, join from sardine.universe import UniverseFactory from sardine.energy import BondEnergyFactory, AngleEnergyFactory, VDWEnergyFactory from sardine.energy import EnergyFunctionFactory, GradientFunctionFactory from sardine.nma import compute_hessian, compute_force_constant_matrix,\ compute_normal_modes, generate_mode_trajectory from sardine.trajectory import save_trajectory_to_pdb from sardine.minimize import BFGSMinimizer from sardine.util import coords_1d_to_2d PDB_FILENAME = "C2H6_ideal_trans_min_final.pdb" SF_FILENAME = "C2H6.sf" OUTPUT_DIR = "modes" def main(): if not exists(OUTPUT_DIR): mkdir(OUTPUT_DIR) uf = UniverseFactory() uf.load_atoms_from_file(PDB_FILENAME) universe = uf.create_universe() bond_energy_factory = BondEnergyFactory() bond_energy_factory.load_bonds_from_file(SF_FILENAME) bond_energy_func = bond_energy_factory.create_energy_func(num_atoms=len(universe)) bond_gradient_func = bond_energy_factory.create_gradient_func(num_atoms=len(universe)) angle_energy_factory = AngleEnergyFactory() angle_energy_factory.load_angles_from_file(SF_FILENAME) angle_energy_func = angle_energy_factory.create_energy_func() angle_gradient_func = angle_energy_factory.create_gradient_func() vdw_energy_factory = VDWEnergyFactory() vdw_energy_factory.load_vdw_from_file(SF_FILENAME) vdw_energy_func = vdw_energy_factory.create_energy_func() vdw_gradient_func = vdw_energy_factory.create_gradient_func() eff = EnergyFunctionFactory() eff.add_energy_term('bonds', bond_energy_func) eff.add_energy_term('angles', angle_energy_func) eff.add_energy_term('vdw', vdw_energy_func) energy_func = eff.create_energy_func( ['bonds', 'angles', 'vdw'], num_atoms=len(universe)) gff = GradientFunctionFactory() gff.add_gradient_term('bonds', bond_gradient_func) gff.add_gradient_term('angles', angle_gradient_func) gff.add_gradient_term('vdw', vdw_gradient_func) gradient_func = gff.create_gradient_func( 
['bonds', 'angles', 'vdw'], num_atoms=len(universe)) # ====================== # = Minimize structure = # ====================== minimizer = BFGSMinimizer(maxiter=200) X = universe.get_coords().flatten() energy_initial = energy_func(X) X_min, energy_min = minimizer.run_minimization( energy_func, gradient_func, X, num_atoms=len(universe), save_trajectory=True, noisy=True) print energy_initial, energy_min trajectory = minimizer.get_trajectory() save_trajectory_to_pdb('minimization.pdb', trajectory, universe, bond_energy_factory) print "Wrote minimization.pdb" # minimization output is a flat array. convert it to (N,3) array X_min = coords_1d_to_2d(X_min) # ======================== # = Compute normal modes = # ======================== M = universe.get_inv_sqrt_mass_matrix() H = compute_hessian(energy_func, X_min) F = compute_force_constant_matrix(H, M) normal_modes = compute_normal_modes(F, discard_trans_and_rot=True) mode_freqs = normal_modes.get_frequencies() with open(join(OUTPUT_DIR, 'eigen_values.txt'), 'w') as f: f.write("%s" % normal_modes.freq_to_str()) for i in xrange(len(mode_freqs)): mode_trajectory = generate_mode_trajectory( normal_modes, initial_coords=X_min, mode_number=i, peak_scale_factor=0.5) save_trajectory_to_pdb( join(OUTPUT_DIR, 'ethane_mode%02d.pdb') % (i+1), mode_trajectory, universe, bond_energy_factory) if __name__ == '__main__': main()
Companions in Bamaga 4876 QLD are definitely the most costly method to experience paid friendship. These companies have a tendency to be split in to high and also normal course companion solutions. These firms can bill prices of around EUR650 for 2 hrs with a companion. Listed below you will certainly discover info concerning 2 of the ideal high course solution companies in Bamaga 4876 QLD. There are a multitude of various Bamaga 4876 QLD companion firms running throughout the city, as well as it can be tough to understand which to rely on with such a fragile issue. While the majority of companies are credible as well as entirely expert, it is constantly beneficial to understand for certain which are excellent to handle.
# Imperialism remake
# Copyright (C) 2020 amtyurin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
import logging

from PyQt5 import QtGui

from imperialism_remake.base import constants
from imperialism_remake.server.models.workforce_action import WorkforceAction

logger = logging.getLogger(__name__)


class WorkforceToActionCursorMapper:
    """Maps each workforce type to the mouse cursors used for its actions.

    Cursors are loaded once from the scenario's workforce-action cursor
    settings: one cursor per (type, action) pair plus a per-type
    'not allowed' cursor.
    """

    def __init__(self, server_scenario):
        super().__init__()

        # _cursors[type][action] -> QCursor; _cursor_not_allowed[type] -> QCursor
        self._cursors = {}
        self._cursor_not_allowed = {}

        cursor_settings = server_scenario.get_workforce_action_cursor_settings()
        for wf_type, wf_settings in cursor_settings.items():
            self._cursors.setdefault(wf_type, {})

            self._cursors[wf_type][WorkforceAction.MOVE] = self._load_cursor(
                wf_settings['workforce_action_cursor_move'])
            self._cursors[wf_type][WorkforceAction.DUTY_ACTION] = self._load_cursor(
                wf_settings['workforce_action_cursor_duty_action'])
            self._cursor_not_allowed[wf_type] = self._load_cursor(
                wf_settings['workforce_action_cursor_not_allowed'])

    @staticmethod
    def _load_cursor(pixmap_file):
        # Build a QCursor from a pixmap stored in the map-icon graphics folder.
        return QtGui.QCursor(QtGui.QPixmap(
            constants.extend(constants.GRAPHICS_MAP_ICON_FOLDER, pixmap_file)))

    def get_cursor_of_type(self, workforce_type: int, action: WorkforceAction):
        """Return the cursor for the given workforce type and action."""
        return self._cursors[workforce_type][action]

    def get_cursor_not_allowed_of_type(self, workforce_type: int):
        """Return the 'not allowed' cursor for the given workforce type."""
        return self._cursor_not_allowed[workforce_type]
Technically these are the fruit of the pandanus, not the nuts but they are the size of a child's fist, hard, and heavy and they give a fair old bonk if they hit you on the head. And the shady bits move too! Once upon a time "SUP" was a contraction for the greeting "What's up?" Now of course it's "Stand Up Paddleboard". A year or two ago, some visiting friends from Europe were astonished to see them being ridden in the surf - we on the other hand were astonished that anyone would contemplate using them in still water!
"""Implementation of AppGroup API. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import fnmatch from treadmill import context from treadmill import schema class API: """Treadmill AppGroup REST api.""" def __init__(self): """init""" def _admin_app_group(): """Lazily return admin object.""" return context.GLOBAL.admin.app_group() @schema.schema({'$ref': 'app_group.json#/resource_id'}) def get(rsrc_id): """Get application configuration.""" result = _admin_app_group().get(rsrc_id) result['_id'] = rsrc_id return result @schema.schema( {'$ref': 'app_group.json#/resource_id'}, {'allOf': [{'$ref': 'app_group.json#/resource'}, {'$ref': 'app_group.json#/verbs/create'}]} ) def create(rsrc_id, rsrc): """Create (configure) application.""" _admin_app_group().create(rsrc_id, rsrc) return _admin_app_group().get(rsrc_id, dirty=True) @schema.schema( {'$ref': 'app_group.json#/resource_id'}, {'allOf': [{'$ref': 'app_group.json#/resource'}, {'$ref': 'app_group.json#/verbs/update'}]} ) def update(rsrc_id, rsrc): """Update application configuration.""" _admin_app_group().replace(rsrc_id, rsrc) return _admin_app_group().get(rsrc_id, dirty=True) @schema.schema({'$ref': 'app_group.json#/resource_id'}) def delete(rsrc_id): """Delete configured application.""" _admin_app_group().delete(rsrc_id) def _list(match=None): """List configured applications.""" if match is None: match = '*' app_groups = _admin_app_group().list({}) filtered = [ app_group for app_group in app_groups if fnmatch.fnmatch(app_group['_id'], match) ] return sorted(filtered, key=lambda item: item['_id']) self.get = get self.create = create self.update = update self.delete = delete self.list = _list
The last of my travels to Greece… Mykonos Island. We departed after breakfast at Crepe House which was along the main street. There are many savoury and sweet crepes to choose from. I had the cheesecake crepe, which came with lots of cream cheese and crumble. I felt they could do more about sprucing up the crepes, but they tasted pretty good. It was a busy day at the port. We took the Cosmote High Speed 5 ferry from Santorini to Mykonos. For one thing, this supposedly speedy ferry was late. Also, it was extremely dirty. Imagine menstruation stains on seats and rubbish everywhere. We had to put plastic bags on our seats.ABSOLUTELY GROSSED OUT! And after all the food posts, it is back to Greece. Do not worry, it is just this and Mykonos left and we are done! I am also glad to finish blogging about Greece, did not know it took such a long time. Where did we leave off the last time? I believe we had just arrived at Santorini the day before and booked a day tour on the island. We went for breakfast a minute’s walk away from our hotel at Corner Cafe. As usual, meals here, inclusive of breakfast, are pricey. My set of scrambled eggs and bacon, together with fresh orange juice cost 7.50 euro. Considering the serving size, it was worth the money compared to some of the other meals I had. Lo and behold! Check out our ferry to Santorini! Yes, imagine Superstar Virgo, multiple decks and all, with a souvenir shop, fast food restaurant, cafe and cushy seats! We simply could not believe this was our ferry. We totally imagined those rustic ferries that we take to Batam. What a pleasant surprise! We had to wake up at 4.50am to reach the port at 6.25am for boarding. It was a seven hour cruise with a few stops at the other islands, but time did not seem long when you are enjoying yourself! What a sight to wake up to! Breathtakingly beautiful! After eating a quick breakfast, we decided to explore the area. However, not many shops were opened at that time. 
And just as we were about to give up and head back to the hotel, we came across this provision store (did not take down the name) and found pretty handcrafted wood tableware and utensils for quite a steal! After a not-very-restful night of sleep (still not used to the timezone), we checked out of the hotel and went to see the famed Olympia archaeological site. I would not really recommend the site if you do not have a guide. As it was, it was hard to envision how it used to be like with the explanation from the guide and the ruins that were left. That aside, the area was vast and good for taking snapshots. Sorry for neglecting my blog! I must admit that I have been lazy and procrastinated when it came to sorting out the pictures and editing them. However, here is the first edition of my Greece trip. Just a short post from me! Hello everyone! I reached Delphi last night and it is beautiful! This is the view from my balcony. Initially the hotel was just alright and we thought that was it until we opened the windows and stepped out onto the balcony. Totally blown away by the view!
"""Helper utilities for a Telegram schedule bot.

Contains term-date helpers, schedule-event parsing/formatting,
Yandex.Rasp suburban-train answer builders, bot-message helpers and
hashing utilities.
"""

import hashlib
import json
import logging
import re
from datetime import datetime, timedelta, date

import requests
from telebot.apihelper import ApiException
from telebot.types import Message

from app.constants import (
    emoji, subject_short_types, week_day_number, months, reg_before_30,
    reg_only_30, reg_only_31, interval_off_answer, urls,
    yandex_error_answer, yandex_segment_answer, all_stations,
    ask_to_select_types_answer, updated_types_answer
)
from config import Config


def get_term_dates():
    """
    :return: Returns current term's start and end dates
    :rtype: tuple of date
    """
    today = datetime.today()
    if today.month in range(2, 8):
        # Spring term: February 1 .. August 1 of the current year.
        start_year = today.year
        end_year = today.year
        start_month = 2
        end_month = 8
    else:
        # Autumn term: August 1 .. February 1 (spans the new year, so
        # January belongs to the term that started the previous August).
        start_year = today.year - 1 if today.month < 2 else today.year
        end_year = today.year + 1 if today.month > 7 else today.year
        start_month = 8
        end_month = 2
    return (date(year=start_year, month=start_month, day=1),
            date(year=end_year, month=end_month, day=1))


def is_resit(event):
    """
    Checks event for resit type

    :param event: an element of `DayStudyEvents`
    :type event: dict
    :return: is resit
    :rtype: bool
    """
    # Resits/consultations/commissions are flagged only by keywords
    # inside the subject title.
    return ("пересдача" in event["Subject"]
            or "консультация" in event["Subject"]
            or "комиссия" in event["Subject"])


def get_resits_events(events):
    """
    Selects only resit events from events

    :param events: all elements of `DayStudyEvents`
    :type events: list
    :return: list of resit events
    :rtype: list
    """
    return [event for event in events if is_resit(event)]


def delete_resits_events(events):
    """
    Deletes all resits from events

    :param events: all elements of `DayStudyEvents`
    :type events: list
    :return: list of available events
    :rtype: list
    """
    return [event for event in events if not is_resit(event)]


def delete_cancelled_events(events):
    """
    Function to delete all cancelled events.

    :param events: all elements of `DayStudyEvents`
    :type events: list
    :return: list of available events
    :rtype: list
    """
    return [event for event in events if not event["IsCancelled"]]


def create_events_blocks(events):
    """
    Function to create list of events grouped by time.

    :param events: all (or available) elements of `DayStudyEvents`
    :type events: list
    :return: list of events grouped by time
    :rtype: list of list
    """
    event_blocks = []
    for i, event in enumerate(events):
        # Events with exactly the same start AND end time join the
        # previous block; otherwise a new block is started.
        if i and (event["Start"] == events[i - 1]["Start"]
                  and event["End"] == events[i - 1]["End"]):
            event_blocks[-1].append(event)
        else:
            event_blocks.append([event])
    return event_blocks


def datetime_from_string(dt_string):
    """
    Converts string to datetime object

    :param dt_string: datetime string (ISO-like; any "+TZ" or "Z"
        suffix is dropped, result is naive)
    :type dt_string: str
    :return: datetime object
    :rtype: datetime
    """
    return datetime.strptime(
        dt_string.split("+")[0].split("Z")[0], "%Y-%m-%dT%H:%M:%S"
    )


def get_key_by_value(dct, val):
    """
    Gets key by value from input dictionary

    :param dct: input dictionary
    :type dct: dict
    :param val: value in input dictionary (MUST BE)
    :return: suitable key (None if the value is absent)
    """
    for key, value in dct.items():
        if value == val:
            return key


def get_work_monday(is_next_week=False):
    """
    Returns date of current or next monday for Mon-Sat, next monday
    for Sunday

    :param is_next_week: (Optional) is for next week
    :type is_next_week: bool
    :return: monday date
    :rtype: date
    """
    today = date.today()
    # weekday(): Mon=0..Sun=6; for Sunday use -1 so the "current"
    # monday is actually tomorrow.
    delta = today.weekday() if today.weekday() != 6 else -1
    if is_next_week:
        delta -= 7
    return today - timedelta(days=delta)


def get_date_by_weekday_title(title, is_next_week=False):
    """
    Returns date for current or next week by day short title

    :param title: short weekday title (Russian)
    :type title: str
    :param is_next_week: (Optional) is for next week
    :type is_next_week: bool
    :return: date
    :rtype: date
    """
    work_monday = get_work_monday(is_next_week=is_next_week)
    delta = week_day_number[title] - 1
    return work_monday + timedelta(days=delta)


def datetime_to_string(date_value):
    """
    Converts date object to string

    :param date_value: date object
    :type date_value: date
    :return: date string, e.g. "1 января 2020"
    :rtype: str
    """
    return "{day} {month_title} {year}".format(
        day=date_value.day,
        month_title=get_key_by_value(months, date_value.month),
        year=date_value.year)


def text_to_date(text):
    """
    Checks if the text is a date then converts it to a date object
    or returns False

    :param text: some text
    :type text: str
    :return: date object or False
    :rtype: date or False
    """
    # NOTE(review): group indices (0, 3, 5) depend on the structure of
    # the regexes in app.constants — confirm if those patterns change.
    regs = [reg_before_30, reg_only_30, reg_only_31]
    for reg in regs:
        res = re.search(reg, text)
        if res:
            groups = res.groups()
            day = int(groups[0])
            if groups[3]:
                # Month may be given numerically or as a Russian title.
                if groups[3].isdecimal():
                    month = int(groups[3])
                else:
                    month = months[groups[3]]
            else:
                month = date.today().month
            year = int(groups[5]) if groups[5] else date.today().year
            try:
                return date(day=day, month=month, year=year)
            except ValueError:
                # e.g. "31 февраля" — matched the regex but not a real date.
                return False
    return False


def text_to_interval(text):
    """
    Checks if text is a dates interval and converts it to two date
    objects or returns False

    :param text: some text
    :type text: str
    :return: two date objects or False
    :rtype: tuple of date or False
    """
    dates = text.split("-")
    if len(dates) == 2:
        from_date = text_to_date(dates[0].strip())
        to_date = text_to_date(dates[1].strip())
        # Interval must be strictly increasing.
        if from_date and to_date and from_date < to_date:
            return from_date, to_date
    return False


def create_interval_off_answer(from_date, to_date):
    """
    Creates interval off answer for dates

    :param from_date: first date
    :type from_date: date
    :param to_date: second date
    :type to_date: date
    :return: interval off answer
    :rtype: str
    """
    return interval_off_answer.format(
        emoji["sleep"],
        datetime_to_string(from_date),
        datetime_to_string(to_date)
    )


def is_correct_educator_name(text):
    """
    Checks if the text is a plausible educator name (alphanumeric
    after removing dots, dashes and spaces)

    :param text: input text
    :type text: str
    :return: True or False
    :rtype: bool
    """
    return text.replace(".", "").replace("-", "").replace(" ", "").isalnum()


def parse_event_time(event):
    """
    Formats event's time interval as "<clock> HH:MM–HH:MM".

    :param event: an element of `DayStudyEvents`
    :type event: dict
    :return: formatted time interval
    :rtype: str
    """
    # Parse each timestamp once instead of twice per field.
    start = datetime_from_string(event["Start"]).time()
    end = datetime_from_string(event["End"]).time()
    return "{0} {1:0>2}:{2:0>2}{3}{4:0>2}:{5:0>2}".format(
        emoji["clock"], start.hour, start.minute,
        emoji["en_dash"], end.hour, end.minute
    )


def parse_event_subject(event):
    """
    Formats event's subject as "TYPE – name" using short type titles
    where known.

    :param event: an element of `DayStudyEvents`
    :type event: dict
    :return: formatted subject
    :rtype: str
    """
    answer = ""
    subject_name = ", ".join(event["Subject"].split(", ")[:-1])
    subject_type = event["Subject"].split(", ")[-1]
    # Keep only the first two words of the type title.
    stripped_subject_type = " ".join(subject_type.split()[:2])
    if stripped_subject_type in subject_short_types.keys():
        answer += subject_short_types[stripped_subject_type] \
                  + " " + emoji["en_dash"] + " "
    else:
        answer += subject_type.upper() \
                  + " " + emoji["en_dash"] + " "
    answer += subject_name
    return answer


def parse_event_location(location, full_place=True,
                         have_chosen_educator=False,
                         chosen_educator=None):
    """
    Formats a single event location with its educators.

    :param location: an element of `EventLocations`
    :type location: dict
    :param full_place: use the full display name (else only the last
        comma-separated part)
    :type full_place: bool
    :param have_chosen_educator: filter locations by chosen educators
    :type have_chosen_educator: bool
    :param chosen_educator: set of chosen educator names (required
        when `have_chosen_educator` is True)
    :type chosen_educator: set or None
    :return: formatted location ("" when empty or filtered out)
    :rtype: str
    """
    answer = ""
    if location["IsEmpty"]:
        return answer
    # Skip the location when filtering and its educators are not all chosen.
    if have_chosen_educator and not chosen_educator.issuperset(
            {edu["Item2"].split(", ")[0] for edu in location["EducatorIds"]}
    ):
        return answer
    if full_place:
        location_name = location["DisplayName"].strip(", ").strip()
    else:
        location_name = location["DisplayName"].split(", ")[-1].strip()
    answer += location_name
    if location["HasEducators"]:
        educators = [educator["Item2"].split(", ")[0]
                     for educator in location["EducatorIds"]]
        if educators:
            answer += " <i>({0})</i>".format("; ".join(educators))
    return answer


def parse_event_sub_loc_edu(event, full_place):
    """
    Formats an event's subject plus all its locations/educators.

    :param event: an element of `DayStudyEvents`
    :type event: dict
    :param full_place: use full location display names
    :type full_place: bool
    :return: formatted block (HTML)
    :rtype: str
    """
    answer = "<b>" + parse_event_subject(event) + "</b>\n"
    for location in event["EventLocations"]:
        loc_answer = parse_event_location(location, full_place)
        answer += loc_answer
        if loc_answer:
            # Mark changed locations/educators with a warning sign.
            if event["LocationsWereChanged"] or \
                    event["EducatorsWereReassigned"]:
                answer += " " + emoji["warning"]
            answer += "\n"
    answer += "\n"
    return answer


def create_schedule_answer(event, full_place):
    """
    Creates full schedule answer for one event.

    :param event: an element of `DayStudyEvents`
    :type event: dict
    :param full_place: use full location display names
    :type full_place: bool
    :return: schedule answer (HTML)
    :rtype: str
    """
    answer = ""
    if event["IsAssigned"]:
        answer += emoji["new"] + " "
    answer += parse_event_time(event)
    if event["TimeWasChanged"]:
        answer += " " + emoji["warning"]
    answer += "\n" + parse_event_sub_loc_edu(event, full_place)
    return answer


def create_master_schedule_answer(day_info):
    """
    Creates a schedule answer for an educator's (master) day.

    :param day_info: day data with `DayString` and `DayStudyEvents`
    :type day_info: dict
    :return: schedule answer (HTML)
    :rtype: str
    """
    answer = "{0} {1}\n\n".format(emoji["calendar"], day_info["DayString"])
    for event in day_info["DayStudyEvents"]:
        answer += "{0} {1} <i>({2})</i>\n".format(
            emoji["clock"], event["TimeIntervalString"],
            "; ".join(event["Dates"])
        )
        answer += "<b>"
        subject_type = event["Subject"].split(", ")[-1]
        # Keep only the first two words of the type title (same rule
        # as parse_event_subject).
        stripped_subject_type = " ".join(subject_type.split()[:2])
        if stripped_subject_type in subject_short_types.keys():
            answer += subject_short_types[stripped_subject_type] \
                      + " " + emoji["en_dash"] + " "
        else:
            answer += subject_type.upper() \
                      + " " + emoji["en_dash"] + " "
        answer += ", ".join(
            event["Subject"].split(", ")[:-1]
        ) + "</b>\n"
        for location in event["EventLocations"]:
            location_name = location["DisplayName"]
            answer += location_name + " <i>({0})</i>\n".format(
                "; ".join(name["Item1"]
                          for name in event["ContingentUnitNames"])
            )
        answer += "\n"
    return answer


def get_hours_minutes_by_seconds(seconds):
    """
    Gets hours and minutes by input seconds

    :param seconds: seconds
    :type seconds: int
    :return: hours and minutes
    :rtype: tuple
    """
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return h, m


def get_yandex_raw_data(from_station, to_station, for_date):
    """
    Gets yandex raw data and status code

    :param from_station: `from` station yandex code
    :type from_station: str
    :param to_station: `to` station yandex code
    :type to_station: str
    :param for_date: date for which data should be received
    :type for_date: date
    :return: status code and raw json data
    :rtype: tuple
    """
    params = {
        "from": from_station,
        "to": to_station,
        "apikey": Config.YANDEX_API_KEY,
        "date": for_date,
        "format": "json",
        "lang": "ru_RU",
        "transport_types": "suburban"
    }
    url = urls["ya_search"]
    req = requests.get(url, params=params)
    return req.status_code, req.json()


def parse_yandex_segment(segment):
    """
    Parses segments data to `yandex_segment_answer`

    :param segment: segment's json data from api.rasp.yandex's search
        method
    :type segment: dict
    :return: parsed yandex segment answer
    :rtype: str
    """
    departure_datetime = datetime_from_string(segment["departure"])
    arrival_datetime = datetime_from_string(segment["arrival"])
    # Callers filter out already-departed segments, so the timedelta
    # here is non-negative and `.seconds` is safe.
    hours, minutes = get_hours_minutes_by_seconds(
        (departure_datetime - datetime.now()).seconds
    )
    # Pick the urgency marker by how much time is left until departure.
    if hours:
        time_mark = emoji["blue_diamond"]
        left_time = "{0} ч {1} мин".format(hours, minutes)
    elif 15.0 < minutes < 60.0:
        time_mark = emoji["orange_diamond"]
        left_time = "{0} мин".format(minutes)
    else:
        time_mark = emoji["runner"]
        left_time = "{0} мин".format(minutes)

    if segment["thread"]["express_type"]:
        train_mark = emoji["express"]
    else:
        train_mark = emoji["train"]

    if segment["tickets_info"]:
        price = str(
            segment["tickets_info"]["places"][0]["price"]["whole"]
        )
        if segment["tickets_info"]["places"][0]["price"]["cents"]:
            price += ",{0}".format(
                segment["tickets_info"]["places"][0]["price"]["cents"]
            )
    else:
        price = "?"

    # NOTE: the template's placeholder is (historically) named
    # `lef_time` — keep the keyword matching the template.
    return yandex_segment_answer.format(
        time_mark=time_mark,
        lef_time=left_time,
        train_mark=train_mark,
        dep_time=departure_datetime.time().strftime("%H:%M"),
        arr_time=arrival_datetime.time().strftime("%H:%M"),
        price=price,
        ruble_sign=emoji["ruble_sign"]
    )


def create_suburbans_answer(from_code, to_code, for_date, limit=3):
    """
    Creates yandex suburbans answer for date by stations codes

    :param from_code: `from` yandex station code
    :type from_code: str
    :param to_code: `to` yandex station code
    :type to_code: str
    :param for_date: date for which data should be received
    :type for_date: date
    :param limit: limit of segments in answer
    :type limit: int
    :return: tuple with `answer`, `is_tomorrow` and `is_error` data
    :rtype: tuple
    """
    code, data = get_yandex_raw_data(from_code, to_code, for_date)
    if code != 200:
        return yandex_error_answer, False, True
    from_title = data["search"]["from"]["title"]
    to_title = data["search"]["to"]["title"]

    answer = ""
    for segment in data["segments"]:
        if len(answer.split("\n\n")) > limit:
            break
        # Skip trains that have already departed.
        if datetime_from_string(segment["departure"]) >= datetime.now():
            answer += parse_yandex_segment(segment)

    if answer:
        answer = "<b>{0}</b> => <b>{1}</b>\n\n".format(
            from_title, to_title
        ) + answer
        is_tomorrow = False
    elif for_date <= date.today():
        # No trains left today — fall back to tomorrow's schedule.
        # Guarded (single retry) to avoid infinite recursion when
        # tomorrow's schedule is empty too.
        for_date = date.today() + timedelta(days=1)
        answer += create_suburbans_answer(
            from_code, to_code, for_date, limit=5
        )[0]
        is_tomorrow = True
    else:
        is_tomorrow = True

    return answer, is_tomorrow, False


def get_station_title_from_text(text, is_end=False, is_full=False):
    """
    Gets start/end station title from bot's answer text

    :param text: bot's answer text
    :type text: str
    :param is_end: is get end station title
    :type is_end: bool
    :param is_full: if the text is full answer created by
        `create_suburbans_answer` function
    :type is_full: bool
    :return: station title
    :rtype: str
    """
    # Full answers look like "<b>From</b> => <b>To</b>"; partial
    # answers look like "Начальная: From\nКончная: To\n…".
    if is_full:
        first_i, last_i, split_by = 0, int(is_end), " => "
    else:
        first_i, last_i, split_by = int(is_end), -1, ": "
    return text.split("\n")[first_i].split(split_by)[last_i]


def get_station_code_from_text(text, is_end=False, is_full=False):
    """
    Gets start/end station yandex code from bot's answer text

    :param text: bot's answer text
    :type text: str
    :param is_end: is get end station title
    :type is_end: bool
    :param is_full: if the text is full answer created by
        `create_suburbans_answer` function
    :type is_full: bool
    :return: yandex station code
    :rtype: str
    """
    return all_stations[get_station_title_from_text(text, is_end, is_full)]


def add_end_station(text, end_title):
    """
    Changes answer text by adding end station title

    :param text: bot's answer text
    :type text: str
    :param end_title: end station title
    :type end_title: str
    :return: answer text
    :rtype: str
    """
    # NOTE(review): "Кончная" is a typo for "Конечная", but other
    # handlers may match this exact message text — fix in one place
    # together with those handlers.
    return "Начальная: <b>{0}</b>\nКончная: <b>{1}</b>\nВыбери день:".format(
        get_station_title_from_text(text), end_title
    )


def update_suburbans_answer(text, show_more=False, for_tomorrow=False):
    """
    Updates suburbans answer created by `create_suburbans_answer`
    function

    :param text: bot's answer text
    :type text: str
    :param show_more: is need to show future trails
    :type show_more: bool
    :param for_tomorrow: is need to show trails for tomorrow
    :type for_tomorrow: bool
    :return: tuple with `answer`, `is_tomorrow` and `is_error` data
    :rtype: tuple
    """
    return create_suburbans_answer(
        from_code=get_station_code_from_text(text, is_full=True),
        to_code=get_station_code_from_text(text, is_full=True, is_end=True),
        for_date=date.today() + timedelta(days=int(for_tomorrow)),
        limit=100 if show_more else (7 if for_tomorrow else 3)
    )


def bot_waiting_for(msg, waiting_bot_text):
    """
    Checks if the message is a reply to `waiting_bot_text`

    :param msg: bot's message
    :type msg: Message
    :param waiting_bot_text: text which bot sent
    :type waiting_bot_text: str
    :return: True or False
    :rtype: bool
    """
    reply = msg.reply_to_message
    return bool(
        reply
        and reply.from_user.username == Config.BOT_NAME
        and reply.text == waiting_bot_text
    )


def get_block_data_from_block_answer(text):
    """
    Gets count of blocks, current block number and schedule's date
    from created block answer by `User.get_block_answer()`

    :param text: block answer
    :type text: str
    :return: count of blocks, current block number and schedule's date
    :rtype: tuple
    """
    rows = text.split("\n\n")
    current_block_num, blocks_count = list(map(int, rows[0].split()[::2]))
    # Day title is in the last word of the second row, wrapped in brackets.
    for_date = get_date_by_weekday_title(rows[1].split()[-1][1:-1])
    return blocks_count, current_block_num, for_date


def get_event_data_from_block_answer(text, idx):
    """
    Gets event's day short title, time, type, name and educators from
    created block answer by `User.get_block_answer()`

    :param text: block answer
    :type text: str
    :param idx: event index
    :type idx: int
    :return: event's day short title, time, type, name and educators
    :rtype: tuple
    """
    rows = text.split("\n\n")
    emoji_time_day = rows[1].split()
    event_time = emoji_time_day[1]
    event_day_short = emoji_time_day[-1][1:-1]
    event_data = rows[idx + 2].split("\n")
    event_type_name = event_data[0].split(" " + emoji["en_dash"] + " ")
    event_name = (" " + emoji["en_dash"] + " ").join(event_type_name[1:])
    # Remove the "N. " numbering as an exact prefix rather than with
    # `str.strip`, which removes a *character set* from both ends and
    # could eat digits/dots belonging to the type itself.
    event_type = event_type_name[0]
    prefix = "{0}. ".format(idx + 1)
    if event_type.startswith(prefix):
        event_type = event_type[len(prefix):]
    event_type = event_type.replace(emoji["cross_mark"] + " ", "").strip()
    event_educators = "; ".join(
        [place_edu.split("(")[-1].split(")")[0]
         for place_edu in event_data[1:]]
    )
    return event_day_short, event_time, event_type, event_name, \
        event_educators


def update_types_answer(text, new_type):
    """
    Updates text by adding/removing type

    :param text: bot's old text
    :type text: str
    :param new_type: selected short type
    :type new_type: str
    :return: updated answer
    :rtype: str
    """
    lesson_data = text.split("\n\n")[1].split("\n")
    if lesson_data[-1] == "Типы: Все":
        types = [new_type]
    else:
        # Toggle: remove the type if present, add it otherwise.
        types = lesson_data[-1][6:].split("; ")
        if new_type in types:
            types.remove(new_type)
        else:
            types.append(new_type)
    lesson_data[-1] = "Типы: {0}".format("; ".join(types) if types else "Все")
    return "\n\n".join([
        updated_types_answer,
        "\n".join(lesson_data),
        ask_to_select_types_answer
    ])


def get_lesson_data(data, hide_type):
    """
    Creates dict with lesson data for `Lesson.__init__()`

    :param data: `selected_lesson_info_answer` with data
    :type data: str
    :param hide_type: `ЛЛЛ`, `ЛКК`, etc
    :type hide_type: str
    :return: dict with lesson data
    :rtype: dict
    """
    return dict(
        name=data[2][10:],
        types=[get_key_by_value(dct=subject_short_types, val=t)
               for t in data[-1][6:].split("; ")]
        if "Все" not in data[-1] else [],
        days=[data[0][6:]] if hide_type[0] == "К" else [],
        times=[data[1][7:]] if hide_type[1] == "К" else [],
        educators=data[3][15:].split("; ") if hide_type[2] == "К" else [],
        locations=[]
    )


def tgbot_send_long_message(bot, text, user_id, split="\n\n"):
    """
    Sends `text` to `user_id`, recursively bisecting it on `split`
    when Telegram rejects the message as too long.

    :param bot: telebot instance
    :param text: message text (HTML)
    :type text: str
    :param user_id: target chat id
    :param split: separator used to bisect an over-long message
    :type split: str
    """
    try:
        bot.send_message(user_id, text, parse_mode="HTML")
    except ApiException as api_exc:
        json_err = json.loads(api_exc.result.text)
        if json_err["description"] == "Bad Request: message is too long":
            event_count = len(text.split(split))
            first_part = split.join(text.split(split)[:event_count // 2])
            second_part = split.join(text.split(split)[event_count // 2:])
            tgbot_send_long_message(bot, first_part, user_id, split)
            tgbot_send_long_message(bot, second_part, user_id, split)


def tgbot_edit_first_and_send_messages(bot, texts, bot_msg, split="\n\n"):
    """
    Edits `bot_msg` with the first text and sends the rest as new
    messages, falling back to long-message sending on API errors.

    :param bot: telebot instance
    :param texts: list of message texts (HTML)
    :type texts: list
    :param bot_msg: bot's message to edit
    :type bot_msg: Message
    :param split: separator for over-long messages
    :type split: str
    """
    try:
        bot.edit_message_text(
            text=texts[0],
            chat_id=bot_msg.chat.id,
            message_id=bot_msg.message_id,
            parse_mode="HTML"
        )
    except ApiException:
        tgbot_send_long_message(bot, texts[0], bot_msg.chat.id, split)
    for text in texts[1:]:
        tgbot_send_long_message(bot, text, bot_msg.chat.id, split)


def write_log(update, work_time, was_error=False):
    """
    Writes one request to the log.

    :param update: telegram update (message or callback query)
    :param work_time: handling time
    :param was_error: mark the entry as an error
    :type was_error: bool
    """
    if update.message:
        chat_id = update.message.chat.id
        user_text = update.message.text or "NO TEXT"
    elif update.callback_query:
        chat_id = update.callback_query.message.chat.id
        user_text = update.callback_query.data
    else:
        chat_id = "ERROR"
        user_text = str(update)
    log = "CHAT: {0} ===== TEXT: {1} ===== TIME: {2}".format(
        chat_id, user_text, work_time)
    if was_error:
        log += "\t\t\tERROR"
    logging.info(log)


def generate_hash(content: bytes):
    """
    Create SHA3-512 hash from bytes

    (The implementation uses `hashlib.sha3_512`, not MD5.)

    :param content: some data in bytes
    :type content: bytes
    :return: hash string (hex digest)
    :rtype: str
    """
    return hashlib.sha3_512(content).hexdigest()


def check_hash(cur_hash: str, content: bytes):
    """
    Checks current hash and content

    :param cur_hash: current hash
    :type cur_hash: str
    :param content: some data in bytes
    :type content: bytes
    :return: is same
    :rtype: bool
    """
    return cur_hash == generate_hash(content)
As a small business operating within the building and construction industry in Tasmania, Woodhams Builders recognises the great importance of conducting our operations in a way that avoids placing unnecessary risk on direct employees, members of the public or any other persons conducting work for the organisation. It is also our desire to be leaders within our field and as such we understand the role that Workplace Health and Safety plays in creating an environment in which work standards, productivity, positive organisational culture and continual improvement have the opportunity to thrive. As such, the following manual outlines our commitment to maintaining a safe and healthy workplace as well as highlighting the varying roles and responsibilities of different people throughout this ongoing process. The manual also contains the relevant information and documentation required by officers, workers and Workplace Health and Safety authorities. To provide a workplace that is free from risks to health and safety by implementing the highest possible standards to protect workers’ health, safety, mental and social wellbeing. To engage and consult with all workers and others affected by our business or undertakings to ensure hazards are identified and the risks associated with them removed or reduced to the greatest degree. To create a workplace environment where workers and others affected by our business or undertakings are encouraged and supported to raise health and safety issues and help reduce and manage them. Management is firmly committed to a policy enabling all work activities to be carried out safely, and with all possible measures taken to remove (or at least reduce) risks to the health, safety and welfare of employees, contractors, authorised visitors, and anyone else who may be affected by our business or undertakings. 
We are committed to ensuring we comply with the Work Health and Safety Act 2013, the Work Health and Safety Regulations 2013, relevant Australian Standards and relevant Codes of Practice. other people, by ensuring they are not put at risk from work carried out as part of our business undertakings. Woodhams Builders values its employees and recognizes their need for a safe and healthy work environment. Furthermore, employees abusing drugs and alcohol are less productive and are often a risk to the safety and productivity of our Company. The establishment of a Drug-Free Workplace Policy is consistent with this Company’s desire to promote a safe and healthy workplace. Woodhams Builders prohibits the use, possession, sale, manufacture, and/or distribution of alcohol and illegal/controlled substances and/or drug-related paraphernalia while performing duties for the company during working hours. Woodhams Builders also prohibits workers from performing their duties at work while under the influence of these substances. Compliance with this policy will be required as a condition of employment for qualified applicants or for continued employment of current employees. The presence of a detectable amount of any prohibited substance in an employee while working and/or during working hours may be deemed a violation of this policy, regardless of when or where the substance entered the employee’s system. Failure to comply with the provisions of this policy will be grounds for disciplinary action up to and including immediate discharge. It will at least result in a consultation in which an employee will be given either a verbal or written warning. After this warning, employees may be discharged should they commit another offence. To ensure everyone remains safe in the event that social events such as Christmas parties or after work drinks occur…. 
This policy demonstrates that Woodhams Builders has a commitment to the health and safety of its employees and to complying with the Workplace Health and Safety Act 1995 and the Workplace Health and Safety Regulations 1998. This organisation is a bullying-free workplace where everyone is treated with respect. One-off behaviours can still present a risk to health and safety and will not be tolerated. Bullying is taken seriously by this organisation, and will be addressed. Disciplinary action may be taken if this policy is breached. If you are being bullied, or see others being bullied at work, you can talk to your supervisor, or if this is not possible, talk to Rob Gluckman, the contact person for this policy. Reports of bullying will be followed up and in serious cases investigated quickly, objectively and fairly. We seek the co-operation of all workers, contractors, visitors and others who may be affected by our business or undertakings. We encourage and support suggestions to create a safe working environment as a result of all possible preventative measures being taken. by monitoring the effectiveness of policies and procedures. This policy (and related procedures) shall be displayed where possible. All workers, contractors and others affected by our business or undertakings will be provided with a copy. New workers will be provided with a copy as part of their induction.