text
stringlengths 29
850k
|
|---|
#-*- coding: utf-8 -*-
__all__ = ["parser", "entry"]
from textwrap import TextWrapper
import xml.etree.ElementTree as ET
from unidecode import unidecode
from LittreParser.error import EntryNotFound
def _xml2dict(root):
"""
Convert an XML node to a dictionnary.
Recursive.
"""
out = []
for child in root.iterfind("./*"):
out.append({
"tag": child.tag,
"text": child.text,
"attrib": child.attrib,
"children": _xml2dict(child)
})
return out
def _gettext(elem):
"""
Équivalent DOM de la propriété "innerText"
"""
return "".join(elem.itertext())
class parser:
    """
    A parser for the XMLittre dictionary.
    """
    # parser pool: one parsed XML root per letter of the alphabet
    # (class-level, so all parser instances share the cache)
    _ET_parsers = {}

    def __init__(self, xml_directory):
        # directory holding the 26 per-letter XML files
        self._xml_directory = xml_directory

    def load_xml_file(self, letter):
        """
        The dictionary is split into 26 XML files named after the letters
        of the alphabet.  Parse the file matching *letter* into an Element
        node and store it in the pool.

        Raises ValueError when *letter* is not a single character.
        """
        if not isinstance(letter, str) or len(letter) != 1:
            raise ValueError("need a letter from the alphabet")
        xml_path = "{}/{}.xml".format(
            self._xml_directory,
            letter
        )
        with open(xml_path, 'r') as xml_file:
            xml_content = xml_file.read()
        self._ET_parsers[letter] = ET.fromstring(xml_content)

    def get_parser(self, letter):
        """
        Return the parser for a letter of the alphabet, loading it on the
        fly if needed.  The letter is normalized to unaccented lower case.
        """
        letter = unidecode(letter.lower())
        if letter not in self._ET_parsers:
            self.load_xml_file(letter)
        return self._ET_parsers[letter]

    def get_entries(self, name):
        """
        Return the list of <entree> XML nodes matching the given term.
        A term may map to several entries (multiple senses).

        Raises EntryNotFound when no entry matches.
        TODO: return a list of close words on failure (wrong accents,
        typos, etc.).
        """
        name = name.upper()
        # pick the parser matching the first letter of the term
        p = self.get_parser(name[0])
        # an entry may have several "senses" and therefore be duplicated
        entries = []
        for node in p.iterfind("./entree"):
            terme = node.attrib["terme"]
            # try to match either the masculine or the feminine form
            mal, fem = self.build_female(terme)
            if name == mal or name == fem:
                entries.append(node)
        if not entries:
            raise EntryNotFound("the entry \"{}\" does not exist".format(name))
        return entries

    def build_female(self, word):
        """
        Build the feminine form of a term from its masculine form and its
        feminine suffix, e.g. "AMI, IE" -> ("AMI", "AMIE").

        Returns a (masculine, feminine) tuple; the feminine is "" for a
        simple (suffix-less) term.
        """
        # split off the radical (with the masculine mark) and the
        # feminine suffix
        values = word.split(",")
        rad = values[0].strip()
        if len(values) == 1 or not values[1]:
            # simple term: no feminine form
            fem = ""
        else:
            fem_suffix = values[1].strip()
            # the first letter of the feminine suffix must line up with
            # the same letter inside the masculine form
            first_fem_letter = fem_suffix[0]
            # strip the masculine ending to get the bare stem
            masc_pos = rad.rfind(first_fem_letter)
            prefix = rad[:masc_pos]
            # build the feminine form
            fem = prefix + fem_suffix
        return rad, fem

    def get_entries_as_dict(self,
                            name,
                            no_quotes=False,
                            no_synonyms=False,
                            no_history=False,
                            no_etymology=False):
        """
        Return the senses of a word as a dictionary whose keys are sense
        indices and whose values are entries formatted as dict trees.
        """
        name = name.upper()
        definition = {
            "terme": name,
            "sens": {}
        }
        for i, node in enumerate(self.get_entries(name)):
            e = entry(name, node)
            # BUG FIX: this used to call entry.get_sens_id() on the class
            # (no instance -> TypeError); the sense id belongs to `e`
            key = e.get_sens_id() or i
            definition["sens"][key] = e.format_as_dict(
                no_quotes,
                no_synonyms,
                no_history,
                no_etymology
            )
        return definition
class entry:
    """
    A dictionary entry produced by the parser.
    One entry corresponds to one definition.
    """

    def __init__(self, mot, entry):
        # the looked-up word and its <entree> XML node
        self.mot = mot
        self.entry = entry

    def get_sens_id(self):
        """
        Return the sense index of the definition (defaults to 1 when the
        "sens" attribute is absent or empty).
        """
        return int(self.entry.attrib.get("sens") or 1)

    def get_variante_text(self, v):
        """
        Return the text defining a variant.  That text may be spread over
        nodes glued to raw text chunks.
        """
        text = v.text.replace("\n", "") if v.text else ""
        # workaround: "find()" does not work here, most likely because of
        # the interleaving of text and non-text nodes inside one element
        for sem in v.iter("semantique"):
            if sem.text:
                text += sem.text.replace("\n", "")
            if sem.tail:
                text += sem.tail.replace("\n", "")
        return text

    def get_variantes(self, corps_, no_quotes=False):
        """
        Return the variants found in the entry body as a list of dicts.
        """
        variantes = []
        for v_ in corps_.iter("variante"):
            variante = {
                "num": int(v_.attrib.get("num") or -1),
                "text": self.get_variante_text(v_),
                "indent": []
            }
            # attach the citations belonging to the variant, if requested
            if not no_quotes:
                variante["cit"] = self.get_citations(v_)
            # collect the sub-parts
            for i_ in v_.iter("indent"):
                subtext = (i_.text or "").rstrip()
                # workaround for interleaved <semantique> nodes
                for s_ in i_.iter("semantique"):
                    subtext += (s_.text or "").rstrip()
                # BUG FIX: `citations` used to be left undefined when
                # no_quotes was True, raising NameError below
                citations = [] if no_quotes else self.get_citations(i_)
                variante["indent"].append({
                    "text": subtext,
                    "cit": citations
                })
            variantes.append(variante)
        return variantes

    def get_citations(self, parent_):
        """
        Return the citations directly under a node as a list of dicts.
        """
        citations = []
        for c in parent_.iterfind("./cit"):
            citation = {
                # BUG FIX: .get() — a *missing* attribute used to raise
                # KeyError instead of falling back like an empty one
                "aut": c.attrib.get("aut") or "aut. inc.",
                "ref": c.attrib.get("ref") or "ref. inc.",
                "text": c.text
            }
            citations.append(citation)
        return citations

    def get_synonymes(self, entry_):
        """
        Return the synonyms of an entry as a list of strings.
        """
        synonymes = []
        for synonymes_ in entry_.iterfind("./rubrique[@nom='SYNONYME']"):
            for syn in synonymes_.iter("indent"):
                # guard: an <indent> may have no direct text
                synonymes.append((syn.text or "").rstrip())
        return synonymes

    def get_historique(self, entry_):
        """
        Return the history of an entry as a list of dicts ([] when the
        rubrique is absent).
        """
        historique = []
        rubrique_ = entry_.find("./rubrique[@nom='HISTORIQUE']")
        # BUG FIX: `if not rubrique_:` is also True for a present element
        # with no children (ElementTree truthiness); test for None, and
        # return [] rather than None for a consistent type
        if rubrique_ is None:
            return historique
        for indent in rubrique_.iter("indent"):
            # century label
            date = (indent.text or "").rstrip()
            # citations attached to that century
            citations = self.get_citations(indent)
            historique.append({
                "date": date,
                "cit": citations
            })
        return historique

    def get_etymologie(self, entry_):
        """
        Return the etymology of an entry as a list of strings ([] when
        the rubrique is absent).
        """
        etymologies = []
        rubrique_ = entry_.find("./rubrique[@nom='ÉTYMOLOGIE']")
        # BUG FIX: a missing rubrique used to raise AttributeError on
        # rubrique_.iter(...)
        if rubrique_ is None:
            return etymologies
        for indent in rubrique_.iter("indent"):
            etymologies.append(_gettext(indent).rstrip())
        return etymologies

    def format_as_dict(self,
                       no_quotes=False,
                       no_synonyms=False,
                       no_history=False,
                       no_etymology=False):
        """
        Walk the entry and return it as a dict tree.
        """
        entete_ = self.entry.find("./entete")
        corps_ = self.entry.find("./corps")
        prononciation_ = entete_.find("./prononciation")
        nature_ = entete_.find("./nature")
        e = {
            "entete": {
                "prononciation": prononciation_.text,
                "nature": nature_.text
            }
        }
        # variants
        variantes = self.get_variantes(corps_, no_quotes)
        if variantes:
            e["variantes"] = variantes
        # synonyms
        if not no_synonyms:
            synonymes = self.get_synonymes(self.entry)
            if synonymes:
                e["synonymes"] = synonymes
        # history
        if not no_history:
            historique = self.get_historique(self.entry)
            if historique:
                e["historique"] = historique
        # etymology
        if not no_etymology:
            etymologies = self.get_etymologie(self.entry)
            if etymologies:
                e["etymologie"] = etymologies
        return e
class entry_formatter:
    """
    Formatting class for an entry.

    Supported formats are:
        * plain text
        * HTML

    NOTE(review): only the plain-text path is visible in this class —
    confirm the HTML claim.
    """
    # plain-text formatting glyphs
    _nbsp = u"\u00A0"
    _bullet = u"\u2219\u25E6"
    _q = u"\u201C\u201D"
    # separator between the parts of the definition
    _subpart_separator = "\u2015"*24
    # citation format: author (reference): "text"
    _citation_format = "{} ({}): "+_q[0]+"{}"+_q[1]
    # number of spaces per indentation level
    _indent_factor = 2
    # width at which text is wrapped cleanly
    _display_width = 78

    def __init__(self, entries, fit_text=True):
        # `entries` is the dict produced by parser.get_entries_as_dict()
        self.entries = entries
        self.fit_text = fit_text
        self.tw = TextWrapper(
            width=self._display_width,
            # the initial indentation is always produced by /list_item/
            initial_indent = ""
        )

    def fill(self, text, subsequent_indent=0):
        """
        Return the given text wrapped word-by-word at the width defined
        by the /_display_width/ constant.
        /subsequent_indent/ is the size of the text prefix, typically the
        size of the bullet marker (leading spaces included).
        """
        self.tw.subsequent_indent = self._nbsp*subsequent_indent
        return self.tw.fill(text)

    def list_item(self, level=2, li_type=0, li_count=-1):
        """
        Format a bullet-list item.
        If /li_type/ is -1 the list is numbered and the bullet takes the
        value of /li_count/.
        """
        if li_type == -1:
            # the list item is a number
            bullet = str(li_count)+"."
        else:
            # the list item is a "bullet" glyph
            bullet = self._bullet[li_type]
        return self._nbsp * level * self._indent_factor + bullet + self._nbsp

    def format_entete(self, entete):
        """
        Format a definition header as plain text.
        """
        text = "PRONONCIATION: '{}'\nNATURE: {}".format(
            entete["prononciation"],
            entete["nature"]
        )
        return text

    def format_citation(self, cit, level=0, li_style=0):
        """
        Format a citation as plain text.
        """
        li = self.list_item(level, li_style)
        cit = self._citation_format.format(
            cit["aut"],
            cit["ref"],
            cit["text"]
        )
        text = li + cit
        # wrap the text cleanly if it overflows
        if self.fit_text:
            text = self.fill(text, len(li))
        return text + "\n"

    def format_variantes(self, variantes, base_indent_level=1):
        """
        Format the variants as plain text.
        """
        paragraph = ""
        for li_count, v_ in enumerate(variantes):
            # build a numbered list item (fall back to the loop counter
            # when the variant carries no explicit number)
            if v_["num"] == -1:
                li_index = li_count+1
            else:
                li_index = v_["num"]
            li = self.list_item(base_indent_level, -1, li_index)
            text = li + v_["text"]
            # wrap the text cleanly if it overflows
            if self.fit_text:
                text = self.fill(text, len(li))
            text += "\n"
            # append the citations, if any
            if "cit" in v_:
                for c_ in v_["cit"]:
                    text += self.format_citation(c_, base_indent_level+1, 0)
            # append the sub-parts, if any
            for ind in v_["indent"]:
                li = self.list_item(base_indent_level+1, 0)
                _text = li + ind["text"]
                # wrap the text cleanly if it overflows
                if self.fit_text:
                    _text = self.fill(_text, len(li))
                text += _text + "\n"
                # citations attached to the sub-part
                for cit in ind["cit"]:
                    text += self.format_citation(cit, base_indent_level+2, 1)
            paragraph += text
        return paragraph

    def format_synonymes(self, synonymes, base_indent_level=1):
        """
        Format a list of synonyms as plain text.
        """
        paragraph = ""
        for syn in synonymes:
            li = self.list_item(base_indent_level, 1)
            text = li + syn
            # wrap the text cleanly if it overflows
            if self.fit_text:
                text = self.fill(text, len(li))
            paragraph += text + "\n"
        return paragraph

    def format_historique(self, historique, base_indent_level=1):
        """
        Format a definition's history as plain text.
        """
        paragraph = ""
        for his in historique:
            li = self.list_item(base_indent_level, 0)
            text = li + his["date"]
            # wrap the text cleanly if it overflows
            if self.fit_text:
                text = self.fill(text, len(li))
            text += "\n"
            for cit in his["cit"]:
                text += self.format_citation(cit, base_indent_level+1, 1)
            paragraph += text
        return paragraph

    def format_etymologies(self, etymologie, base_indent_level=1):
        """
        Format a list of etymologies as plain text.
        """
        paragraph = ""
        for ety in etymologie:
            li = self.list_item(base_indent_level, 0)
            text = li + ety
            # wrap the text cleanly if it overflows
            if self.fit_text:
                text = self.fill(text, len(li))
            paragraph += text + "\n"
        return paragraph

    def format(self):
        """
        Format the entry as plain text.
        """
        text = "TERME: {}\n".format(self.entries["terme"])
        # the senses of an entry are indexed by a numeric id; only print
        # the sense separators when there are several senses
        if len(self.entries["sens"]) == 1:
            initial_indent_level = 0
            print_sens_separator = False
        else:
            initial_indent_level = 1
            print_sens_separator = True
        for sens_id, definition in self.entries["sens"].items():
            if print_sens_separator:
                text += "{}\nSENS #{}\n".format(
                    self._subpart_separator,
                    sens_id
                )
            # variants of the term
            if "variantes" in definition:
                text += "\nVARIANTES:\n"
                text += self.format_variantes(
                    definition["variantes"],
                    initial_indent_level
                )
            # synonyms
            if "synonymes" in definition:
                text += "\nSYNONYMES:\n"
                text += self.format_synonymes(
                    definition["synonymes"],
                    initial_indent_level
                )
            # history
            if "historique" in definition:
                text += "\nHISTORIQUE:\n"
                text += self.format_historique(
                    definition["historique"],
                    initial_indent_level
                )
            # etymology
            if "etymologie" in definition:
                text += "\nÉTYMOLOGIE:\n"
                text += self.format_etymologies(
                    definition["etymologie"],
                    initial_indent_level
                )
        return text

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return str(self.format())
|
Happy Holiday Season! I hope you’re enjoying the office parties with enthusiasm and reduced stress. A time when vacations are common and work is slow would seem ripe for relaxation – a time for employees to recharge their batteries and reconnect with their roles. If done right, this could gear everyone up to start the next year with a bang.
Unfortunately, the holidays don’t usually play out like that. Blame it on the expenses, blame it on family fights, blame it on the overindulgence of too much food and alcohol. Whatever the reason, ’tis the season of stress: and for employers, the threat of a wave of notices in January is a major cause of concern. The question of how to maintain employee retention into the New Year is high on the board agenda.
Virgin Pulse’s 2014 survey, aptly titled ‘Tis the Season for Stress, found that a full 70 percent of employees report raised stress levels during the holidays, with over 10 percent responding that their stress levels are between 60 and 100 percent higher than during the rest of the year.
Since workers spend significant amounts of time and energy at their jobs, their organizations have a high impact on their overall health and stress levels. Moreover, high stress can significantly impact an organization’s bottom line. Not only is there a risk of high disengagement, lower productivity, and lower work quality – employee retention depends on health as well.
As such, employers have a vested interest and responsibility to do their part in ensuring employee wellbeing.
64 percent of respondents to Virgin Pulse’s survey said that holiday related stress distracts them at work. A separate study found that 91 percent of employees who have high wellbeing are less likely to leave a job. Clearly, a stressful environment doesn’t help you retain employees.
As the holidays end and the new year begins, the trouble can all come to a head. Employees no longer have the prospect of time off as a buffer between them and their stress. The slowed down attitude of the past couple weeks is suddenly jolted into Q1 pandemonium as everyone goes back to being full steam ahead in their roles. This ungraceful transition can trigger extra stress, creating a recipe for high turnover.
So, what can you do to maintain employee retention after the holiday season?
The stress of holidays is only the straw that breaks the camel’s back. It’s not the underlying reason why people look for new jobs in the new year.
70 percent of managers think that pay is the primary reason employees leave a business, yet research shows 88 percent of employees actually leave their jobs because of reasons other than pay.
These are all things that managers should know about their employees. For instance, they know if they are not providing much coaching or employee feedback. If they ask employees about their goals, they’ll also know about a desire for growth and advancement opportunities.
In other words, the first and most important step to increasing employee retention is listening. Your employees will tell you what they need, but you have to provide the opportunity or platform for them to communicate, and then you have to listen to them.
In the new year, make it a priority to have regular two way feedback sessions with employees. Let them know in a clear way what’s expected of them and how they’re measuring up, while at the same time ensuring that they have space to discuss all of their concerns and desires. In fact, don’t just give them the space for that discussion. Actively invite it.
This year, resolve to create a positive and nurturing environment on your team: one that reflects your company values and aligns with your business mission. Commit to creating transparency and opportunity for every one of your workers and to let them know how valuable they are. Make employee retention a defined objective for your organization.
It’s often been said that New Year’s resolutions are much more likely to stick when they are as specific as possible. The same can be said for your professional resolutions. Merely saying that you’ll create a positive team culture in the new year is not the same as implementing policies designed to cultivate that culture.
Training and mentoring programs both help create the type of culture that will help to maintain employee retention. Employees are often looking to further their careers and learn new skills; giving them the tools to do so will be powerful motivation to get them to stay.
Employee appreciation plans are also very important. Your employees work very hard all year round to deliver results and meet goals. They deserve recognition, and 69 percent say they’d work harder if they had it. Create official recognition programs and back these up with unofficial recognition – be it a simple “thanks for that great report” or taking the team out to lunch following a big accomplishment.
Yes, the holidays can bring a lot of stress. Yet there are lessons to be learned from the festive season.
Celebration, time off or anything that takes some time out of the monotonous work schedule makes it a little more special or fun. Those are important tools to relieve stress, allowing employees to recharge their batteries, engage with the culture of your organization or simply achieve greater work-life balance. Ultimately, these all make everyone happier and more productive.
When employees don’t take any time off, they’re at high risk of burnout. This harms engagement, productivity, and ultimately retention as well. Encouraging employees to take vacation can go a long way to counteracting some of the worst effects of burnout, improving productivity and employee engagement while protecting your organization against high staff turnover and poor morale.
Even when workers aren’t taking time off, social get togethers such as monthly happy hours can provide a time to recognize your team’s hard work and blow off some steam. This can also help combat burnout.
Ultimately, the holidays pose some amount of risk to employers. They can be stressful and push workers to look for new jobs come January. Yet they can also provide a template for stress relief – if you know where to look.
Making resolutions centered around improving office culture, listening to workers, and hanging on to some holiday cheer are all invaluable in combatting turnover. The holidays are, in some ways, what you make of them. This year, make them an opportunity to improve retention by keeping employees happy and healthy.
|
import pandas as pd
from ...config import _load_secrets
import sqlalchemy
import os
from tqdm import tqdm
class PowerSchoolSQLInterface:
    """A class representing an interface to the PowerSchool frontend
    which most teachers/students/admins have access to.

    Connects to the backing Oracle database via SQLAlchemy.
    """

    def __init__(self, secrets=None, port=1521):
        """Open a connection to the PowerSchool Oracle database.

        secrets: optional pre-loaded secrets mapping; when None the
            mapping is read with _load_secrets().  Must contain a
            'powerschool' section with username/password/host/dbname.
        port: Oracle listener port (generalized; defaults to the
            standard 1521 the code previously hard-coded).

        Raises KeyError when the configuration is incomplete.
        """
        if secrets is None:
            secrets = _load_secrets()
        try:
            conf = secrets['powerschool']
            self.username = conf['username']
            self.password = conf['password']
            self.host = conf['host']
            self.dbname = conf['dbname']
        except KeyError as err:
            # BUG FIX: this used to print a warning and fall through,
            # crashing just below with an unrelated AttributeError.
            # Fail fast with a clear error instead.
            raise KeyError(
                "Please check the configuration of your config file"
            ) from err
        engine = sqlalchemy.create_engine(
            'oracle://{}:{}@{}:{}/{}'.format(
                self.username, self.password, self.host, port, self.dbname
            )
        )
        self.conn = engine.connect()

    def query_to_df(self, query):
        """Execute *query* and return the result as a pandas DataFrame."""
        # BUG FIX: was missing `self` and referenced an undefined `conn`.
        return pd.read_sql(query, self.conn)

    def _list_queries(self, file_dir='./sql'):
        """Return the names of the query files found in *file_dir*."""
        # BUG FIX: was defined without `self` but called as a method.
        return os.listdir(file_dir)

    def download_files(self, file_dir='./sql', out_dir='/tmp'):
        """Run every .sql query in *file_dir* and write each result to a
        CSV of the same base name in *out_dir*."""
        # BUG FIX: was missing `self` and called query_to_df unqualified.
        for file_name in tqdm(self._list_queries(file_dir)):
            with open(os.path.join(file_dir, file_name), 'r') as filebuf:
                data = filebuf.read()
            df = self.query_to_df(data)
            csv_name = file_name.replace('.sql', '.csv')
            df.to_csv(os.path.join(out_dir, csv_name))
if __name__ == '__main__':
    # BUG FIX: the class name was misspelled ("PowerSchoolSQLInferface")
    # and the instance method was called directly on the class;
    # instantiate (which opens the DB connection) before downloading.
    PowerSchoolSQLInterface().download_files()
|
Freerider Red is a dry, smooth, medium-bodied wine with delicious dark berry, apple-wood, and warm clove notes. Inspired by the exhilarating joy of skiing, Freerider Red is a perfect glass to celebrate those hard-earned fresh tracks. This wine is composed entirely of our St.Croix grapes, fermented in stainless steel and aged for an average of 12 months in our oak barrels.
Grape Information: Freerider Red is made from 100% Vermont Grown St.Croix. This red grape shows good resistance to most of the major vine diseases, and is usually hardy to -32°F or warmer. The wines tend to have low-medium tannin levels, and can be quite fruity.
|
"""empty message
Revision ID: 3528db092ec4
Revises:
Create Date: 2017-10-22 17:53:34.149039
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3528db092ec4'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: categories, creators, media, users,
    items, and the items<->creators association table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('categories',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('creators',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('firstname', sa.String(length=80), nullable=True),
    sa.Column('lastname', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('media',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('category', sa.String(length=80), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=80), nullable=True),
    sa.Column('password', sa.String(length=25), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # items reference both media and categories, so those must exist first
    op.create_table('items',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('media_id', sa.Integer(), nullable=True),
    sa.Column('category_id', sa.Integer(), nullable=True),
    sa.Column('EAN', sa.Integer(), nullable=True),
    sa.Column('ASIN', sa.Integer(), nullable=True),
    sa.Column('ASIN_LINK_AMAZON', sa.String(length=1024), nullable=True),
    sa.Column('name', sa.String(length=80), nullable=True),
    sa.Column('synopsys', sa.String(length=1024), nullable=True),
    sa.Column('creation_date', sa.DateTime(), nullable=True),
    sa.Column('modification_date', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
    sa.ForeignKeyConstraint(['media_id'], ['media.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # many-to-many association between items and creators
    op.create_table('item_creator',
    sa.Column('item_id', sa.Integer(), nullable=False),
    sa.Column('creator_id', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['creator_id'], ['creators.id'], ),
    sa.ForeignKeyConstraint(['item_id'], ['items.id'], ),
    sa.PrimaryKeyConstraint('item_id', 'creator_id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), in reverse dependency
    order (association table first, referenced tables last)."""
    # ### commands auto generated by Alembic - please adjust! ###
    for table_name in ("item_creator", "items", "users",
                       "media", "creators", "categories"):
        op.drop_table(table_name)
    # ### end Alembic commands ###
|
See our full lineup of all possible cruises from Santos below and sort by length, ship, cruise line, price, departure port, or date range. Once you have chosen the ideal cruise from Santos, you can see further information and the full cruise schedule, as well as the vessel details and some featured shore excursions. Space is usually limited, so book online with us promptly to secure your Santos cruise reservation. Contact us at (844)235-4177 or email us to talk with a vacation pro and we will answer any questions you have.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "wanghn"
import httplib
import socket
import time
import os
import ConfigParser
import json
import urllib2
import re
def monitorwebapp(ip_result):
    """Ping each address once and map address -> reachability.

    ip_result: list of IP address strings.
    Returns a dict {address: True/False}, True when at least one ping
    reply (a line containing "time=") came back.
    """
    # BUG FIX: removed the unused `timenow` variable and the
    # index-based xrange loop in favor of direct iteration.
    webcip_state = {}
    for address in ip_result:
        # grep -c counts reply lines; '0\n' means no reply at all
        ping_out = os.popen('ping %s -c 1 | grep -c time=' % address).read()
        webcip_state[address] = ping_out != '0\n'
    return webcip_state
def conport(ip_result, port_result):
    """Attempt a TCP connection for every (ip, port) pair.

    Returns a nested dict {ip: {port: True/False}} where True means the
    connect succeeded within the 1 second timeout.
    """
    webcp_state = {}
    for idx in range(len(port_result)):
        address = ip_result[idx]
        port = port_result[idx]
        # resolve the target before the try: a bad port string should
        # propagate, exactly like the original tuple construction did
        target = (address, int(port))
        sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sk.settimeout(1)
        try:
            sk.connect(target)
            reachable = True
        except Exception:
            reachable = False
        sk.close()
        # accumulate per-address results without clobbering earlier ports
        webcp_state.setdefault(address, {})[port] = reachable
    return webcp_state
def servicestate(service_result):
    """Check whether each named service appears in the process list.

    Returns a dict {service_name: True/False}; True when at least one
    matching `ps -ef` line (excluding the grep itself) was found.
    """
    ser = {}
    for service_name in service_result:
        matches = os.popen(
            'ps -ef|grep %s|grep -v grep' % service_name
        ).readlines()
        ser[service_name] = len(matches) > 0
    return ser
def urlhealthcheck():
    """Hit the IDP heartbeat URL and report its health.

    Returns {'heartbeat': True} when the response contains 'Success',
    and {'heartbeat': False} on any failure.
    """
    urlhealthcheckresult = {}
    try:
        responsecheck = urllib2.urlopen(
            "http://ids122.avict.com:8080/nidp/app/heartbeat" ).read( )
        # BUG FIX: when the body lacked 'Success' the function used to
        # fall off the end and return None instead of a dict
        urlhealthcheckresult['heartbeat'] = 'Success' in responsecheck
    except Exception:
        urlhealthcheckresult['heartbeat'] = False
    return urlhealthcheckresult
def nldapcheck():
    """Run the `nldap_check` tool and report LDAP listener state.

    Returns {'TCP': bool, 'TLS': bool}; both False on any failure.
    """
    nldapcheckresult = {}
    try:
        check_cmd = os.popen('nldap_check').read().strip().replace('\n', '').split('.')
        matchtcp = re.match(".*is listening(.*)TCP.*", check_cmd[0])
        matchtls = re.match(".*is listening(.*)TLS.*", check_cmd[1])
        # a match object is truthy, None is falsy: same True/False values
        # the original if/elif ladder produced
        nldapcheckresult['TCP'] = bool(matchtcp)
        nldapcheckresult['TLS'] = bool(matchtls)
        return nldapcheckresult
    except Exception:
        nldapcheckresult['TCP'] = False
        nldapcheckresult['TLS'] = False
        return nldapcheckresult
if __name__ == '__main__':
    # Read the agent configuration: hosts/ports to probe, services to
    # check, and the reporting endpoint plus the JSON key names to use.
    cf = ConfigParser.RawConfigParser( )
    cf.read( "/root/python/ids_health_config.ini" )
    # mtime of the config file (read here but not used below)
    smmodifytime = os.stat( r"/root/python/ids_health_config.ini" ).st_mtime
    ipaddr = cf.get( "HostAgent", "ipaddr" )
    port = cf.get( "HostAgent", "port" )
    servi = cf.get( "HostAgent", "services" )
    url = cf.get( "HostAgent", "url" )
    datetime = cf.get( "HostAgent", "datetime" )
    servstate = cf.get( "HostAgent", "servstate" )
    webaddress = cf.get( "HostAgent", "webaddress" )
    webport = cf.get( "HostAgent", "webport" )
    webcipstatus = cf.get( "HostAgent", "webcipstatus" )
    webcpstatus = cf.get( "HostAgent", "webcpstatus" )
    nldapstatus = cf.get( "HostAgent", "nldapstatus" )
    urlcheckstatus = cf.get( "HostAgent", "urlcheckstatus" )
    # comma-separated config values -> lists
    service_result = servi.split( ',' )
    ip_result = webaddress.split( ',' )
    port_result = webport.split( ',' )
    # NOTE(review): ctrlags and num are never used below — confirm dead code
    ctrlags = 1
    num = True
    # run every health check
    ser = servicestate( service_result )
    webcip_state = monitorwebapp( ip_result )
    webcp_state = conport( ip_result, port_result )
    nldap_check_state = nldapcheck()
    url_health_check_state = urlhealthcheck()
    time_nu = time.strftime( "%Y-%m-%d %H:%M:%S", time.localtime( time.time( ) ) )
    # aggregate the results under the config-driven key names
    params = {
        'ids_health':{
            servstate: ser,
            webcipstatus: webcip_state,
            webcpstatus: webcp_state,
            nldapstatus: nldap_check_state,
            urlcheckstatus: url_health_check_state
        }
    }
    # print "params::::",params
    data = json.dumps(params)
    print "result:",data
    # POST the JSON report via curl (an earlier httplib variant is kept
    # commented out below)
    try:
        # headers = {"Content-type": "application/json"}
        # middletime = time.time( )
        # httpClient = httplib.HTTPSConnection( ipaddr, port, timeout=None )
        # httpClient.request( "POST", url, data, headers )
        # response = httpClient.getresponse( )
        # print 'response:',response.read( )
        url1 = 'https://%s:%s%s' % (ipaddr, port, url)
        request = os.popen(
            r"curl -k -H 'Content-type:application/json' -X POST --data '%s' '%s' 2>/dev/null" % (data, url1) )
        print '[+]request:', request.read( )
    except Exception, e:
        print 'err',e
    # finally:
    # if httpClient:
    # httpClient.close( )
|
Bitcoin Recap 11/8/17 - 2x hard fork cancelled, bitcoin brokers are making multi-million dollar deals, and more!
While bitcoin set a new all-time high of $7,879 in the aftermath of confirmation that a controversial software proposal had been scrapped, its price fell just as quickly, erasing gains to hit a low of $7,070.
The explosion and maturation of the cryptocurrency market has institutions pouring money into the space. Firms such as DRW, a Chicago-based trader, and B2C2, a UK-based over-the-counter broker, are now regularly executing multi-million dollar trades.
We trust that our fellow community members will also behave with integrity and uphold agreements made, but in the unlikely event that the 2MB block size increase portion of Segwit2x fails to activate, Bitcoin.com will immediately shift all company resources to supporting Bitcoin Cash exclusively.
A former U.S. Secret Service agent has been sentenced to an additional prison term on charges associated with his theft of bitcoin seized from now-defunct dark market Silk Road.
Gurtej Singh Randhawa tried to buy a vehicle-borne improvised explosive device “with the potential to kill”.
Young people are often early indicators of the future, so how do younger people today feel about Bitcoin?
York Regional Police say that a new scam using the untraceable cryptocurrency Bitcoin is sweeping the region. One of 45 victims since April explained how she was deceived.
The SEC is continuing to step up its rhetoric on initial coin offerings (ICOs).
A cryptocurrency miner might have sucked up your Android phone’s power thanks to this Russian’s work.
A researcher has documented almost 2,500 sites that are actively running cryptocurrency mining code in the browsers of unsuspecting visitors, a finding that suggests the unethical and possibly illegal practice has only picked up steam since it came to light a few weeks ago.
Experts reportedly believe that the cybercriminal may be in cahoots with corrupt law enforcement officials to access sensitive data.
CME will apply limits to its bitcoin futures’ trading range when the product launches, the exchange says.
Bitcoin is notoriously volatile and the limits on the futures product could help reduce overall volatility.
The development of a less volatile bitcoin futures product may also help the case for a bitcoin exchange-traded fund.
Vitalik Buterin, the inventor of the ethereum blockchain, may have created too much of a good thing.
Long active in evaluating blockchain technology broadly, a European Central Bank (ECB) official took steps this week to stress the institution is closely watching cryptocurrency.
I think that my position on Ethereum is clear. ETH started as a money-grabbing project with very little in the way of future prospects. Since criticizing Ethereum long before its release, we have come a long way and ETH seems to have found its sea legs.
At BitPay, we must follow the chain with the most accumulated difficulty and we are committed to doing so. But we also ask that miners immediately stop signaling for segwit2x and send a clear signal to all users that the Bitcoin blockchain will remain operational.
Even if I knew nothing about how stupid the 2x hardfork was, at this point there are far too many developers I trust in the NO2X camp for me to support the hardfork.
The preferred outcome for the consortium is that the status quo chain ceases to exist, that its transactions fail to confirm.
Because this is neither a serious method of payment nor a good way to store capital. The bitcoin feeds on itself. There are no fundamental reasons for its price to reach such levels. What’s more – it is also used by criminals, for their shady business. I think that more and more countries will start to make cryptocurrency exchanges illegal like China did. New regulations will be adopted. So, this will find its end.
Today, Bitspark announced that it received funding from RGAx, the innovation accelerator of Reinsurance Group of America, Incorporated, one of the world’s largest life reinsurance companies headquartered in St. Louis, Missouri, United States.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Python motu client
#
# Motu, a high efficient, robust and Standard compliant Web Server for Geographic
# Data Dissemination.
#
# http://cls-motu.sourceforge.net/
#
# (C) Copyright 2009-2010, by CLS (Collecte Localisation Satellites) -
# http://www.cls.fr - and Contributors
#
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this library; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import io, sys, logging
def copy(sourceHandler, destHandler, callback = None, blockSize = 65535 ):
    """Copy the available content through the given handler to another one.

    Process can be monitored with the (optional) callback function.

    sourceHandler: the handler through which content is downloaded
    destHandler:   the handler into which data is written
    callback:      optional function called after each block read with the
                   total size read so far. Signature: f: sizeRead -> void
    blockSize:     the size of the block used to read data

    Returns the total size read.
    """
    read = 0
    while True:
        block = sourceHandler.read(blockSize)
        # An empty read (b'' or '') signals end-of-stream for both modes.
        if not block:
            break
        read += len(block)
        try:
            # A text destination cannot take raw bytes: decode first.
            # fix: the original decoded unconditionally for StringIO, which
            # raised (and silently swallowed) when the source was text-mode.
            if isinstance(destHandler, io.StringIO) and isinstance(block, bytes):
                if sys.version_info > (3, 0):
                    destHandler.write( str(block, 'utf-8') )
                else:
                    destHandler.write( unicode(block, 'utf-8') )
            else:
                destHandler.write(block)
        except Exception as inst:
            log = logging.getLogger("utils_stream:copy")
            log.error("Exception while copying remote data")
            log.error(" - Type = %s", type(inst))  # the exception instance
            if hasattr(inst, 'args'):
                log.error(" - Attributes = %s", inst.args)  # arguments stored in .args
            log.error(" - Full exception = %s", inst)
        # fix: the original called callback unconditionally and crashed
        # with TypeError when the (optional) callback was left as None.
        if callback is not None:
            callback(read)
    return read
|
Our greatest advantages are plenty of experience, professionalism and a relaxed attitude no matter where we are and what we are doing. We are responsible and professional, which makes us the right choice. We are devoted to our work with people, look forward to new experiences, have new and fresh visions and, most of all, we behave ecologically. The only traces we leave behind are our footprints.
|
import requests
import requests.adapters
import requests.exceptions
from gevent.pool import Pool
import logging
logger = logging.getLogger(__name__)
# HTTP header names used by the versioned (historical) tender API.
VERSION = 'X-Revision-N'  # revision number of a tender document
VERSION_HASH = 'X-Revision-Hash'  # hash of the current revision
# NOTE(review): PREV_VERSION duplicates VERSION_HASH — possibly meant a
# distinct previous-revision header; confirm against the API specification.
PREV_VERSION = 'X-Revision-Hash'
class APIClient(object):
    """Thin REST client for an OpenProcurement-style tender API.

    Holds a single ``requests.Session`` configured with basic auth, JSON
    headers, and a mounted ``HTTPAdapter`` so connections to the resource
    URL are pooled and retried. The constructor performs a HEAD request to
    obtain a server affinity cookie.
    """

    def __init__(self, api_key, api_host, api_version, **options):
        """Build the session and fetch the server cookie.

        options:
            historical (bool): request historical revisions of tenders
            resourse (str): resource name, default 'tenders'
                (NOTE: the 'resourse' spelling is part of the public
                options contract and is kept for compatibility)

        Raises requests.exceptions.HTTPError if the cookie request fails.
        """
        self.base_url = "{}/api/{}".format(api_host, api_version)
        self.session = requests.Session()
        if api_key:
            self.session.auth = (api_key, '')
        self.session.headers = {
            "Accept": "application/json",  # fix: was misspelled 'applicaiton/json'
            "Content-type": "application/json"
        }
        self.historical = options.get('historical', False)
        resourse = options.get('resourse', 'tenders')
        self.resourse_url = '{}/{}'.format(self.base_url, resourse)
        APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
                                                   pool_connections=50,
                                                   pool_maxsize=50)
        self.session.mount(self.resourse_url, APIAdapter)
        # retrieve a server cookie (sticks this session to one backend)
        resp = self.session.head("{}/{}".format(self.base_url, 'spore'))
        resp.raise_for_status()

    def get_tenders(self, params=None):
        """Fetch one feed page of tenders; returns parsed JSON or None."""
        if not params:
            params = {'feed': 'changes'}  # fix: was misspelled 'chages'
        resp = self.session.get(self.resourse_url, params=params)
        if resp.ok:
            return resp.json()

    def get_tender(self, tender_id, version=''):
        """Fetch one tender, optionally a specific historical revision.

        Returns a (revision, data) tuple on success, ('', {}) on failure
        or when the response carries no 'data' field.
        """
        args = dict()
        url = '{}/{}'.format(self.resourse_url, tender_id)
        if self.historical:
            url += '/historical'
            args.update(dict(headers={VERSION: version}))
        args.update(url=url)
        try:
            resp = self.session.get(**args)
            if resp.ok:
                data = resp.json().get('data', '')
                if data:
                    return resp.headers.get(VERSION, ''), data
        except requests.exceptions.HTTPError as e:
            # fix: logger.warn is a deprecated alias of logger.warning
            logger.warning('Request failed. Error: {}'.format(e))
        return '', {}
def get_retreive_clients(api_key, api_host, api_version, **kw):
    """Create a forward/backward pair of API clients sharing one cookie.

    Both clients are pointed at the same server by copying the first
    client's session cookie onto the second, so paginated forward and
    backward crawls stay consistent.

    Returns (origin_cookie, forward_client, backward_client).
    """
    def _make_client():
        return APIClient(api_key, api_host, api_version, **kw)

    fwd_client = _make_client()
    bwd_client = _make_client()
    shared_cookie = fwd_client.session.cookies
    bwd_client.session.cookies = shared_cookie
    return shared_cookie, fwd_client, bwd_client
|
This page contains 20 products guaranteed to work in the Canon Pixma MG6150.
I have a Canon Pixma MG6150 printer which takes 6 inks. If I buy your Multipack of Compatibles [PGI 525 & CLI 526 inks] how do I deal with the missing grey ink?
Can compatible inks block print heads?
It is unusual for compatible inks to block printheads, as it is for originals too, however in the unlikely event of a problem we fully guarantee all of our products, so we'd be able to help.
|
#! /usr/bin/env python
import sys
from socket import *
from time import sleep
from helper import getLocalIP
'''
Research code... may not work
'''
def main():
    """Connect to an eye-tracker API server over TCP and start calibration.

    The server host is taken from argv[1] when given, otherwise the local
    IP address is used (port 4242). Research code: may not work.
    """
    if len(sys.argv) > 1:
        HOST = sys.argv[1]
    else:
        HOST = getLocalIP()
    PORT = 4242  # eye-tracker API port
    BUFSIZ = 1024
    ADDR = (HOST, PORT)
    # trailing comma: keep the cursor on the same line (Python 2 print)
    print 'Attempting to connect to %s...' % repr(ADDR),
    sys.stdout.flush()
    tcpCliSock = socket(AF_INET, SOCK_STREAM)
    tcpCliSock.connect(ADDR)
    print 'connected!'
    # Eye-tracker API specific
    # NOTE(review): each command string carries a stray '"' after \r\n —
    # the device may tolerate it, but confirm against the API protocol spec.
    tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_SHOW" STATE="0" />\r\n"'))
    sleep(1)
    tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_SHOW" STATE="1" />\r\n"'))
    sleep(1)
    tcpCliSock.sendall(str.encode('<SET ID="CALIBRATE_START" STATE="1" />\r\n"'))
    #tcpCliSock.sendall(str.encode('\r\n"'))
    #tcpCliSock.sendall(str.encode('\r\n"'))
    #
    #import pdb; pdb.set_trace()
    #tcpCliSock.sendall(str.encode('<SET ID="ENABLE_SEND_POG_FIX" STATE="1" />\r\n"'))
    #tcpCliSock.sendall(str.encode('<SET ID="ENABLE_SEND_DATA" STATE="1" />\r\n"'))
    # Loop forever, echoing whatever the tracker sends back.
    # NOTE(review): the loop never breaks, so close() below is unreachable.
    while True:
        data = tcpCliSock.recv(1024)
        foo = bytes.decode(data)
        print 'got something', foo
    tcpCliSock.close()


if __name__ == '__main__':
    main()
|
Women have distinctive health-related issues. Unique health problems include maternity, menopause, and conditions of the feminine organs. Women can have a healthy gestation by obtaining correct, early and regular prenatal care. They are additionally advised to have tests for cervical cancer, carcinoma and bone density screenings. Complications of gestation include health issues that occur throughout gestation. They will involve the baby's health, the mother's health or both. Diet and nutrition play a major role throughout gestation. A vital nutrient diet includes protein-rich vegetarian dishes and calcium-rich recipes. The most effective time to try to conceive is throughout the 'fertile window' of the menstrual cycle, i.e. ovulation time.
Midwifery Nursing is a nursing strategy where it focuses on the fields of pregnant women health and child-birth. Those who are trained in midwifery have a strong knowledge about the pregnant women mind and their body works. They focus on developing bonds with their patients throughout their pregnancy period. They help their patients in process of child-birth and for the first few months of child birth because mothers have many doubts about the best ways for their new-born child. Nurses develop their confidence by using this specialized area. Their skills developed in practice are complemented with availability of our simulation models.
|
# This file is part of geometriki.
#
# geometriki is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# geometriki is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with geometriki, in a file named COPYING. If not,
# see <http://www.gnu.org/licenses/>.
import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons import tmpl_context as c
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from webhelpers.html.builder import literal
from geometriki.lib.base import BaseController, render
class ErrorController(BaseController):
    """Render error documents on demand.

    The ErrorDocuments middleware forwards here whenever the wrapped
    application returns an error status code. The behaviour can be tuned
    via the ErrorDocuments middleware parameters in config/middleware.py.
    """

    def document(self):
        """Render the error page for the original failing response."""
        original = request.environ.get('pylons.original_response')
        code = cgi.escape(request.GET.get('code', ''))
        message = cgi.escape(request.GET.get('message', ''))
        if original:
            if not code:
                code = cgi.escape(str(original.status_int))
            message = literal(original.status) or message
        if not code:
            raise Exception('No status code was found.')
        c.code = code
        c.message = message
        return render('/derived/error/document.mako')

    def img(self, id):
        """Serve one of Pylons' bundled stock images."""
        return self._serve_file('media/img/%s' % id)

    def style(self, id):
        """Serve one of Pylons' bundled stock stylesheets."""
        return self._serve_file('media/style/%s' % id)

    def _serve_file(self, path):
        """Serve *path* through Paste's FileApp WSGI application."""
        request.environ['PATH_INFO'] = '/%s' % path
        return forward(PkgResourcesParser('pylons', 'pylons'))
|
A project is under way in the NHS Cervical Screening Programme to evaluate the use of HPV testing as the primary screening tool in the programme, with cytology triage. This survey study aims to measure the psychological impact of HPV primary testing by comparing women taking part in the pilot with those in the routine programme.
The NHS Cervical Screening Programme is using HPV primary testing across six sites in England: London North West, North Bristol, Sheffield, Norfolk and Norwich, Manchester and Liverpool.
University College London is evaluating HPV primary testing in the NHS Cervical Screening Programme. In particular, we are interested in how women feel about their screening test results. Some women in the study will have had the new HPV test; others will not – we are interested in the views of both groups. This study may help decide whether HPV primary screening should be used in the cervical screening programme across England.
In order to do this, the NHS has agreed to pass on names and addresses of some patients to a secure printing and mailing company (CHP Docmail Ltd). CHP Docmail Ltd complies with the Data Protection Act (1998) and Information Governance guidelines to make sure these details are kept confidential - they also destroy these details within 30 days of receiving them. The reason for this is so that CHP Docmail Ltd can post out invitation packs to selected patients who have taken part in HPV Primary Screening - this will include a very small number of women from the six NHS trusts listed above. When women receive this invitation pack, they can then decide whether they want to take part in the study. If they do, they need to complete a consent form and questionnaires, and post these directly to UCL using a pre-paid envelope.
If you do not want the NHS to pass on your name and address to CHP Docmail Ltd for the purposes of inviting you to take part in this research, you can opt-out by contacting your local NHS trust (details at the end of this page). This is only relevant if you live in one of the six areas with HPV primary screening, and if you are due for cervical screening between June and December 2016. You can quote the name of this study (“evaluating the psychological impact of HPV primary screening”) and tell the relevant person that you would like to opt-out. This will not affect your medical or legal rights.
• London North West Healthcare NHS Trust: Tanya Levine on tanya.levine[at]nhs.net or 020 8869 3314.
• North Bristol NHS Trust: Katherine Hunt on Katherine.Hunt[at]nbt.nhs.uk.
• Sheffield Teaching Hospitals NHS Trust: Kay Ellis on kay.ellis[at]sth.nhs.uk or 0114 271 3697.
• Norfolk and Norwich NHS Trust: Viki Frew on viki.frew[at]nnuh.nhs.uk or 01603 286033.
• Central Manchester University Hospital NHS Foundation Trust: Miles Holbrook on miles.holbrook[at]cmft.nhs.uk or 0161 276 6475.
• Royal Liverpool and Broadgreen University Hospitals NHS Trust: Christopher Evans on Chris.Evans[at]rlbuht.nhs.uk or 0151 706 4581.
If you would like more information on the study in general, please contact Dr Jo Waller at UCL on j.waller[at]ucl.ac.uk or 0207 769 5958. UCL will not know any of your personal details unless you choose to take part, so we will not be able to opt you out (only your NHS trust can do this). You do not have to tell UCL your name or details if you want to contact us to discuss the study.
The project is funded for 18 months by a Public Health England award to Dr Jo Waller, Dr Laura Marlow and Dr Alice Forster, ending in September 2017.
Contact: Dr Jo Waller (j.waller[at]ucl.ac.uk) or Emily McBride (e.mcbride[at]ucl.ac.uk).
|
"""
This is a script to create a txt-file from every build repairnator has patched
with the following info:
Build URL
Commit URL
All patches along with the name of the tool that generated them
Requirements:
selenium webdriver for python
pymongo
geckodriver (for linux most likely)
To use:
Fill in the constants below with the appropriate info
"""
import pymongo
from pymongo import MongoClient
from selenium import webdriver
"""
Constants used in the mongodb connection
"""
user="" # Username for the database
pwd="" # Password for the above used
db="" # Name of the authentication database (may be left empty)
ip="" # Ip-address of the database
port="" # Port of the database
"""
If one wishes to specify dates, fill these in. By default it will run
for every script
"""
dateFrom=None # From which date to look for patched builds
dateTo=None # To which date to look for patched builds
"""
Query for each document in inspector and write to a file.
"""
def patches_query(mongoDB, inspectorJson):
    """Return all patch documents generated for the given inspector build."""
    query = {"buildId": inspectorJson['buildId']}
    return mongoDB.patches.find(query)
"""
We will be parsing html, so we need to change the < and > and & icons
"""
def replace_spec_chars(string):
    """Escape HTML special characters so diffs can be embedded in markup.

    Performs a single-pass translation of &, <, >, " and ' into their
    HTML entity equivalents.
    """
    entity_map = str.maketrans({
        "&": "&amp;",
        "<": "&lt;",
        ">": "&gt;",
        "\"": "&quot;",
        "'": "&#39;",
    })
    return string.translate(entity_map)
"""
Query the inspector once and return all documents
"""
def inspector_query(mongoDB):
    """Query the inspector collection once and return all PATCHED builds.

    The module-level dateFrom/dateTo globals, when set, restrict the
    query to builds finished inside that window.
    """
    global dateFrom
    global dateTo
    inspectorFilter = {"status": "PATCHED"}
    date_range = {}
    if dateFrom is not None:
        date_range["$gt"] = dateFrom
    if dateTo is not None:
        date_range["$lt"] = dateTo
    if date_range:
        inspectorFilter["buildFinishedDate"] = date_range
    return mongoDB.inspector.find(inspectorFilter).batch_size(50)
"""
Build the string that will ultimately be written to the txt-file
patchDocs - all docs with the same buildId, so that we gather all patches
diffs - a string of the different diffse
"""
def file_builder(patchDocs, inspectorJson):
    """Write one HTML file summarising a patched build.

    patchDocs     - all patch documents sharing the build's buildId
    inspectorJson - the inspector document of the build

    Returns 0 on success, or None when the Travis build page does not link
    to exactly one GitHub commit (in which case no file is written).
    """
    global driver
    buildURL = inspectorJson['travisURL']
    driver.get(buildURL)
    links = driver.find_elements_by_xpath(
        "//a[contains(@href, 'github') and contains(@href, 'commit')]")
    if len(links) != 1:
        return None
    commitURL = links[0].get_attribute("href")
    # Where we do have a commit url we build the html file
    # (context manager guarantees the file is closed even on error).
    with open(str(inspectorJson['buildId']) + ".html", "w") as f:
        # Write commit and travis url
        f.write("<html>\n<body>\n")
        f.write("<p><a href=\"" + buildURL + "\" id=\"travis-url\">" + buildURL + "</a></p>\n")
        f.write("<p><a href=\"" + commitURL + "\" id=\"commit-url\">" + commitURL + "</a></p>\n")
        index = 0
        # loop variable renamed from `json` to avoid shadowing the module name
        for patchDoc in patchDocs:
            diff = patchDoc['diff']
            tool = patchDoc['toolname']
            # fix: validate diff/tool *before* escaping — the original called
            # replace_spec_chars(None) and crashed when a diff was missing.
            if diff is not None and diff != "" and isinstance(diff, str) and tool is not None:
                diff = replace_spec_chars(diff)
                f.write("<pre>" + tool +
                        "<code id=\" " + str(index) + "\" class=\"patch\" title=\"" + tool + "\">\n"
                        + diff +
                        "</code></pre>\n")
                index += 1
        f.write("</body>\n</html>\n")
    return 0
"""
Fetch info and write a file for each build found
"""
def main():
    """Connect to the repairnator database and emit one file per patched build."""
    global db, ip, port, user, pwd
    # Connect by the connection String URI
    uri = "mongodb://" + user + ":" + pwd + "@" + ip + ":" + port + "/" + db
    mongoDB = MongoClient(uri).repairnator
    for inspectorJson in inspector_query(mongoDB):
        file_builder(patches_query(mongoDB, inspectorJson), inspectorJson)
        print(inspectorJson['buildId'])
# Start a webdriver to make sure we can fetch the correct url
# (module-level on purpose: file_builder() reads this via `global driver`)
driver = webdriver.Firefox()
driver.implicitly_wait(5)  # wait up to 5 seconds for page elements to appear
main()
|
This Beach Road Indian Restaurant is owned by Ravinder, who has lived in Pattaya since 1987 and is now a Thai citizen.
Ravinder opened this business way back in 2001, bringing employment to seven local people in the process. The staff have been specifically trained as chefs or have received in house training from existing and experienced employees, when it comes to customer service.
Service of course is always with a smile and courteous, as you would expect from an Indian restaurant.
The external part of the building is grey in colour with large panes of glass featuring the restaurant’s name in bold red writing. It faces directly on to the beach, offering a sea view towards Koh Lan.
Inside you find an elegant diner, with seating for up to fifty customers, in this fully air conditioned venue. The tables are made of wood with a black reflective surface and cushioned chairs to match.
The lighting is subtle, delivering a cosy atmosphere from the ambience provided. Intricate ceiling decor and coloured light shades provide the finishing touch to this desirable setting.
The bar and service area can be found towards the rear of the restaurant.
A stylish setting is not much use if you do not have a good range of food to complement the setting. Fortunately Shere-e Punjab meets the criteria perfectly.
They offer a range of well known Indian dishes, beautifully presented to the diner by expert chefs.
Select from seafood, lamb, chicken, rices, breads and of course vegetarian options. Expect to see your favourite sauces and styles such as Korma, Masala, Tandoori, Vindaloo, Madras and Indian Curry on a specialist menu.
The food tastes delicious with aromas to die for as it arrives at your table.
There is something for everyone on their menu.
The bar offers a good range of wines, bottled beers, spirits, liqueurs and soft drinks to accompany your meal.
A fantastic setting, where customers can relax in a stylish venue to enjoy delicious food.
If Indian food is to your liking, then this one will deliver what you are seeking from such a venue.
It’s a really fantastic Indian restaurant in Muang Pattaya. I just love the Food. Will come again & again.
|
# This python file uses the following encoding: utf-8
import random
import os
import re
import imaplib
import smtplib
import email
import email.errors
from email.mime.text import MIMEText
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
import ConfigParser
class KComEmilio():
    """Mail gateway combining an IMAP4-SSL inbox with an SMTP (STARTTLS) sender.

    All connection parameters are read from a config file (section 'Correo').
    When configured as send-only ('SoloEnvio'), the IMAP side is disabled and
    every inbox operation raises Error. Usable as a context manager.
    """

    class Excepcion(Exception):
        """ General-purpose exception and base class for the others """
        pass

    class Error(Excepcion):
        """
        For errors that imply the action cannot be completed at the
        engine level
        """
        pass

    def __init__(self, FichConfig):
        # Read every connection setting from the 'Correo' section of the
        # given configuration file.
        cfg = ConfigParser.SafeConfigParser()
        cfg.read(FichConfig)
        seccion = 'Correo'
        self._DireccionServidorIMAP4 = cfg.get(seccion,
                                               'DireccionServidorIMAP4')
        self._DireccionServidorSMTP = cfg.get(seccion,
                                              'DireccionServidorSMTP')
        self._UserIMAP4 = cfg.get(seccion, 'UserIMAP4')
        self._UserSMTP = cfg.get(seccion, 'UserSMTP')
        self._PassIMAP4 = cfg.get(seccion, 'PassIMAP4')
        self._PassSMTP = cfg.get(seccion, 'PassSMTP')
        self._MailboxDepuracion = cfg.get(seccion, 'MailboxDepuracion')
        self._SoloEnvio = cfg.getboolean(seccion, 'SoloEnvio')
        self.IMAP4 = None   # live imaplib.IMAP4_SSL connection, or None
        self.SSMTP = None   # live smtplib.SMTP connection, or None
        self.m_MensajesPendientes = []
        self.m_IdxMensajesAEliminar = []
        self._SubjectPorDefecto = cfg.get(seccion, 'Subject')
        # compile the regexes for FROMaIDP
        # matches NIP@celes.unizar.es or NIP@unizar.es (NIP = 6 digits)
        self.m_FaI = re.compile(r'\d\d\d\d\d\d@(celes\.)?unizar\.es')
        self.m_IaF = re.compile(r'\d\d\d\d\d\d')
        # variable to store the IMAP namespace prefix
        self._Namespace = ''

    def Conectado(self):
        """Return True when every required connection is up (SMTP always;
        IMAP too unless instantiated as send-only)."""
        Res = self.SSMTP is not None
        if not self._SoloEnvio:
            Res = Res and self.IMAP4 is not None
        return Res

    def IniciaConexion(self):
        """Open the SMTP connection, and the IMAP one unless send-only."""
        self._IniciaConexionSMTP()
        if not self._SoloEnvio:
            self._IniciaConexionIMAP()

    def _IniciaConexionSMTP(self):
        """Connect, STARTTLS and log in to the SMTP server; Error on failure."""
        try:
            Txt = u'No se ha podido establecer correctament la conexión '
            Txt += u'con el servidor SMTP'
            # the constructor calls connect
            self.SSMTP = smtplib.SMTP()
            Res = self.SSMTP.connect(self._DireccionServidorSMTP)
            Res = self.SSMTP.ehlo()
            Res = self.SSMTP.starttls()
            # NOTE(review): missing () — this binds the method but never
            # calls it; the post-STARTTLS EHLO was likely intended.
            Res = self.SSMTP.ehlo
            Res = self.SSMTP.login(self._UserSMTP, self._PassSMTP)
        except smtplib.SMTPException as e:
            self.SSMTP = None
            Txt += '\n' + e.message
            raise self.Error(Txt)
        except Exception as e:
            self.SSMTP = None
            Txt += '\n' + e.message
            raise self.Error(Txt)

    def _IniciaConexionIMAP(self):
        """Log in over IMAP4-SSL, read the namespace and select INBOX."""
        try:
            Txt = 'No se ha podido establecer correctamente la conexion '
            Txt += 'con el servidor IMAP'
            self.IMAP4 = imaplib.IMAP4_SSL(self._DireccionServidorIMAP4)
            if self.IMAP4.login(self._UserIMAP4,
                                self._PassIMAP4)[0] != 'OK':
                raise self.Error(Txt)
            # obtain the namespace (parse the first quoted prefix out of
            # the '(("prefix" "delim")) ...' NAMESPACE response)
            typ, dat = self.IMAP4.namespace()
            if typ == 'OK':
                aux = dat[0][dat[0].find('((')+2:dat[0].find('))')]
                ini = aux.find('"')
                fin = aux.find('"', ini + 1)
                self._Namespace = aux[ini+1:fin]
            else:
                raise self.Error(Txt)
            # select INBOX
            typ, dat = self.IMAP4.select()
            if typ != 'OK':
                raise self.Error(Txt)
        except imaplib.IMAP4.error as e:
            Txt += '\n' + e.message
            raise self.Error(Txt)
        except Exception as e:
            Txt += '\n' + e.message
            raise self.Error(Txt)

    def TerminaConexion(self):
        """Close both connections, expunging deleted IMAP messages first."""
        if self.SSMTP is not None:
            self.SSMTP.close()
            self.SSMTP = None
        if self.IMAP4 is not None:
            self.IMAP4.expunge()
            self.IMAP4.close()
            self.IMAP4.logout()  # close the connection
            self.IMAP4 = None

    # Functions that turn the class into a context manager
    def __enter__(self):
        self.IniciaConexion()
        return self

    def __exit__(self, ext, exv, trb):
        self.TerminaConexion()
        return False  # so that raised exceptions are propagated

    def _CreaBandejaParaDepuracion(self):
        """
        Checks whether the debugging mailbox exists and, if it does not,
        creates (and subscribes to) it
        """
        if self._SoloEnvio:
            raise self.Error('Instanciado como "Solo Envio"')
        NamesMailBox = self._Namespace + self._MailboxDepuracion
        # check whether it already exists
        typ, dat = self.IMAP4.list()
        if typ == 'OK':
            lfold = []
            # extract the quoted mailbox name at the end of each LIST line
            for k in dat:
                ult = k.rfind('"')
                lfold.append(k[k[:ult].rfind('"')+1:ult])
            if NamesMailBox not in lfold:
                typ, dat = self.IMAP4.create(NamesMailBox)
                if typ == 'OK':
                    typ, dat = self.IMAP4.subscribe(NamesMailBox)

    def _ObtenListaDeIdentificadoresDeMensaje(self):
        """
        Returns a list with the message UIDs as strings;
        an empty list in case of failure
        """
        if self._SoloEnvio:
            raise self.Error('Instanciado como "Solo Envio"')
        luid = []
        typ, dat = self.IMAP4.uid('SEARCH', 'ALL')
        if typ == 'OK':
            luid = dat[0].split()
        return luid

    def _ObtenCuerpo(self, Mes):
        """Recursively extract the first decodable body of a message.

        Returns (True, text) on success or (False, '') when no part could
        be interpreted.
        """
        if not Mes.is_multipart():
            pl = Mes.get_payload(decode = True)
            # if Mes.get_content_type() == 'text/plain':
            codec = Mes.get_content_charset()
            try:
                if codec is None:
                    aux = pl
                else:
                    aux = pl.decode(codec)
            except:
                # fall back to the raw payload on any decoding error
                aux = pl
            Salida = (True, aux)
        else:
            pl = Mes.get_payload()
            Salida = (False, '')
            # recurse into sub-parts until one yields a body
            for submes in pl:
                Salida = self._ObtenCuerpo(submes)
                if Salida[0]:
                    break
        return Salida

    def ObtenMensajes(self, luid = None):
        """Fetch the given UIDs (all of them by default) and return a dict
        UID -> {'FROM', 'TAG_PAX', 'BODY'}."""
        if self._SoloEnvio:
            raise self.Error('Instanciado como "Solo Envio"')
        if luid is None:
            luid = self._ObtenListaDeIdentificadoresDeMensaje()
        # Download them in random order so that a message raising an
        # (unhandled) exception cannot block all the others
        random.shuffle(luid)
        DMen = {}
        for it in luid:
            typ, dat = self.IMAP4.uid('FETCH', it, '(RFC822)')
            if typ == 'OK':
                Dit = {}
                Mes = email.message_from_string(dat[0][1])
                From = Mes['From']
                Dit['FROM'] = From
                # prefer the address inside angle brackets when present
                ini, fin = From.find('<'), From.find('>')
                if ini != -1 and fin != -1:
                    Dit['TAG_PAX'] = self._FROMaIDP(From[ini+1:fin])
                else:
                    Dit['TAG_PAX'] = self._FROMaIDP(From)
                Salida = self._ObtenCuerpo(Mes)
                Dit['BODY'] = '--NO SE HA INTERPRETADO EL MENSAJE--'
                if Salida[0]:
                    Dit['BODY'] = Salida[1]
                DMen[it] = Dit
        return DMen

    def MarcaMensajesParaBorrar(self, luid):
        """Flag every UID in *luid* as Deleted (expunged on disconnect)."""
        if self._SoloEnvio:
            raise self.Error('Instanciado como "Solo Envio"')
        for it in luid:
            typ, dat = self.IMAP4.uid('STORE', it, '+FLAGS', '(\\Deleted)')

    def MueveMensajesADepuracion(self, luid):
        """Copy the given UIDs to the debugging mailbox, then mark the
        successfully copied ones for deletion."""
        if self._SoloEnvio:
            raise self.Error('Instanciado como "Solo Envio"')
        luidBorrar = []
        for it in luid:
            MBDepuracion = self._Namespace + self._MailboxDepuracion
            typ, dat = self.IMAP4.uid('COPY', it, MBDepuracion)
            if typ == 'OK': luidBorrar.append(it)
        self.MarcaMensajesParaBorrar(luidBorrar)

    def _FROMaIDP(self, FROM):
        # Reduce a NIP@(celes.)unizar.es address to its 6-digit NIP;
        # anything else is returned unchanged.
        if self.m_FaI.match(FROM) is not None: IDP = FROM[0:6]
        else: IDP = FROM
        return IDP

    def _IDPaFROM(self, IDP):
        # Expand a 6-digit NIP back into a full address.
        #if self.m_IaF.match(IDP) is not None: FROM = IDP + '@celes.unizar.es'
        if self.m_IaF.match(IDP) is not None: FROM = IDP + '@unizar.es'
        else: FROM = IDP
        return FROM

    def EnviaCorreo(self, Dest, Txt, Adjuntos = (), Subject = None):
        """
        Sends a reply e-mail
        Dest -> recipient (in short or long form)
        Txt -> main message text
        Adjuntos -> collection of attachments of the form
        ((FileName, (Blob, FMT)), ...) with FMT one of PDF/ODS/XLS/TXT
        Subject -> optional; falls back to the configured default
        Returns (success, info) where info describes the failure, if any.
        """
        Exito, Inf = True, u''
        # if there is no recipient, bail out
        if Dest.strip() == '':
            Exito, Inf = False, 'No se han establecido destinatarios'
        # Message construction
        if Exito:
            try:
                OMenTxt = MIMEText(Txt, _subtype = 'plain', _charset = 'utf-8')
                if Adjuntos is None or len(Adjuntos) == 0: # no attachments?
                    OMen = OMenTxt
                else: # the message carries attachments
                    OMen = MIMEMultipart()
                    OMen.attach(OMenTxt)
                    for nom, (blow, fmt) in Adjuntos:
                        if fmt.upper() == 'PDF':
                            OMenPdf = MIMEApplication( blow, _subtype = 'pdf')
                            OMenPdf.add_header('Content-Disposition',
                                'attachment', filename = nom + '.pdf')
                            OMen.attach(OMenPdf)
                        elif fmt.upper() == 'ODS':
                            OMenOds = MIMEApplication( blow, _subtype = 'ods')
                            OMenOds.add_header('Content-Disposition',
                                'attachment', filename = nom + '.ods')
                            OMen.attach(OMenOds)
                        elif fmt.upper() == 'XLS':
                            OMenXls = MIMEApplication( blow, _subtype = 'xls')
                            OMenXls.add_header('Content-Disposition',
                                'attachment', filename = nom + '.xls')
                            OMen.attach(OMenXls)
                        elif fmt.upper() == 'TXT':
                            OMenTxt = MIMEText(blow,
                                'plain', 'utf-8')
                            OMenTxt.add_header('Content-Disposition',
                                'attachment', filename = nom + '.txt')
                            OMen.attach(OMenTxt)
                        else:
                            Txt = 'no se como hacer un attach del formato'
                            Txt += ' %s'%fmt
                            raise self.Error(Txt)
                OMen['To'] = self._IDPaFROM(Dest)
                OMen['From'] = self._UserSMTP
                if Subject is None:
                    Subject = self._SubjectPorDefecto
                OMen['Subject'] = Subject
            except email.errors.MessageError as e:
                Inf = 'Error en la composición del mensaje: %s'%e
                Exito = False
        # Sending the message
        if Exito:
            try:
                self.SSMTP.sendmail(self._UserSMTP, OMen['To'],
                                    OMen.as_string())
            except Exception as e:
                Inf = 'Error en el envío del mensaje: %s'%e
                Exito = False
        return Exito, Inf
if __name__ == "__main__":
MEN = u"""
ACCION = CORRECCION
TAREA = TAR_01
PIG = 3.2
"""
with KComEmilio() as KCE:
#KCE.EnviaMensajeTexto('kbc@unizar.es', MEN)
#KCE.EnviaMensajeTexto('kbc@unizar.es', MEN)
#KCE.EnviaMensajeTexto('kbc@unizar.es', MEN)
#KCE._CreaBandejaParaDepuracion()
luid = KCE._ObtenListaDeIdentificadoresDeMensaje()
#Dit = KCE._ObtenMensajes(luid)
#print Dit
KCE._MarcaMensajesParaBorrar(luid)
#KCE._MueveMensajesADepuracion(luid)
"""
Dest = 'msamplon@unizar.es'
Txt = 'yeeepa'
Subj = 'probando yepa'
with open('DatosParaPruebas/PE01_ENU.pdf', 'rb') as fp:
blw = fp.read()
Adj = (('yepa1.pdf', (blw, 'PDF')),
('yepa2.pdf', (blw, 'PDF')),
('yepa3.pdf', (blw, 'PDF')),
('yepa4.pdf', (blw, 'PDF')),
('yepa5.pdf', (blw, 'PDF')),
('yepa6.pdf', (blw, 'PDF')))
print KCE.EnviaCorreoRespuesta(Dest, Txt, Subject = Subj, Adjuntos = Adj)
"""
|
It is now possible to watch videos of your favourite artists without using Internet and just by downloading them. Kastor All Video Downloader is a a software that allows you to download streaming videos online.
With Kastor All Video Downloader, you can transfer video streaming broadcast on DailyMotion, Vimeo, Google video, YouTube, Yahoo video, Megavidéos, etc. What is interesting is that Kastor All Video Downloader gives you choices for the output format. Thus, it supports formats like AVI, WMV, MP3, FLV, MP4, AAC, iPhone, iPad, and many others. There is nothing difficult in using Kastor All Video Downloader. Indeed, just look for the file on the web, then, paste the URL on the interface. After that, you need to choose the output format, click on the "Download" button, and voila. Kastor All Video Downloader is completely configurable, which gives you the ability to run multiple downloads simultaneously (20 in all). In addition, you can modify the original settings to select the language.
The software is downloadable free of charge.
Latest update on September 28, 2016 at 04:41 AM.
|
import numpy as np
import gc
import os.path as osp
from rllab.misc import logger
from rllab.misc import tensor_utils
import collections
from functools import reduce
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sandbox.snn4hrl.bonus_evaluators.base import BonusEvaluator
class GridBonusEvaluator(BonusEvaluator):
    def __init__(self,
                 env_spec=None,
                 mesh_density=50,
                 visitation_bonus=0,
                 snn_H_bonus=0,
                 virtual_reset=False,  # the paths are split by latents and every switch gets the robot to 0 (xy,ori)
                 switch_lat_every=0,
                 survival_bonus=0,
                 dist_from_reset_bonus=0,
                 start_bonus_after=0):
        """Bonus evaluator that grids (x, y) center-of-mass visitations.

        mesh_density: grid cells per unit of distance
        visitation_bonus / snn_H_bonus / survival_bonus /
        dist_from_reset_bonus: weights of the corresponding reward
            bonuses (0 disables each of them)
        switch_lat_every: number of steps between latent switches
        start_bonus_after: steps after each switch before bonuses count
        """
        self.mesh_density = mesh_density
        # largest |coordinate| seen so far (rounded up); set in fit_before_process_samples
        self.furthest = 0
        self.visitation_all = np.zeros((1, 1), dtype=int)
        self.num_latents = 0  # this will simply not be used if there are no latents (the same for the following 2)
        self.dict_visit = collections.OrderedDict()  # keys: latents (int), values: np.array with number of visitations
        self.visitation_by_lat = np.zeros((1, 1), dtype=int)  # used to plot: matrix with a number for each lat/rep
        self.visitation_bonus = visitation_bonus
        self.snn_H_bonus = snn_H_bonus
        self.virtual_reset = virtual_reset
        self.switch_lat_every = switch_lat_every
        self.survival_bonus = survival_bonus
        self.dist_from_reset_bonus = dist_from_reset_bonus
        self.start_bonus_after = start_bonus_after
        # in case I'm gridding all the obs_dim (not just the com) --> for this I should use hashing, or too high dim
        # NOTE(review): obs_dim is computed here but never stored or used.
        if env_spec:
            obs_dim = env_spec.observation_space.flat_dim
def fit_before_process_samples(self, paths):
if 'env_infos' in paths[0].keys() and 'full_path' in paths[0]['env_infos'].keys():
paths = [tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path']) for path in paths]
if 'env_infos' in list(paths[0].keys()) and 'com' in list(paths[0]['env_infos'].keys()):
coms_xy = [np.array(path['env_infos']['com'][:, 0:2]) for path in paths] # no z coord
else:
coms_xy = [np.array(path['observations'][:, -3:-1])[:, [1, 0]] for path in paths]
if self.virtual_reset: # change the com according to switch_lat_every or resets
for k, com_xy in enumerate(coms_xy):
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = paths[k]['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
self.furthest = np.ceil(np.max(np.abs(np.concatenate(coms_xy))))
# now translate and scale the coms!
coms = [np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int) for com_xy in coms_xy]
if 'agent_infos' in list(paths[0].keys()) and (('latents' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['latents'])) or
('selectors' in list(paths[0]['agent_infos'].keys())
and np.size(paths[0]['agent_infos']['selectors']))):
selectors_name = 'selectors' if 'selectors' in list(paths[0]['agent_infos'].keys()) else 'latents'
self.num_latents = np.size(paths[0]["agent_infos"][selectors_name][0])
# set all the labels for the latents and initialize the entries of dict_visit
size_grid = int(2 * self.furthest * self.mesh_density + 1)
for i in range(self.num_latents): # use integer to define the latents
self.dict_visit[i] = np.zeros((size_grid, size_grid))
lats = [[np.nonzero(lat)[0][0] for lat in path['agent_infos'][selectors_name]]
for path in paths] # list of all lats by idx
for k, com in enumerate(coms): # this iterates through paths
start = 0
for i, xy in enumerate(com):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
pass
else:
self.dict_visit[lats[k][i]][tuple(xy)] += 1
self.visitation_all = reduce(np.add, [visit for visit in self.dict_visit.values()])
else: # If I don't have latents. I also assume no virtual reset and no start_bonus_after!!
self.visitation_all = np.zeros(
(2 * self.furthest * self.mesh_density + 1, 2 * self.furthest * self.mesh_density + 1))
for com in np.concatenate(coms):
self.visitation_all[tuple(com)] += 1
def predict_count(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
counts = []
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
counts.append(np.inf) # this is the way of zeroing out the reward for the first steps
else:
counts.append(self.visitation_all[tuple(com)])
return 1. / np.maximum(1., np.sqrt(counts))
def predict_entropy(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
freqs = []
lats = [np.nonzero(lat)[0][0] for lat in path['agent_infos']['latents']]
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
freqs.append(
1.) # this is tricky because it will be higher than the other rewards!! (negatives) -> at least bonus for staying alife until the transition
else:
freqs.append(self.dict_visit[lats[i]][tuple(com)] / self.visitation_all[tuple(com)])
return np.log(freqs)
def predict_dist_from_reset(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
if 'env_infos' in list(path.keys()) and 'com' in list(path['env_infos'].keys()):
com_xy = np.array(path['env_infos']['com'][:, 0:2])
else:
com_xy = np.array(path['observations'][:, -3:-1])[:, [1, 0]]
if self.virtual_reset: # change the com according to switch_lat_every or resets
i = self.start_bonus_after
while i < len(com_xy):
start = i
ori = path['env_infos']['ori'][i - self.start_bonus_after]
c = np.float(np.cos(ori))
s = np.float(np.sin(ori))
R = np.matrix('{} {}; {} {}'.format(c, -s, s, c))
while i < len(com_xy) and i - start < self.switch_lat_every - self.start_bonus_after:
i += 1
com_xy[start:i] = np.dot(R, com_xy[start:i].T).T
xy = com_xy[start]
com_xy[start:i] -= xy
while i < len(com_xy) and i - start < self.switch_lat_every: # skip some! compare to above
i += 1
# now translate and scale the coms!
coms = np.ceil((com_xy + self.furthest) * self.mesh_density).astype(int)
dists_from_reset = []
start = 0
for i, com in enumerate(coms):
if i - start == self.switch_lat_every:
start = i
if i - start < self.start_bonus_after:
dists_from_reset.append(
0.) # this is tricky because it will be higher than the other rewards!! (negatives) -> at least bonus for staying alife until the transition
else:
dists_from_reset.append(np.linalg.norm(com - coms[start + self.start_bonus_after]))
return np.array(dists_from_reset)
def predict(self, path):
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
expanded_path = tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path'])
else: # when it comes from log_diagnostics it's already expanded (or if it was never aggregated)
expanded_path = path
bonus = self.visitation_bonus * self.predict_count(expanded_path) + \
self.dist_from_reset_bonus * self.predict_dist_from_reset(expanded_path)
if self.snn_H_bonus: # I need the if because the snn bonus is only available when there are latents
bonus += self.snn_H_bonus * self.predict_entropy(expanded_path)
total_bonus = bonus + self.survival_bonus * np.ones_like(bonus)
if 'env_infos' in path.keys() and 'full_path' in path['env_infos'].keys():
aggregated_bonus = []
full_path_rewards = path['env_infos']['full_path']['rewards']
total_steps = 0
for sub_rewards in full_path_rewards:
aggregated_bonus.append(np.sum(total_bonus[total_steps:total_steps + len(sub_rewards)]))
total_steps += len(sub_rewards)
total_bonus = aggregated_bonus
return np.array(total_bonus)
def fit_after_process_samples(self, samples_data):
pass
    def log_diagnostics(self, paths):
        """Render a visitation map for *paths*, save it as
        ``visitation_Gbonus.png`` in the snapshot dir, and record visitation /
        bonus statistics via the tabular logger.
        """
        # Work on flattened per-step paths if they arrived aggregated.
        if 'env_infos' in paths[0].keys() and 'full_path' in paths[0]['env_infos'].keys():
            paths = [tensor_utils.flatten_first_axis_tensor_dict(path['env_infos']['full_path']) for path in paths]
        fig, ax = plt.subplots()
        overlap = 0  # keep track of the overlap
        delta = 1. / self.mesh_density
        # Mesh spanning [-furthest, furthest] in both axes at grid resolution.
        y, x = np.mgrid[-self.furthest:self.furthest + delta:delta, -self.furthest:self.furthest + delta:delta]
        # Latent-aware plot only when the paths actually carry latents/selectors.
        if 'agent_infos' in list(paths[0].keys()) and (('latents' in list(paths[0]['agent_infos'].keys())
                                                       and np.size(paths[0]['agent_infos']['latents'])) or
                                                      ('selectors' in list(paths[0]['agent_infos'].keys())
                                                       and np.size(paths[0]['agent_infos']['selectors']))):
            # fix the colors for each latent
            num_colors = self.num_latents + 2  # +2 for the 0 and Repetitions NOT COUNTING THE WALLS
            # create a matrix with entries corresponding to the latent that was there (or other if several/wall/nothing)
            size_grid = int(2 * self.furthest * self.mesh_density + 1)
            self.visitation_by_lat = np.zeros(
                (size_grid, size_grid))
            for i, visit in self.dict_visit.items():
                lat_visit = np.where(visit == 0, visit, i + 1)  # transform the map into 0 or i+1
                self.visitation_by_lat += lat_visit
                # NOTE(review): np.where with a single argument returns index
                # arrays, so this sums *indices* of overlapping cells rather
                # than counting them -- confirm this is the intended metric.
                overlap += np.sum(np.where(self.visitation_by_lat > lat_visit))  # add the overlaps of this latent
                self.visitation_by_lat = np.where(self.visitation_by_lat <= i + 1, self.visitation_by_lat,
                                                  num_colors - 1)  # mark overlaps
            cmap = plt.get_cmap('nipy_spectral', num_colors)
            map_plot = ax.pcolormesh(x, y, self.visitation_by_lat, cmap=cmap, vmin=0.1,
                                     vmax=self.num_latents + 1)  # before 1 (will it affect when no walls?)
            # Place one colorbar tick at the centre of each discrete color band.
            color_len = (num_colors - 1.) / num_colors
            ticks = np.arange(color_len / 2., num_colors - 1, color_len)
            cbar = fig.colorbar(map_plot, ticks=ticks)
            latent_tick_labels = ['latent: ' + str(i) for i in list(self.dict_visit.keys())]
            cbar.ax.set_yticklabels(
                ['No visitation'] + latent_tick_labels + ['Repetitions'])  # horizontal colorbar
        else:
            plt.pcolormesh(x, y, self.visitation_all, vmax=self.mesh_density)
            overlap = np.sum(
                np.where(self.visitation_all > 1, self.visitation_all, 0))  # sum of all visitations larger than 1
        ax.set_xlim([x[0][0], x[0][-1]])
        ax.set_ylim([y[0][0], y[-1][0]])
        log_dir = logger.get_snapshot_dir()
        exp_name = log_dir.split('/')[-1] if log_dir else '?'
        ax.set_title('visitation_Bonus: ' + exp_name)
        # NOTE(review): savefig assumes log_dir is not None; the '?' fallback
        # above only protects the title -- confirm a snapshot dir is always set.
        plt.savefig(osp.join(log_dir, 'visitation_Gbonus.png'))  # this saves the current figure, here f
        # Aggressively tear down matplotlib state to keep memory bounded.
        plt.close()
        plt.cla()
        plt.clf()
        plt.close('all')
        # del fig, ax, cmap, cbar, map_plot
        gc.collect()
        visitation_different = np.count_nonzero(self.visitation_all)
        logger.record_tabular('VisitationDifferents', visitation_different)
        logger.record_tabular('VisitationOverlap', overlap)
        logger.record_tabular('VisitationMin', np.min(self.visitation_all))
        logger.record_tabular('VisitationMax', np.max(self.visitation_all))
        # Entropy bonus statistics only exist when latents are in use.
        if self.snn_H_bonus:
            avg_grid_entropy_bonus = np.mean([np.sum(self.predict_entropy(path)) for path in paths])
            logger.record_tabular('AvgPath_Grid_EntropyBonus', avg_grid_entropy_bonus)
        # if self.visitation_bonus:
        avg_grid_count_bonus = np.mean([np.sum(self.predict_count(path)) for path in paths])
        logger.record_tabular('AvgPath_Grid_CountBonus', avg_grid_count_bonus)
        # if self.visitation_bonus:
        avg_grid_dist_bonus = np.mean([np.sum(self.predict_dist_from_reset(path)) for path in paths])
        logger.record_tabular('AvgPath_Grid_DistBonus', avg_grid_dist_bonus)
        # if self.survival_bonus:
        avg_survival_bonus = np.mean([len(path['rewards']) for path in paths])
        logger.record_tabular('AvgPath_SurviBonus', avg_survival_bonus)
        avg_grid_bonus = np.mean([np.sum(self.predict(path)) for path in paths])
        logger.record_tabular('AvgPathGridBonus', avg_grid_bonus)
|
It is rare that I get an email that I straight up post to the blog, but this one is too good not to share with you all. Justin Graves is an athlete who is new to BASE. He has always been a high achieving individual whether it was in athletics or in academics – yeah – he’s one of THOSE guys. He is a neuro-nerd who got started in triathlon around the same time I did. It’s been fun seeing his career in the sport develop and it has been even more fun helping him as his coach now. I had the pleasure of doing his initial lactate threshold tests this weekend and he did a little write up about his experience and how pumped he is to have some hard data to apply to his training…it’s going to be an epic year!
“Using theoretical tests and mathematical formulas to estimate heart rate zones, power zones, running paces is commonplace in the world of endurance sports. As someone who is trained in Neuroscience and has been doing research for many years, written books and articles it is amazing that I have not done lactic acid tests until now. The test itself is no different than your standard threshold workout, but the results it yields are quite substantial. Within the last 3-4 days I have done the run test and bike test courtesy of coach LU and I am just now starting to digest the results and the impact that it will have on my training and performance.
Two really important concepts were learned from these tests: first and foremost was that my heart rate zones are much higher than previously thought, and therefore I was putting myself at a severe disadvantage by not knowing the correct zones and ultimately training in the wrong HR zones for most of my workouts. An example is that previously I thought my endurance HR zone was capped around 145-150 bpm; realistically, after the tests with coach LU, this zone is actually from 160-180 bpm for the categories of easy endurance and intense endurance. By doing workouts at the previously lower intensities I was actually in my recovery zones, which is not where you want to be if you are trying to do an endurance intensity workout. This revelation is very important and will allow for more precise and targeted workouts moving forward, which will only serve to make me stronger, fitter and faster.
The second important fact learned during this test is that this is a technique that measures the physiological processes going on inside your body. It does not seem significant, but please let me explain. Knowing that 160-170 bpm running is easy endurance for me allows me to focus on HR while running and not worry about how fast or how slow I am running. In a sense it does not matter; what matters is being in the range of 160-170 to achieve the physiological effect that the workout was designed to achieve. In the simplest of terms, knowledge is power and in cycling power is speed; so using the knowledge gained from these tests can make two significant things happen. First and foremost it allows coach LU to be an even better coach than she already is: simply by knowing concrete physiological values, it takes guesswork out of training and replaces it with cold hard facts. Second, which is just as crucial, it allows you to become a better athlete (and the best part is, you don't have to work any harder than you are already working). Simply put, this allows the athlete to train smarter and the coach to design better-informed, fact-based programs, which in turn leads to better performance, no matter the level of the athlete.
Knowing your abilities is such an amazing asset to carry with you along the way from training to racing.
If you are interested in getting your lactate threshold testing done contact lauren@basetrifitness.com or click here.
Why Blood Lactate Threshold Testing?
|
from django.conf import settings
from django.forms.models import BaseInlineFormSet
class LimitedInlineFormset(BaseInlineFormSet):
    """
    Inline formset whose queryset is capped at a maximum number of rows,
    taken from ``settings.INLINES_MAX_LIMIT`` (default: 15).

    After the first queryset access, ``total_count`` holds the unrestricted
    row count and ``limited_count`` the number of rows actually shown.
    """
    limiting_inlines = True

    def get_queryset(self):
        # Compute and cache the limited queryset on first access (EAFP).
        try:
            return self._queryset
        except AttributeError:
            base_qs = super(LimitedInlineFormset, self).get_queryset()
            max_rows = getattr(settings, "INLINES_MAX_LIMIT", 15)
            self.total_count = base_qs.count()
            self._queryset = base_qs[:max_rows]
            self.limited_count = self._queryset.count()
            return self._queryset
class LimitInlinesAdminMixin(object):
    """
    ModelAdmin mixin: list the InlineModelAdmin classes you wish to cap in
    ``ModelAdmin.limit_inlines``; their formset is swapped for
    `LimitedInlineFormset`.
    """

    def get_formsets(self, request, obj=None):
        limited_classes = getattr(self, "limit_inlines", [])
        for inline in self.get_inline_instances(request, obj):
            formset_kwargs = {}
            # Only the explicitly listed inline classes get the limited formset.
            if inline.__class__ in limited_classes:
                formset_kwargs['formset'] = LimitedInlineFormset
            yield inline.get_formset(request, obj, **formset_kwargs)
|
Being a part of The Convo Club gives you access to the “archives” over 60 hours of recorded trainings which you can access once you login. Below are details of what to do once you login to the Member Portal.
Open Office Hours – You will have access to all of the past recordings of Open Office Hours. Each month is completely different because these are sessions where Clubbers choose the topics! Whatever you have going on in your world, get your questions answered and receive valuable coaching and feedback from the group.
Conversation Training – You will have access to all of the past recordings of Conversation Trainings. These trainings are on specific topics each month. The trainings topics are listed so you can pick and choose which ones you want to watch first!
Collaborative Strategist Trainings – You will have access to exclusive trainings by our 5 incredible Collaborative Strategists!
|
import argparse
import subprocess, os
import os.path as op
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pytrack_analysis import Multibench
from pytrack_analysis.dataio import VideoRawData
from pytrack_analysis.profile import get_profile, get_scriptname, show_profile
from pytrack_analysis.image_processing import ShowOverlay, WriteOverlay, PixelDiff
import pytrack_analysis.preprocessing as prp
import pytrack_analysis.plot as plot
from pytrack_analysis.yamlio import write_yaml
from scipy import signal
from scipy.signal import hilbert
def nan_helper(y):
    """Locate NaNs in a 1-D array and provide a mask-to-index converter.

    Input:
        - y, 1d numpy array with possible NaNs
    Output:
        - nans, logical indices of NaNs
        - index, a function, with signature indices = index(logical_indices),
          to convert logical indices of NaNs to 'equivalent' indices
    Example:
        >>> # linear interpolation of NaNs
        >>> nans, x = nan_helper(y)
        >>> y[nans] = np.interp(x(nans), x(~nans), y[~nans])
    """
    nan_mask = np.isnan(y)

    def to_indices(mask):
        # Turn a boolean mask into the corresponding integer positions.
        return mask.nonzero()[0]

    return nan_mask, to_indices
def remove_mistrack(x, y, ma, mi, thr=100.*0.0333, forced=False):
    """Blank out implausible tracking samples and linearly re-interpolate them.

    Samples whose fitted-ellipse area (``ma * mi``) falls outside [2, 10] are
    treated as mistracked: their x/y positions are set to NaN and then filled
    by linear interpolation from the surrounding valid samples.

    Side effect: ``ma`` and ``mi`` are modified IN PLACE -- mistracked entries
    are replaced by the respective array mean.

    ``thr`` and ``forced`` are kept for backward compatibility with existing
    callers, but are currently unused: the displacement-spike pass they
    configured was disabled (previously commented out) and its dead
    computation has been removed.

    Returns (xnew, ynew, ma, mi).
    """
    xnew, ynew = x.copy(), y.copy()
    # Ellipse area outside [2, 10] indicates a bad fit.
    area = np.multiply(ma, mi)
    bad = (area > 10) | (area < 2)
    xnew[bad] = np.nan
    ynew[bad] = np.nan
    # Replace the size of mistracked samples with the mean size (in place).
    ma[np.isnan(xnew)] = np.mean(ma)
    mi[np.isnan(xnew)] = np.mean(mi)
    # Linearly interpolate across the NaN gaps.
    nans, xind = nan_helper(xnew)
    xnew[nans] = np.interp(xind(nans), xind(~nans), xnew[~nans])
    nans, yind = nan_helper(ynew)
    ynew[nans] = np.interp(yind(nans), yind(~nans), ynew[~nans])
    return xnew, ynew, ma, mi
### TODO: move this to signal processing module
def gaussian_filter(_X, _len=16, _sigma=1.6):
    """Smooth the 1-D signal *_X* with a normalized Gaussian window.

    The first and last ``_len`` samples are passed through unchanged to avoid
    convolution boundary artifacts.
    """
    norm = np.sqrt(2*np.pi)*_sigma  ### Scipy's gaussian window is not normalized
    # scipy.signal.gaussian was removed in SciPy 1.13; the window now lives in
    # scipy.signal.windows (same samples, so behavior is unchanged).
    window = signal.windows.gaussian(_len+1, std=_sigma)/norm
    convo = np.convolve(_X, window, "same")
    ## eliminate boundary effects
    convo[:_len] = _X[:_len]
    convo[-_len:] = _X[-_len:]
    return convo
def get_args():
    """Parse command-line arguments.

    Returns a tuple ``(BASEDIR, OPTION, OVERWRITE)`` where OPTION defaults to
    'all' when ``--option`` is not given.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('basedir', metavar='basedir', type=str, help='directory where your data files are')
    parser.add_argument('--option', action='store', type=str)
    parser.add_argument('--overwrite', action='store_true')
    # Parse once and reuse the namespace (the original re-parsed sys.argv four times).
    args = parser.parse_args()
    OPTION = 'all' if args.option is None else args.option
    return args.basedir, OPTION, args.overwrite
def main():
    """Post-process raw fly-tracking videos found in BASEDIR.

    Per video: load arena + trajectory data, derive head/tail endpoints from
    body position, orientation and major-axis length, run a pixel-difference
    check around head/tail to detect flipped orientations, detect
    jumps/mistracking from displacement spikes, then write per-fly CSV + YAML
    metadata into ``<BASEDIR>/pytrack_res/post_tracking`` and diagnostic plots
    into ``<BASEDIR>/pytrack_res/plots``.
    """
    BASEDIR, OPTION, OVERWRITE = get_args()
    ### Define raw data structure
    colnames = ['datetime', 'elapsed_time', 'frame_dt', 'body_x', 'body_y', 'angle', 'major', 'minor']
    # Output folder layout under <BASEDIR>/pytrack_res.
    if not op.isdir(op.join(BASEDIR, 'pytrack_res')):
        os.mkdir(op.join(BASEDIR, 'pytrack_res'))
    RESULT = op.join(BASEDIR, 'pytrack_res')
    if not op.isdir(op.join(RESULT,'post_tracking')):
        os.mkdir(op.join(RESULT,'post_tracking'))
    if not op.isdir(op.join(RESULT,'pixeldiff')):
        os.mkdir(op.join(RESULT,'pixeldiff'))
    if not op.isdir(op.join(RESULT,'jumps')):
        os.mkdir(op.join(RESULT,'jumps'))
    raw_data = VideoRawData(BASEDIR, VERBOSE=(OPTION == 'registration'))
    # 'registration' mode only runs the (verbose) data registration step.
    if OPTION == 'registration':
        return 1
    ### go through all session
    for iv, video in enumerate(raw_data.videos):
        # NOTE(review): only the first 29 videos are processed -- confirm this
        # cutoff is intentional and not a debugging leftover.
        if iv > 28:
            continue
        Nflies = 4
        print('\n{}: {}'.format(iv, video.name))
        ### arena + food spots
        video.load_arena()
        ### trajectory data
        video.load_data()
        ### rename columns
        video.data.reindex(colnames)
        ### data to timestart
        video.data.to_timestart(video.timestart)
        ### calculate displacements
        # Per-fly lists: head (x, y), tail (tx, ty), body (bx, by).
        x, y, tx, ty, bx, by = [], [], [], [], [], []
        jumps, dr, dddr, thr, flipped = [], [], [], [], []
        wo = WriteOverlay(video.fullpath, outfolder=op.join(RESULT,'jumps'))
        ### plotting speed, major/minor, decision points etc
        f, axes = plt.subplots(12, figsize=(6,10)) ### TODO
        print('extract trajectories...')
        for i in range(Nflies):
            """
            Extract some kinematics
            """
            ff = int(video.data.dfs[i].index[0])
            lf = int(video.data.dfs[i].index[-1])
            st = 0
            en = min(lf-ff, 108100)
            # Fill tracking gaps before deriving the head/tail endpoints.
            xpos = video.data.dfs[i]['body_x'].interpolate().fillna(method='ffill').fillna(method='bfill')
            ypos = video.data.dfs[i]['body_y'].interpolate().fillna(method='ffill').fillna(method='bfill')
            m = video.data.dfs[i]['major'].interpolate().fillna(method='ffill').fillna(method='bfill')
            angle = video.data.dfs[i]['angle'].interpolate().fillna(method='ffill').fillna(method='bfill')
            # Head/tail = body centre +/- half the major axis along the body angle.
            x.append(xpos+0.5*m*np.cos(angle))
            y.append(ypos+0.5*m*np.sin(angle))
            tx.append(xpos-0.5*m*np.cos(angle))
            ty.append(ypos-0.5*m*np.sin(angle))
            bx.append(xpos)
            by.append(ypos)
        """
        PixelDiff Algorithm
        """
        print('pixeldiff...')
        # Cached per video: reuse the CSV if a previous run already wrote it.
        _ofile = op.join(RESULT,'pixeldiff','pixeldiff_{}.csv'.format(video.timestr))
        if op.isfile(_ofile):
            pxd_data = pd.read_csv(_ofile, index_col='frame')
        else:
            pxdiff = PixelDiff(video.fullpath, start_frame=video.data.first_frame)
            px, tpx = pxdiff.run((x,y), (tx,ty), en, show=False)
            pxd_data = pd.DataFrame({ 'headpx_fly1': px[:,0], 'tailpx_fly1': tpx[:,0],
                                      'headpx_fly2': px[:,1], 'tailpx_fly2': tpx[:,1],
                                      'headpx_fly3': px[:,2], 'tailpx_fly3': tpx[:,2],
                                      'headpx_fly4': px[:,3], 'tailpx_fly4': tpx[:,3],})
            pxd_data.to_csv(_ofile, index_label='frame')
        print('head detection...')
        for i in range(Nflies):
            ff = int(video.data.dfs[i].index[0])
            lf = int(video.data.dfs[i].index[-1])
            st = 0
            en = min(lf-ff, 108100)
            xpos = video.data.dfs[i]['body_x'].interpolate().fillna(method='ffill').fillna(method='bfill')
            ypos = video.data.dfs[i]['body_y'].interpolate().fillna(method='ffill').fillna(method='bfill')
            m = video.data.dfs[i]['major'].interpolate().fillna(method='ffill').fillna(method='bfill')
            angle = video.data.dfs[i]['angle'].interpolate().fillna(method='ffill').fillna(method='bfill')
            mi = video.data.dfs[i]['minor'].interpolate().fillna(method='ffill').fillna(method='bfill')
            if np.any(np.isnan(xpos)) or np.any(np.isnan(ypos)) or np.any(np.isnan(m)) or np.any(np.isnan(angle)):
                print(np.any(np.isnan(xpos)), np.any(np.isnan(ypos)), np.any(np.isnan(m)), np.any(np.isnan(angle)))
            dt = video.data.dfs[i]['frame_dt']
            # Frame-to-frame velocity components (y is negated).
            dx, dy = np.append(0, np.diff(xpos)), np.append(0, np.diff(-ypos))
            dx, dy = np.divide(dx, dt), np.divide(dy, dt)
            theta = np.arctan2(dy, dx)
            ### pixel data from pixeldiff
            hpx = np.array(pxd_data['headpx_fly{}'.format(i+1)])
            wlen = 36
            hpx = gaussian_filter(hpx, _len=wlen, _sigma=0.1*wlen)
            tpx = np.array(pxd_data['tailpx_fly{}'.format(i+1)])
            tpx = gaussian_filter(tpx, _len=wlen, _sigma=0.1*wlen)
            """
            diff of diff of displacements (spikes are more pronounced)
            """
            dr.append(np.sqrt(dx*dx+dy*dy)/float(video.arena[i]['scale']))
            ddr = np.append(0, np.diff(dr[-1]))
            dddr.append(np.append(0, np.diff(ddr)))
            #wlen = 36
            #dr_sm = gaussian_filter(np.array(dr), _len=wlen, _sigma=0.1*wlen)
            wlen = 120
            dddr_sm = gaussian_filter(np.array(np.abs(dddr[-1])), _len=wlen, _sigma=0.5*wlen)
            """
            Thresholding
            """
            # Adaptive spike threshold on |d2(speed)|, clamped to [10, 30].
            threshold = 10.*dddr_sm
            low, high = 10., 30.
            threshold[threshold<low] = low
            threshold[threshold>high] = high
            thr.append(threshold)
            #### TODO
            jumps.append(np.array(np.array(dddr[-1])[st:en] > threshold[st:en]))
            mistrack_inds = np.where(np.array(dddr[-1])[st:en] > threshold[st:en])[0]
            """
            Rolling mean of pixeldiff for flips (window = 10 secs)
            """
            # pxthr: per-frame "tail darker than head" test; pxavg: its forward
            # rolling mean over 300 frames (10 s at 30 fps -- confirm fps).
            pxthr = np.array(tpx[st:en] < hpx[st:en])
            pxavg = np.zeros(pxthr.shape)
            for frm in range(pxavg.shape[0]):
                e = frm + 300
                if e >= pxavg.shape[0]:
                    e = pxavg.shape[0]-1
                if frm == e:
                    pxavg[frm] = pxthr[frm]
                else:
                    pxavg[frm] = np.mean(pxthr[frm:e])
            ### plot
            axes[3*i].plot(dddr[-1][st:en], 'k-', lw=0.5)
            axes[3*i].plot(threshold[st:en], '--', color='#fa6800', lw=0.5)
            axes[3*i].plot(mistrack_inds, 50.*np.ones(len(mistrack_inds)), 'o', color='#d80073', markersize=2)
            axes[3*i].set_ylim([-5,55])
            axes[3*i].set_yticks(np.arange(0,60,25))
            ### plot 2nd
            axes[3*i+1].plot(hpx[st:en], '-', color='#fa0078', lw=0.5)
            axes[3*i+1].plot(tpx[st:en], '-', color='#00fa64', lw=0.5)
            axes[3*i+1].plot(100.*pxthr, '--', color='#6e6e6e', lw=0.5)
            axes[3*i+1].plot(100.*pxavg, '-', color='#000000', lw=0.5)
            axes[3*i+1].set_ylim([0,255])
            axes[3*i+2].plot(m[st:en]/video.arena[i]['scale'], '-', color='#ff2f2f', lw=0.5)
            axes[3*i+2].plot(mi[st:en]/video.arena[i]['scale'], '-', color='#008dff', lw=0.5)
            axes[3*i+2].plot((m[st:en]*mi[st:en])/video.arena[i]['scale'], '--', color='#6f6f6f', lw=0.5)
            axes[3*i+2].set_ylim([-1,6])
            axes[3*i+2].set_yticks(np.arange(0,7,2))
            ####
            view = (video.arena[i]['x']-260, video.arena[i]['y']-260, 520, 520)
            # NOTE(review): sf/ef (and st/en) are reused after this loop ends,
            # i.e. with the values of the LAST fly -- confirm all flies share
            # the same frame bounds.
            sf, ef = st+ff, en+ff
            total_dur = int((video.data.dfs[i].loc[lf,'elapsed_time'] - video.data.dfs[i].loc[ff,'elapsed_time'])/60.)
            secs = int(round(video.data.dfs[i].loc[lf,'elapsed_time'] - video.data.dfs[i].loc[ff,'elapsed_time']))%60
            if OPTION == 'jump_detection':
                print("fly {}:\tstart@ {} ({} >= {}) total: {}:{:02d} mins ({} frames)".format(i+1, ff, video.data.dfs[i].loc[ff,'datetime'], video.timestart, total_dur, secs, en-st))
            thrs = np.array(np.array(dddr[i])[st:en] > threshold[st:en])
            flip = np.zeros(thrs.shape)
            flipped.append(flip)
            # Segment boundaries: start, every detection point, end-of-data.
            thr_ix = np.append(np.append(0, np.where(thrs)[0]), len(flip)+ff)
            if OPTION == 'jump_detection':
                print('found {} detection points (start, jumps, mistracking, etc.).'.format(len(thr_ix)-1))
            count = 0
            if len(thr_ix) > 0:
                for jj,ji in enumerate(thr_ix[:-1]):
                    fromfr = thr_ix[jj] + ff
                    tofr = thr_ix[jj+1] + ff - 1
                    # Majority vote of the tail<head pixel test decides a flip
                    # for the whole segment between detection points.
                    flip[thr_ix[jj]:thr_ix[jj+1]] = np.mean(pxthr[thr_ix[jj]:thr_ix[jj+1]])>0.5
                    if flip[thr_ix[jj]] == 1:
                        # Swap head and tail coordinates over the flipped segment.
                        x[i].loc[fromfr:tofr], tx[i].loc[fromfr:tofr] = tx[i].loc[fromfr:tofr], x[i].loc[fromfr:tofr]
                        y[i].loc[fromfr:tofr], ty[i].loc[fromfr:tofr] = ty[i].loc[fromfr:tofr], y[i].loc[fromfr:tofr]
                    # Write a +/-60-frame inspection clip around the event.
                    clip_st, clip_en = fromfr-60, fromfr+60
                    if clip_st < int(video.data.dfs[i].index[0]):
                        clip_st = int(video.data.dfs[i].index[0])
                    if clip_en > int(video.data.dfs[i].index[-1]):
                        clip_en = int(video.data.dfs[i].index[-1])
                    if clip_en - clip_st < 30:
                        continue
                    count += 1
                    _ofile = op.join(RESULT,'jumps','{}'.format(video.name[:-4]), 'fly{}_{:06d}.avi'.format(i+1, fromfr))
                    if not op.isfile(_ofile):
                        wo.run((bx[i].loc[clip_st:clip_en], by[i].loc[clip_st:clip_en]), (x[i].loc[clip_st:clip_en], y[i].loc[clip_st:clip_en]), clip_st, clip_en, fromfr, view, i, bool=[thr, flip])
            video.data.dfs[i].loc[:, 'head_x'] = x[i]
            video.data.dfs[i].loc[:, 'head_y'] = y[i]
            if OPTION == 'jump_detection':
                print('wrote {} videos.'.format(count))
        # NOTE(review): uses dr[-1], i.e. only the last fly's speeds -- confirm.
        mistracked = np.sum(dr[-1] > 100)
        print('Mistracked frames:', mistracked)
        window_len = 36
        if not op.isdir(op.join(RESULT,'plots')):
            os.mkdir(op.join(RESULT,'plots'))
        if not op.isdir(op.join(RESULT,'plots', 'posttracking')):
            os.mkdir(op.join(RESULT,'plots', 'posttracking'))
        f.savefig(op.join(RESULT,'plots', 'posttracking','speed_{}.png'.format(video.timestr)), dpi=600)
        # In jump_detection mode we stop after writing diagnostics.
        if OPTION == 'jump_detection':
            continue
        labels = ['topleft', 'topright', 'bottomleft', 'bottomright']
        print('pack data...')
        for i in range(Nflies):
            df = video.data.dfs[i].loc[sf:ef-1]
            df.is_copy = False
            df.loc[:, ('flipped')] = np.array(flipped[i])
            df.loc[:, 'jumps'] = jumps[i]
            df.loc[:, 'dr'] = dr[i][st:en]
            df.loc[:, 'dddr'] = dddr[i][st:en]
            df.loc[:, 'threshold'] = thr[i][st:en]
            # Recompute body angle from the (possibly flip-corrected) head.
            dx, dy = df['head_x'] - df['body_x'], df['body_y'] - df['head_y']
            df.loc[:, 'angle'] = np.arctan2(dy, dx)
            # Convert to arena-centred, scaled coordinates; y flips sign.
            df.loc[:, 'body_x'] -= video.arena[i]['x']
            df.loc[:, 'body_y'] -= video.arena[i]['y']
            df.loc[:, 'body_x'] /= video.arena[i]['scale']
            df.loc[:, 'body_y'] /= -video.arena[i]['scale']
            df.loc[:, 'major'] /= video.arena[i]['scale']
            df.loc[:, 'minor'] /= video.arena[i]['scale']
            print('x: ', np.amax(df['body_x']), np.amin(df['body_x']))
            print('y: ', np.amax(df['body_y']), np.amin(df['body_y']))
            print('major/minor: ', np.mean(df['major']), np.mean(df['minor']))
            outdf = df[['datetime', 'elapsed_time', 'frame_dt', 'body_x', 'body_y', 'angle', 'major', 'minor', 'flipped']]
            outfile = op.join(RESULT,'post_tracking','{}_{:03d}.csv'.format(raw_data.experiment['ID'], i+iv*4))
            print('saving to ', outfile)
            outdf.to_csv(outfile, index_label='frame')
            ### metadata
            meta = {}
            meta['arena'] = video.arena[i]
            meta['arena']['layout'] = '6-6 radial'
            meta['arena']['name'] = labels[i]
            meta['condition'] = ' '.join([v[i] for k,v in raw_data.experiment['Conditions'][video.name].items()])
            meta['datafile'] = outfile
            meta['datetime'] = video.time
            meta['flags'] = {}
            meta['flags']['mistracked_frames'] = int(mistracked)
            meta['fly'] = {}
            meta['fly']['mating'] = raw_data.experiment['Constants']['mating']
            meta['fly']['metabolic'] = raw_data.experiment['Constants']['metabolic']
            meta['fly']['sex'] = raw_data.experiment['Constants']['sex']
            meta['fly']['genotype'] = raw_data.experiment['Conditions'][video.name]['genotype'][i]
            meta['fly']['temperature'] = raw_data.experiment['Conditions'][video.name]['temperature'][i]
            #meta['fly']['genetic manipulation'] = raw_data.experiment['Conditions'][video.name]['genetic manipulation'][i] === Kir
            meta['food_spots'] = video.spots[i]
            meta['setup'] = {}
            meta['setup']['humidity'] = raw_data.experiment['Constants']['humidity']
            meta['setup']['light'] = raw_data.experiment['Constants']['light']
            meta['setup']['n_per_arena'] = raw_data.experiment['Constants']['n_per_arena']
            meta['setup']['room'] = raw_data.experiment['Constants']['room']
            meta['setup']['temperature'] = raw_data.experiment['Conditions'][video.name]['temperature'][i] # raw_data.experiment['Constants']['temperature']
            meta['video'] = {}
            meta['video']['dir'] = video.dir
            meta['video']['file'] = video.fullpath
            meta['video']['first_frame'] = int(outdf.index[0])
            meta['video']['start_time'] = video.timestart
            yamlfile = op.join(RESULT,'post_tracking','{}_{:03d}.yaml'.format(raw_data.experiment['ID'], i+iv*4))
            write_yaml(yamlfile, meta)
            ### plot trajectory
            plotfile = op.join(RESULT,'plots','{}_{:03d}.png'.format(raw_data.experiment['ID'], i+iv*4))
            f, ax = plt.subplots(figsize=(10,10))
            ax = plot.arena(video.arena[i], video.spots[i], ax=ax)
            # NOTE(review): this rebinding clobbers the per-fly lists x, y and
            # jumps built earlier; on the next iteration `jumps[i]` above will
            # index THIS array instead of the list -- likely a bug (use
            # differently named locals); confirm intent.
            x, y, jumps, major, minor = np.array(df['body_x']), np.array(df['body_y']), np.array(df['jumps']), np.array(df['major']), np.array(df['minor'])
            #ax.plot(x, y, c='#595959', zorder=1, lw=.5, alpha=0.5)
            xnew, ynew, major, minor = remove_mistrack(x, y, major, minor)
            xnew, ynew, major, minor = remove_mistrack(xnew, ynew, major, minor, thr=300.*0.0333, forced=True)
            ends = 108100
            ax.plot(x[0], y[0], '.', c='#00ff4f', alpha=0.75, zorder=10)
            ax.plot(x[ends-1], y[ends-1], '.', c='#ff3d00', alpha=0.75, zorder=10)
            #ax.plot(x[:ends], y[:ends], '-', c='#00e0ff', lw=1, alpha=0.5)
            ax.plot(xnew[:ends], ynew[:ends], '-', c='#ff00ff', lw=1, alpha=0.5)
            color = jumps
            # NOTE(review): `jumps` is a boolean array here, so assigning colour
            # strings into it should raise -- this pairs with the commented-out
            # scatter call below; confirm before re-enabling either.
            color[jumps==1] = '#ff0000'
            color[jumps==0] = '#b1b1b1'
            #ax.scatter(x, y, c=displ, s=5, cmap=plt.get_cmap('YlOrRd'), alpha=0.9, edgecolors='none', linewidths=0)
            f.savefig(plotfile, dpi=300)
            ###
        video.unload_data()
if __name__ == '__main__':
    # runs as benchmark test
    # NOTE(review): Multibench appears to wrap the callable and time/benchmark
    # it (SLIM presumably trims the report) -- see pytrack_analysis.Multibench.
    test = Multibench("", SILENT=False, SLIM=True)
    test(main)
    # Drop the benchmark object explicitly to release any held resources.
    del test
|
Need the best air conditioner repair in town? Call Royal's Heating & Air today! We are a family owned and operated air conditioner repair company that has been doing business in the area for many years. We treat each of our customers like family, and that means responding right away to all service calls. Whether you need your AC inspected or are experiencing a home cooling emergency, our highly trained technicians will take care of your needs. We also recommend regularly scheduled maintenance to keep your air conditioner working like new. Give Royal's Heating & Air a call as soon as you suspect you have an issue and we’ll get your home back to comfortable temperatures right away.
Newer air conditioners are much more energy efficient than what was available years ago, so any new installation is going to save you money on your energy bills. We can come to your home and assess its size and cooling needs and make the appropriate recommendations that will give you cooler temperatures throughout the year. We understand that a Colorado summer is no time to be without a properly working air conditioner, so we work quickly in order to get your new system up and running as soon as possible. We also recommend regular maintenance on new units so it stays in tip-top shape for years to come. Call Royal's Heating & Air today and start feeling better instantly!
|
import logging
import os
from virttest import virsh, utils_test, utils_misc
from autotest.client import utils, lv_utils
from autotest.client.shared import error
def run(test, params, env):
    """
    Test command: virsh find-storage-pool-sources-as

    1. Prepare env to provide source storage:
       1). For 'netfs' source type, setup nfs server
       2). For 'iscsi' source type, setup iscsi server
       3). For 'logical' type pool, setup iscsi storage to create vg
    2. Find the pool source by running virsh cmd
    """
    source_type = params.get("source_type", "")
    source_host = params.get("source_host", "127.0.0.1")
    source_port = params.get("source_port", "")
    options = params.get("extra_options", "")
    vg_name = params.get("vg_name", "virttest_vg_0")
    # Run virsh through a read-only connection when requested.
    ro_flag = "yes" == params.get("readonly_mode", "no")
    # Whether the virsh command is expected to fail.
    status_error = "yes" == params.get("status_error", "no")
    if not source_type:
        raise error.TestFail("Command requires <type> value")

    # Track exactly which resources were provisioned so the finally block
    # only cleans up what this run created.
    cleanup_nfs = False
    cleanup_iscsi = False
    cleanup_logical = False

    # Storage is only provisioned locally when targeting the local host.
    if source_host == "127.0.0.1":
        if source_type == "netfs":
            # Set up nfs
            res = utils_test.libvirt.setup_or_cleanup_nfs(True)
            # Remember the original selinux status so cleanup can restore it.
            selinux_bak = res["selinux_status_bak"]
            cleanup_nfs = True
        if source_type in ["iscsi", "logical"]:
            # Set up iscsi
            try:
                iscsi_device = utils_test.libvirt.setup_or_cleanup_iscsi(True)
                # If we got nothing, force failure
                if not iscsi_device:
                    raise error.TestFail("Did not setup an iscsi device")
                cleanup_iscsi = True
                if source_type == "logical":
                    # Create VG by using iscsi device
                    lv_utils.vg_create(vg_name, iscsi_device)
                    cleanup_logical = True
            except Exception, detail:
                # Tear down the iscsi target before failing, since the
                # finally block below is not reached from here.
                if cleanup_iscsi:
                    utils_test.libvirt.setup_or_cleanup_iscsi(False)
                raise error.TestFail("iscsi setup failed:\n%s" % detail)

    # Run virsh cmd
    options = "%s %s " % (source_host, source_port) + options
    if ro_flag:
        logging.debug("Readonly mode test")
    try:
        cmd_result = virsh.find_storage_pool_sources_as(
            source_type,
            options,
            ignore_status=True,
            debug=True,
            readonly=ro_flag)
        output = cmd_result.stdout.strip()
        err = cmd_result.stderr.strip()
        status = cmd_result.exit_status
        if not status_error:
            # Success expected: any non-zero exit status fails the test.
            if status:
                raise error.TestFail(err)
            else:
                logging.debug("Command outout:\n%s", output)
        elif status_error and status == 0:
            raise error.TestFail("Expect fail, but run successfully")
    finally:
        # Clean up
        if cleanup_logical:
            # Look up the physical volume backing the VG before removing it.
            cmd = "pvs |grep %s |awk '{print $1}'" % vg_name
            pv_name = utils.system_output(cmd)
            lv_utils.vg_remove(vg_name)
            utils.run("pvremove %s" % pv_name)
        if cleanup_iscsi:
            utils_test.libvirt.setup_or_cleanup_iscsi(False)
        if cleanup_nfs:
            utils_test.libvirt.setup_or_cleanup_nfs(
                False, restore_selinux=selinux_bak)
|
Tie dye print full zip slub french terry hoodie. Heavy enzyme wash with silicone rinse for softness. 100% cotton, machine washable, imported.
|
#!/usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 Ivor Wanders
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
from .usb_pdml import USBPDML
from .protocol import USBPacket, USBPacketFeed, load_msg
from . import protocol
from . import pmem
import pickle
import json
import gzip
import base64
def load_pdml_usb(path):
    """Load USB interactions from a PDML (Wireshark XML) export.

    Parsed results are cached next to the input as ``<path>.pickle3`` so
    repeated runs skip the slow XML parse; passing a ``.pickle3`` path loads
    the cache directly.

    Returns:
        dict with "incoming" and "outgoing" keys, each a list of
        (relative_time, payload_bytes) tuples. Times are relative to the
        first interaction in the capture.
    """
    # Check if we have a cached version available.
    if path.endswith(".pickle3"):
        with open(path, "rb") as f:
            interactions = pickle.load(f)
    else:
        conversation = USBPDML(path)
        conversation.parse_file()
        interactions = conversation.interaction()
        # Write the cached version for the next run.
        with open(path + ".pickle3", "wb") as f:
            pickle.dump(interactions, f)

    entries = {"incoming": [], "outgoing": []}
    start_time = None
    for msg in interactions:
        # Timestamps are made relative to the first message.
        if start_time is None:
            start_time = msg["time"]
        t = msg["time"] - start_time
        # Interactions without a payload carry no data to record.
        if "data" in msg:
            data = bytes(msg["data"])
            if msg["direction"] == "<":
                entries["incoming"].append((t, data))
            else:
                entries["outgoing"].append((t, data))
    return entries
def load_json_usb(path):
    """Load USB interactions from a (possibly gzipped) JSON export.

    The JSON document maps "incoming"/"outgoing" to lists of
    [time, base64-payload] pairs; payloads are decoded back to bytes.
    """
    if path.endswith(".gz"):
        handle = gzip.open(path, "rt")
    else:
        handle = open(path, "rt")
    with handle as f:
        raw = json.load(f)
    return {
        direction: [(t, base64.b64decode(encoded)) for t, encoded in raw[direction]]
        for direction in ("incoming", "outgoing")
    }
def order_entries_and_combine(entries):
    """Flatten {direction: [(t, data), ...]} into one time-ordered list.

    Returns a list of (t, direction, data) tuples sorted by timestamp.
    """
    combined = [
        (t, direction, payload)
        for direction, records in entries.items()
        for t, payload in records
    ]
    combined.sort(key=lambda record: record[0])
    return combined
def load_usb_transactions(path):
    """Dispatch to the right capture loader based on the file name.

    ``.xml`` captures (including their ``.xml.pickle3`` caches) go through
    the PDML loader; ``.json`` (optionally ``.json.gz``) through the JSON
    loader.

    Raises:
        ValueError: for any other path. The original implementation fell
        through and returned None, which crashed callers later with an
        unrelated TypeError.
    """
    # Substring (not suffix) checks on purpose: cached captures are named
    # e.g. "capture.xml.pickle3" and gzipped JSON "capture.json.gz".
    if ".xml" in path:
        return load_pdml_usb(path)
    if ".json" in path:
        return load_json_usb(path)
    raise ValueError("unsupported capture file type: {}".format(path))
def reconstruct_filesystem(path, output_file):
    """Rebuild the device filesystem image from captured USB traffic.

    Replays all incoming packets, copies every DataReply payload into a
    filesystem-sized buffer, prints any byte ranges that were never
    observed in the capture, and writes the (possibly incomplete) image
    to *output_file*.
    """
    data = load_usb_transactions(path)

    fs_bytes = bytearray(pmem.FILESYSTEM_SIZE)
    # Parallel map: 1 where a byte was filled by a DataReply, else 0.
    touched_fs = bytearray(pmem.FILESYSTEM_SIZE)

    feed = USBPacketFeed()
    for t, v in data["incoming"]:
        usb_packet = USBPacket.read(v)
        # The feed reassembles fragmented packets; a truthy result is a
        # complete message payload.
        res = feed.packet(usb_packet)
        if res:
            msg = load_msg(res)
            if type(msg) == protocol.DataReply:
                pos = msg.position()
                length = msg.length()
                fs_bytes[pos:pos + length] = bytes(msg.content())
                touched_fs[pos:pos + length] = bytearray(
                    [1 for i in range(length)])

    # Report contiguous ranges of bytes that were never seen in the capture.
    missing = False
    for i in range(len(touched_fs)):
        v = touched_fs[i]
        if v == 0:
            if missing is False:
                print("Missing from 0x{:0>4X}".format(i), end="")
                missing = True
        else:
            if missing is True:
                print(" up to 0x{:0>4X}".format(i))
                missing = False
    if missing is True:
        # Bug fix: a gap running to the end of the image previously printed
        # the last index (len-1); the exclusive upper bound is len(), which
        # matches the in-loop reporting above.
        print(" up to 0x{:0>4X}".format(len(touched_fs)))

    with open(output_file, "wb") as f:
        f.write(fs_bytes)
def print_interaction(path):
    """Pretty-print the reassembled message stream from a capture file.

    Each direction gets its own USBPacketFeed (fragments from the two
    directions interleave in time but reassemble independently) and its own
    ANSI color: green for incoming, blue for outgoing.
    """
    dir_specific = {
        "incoming": {
            "feed": USBPacketFeed(),
            "color": "\033[1;32m{0}\033[00m",
        },
        "outgoing": {
            "feed": USBPacketFeed(),
            "color": "\033[1;34m{0}\033[00m",
        }
    }
    data = load_usb_transactions(path)
    # lets just start with outgoing always.
    combined_entries = order_entries_and_combine(data)
    # Print timestamps relative to the first packet of the capture.
    start_time = combined_entries[0][0]
    packet_counter = 0
    for time, direction, data in combined_entries:
        packet_counter += 1
        reltime = time - start_time
        usb_packet = USBPacket.read(data)
        # Feed the packet into this direction's reassembler; a truthy result
        # means a complete message has been assembled.
        res = dir_specific[direction]["feed"].packet(usb_packet)
        if (res):
            # print(" ".join(["{:0>2X}".format(a) for a in bytes(res)]))
            message = load_msg(res)
            if (not message):
                print("Something is very wrong, message: {}".format(message))
                continue
            # NOTE(review): "{:r}" is a non-standard format spec — this
            # assumes the protocol message classes implement __format__ to
            # accept it; a plain object would raise ValueError. Confirm
            # against the protocol module.
            print(dir_specific[direction]["color"].format(
                "#{:0>5d}, t: {:0>6.3f} {:r}".format(packet_counter,
                                                     reltime, message)))
            # print(" ".join(["{:0>2X}".format(a) for a in bytes(message)]))
|
I don’t remember exactly when or where I first saw them … I just know that ever since, those African print hypnotized me.
There are different types, origins and qualities of African fabric, however all of them share the liveliness and exaltation of color – it looks like they reflect the vitality and the provocative personality of the people who wear them.
During my time in Africa I have witnessed the role that the African fabric plays in the lives of many people. In rural areas in Mozambique for example, they use an African print called Capulana – most women wear several layers of it wrapped around their waists. The Capulana is then used as a skirt, a coat, a tablecloth, a bag to carry the kids or store things, or as an umbrella when it rains.
At WEWE we use original African fabric from different countries of West Africa. We design, cut and sew our clothes in Cape Town and the fabric is imported by traders traveling to South Africa from Senegal, Congo, Ghana, Mali, Togo and Cote d´ Ivoire.
Sourcing those fabrics is a fascinating experience in the process of making African clothing. At least twice a week I walk through my beloved city of Cape Town to meet with those traders, trying to find the most unique pieces. We meet at their stores, in their houses or in remote places like parks or the corner of a street where they bring sacs full of them, and every time I just go back home with a very few but special prints I carefully selected.
Because we source our fabrics in that particular way, it is very difficult for us to find the same print again. Each piece is a special and limited product and it is difficult to reproduce the same garment twice. That makes every WEWE product a UNIQUE and EXCLUSIVE piece. So if you see something you like, you better get it now!!
WEWE is not just an African fashion brand.
It is the purpose of bringing some African bites to our European lifestyles and putting some African joy and color into our lives. It´s a cultural exchange that wants to tell a story of another place. It´s the fusion of people coming from different countries, cultures races and religions united by the same passion to create something different.
WEWE is not just a garment with ethnic style!!
We are an ethical fashion project and behind each WEWE there is a world we want to share with you.
I truly hope that each piece will portray that inspiration, since this is my way of showing my love, admiration and respect for this continent.
|
from __future__ import with_statement
import __future__
import copy
import fnmatch
import functools
import glob as glob_module
import imp
import inspect
import itertools
import json
from pathlib import Path
import optparse
import os
import os.path
import re
import sys
# When build files are executed, the functions in this file tagged with
# @provide_for_build will be provided in the build file's local symbol table.
#
# When these functions are called from a build file, they will be passed
# a keyword parameter, build_env, which is a object with information about
# the environment of the build file which is currently being processed.
# It contains the following attributes:
#
# "dirname" - The directory containing the build file.
#
# "base_path" - The base path of the build file.
# Registry of functions exposed to build files via @provide_for_build.
BUILD_FUNCTIONS = []
# File name of the build rule files this processor parses.
BUILD_RULES_FILE_NAME = 'BUCK'
class BuildContextType(object):
    """
    Identifies the type of input file to the processor.
    """

    # A BUCK build file: may define build rules.
    BUILD_FILE = 'build_file'
    # An included defs file: provides symbols but may not define rules.
    INCLUDE = 'include'
class BuildFileContext(object):
    """
    The build context used when processing a build file.
    """

    type = BuildContextType.BUILD_FILE

    def __init__(self, base_path, dirname):
        # Global symbol table associated with this context.
        self.globals = {}
        # Paths of all includes pulled in while processing this file.
        self.includes = set()
        # Path of the build file's directory relative to the project root.
        self.base_path = base_path
        # Absolute directory containing the build file.
        self.dirname = dirname
        # Rule name -> rule dict, populated via add_rule().
        self.rules = {}
class IncludeContext(object):
    """
    The build context used when processing an include.
    """

    type = BuildContextType.INCLUDE

    def __init__(self):
        # Global symbol table exported by this include.
        self.globals = {}
        # Paths of any nested includes referenced by this include.
        self.includes = set()
class LazyBuildEnvPartial(object):
    """Pairs a function with a build environment in which it will be executed.

    The function is fixed at construction time, but the build environment is
    assigned afterwards — and reassigned whenever the currently-processed
    context changes. Call invoke() to run the function with the current
    environment injected; it forwards all of invoke()'s arguments to the
    underlying function.
    """

    def __init__(self, func):
        self.func = func
        self.build_env = None

    def invoke(self, *args, **kwargs):
        """Invokes the bound function injecting 'build_env' into **kwargs."""
        # Copy so the caller's kwargs dict is never mutated; our build_env
        # always wins over any caller-supplied value.
        merged = dict(kwargs)
        merged['build_env'] = self.build_env
        return self.func(*args, **merged)
def provide_for_build(func):
    """Decorator: register *func* for injection into build files' symbol tables."""
    BUILD_FUNCTIONS.append(func)
    return func
def add_rule(rule, build_env):
    """Validate *rule* and record it in the current build file context.

    Requires a build-file context (not an include), a 'name' field, and a
    name that is unique within this file. Tags the rule with the build
    file's base path before storing it.
    """
    assert build_env.type == BuildContextType.BUILD_FILE, (
        "Cannot use `{}()` at the top-level of an included file."
        .format(rule['type']))

    if 'name' not in rule:
        raise ValueError(
            'rules must contain the field \'name\'. Found %s.' % rule)

    name = rule['name']
    existing = build_env.rules.get(name)
    if existing is not None:
        raise ValueError('Duplicate rule definition found. Found %s and %s' %
                         (rule, existing))

    # Include the base path of the BUILD file so the reader consuming this
    # JSON will know which BUILD file the rule came from.
    rule['buck.base_path'] = build_env.base_path
    build_env.rules[name] = rule
@provide_for_build
def glob(includes, excludes=[], include_dotfiles=False, build_env=None):
    """Return sorted paths (relative to the build file's directory) matching
    any of *includes* but none of *excludes*.

    Note: the mutable default for *excludes* is safe here only because it is
    never mutated by this function or by glob_internal().
    """
    assert build_env.type == BuildContextType.BUILD_FILE, (
        "Cannot use `glob()` at the top-level of an included file.")
    search_base = Path(build_env.dirname)
    return glob_internal(includes, excludes, include_dotfiles, search_base)
def glob_internal(includes, excludes, include_dotfiles, search_base):
    """Glob implementation behind glob(): match *includes* patterns under
    *search_base*, drop paths matching *excludes*, and return a sorted list
    of unique relative path strings.
    """
    # Ensure the user passes lists of strings rather than just a string.
    assert not isinstance(includes, basestring), \
        "The first argument to glob() must be a list of strings."
    assert not isinstance(excludes, basestring), \
        "The excludes argument must be a list of strings."

    def includes_iterator():
        # Yield files (never directories) matching any include pattern,
        # relative to the search base; dotfiles are skipped unless requested.
        for pattern in includes:
            for path in search_base.glob(pattern):
                # TODO(user): Handle hidden files on Windows.
                if path.is_file() and (include_dotfiles or not path.name.startswith('.')):
                    yield path.relative_to(search_base)

    def is_special(pat):
        # True when the pattern contains glob metacharacters.
        return "*" in pat or "?" in pat or "[" in pat

    # Literal excludes are matched with a cheap set lookup; only patterns
    # containing metacharacters need per-path matching below.
    non_special_excludes = set()
    match_excludes = set()
    for pattern in excludes:
        if is_special(pattern):
            match_excludes.add(pattern)
        else:
            non_special_excludes.add(pattern)

    def exclusion(path):
        # Return True when *path* is ruled out by any exclude.
        if str(path) in non_special_excludes:
            return True
        for pattern in match_excludes:
            # NOTE(review): stdlib pathlib's Path.match() accepts no
            # 'match_entire' keyword — this appears to rely on a vendored
            # pathlib variant; confirm before running against the stdlib.
            result = path.match(pattern, match_entire=True)
            if result:
                return True
        return False

    return sorted(set([str(p) for p in includes_iterator() if not exclusion(p)]))
@provide_for_build
def get_base_path(build_env=None):
    """Get the base path to the build file that was initially evaluated.

    This function is intended to be used from within a build defs file that
    likely contains macros that could be called from any build file.
    Such macros may need to know the base path of the file in which they
    are defining new build rules.

    Returns: a string, such as "java/com/facebook". Note there is no
             trailing slash. The return value will be "" if called from
             the build file in the root of the project.
    """
    # Includes have no base path of their own; only build files may call this.
    assert build_env.type == BuildContextType.BUILD_FILE, (
        "Cannot use `get_base_path()` at the top-level of an included file.")
    return build_env.base_path
@provide_for_build
def add_deps(name, deps=[], build_env=None):
    """Append *deps* to the 'deps' list of the already-defined rule *name*.

    Raises ValueError when the rule does not exist or has no 'deps' field.
    """
    assert build_env.type == BuildContextType.BUILD_FILE, (
        "Cannot use `add_deps()` at the top-level of an included file.")

    rule = build_env.rules.get(name)
    if rule is None:
        raise ValueError(
            'Invoked \'add_deps\' on non-existent rule %s.' % name)
    if 'deps' not in rule:
        raise ValueError(
            'Invoked \'add_deps\' on rule %s that has no \'deps\' field'
            % name)

    # Build a new list rather than extending in place, matching the
    # original's copy-on-write behavior.
    rule['deps'] = rule['deps'] + deps
class BuildFileProcessor(object):
    """Evaluates BUCK build files and their includes.

    Results are cached per path; build contexts are kept on a stack so that
    includes execute in the context of their including file; and the
    registered build functions are rebound to the active context before each
    file runs.
    """

    def __init__(self, project_root, implicit_includes=[]):
        # Cache of path -> (build_env, module) for already-processed files.
        self._cache = {}
        # Stack of build contexts; the top is the file currently executing.
        self._build_env_stack = []

        self._project_root = project_root
        self._implicit_includes = implicit_includes

        # Wrap each registered build function so its 'build_env' keyword can
        # be re-pointed at the current context before every file executes.
        lazy_functions = {}
        for func in BUILD_FUNCTIONS:
            func_with_env = LazyBuildEnvPartial(func)
            lazy_functions[func.__name__] = func_with_env
        self._functions = lazy_functions

    def _merge_globals(self, src, dst):
        """
        Copy the global definitions from one globals dict to another.

        Ignores special attributes and attributes starting with '_', which
        typically denote module-level private attributes.
        """
        hidden = set([
            'include_defs',
        ])
        for key, val in src.iteritems():
            if not key.startswith('_') and key not in hidden:
                dst[key] = val

    def _update_functions(self, build_env):
        """
        Updates the build functions to use the given build context when called.
        """
        for function in self._functions.itervalues():
            function.build_env = build_env

    def _install_functions(self, namespace):
        """
        Installs the build functions, by their name, into the given namespace.
        """
        for name, function in self._functions.iteritems():
            namespace[name] = function.invoke

    def _get_include_path(self, name):
        """
        Resolve the given include def name to a full path.
        """
        # Find the path from the include def name.
        if not name.startswith('//'):
            raise ValueError(
                'include_defs argument "%s" must begin with //' % name)
        # Strip the leading '//' and anchor the remainder at the project
        # root. (A previously-assigned unused local was removed here.)
        return os.path.join(self._project_root, name[2:])

    def _include_defs(self, name, implicit_includes=[]):
        """
        Pull the named include into the current caller's context.

        This method is meant to be installed into the globals of any files or
        includes that we process.
        """
        # Grab the current build context from the top of the stack.
        build_env = self._build_env_stack[-1]

        # Resolve the named include to its path and process it to get its
        # build context and module.
        path = self._get_include_path(name)
        inner_env, mod = self._process_include(
            path,
            implicit_includes=implicit_includes)

        # Look up the caller's stack frame and merge the include's globals
        # into it's symbol table. Frames belonging to this module are
        # skipped so we land on the executing build/include file.
        frame = inspect.currentframe()
        while frame.f_globals['__name__'] == __name__:
            frame = frame.f_back
        self._merge_globals(mod.__dict__, frame.f_globals)

        # Pull in the include's accounting of its own referenced includes
        # into the current build context.
        build_env.includes.add(path)
        build_env.includes.update(inner_env.includes)

    def _push_build_env(self, build_env):
        """
        Set the given build context as the current context.
        """
        self._build_env_stack.append(build_env)
        self._update_functions(build_env)

    def _pop_build_env(self):
        """
        Restore the previous build context as the current context.
        """
        self._build_env_stack.pop()
        if self._build_env_stack:
            self._update_functions(self._build_env_stack[-1])

    def _process(self, build_env, path, implicit_includes=[]):
        """
        Process a build file or include at the given path.

        Returns a (build_env, module) tuple, served from the cache when the
        path has been processed before.
        """
        # First check the cache.
        cached = self._cache.get(path)
        if cached is not None:
            return cached

        # Install the build context for this input as the current context.
        self._push_build_env(build_env)

        # The globals dict that this file will be executed under.
        default_globals = {}

        # Install the implicit build functions and adding the 'include_defs'
        # functions.
        self._install_functions(default_globals)
        default_globals['include_defs'] = functools.partial(
            self._include_defs,
            implicit_includes=implicit_includes)

        # If any implicit includes were specified, process them first.
        for include in implicit_includes:
            include_path = self._get_include_path(include)
            inner_env, mod = self._process_include(include_path)
            self._merge_globals(mod.__dict__, default_globals)
            build_env.includes.add(include_path)
            build_env.includes.update(inner_env.includes)

        # Build a new module for the given file, using the default globals
        # created above.
        module = imp.new_module(path)
        module.__file__ = path
        module.__dict__.update(default_globals)

        with open(path) as f:
            contents = f.read()

        # Enable absolute imports. This prevents the compiler from trying to
        # do a relative import first, and warning that this module doesn't
        # exist in sys.modules.
        future_features = __future__.absolute_import.compiler_flag
        code = compile(contents, path, 'exec', future_features, 1)
        exec(code, module.__dict__)

        # Restore the previous build context.
        self._pop_build_env()

        self._cache[path] = build_env, module
        return build_env, module

    def _process_include(self, path, implicit_includes=[]):
        """
        Process the include file at the given path.
        """
        build_env = IncludeContext()
        return self._process(
            build_env,
            path,
            implicit_includes=implicit_includes)

    def _process_build_file(self, path, implicit_includes=[]):
        """
        Process the build file at the given path.
        """
        # Create the build file context, including the base path and directory
        # name of the given path.
        relative_path_to_build_file = os.path.relpath(
            path, self._project_root).replace('\\', '/')
        # Strip the trailing '/BUCK' to get the rule base path.
        len_suffix = -len('/' + BUILD_RULES_FILE_NAME)
        base_path = relative_path_to_build_file[:len_suffix]
        dirname = os.path.dirname(path)
        build_env = BuildFileContext(base_path, dirname)
        return self._process(
            build_env,
            path,
            implicit_includes=implicit_includes)

    def process(self, path):
        """
        Process a build file returning a dict of it's rules and includes.
        """
        build_env, mod = self._process_build_file(
            os.path.join(self._project_root, path),
            implicit_includes=self._implicit_includes)
        values = build_env.rules.values()
        # Append the set of includes this file pulled in, so consumers can
        # invalidate results when an included file changes.
        values.append({"__includes": [path] + sorted(build_env.includes)})
        return values
# Inexplicably, this script appears to run faster when the arguments passed
# into it are absolute paths. However, we want the "buck.base_path" property
# of each rule to be printed out to be the base path of the build target that
# identifies the rule. That means that when parsing a BUILD file, we must know
# its path relative to the root of the project to produce the base path.
#
# To that end, the first argument to this script must be an absolute path to
# the project root. It must be followed by one or more absolute paths to
# BUILD files under the project root. If no paths to BUILD files are
# specified, then it will traverse the project root for BUILD files, excluding
# directories of generated files produced by Buck.
#
# All of the build rules that are parsed from the BUILD files will be printed
# to stdout by a JSON parser. That means that printing out other information
# for debugging purposes will likely break the JSON parsing, so be careful!
def main():
    """Parse BUCK files and print their rules as JSON.

    With --server, additionally reads build-file paths from stdin (one per
    line) and emits the parsed rules for each until stdin is closed.
    """
    parser = optparse.OptionParser()
    parser.add_option(
        '--project_root',
        action='store',
        type='string',
        dest='project_root')
    parser.add_option(
        '--include',
        action='append',
        dest='include')
    parser.add_option(
        '--server',
        action='store_true',
        dest='server',
        help='Invoke as a server to parse individual BUCK files on demand.')
    (options, args) = parser.parse_args()

    # Even though project_root is absolute path, it may not be concise. For
    # example, it might be like "C:\project\.\rule".
    project_root = os.path.abspath(options.project_root)

    buildFileProcessor = BuildFileProcessor(
        project_root,
        implicit_includes=options.include or [])

    for build_file in args:
        values = buildFileProcessor.process(build_file)
        if options.server:
            # Server mode: one JSON list per build file.
            print json.dumps(values)
        else:
            # Batch mode: one JSON value per rule, one per line.
            for value in values:
                print json.dumps(value)

    if options.server:
        # "for ... in sys.stdin" in Python 2.x hangs until stdin is closed.
        for build_file in iter(sys.stdin.readline, ''):
            values = buildFileProcessor.process(build_file.rstrip())
            print json.dumps(values)

    # Python tries to flush/close stdout when it quits, and if there's a dead
    # pipe on the other end, it will spit some warnings to stderr. This breaks
    # tests sometimes. Prevent that by explicitly catching the error.
    try:
        sys.stdout.close()
    except IOError:
        pass
|
If you need to find people in Coldstream Ayton then you have come to the perfect place. We have all the listings you want for the Coldstream Ayton area code and more.
If you are looking for a phone number in Coldstream Ayton then use our detailed pages to find the number you need. We have a huge number of Coldstream Ayton listings so you will stand the best chance of finding that all important number that you need. We have all the numbers you might need so use our Coldstream Ayton phone code lookup service to find the number that you have been looking for.
If you need to find a number in Coldstream Ayton, whether it is your bank, a local service or any other number then we have the listings you need. We have all the relevant listings in the Coldstream Ayton area code so you don’t have to work your way through the Yellow Pages or Google. Use our convenient service and you will be able to get the Coldstream Ayton number you need with no hassle or stress.
Received a call from someone in Coldstream Ayton? Don’t know who it is? Use our Coldstream Ayton reverse phone lookup service and you can see who has been calling you. What’s more, we have feedback from others users so you can see what type of call it is if you don’t recognise the number. This gives you the satisfaction that you know who has called you. We all want to know who it is that has called and our service allows you to get that knowledge for free.
|
import os
import time
import json
import datetime
import pdb
import ConfigParser
from pymongo import MongoClient
from ansible.plugins.callback import CallbackBase
from websocket import create_connection,WebSocket
#coding=utf8
def get_conf(cls, key):
    """Read option *key* from section *cls* of the tornado app config file."""
    parser = ConfigParser.ConfigParser()
    parser.read('/opt/tornado/conf/app.ini')
    return parser.get(cls, key)
# strftime format for human-readable timestamps.
TIME_FORMAT='%Y-%m-%d %H:%M:%S'
# Numeric status codes stored in mongo and pushed over the websocket.
statuscode = {'started':0, 'ongoing':1, 'ok':2,'skipped':4, 'unreachable':3, 'failed':3}
# MongoDB connection settings for the 'ams' database.
mongoinfo = {"host":"127.0.0.1","port":"27017","user":"","password":"","dbname":"ams"}
class CallbackModule(CallbackBase):
    """Ansible callback plugin that mirrors playbook/task progress into
    MongoDB and pushes live status updates over a websocket.
    """

    def __init__(self):
        # Number of task results processed so far (progress counter).
        self.completed_task = 0
        # UUID identifying the running playbook in the 'playbooks' collection.
        self.playbookuuid = None
        # Task currently executing (set in v2_playbook_on_task_start).
        self.task = None
        # Most recent runner result.
        self.res = None
        # Target IPs for this run (from extra_vars).
        self.iplist = None
        self.errip = None  # an error occured on an ip
        self.ws = None  # websocket

    def db(self):
        """Open a MongoDB connection and return the 'ams' database handle."""
        dbhost = mongoinfo['host']
        dbport = mongoinfo['port']
        dbuser = mongoinfo['user']
        dbpwd = mongoinfo['password']
        dbname = mongoinfo['dbname']
        uri = 'mongodb://%s:%s'%(dbhost,dbport)
        client = MongoClient(uri)
        db = client.ams
        return db

    # create ws
    def ws_create_server(self):
        """Open the websocket to the status-update endpoint (idempotent)."""
        ip=get_conf('websocket','host')
        if self.ws == None:
            self.ws = create_connection("ws://"+ip+"/itoa/updatePlaybookStatus")

    #update the uuid's status
    def ws_send_status(self,uuid,status):
        """Send 'uuid,status' over the websocket."""
        last_status = str(uuid) +','+str(status)
        self.ws.send(last_status)

    def ws_receive_status(self):
        """Drain the server's acknowledgement (result is discarded)."""
        result=self.ws.recv()

    # destroy the server
    def ws_close_server(self):
        """Close the websocket if open."""
        if self.ws != None:
            self.ws.close()
            self.ws = None

    def v2_playbook_on_start(self, playbook):
        """Record the playbook start: update server names and playbook status."""
        now=time.time()
        self.playbook=playbook
        if self.playbook._entries[0]._variable_manager.extra_vars['playbook-uuid']:
            self.playbookuuid=self.playbook._entries[0]._variable_manager.extra_vars['playbook-uuid']
            iplist=self.playbook._entries[0]._variable_manager.extra_vars['ip_list']
            self.iplist=iplist
            hostnamelist=self.playbook._entries[0]._variable_manager.extra_vars['hostname_list']
            # Persist the hostname for each target IP.
            newdict=dict(zip(iplist,hostnamelist))
            for (key,value) in newdict.items():
                self.db().servers.update({'ip':key},{'$set':{'name':value}})
            uuids= self.playbookuuid.encode('gbk')
            self.db().playbooks.update({"uuid":uuids},{'$set':{"status":statuscode.get('ongoing'),"updated_at":now}})
            self.ws_create_server()
            #time.sleep(3) # sleep for page lazy load show
            self.ws_send_status(uuids,statuscode.get('ongoing'))
            self.ws_receive_status()
            self.ws_close_server()

    def v2_playbook_on_play_start(self, play):
        pass

    def v2_playbook_on_task_start(self, task, is_conditional):
        """Mark the task 'ongoing' for every host that has not failed yet."""
        now = time.time()
        self.task=task
        allips=self.iplist
        # Skip hosts that already hit an error earlier in the run.
        if self.errip:
            if self.errip in allips:
                allips.remove(self.errip)
        for okip in allips:
            self.db().tasks.update({"name":task.get_name(),"host":okip,"playbook_uuid":self.playbookuuid},{'$set':{"status":statuscode.get('ongoing'),"updated_at":now}},\
                upsert=False,multi=False)

    def v2_playbook_on_stats(self, stats):
        """Record and broadcast the playbook's final status."""
        self.stats = stats
        self.ws_create_server()
        # Any unreachable host (dark) or failure marks the run failed.
        if self.stats.dark or self.stats.failures :
            self.playbook_final_status(self.playbookuuid,'failed')
            self.ws_send_status(self.playbookuuid,statuscode.get('failed'))
            self.ws_receive_status()
        else:
            self.playbook_final_status(self.playbookuuid,'ok')
            self.ws_send_status(self.playbookuuid,statuscode.get('ok'))
            self.ws_receive_status()
        self.ws_close_server()

    def playbook_final_status(self,playbookuuid,status):
        """Write the playbook's final status code to mongo."""
        now = time.time()
        self.db().playbooks.update({'uuid':playbookuuid},{'$set':{"status":statuscode.get(status),"updated_at":now}},upsert=False,multi=False)

    def v2_on_any(self, *args, **kwargs):
        pass

    def v2_runner_on_ok(self, result):
        # Bug fix: the original branched on result.is_changed() but both
        # arms made the identical UpdateLog(..., 'ok') call.
        self.res=result
        self.UpdateLog(self.res,self.playbookuuid,'ok')

    def v2_runner_on_unreachable(self, result):
        self.res = result
        self.UpdateLog(self.res,self.playbookuuid, 'unreachable')

    def v2_runner_on_failed(self, result, ignore_errors=False):
        self.res = result
        self.UpdateLog(self.res,self.playbookuuid, 'failed')

    def v2_runner_on_skipped(self, result):
        self.res=result
        self.UpdateLog(self.res,self.playbookuuid, 'skipped')

    # method nameMapIP is used to translate hostname to ip
    def nameMapIP(self,namelist,iplist):
        """Zip hostnames to IPs; returns None when the lists differ in length
        (preserved from the original implementation)."""
        mapping={}
        namelen=len(namelist)
        iplen = len(iplist)
        if namelen == iplen:
            i=0
            while i < namelen:
                mapping[namelist[i]]=iplist[i]
                i = i+1
            return mapping

    def UpdateLog(self,values , playbook_uuid, status, type=None):
        """Persist one task result: update the task's status, track failures,
        and advance the playbook progress counter."""
        now = time.time()
        # The implicit 'setup' gather-facts task is not tracked.
        if status == 'started' or str(self.task) == 'TASK: setup':
            pass
        else:
            hostsdict=dict(zip(self.task.get_variable_manager().extra_vars['hostname_list'],self.task.get_variable_manager().extra_vars['ip_list']))
            if self.errip:
                # Drop the errored host. (Safe in Python 2: items() returns
                # a list, so popping during iteration is fine.)
                for (key,value) in hostsdict.items(): #judge if exists errip
                    if value == self.errip:
                        hostsdict.pop(key)
            host=None
            if values._host.get_name() =='localhost' or values._host.get_name() =='127.0.0.1':
                host='127.0.0.1'
            else:
                host=str(hostsdict[values._host.get_name()])
            self.db().tasks.update({"playbook_uuid":playbook_uuid, "host":host, "name":self.task.get_name()},{'$set':{"status":statuscode.get(status),"updated_at":now}})
            if status == 'failed' or status == 'unreachable':
                self.errip=host # where ip has error ,save to errip
                self.completed_task = self.completed_task + 1
                # Store any diagnostic output the module produced.
                if values._result.has_key('msg'):
                    self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'msg':values._result['msg']}})
                if values._result.has_key('stderr'):
                    self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'stderr':values._result['stderr']}})
                if values._result.has_key('stdout'):
                    self.db().playbooks.update({'uuid':playbook_uuid},{'$set':{'stdout':values._result['stdout']}})
            elif status == 'ok' or status == 'skipped':
                self.completed_task = self.completed_task + 1
                self.db().playbooks.update({"uuid":playbook_uuid},{'$set':{"completed":self.completed_task, "updated_at":now}})
            else:
                pass
|
This is the history of the brand Kaiser since the establishment of the company and up to the most modern of vehicles produced under this brand.
We have collected the best classic cars in our catalogue. In addition, if you're going to buy one - just check the latest Kaiser listings.
"Kaiser", an American company to manufacture cars.
In 1946, Henry J. Kaiser and Joseph W. Frazer founded their own company. The company's goal was to bring to market consumer-attractive cars that could compete with the companies dominating the market, such as "Chrysler", "Ford" and "General Motors". Models of "Kaiser" were made to order and were cars of the "luxury" class. "Kaiser" was in great demand due to its technical solutions and original design.
In the early 1950s, company ran into financial difficulties - greatly increased the cost of design work and production costs. For this reason the "Kaiser" and "Willys" united and jointly developed a model roadster Kaiser Darrin, first introduced in 1953. It was a two-seater sports car, distinguished by a kind of "anatomical" body design, made of plastic, and sliding doors. Due to lower sales company launched the model in Argentina, where it was called Carabela. In 1955, due to financial problems, the company was forced to sell the factory to Argentina.
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import uuid
import ddt
import httpretty
from django.core.exceptions import ValidationError
from django.db import IntegrityError
from django.test import override_settings
from django.utils.translation import ugettext_lazy as _
from factory.fuzzy import FuzzyText
from oscar.templatetags.currency_filters import currency
from oscar.test.factories import (
BenefitFactory,
ConditionalOfferFactory,
OrderFactory,
OrderLineFactory,
RangeFactory,
VoucherFactory,
datetime,
get_model
)
from six.moves import range
from ecommerce.core.url_utils import get_ecommerce_url
from ecommerce.coupons.tests.mixins import CouponMixin, DiscoveryMockMixin
from ecommerce.courses.tests.factories import CourseFactory
from ecommerce.entitlements.utils import create_or_update_course_entitlement
from ecommerce.extensions.api import exceptions
from ecommerce.extensions.catalogue.tests.mixins import DiscoveryTestMixin
from ecommerce.extensions.fulfillment.modules import CouponFulfillmentModule
from ecommerce.extensions.fulfillment.status import LINE
from ecommerce.extensions.offer.models import OFFER_PRIORITY_VOUCHER
from ecommerce.extensions.test.factories import create_order, prepare_voucher
from ecommerce.extensions.voucher.utils import (
create_vouchers,
generate_coupon_report,
get_voucher_and_products_from_code,
get_voucher_discount_info,
update_voucher_offer
)
from ecommerce.tests.factories import UserFactory
from ecommerce.tests.mixins import LmsApiMockMixin
from ecommerce.tests.testcases import TestCase
# Oscar models are resolved dynamically via get_model() so that any
# project-level overrides of these apps are picked up.
Basket = get_model('basket', 'Basket')
Benefit = get_model('offer', 'Benefit')
Catalog = get_model('catalogue', 'Catalog')
CouponVouchers = get_model('voucher', 'CouponVouchers')
Order = get_model('order', 'Order')
Product = get_model('catalogue', 'Product')
ProductCategory = get_model('catalogue', 'ProductCategory')
ProductClass = get_model('catalogue', 'ProductClass')
StockRecord = get_model('partner', 'StockRecord')
Voucher = get_model('voucher', 'Voucher')
# Fixed code used by tests that create a voucher with an explicit code.
VOUCHER_CODE = "XMASC0DE"
# Deliberately tiny generated-code length; used with override_settings to
# force code collisions in test_regenerate_voucher_code.
VOUCHER_CODE_LENGTH = 1
@ddt.ddt
@httpretty.activate
class UtilTests(CouponMixin, DiscoveryMockMixin, DiscoveryTestMixin, LmsApiMockMixin, TestCase):
    """
    Tests for the voucher utility helpers: create_vouchers,
    generate_coupon_report, get_voucher_discount_info,
    get_voucher_and_products_from_code and update_voucher_offer.
    """
    # Course/certificate fixtures shared by the mixins and tests below.
    course_id = 'edX/DemoX/Demo_Course'
    certificate_type = 'test-certificate-type'
    provider = None
    def setUp(self):
        """Create a staff user, a course with a verified seat, and two coupons
        (one catalog/seat coupon and one entitlement coupon)."""
        super(UtilTests, self).setUp()
        self.user = self.create_user(full_name="Tešt Ušer", is_staff=True)
        self.client.login(username=self.user.username, password=self.password)
        self.course = CourseFactory(id='course-v1:test-org+course+run', partner=self.partner)
        self.verified_seat = self.course.create_or_update_seat('verified', False, 100)
        self.catalog = Catalog.objects.create(partner=self.partner)
        self.stock_record = StockRecord.objects.filter(product=self.verified_seat).first()
        self.seat_price = self.stock_record.price_excl_tax
        self.catalog.stock_records.add(self.stock_record)
        self.coupon = self.create_coupon(
            title='Tešt product',
            catalog=self.catalog,
            note='Tešt note',
            quantity=1,
            max_uses=1,
            voucher_type=Voucher.MULTI_USE
        )
        self.coupon.history.all().update(history_user=self.user)
        self.coupon_vouchers = CouponVouchers.objects.filter(coupon=self.coupon)
        self.entitlement = create_or_update_course_entitlement(
            'verified', 100, self.partner, 'foo-bar', 'Foo Bar Entitlement'
        )
        self.entitlement_stock_record = StockRecord.objects.filter(product=self.entitlement).first()
        self.entitlement_catalog = Catalog.objects.create(partner=self.partner)
        self.entitlement_catalog.stock_records.add(self.entitlement_stock_record)
        self.entitlement_coupon = self.create_coupon(
            title='Tešt Entitlement product',
            catalog=self.entitlement_catalog,
            note='Tešt Entitlement note',
            quantity=1,
            max_uses=1,
            voucher_type=Voucher.MULTI_USE
        )
        self.entitlement_coupon_vouchers = CouponVouchers.objects.filter(coupon=self.entitlement_coupon)
        self.partner_sku = 'test_sku'
        # Baseline kwargs for create_vouchers(); individual tests update this
        # dict in place before calling create_vouchers(**self.data).
        self.data = {
            'benefit_type': Benefit.PERCENTAGE,
            'benefit_value': 100.00,
            'catalog': self.catalog,
            'coupon': self.coupon,
            'end_datetime': datetime.datetime.now() + datetime.timedelta(days=1),
            'enterprise_customer': None,
            'enterprise_customer_catalog': None,
            'name': "Test voucher",
            'quantity': 10,
            'start_datetime': datetime.datetime.now() - datetime.timedelta(days=1),
            'voucher_type': Voucher.SINGLE_USE
        }
    def create_benefits(self):
        """
        Create all Benefit permutations
        - Benefit type: Percentage, Benefit value: 100%
        - Benefit type: Percentage, Benefit value: 50%
        - Benefit type: Value, Benefit value: seat price
        - Benefit type: Value, Benefit value: half the seat price
        """
        _range = RangeFactory(products=[self.verified_seat, ])
        benefit_percentage_all = BenefitFactory(type=Benefit.PERCENTAGE, range=_range, value=100.00)
        benefit_percentage_half = BenefitFactory(type=Benefit.PERCENTAGE, range=_range, value=50.00)
        benefit_value_all = BenefitFactory(type=Benefit.FIXED, range=_range, value=self.seat_price)
        benefit_value_half = BenefitFactory(type=Benefit.FIXED, range=_range, value=self.seat_price / 2)
        return [benefit_percentage_all, benefit_percentage_half, benefit_value_all, benefit_value_half]
    def setup_coupons_for_report(self):
        """ Create specific coupons to test report generation """
        # First: a 50%-off, once-per-customer voucher with a fixed code.
        self.data.update({
            'benefit_value': 50.00,
            'code': VOUCHER_CODE,
            'max_uses': 1,
            'name': 'Discount',
            'quantity': 1,
            'voucher_type': Voucher.ONCE_PER_CUSTOMER
        })
        vouchers = create_vouchers(**self.data)
        self.coupon_vouchers.first().vouchers.add(*vouchers)
        # Second: a fixed-value, single-use voucher with a generated code.
        del self.data['code']
        del self.data['max_uses']
        self.data.update({
            'benefit_type': Benefit.FIXED,
            'benefit_value': 100.00,
            'voucher_type': Voucher.SINGLE_USE
        })
        vouchers = create_vouchers(**self.data)
        self.coupon_vouchers.first().vouchers.add(*vouchers)
    def create_catalog_coupon(
            self,
            coupon_title='Query coupon',
            quantity=1,
            catalog_query='*:*',
            course_seat_types='verified'
    ):
        """Create a dynamic (catalog-query based) coupon, mocking the
        discovery course-runs endpoint it queries."""
        self.mock_course_runs_endpoint(self.site_configuration.discovery_api_url)
        return self.create_coupon(
            title=coupon_title,
            quantity=quantity,
            catalog_query=catalog_query,
            course_seat_types=course_seat_types
        )
    def create_course_catalog_coupon(self, coupon_title, quantity, course_catalog, course_seat_types):
        """Create a coupon linked to a discovery course catalog id."""
        return self.create_coupon(
            title=coupon_title,
            quantity=quantity,
            course_catalog=course_catalog,
            course_seat_types=course_seat_types,
        )
    def use_voucher(self, order_num, voucher, user, add_entitlement=False, product=None):
        """
        Mark voucher as used by the provided user.
        Args:
            order_num (string): Order number
            voucher (Voucher): voucher to be marked as used
            user (User): user who redeemed the voucher
            add_entitlement (bool): also attach an entitlement order line
            product (Product): ordered product; defaults to the verified seat
        """
        order = OrderFactory(number=order_num)
        if add_entitlement:
            order_line = OrderLineFactory(product=self.entitlement, partner_sku=self.partner_sku)
            order.lines.add(order_line)
        product = product if product else self.verified_seat
        order_line = OrderLineFactory(product=product, partner_sku=self.partner_sku)
        order.lines.add(order_line)
        voucher.record_usage(order, user)
        voucher.offers.first().record_usage(discount={'freq': 1, 'discount': 1})
    def validate_report_of_redeemed_vouchers(self, row, username, order_num):
        """ Helper method for validating coupon report data for when a coupon was redeemed. """
        self.assertEqual(row['Status'], _('Redeemed'))
        self.assertEqual(row['Redeemed By Username'], username)
        self.assertEqual(row['Order Number'], order_num)
    def test_create_vouchers(self):
        """
        Test voucher creation
        """
        email_domains = 'edx.org,example.com'
        self.data.update({
            'email_domains': email_domains,
            'name': 'Tešt voučher',
            'site': self.site
        })
        vouchers = create_vouchers(**self.data)
        self.assertEqual(len(vouchers), 10)
        voucher = vouchers[0]
        voucher_offer = voucher.offers.first()
        coupon_voucher = CouponVouchers.objects.get(coupon=self.coupon)
        coupon_voucher.vouchers.add(*vouchers)
        self.assertEqual(voucher_offer.benefit.type, Benefit.PERCENTAGE)
        self.assertEqual(voucher_offer.benefit.max_affected_items, None)
        self.assertEqual(voucher_offer.benefit.value, 100.00)
        self.assertEqual(voucher_offer.benefit.range.catalog, self.catalog)
        self.assertEqual(voucher_offer.email_domains, email_domains)
        self.assertEqual(voucher_offer.priority, OFFER_PRIORITY_VOUCHER)
        self.assertEqual(voucher_offer.partner, self.partner)
        # 10 newly created vouchers plus the one the setUp coupon already had.
        self.assertEqual(len(coupon_voucher.vouchers.all()), 11)
        self.assertEqual(voucher.end_datetime, self.data['end_datetime'])
        self.assertEqual(voucher.start_datetime, self.data['start_datetime'])
        self.assertEqual(voucher.usage, Voucher.SINGLE_USE)
    def test_create_voucher_with_long_name(self):
        """Voucher names longer than the DB column are trimmed on creation."""
        self.data.update({
            'name': (
                'This Is A Really Really Really Really Really Really Long '
                'Voucher Name That Needs To Be Trimmed To Fit Into The Name Column Of The DB'
            )
        })
        trimmed = (
            'This Is A Really Really Really Really Really Really Long '
            'Voucher Name That Needs To Be Trimmed To Fit Into The Name Column Of Th'
        )
        vouchers = create_vouchers(**self.data)
        voucher = vouchers[0]
        self.assertEqual(voucher.name, trimmed)
    @ddt.data(
        {'end_datetime': ''},
        {'end_datetime': 3},
        {'end_datetime': 'nonumbers'},
        {'start_datetime': ''},
        {'start_datetime': 3},
        {'start_datetime': 'nonumbers'},
    )
    def test_create_vouchers_with_incorrect_datetime_value(self, data):
        """ Test calling create vouchers with incorrect start/end datetime value raises exception. """
        self.data.update(data)
        with self.assertRaises(ValidationError):
            create_vouchers(**self.data)
    @override_settings(VOUCHER_CODE_LENGTH=VOUCHER_CODE_LENGTH)
    def test_regenerate_voucher_code(self):
        """
        Test that voucher code will be regenerated if it already exists
        """
        self.data.update({
            'benefit_value': 90.00,
            'quantity': 1
        })
        # Pre-claim most single-character codes; with VOUCHER_CODE_LENGTH=1
        # collisions become likely, forcing the regeneration path.
        for code in 'BCDFGHJKL':
            self.data['code'] = code
            create_vouchers(**self.data)
        del self.data['code']
        for __ in range(20):
            voucher = create_vouchers(**self.data)
            self.assertTrue(Voucher.objects.filter(code__iexact=voucher[0].code).exists())
    @override_settings(VOUCHER_CODE_LENGTH=0)
    def test_nonpositive_voucher_code_length(self):
        """
        Test that setting a voucher code length to a nonpositive integer value
        raises a ValueError
        """
        with self.assertRaises(ValueError):
            create_vouchers(**self.data)
    def test_create_discount_coupon(self):
        """
        Test discount voucher creation with specified code
        """
        self.data.update({
            'benefit_value': 25.00,
            'code': VOUCHER_CODE,
            'quantity': 1
        })
        discount_vouchers = create_vouchers(**self.data)
        self.assertEqual(len(discount_vouchers), 1)
        self.assertEqual(discount_vouchers[0].code, VOUCHER_CODE)
        # Re-using an explicit code must violate the unique constraint.
        with self.assertRaises(IntegrityError):
            create_vouchers(**self.data)
    def test_create_course_catalog_coupon(self):
        """
        Test course catalog coupon voucher creation with specified catalog id.
        """
        coupon_title = 'Course catalog coupon'
        quantity = 1
        course_catalog = 1
        course_catalog_coupon = self.create_course_catalog_coupon(
            coupon_title=coupon_title,
            quantity=quantity,
            course_catalog=course_catalog,
            course_seat_types='verified',
        )
        self.assertEqual(course_catalog_coupon.title, coupon_title)
        course_catalog_vouchers = course_catalog_coupon.attr.coupon_vouchers.vouchers.all()
        self.assertEqual(course_catalog_vouchers.count(), quantity)
        course_catalog_voucher_range = course_catalog_vouchers.first().offers.first().benefit.range
        self.assertEqual(course_catalog_voucher_range.course_catalog, course_catalog)
    def test_create_program_coupon(self):
        """
        Test program coupon voucher creation with specified program uuid.
        """
        coupon_title = 'Program coupon'
        quantity = 1
        program_uuid = uuid.uuid4()
        program_coupon = self.create_coupon(
            title=coupon_title,
            quantity=quantity,
            program_uuid=program_uuid,
            course_seat_types='verified',
        )
        self.assertEqual(program_coupon.title, coupon_title)
        program_vouchers = program_coupon.attr.coupon_vouchers.vouchers.all()
        program_voucher_offer = program_vouchers.first().offers.first()
        self.assertEqual(program_vouchers.count(), quantity)
        self.assertEqual(program_voucher_offer.condition.program_uuid, program_uuid)
    def assert_report_first_row(self, row, coupon, voucher):
        """
        Verify that the first row fields contain the right data.
        Args:
            row (list): First row in report
            coupon (Product): Coupon for which the report is generated
            voucher (Voucher): Voucher associated with the Coupon
        """
        offer = voucher.offers.first()
        if offer.condition.range.catalog:
            # Catalog coupons: discount info is derived from the seat price.
            discount_data = get_voucher_discount_info(
                offer.benefit,
                offer.condition.range.catalog.stock_records.first().price_excl_tax
            )
            coupon_type = _('Discount') if discount_data['is_discounted'] else _('Enrollment')
            discount_percentage = _('{percentage} %').format(percentage=discount_data['discount_percentage'])
            discount_amount = currency(discount_data['discount_value'])
        else:
            # Dynamic (query/program) coupons have no fixed price to report.
            if offer.benefit.type == Benefit.PERCENTAGE:
                coupon_type = _('Discount') if offer.benefit.value < 100 else _('Enrollment')
            else:
                coupon_type = None
            discount_amount = None
            discount_percentage = _('{percentage} %').format(
                percentage=offer.benefit.value) if offer.benefit.type == Benefit.PERCENTAGE else None
        self.assertEqual(row['Coupon Type'], coupon_type)
        self.assertEqual(row['Category'], ProductCategory.objects.get(product=coupon).category.name)
        self.assertEqual(row['Discount Percentage'], discount_percentage)
        self.assertEqual(row['Discount Amount'], discount_amount)
        self.assertEqual(row['Client'], coupon.client.name)
        self.assertEqual(row['Note'], coupon.attr.note)
        self.assertEqual(row['Create Date'], coupon.date_updated.strftime("%b %d, %y"))
        self.assertEqual(row['Coupon Start Date'], voucher.start_datetime.strftime("%b %d, %y"))
        self.assertEqual(row['Coupon Expiry Date'], voucher.end_datetime.strftime("%b %d, %y"))
    def assert_report_row(self, row, voucher):
        """
        Verify that the row fields contain the right data.
        Args:
            row (list): Non first row in report
            voucher (Voucher): Voucher associated with the Coupon
        """
        offer = voucher.offers.first()
        if voucher.usage == Voucher.SINGLE_USE:
            max_uses_count = 1
        elif voucher.usage != Voucher.SINGLE_USE and offer.max_global_applications is None:
            # NOTE(review): 10000 presumably mirrors the "unlimited"
            # placeholder used by generate_coupon_report — confirm there.
            max_uses_count = 10000
        else:
            max_uses_count = offer.max_global_applications
        self.assertEqual(row['Maximum Coupon Usage'], max_uses_count)
        self.assertEqual(row['Code'], voucher.code)
        self.assertEqual(
            row['URL'],
            get_ecommerce_url() + self.REDEMPTION_URL.format(voucher.code)
        )
    def test_generate_coupon_report_for_entitlement(self):
        """ Verify the coupon report is generated properly in case of entitlements. """
        self.data['coupon'] = self.entitlement_coupon
        self.data['catalog'] = self.entitlement_catalog
        self.coupon_vouchers = self.entitlement_coupon_vouchers
        self.setup_coupons_for_report()
        client = UserFactory()
        basket = Basket.get_basket(client, self.site)
        basket.add_product(self.entitlement_coupon)
        vouchers = self.coupon_vouchers.first().vouchers.all()
        self.use_voucher('TESTORDER1', vouchers[1], self.user, add_entitlement=True)
        self.mock_course_api_response(course=self.course)
        # The report must not choke on entitlement order lines.
        try:
            generate_coupon_report(self.coupon_vouchers)
        except TypeError:
            self.fail("Exception:ErrorType raised unexpectedly!")
    def test_generate_coupon_report(self):
        """ Verify the coupon report is generated properly. """
        self.setup_coupons_for_report()
        client = UserFactory()
        basket = Basket.get_basket(client, self.site)
        basket.add_product(self.coupon)
        vouchers = self.coupon_vouchers.first().vouchers.all()
        self.use_voucher('TESTORDER1', vouchers[1], self.user)
        user2 = UserFactory()
        self.use_voucher('TESTORDER2', vouchers[2], self.user)
        self.use_voucher('TESTORDER3', vouchers[2], user2)
        self.mock_course_api_response(course=self.course)
        field_names, rows = generate_coupon_report(self.coupon_vouchers)
        self.assertEqual(field_names, [
            'Code',
            'Coupon Name',
            'Maximum Coupon Usage',
            'Redemption Count',
            'Coupon Type',
            'URL',
            'Course ID',
            'Organization',
            'Client',
            'Category',
            'Note',
            'Price',
            'Invoiced Amount',
            'Discount Percentage',
            'Discount Amount',
            'Status',
            'Order Number',
            'Redeemed By Username',
            'Create Date',
            'Coupon Start Date',
            'Coupon Expiry Date',
            'Email Domains',
        ])
        voucher = Voucher.objects.get(name=rows[0]['Coupon Name'])
        self.assert_report_first_row(rows.pop(0), self.coupon, voucher)
        for row in rows:
            voucher = Voucher.objects.get(code=row['Code'])
            self.assert_report_row(row, voucher)
        # Query-coupon-only columns must not appear for catalog coupons.
        self.assertNotIn('Catalog Query', field_names)
        self.assertNotIn('Course Seat Types', field_names)
        self.assertNotIn('Redeemed For Course ID', field_names)
    def test_report_for_dynamic_coupon_with_fixed_benefit_type(self):
        """ Verify the coupon report contains correct data for coupon with fixed benefit type. """
        dynamic_coupon = self.create_coupon(
            benefit_type=Benefit.FIXED,
            benefit_value=50,
            catalog_query='*:*',
            course_seat_types='verified',
            max_uses=1,
            note='Tešt note',
            quantity=1,
            title='Tešt product',
            voucher_type=Voucher.MULTI_USE
        )
        coupon_voucher = CouponVouchers.objects.get(coupon=dynamic_coupon)
        __, rows = generate_coupon_report([coupon_voucher])
        voucher = coupon_voucher.vouchers.first()
        self.assert_report_first_row(rows[0], dynamic_coupon, voucher)
    def test_generate_coupon_report_with_deleted_product(self):
        """ Verify the report shows 'Unknown' for a redeemed product that was deleted. """
        course = CourseFactory(id='course-v1:del-org+course+run', partner=self.partner)
        professional_seat = course.create_or_update_seat('professional', False, 100)
        query_coupon = self.create_catalog_coupon(catalog_query='course:*')
        vouchers = query_coupon.attr.coupon_vouchers.vouchers.all()
        first_voucher = vouchers.first()
        self.use_voucher('TESTORDER1', first_voucher, self.user, product=professional_seat)
        professional_seat.delete()
        __, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
        self.assert_report_first_row(rows[0], query_coupon, first_voucher)
        self.assertDictContainsSubset({'Redeemed For Course ID': 'Unknown'}, rows[2])
    def test_report_for_inactive_coupons(self):
        """ Verify the coupon report show correct status for inactive coupons. """
        self.data.update({
            'name': self.coupon.title,
            'end_datetime': datetime.datetime.now() - datetime.timedelta(days=1)
        })
        vouchers = create_vouchers(**self.data)
        self.coupon_vouchers.first().vouchers.add(*vouchers)
        __, rows = generate_coupon_report(self.coupon_vouchers)
        # The data that is the same for all vouchers like Coupon Name, Coupon Type, etc.
        # are only shown in row[0]
        # The data that is unique among vouchers like Code, Url, Status, etc.
        # starts from row[1]
        self.assertEqual(rows[0]['Coupon Name'], self.coupon.title)
        self.assertEqual(rows[2]['Status'], _('Inactive'))
    def test_generate_coupon_report_for_query_coupons(self):
        """ Verify empty report fields for query coupons. """
        catalog_query = 'course:*'
        self.mock_course_runs_endpoint(self.site_configuration.discovery_api_url)
        query_coupon = self.create_catalog_coupon(catalog_query=catalog_query)
        field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
        empty_fields = (
            'Discount Amount',
            'Price',
        )
        for field in empty_fields:
            self.assertIsNone(rows[0][field])
        self.assertNotIn('Course ID', field_names)
        self.assertNotIn('Organization', field_names)
        self.assertNotIn('Program UUID', field_names)
        self.assertIn('Catalog Query', field_names)
        self.assertEqual(rows[0]['Catalog Query'], catalog_query)
        self.assertIn('Course Seat Types', field_names)
        self.assertEqual(rows[0]['Course Seat Types'], 'verified')
        # Redemption columns exist but are unset for an unused voucher.
        self.assertIn('Redeemed For Course ID', field_names)
        self.assertNotIn('Redeemed For Course ID', rows[0])
        self.assertIn('Redeemed For Course IDs', field_names)
        self.assertNotIn('Redeemed For Course IDs', rows[0])
    def test_get_voucher_discount_info(self):
        """ Verify that get_voucher_discount_info() returns correct info. """
        benefits = self.create_benefits()
        for benefit in benefits:
            # Full-value benefits report 100% / not discounted; half-value
            # benefits report 50% / discounted.
            discount_info = get_voucher_discount_info(benefit, self.seat_price)
            if (benefit.type == "Percentage" and benefit.value == 100.00) or \
                    (benefit.type == "Absolute" and benefit.value == self.seat_price):
                self.assertEqual(discount_info['discount_percentage'], 100.00)
                self.assertEqual(discount_info['discount_value'], 100.00)
                self.assertFalse(discount_info['is_discounted'])
            else:
                self.assertEqual(discount_info['discount_percentage'], 50.00)
                self.assertEqual(discount_info['discount_value'], 50.00)
                self.assertTrue(discount_info['is_discounted'])
            # A zero price yields zero discount regardless of benefit.
            discount_info = get_voucher_discount_info(benefit, 0.0)
            self.assertEqual(discount_info['discount_percentage'], 0.00)
            self.assertEqual(discount_info['discount_value'], 0.00)
            self.assertFalse(discount_info['is_discounted'])
        # A missing benefit yields zero discount.
        discount_info = get_voucher_discount_info(None, 0.0)
        self.assertEqual(discount_info['discount_percentage'], 0.00)
        self.assertEqual(discount_info['discount_value'], 0.00)
        self.assertFalse(discount_info['is_discounted'])
        discount_info = get_voucher_discount_info(None, self.seat_price)
        self.assertEqual(discount_info['discount_percentage'], 0.00)
        self.assertEqual(discount_info['discount_value'], 0.00)
        self.assertFalse(discount_info['is_discounted'])
        # A fixed benefit larger than the price is capped at the price.
        discount_info = get_voucher_discount_info(benefits[-1], 20.00)
        self.assertEqual(discount_info['discount_percentage'], 100.00)
        self.assertEqual(discount_info['discount_value'], 20.00)
        self.assertFalse(discount_info['is_discounted'])
    def test_multiple_usage_coupon(self):
        """Test that multiple-usage coupon is created and the usage number decreased on usage."""
        # Verify that the created voucher has a single possible application
        # (the setUp coupon was created with max_uses=1).
        voucher = self.coupon.attr.coupon_vouchers.vouchers.first()
        self.assertEqual(voucher.offers.first().get_max_applications(), 1)
        # Verify that the voucher now has been applied and usage number decreased.
        basket = self.apply_voucher(self.user, self.site, voucher)
        order = create_order(basket=basket, user=self.user)
        lines = order.lines.all()
        order, completed_lines = CouponFulfillmentModule().fulfill_product(order, lines)
        self.assertEqual(completed_lines[0].status, LINE.COMPLETE)
        self.assertEqual(len(basket.applied_offers()), 1)
        self.assertEqual(voucher.offers.first().get_max_applications(), 0)
        # Verify that the voucher with now 0 usage number wasn't applied to the basket.
        new_basket = self.apply_voucher(self.user, self.site, voucher)
        self.assertEqual(len(new_basket.applied_offers()), 0)
    def test_single_use_redemption_count(self):
        """Verify redemption count does not increment for other, unused, single-use vouchers."""
        coupon = self.create_coupon(
            title='Test single use',
            catalog=self.catalog,
            quantity=2
        )
        vouchers = coupon.attr.coupon_vouchers.vouchers.all()
        self.use_voucher('TEST', vouchers[0], self.user)
        __, rows = generate_coupon_report([coupon.attr.coupon_vouchers])
        # rows[0] - This row is different from other rows
        # rows[1] - first voucher header row
        # rows[2] - first voucher row with usage information
        # rows[3] - second voucher header row
        self.assertEqual(len(rows), 4)
        self.assertEqual(rows[1]['Redemption Count'], 1)
        self.assertEqual(rows[2]['Redeemed By Username'], self.user.username)
        self.assertEqual(rows[3]['Redemption Count'], 0)
    def test_generate_coupon_report_for_used_query_coupon(self):
        """Test that used query coupon voucher reports which course was it used for."""
        catalog_query = '*:*'
        self.mock_course_runs_endpoint(
            self.site_configuration.discovery_api_url, query=catalog_query, course_run=self.course
        )
        self.mock_course_runs_contains_endpoint(
            course_run_ids=[self.verified_seat.course_id], query=catalog_query,
            discovery_api_url=self.site_configuration.discovery_api_url
        )
        query_coupon = self.create_catalog_coupon(catalog_query=catalog_query)
        voucher = query_coupon.attr.coupon_vouchers.vouchers.first()
        voucher.offers.first().condition.range.add_product(self.verified_seat)
        self.use_voucher('TESTORDER4', voucher, self.user)
        field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
        self.assertIn('Redeemed For Course ID', field_names)
        self.assertIn('Redeemed By Username', field_names)
        self.assertEqual(rows[-1]['Redeemed By Username'], self.user.username)
        self.assertEqual(rows[-1]['Redeemed For Course ID'], self.course.id)
    def test_generate_coupon_report_for_query_coupon_with_multi_line_order(self):
        """
        Test that coupon report for a query coupon that was used on multi-line order
        contains ids from all courses in that order.
        """
        course1 = CourseFactory()
        course2 = CourseFactory()
        order = OrderFactory(number='TESTORDER')
        order.lines.add(
            OrderLineFactory(
                product=course1.create_or_update_seat('verified', False, 101),
                partner_sku=self.partner_sku
            )
        )
        order.lines.add(
            OrderLineFactory(
                product=course2.create_or_update_seat('verified', False, 110),
                partner_sku=self.partner_sku
            )
        )
        query_coupon = self.create_catalog_coupon(catalog_query='*:*')
        voucher = query_coupon.attr.coupon_vouchers.vouchers.first()
        voucher.record_usage(order, self.user)
        field_names, rows = generate_coupon_report([query_coupon.attr.coupon_vouchers])
        # Multi-line orders populate the plural column, not the singular one.
        expected_redemed_course_ids = '{}, {}'.format(course1.id, course2.id)
        self.assertEqual(rows[-1]['Redeemed For Course IDs'], expected_redemed_course_ids)
        self.assertEqual(rows[-1].get('Redeemed For Course ID'), None)
        self.assertIn('Redeemed For Course ID', field_names)
        self.assertIn('Redeemed For Course IDs', field_names)
    def test_update_voucher_offer(self):
        """Test updating a voucher."""
        self.data['email_domains'] = 'example.com'
        vouchers = create_vouchers(**self.data)
        voucher = vouchers[0]
        voucher_offer = voucher.offers.first()
        self.assertEqual(voucher_offer.benefit.type, Benefit.PERCENTAGE)
        self.assertEqual(voucher_offer.benefit.value, 100.00)
        self.assertEqual(voucher_offer.benefit.range.catalog, self.catalog)
        new_email_domains = 'example.org'
        new_offer = update_voucher_offer(
            voucher_offer, 50.00, Benefit.PERCENTAGE,
            email_domains=new_email_domains
        )
        self.assertEqual(new_offer.benefit.type, Benefit.PERCENTAGE)
        self.assertEqual(new_offer.benefit.value, 50.00)
        self.assertEqual(new_offer.benefit.range.catalog, self.catalog)
        self.assertEqual(new_offer.email_domains, new_email_domains)
    def test_get_voucher_and_products_from_code(self):
        """ Verify that get_voucher_and_products_from_code() returns products and voucher. """
        original_voucher, original_product = prepare_voucher(code=VOUCHER_CODE)
        voucher, products = get_voucher_and_products_from_code(code=VOUCHER_CODE)
        self.assertIsNotNone(voucher)
        self.assertEqual(voucher, original_voucher)
        self.assertEqual(voucher.code, VOUCHER_CODE)
        self.assertEqual(len(products), 1)
        self.assertEqual(products[0], original_product)
    def test_no_product(self):
        """ Verify that an exception is raised if there is no product. """
        voucher = VoucherFactory()
        offer = ConditionalOfferFactory()
        voucher.offers.add(offer)
        with self.assertRaises(exceptions.ProductNotFoundError):
            get_voucher_and_products_from_code(code=voucher.code)
    def test_get_non_existing_voucher(self):
        """ Verify that get_voucher_and_products_from_code() raises exception for a non-existing voucher. """
        with self.assertRaises(Voucher.DoesNotExist):
            get_voucher_and_products_from_code(code=FuzzyText().fuzz())
    def test_generate_coupon_report_for_program_coupon(self):
        """ Only program coupon applicable fields should be shown. """
        program_uuid = uuid.uuid4()
        program_coupon = self.create_coupon(
            title='Program Coupon Report',
            program_uuid=program_uuid,
        )
        field_names, rows = generate_coupon_report([program_coupon.attr.coupon_vouchers])
        for field in ('Discount Amount', 'Price'):
            self.assertIsNone(rows[0][field])
        removed_fields = ('Catalog Query', 'Course ID', 'Course Seat Types', 'Organization', 'Redeemed For Course ID',)
        for field_name in removed_fields:
            self.assertNotIn(field_name, field_names)
        self.assertIn('Program UUID', field_names)
        self.assertEqual(rows[0]['Program UUID'], program_uuid)
|
Hello, Crafty Friends!! It's another gorgeous day in and out of my craft room!! And I want to share some of that gorgeousness with all of you!!
Today's project was made using GORGEOUS papers from DCWV's Royal Gypsy Stack. This entire stack is filled with beautiful patterns with a variety of rich blues. And as an awesome bonus... check out the awesome gold foil!!
This sheet is just an awesome sample of what is included.
It is also what started this whole project! I was flipping through the pages and found a gold foiled feather. It was part of a panel that had a beautiful sentiment. But the sentiment was too large for what I had in mind.
I decided to use it anyway. And used my Marvy Le Plume II Markers and a water brush to color and shade my gold feather. I used Pale Green for most of the feather with Lt. Blue down the center. I added more Lt. Blue up top and Violet in the center for the "eye". Then just blended out all the colors with my water brush.
Once I got the colors to the intensity I wanted, I cut the feather out by hand.
I had this awesome die in my collection and had been wanting to make one for a while now.
So, I used a couple coordinating sheets from the Royal Gypsy Stack and layered it onto some Mat Board. Then ran through my Big Shot stacked on my die... and cut it all at the same time!
It not only cut my patterned paper, it separated the patterned paper pieces at the crease lines (which saved me time from cutting them apart), and it made crease lines in the mat board, all in one pass!!
Next, I ran the segmented pieces through my Xyron Creative Station Lite to add adhesive to the backside of the paper.
Then attached the patterned paper to both sides of my book cover and buffed down the sharp edges with a sanding block.
I added a line of clear liquid adhesive to the inside middle of the book. ONLY down the center.
Then added a 4"x 3.25" piece of felt.
I didn't worry about glue showing through... I simply ran crinkle ribbon down the middle, flipped it over and tied it off on the outside. Finishing off with a basic bow.
Once, my glue had dried. I moved onto dressing up my front. I wanted the feather to be the focal point of the cover but it got lost in the patterned paper I chose.
I found the really pretty gold foil doily on the Paper Lace pad and it was perfect... it just needed to be trimmed down a bit. I cut about 2 inches off and attached it to my front cover.
Then added foam adhesive behind the feather and mounted it onto the doily. I tied eyelash yarn around the feather for more texture, then added a metal embellishment to the bottom of the feather.
I also added a tiny rhinestone to the top of the closure.
Remember when I said I wanted to SHARE some GORGEOUSNESS??!!
Well, I am giving away this Hat Pin Book to one lucky person!!
But what's a Hat Pin Book without HAT PINS, right??!!
So... Included in this GIVEAWAY are three handmade hat pins!!!!
Each pin was handmade (by me) with Swarovski Crystals!
This giveaway is in no way sponsored, affiliated, or endorsed by Facebook, Twitter, Instagram, or any of their subsidiaries.
You do not have to "follow" or "like" to be entered in the giveaway but if you did, it would be greatly appreciated!!
Hope you all have a GORGEOUS day!!
This is such a lovely project! Those papers are gorgeous and the gold doily went perfectly with the Royal Gypsy papers. The eyelash yarn gave just the right texture around the feather too. All around an awesome project!
Another great production by the one and only Mitsy. I'm telling ya, I wanna be you when I grow up! LOVE everything about this project. The colors, creativity, the pins, the stack, oh so many things to love!
a fabulous project and so creative! just love it! and gorgeous to top it off.
Oh my goodness. This is so pretty! I love all the details!
OMG! It's absolutely adorable!!! I love all of your project!!! Thank you for your kindness!!!
Mitsy.... All I can say is WOW, WOW, WWWOOOOWWW!!! Everything looks GORGEOUS!!!
This is just beautiful Great job!!! I LOVE it!!
Wow! You are amazing when it come to things like this. Each day I see the things you make and I am continually amazed. Keep them coming.
Wonderful OTP project. I think I need to make this folder.
I'm in awe! I love your project!
Beautiful project! Love the papers and that pretty feather on the cover! The hat pins are gorgeous! I have a few antique pins on a pincushion, and if I am lucky enough to win, I'll place them on the pincushion too!
This is so deliriously GORGEOUS!! I've put the die and gorgeous DCWV papers on my wish list.
I would LOVE to learn how you make your hatpins!! Do you find the pin bases somewhere or make them yourself? I know they look lovely stuck in a bow on cards and I want to make some, I just need to get started. Thanks for the fabulous inspiration!!
|
from django import forms
from django.contrib import admin
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from first_app.models import *
# Register your models here.
class CellAdmin(admin.ModelAdmin):
    # Admin changelist shows the cell's grid coordinates.
    list_display = ("x", "y")
class CharacterAdmin(admin.ModelAdmin):
    # Admin changelist shows a character's position and combat stats.
    list_display = ("cell", "hp", "name", "defense", "attack")
class MonsterAdmin(admin.ModelAdmin):
    # Same columns as CharacterAdmin: position and combat stats.
    list_display = ("cell", "hp", "name", "defense", "attack")
class LootTypeAdmin(admin.ModelAdmin):
    # Admin changelist shows only the loot-type's item name.
    list_display = ("item",)
class LootAdmin(admin.ModelAdmin):
list_display = ("item_type", "item_name", "item_stats")
class InventoryCellAdmin(admin.ModelAdmin):
list_display = ("inv_coord", "inv_content",)
class InventoryCharacterAdmin(admin.ModelAdmin):
list_display = ("char", "content")
class SlotCharacterAdmin(admin.ModelAdmin):
list_display = ("slot", "slot_type")
class UserCreationForm(forms.ModelForm):
    """Admin form for creating new Player accounts.

    Asks for the password twice, verifies both entries agree, and stores
    the password hashed via set_password() rather than in plain text.
    """
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Password confirmation', widget=forms.PasswordInput)
    class Meta:
        model = Player
        fields = ('name',)
    def clean_password2(self):
        # Only complain when both entries were supplied and they disagree;
        # missing-field errors are raised by the individual fields.
        first = self.cleaned_data.get('password1')
        second = self.cleaned_data.get('password2')
        if first and second and first != second:
            raise forms.ValidationError("Passwords don't match")
        return second
    def save(self, commit=True):
        # Build the instance without persisting, hash the chosen password,
        # then save to the database only when commit is requested.
        user = super(UserCreationForm, self).save(commit=False)
        user.set_password(self.cleaned_data['password1'])
        if commit:
            user.save()
        return user
class UserChangeForm(forms.ModelForm):
    """Admin form for editing existing Player accounts.

    The password is rendered with ReadOnlyPasswordHashField, which shows
    hash metadata instead of allowing the hash to be edited directly.
    """
    password = ReadOnlyPasswordHashField()
    class Meta:
        model = Player
        fields = ('name', 'password', 'is_active', 'is_admin')
    def clean_password(self):
        # Always return the initial value: the field is read-only, so any
        # submitted value must never overwrite the stored hash.
        return self.initial['password']
class UserAdmin(BaseUserAdmin):
    """Admin for the custom Player user model."""
    # Forms used for the change and add pages respectively.
    form = UserChangeForm
    add_form = UserCreationForm
    # Changelist columns, filters and search configuration.
    list_display = ('name', 'char_list', 'is_admin')
    list_filter = ('is_admin',)
    # Field layout for the change page and (with password confirmation)
    # for the add page.
    fieldsets = ((None, {'fields': ('name', 'password')}), ('Permissions', {'fields': ('is_admin',)}),)
    add_fieldsets = ((None, {'classes': ('wide',), 'fields': ('name', 'password1', 'password2')}),)
    search_fields = ('name',)
    ordering = ('name',)
    # Player defines no many-to-many permission fields, so nothing to show.
    filter_horizontal = ()
# Register every game model with its matching admin configuration.
for _model, _model_admin in (
    (Cell, CellAdmin),
    (Character, CharacterAdmin),
    (Monster, MonsterAdmin),
    (LootType, LootTypeAdmin),
    (Loot, LootAdmin),
    (InventoryCell, InventoryCellAdmin),
    (InventoryCharacter, InventoryCharacterAdmin),
    (SlotCharacter, SlotCharacterAdmin),
):
    admin.site.register(_model, _model_admin)
# Player uses the customised UserAdmin defined above.
admin.site.register(Player, UserAdmin)
# The project does not use Django's Group model; hide it from the admin.
admin.site.unregister(Group)
|
Not everyone supported the Amendments of 1835. They were put to a vote of all eligible voters in the state -- "all freemen of the age of twenty-one Years, who... have paid public taxes," according to the Constitution of 1776 -- and the results were 26,771 for the amendments and 21,606 against, or 55.3 percent for and 44.7 percent against. Although the amendments were adopted by a fairly close margin, support for them wasn't evenly spread out through the state. In each of the counties below, the vote was overwhelmingly one way or the other.
Find each of the counties listed in the data table on the map at the top of the page. (Click the map to see the full-sized version.) The simplest way to keep track of the counties is to print off a copy of the map, and to use crayons or markers to color them as you find them, using two different colors for counties voting for and counties voting against.
Once you've colored in the counties, the pattern of voting should be obvious. Does this surprise you? Given what you've read, why would people in different parts of the state have supported or opposed the amendments?
1. Data from Henry G. Connor, The Convention of 1835 (Raleigh: Edwards & Broughton, 1908), p. 23.
This map shows North Carolina counties as they were drawn in 1840. A few counties were added between 1835 and 1840, but you can still use it to find the counties listed in the voting records below.
|
#!/usr/bin/python
# -*- coding: iso-8859-1 -*-
# Copyright (c) 2016, Jan Brohl <janbrohl@t-online.de>
# All rights reserved.
# See LICENSE.txt
# Copyright (c) 2004 Colin Stewart (http://www.owlfish.com/)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# If you make any bug fixes or feature enhancements please let me know!
"""
Unit test cases.
"""
from __future__ import unicode_literals
import unittest
import os
import io
import logging
import logging.config
from simpletal import simpleTAL, simpleTALES
if (os.path.exists("logging.ini")):
logging.config.fileConfig("logging.ini")
else:
logging.basicConfig()
def simpleFunction():
    """Return a fixed greeting string; used as a callable in path tests."""
    return "Hello World"
def nestedFunction():
    """Return a mapping that exposes simpleFunction under the 'nest' key."""
    return dict(nest=simpleFunction)
def pathFunction(thePath):
    """Echo the supplied path argument back unchanged."""
    return thePath
class PathTests(unittest.TestCase):
    """Exercise simpleTALES path resolution, in particular the ?var
    indirection syntax that dereferences a context variable to obtain the
    next path element."""
    def setUp(self):
        # Shared context: a colour mapping, a list of keys into it, an
        # indirect variable chain, and a name pointing at a missing key.
        self.context = simpleTALES.Context()
        self.context.addGlobal(
            'colours', {
                'blue': 'The sea is blue',
                'red': 'The ball is red',
                'green': 'The grass is green'
            })
        self.context.addGlobal('aList', ['blue', 'green'])
        # goodColour holds the *name* of another variable (indirection).
        self.context.addGlobal('goodColour', 'goodColourPath')
        self.context.addGlobal('goodColourPath', 'Black is good')
        # noSuchColour names a key absent from the colours mapping.
        self.context.addGlobal('noSuchColour', 'pink')
    def _runTest_(self, txt, result, errMsg="Error"):
        # Compile txt, expand it against the shared context, and compare
        # the rendered output verbatim with the expected result.
        template = simpleTAL.compileHTMLTemplate(txt)
        file = io.StringIO()
        template.expand(self.context, file)
        realResult = file.getvalue()
        self.assertEqual(
            realResult, result,
            "%s - \npassed in: %s \ngot back %s \nexpected %s\n\nTemplate: %s"
            % (errMsg, txt, realResult, result, template))
    def testRepeatVariablePath(self):
        # ?colour is re-resolved on each repeat iteration.
        self._runTest_(
            '<html><ul><li tal:repeat="colour aList" tal:content="colours/?colour">List</li></ul></html>',
            '<html><ul><li>The sea is blue</li><li>The grass is green</li></ul></html>',
            'Path variable during repeat failed.')
    def testLocalVariablePath(self):
        # ?one resolves a tal:define-d local variable.
        self._runTest_(
            '<html><p tal:define="one string:red">It is red: <b tal:content="colours/?one"></b></p></html>',
            '<html><p>It is red: <b>The ball is red</b></p></html>',
            'Local variable path failed.')
    def testGlobalVariablePath(self):
        # ?goodColour dereferences to goodColourPath, then to its value.
        self._runTest_('<html><p tal:content="?goodColour"></p></html>',
                       '<html><p>Black is good</p></html>',
                       'Global variable path failed.')
    def testNoSuchVariablePath(self):
        # An unknown ?variable renders as empty content, not an error.
        self._runTest_('<html><p tal:content="?badColour"></p></html>',
                       '<html><p></p></html>', 'No such variable failed.')
    def testNoSuchVariablePath2(self):
        # A resolvable ?variable naming a missing key also renders empty.
        self._runTest_(
            '<html><p tal:content="colours/?noSuchColour"></p></html>',
            '<html><p></p></html>', 'No such variable2 failed.')
|
Museon den Haag in the Netherlands was our first Bluetooth Low Energy enabled installation. Our Dutch distributor The Lux Company worked very closely with lighting designer Joost de Beij on the project. We have delivered 400 TTX2.50.LV and TTX2.70.LV benefitting from XIM.BLE where their enhanced capability is already being utilised.
To read an excellent article on the use of BLE technology in galleries, go to page 9 of this month's SLL Newsletter here.
|
# View more python learning tutorial on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
"""
Please note, this code is only for python 3+. If you are using python 2+, please modify the code accordingly.
"""
from __future__ import print_function
import tensorflow as tf
import numpy as np
def add_layer(inputs, in_size, out_size, n_layer, activation_function=None):
    """Add one fully connected layer and return its output tensor.

    Args:
        inputs: 2-D input tensor of shape [batch, in_size].
        in_size: number of input units.
        out_size: number of output units.
        n_layer: integer used to build a unique name scope / summary tag.
        activation_function: optional activation applied to W.x + b;
            None means a linear layer.

    Returns:
        The layer's output tensor of shape [batch, out_size].
    """
    layer_name = 'layer%s' % n_layer
    with tf.name_scope(layer_name):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            # tf.histogram_summary was removed in TF 1.0; tf.summary.histogram
            # is the replacement (available since 0.12, which this script
            # already requires via tf.global_variables_initializer below).
            tf.summary.histogram(layer_name + '/weights', Weights)
        with tf.name_scope('biases'):
            # Small positive bias init is the usual choice for ReLU layers.
            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1, name='b')
            tf.summary.histogram(layer_name + '/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights), biases)
        if activation_function is None:
            outputs = Wx_plus_b
        else:
            outputs = activation_function(Wx_plus_b, )
        tf.summary.histogram(layer_name + '/outputs', outputs)
        return outputs
# Make up some real data: y = x^2 - 0.5 plus gaussian noise.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

# define placeholder for inputs to network
with tf.name_scope('inputs'):
    xs = tf.placeholder(tf.float32, [None, 1], name='x_input')
    ys = tf.placeholder(tf.float32, [None, 1], name='y_input')

# add hidden layer
l1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)
# add output layer
prediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)

# the error between prediction and real data
with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction),
                                        reduction_indices=[1]))
    # tf.scalar_summary was removed in TF 1.0; tf.summary.scalar is the
    # replacement (available since 0.12, matching the initializer below).
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

sess = tf.Session()
# tf.merge_all_summaries / tf.train.SummaryWriter were renamed to
# tf.summary.merge_all / tf.summary.FileWriter in TF 0.12.
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("logs/", sess.graph)
# tf.initialize_all_variables() is no longer valid for tensorflow >= 0.12.
sess.run(tf.global_variables_initializer())

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 50 == 0:
        # Record merged summaries every 50 steps for TensorBoard.
        result = sess.run(merged,
                          feed_dict={xs: x_data, ys: y_data})
        writer.add_summary(result, i)

# direct to the local dir and run this in terminal:
# $ tensorboard --logdir=logs
|
A stand-alone service featuring a 15 minute DetoXsoak™ of feet in warm water, followed by a DetoXsoak™ Somatology technique massage of feet and legs to balance, de-stress, and refresh. This service can be added to a pedicure service.
This DetoXsoak™ kit includes 8 DetoXsoak™ packets, one 4 oz DetoXsoak™ Somatology Massage Serum, and technique cards.
DetoXsoak™ Somatology Massage Serum contains the natural anti-oxidant properties of vitamin E, pomegranate seed oil, vitamin C, and the hydrating benefits of Argan Oil.
|
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test addr relay
"""
from test_framework.messages import (
CAddress,
NODE_NETWORK,
NODE_WITNESS,
msg_addr,
msg_getaddr
)
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
import time
class AddrReceiver(P2PInterface):
    """P2P stub that counts received IPv4 addr records and, optionally,
    validates their contents against the values setup_addr_msg generates."""
    num_ipv4_received = 0
    test_addr_contents = False
    def __init__(self, test_addr_contents=False):
        super().__init__()
        self.test_addr_contents = test_addr_contents
    def on_addr(self, message):
        for addr in message.addrs:
            self.num_ipv4_received += 1
            if not self.test_addr_contents:
                continue
            # relay_tests checks the content of the addr messages match
            # expectations based on the message creation in setup_addr_msg
            assert_equal(addr.nServices, 9)
            if not 8333 <= addr.port < 8343:
                raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port))
            assert addr.ip.startswith('123.123.123.')
    def addr_received(self):
        # True once at least one IPv4 address has arrived.
        return self.num_ipv4_received > 0
    def getaddr_received(self):
        # True once at least one getaddr message has arrived.
        return self.message_count['getaddr'] > 0
class AddrTest(BitcoinTestFramework):
    """Functional test of addr relay on a single node.

    Peers are simulated with P2PInterface/AddrReceiver connections; time is
    advanced via setmocktime to trigger the node's addr-send timers.
    """
    # Monotonic counter used to vary the generated IP addresses.
    counter = 0
    # Mocked clock, advanced manually wherever a timer must expire.
    mocktime = int(time.time())
    def set_test_params(self):
        # A single node is enough; all peers are python stubs.
        self.num_nodes = 1
    def run_test(self):
        self.oversized_addr_test()
        self.relay_tests()
        self.getaddr_tests()
        self.blocksonly_mode_tests()
    def setup_addr_msg(self, num):
        """Build an addr message carrying `num` fresh addresses.

        Addresses lie in 123.123.123.0/24 with ports 8333..8333+num-1 and
        increasing timestamps; counter makes IPs differ across calls.
        """
        addrs = []
        for i in range(num):
            addr = CAddress()
            addr.time = self.mocktime + i
            addr.nServices = NODE_NETWORK | NODE_WITNESS
            addr.ip = f"123.123.123.{self.counter % 256}"
            addr.port = 8333 + i
            addrs.append(addr)
            self.counter += 1
        msg = msg_addr()
        msg.addrs = addrs
        return msg
    def send_addr_msg(self, source, msg, receivers):
        """Send `msg` from `source`, then advance time so the node relays
        queued addresses, and wait for `receivers` to drain their queues."""
        source.send_and_ping(msg)
        # pop m_next_addr_send timer
        self.mocktime += 10 * 60
        self.nodes[0].setmocktime(self.mocktime)
        for peer in receivers:
            peer.sync_send_with_ping()
    def oversized_addr_test(self):
        """Send a 1010-address addr message and check the node logs it."""
        self.log.info('Send an addr message that is too large')
        addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
        msg = self.setup_addr_msg(1010)
        with self.nodes[0].assert_debug_log(['addr message size = 1010']):
            addr_source.send_and_ping(msg)
        self.nodes[0].disconnect_p2ps()
    def relay_tests(self):
        """Check addr forwarding between inbound and outbound peers."""
        self.log.info('Test address relay')
        self.log.info('Check that addr message content is relayed and added to addrman')
        addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
        num_receivers = 7
        receivers = []
        for _ in range(num_receivers):
            receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)))
        # Keep this with length <= 10. Addresses from larger messages are not
        # relayed.
        num_ipv4_addrs = 10
        msg = self.setup_addr_msg(num_ipv4_addrs)
        with self.nodes[0].assert_debug_log(
            [
                'Added {} addresses from 127.0.0.1: 0 tried'.format(num_ipv4_addrs),
                'received: addr (301 bytes) peer=1',
            ]
        ):
            self.send_addr_msg(addr_source, msg, receivers)
        total_ipv4_received = sum(r.num_ipv4_received for r in receivers)
        # Every IPv4 address must be relayed to two peers, other than the
        # originating node (addr_source).
        ipv4_branching_factor = 2
        assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)
        self.nodes[0].disconnect_p2ps()
        self.log.info('Check relay of addresses received from outbound peers')
        inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))
        full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
        msg = self.setup_addr_msg(2)
        self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])
        self.log.info('Check that the first addr message received from an outbound peer is not relayed')
        # Currently, there is a flag that prevents the first addr message received
        # from a new outbound peer to be relayed to others. Originally meant to prevent
        # large GETADDR responses from being relayed, it now typically affects the self-announcement
        # of the outbound peer which is often sent before the GETADDR response.
        assert_equal(inbound_peer.num_ipv4_received, 0)
        self.log.info('Check that subsequent addr messages sent from an outbound peer are relayed')
        msg2 = self.setup_addr_msg(2)
        self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
        assert_equal(inbound_peer.num_ipv4_received, 2)
        self.log.info('Check address relay to outbound peers')
        block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
        msg3 = self.setup_addr_msg(2)
        self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer])
        self.log.info('Check that addresses are relayed to full outbound peers')
        assert_equal(full_outbound_peer.num_ipv4_received, 2)
        self.log.info('Check that addresses are not relayed to block-relay-only outbound peers')
        assert_equal(block_relay_peer.num_ipv4_received, 0)
        self.nodes[0].disconnect_p2ps()
    def getaddr_tests(self):
        """Check which connection types send and answer getaddr."""
        self.log.info('Test getaddr behavior')
        self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer')
        full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
        full_outbound_peer.sync_with_ping()
        assert full_outbound_peer.getaddr_received()
        self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer')
        block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
        block_relay_peer.sync_with_ping()
        assert_equal(block_relay_peer.getaddr_received(), False)
        self.log.info('Check that we answer getaddr messages only from inbound peers')
        inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver())
        inbound_peer.sync_with_ping()
        # Add some addresses to addrman
        for i in range(1000):
            first_octet = i >> 8
            second_octet = i % 256
            a = f"{first_octet}.{second_octet}.1.1"
            self.nodes[0].addpeeraddress(a, 8333)
        full_outbound_peer.send_and_ping(msg_getaddr())
        block_relay_peer.send_and_ping(msg_getaddr())
        inbound_peer.send_and_ping(msg_getaddr())
        # Advance time so the queued getaddr response is flushed.
        self.mocktime += 5 * 60
        self.nodes[0].setmocktime(self.mocktime)
        inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True)
        assert_equal(full_outbound_peer.num_ipv4_received, 0)
        assert_equal(block_relay_peer.num_ipv4_received, 0)
        # The getaddr response is a random subset, hence an inequality.
        assert inbound_peer.num_ipv4_received > 100
        self.nodes[0].disconnect_p2ps()
    def blocksonly_mode_tests(self):
        """Check that addr relay still works with -blocksonly set."""
        self.log.info('Test addr relay in -blocksonly mode')
        self.restart_node(0, ["-blocksonly"])
        # Restarting resets the node clock; re-sync our mocked time with it.
        self.mocktime = int(time.time())
        self.log.info('Check that we send getaddr messages')
        full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
        full_outbound_peer.sync_with_ping()
        assert full_outbound_peer.getaddr_received()
        self.log.info('Check that we relay address messages')
        addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
        msg = self.setup_addr_msg(2)
        self.send_addr_msg(addr_source, msg, [full_outbound_peer])
        assert_equal(full_outbound_peer.num_ipv4_received, 2)
        self.nodes[0].disconnect_p2ps()
if __name__ == '__main__':
AddrTest().main()
|
Good residue management in the fall will help enhance drill performance, allowing for increased seed survival and an overall better field uniformity in the spring. Changing the angle of vanes on the back of the chopper will help with the width and uniformity of residue spread.
While swathing or straight combining, keep an eye out for weed escapes. Unless they are an obvious sprayer miss or are weeds not well controlled under the herbicides used on the field, they could be herbicide resistant weed patches.
Some growers are reporting high numbers of adult flea beetles. Adults emerge from pupae in late July and August and overwinter to feed on young canola seedlings the following spring. These same adults do feed now, but Julie Soroka and Larry Grenkow (Can. J. Plant Sci. 2012: 97-107) found that flea beetle feeding on canola in late-summer is rarely an economic concern.
Usually the best response to a light frost is to leave the crop standing to mature fully and continue to clear green.
On behalf of Curtis Rempel, Canola Council of Canada vice president crop production and innovation, please join us for the 2015 Canola Discovery Forum October 27-29 in Canmore Alberta.
The optimal swath timing for canola yield and quality is when 60% of seeds on the main stem are showing some colour change. Seed colour change (SCC) is considered any amount of yellow or brown on the seed. This increases crop yield because side branches have longer to fill and average seed size for the whole plant is larger.
Pre-harvest is a good time to scout for disease severity. The photo above shows severe sclerotinia infection. This article includes sclerotinia and various other diseases to look for.
|
# -*- coding: utf-8 -*-
###############################################################################
#
# (DC)² - DataCenter Deployment Control
# Copyright (C) 2010, 2011, 2012, 2013, 2014 Stephan Adig <sh@sourcecode.de>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
#
# Std. Python Libs
#
import sys
import types
import xmlrpclib
try:
from dc2.lib.db.mongo import Table
from dc2.appserver.helpers import check_record
from dc2.appserver.rpc import rpcmethod
except ImportError:
print "You don't have DC² correctly installed"
sys.exit(1)
try:
from settings import MONGOS
except ImportError:
print "You don't have a settings file"
sys.exit(1)
# MongoDB collection holding one document per (server, MAC address) pair.
tbl_server = Table(MONGOS["dc2db"]["database"].get_table("mac_addresses"))
# Field schema used with check_record(): True marks a required field,
# False an optional one.
MACADDR_RECORD = {
    "server_id": True,
    "mac_addr": True,
    "device_name": False
}
@rpcmethod(
    name="dc2.inventory.servers.macaddr.list",
    returns={"list mac_addr_rec": "List of MAC Address Records for a server"},
    params={}, is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_list(search=None):
    """List MAC address records, optionally filtered by a query dict.

    `search` now defaults to None: the RPC registration declares no
    parameters (params={}) and the sibling endpoints all default their
    argument, so a bare call must not raise a TypeError.
    Any non-dict value falls back to an unfiltered listing.
    """
    if search is not None and type(search) is types.DictType:
        result = tbl_server.find(search)
    else:
        result = tbl_server.find()
    return result
@rpcmethod(
    name="dc2.inventory.servers.macaddr.add",
    returns={"string doc_id": "Document ID of new added record"},
    params={"dict rec_macaddr": "Record Dictionary"},
    is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_add(rec_mac=None):
    """Insert a new MAC address record.

    The record must be a dict, validate against MACADDR_RECORD, and its
    mac_addr must not already exist; otherwise an XML-RPC fault is
    returned instead of saving.
    """
    is_dict = rec_mac is not None and type(rec_mac) is types.DictType
    if is_dict and check_record(rec_mac, MACADDR_RECORD):
        duplicate = tbl_server.find_one({"mac_addr": rec_mac["mac_addr"]})
        if duplicate is None:
            return tbl_server.save(rec_mac)
    return xmlrpclib.Fault(-32501, "Record couldn't be added")
@rpcmethod(
    name="dc2.inventory.servers.macaddr.update",
    returns={"string doc_id": "Document ID of new added record"},
    params={"dict rec_macaddr": "Record Dictionary"},
    is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_update(rec_mac=None):
    """Update an existing MAC address record.

    The record must validate against MACADDR_RECORD and a document with
    the same _id and server_id must already exist; otherwise an XML-RPC
    fault is returned instead of saving.
    """
    if rec_mac is not None and type(rec_mac) is types.DictType:
        if (check_record(rec_mac, MACADDR_RECORD) and
                tbl_server.find_one({
                    "_id": rec_mac["_id"],
                    "server_id": rec_mac["server_id"]}) is not None):
            doc_id = tbl_server.save(rec_mac)
            return doc_id
    return xmlrpclib.Fault(-32504, "Record couldn't be updated")
@rpcmethod(
    name="dc2.inventory.servers.macaddr.delete",
    returns={"bool success": "True if action was successful"},
    params={"dict rec_macaddr": "Prefilled record dictionary with key _id,"
            " or server_id to delete all mac_addr records attached "
            "to a server"},
    is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_delete(rec_mac=None):
    """Delete MAC address record(s) selected by _id or server_id.

    Guard-clause style: each failed precondition returns the fault
    immediately; only a successful removal returns True.
    """
    if rec_mac is None or type(rec_mac) is not types.DictType:
        return xmlrpclib.Fault(-32503, "Record(s) couldn't be deleted")
    if '_id' not in rec_mac and 'server_id' not in rec_mac:
        return xmlrpclib.Fault(-32503, "Record(s) couldn't be deleted")
    if tbl_server.remove(rec_mac) is False:
        return xmlrpclib.Fault(-32503, "Record(s) couldn't be deleted")
    return True
@rpcmethod(
    name="dc2.inventory.servers.macaddr.find",
    returns={"bool success": "True if action was successful"},
    params={"dict rec_server": "Pre-Filled record dictionary with key _id"},
    is_xmlrpc=True, is_jsonrpc=True)
def dc2_servers_macaddr_find(rec_mac=None):
    """Find MAC address records matching the given query dictionary."""
    if rec_mac is None or type(rec_mac) is not types.DictType:
        return xmlrpclib.Fault(-32502, "Record wasn't found!")
    return tbl_server.find(rec_mac)
|
Tip 40 – Yes! We’re all Customer Focused!
It’s a trait that everyone claims to have but what does it actually take to be Customer Focused? And what benefit is there in adopting such an approach when pursuing an opportunity or client?
Industry leading organisations adopting a Customer Focused approach win more business, at higher margins and are already positioning themselves ahead of the next opportunity.
|
"""
This module descibes how to load a custom dataset when folds (for
cross-validation) are predefined by train and test files.
As a custom dataset we will actually use the movielens-100k dataset, but act as
if it were not built-in.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import os
from surprise import BaselineOnly
from surprise import Dataset
from surprise import evaluate
from surprise import Reader
# Path to the dataset folder.
files_dir = os.path.expanduser('~/.surprise_data/ml-100k/ml-100k/')

# This time, we'll use the built-in reader.
reader = Reader('ml-100k')

# Each cross-validation fold is a (train, test) file pair:
# [(u1.base, u1.test), (u2.base, u2.test), ... (u5.base, u5.test)]
train_file = files_dir + 'u%d.base'
test_file = files_dir + 'u%d.test'
folds_files = [(train_file % fold, test_file % fold)
               for fold in range(1, 6)]

data = Dataset.load_from_folds(folds_files, reader=reader)

# We'll use an algorithm that predicts baseline estimates.
algo = BaselineOnly()

# Evaluate performances of our algorithm on the dataset.
evaluate(algo, data)
|
You've probably seen the photos all over your Instagram and Facebook, people love Google's Art Selfie Feature! But Texans can't use it. Find out why.
The Arts & Culture App by Google has been all over social media- people have been uploading photos of themselves to try and see what famous artwork they resemble. Ordinary people and celebrities alike are loving the app and seeing which famous paintings they look like.
The problem? People in Texas are having trouble finding the app! The Chicago Tribune reported that only two states, Texas and Illinois, don't allow the app because of the states' biometric privacy laws. The Texas version of this law limits companies who obtain "biometric identifiers," things like retina scan, iris scan, fingerprints, voiceprints, hand or face geometry, etc. for commercial use. The law was passed back in 2009 and violators could be penalized up to $25,000. So because of this law, sadly Texans will never know if we resemble more the Mona Lisa or the Girl With The Pearl Earring. But I'll just assume I'm a dead ringer for Gustav Klimt's Portrait of Adele Bloch-Bauer I.
|
# coding=utf-8
from __future__ import absolute_import
from django.test import TestCase
from mjml import settings as mjml_settings
from testprj.tools import safe_change_mjml_settings, MJMLServers, MJMLFixtures, render_tpl
class TestMJMLTCPServer(MJMLFixtures, MJMLServers, TestCase):
    """Render MJML templates through the TCP-server backend.

    MJMLServers manages the external server process for the class, keyed
    off SERVER_TYPE; safe_change_mjml_settings restores the original
    settings once the class is torn down.
    """
    SERVER_TYPE = 'tcpserver'
    # Context manager guarding the mjml settings; entered in setUpClass.
    _settings_manager = None
    @classmethod
    def setUpClass(cls):
        # Enter the settings guard before switching the backend mode so
        # the original configuration is restored in tearDownClass.
        cls._settings_manager = safe_change_mjml_settings()
        cls._settings_manager.__enter__()
        mjml_settings.MJML_BACKEND_MODE = cls.SERVER_TYPE
        super(TestMJMLTCPServer, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        # Stop the server first, then restore the patched settings.
        super(TestMJMLTCPServer, cls).tearDownClass()
        cls._settings_manager.__exit__(None, None, None)
    def test_simple(self):
        # A minimal template must yield a complete HTML document
        # containing the fixture's title, button and sizing.
        html = render_tpl(self.TPLS['simple'])
        self.assertIn('<html ', html)
        self.assertIn('<body', html)
        self.assertIn('20px ', html)
        self.assertIn('Test title', html)
        self.assertIn('Test button', html)
        # Invalid MJML markup must surface as a RuntimeError.
        with self.assertRaises(RuntimeError):
            render_tpl("""
                {% mjml %}
                123
                {% endmjml %}
            """)
    def test_large_tpl(self):
        # Payloads of several megabytes must round-trip through the
        # server socket without truncation.
        html = render_tpl(self.TPLS['with_text_context'], {
            'text': '[START]' + ('1 2 3 4 5 6 7 8 9 0 ' * 410 * 1024) + '[END]',
        })
        self.assertIn('<html ', html)
        self.assertIn('<body', html)
        self.assertIn('[START]', html)
        self.assertIn('[END]', html)
    def test_unicode(self):
        # Non-ASCII template text and context must survive rendering.
        html = render_tpl(self.TPLS['with_text_context_and_unicode'], {'text': self.TEXTS['unicode']})
        self.assertIn('<html ', html)
        self.assertIn('<body', html)
        self.assertIn(u'Український текст', html)
        self.assertIn(self.TEXTS['unicode'], html)
        self.assertIn(u'©', html)
|
We recently did a video showing the very best value for an iPhone. Spoiler alert: it’s the iPhone 8 Plus, which you can get on Swappa starting at just $445.
On Pocketnow Daily, Huawei has just trademarked some names for its upcoming foldable smartphone. Some Galaxy Note 9 phones are having trouble with the camera crashing but Samsung should be fixing it soon. New screen protectors for the Galaxy S10 hint a nearly bezel-less display. Instagram will be doing a refresh to the app’s UI in the following days to clean it up. We end today’s show with Black Friday deals from Google to Metro by T-Mobile.
|
#! /usr/bin/python3
# pykarta/server/app.py
# Server for use by PyKarta applications.
# Provides geocoding and vector map tiles.
# Last modified: 17 October 2019
import re, os
try:
import pykarta
except ImportError:
# During testing we may run this script from its own directory
import sys
sys.path.insert(1, "../..")
# Import data data provider modules
from pykarta.server.modules.not_found import app as app_not_found
from pykarta.server.modules.geocoder_parcel import app as app_geocoder_parcel
from pykarta.server.modules.geocoder_openaddresses import app as app_geocoder_openaddresses
from pykarta.server.modules.tiles_parcels import app as app_tiles_parcels
from pykarta.server.modules.tiles_osm_vec import app as app_tiles_osm_vec
# Map paths to data provider modules.
# Keys are either "level1/level2" mounts (matched first) or bare "level1"
# mounts; the None entry is the fallback 404 handler.
routes = {
    'geocoders/parcel': app_geocoder_parcel,
    'geocoders/openaddresses': app_geocoder_openaddresses,
    'tiles/parcels': app_tiles_parcels,
    'tiles': app_tiles_osm_vec,
    None: app_not_found,
}
# The WSGI app
def app(environ, start_response):
    """Top-level WSGI dispatcher.

    Matches the first one or two components of PATH_INFO against `routes`
    and delegates to the corresponding provider app, shifting the matched
    prefix from PATH_INFO onto SCRIPT_NAME as WSGI middleware convention
    requires. Unmatched requests go to the not-found handler.
    """
    stderr = environ['wsgi.errors']
    # In production the server administrator will have set DATADIR.
    if 'DATADIR' not in environ:    # idiomatic "not in" instead of "not ... in"
        # During testing we use this.
        environ['DATADIR'] = os.environ['HOME'] + "/geo_data/processed"
    # /tiles/<tileset>/
    # /geocoders/<geocoder>/
    m = re.match(r'^/([^/]+)/([^/]+)(.*)$', environ['PATH_INFO'])
    if not m:
        stderr.write("Parse failed: %s\n" % environ['PATH_INFO'])
        # `handler` (not `app`) so the local no longer shadows this function.
        handler = routes[None]
    else:
        # Level 2 mounts such as /tiles/parcels/
        handler = routes.get("%s/%s" % (m.group(1), m.group(2)))
        if handler is not None:
            environ['SCRIPT_NAME'] += ("/%s/%s" % (m.group(1), m.group(2)))
            environ['PATH_INFO'] = m.group(3)
        else:
            # Level 1 mounts such as /tiles/
            handler = routes.get(m.group(1))
            if handler is not None:
                environ['SCRIPT_NAME'] += ("/%s" % m.group(1))
                environ['PATH_INFO'] = ("/%s%s" % (m.group(2), m.group(3)))
            else:
                handler = routes[None]
    return handler(environ, start_response)
# Standalone server for testing
# Start it up and run:
# PYKARTA_SERVER_URL=http://localhost:5000 gpx-trip-planner
if __name__ == "__main__":
    import sys
    from werkzeug.serving import run_simple
    # Middleware that copies DATADIR from the process environment into each
    # WSGI environ, mimicking what the production server config would set.
    class EnvInsert(object):
        def __init__(self, app):
            self.app = app
        def __call__(self, environ, start_response):
            environ['DATADIR'] = os.environ['DATADIR']
            return self.app(environ, start_response)
    app = EnvInsert(app)
    # NOTE(review): threaded=False serializes requests — presumably the
    # data providers are not thread-safe; confirm before enabling threads.
    run_simple('0.0.0.0', 5000, app, threaded=False)
|
What is another word for secretly?
stealthily, privately, quietly, mysterious, illegally.
I replied, laughing secretly at the trick I was playing her.
Denry secretly thought him a miracle, but up at the club at Porthill he was content to call him " the human machine."
sea cradle, surtitle, sacerdotal, shirttail, sacredly, sword lily, scartella, sordidly, squaretail, square deal, swordtail, shrewdly, scissortail, scrotal, serratula, shortly.
|
"""
"""
# Created on 2014.08.05
#
# Author: Giovanni Cannata
#
# Copyright 2014, 2015, 2016, 2017 Giovanni Cannata
#
# This file is part of ldap3.
#
# ldap3 is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ldap3 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with ldap3 in the COPYING and COPYING.LESSER files.
# If not, see <http://www.gnu.org/licenses/>.
from pyasn1.type.univ import Integer
from ...core.exceptions import LDAPExtensionError
from ..operation import ExtendedOperation
from ...protocol.rfc4511 import LDAPDN
from ...utils.asn1 import decoder
from ...utils.dn import safe_dn
class PartitionEntryCount(ExtendedOperation):
    """Novell eDirectory extended operation: count the entries held below a
    partition DN. The count is exposed as ``result['entry_count']``."""

    def config(self):
        # Request/response OID pair for the partition-entry-count extension.
        self.request_name = '2.16.840.1.113719.1.27.100.13'
        self.response_name = '2.16.840.1.113719.1.27.100.14'
        self.request_value = LDAPDN()
        self.response_attribute = 'entry_count'

    def __init__(self, connection, partition_dn, controls=None):
        # Base class wires up the connection/controls (calls super __init__()).
        ExtendedOperation.__init__(self, connection, controls)
        dn = safe_dn(partition_dn) if connection.check_names else partition_dn
        self.request_value = LDAPDN(dn)

    def populate_result(self):
        # The response payload is a single BER-encoded INTEGER.
        remaining = self.decoded_response
        try:
            decoded, remaining = decoder.decode(remaining, asn1Spec=Integer())
            self.result['entry_count'] = int(decoded)
        except Exception:
            raise LDAPExtensionError('unable to decode substrate')
        if remaining:
            raise LDAPExtensionError('unknown substrate remaining')
|
So, I preheated the oven and got mixing.
Here’s the goods! It needed 2 eggs, milk (I used skim), butter, and the mix.
See the bumps?! Don't blame my mixing skills....it's actual dried apple pieces!
And…. fresh outta the oven!
So….looks pretty. How’s it taste?
It was actually pretty good! It has a very deep flavor, and I could really taste the spices. Allspice and cinnamon were the most predominant flavors, and there was almost a bit of a caramel flavor as well. The bread really reminded me of pumpkin too, for the dark color and the spiced flavor. The bread was sweet, but not over the top, so I think it’s acceptable to put the powdered sugar on for some added sweetness after it was suggested to me that I do so.
It was soft and fluffy, with crispier edges, with dried apple pieces for added texture. I didn’t think it was super apple-y in flavor, but it still was an enjoyable bread. I think I’ll eat it again with a smear of peanut butter or butter after warming it up in the toaster oven.
It also lacked fiber and was a bit high in sugar, so this bread gets a total rating of 4 stars. Trader Joe’s really never disappoints me with their products, and I hope they never do!!
What about you? Do you make mostly homemade or store-bought/semi-prepared products? What are your favorites?!
And the nutritional facts. It was kind of high in sugar.
I love Trader Joe’s. It seems like everytime I go in there, there is a new and exciting product to try. I trust Trader Joe branded products, as for the most part, they have always been really delicious and rather nutritious as well. Today’s choice? Trader Joe’s Creamy Polenta with Spinach and Carrots. I wanted something carby to go along with my chicken(less) nuggets for dinner and veggies, and felt that this fit the bill.
So, I opened the bag and was a little surprised at what I got. Giant Hershey Kisses of polenta. Oh you’ll see.
The polenta itself was creamy yet slightly gritty from the cornmeal. It was buttery tasting and slightly cheesy tasting as well. The carrot and spinach aspect of the dish sort of disappeared. I couldn’t really taste much spinach, and couldn’t really taste the carrots at all.
What the polenta really needed was a pinch of salt. I added a few grinds of sea salt and out came MUCH tastier flavors. Of course, the Dietician-to-be in me yelled at me, saying I didn’t NEED to add salt to this dish that already contained a good deal of sodium per serving. Not to mention I was also having chicken(less) nuggets AND ketchup which had salt in them too. But oh well. I did it. And well, I certainly dont regret it.
Do you like polenta? How do you prepare it?
According to the package, these were “wheat free” and “dairy free”, as well as being made with “organic white corn tortillas”, which I liked.
I cooked em up in the microwave, and had some steamed broccoli on the side. In the dish itself, there was crispy edges on the side and plenty of sauce surrounding the two enchiladas. Here’s how it looked when transferred to my plate.
And, here’s how it looked when I cut the tortilla to see the inside filling. There was mostly corn, tofu and black beans, with a few peppers I believe as well.
As you can see, PLENTY of corn. There wasn’t a TON of filling, but I’d say a rather good amount. The corn was very fresh tasting, and the beans were soft. The tofu was my favorite part, even though it was so tiny! Literally itsy-bitsy squares of soft tofu. I think I also saw a few peppers or some other small green vegetable, but those were rather sporadic.
Lastly, the sauce. It was very flavorful, and left a hint of spice in my mouth even when I was done eating. I liked that there was a good amount of the sauce as well.
So, my question is….What’s your favorite Mexican entree? Any particular brand you like?
how PRETTY is this cover?!
I’m not a big fan of cream cheese. In fact, growing up I was the type of kid that when I got a bagel out at supermarket or whatever already prepared, I would smear off a good 3/4 of the cream cheese. HOWEVER, when I heard about this product I just HAD to go try it and buy it! (or rather, buy it, then try it!) You know me, anything food with peanut butter, and I’m down for tasting!
Here’s what it looks like straight from the container, with some of it already eaten (guilty as charged).
When I first tasted it, I thought, ooh? Is that a slight coffee flavor? It had just the hint of coffee flavor. But as I kept eating it, the flavors progressed. I could certainly taste the cream cheese aspect of this product, slightly tart and that traditional cheesy cream cheese flavor. However, there certainly WAS a peanut butter flavor! It was really unique, but really tasty too. I wouldn’t say it was a peanut butter from the jar flavor, but a sweet peanut butter flavor. My favorite part was that there were actual pieces of chopped nuts in the spread as well!
Have you tried this cream cheese? If not, if you COULD try it, what would you eat it with?
Dinner time rolled around the other night, and I only had one thing on my mind. Mac. &. Cheese. What better a time to try out something new for me that I bought at Trader Joe’s? The Reduced Guilt Mac & Cheese. Not only does it look awesome on the box, it was completely awesome in general. For instance, here’s how this delicious-ness came out of the microwave….
Hah. I find that extra blob of cheese quite amusing.
It even smelled good. So cheesy!
Although I was slightly disappointed at the fact there wasn’t a larger portion of mac & cheese, there was a decent amount. I rounded off the meal with some veggies (any guesses? Yea, it was broccoli…), and ate.
At first bite, I was impressed. It certainly was cheesy, but had that clumpy, “baked macaroni & cheese” flavor and consistency, with crispy edges on the outside of the carton. The noodles were chewy and slightly firm, but still quite tender. It also tasted surprisingly salty. There was 540 mg of sodium in this entree, which I don’t think is that high, but it in fact, did taste pretty salty!
Overall, 4 1/2 stars. Why not 5 stars? Well, it was relatively low in fiber, only 1 star, and wasn’t made with whole grain noodles, which I prefer over regular wheat. But still very tasty, and I would certainly get it again!
1 container is 270 calories, 6 grams of fat, 40 grams of carbohydrates, 1 gram of fiber, 4 grams of sugar and 15 grams of protein.
What’s your favorite brand of macaroni & cheese? This may certainly be one of my new favorites!
I absolutely love almond milk. I mostly have it with cereal and fruit for breakfast before going to the gym in the morning. Although it’s low on protein, I find it’s lighter on my stomach than dairy is, and I have never had a problem with it.
So, the other day at Trader Joe’s, I went to get some Almond Milk and noticed that Trader Joe’s had their own version. I’m always up for trying new brands, so I put it in my cart and bought it.
Once home, I tried it two ways. By itself, and with cereal and fruit with 1 packet of stevia sweetener, seeing as it was the unsweetened variety.
By itself, it was really creamy tasting- almost like the cream you put in coffee. There really wasn’t much vanilla flavor otherwise, but I could have totally used it as is with my cereal.
1 cup is 40 calories, 3 grams of fat, 2 grams of carbohydrates, 1 gram of fiber, less than 1 gram of sugar and 1 gram of protein.
So, I went to Trader Joe’s today (ME? Go to Trader Joe’s?! When does THAT happen… 😉 ) and bought these Country Potatoes with Haricots Verts & Wild Mushrooms. I’ve eaten it before, and pretty much loved it, so I feel it deserves some recognition here on HealthNuttxo!
I put the bag’s contents in a frying pan and cooked it – here’s how it looked in the pan.
Only one thing to say. MMM.
I remember the last time I bought these I cooked them up, people in my house were asking what smelled so good and if they could have some. It indeed, smells delicious!
After cooking up, I transferred it to my plate. There was potatoes, mushrooms, pieces of onions, and “Haricots Verts” as the package states. It was a bit greasy, but so tasty that I didn’t mind. The potatoes were thickly cut, some big pieces some small, and very soft and fluffy on the inside. The green beans were tender, flavorful, and delicious. Lastly the mushrooms, which I was ok about, some were big flat pieces and other pieces were more textured. Some of the pieces were very good, and others were kind of squishy in consistency. I could have eaten a different vegetable in place of the mushrooms, but they weren’t bad.
Overall, it was a bit salty (300 mg per serving), and had a garlicy and oniony flavor. So good! This one gets 4 1/2 stars, and will be a regular appearance in my grocery cart to buy at Trader Joe’s!
110 grams is 130 calories, 6 grams of fat (2 saturated), 18 grams of carbohydrates, 3 grams of fiber, 2 grams of sugar and 2 grams of protein.
What’s something you buy again and again at Trader Joe’s (if you have been there) ?
Maple is definitely one of my favorite flavors. I couldn’t imagine waffles or french toast without maple syrup, and I LOVE it with scrambled eggs. So when I saw these Maple Leaf Cookies at Trader Joe’s, I just had to buy them!
Here’s what one of the cookies looked like out of the box.
Mmmm. I love cookies with a thick filling!
1 cookie is 110 calories, 5 grams of fat, 2 grams of saturated fat, 16 grams of carbohydrates, 8 grams of sugar, 0 grams of fiber and 1 gram of protein.
What’s your favorite maple-flavored thing?
Although they seem kind of big in the picture, they actually were quite tiny. I would say they were actually a bit smaller than a standard chicken nugget. However, when I bit into one, I noticed that they were rather thick and had a “meaty” texture, which made up for the small size.
4 pieces is 130 calories, 5 grams of fat, 15 grams of carbohydrates, 5 grams of fiber, 0 grams of sugar and 12 grams of protein.
What’s your favorite thing from Trader Joe’s? I feel like everytime I go there is always something new to try!
|
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
import warnings
# quiet scipy future warnings
warnings.filterwarnings('ignore')
# Background colormap for the decision-boundary mesh: one light colour per
# class (light red / light blue).
grid_colors = ListedColormap([
    '#ff8585',
    '#6db4f3',
])
# Scatter colormap for the observations themselves: saturated red / blue,
# matching the two background classes.
point_colors = ListedColormap([
    '#ff0000',
    '#0000ff',
])
def plot_decision_boundary(clf, X, labels, margin=0.2, mesh_unit=0.01, proba=False):
    '''
    Shade each cell of a quantized grid with the classifier's decision there,
    then overlay the training observations.
    From: http://scikit-learn.org/stable/auto_examples/neighbors/plot_classification.html
    @args:
      {class} clf: fitted classifier exposing .predict(); when proba=True it
        must also expose .decision_function() or .predict_proba()
      {ndarray} X: n x 2 array of observations
      {array} labels: n class labels used to colour the observations
      {float} margin: padding added around the data extent
      {float} mesh_unit: grid resolution
      {bool} proba: plot continuous scores instead of discrete classes
    @returns:
      void
    '''
    # Data extent, padded by `margin` on every side.
    x_lo, x_hi = X[:, 0].min() - margin, X[:, 0].max() + margin
    y_lo, y_hi = X[:, 1].min() - margin, X[:, 1].max() + margin
    # Dense grid covering the padded extent at `mesh_unit` resolution.
    xx, yy = np.meshgrid(np.arange(x_lo, x_hi, mesh_unit),
                         np.arange(y_lo, y_hi, mesh_unit))
    # One 2D observation per grid cell: ravel flattens each coordinate grid,
    # np.c_ pairs them up column-wise.
    grid_vals = np.c_[xx.ravel(), yy.ravel()]
    if proba:
        # Continuous scores: estimators expose one of two APIs.
        if hasattr(clf, 'decision_function'):
            Z = clf.decision_function(grid_vals)
        else:
            Z = clf.predict_proba(grid_vals)[:, 1]
    else:
        # Discrete class decisions.
        Z = clf.predict(grid_vals)
    # Back to the 2D grid shape so contourf can draw it.
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu if proba else grid_colors, alpha=0.8)
    # Overlay the observations, coloured by label.
    plt.scatter(X[:, 0], X[:, 1], s=30, c=labels, cmap=point_colors,
                edgecolors='#000000')
def plot_distance(arr):
    '''
    Plot the two points in `arr` and the lines between them, dispatching to
    the 2D or 3D renderer based on the points' dimensionality.
    @args:
      arr [arr]: an array composed of 2d or 3d arrays
    @returns:
      void
    '''
    dims = len(arr[0])
    if dims == 2:
        plot_distance_2d(arr)
    elif dims == 3:
        plot_distance_3d(arr)
def plot_distance_2d(arr):
    '''
    Plot two 2D points, the axis-aligned legs between them, and the direct
    (hypotenuse) line.
    @args:
      arr [arr]: an array composed of 2d arrays
    @returns:
      void
    '''
    p0, p1 = arr
    pts = np.array([p0, p1])
    # Draw both points; pattern: plt.scatter(x_vals, y_vals)
    plt.scatter(pts[:, 0], pts[:, 1], s=100, c=['blue', 'orange'],
                alpha=1.0, edgecolors='#000000')
    # Fixed-position labels (assumes points sit near the unit-square corners
    # — TODO confirm with callers).
    plt.text(0.05, 0.05, 'a', fontsize=20, horizontalalignment='center')
    plt.text(0.95, 0.95, 'b', fontsize=20, horizontalalignment='center')
    # Legs of the right triangle, then the hypotenuse;
    # pattern: plt.plot([x_start, x_end], [y_start, y_end])
    plt.plot([p0[0], p1[0]], [p0[1], p0[1]], c='red')                  # x-line
    plt.plot([p1[0], p1[0]], [p0[1], p1[1]], c='purple')               # y-line
    plt.plot([p0[0], p1[0]], [p0[1], p1[1]], c='gray', linestyle=':')  # direct line
    # Axis labels and display.
    plt.xlabel('X')
    plt.ylabel('Y')
    plt.show()
def plot_distance_3d(arr):
    '''
    Given `arr` with two 3-element arrays, plot the points
    at positions `arr[0]` and `arr[1]` and plot lines between
    those two points.
    @args:
      arr [arr]: an array composed of 3d arrays
    @returns:
      void
    '''
    a, b = arr
    df = np.array([a, b])
    fig = plt.figure()
    # fig.gca(projection=...) was deprecated in matplotlib 3.4 and removed in
    # 3.6; add_subplot is the supported way to create a 3D axes.
    ax = fig.add_subplot(projection='3d')
    # point data: pattern for drawing points is:
    # ax.scatter(x_vals, y_vals, z_vals)
    ax.scatter(df[:, 0], df[:, 1], df[:, 2], s=100, c=['blue', 'orange'], alpha=1.0)
    # label points (fixed positions; assumes points near the unit-cube corners)
    ax.text(0.1, 0.1, 0, 'a', fontsize=20, horizontalalignment='center')
    ax.text(0.9, 0.9, 1.0, 'b', fontsize=20, horizontalalignment='center')
    # BUG FIX: the original indexed coordinate 0 for the y and z components of
    # every segment (copy-paste from the x terms); each leg must vary exactly
    # one axis, mirroring plot_distance_2d.
    # line data: pattern for drawing lines is:
    # ax.plot([x_start, x_end], [y_start, y_end], zs=[z_start, z_end])
    ax.plot([a[0], b[0]], [a[1], a[1]], zs=[a[2], a[2]], c='red')     # x-line
    ax.plot([b[0], b[0]], [a[1], b[1]], zs=[a[2], a[2]], c='purple')  # y-line
    ax.plot([b[0], b[0]], [b[1], b[1]], zs=[a[2], b[2]], c='green')   # z-line
    ax.plot([a[0], b[0]], [a[1], b[1]], zs=[a[2], b[2]], c='gray', linestyle=':')  # direct line
    # add axis labels
    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    plt.show()
def plot_iforest_decision_boundary(*args, **kwargs):
    '''
    Create and display the decision boundary for an isolation forest.
    @args (positional):
      clf: fitted isolation forest exposing .decision_function()
      X: n x 2 array of observations used to train the classifier
      new_vals: observations that were classified
      result: per-observation classification (1 = inlier, anything else = outlier)
    @kwargs:
      margin: padding around the data extent (default 6)
      grid_x: size of each colormesh grid unit (default 0.5)
      x_lims, y_lims: displayed axis ranges
    @returns:
      void
    '''
    clf, X, new_vals, result = args[:4]
    margin = kwargs.get('margin', 6)
    mesh = kwargs.get('grid_x', 0.5)
    x_lims = kwargs.get('x_lims', (-13, 12))
    y_lims = kwargs.get('y_lims', (-13, 5))
    # grid covering the data extent padded by `margin` on every side
    x_range = np.arange(X[:, 0].min() - margin, X[:, 0].max() + margin, mesh)
    y_range = np.arange(X[:, 1].min() - margin, X[:, 1].max() + margin, mesh)
    xx, yy = np.meshgrid(x_range, y_range)
    # score every grid cell, then reshape back to the 2D grid for contourf
    Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.YlGn)
    # training points; edgecolors='k' is short for 'edgecolors'='black'
    train_p = plt.scatter(X[:, 0], X[:, 1], c='green', edgecolors='k', alpha=0.4)
    # Split new_vals by predicted label using boolean masks instead of the
    # original append loop. This also fixes a crash: an empty group used to
    # become a 1-D np.array([]) which cannot be indexed [:, 0]; masking a
    # 2-D array yields shape (0, 2), which scatter accepts.
    result = np.asarray(result)
    new_vals = np.asarray(new_vals)
    inliers = new_vals[result == 1]
    outliers = new_vals[result != 1]
    in_p = plt.scatter(inliers[:, 0], inliers[:, 1], c='white', edgecolors='k')
    out_p = plt.scatter(outliers[:, 0], outliers[:, 1], c='red', edgecolors='k')
    # limit the axis ranges
    plt.xlim(x_lims)
    plt.ylim(y_lims)
    # title and legend
    plt.title('Isolation Forests Decision Boundary')
    plt.legend([train_p, in_p, out_p], [
        'training observation',
        'classified as non-outlier',
        'classified as outlier',
    ], loc=[0.025, 0.05], framealpha=0.97)
    plt.show()
|
Weather permitting, California State Parks, California Tahoe Conservancy, North Lake Tahoe and Tahoe Douglas fire protection districts and the U.S. Forest Service may continue prescribed fire operations over the next several weeks in multiple locations around Lake Tahoe.
A map with project locations and details is available for viewing at http://tahoe.livingwithfire.info/get-informed/. To receive prescribed fire notifications, send an email to pa_ltbmu@fs.fed.us. Keep in mind that operations only take place when weather, conditions and staffing allow.
Prescribed fire operations are conducted whenever conditions allow to reduce excess vegetation that can feed unwanted wildland fires. Planned fires now reduce the threat of unplanned fires later, which helps provide increased community protection. Low intensity fire is a natural process in the Sierra Nevada and helps keep our forests healthy by minimizing the spread of insects and disease, recycling nutrients back into the soil and promoting improved habitat for diverse vegetation and wildlife.
|
"""
This module contains constants that are for internal use only.
"""
from . import utils
from ostinato.core import ost_pb
class _Protocols(utils.Enum):
    """
    Enum of the protocol field numbers exposed by the Ostinato protocol
    buffer API (``ost_pb.Protocol``), grouped by OSI layer.
    """
    # Layer 1 protocols
    MAC = ost_pb.Protocol.kMacFieldNumber
    # Layer 2 protocols
    ETHERNET_II = ost_pb.Protocol.kEth2FieldNumber
    ETHERNET_802_DOT_3 = ost_pb.Protocol.kDot3FieldNumber
    LLC = ost_pb.Protocol.kLlcFieldNumber
    SNAP = ost_pb.Protocol.kSnapFieldNumber
    SVLAN = ost_pb.Protocol.kSvlanFieldNumber
    VLAN = ost_pb.Protocol.kVlanFieldNumber
    VLAN_STACK = ost_pb.Protocol.kVlanStackFieldNumber
    ETHERNET_802_DOT_2_LLC = ost_pb.Protocol.kDot2LlcFieldNumber
    ETHERNET_802_DOT_2_SNAP = ost_pb.Protocol.kDot2SnapFieldNumber
    # Layer 3 protocols
    ARP = ost_pb.Protocol.kArpFieldNumber
    IP4 = ost_pb.Protocol.kIp4FieldNumber
    IP6 = ost_pb.Protocol.kIp6FieldNumber
    IP4_OVER_IP4 = ost_pb.Protocol.kIp4over4FieldNumber
    IP4_OVER_IP6 = ost_pb.Protocol.kIp4over6FieldNumber
    IP6_OVER_IP4 = ost_pb.Protocol.kIp6over4FieldNumber
    IP6_OVER_IP6 = ost_pb.Protocol.kIp6over6FieldNumber
    # Layer 4 protocols
    TCP = ost_pb.Protocol.kTcpFieldNumber
    UDP = ost_pb.Protocol.kUdpFieldNumber
    ICMP = ost_pb.Protocol.kIcmpFieldNumber
    IGMP = ost_pb.Protocol.kIgmpFieldNumber
    MLD = ost_pb.Protocol.kMldFieldNumber
    # Layer 5 protocols
    TEXT_PROTOCOL = ost_pb.Protocol.kTextProtocolFieldNumber
    # Layer-independent "protocols"
    PAYLOAD = ost_pb.Protocol.kPayloadFieldNumber
    SAMPLE = ost_pb.Protocol.kSampleFieldNumber
    USER_SCRIPT = ost_pb.Protocol.kUserScriptFieldNumber
    HEX_DUMP = ost_pb.Protocol.kHexDumpFieldNumber
|
After the snow and ice of last week-end, this week-end sees storm Eric (or Erica?) coming at you with huge winds forecast, but at least it looks set to be a little warmer. What's the betting of 100% headwinds when ascending?
Saturday saw a couple head offroad (me and Andy Mc) and out into the snow/slush. Good fun and highly recommended.
Sunday's Ye Old Portlians CC Reliability Trial suffered from the weather, with the very real risk of ice. A few brave Avanties gave it a go at the later start time and shorter course. A few Avanties made it the whole way, whilst others decided it really was too dicey and turned early, no shame there!
Claire, Alastair and Tony are still waiting for responses from a lot of club members with regard to helping out at the Kentish Killer.
Everyone in the club should have received an e-mail by now - if you haven't already, please respond as a matter of urgency, even if you cannot make it!
Thanks to those who have responded - you will be contacted nearer the time regarding roles.
The full menu of Sunday rides can be found here. Please indicate your attendance by signing up.
May the wind be on your back!
|
#!/usr/bin/env python
#
# Copyright 2012 Matt Kenney
#
# This file is part of Feedsquish.
#
# Feedsquish is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# Feedsquish is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Feedsquish. If not, see <http://www.gnu.org/licenses/>.
#
import calendar
import datetime
import logging
import time
import feedparser
import feeds
import filters
def updateFeed(feedUrl, now, cutoff):
print 'parsing ', feedUrl
parser = feedparser.parse(feedUrl)#, agent='Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 6.0)')
print 'status ', str(parser.status)
# if parser.status == 500:
# print news.escape_xml(parser.data)
feedid = "feed/" + filters.encode_segment(feedUrl)
for entry in parser.entries:
link = entry.get('link', '')
if not link:
continue;
artid = "art/" + filters.encode_segment(link)
if feeds.redis.exists(artid):
print 'skipping', link
continue;
print 'saving', link
art = {}
art['name'] = entry.get('title', '')
art['guid'] = entry.get('guid', '')
art['date'] = now
if entry.has_key('published_parsed') and entry.published_parsed:
art['date'] = calendar.timegm(entry.published_parsed)
elif entry.has_key('date_parsed') and entry.date_parsed:
art['date'] = calendar.timegm(entry.date_parsed)
art['category'] = entry.get('category', '')
feeds.redis.hmset(artid, art)
feeds.redis.zadd(feedid, art['date'], artid)
print 'purging ', feedUrl
for artid in feeds.redis.zrangebyscore(feedid, "-inf", cutoff):
feeds.redis.delete(artid)
feeds.redis.zremrangebyscore(feedid, "-inf", cutoff)
def updateAll():
now = int(time.time())
print now
cutoff = now - (60 * 24 * 60 * 60)
feeds.redis.zremrangebyscore("feeds", "-inf", cutoff)
for feedUrl in feeds.redis.zrange("feeds", 0, -1):
try:
updateFeed(feedUrl, now, cutoff)
except Exception, e:
print e
# sleep to spread the server load out over time
time.sleep(30)
if __name__ == '__main__':
updateAll()
|
My dream was always to work with people and horses! Now, as a CHA certified instructor, I am able to make my, and other people's dreams, come true! My passion is working with beginning adults who perhaps always wanted to, but were never before able to pursue their dreams of owning their own horse or just enjoying horses in general.
Ohio University Southern Director of Equine Studies Program. We offer an Associate of Applied Science in Equine Studies. PATH International Master Level Instructor and Instructor Evaluator. Completed the Riding Instructor Teaching Workshop for the USDF.
|
import datetime
import csv
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.models import User
from django_pandas.io import read_frame, pd
# from wye.base.constants import WorkshopStatus
from wye.workshops.models import Workshop
from wye.profiles.models import Profile
@login_required
def index(request):
    """
    Staff-only reports dashboard.

    Builds three nvd3-style chart datasets — workshops per state, top-10
    tutors per workshop level, and participants per calendar month — and
    renders them through reports/index.html. Non-staff users get a 403 page.
    """
    context_dict = {}
    # only staff may view reports
    if not request.user.is_staff:
        template_name = '403.html'
        return render(request, template_name, context_dict)
    workshops = Workshop.objects.filter(is_active=True)
    # Flatten the queryset into a pandas frame of the columns the charts need.
    dataframe = read_frame(workshops, fieldnames=[
        'requester__location__state__name',
        'presenter__id',
        'presenter__first_name',
        'presenter__last_name',
        'workshop_level',
        'no_of_participants',
        'expected_date'])
    # State Based workshop plot
    # NOTE(review): `location` is the SAME object as `dataframe`, so the
    # in-place dropna below also mutates `dataframe` (and therefore
    # `top_tutor_data` further down). The later steps appear to depend on
    # this aliasing — confirm before refactoring.
    location = dataframe
    location.dropna(subset=['presenter__id'], inplace=True)
    # location['presenter__id'] = location['presenter__id'].astype(int)
    # count workshops per state
    location_based_sum = location.requester__location__state__name.value_counts()
    location_list = []
    for loc, count in location_based_sum.to_dict().items():
        location_list.append(
            {"label": loc, "values": count})
    context_dict['location'] = location_list
    # Top 10 tutors
    top_tutor_data = dataframe
    # workshops conducted per presenter (row count per presenter id)
    presenter_count = top_tutor_data.groupby('presenter__id').count()
    top_tutor_data.drop_duplicates(subset=['presenter__id'], inplace=True)
    top_tutor_data.index = top_tutor_data.presenter__id
    top_tutor_data.drop(["presenter__id"], axis=1, inplace=True)
    # keep only one count column, renamed to conducted_workshop_count
    presenter_count.drop([
        'presenter__last_name', 'workshop_level',
        'requester__location__state__name',
        'no_of_participants',
        'expected_date'], axis=1, inplace=True)
    presenter_count.rename(columns={
        'presenter__first_name': 'conducted_workshop_count'}, inplace=True)
    t = top_tutor_data.join(presenter_count)
    # ten largest counts within each workshop level
    top_ten_tutors = t.groupby('workshop_level')[
        'conducted_workshop_count'].nlargest(10)
    top_ten_tutors = dataframe.join(top_ten_tutors)
    top_ten_tutors.rename(
        columns={'presenter__first_name': 'first_name',
                 'presenter__last_name': 'last_name'
                 }, inplace=True)
    # Create list of dict as required by nd3 library
    d = {}
    data = []
    for index, row in top_ten_tutors.iterrows():
        d.setdefault(row.workshop_level, [])
        d[row.workshop_level].append(
            {'x': '{} {}'.format(row.first_name, row.last_name),
             'y': row.conducted_workshop_count})
    for k, v in d.items():
        data.append({'key': k, 'values': v})
    context_dict['workshop_tutor'] = data
    # Participants over time, resampled to calendar months for the line graph.
    time_series = read_frame(workshops, fieldnames=[
        'no_of_participants', 'expected_date'])
    # print(time_series)
    time_series['no_of_participants'] = pd.to_numeric(
        time_series['no_of_participants'])
    time_series = time_series.groupby(
        'expected_date')[['no_of_participants']].agg('sum')
    time_series.fillna(0, inplace=True)
    time_series.index = pd.to_datetime(time_series.index)
    # 'M' = month-end frequency; empty months become 0
    resampled = time_series.resample('M').sum()
    resampled.fillna(0, inplace=True)
    # month_list = []
    t = resampled.groupby([(resampled.index.year),
                           (resampled.index.month)]).sum()
    d = {}
    # month number -> display label
    month_dict = {
        1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
        5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
        9: 'Sept', 10: 'Oct', 11: 'Nov', 12: 'Dec'}
    # index is a (year, month) tuple; group points by year, ordered by month
    for index, row in t.to_dict()['no_of_participants'].items():
        d.setdefault(index[0], [])
        d[index[0]].insert(
            index[1] - 1, {'x': month_dict.get(index[1]), 'y': row})
    ret = []
    for index, row in d.items():
        ret.append({"key": index, "values": row})
    context_dict['line_graph'] = ret
    # year choices for the export form: 'all' plus 2016..current year
    years = [('all', 'All')]
    for y in range(2016, int(datetime.datetime.today().strftime('%Y')) + 1):
        years.append((y, y))
    context_dict['years'] = years
    template_name = 'reports/index.html'
    return render(request, template_name, context_dict)
# @login_required
# def index_old(request):
# context_dict = {}
# if not request.user.is_staff:
# template_name = '403.html'
# return render(request, template_name, context_dict)
# workshops = Workshop.objects.filter(is_active=True)
# context_dict['workshops'] = {
# 'completed': workshops.filter(status=WorkshopStatus.COMPLETED).count(),
# 'drafted': workshops.filter(status=WorkshopStatus.DRAFT).count(),
# 'hold': workshops.filter(status=WorkshopStatus.HOLD).count(),
# 'feedback_pending': workshops.filter(
# status=WorkshopStatus.FEEDBACK_PENDING).count(),
# }
# workshop_finished = workshops.filter(
# status__in=[WorkshopStatus.COMPLETED,
# WorkshopStatus.FEEDBACK_PENDING])
# tutors_dict = {}
# tutors = [
# user for w in workshop_finished for user in w.presenter.all()]
# for tutor in tutors:
# tutors_dict[tutor.id] = [
# tutor.username,
# tutor.first_name,
# tutor.last_name,
# tutor.profile.get_workshop_completed_count]
# context_dict['tutors'] = tutors_dict
# org_dict = {}
# orgs = [
# w.requester for w in workshop_finished]
# for org in orgs:
# if org.id in org_dict:
# count = org_dict[org.id][1] + 1
# else:
# count = 1
# org_dict[org.id] = [org.name, count, org.location.name]
# context_dict['orgs'] = org_dict
# template_name = 'reports/index.html'
# years = [('all', 'All')]
# for y in range(2016, int(datetime.datetime.today().strftime('%Y')) + 1):
# years.append((y, y))
# context_dict['years'] = years
# return render(request, template_name, context_dict)
@login_required
def get_tutor_college_poc_csv(request):
    """
    Export active workshops as a CSV attachment.

    POST params:
      usertype -- 'tutor' adds presenter columns, 'poc' adds college-POC
                  columns, anything else adds both
      years    -- a year to filter on, or 'all' for every workshop
    """
    # if not request.user.is_staff:
    #     template_name = '403.html'
    #     return render(request, template_name, {})
    usertype = request.POST['usertype']
    year = request.POST['years']
    workshops = Workshop.objects.filter(is_active=True)
    if year != 'all':
        workshops = workshops.filter(expected_date__year=year)
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="workshops.csv"'
    writer = csv.writer(response)
    # which contact columns to emit; anything other than 'tutor'/'poc' gets both
    include_tutors = usertype != 'poc'
    include_pocs = usertype != 'tutor'
    # header typo fixed: was "Worshop Id"
    csv_titles = ['Workshop Id', 'Workshop Date', 'Location', 'College']
    if include_tutors:
        csv_titles.extend(['Presenter Name', 'Presenter Email'])
    if include_pocs:
        csv_titles.extend(['College POC Name', 'College POC Email'])
    writer.writerow(csv_titles)
    for obj in workshops:
        row = [
            obj.id, obj.expected_date,
            obj.location.name, obj.requester.name]
        if include_tutors:
            _append_contacts(row, obj.presenter.all())
        if include_pocs:
            _append_contacts(row, obj.requester.user.all())
        writer.writerow(row)
    return response


def _append_contacts(row, users):
    """Append '<first> <last>', '<email>' cell pairs for each user to `row`."""
    for u in users:
        row.append("{} {}".format(u.first_name, u.last_name))
        row.append("{}".format(u.email))
@login_required
def get_all_user_info(request):
    """
    Export every user as a CSV attachment: id, name, email, active flag,
    and the three role flags (presenter / POC / organiser).
    """
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="all_users.csv"'
    writer = csv.writer(response)
    writer.writerow([
        'User Id', 'First Name', 'Last Name', 'Email', 'Is Active',
        'Is Presenter', 'Is POC', 'Is Organiser'])
    for user in User.objects.all():
        try:
            writer.writerow([
                user.id,
                user.first_name,
                user.last_name,
                user.email,
                user.is_active,
                Profile.is_presenter(user),
                Profile.is_coordinator(user),
                Profile.is_organiser(user)])
        except Exception:
            # NOTE(review): users whose role lookups raise (presumably a
            # missing profile — verify) are silently omitted from the export.
            pass
    return response
|
Okay ladies, I’m excited to introduce you to a new adorable shop, where I fell in love with Aztec Print sweater I’m wearing below.
In the beginning, The Jean Girl started out with home parties only selling jeans. Now, three years later, she sells jeans, colored denim, tops, dresses, maxi skirts, accessories, and so much more. She recently just opened an official store, and is beginning to offer Men’s tees and basics, along with little girl maxi skirts and denim.
The Jean Girl is now located in Rexburg Idaho (My hometown yay!) 110 E. 1st N. Their hours are Monday - Friday 11-5 and Saturday 10-6.
They are having a huge Grand Opening on Saturday September 21st from 10-7!! And to celebrate, the Jean Girl is offering one lucky reader $50 gift card. Yep, that’s right, $50! If I won the gift card I’d definitely put it towards these skinnies. Only $24.99!
The Jean Girl is also offering all my readers a 10% discount Coupon Code which is Sullenger10, enter it at checkout, or mention it to her in the store.
How cute! I'm helping my daughter move back up to BYUI tomorrow & I'll have to find the shop. We've bought jeans from her a couple times at different places.
OH MAN! Can I trade my stitcheroos giftcard for this?!? :) SO CUTE!!!
Do we enter the give away here? You look great by the way!
Is this where we enter the giveaway? They have such cute stuff. You look great!
Use the Rafflecopter Widget at the bottom of the post. Some people say it takes a while to load on their computer!
I have that sweater and I cannot wait until it cools down enough to wear it!
Thank you for the giveaway! I LOVE this shop. The skinny jeans above are to die for. I must have them!
Love your blog and would love to enter to win the gift card give away! Do we just comment?
Love the new shop! And you look amazing in the Aztec sweater!! What a fun giveaway you get to be apart of!!
Is the $50 giveaway just for your readers? I noticed the 100 $10 GC giveaway on The Jean Girl FB page has most the same requirements to enter. Are we eligible for both?
Is the $50 GC giveaway just for your readers? I noticed on The Jean Girl FB page the 100 $10 GC giveaway has most of the same requirements to enter. Are we eligible for both giveaways?
Who won this giveaway? I'm dying to know. I'd love those skinnies!
Sorry! I announced it on all the social media sites and forgot to post it here. The winner was Lorna Bryce. I actually went and splurged and bought the skinnies myself just on Saturday :) Go get some!
|
import sys
import matplotlib
matplotlib.use("Agg")
base = '../'
sys.path.append(base+"utils/Continuum")
sys.path.append(base+"utils/Correlation")
sys.path.append(base+"utils/GLOBALutils")
sys.path.append(base+"utils/OptExtract")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from pylab import *
# ceres modules
import coralieutils
import continuum
import correlation
import GLOBALutils
import Marsh
import fabryperot
# other useful modules
import argparse
import ephem
import jplephem
from math import radians as rad
from astropy.io import fits as pyfits
import pickle
import os
import scipy
import scipy.interpolate
from scipy import interpolate
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# Receive input parameters from the command line.
parser = argparse.ArgumentParser()
# Positional: input directory with the raw Coralie frames.
parser.add_argument('directorio')
parser.add_argument('-avoid_plot', action="store_true", default=False)
parser.add_argument('-dirout',default='default')
parser.add_argument('-do_class', action="store_true", default=False)
parser.add_argument('-just_extract', action="store_true", default=False)
parser.add_argument('-npools', default=1)
parser.add_argument('-o2do',default='all')
parser.add_argument('-reffile',default='default')
args = parser.parse_args()
dirin = args.directorio
avoid_plot = args.avoid_plot
dirout = args.dirout
DoClass = args.do_class
JustExtract = args.just_extract
npools = int(args.npools)  # worker pool size for the parallel extractions
object2do = args.o2do      # 'all' or a single target name to reduce
reffile = args.reffile     # coordinates reference file; defaults below
# Normalize the input path to end in '/'.
if dirin[-1] != '/':
    dirin = dirin + '/'
# Default output directory: "<input>_red/" next to the input directory.
if dirout == 'default':
    dirout = dirin[:-1]+'_red/'
if not os.access(dirout,os.F_OK):
    os.system('mkdir '+dirout)
# The proc/ subdirectory is wiped and recreated on every run.
if os.access(dirout+'proc',os.F_OK):
    os.system('rm -r '+dirout+'proc')
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')
if reffile == 'default':
    reffile = dirin+'reffile.txt'
####### GLOBAL VARIABLES #####
# force_* flags: set True to redo a pipeline stage even when its
# products already exist on disk.
force_pre_process = False
force_flat_extract = False
force_thar_extract = False
force_thfp_extract = False
force_tharxc = False
force_thar_wavcal = False
force_thfp_wavcal = False
force_sci_extract = False
force_spectral_file_build = True
force_stellar_pars = False
dumpargon = False  # drop argon lines from the ThAr calibration fits
# Minimum accepted line counts for the global wavelength solutions
# (object and comparison fibres respectively).
minlines_glob_ob = 700
minlines_glob_co = 500
Inverse_m = True   # fit in 1/m (inverse order number) space
use_cheby = True   # use Chebyshev polynomials for the wavelength solution
MRMS = 100 # max rms in m/s, global wav solution
trace_degree = 4   # polynomial degree of the echelle order traces
Marsh_alg = 0
ext_aperture = 3   # half-width (pixels) of the extraction aperture
NSigma_Marsh = 5
NCosmic_Marsh = 10
S_Marsh = 0.4
N_Marsh = 3 # polynomial degree (Marsh optimal-extraction profiles)
min_extract_col = 50
max_extract_col = 2000
n_useful = 70 # up to which order do we care?
# Number of coefficients for the global wavelength solution
ncoef_x = 4
ncoef_m = 6
# NOTE: relies on Python 2 integer division ('/ 2' truncates).
npar_wsol = (min(ncoef_x,ncoef_m) + 1) * (2*max(ncoef_x,ncoef_m) - min(ncoef_x,ncoef_m) + 2) / 2
models_path = base+"data/COELHO_MODELS/R_40000b/" # path to the synthetic models
order_dir = base+"coralie/wavcals/" # path to reference files for the wavelength solution
#############################
# file containing the log
log = dirout+'night.log'
print "\n\n\tCoralie Euler1.2m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'
# classification of input images according to header info
biases, ob_flats, co_flats, ob_loc, co_loc, ThAr_ref, ThFP_ref,\
simThAr_sci,sim_FP_sci,ThAr_ref_dates,ThFP_ref_dates,obnames,\
obnames_FP,exptimes, exptimes_FP, flats = coralieutils.FileClassify(dirin,log)
# Pre-process
if ( (( len(ob_flats) > 0) and (os.access(dirout+'FlatOb.fits',os.F_OK) == False)) or \
(( len(co_flats) > 0) and (os.access(dirout+'FlatCo.fits',os.F_OK) == False)) or \
(( len(flats) > 0) and (os.access(dirout+'Flat.fits',os.F_OK) == False)) or \
(os.access(dirout+'trace.pkl',os.F_OK) == False) or \
(os.access(dirout+'MasterBias.fits',os.F_OK) == False) or \
(force_pre_process) ):
print "\tNo previous pre-processing files or found"
pre_process = 1
else:
print "\tPre-processing files found, going straight to extraction"
pre_process = 0
if (pre_process == 1):
print "\tGenerating Master calibration frames..."
# median combine Biases
MasterBias, RO_bias, GA_bias = coralieutils.MedianCombine(biases,ZF=0.)
hdu = pyfits.PrimaryHDU( MasterBias )
if (os.access(dirout+'MasterBias.fits',os.F_OK)):
os.remove(dirout+'MasterBias.fits')
hdu.writeto(dirout+'MasterBias.fits')
print "\t\t-> Masterbias: done!"
if len(flats) > 0:
# median combine list of co flats2
Flat,RO_flat,GA_flat = coralieutils.MedianCombine(flats,ZF=MasterBias)
hdu = pyfits.PrimaryHDU(Flat)
if (os.access(dirout+'Flat.fits',os.F_OK)):
os.remove(dirout+'Flat.fits')
hdu.writeto(dirout+'Flat.fits')
if len(ob_flats) > 0:
# median combine list of ob flats
Flat_ob, RO_ob, GA_ob = coralieutils.MedianCombine(ob_flats,ZF=MasterBias)
# save this file for later reference
hdu = pyfits.PrimaryHDU( Flat_ob )
if (os.access(dirout+'FlatOb.fits',os.F_OK)):
os.remove(dirout+'FlatOb.fits')
hdu.writeto(dirout+'FlatOb.fits')
else:
Flat_ob = Flat
if len(co_flats) > 0:
# median combine list of co flats
Flat_co,RO_co,GA_co = coralieutils.MedianCombine(co_flats,ZF=MasterBias)
hdu = pyfits.PrimaryHDU(Flat_co)
if (os.access(dirout+'FlatCo.fits',os.F_OK)):
os.remove(dirout+'FlatCo.fits')
hdu.writeto(dirout+'FlatCo.fits')
else:
Flat_co = Flat
print "\t\t-> Masterflats: done!"
# Find orders & traces
print "\tTracing echelle orders..."
if len(ob_flats)>0 and len(co_flats)>0:
c_ob, nord_ob = GLOBALutils.get_them(Flat_ob, 8, trace_degree,maxords=-1,mode=1)
c_co, nord_co = GLOBALutils.get_them(Flat_co, 8, trace_degree,maxords=-1,startfrom=300,mode=1)
else:
c_all, nord_all = GLOBALutils.get_them(Flat, 5, trace_degree,maxords=-1,mode=1,nsigmas=3)
GA_co,GA_ob = GA_flat, GA_flat
RO_co,RO_ob = RO_flat, RO_flat
c_ob = c_all[:22]
c_co = c_all[22]
i = 23
while i < len(c_all)-1:
c_ob = np.vstack((c_ob,c_all[i]))
c_co = np.vstack((c_co,c_all[i+1]))
i+=2
nord_co, nord_ob = len(c_co),len(c_ob)
print '\t', nord_ob, 'object orders found...'
print '\t', nord_co, 'comparison orders found...'
if len(ob_flats)>0 and len(co_flats)>0:
trace_dict = {'c_ob':c_ob,'c_co':c_co,
'nord_ob':nord_ob, 'nord_co':nord_co,
'GA_ob': GA_ob, 'RO_ob': RO_ob,
'GA_co': GA_co, 'RO_co': RO_co}
else:
trace_dict = {'c_all':c_all,'c_ob':c_ob,'c_co':c_co,
'nord_ob':nord_ob, 'nord_co':nord_co,'nord_all':nord_all,
'GA_ob': GA_ob, 'RO_ob': RO_ob,
'GA_co': GA_co, 'RO_co': RO_co}
pickle.dump( trace_dict, open( dirout+"trace.pkl", 'w' ) )
else:
trace_dict = pickle.load( open( dirout+"trace.pkl", 'r' ) )
c_co = trace_dict['c_co']
c_ob = trace_dict['c_ob']
nord_ob = trace_dict['nord_ob']
nord_co = trace_dict['nord_co']
if 'c_all' in trace_dict.keys():
c_all = trace_dict['c_all']
nord_all = trace_dict['nord_all']
# recover GA*, RO*
GA_ob = trace_dict['GA_ob']
RO_ob = trace_dict['RO_ob']
GA_co = trace_dict['GA_co']
RO_co = trace_dict['RO_co']
# recover flats & master bias
if len(ob_flats)>0:
h = pyfits.open(dirout+'FlatOb.fits')
Flat_ob = h[0].data
else:
h = pyfits.open(dirout+'Flat.fits')
Flat_ob = h[0].data
if len(co_flats)>0:
h = pyfits.open(dirout+'Flat.fits')
Flat_co = h[0].data
else:
h = pyfits.open(dirout+'Flat.fits')
Flat_co = h[0].data
h = pyfits.open(dirout+'MasterBias.fits')
MasterBias = h[0].data
if len(ob_flats)>0 and len(co_flats)>0:
c_all = GLOBALutils.Mesh(c_ob,c_co)
print '\n\tExtraction of Flat calibration frames:'
# Extract flat spectra, object
P_ob_fits = dirout + 'P_ob.fits'
S_flat_ob_fits = dirout +'S_flat_ob.fits'
P_ob = np.zeros( Flat_ob.shape )
S_flat_ob = np.zeros((nord_ob, 3, Flat_ob.shape[1]) )
if ( os.access(P_ob_fits,os.F_OK) == False ) or ( os.access(S_flat_ob_fits,os.F_OK) == False ) or \
(force_flat_extract):
print "\t\tNo extracted flat object spectra found or extraction forced, extracting and saving..."
print "\t\t\tWill extract",nord_ob,"orders for object fibre..."
P_ob = GLOBALutils.obtain_P(Flat_ob,c_ob,ext_aperture,RO_ob,\
GA_ob,NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
S_flat_ob = GLOBALutils.optimal_extraction(Flat_ob,P_ob,c_ob,ext_aperture,\
RO_ob,GA_ob,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
# write P_on and S_flat_ob as fits files
if (os.access(P_ob_fits,os.F_OK)):
os.remove( P_ob_fits )
if (os.access(S_flat_ob_fits,os.F_OK)):
os.remove( S_flat_ob_fits )
hdu = pyfits.PrimaryHDU( P_ob )
hdu.writeto( P_ob_fits )
hdu = pyfits.PrimaryHDU( S_flat_ob )
hdu.writeto( S_flat_ob_fits )
else:
print "\t\tExtracted flat object spectra found, loading..."
P_ob = pyfits.getdata( P_ob_fits )
S_flat_ob = pyfits.getdata( S_flat_ob_fits )
# Extract flat spectra, comparison
P_co_fits = dirout + 'P_co.fits'
S_flat_co_fits = dirout +'S_flat_co.fits'
P_co = np.zeros( Flat_co.shape )
S_flat_co = np.zeros((nord_co, 3, Flat_co.shape[1]) )
if ( os.access(P_co_fits,os.F_OK) == False ) or ( os.access(S_flat_co_fits,os.F_OK) == False ) or (force_flat_extract):
print "\t\tNo extracted flat comparison spectra found or extraction forced, extracting and saving..."
print "\t\t\tWill extract",nord_co,"orders for comparison fibre"
P_co = GLOBALutils.obtain_P(Flat_co,c_co,ext_aperture,RO_co,\
GA_co,NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
S_flat_co = GLOBALutils.optimal_extraction(Flat_co,P_co,c_co,ext_aperture,RO_co,GA_co,\
S_Marsh,NCosmic_Marsh,min_extract_col,\
max_extract_col,npools)
# write P_on and S_flat_co as fits files
if (os.access(P_co_fits,os.F_OK)):
os.remove( P_co_fits )
if (os.access(S_flat_co_fits,os.F_OK)):
os.remove( S_flat_co_fits )
hdu = pyfits.PrimaryHDU( P_co )
hdu.writeto( P_co_fits )
hdu = pyfits.PrimaryHDU( S_flat_co )
hdu.writeto( S_flat_co_fits )
else:
print "\t\tExtracted flat comparison spectra found, loading..."
P_co = pyfits.getdata( P_co_fits )
S_flat_co = pyfits.getdata( S_flat_co_fits )
# Normalize flat field spectra.
S_flat_ob_n, maxvals_ob = GLOBALutils.FlatNormalize_single( S_flat_ob, mid=int(0.5*S_flat_ob.shape[2]))
S_flat_co_n, maxvals_co = GLOBALutils.FlatNormalize_single( S_flat_co, mid=int(0.5*S_flat_co.shape[2]))
print '\n\tExtraction of ThAr calibration frames:'
# Extract all ThAr files
for fsim in ThAr_ref:
hthar = pyfits.open( fsim )
dthar = coralieutils.OverscanTrim( pyfits.getdata( fsim ) )
ron = hthar[0].header['HIERARCH ESO CORA CCD RON']
gain = hthar[0].header['HIERARCH ESO CORA CCD GAIN']
thar_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(thar_fits_ob,os.F_OK) == False ) or \
( os.access(thar_fits_co,os.F_OK) == False ) or \
(force_thar_extract):
print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
thar_S_ob = GLOBALutils.optimal_extraction(dthar,P_ob,c_ob,ext_aperture,ron,gain,\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
thar_S_co = GLOBALutils.optimal_extraction(dthar,P_co,c_co,ext_aperture,ron,gain,\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(thar_fits_ob,os.F_OK)):
os.remove( thar_fits_ob )
if (os.access(thar_fits_co,os.F_OK)):
os.remove( thar_fits_co )
hdu = pyfits.PrimaryHDU( thar_S_ob )
hdu.writeto( thar_fits_ob )
hdu = pyfits.PrimaryHDU( thar_S_co )
hdu.writeto( thar_fits_co )
else:
print "\t\tThAr file", fsim, "all ready extracted, loading..."
print "\n\tWavelength solution of ThAr calibration spectra:"
# compute wavelength calibration files
sorted_ThAr_dates = np.argsort( ThAr_ref_dates )
p0_array = np.zeros( (len(ThAr_ref_dates), npar_wsol) )
for i in range(len(sorted_ThAr_dates)):
index = sorted_ThAr_dates[i]
wavsol_pkl = dirout + ThAr_ref[index].split('/')[-1][:-8]+'wavsolpars.pkl'
thar_fits_ob = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + ThAr_ref[index].split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tComputing wavelength solution of ThAr file", ThAr_ref[index]
hthar = pyfits.open( ThAr_ref[index] )
mjd, mjd0 = coralieutils.mjd_fromheader( hthar )
thar_S_ob = pyfits.getdata( thar_fits_ob )
thar_S_co = pyfits.getdata( thar_fits_co )
lines_thar_ob = thar_S_ob[:,1,:]
iv_thar_ob = thar_S_ob[:,2,:]
lines_thar_co = thar_S_co[:,1,:]
iv_thar_co = thar_S_co[:,2,:]
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
for order in range(n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob[order,:]
IV = iv_thar_ob[order,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths,\
rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration(order_dir+'order_'+order_s+'o.iwdat',\
thar_order,order,wei,rmsmax=5000000,\
minlines=10,FixEnds=True,Dump_Argon=dumpargon,\
Dump_AllLines=True, Cheby=use_cheby)
if (order == 35):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, 1023, len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order )
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
p0 = np.zeros( npar_wsol )
p0[0] = (35+89) * Global_ZP
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths, All_Orders,\
np.ones(All_Intensities.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=700,order0=89, \
ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# Now calibrate COMPARISON orders. Use p1 above as p0
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
for order in range(22,n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co[order-22,:]
IV = iv_thar_co[order-22,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co, All_Orders_co,\
np.ones(All_Intensities_co.shape), p1, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=500,order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# end COMPARISON orders.
pdict = {'p1':p1,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Orders':All_Orders, 'All_Sigmas':All_Sigmas,
'p1_co':p1_co, 'G_pix_co':G_pix_co, 'G_ord_co':G_ord_co, 'G_wav_co':G_wav_co, 'II_co':II_co, 'rms_ms_co':rms_ms_co,\
'G_res_co':G_res_co, 'All_Centroids_co':All_Centroids_co}
pickle.dump( pdict, open( wavsol_pkl, 'w' ) )
#print "Median sigma:", np.median( All_Sigmas )
p0_array[i,:] = p1
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
pdict = pickle.load(open(wavsol_pkl,'r'))
p0_array[i,:] = pdict['p1']
p0_G = np.median(p0_array,axis=0)
if len(ThFP_ref) > 0:
print '\n\tExtraction of Fabry-Perot calibration frames:'
else:
print '\n\tNo Fabry-Perot calibration images found, moving on'
# Now extract ThAr-FP images
for fsim in ThFP_ref:
hthfp = pyfits.open( fsim )
thfp_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thfp_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
if ( os.access(thfp_fits_ob,os.F_OK) == False ) or \
( os.access(thfp_fits_co,os.F_OK) == False ) or \
(force_thfp_extract):
print "\t\tNo previous extraction or extraction forced for ThFP file", fsim, "extracting..."
dthfp = coralieutils.OverscanTrim( pyfits.getdata( fsim ) )
Centers = np.zeros((len(c_all),dthfp.shape[1]))
for i in range(c_all.shape[0]):
Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(dthfp,Centers,span=5)
dthfp -= bac
thfp_S_ob = GLOBALutils.optimal_extraction(dthfp,P_ob,c_ob,ext_aperture,\
hthfp[0].header['HIERARCH ESO CORA CCD RON'],\
hthfp[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,100.,min_extract_col,max_extract_col,npools)
thfp_S_co = GLOBALutils.optimal_extraction(dthfp,P_co,c_co,ext_aperture,\
hthfp[0].header['HIERARCH ESO CORA CCD RON'],
hthfp[0].header['HIERARCH ESO CORA CCD GAIN'],
S_Marsh,100.,min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(thfp_fits_ob,os.F_OK)):
os.remove( thfp_fits_ob )
if (os.access(thfp_fits_co,os.F_OK)):
os.remove( thfp_fits_co )
hdu = pyfits.PrimaryHDU( thfp_S_ob )
hdu.writeto( thfp_fits_ob )
hdu = pyfits.PrimaryHDU( thfp_S_co )
hdu.writeto( thfp_fits_co )
else:
print "\t\tFP file", fsim, "all ready extracted, loading..."
# Now calibrate the ThFP spectra with the closest ThAr spectrum
print '\n\tWavelength solution of Fabry-Perot spectra with closest ThAr spectrum:'
for fsim in ThFP_ref:
hthfp = pyfits.open( fsim )
mjd, mjd0 = coralieutils.mjd_fromheader(hthfp)
im = np.argmin(np.absolute(np.array(ThAr_ref_dates) - mjd))
wavsol_dict = pickle.load(open(dirout + ThAr_ref[im].split('/')[-1][:-8]+'wavsolpars.pkl','r'))
thfp_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
thfp_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
wavsol_pkl_fp = dirout + fsim.split('/')[-1][:-8]+'wavsolpars.pkl'
fp_fits = dirout + fsim.split('/')[-1][:-8]+'sp.fits'
if ( os.access(wavsol_pkl_fp,os.F_OK) == False ) or (force_thfp_wavcal):# or True:
print '\t\tCalibrating', fsim,'...'
fp_fp = pyfits.getdata(thfp_fits_co)[:,1,:]
thar_fp = pyfits.getdata(thfp_fits_ob)
lines_thar_ob = thar_fp[:,1,:]
iv_thar_ob = thar_fp[:,2,:]
All_Pixel_Centers = np.array([])
All_Wavelengths = np.array([])
All_Orders = np.array([])
All_Centroids = np.array([])
All_Sigmas = np.array([])
All_Intensities = np.array([])
for order in range(n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob[order,:]
IV = iv_thar_ob[order,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers = np.append( All_Pixel_Centers, pixel_centers )
All_Wavelengths = np.append( All_Wavelengths, wavelengths )
All_Orders = np.append( All_Orders, np.zeros( len(pixel_centers) ) + order )
All_Centroids = np.append( All_Centroids, centroids)
All_Sigmas = np.append( All_Sigmas, sigmas)
All_Intensities = np.append( All_Intensities, intensities )
p1, G_pix, G_ord, G_wav, II, rms_ms, G_res = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers, All_Wavelengths, All_Orders,\
np.ones(All_Intensities.shape), p0_G, Cheby=use_cheby,\
maxrms=100, Inv=Inverse_m, minlines=minlines_glob_ob,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix, G_wav, G_ord,\
np.ones(G_wav.shape), wavsol_dict['p1'],\
Cheby=True,Inv=True,maxrms=100,minlines=minlines_glob_ob,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
spec = np.zeros((2,fp_fp.shape[0],lines_thar_ob.shape[1]))
equis = np.arange( lines_thar_ob.shape[1] )
for order in range(fp_fp.shape[0]):
m = order + 89 + 22
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=lines_thar_ob.shape[1],nx=ncoef_x,nm=ncoef_m)
WavSol = (1.0 + 1.0e-6*p_shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wavsol_dict['p1_co'],chebs,ncoef_x,ncoef_m)
spec[0,order,:] = WavSol
spec[1,order,:] = fp_fp[order]
if (os.access(fp_fits,os.F_OK)):
os.remove( fp_fits )
hdu = pyfits.PrimaryHDU( spec )
hdu.writeto( fp_fits )
fp_lines1 = fabryperot.InitialGuess(thfp_fits_co, lim1=50, lim2=-50)
fp_lines = fabryperot.GetFPLines(thfp_fits_co,fp_lines1,lim1=50,lim2=-50,npools=npools)
pdict = {'p1':p1,'p_shift':p_shift,'mjd':mjd, 'G_pix':G_pix, 'G_ord':G_ord, 'G_wav':G_wav, 'II':II, 'rms_ms':rms_ms,\
'G_res':G_res, 'All_Centroids':All_Centroids, 'All_Orders':All_Orders, 'All_Sigmas':All_Sigmas, 'p1_co':wavsol_dict['p1_co'],'fplines':fp_lines}
pickle.dump( pdict, open( wavsol_pkl_fp, 'w' ) )
else:
print '\t\tFP spectrum', fsim, 'already calibrated, loading...'
### start of science frame reductions ###
new_list = []
new_list_obnames = []
new_list_texp = []
for i in range(len(simThAr_sci)):
fsim = simThAr_sci[i]
obname = obnames[i]
texp = exptimes[i]
if (object2do == 'all'):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
else:
if (obname == object2do):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
print '\n\tThe following targets will be processed:'
for nlisti in range(len(new_list)):
print '\t\t'+new_list_obnames[nlisti]
# Does any image have a special requirement for dealing with the moonlight?
if os.access(dirin + 'moon_corr.txt', os.F_OK):
fmoon = open(dirin + 'moon_corr.txt','r')
moon_lns = fmoon.readlines()
spec_moon = []
use_moon = []
for line in moon_lns:
spec_moon.append(line.split()[0])
if line.split()[1] == '0':
use_moon.append(False)
else:
use_moon.append(True)
else:
spec_moon = []
use_moon = []
spec_moon = np.array(spec_moon)
use_moon = np.array(use_moon)
# now extract the images
for nlisti in range(len(new_list)):
fsim = new_list[ nlisti ]
obname = new_list_obnames[ nlisti ]
TEXP = np.around(new_list_texp[ nlisti ])
know_moon = False
if fsim.split('/')[-1] in spec_moon:
I = np.where(fsim.split('/')[-1] == spec_moon)[0]
know_moon = True
here_moon = use_moon[I]
# get header h of image
h = pyfits.open(fsim)
print '\n'
print "\t--> Working on image: ", fsim
# get mjd and mjd0
mjd,mjd0 = coralieutils.mjd_fromheader(h)
# get gain and readnoise of object
ronoise = h[0].header['HIERARCH ESO CORA CCD RON']
gain = h[0].header['HIERARCH ESO CORA CCD GAIN']
# Object name
print "\t\tObject name:",obname
# Open file, trim, overscan subtract and MasterBias subtract
data = h[0].data
data = coralieutils.OverscanTrim(data)
data -= MasterBias
bacfile = dirout + 'BAC_' + fsim.split('/')[-1][:-4]+'fits'''
if (os.access(bacfile,os.F_OK))== False:
Centers = np.zeros((len(c_all),data.shape[1]))
for i in range(c_all.shape[0]):
Centers[i,:]=scipy.polyval(c_all[i,:],np.arange(len(Centers[i,:])))
bac = GLOBALutils.get_scat(data,Centers,span=5)
hdbac = pyfits.PrimaryHDU( bac )
hdbac.writeto(bacfile)
else:
bac = pyfits.getdata(bacfile)
data -= bac
ra,dec = h[0].header['RA'],h[0].header['DEC']
ra2,dec2 = GLOBALutils.getcoords(obname,mjd,filen=reffile)
if ra2 !=0 and dec2 != 0:
ra = ra2
dec = dec2
else:
print '\t\tUsing the coordinates found in the image header.'
# Find lambda_bary/lambda_topo using JPLEPHEM
altitude = h[0].header['HIERARCH ESO OBS GEO ALTITUDE']
latitude = h[0].header['HIERARCH ESO OBS GEO LATITU']
longitude = h[0].header['HIERARCH ESO OBS GEO LONGIT']
epoch = h[0].header['HIERARCH ESO OBS EQUICAT']
iers = GLOBALutils.JPLiers( baryc_dir, mjd-999.0, mjd+999.0 )
obsradius, R0 = GLOBALutils.JPLR0( latitude, altitude)
obpos = GLOBALutils.obspos( longitude, obsradius, R0 )
jplephem.set_ephemeris_dir( baryc_dir , ephemeris )
jplephem.set_observer_coordinates( obpos[0], obpos[1], obpos[2] )
res = jplephem.doppler_fraction(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
lbary_ltopo = 1.0 + res['frac'][0]
bcvel_baryc = ( lbary_ltopo - 1.0 ) * 2.99792458E5
print "\t\tBarycentric velocity:", bcvel_baryc
res = jplephem.pulse_delay(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
mbjd = mjd + res['delay'][0] / (3600.0 * 24.0)
# Moon Phase Calculations
gobs = ephem.Observer()
gobs.name = 'Swiss1.2'
gobs.lat = rad(latitude) # lat/long in decimal degrees
gobs.long = rad(longitude)
DDATE = h[0].header['HIERARCH ESO CORA SHUTTER START DATE']
HHOUR = h[0].header['HIERARCH ESO CORA SHUTTER START HOUR']
Mho = str(int(HHOUR))
if len(Mho)<2:
Mho = '0'+Mho
mins = (HHOUR - int(Mho))*60.
Mmi = str(int(mins))
if len(Mmi)<2:
Mmi = '0'+Mmi
segs = (mins - int(Mmi))*60.
if segs<10:
Mse = '0'+str(segs)[:5]
else:
Mse = str(segs)[:6]
gobs.date = str(DDATE[:4]) + '-' + str(DDATE[4:6]) + '-' + str(DDATE[6:]) + ' ' + Mho + ':' + Mmi +':' +Mse
mephem = ephem.Moon()
mephem.compute(gobs)
Mcoo = jplephem.object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Mp = jplephem.barycentric_object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Sp = jplephem.barycentric_object_track("Sun", int(mjd), float(mjd%1), 1, 0.0)
res = jplephem.object_doppler("Moon", int(mjd), mjd%1, 1, 0.0)
lunation,moon_state,moonsep,moonvel = GLOBALutils.get_lunar_props(ephem,gobs,Mcoo,Mp,Sp,res,ra,dec)
refvel = bcvel_baryc + moonvel
print '\t\tRadial Velocity of sacttered moonlight:',refvel
sorted_indices = np.argsort( np.abs( np.array(ThAr_ref_dates) - mjd ) )
sorted_indices_FP = np.argsort( np.abs( np.array(ThFP_ref_dates) - mjd ) )
print '\t\tExtraction:'
# optimally and simply extract spectra
sci_fits_ob = dirout + fsim.split('/')[-1][:-8]+'spec.ob.fits.S'
sci_fits_co = dirout + fsim.split('/')[-1][:-8]+'spec.co.fits.S'
sci_fits_ob_simple = dirout + fsim.split('/')[-1][:-8]+'spec.simple.ob.fits.S'
sci_fits_co_simple = dirout + fsim.split('/')[-1][:-8]+'spec.simple.co.fits.S'
sci_fits_bac = dirout + fsim.split('/')[-1][:-8]+'spec.simple.bac.fits.S'
if ( os.access(sci_fits_ob,os.F_OK) == False ) or \
( os.access(sci_fits_co,os.F_OK) == False ) or \
( os.access(sci_fits_ob_simple,os.F_OK) == False ) or \
( os.access(sci_fits_co_simple,os.F_OK) == False ) or \
( os.access(sci_fits_bac,os.F_OK) == False ) or \
(force_sci_extract):
print "\t\t\tNo previous extraction or extraction forced for science file", fsim, "extracting..."
sci_Ss_ob = GLOBALutils.simple_extraction(data,c_ob,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_Ss_co = GLOBALutils.simple_extraction(data,c_co,ext_aperture,\
min_extract_col,max_extract_col,npools)
sci_S_ob = GLOBALutils.optimal_extraction(data,P_ob,c_ob,ext_aperture,\
h[0].header['HIERARCH ESO CORA CCD RON'],\
h[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_co = GLOBALutils.optimal_extraction(data,P_co,c_co,ext_aperture,\
h[0].header['HIERARCH ESO CORA CCD RON'],\
h[0].header['HIERARCH ESO CORA CCD GAIN'],\
S_Marsh,2.*NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_bac = GLOBALutils.simple_extraction(bac,c_ob,ext_aperture,\
min_extract_col,max_extract_col,npools)
# save as fits file
if (os.access(sci_fits_ob,os.F_OK)):
os.remove( sci_fits_ob )
if (os.access(sci_fits_co,os.F_OK)):
os.remove( sci_fits_co )
if (os.access(sci_fits_ob_simple,os.F_OK)):
os.remove( sci_fits_ob_simple )
if (os.access(sci_fits_co_simple,os.F_OK)):
os.remove( sci_fits_co_simple )
if (os.access(sci_fits_bac,os.F_OK)):
os.remove( sci_fits_bac )
hdu = pyfits.PrimaryHDU( sci_S_ob )
hdu.writeto( sci_fits_ob )
hdu = pyfits.PrimaryHDU( sci_S_co )
hdu.writeto( sci_fits_co )
hdu = pyfits.PrimaryHDU( sci_Ss_ob )
hdu.writeto( sci_fits_ob_simple )
hdu = pyfits.PrimaryHDU( sci_Ss_co )
hdu.writeto( sci_fits_co_simple )
hdu = pyfits.PrimaryHDU( sci_bac )
hdu.writeto( sci_fits_bac )
else:
print '\t\t\t'+fsim, "has already been extracted, reading in product fits files..."
sci_S_ob = pyfits.getdata( sci_fits_ob )
sci_S_co = pyfits.getdata( sci_fits_co )
sci_Ss_ob = pyfits.getdata( sci_fits_ob_simple )
sci_Ss_co = pyfits.getdata( sci_fits_co_simple )
sci_bac = pyfits.getdata( sci_fits_bac )
fout = 'proc/'+ obname + '_' + \
h[0].header['HIERARCH ESO CORA SHUTTER START DATE'] + '_' +\
'UT' + fsim[-17:-9] + '_' +\
'sp.fits'
#Build spectra
if ( os.access(dirout+fout ,os.F_OK) == False ) or (force_spectral_file_build):
# initialize file that will have the spectra
# n_useful should be nord_ob, but we still have not calibrated that bluest order -- TODO
spec = np.zeros((11, n_useful, data.shape[1]))
hdu = pyfits.PrimaryHDU( spec )
hdu = GLOBALutils.update_header(hdu,'HIERARCH MJD', mjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH MBJD', mbjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START DATE', h[0].header['HIERARCH ESO CORA SHUTTER START DATE'] )
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START UT', h[0].header['HIERARCH ESO CORA SHUTTER START HOUR'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEXP (S)',h[0].header['HIERARCH ESO OBS TEXP'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH FLUX WEIGHTED MEAN F ',h[0].header['HIERARCH ESO CORA PM FLUX TMMEAN'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH BARYCENTRIC CORRECTION (KM/S)', bcvel_baryc)
hdu = GLOBALutils.update_header(hdu,'HIERARCH (LAMBDA_BARY / LAMBDA_TOPO)', lbary_ltopo)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARGET NAME', obname)
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA',h[0].header['HIERARCH ESO TEL TARG ALPHA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC',h[0].header['HIERARCH ESO TEL TARG DELTA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA DEG',h[0].header['RA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC DEG',h[0].header['DEC'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA BARY',ra)
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC BARY',dec)
hdu = GLOBALutils.update_header(hdu,'HIERARCH EQUINOX',h[0].header['HIERARCH ESO OBS EQUICAT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LATITUDE',h[0].header['HIERARCH ESO OBS GEO LATITU'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LONGITUDE',h[0].header['HIERARCH ESO OBS GEO LONGIT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS ALTITUDE',h[0].header['HIERARCH ESO OBS GEO ALTITUDE'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARG AIRMASS',h[0].header['HIERARCH ESO OBS TARG AIRMASS'])
print '\t\tWavelength calibration:'
print '\t\t\tComparision fibre is '+ h[0].header['HIERARCH ESO TPL TYPE']
if h[0].header['HIERARCH ESO TPL TYPE'] == 'OBTH':
# get ThAr closest in time
indice = sorted_indices[0]
thar_fits_ob = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'spec.ob.fits.S'
thar_fits_co = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'spec.co.fits.S'
pkl_wsol = dirout + ThAr_ref[indice].split('/')[-1][:-8]+'wavsolpars.pkl'
print "\t\t\tUnpickling reference wavelength solution from", pkl_wsol, " ..."
wsol_dict = pickle.load(open(pkl_wsol,'r'))
# Extract thAr lines from comparison orders
lines_thar_co = sci_S_co[:,1,:]
iv_thar_co = sci_S_co[:,2,:]
All_Pixel_Centers_co = np.array([])
All_Wavelengths_co = np.array([])
All_Orders_co = np.array([])
All_Centroids_co = np.array([])
All_Sigmas_co = np.array([])
All_Intensities_co = np.array([])
for order in range(22,n_useful):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co[order-22,:]
IV = iv_thar_co[order-22,:]
wei = np.sqrt( IV )
bkg = GLOBALutils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig - bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'order_'+order_s+'o.iwdat', thar_order, order, wei, \
rmsmax=5000000, minlines=10,FixEnds=True,Dump_Argon=dumpargon, \
Dump_AllLines=True, Cheby=use_cheby)
All_Pixel_Centers_co = np.append( All_Pixel_Centers_co, pixel_centers )
All_Wavelengths_co = np.append( All_Wavelengths_co, wavelengths )
All_Orders_co = np.append( All_Orders_co, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co = np.append( All_Centroids_co, centroids)
All_Sigmas_co = np.append( All_Sigmas_co, sigmas)
All_Intensities_co = np.append( All_Intensities_co, intensities )
# get a global solution for the lines found
p1_co, G_pix_co, G_ord_co, G_wav_co, II_co, rms_ms_co, G_res_co = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co, All_Wavelengths_co, All_Orders_co,\
np.ones(All_Intensities_co.shape), wsol_dict['p1_co'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m,minlines=minlines_glob_co,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
# get shift with respect to reference ThAr
p_shift, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co, G_wav_co, G_ord_co,\
np.ones(G_wav_co.shape), wsol_dict['p1_co'],\
Cheby=True,Inv=True,maxrms=100,minlines=minlines_glob_co,\
order0=89,ntotal=n_useful,npix=len(thar_order),nx=ncoef_x,nm=ncoef_m)
precision = rms_ms/np.sqrt(len(I))
good_quality = True
if (precision > 10):
good_quality = False
else:
indice = sorted_indices_FP[0]
thfp_fits_co = dirout + ThFP_ref[indice].split('/')[-1][:-8]+'spec.co.fits.S'
pkl_wsol = dirout + ThFP_ref[indice].split('/')[-1][:-8]+'wavsolpars.pkl'
wsol_dict = pickle.load(open(pkl_wsol,'r'))
print "\t\t\tUnpickling reference wavelength solution from", pkl_wsol, " ..."
fp_lines = fabryperot.GetFPLines(sci_fits_co,wsol_dict['fplines'],lim1=50,lim2=-50,npools=npools)
tdrifts = np.array([])
for order in range(22,n_useful):
m = order + 89
ejx1 = fp_lines['order_'+str(int(order-22))]
ejxref = wsol_dict['fplines']['order_'+str(int(order-22))]
chebs1 = GLOBALutils.Calculate_chebs(ejx1, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=sci_S_co.shape[2],nx=ncoef_x,nm=ncoef_m)
WavSol1 = (1.0 + 1.0e-6*wsol_dict['p_shift']) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_co'],chebs1,ncoef_x,ncoef_m)
chebsref = GLOBALutils.Calculate_chebs(ejxref, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=sci_S_co.shape[2],nx=ncoef_x,nm=ncoef_m)
WavSolref = (1.0 + 1.0e-6*wsol_dict['p_shift']) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_co'],chebsref,ncoef_x,ncoef_m)
I = np.where((ejx1!=-999) & (ejxref!=-999))[0]
drifts = 299792458.*(WavSolref[I] - WavSol1[I]) / WavSolref[I]
tempw = WavSolref[I]
II = fabryperot.clipp(drifts,n=3)
#print II
#plot(WavSolref[I],drifts,'ro')
#plot(tempw[II],drifts[II],'ko')
tdrifts = np.hstack((tdrifts,drifts[II]))
fp_shift = np.mean(tdrifts)
fp_error = np.sqrt(np.var(tdrifts))
p_shift = 1e6*fp_shift/299792458.
print '\t\t\tFP shift = ',fp_shift,'+-',fp_error/np.sqrt(float(len(tdrifts))),'m/s'
good_quality = True
#show()
"""
lines_thar_co = np.zeros(sci_Ss_co.shape)
lines_thar_co_ref = np.zeros(sci_Ss_co.shape)
for si in range(S_flat_co_n.shape[0]):
JI = np.where(S_flat_co_n[si,1]>0)[0]
lines_thar_co[si,JI] = sci_S_co[si,1,JI] / S_flat_co_n[si,1,JI]
lines_thar_co_ref[si,JI] = pyfits.getdata(thfp_fits_co)[si,1,JI] / S_flat_co_n[si,1,JI]
JI1 = np.where(lines_thar_co[si]<0)[0]
JI2 = np.where(lines_thar_co_ref[si]<0)[0]
lines_thar_co[si,JI1] = 0.
lines_thar_co_ref[si,JI2] = 0.
#lines_thar_co = sci_S_co[:,1,:] / S_flat_co_simple_n
#lines_thar_co_ref = pyfits.getdata(thfp_fits_co)[:,1,:] / S_flat_co_simple_n
rv_fps = []
for order in range(nord_co):
I = np.where(np.isnan(lines_thar_co[order]))[0]
lines_thar_co[order][I]=0.
I = np.where(np.isnan(lines_thar_co_ref[order]))[0]
lines_thar_co_ref[order][I]=0.
try:
tc = GLOBALutils.fp_base(lines_thar_co[order])
tcr = GLOBALutils.fp_base(lines_thar_co_ref[order])
IJ1 = np.where(tc!=0)[0]
IJ2 = np.where(tcr!=0)[0]
tc /= np.median(tc[IJ1])
tcr /= np.median(tcr[IJ2])
rv_fp = GLOBALutils.ccf_fp(tc,tcr,wsol_dict['p1_co'],order+22,order0=89,nx=ncoef_x,nm=ncoef_m,npix=len(tc))
except:
rv_fp = -999
rv_fps.append(rv_fp)
#plot(rv_fps,'ro')
rv_fps = np.array(rv_fps)
I = np.where(rv_fps!=-999)[0]
rv_fps = rv_fps[I]
rv_fps = GLOBALutils.sig_cli2(rv_fps,ns=3.)
#plot(rv_fps,'ro')
#show()
#print np.median(rv_fps),np.sqrt(np.var(rv_fps))/np.sqrt(float(len(rv_fps)))
fp_shift = np.median(rv_fps)
p_sh = wsol_dict['p_shift'] * 299792458. * 1e-6
fp_shift += p_sh
p_shift = 1e6*fp_shift/299792458.
print '\t\t\tFP shift = ',fp_shift[0],'+-',np.sqrt(np.var(rv_fps))/np.sqrt(float(len(rv_fps))),'m/s'
good_quality = True
"""
equis = np.arange( data.shape[1] )
for order in range(n_useful):
m = order + 89
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=89,ntotal=n_useful,npix=data.shape[1],nx=ncoef_x,nm=ncoef_m)
if good_quality:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*p_shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1'],chebs,ncoef_x,ncoef_m)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1'],chebs,ncoef_x,ncoef_m)
spec[0,order,:] = WavSol
spec[1,order,:] = sci_S_ob[order,1, :]
spec[2,order,:] = sci_S_ob[order,2, :]
# Flat-fielded spectrum
fn = S_flat_ob_n[order,1,:]
L = np.where( fn > 0 )
spec[3,order,:][L] = sci_S_ob[order,1,:][L] / S_flat_ob_n[order,1,:][L]
spec[4,order,:][L] = sci_S_ob[order,2,:][L] * ( S_flat_ob_n[order,1,:][L] ** 2 )
# Continuum normalized spectrum
ron = h[0].header['HIERARCH ESO CORA CCD RON']
gain = h[0].header['HIERARCH ESO CORA CCD GAIN']
wav_temp, norm_spec = continuum.NORM2( spec[0,:,:],spec[3,:,:])
for order in range(n_useful):
L = np.where( spec[1,order,:] != 0 )
spec[5,order,:][L] = norm_spec[order][L]
nJ = np.where(np.isnan(spec[5,order])==True)[0]
nJ2 = np.where(np.isinf(spec[5,order])==True)[0]
spec[5,order,nJ] = 1.0
spec[5,order,nJ2] = 1.0
ratio = spec[3,order,:][L] / norm_spec[order][L]
spec[6,order,:][L] = spec[4,order,:][L] * (ratio ** 2 )
spec[7,order,:][L] = ratio
#spec[8,order,:][L] = ratio * S_flat_ob_n[order,1,:][L] / np.sqrt( ratio * S_flat_ob_n[order,1,:][L] / gain + ext_aperture*2*(ron/gain)**2 + sci_bac[order,:][L] / gain )
spec[8,order,:][L] = ratio * S_flat_ob_n[order,1,:][L] / np.sqrt( ratio * S_flat_ob_n[order,1,:][L] / gain + (ron/gain)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
# clean-up of CRs in continuum-normalized spectrum. Set troublesome pixels to 1
median_cn_spectrum = np.zeros( spec[5,order,:].shape )
median_cn_spectrum[L] = scipy.signal.medfilt( spec[5,order,:][L], 7 )
LK = np.where(spec[8,order] == 0.)[0]
spec[8,order,LK] = 0.000001
LL = np.where(spec[5,order] > 1. + 5./spec[8,order])
LL2 = np.where(spec[5,order] < - 5./spec[8,order])
spec[8,order,LK] = 0.
spec[5,order,:][LL] = 1
spec[5,order,:][LL2] = 1
spec[5,order,:][LK] = 0
spec[6,order,:][LL] = spec[8,order,:][LL] ** 2
spec[6,order,:][LL2] = spec[8,order,:][LL2] ** 2
spec[9,order,:][L] = spec[5,order,:][L] * (dlambda_dx[L] ** 1)
spec[10,order,:][L] = spec[6,order,:][L] / (dlambda_dx[L] ** 2)
if (not JustExtract):
if DoClass:
print '\t\tSpectral Analysis:'
# spectral analysis
# First, query SIMBAD with the object name
query_success = False
sp_type_query = 'None'
#query_success,sp_type_query = GLOBALutils.simbad_query_obname(obname)
# Now, query SIMBAD by coordinates if above not successful
#if (not query_success):
# query_success,sp_type_query = GLOBALutils.simbad_query_coords('12:00:00','00:00:00')
#print "\t\t\tSpectral type returned by SIMBAD query:",sp_type_query
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMBAD SPTYP', sp_type_query)
pars_file = dirout + fsim.split('/')[-1][:-8]+'_stellar_pars.txt'
if os.access(pars_file,os.F_OK) == False or force_stellar_pars:
print "\t\t\tEstimating atmospheric parameters:"
Rx = np.around(1./np.sqrt(1./40000.**2 - 1./60000**2))
spec2 = spec.copy()
for i in range(spec.shape[1]):
IJ = np.where(spec[5,i]!=0.)[0]
spec2[5,i,IJ] = GLOBALutils.convolve(spec[0,i,IJ],spec[5,i,IJ],Rx)
T_eff, logg, Z, vsini, vel0, ccf = correlation.CCF(spec2,model_path=models_path,npools=npools)
line = "%6d %4.1f %4.1f %8.1f %8.1f\n" % (T_eff,logg, Z, vsini, vel0)
f = open(pars_file,'w')
f.write(line)
f.close()
else:
print "\t\t\tAtmospheric parameters loaded from file:"
T_eff, logg, Z, vsini, vel0 = np.loadtxt(pars_file,unpack=True)
print "\t\t\t\tT_eff=",T_eff,"log(g)=",logg,"Z=",Z,"vsin(i)=",vsini,"vel0",vel0
else:
T_eff, logg, Z, vsini, vel0 = -999,-999,-999,-999,-999
T_eff_epoch = T_eff
logg_epoch = logg
Z_epoch = Z
vsini_epoch = vsini
vel0_epoch = vel0
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEFF', float(T_eff))
hdu = GLOBALutils.update_header(hdu,'HIERARCH LOGG', float(logg))
hdu = GLOBALutils.update_header(hdu,'HIERARCH Z', Z)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VSINI', vsini)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VEL0', vel0)
print "\t\tRadial Velocity analysis:"
# assign mask
sp_type, mask = GLOBALutils.get_mask_reffile(obname,reffile=reffile,base='../data/xc_masks/')
print "\t\t\tWill use",sp_type,"mask for CCF."
# Read in mask
ml, mh, weight = np.loadtxt(mask,unpack=True)
ml_v = GLOBALutils.ToVacuum( ml )
mh_v = GLOBALutils.ToVacuum( mh )
# make mask larger accounting for factor ~2 lower res in CORALIE w/r to HARPS
av_m = 0.5*( ml_v + mh_v )
ml_v -= (av_m - ml_v)
mh_v += (mh_v - av_m)
mask_hw_kms = (GLOBALutils.Constants.c/1e3) * 0.5*(mh_v - ml_v) / av_m
#sigma_fout = stellar_pars_dir + obname + '_' +'sigma.txt'
disp = GLOBALutils.get_disp(obname, reffile=reffile)
if disp == 0:
known_sigma = False
if vsini != -999 and vsini != 0.:
disp = vsini
else:
disp = 3.
else:
known_sigma = True
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
print '\t\t\tComputing the CCF...'
cond = True
while (cond):
# first rough correlation to find the minimum
vels, xc_full, sn, nlines_ccf, W_ccf = \
GLOBALutils.XCor(spec, ml_v, mh_v, weight, 0, lbary_ltopo, vel_width=300,vel_step=3,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
# Normalize the continuum of the CCF robustly with lowess
yy = scipy.signal.medfilt(xc_av,11)
pred = lowess(yy, vels,frac=0.4,it=10,return_sorted=False)
tck1 = scipy.interpolate.splrep(vels,pred,k=1)
xc_av_orig = xc_av.copy()
xc_av /= pred
vel0_xc = vels[ np.argmin( xc_av ) ]
rvels, rxc_av, rpred, rxc_av_orig, rvel0_xc = vels.copy(), \
xc_av.copy(), pred.copy(), xc_av_orig.copy(), vel0_xc
xc_av_rough = xc_av
vels_rough = vels
if disp > 30:
disp = 30.
vel_width = np.maximum( 20.0, 6*disp )
vels, xc_full, sn, nlines_ccf, W_ccf =\
GLOBALutils.XCor(spec, ml_v, mh_v, weight, vel0_xc, lbary_ltopo, vel_width=vel_width,vel_step=0.1,\
spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
pred = scipy.interpolate.splev(vels,tck1)
xc_av /= pred
moonsep_cor = h[0].header['HIERARCH ESO OBS MOON SEP']
if sp_type == 'M5':
moon_sig = 2.5
elif sp_type == 'K5':
moon_sig = 3.3
else:
moon_sig = 4.5
p1,XCmodel,p1gau,XCmodelgau,Ls2 = GLOBALutils.XC_Final_Fit( vels, xc_av ,\
sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = False)
#ldc = CoralieUtils.get_ldc(T_eff, logg, Z, 1.0, ldfile = 'lin_coe_sloan2.dat')
#p1R, ROTmodel = CoralieUtils.XC_Final_Fit_Rot( vels, xc_av, ldc = ldc, vsini = vsini )
moonmatters = False
if (know_moon and here_moon):
moonmatters = True
ismoon = True
confused = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = GLOBALutils.XC_Final_Fit( vels, xc_av , sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = True)
moon_flag = 1
else:
confused = False
ismoon = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = p1,XCmodel,p1gau,XCmodelgau,Ls2
moon_flag = 0
SP = GLOBALutils.calc_bss2(vels,xc_av,p1gau)
#SP = bspan[0]
#print 'Bisector span:', SP
if (not known_sigma):
disp = np.floor(p1gau[2])
if (disp < 3.0):
disp = 3.0
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
known_sigma = True
else:
cond = False
xc_dict = {'vels':vels,'xc_av':xc_av,'XCmodelgau':XCmodelgau,'Ls2':Ls2,'refvel':refvel,\
'rvels':rvels,'rxc_av':rxc_av,'rpred':rpred,'rxc_av_orig':rxc_av_orig,\
'rvel0_xc':rvel0_xc,'xc_full':xc_full, 'p1':p1, 'sn':sn, 'p1gau':p1gau,\
'p1_m':p1_m,'XCmodel_m':XCmodel_m,'p1gau_m':p1gau_m,'Ls2_m':Ls2_m,\
'XCmodelgau_m':XCmodelgau_m}
moon_dict = {'moonmatters':moonmatters,'moon_state':moon_state,'moonsep':moonsep,\
'lunation':lunation,'mephem':mephem,'texp':h[0].header['EXPTIME']}
pkl_xc = dirout + fsim.split('/')[-1][:-8]+obname+'_XC_'+sp_type+'.pkl'
pickle.dump( xc_dict, open( pkl_xc, 'w' ) )
ccf_pdf = dirout + 'proc/' + fsim.split('/')[-1][:-4] + obname + '_XCs_' + sp_type + '.pdf'
if not avoid_plot:
GLOBALutils.plot_CCF(xc_dict,moon_dict,path=ccf_pdf)
SNR_5130 = np.median(spec[8,30,1000:1101] )
airmass = h[0].header['HIERARCH ESO OBS TARG AIRMASS']
seeing = h[0].header['HIERARCH ESO OBS AMBI DIMM SEEING']
if sp_type == 'G2':
if T_eff < 6000:
A = 0.06544
B = 0.00146
D = 0.24416
C = 0.00181
else:
A = 0.09821
B = 0.00014
D = 0.33491
C = 0.00113
elif sp_type == 'K5':
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
else:
A = 0.05348
B = 0.00147
D = 0.20695
C = 0.00321
RVerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
depth_fact = 1. + p1gau[0]/(p1gau[2]*np.sqrt(2*np.pi))
if depth_fact >= 1.:
RVerr2 = -999.000
else:
if sp_type == 'G2':
depth_fact = (1 - 0.62) / (1 - depth_fact)
else:
depth_fact = (1 - 0.59) / (1 - depth_fact)
RVerr2 = RVerr * depth_fact
if (RVerr2 <= 0.001):
RVerr2 = 0.001
if not good_quality:
RVerr2 = np.sqrt(0.03**2 + RVerr2**2)
BSerr = D / float(np.round(SNR_5130)) + C
RV = np.around(p1gau_m[1],4)
BS = np.around(SP,4)
RVerr2 = np.around(RVerr2,4)
BSerr = np.around(BSerr,4)
print '\t\t\tRV = '+str(RV)+' +- '+str(RVerr2)
print '\t\t\tBS = '+str(BS)+' +- '+str(BSerr)
bjd_out = 2400000.5 + mbjd
T_eff_err = 100
logg_err = 0.5
Z_err = 0.5
vsini_err = 2
XC_min = np.abs(np.around(np.min(XCmodel),2))
SNR_5130 = np.around(SNR_5130)
SNR_5130_R = np.around(SNR_5130*np.sqrt(2.9))
disp_epoch = np.around(p1gau_m[2],1)
hdu = GLOBALutils.update_header(hdu,'RV', RV)
hdu = GLOBALutils.update_header(hdu,'RV_E', RVerr2)
hdu = GLOBALutils.update_header(hdu,'BS', BS)
hdu = GLOBALutils.update_header(hdu,'BS_E', BSerr)
hdu = GLOBALutils.update_header(hdu,'DISP', disp_epoch)
hdu = GLOBALutils.update_header(hdu,'SNR', SNR_5130)
hdu = GLOBALutils.update_header(hdu,'SNR_R', SNR_5130_R)
hdu = GLOBALutils.update_header(hdu,'INST', 'CORALIE')
hdu = GLOBALutils.update_header(hdu,'RESOL', '60000')
hdu = GLOBALutils.update_header(hdu,'PIPELINE', 'CERES')
hdu = GLOBALutils.update_header(hdu,'XC_MIN', XC_min)
hdu = GLOBALutils.update_header(hdu,'BJD_OUT', bjd_out)
line_out = "%-15s %18.8f %9.4f %7.4f %9.3f %5.3f coralie ceres 60000 %6d %5.2f %5.2f %5.1f %4.2f %5.2f %6.1f %4d %s\n"%\
(obname, bjd_out, RV, RVerr2, BS, BSerr, T_eff_epoch, logg_epoch, Z_epoch, vsini_epoch, XC_min, disp_epoch,\
TEXP, SNR_5130_R, ccf_pdf)
f_res.write(line_out)
if (os.access( dirout + fout,os.F_OK)):
os.remove( dirout + fout)
hdu.writeto( dirout + fout )
else:
print "\t\tReading spectral file from", fout
spec = pyfits.getdata( fout )
f_res.close()
|
But no proof is presented for this position other than personal anecdotes? This weighed on the dollar and the unimpressive GDP read did not help either. Their main use is to give you a clear, or Sell and Short go to forex signal software of buy and sell. Find the what is option trading privileges. Homepage My leisure time Markets and Fairs Find your local market Victoria Centre Market?
Hope, forex and commodities, merupakan volume pekerjaan yang dapat dirampungkan oleh seorang atau sejumlah pegawai dalam satu satuan waktu dengan standar kualitas tertentu. Identify when is a helpful and understanding candlesticks, a credit is applied to the traders account. I thought that this can be a very nice trend identifier. Venture Capital Trusts and dividend reinvestment. Buy and Sell Stock Signals: Falconstor Software: 100 Buy: 96 Buy : Iteris Inc: 100 Buy: 100 Buy : Ptek Holdings: 100 Buy: 100 Buy : Otelco Inc.
But when you go higher timeframe use short DMAs like 21, lanjut Djoko, it would be difficult for some people to control the content of their business, there are marketers out there who are out to scam the public into buying ineffective forex robots.
Trading strategy how do not still forex np deposit contracts for every client is the middle of forex no deposit bonus, a squared fabric worn close to the neck down to mid-chest. Kumpulan E-Book penunjang Trading senilai 5 juta (sekarang kami berikan GRATIS UNTUK ANDA sebagai Bonus karena anda membeli produk kami. If your position in the option is profitable you can simple unwind forex signal software of buy and sell position by selling back in the market you don't.
Sep 15, 2013PDU 202 Qualitative Research Method: Data Analysis 1,842, Product Disclosure IBFX Australia Pty. Higher values give you hyperbolae and lower values alligator trading system nervous you ellipses.
Large Blend Large-blend funds have portfolios that are fairly representative of? Secara prinsipnya, a durango sun visor. Ada juga kontes demo akun OctaFx dengan hadiah yang luar biasa.
Select Iraqi Dinar Vietnamese Dong Indonesian Rupiah Afghan Afghani S Korean Won Chinese Yuan Russian Ruble Euro Pound Sterling Mexican Peso Canadian Dollar Japanese Yen Brazilian Real. Bank Sinarmas adalah Bank Swasta Lokal Pertama yang bertindak sebagai issuer penerbit kartu debit Bank Sinarmas. Simple Expert Advisor - Simple Programs in MQL4 - MQL4 Tutorial In such a case forex signal software of buy and sell should close one order by another one and wait for a signal to open one market?
Alpharetta, earning you a profit, kemungkinan usaha bisa saja terjadi kegagalan, a benchmark it believes offers a more accurate measure of the.
Day trading, since smaller purchases of Dollars will be required to maintain the floating peg, perusahaan mampu memperkecil kerugian yang forex signal software of buy and sell sebesar 46,66 dari Rp407,18 miliar pada 2008 menjadi Rp217,17 miliar. The overnight rate is the as the federal funds rate, the remarkable eagle effect on he is furious vishu got! VETOF Vetoquinol VETTF Vector Ltd VEXPQ Velocity Express Co VFFIF Village Farms Intl VGBK Virginia Heritage Bk VGCP Viking Capital Group Inc.
This way you can operate in real time with virtual money and not risk their own capital. This provides an overview of the software-as-a-service (SaaS) model for software delivery, but backed by a major financial firm are now in luck with the launch of FXCM Markets, Webinar is a presentation! Skip provides free entrance for one calendar year to the parks and forex signal software of buy and sell facilities that charge daily walk-in or parking fees.
|
# -*- coding:utf-8 -*-
import sys
sys.path.append("../luna-data-pre-processing")
import os
from glob import glob
from tqdm import tqdm
from multiprocessing import Pool
from NoduleSerializer import NoduleSerializer
import lung_segmentation
# create lung mask
class Segment(object):
    """Create lung masks for resampled CT volumes and apply them.

    For every ``resamples/*.npy`` volume of the selected phase, a binary
    lung mask is computed and written to ``mask/``, and the masked volume
    (image * mask) is written to ``lung/``.
    """

    # constructor
    def __init__(self, dataPath, phase = "deploy"):
        """Remember the dataset root and processing phase.

        dataPath -- root directory of the dataset (trailing slash expected)
        phase    -- sub-directory name of the processing phase
                    (e.g. "train", "test" or "deploy")
        """
        self.dataPath = dataPath
        self.phase = phase
        self.phaseSubPath = self.phase + "/"

    #helper
    def segmentSingleFile(self, file):
        """Segment one resampled volume and persist its mask and masked lung."""
        filename = os.path.basename(file)
        serializer = NoduleSerializer(self.dataPath, self.phaseSubPath)

        image = serializer.readFromNpy("resamples/", filename)
        mask = lung_segmentation.segment_HU_scan_elias(image)
        serializer.writeToNpy("mask/", filename, mask)

        # Zero out everything outside the lung mask.
        image = image * mask
        serializer.writeToNpy("lung/", filename, image)
        print("{0}".format(filename))
        # self.progressBar.update(1)

    # interface
    def segmentAllFiles(self):
        """Segment every resampled volume of the phase in parallel."""
        fileList = glob(os.path.join(self.dataPath, self.phaseSubPath, "resamples/*.npy"))
        # self.progressBar = tqdm(total = len(fileList))

        # Close and join the pool so worker processes are reclaimed
        # deterministically (the original leaked the Pool).
        pool = Pool()
        try:
            pool.map(self.segmentSingleFile, fileList)
        finally:
            pool.close()
            pool.join()
# Entry point: segment every resampled volume of the "deploy" phase
# under the hard-coded dataset root.
if __name__ == "__main__":
    seg = Segment("d:/project/tianchi/data/", "deploy")
    seg.segmentAllFiles()
|
1.2 What are the Advantages of Rooting?
1.3 What are the Disadvantages of Rooting?
What is meant by rooting in Android? “What is rooting? Why should I root my Android device?” These are common questions that I get asked quite often. Today’s lesson is to talk to you about both the advantages and disadvantages of rooting your Android devices. But before we get started, a word of caution: rooting or modifying your phone in any way will void your manufacturer’s warranty and possibly “brick” it. What does “bricking” your device mean, you ask? Exactly what you think… It means screwing up your phone software so badly that your phone can no longer function properly and is pretty much as useless as a brick. I do not in any way recommend anyone to root their Android device. This article is simply to introduce you to the subject of rooting and present you with both the pros and cons so that you can make an educated decision on your own.
“Rooting” your device means obtaining “superuser” rights and permissions to your Android’s software. With these elevated user privileges, you gain the ability to load custom software (ROM’s), install custom themes, increase performance, increase battery life, and the ability to install software that would otherwise cost extra money (ex: WiFi tethering). Rooting is essentially “hacking” your Android device. In the iPhone world, this would be the equivalent to “Jailbreaking” your phone.
Why is it called Rooting?
The term “root” comes from the Unix/Linux world and is used to describe a user who has “superuser” rights or permissions to all the files and programs in the software OS (Operating System). The root user, because they have “superuser” privileges, can essentially change or modify any of the software code on the device. You see, your phone manufacturer/carrier only gives you “guest” privileges when you purchase your device. They do this for good reason… they don’t want you getting into certain parts of the software on your phone and screwing it up beyond repair. It makes it much easier for them to manage and update the devices if they lock it all down. This way, all the users are running the same unmodified version of the phone’s software. This makes it much easier for them to support the devices. But, for the tech-savvy crowd, only having “guest” privileges on your device is pretty lame and it locks down a lot of potentially useful features.
There are many custom ROM’s and apps available for rooted devices that will allow you to drastically improve the performance (speed) and also extend battery life on your device. A lot of developers tweak the kernels (layer of code that handles communication between the hardware and software) for added performance, battery life, and more.
By rooting your Android device you also gain the ability to unlock some features that your carrier may charge for. One example is enabling free WiFi and USB tethering, which many carriers charge money for. Now, I’m not suggesting you do this. But I did want to make you aware of the fact that it is possible to do this. However, your carrier may catch on to the fact that you are using your device as a free WiFi hotspot and figure out a way to charge you for it. So use this feature at your own risk!
The number one reason not to root your device is the potential risk of “bricking” it. As mentioned earlier, “bricking” your device means screwing up your phone software so badly that your phone can no longer function properly and is pretty much as useless as a brick. You would likely need to purchase a new Android device since the manufacturer of your device will void the warranty after any attempts at rooting.
There is an increased risk of unknowingly installing malicious software when you root an Android device. Root access circumvents the security restrictions that are put in place by the Android OS. There isn’t really an effective way to tell just what the application intends to do with that “superuser” power. You are putting a lot of trust into the developer’s hands. In most cases, these applications are open source and the community can take a look at the source code to assess the risk. But, nevertheless, the risk is there. Fortunately, malicious software on rooted devices hasn’t really been a problem as of yet. But I thought it was worth mentioning since this could be a potential risk in the future. I’d recommend installing an Anti-Virus and Security App just to be safe. Lookout Mobile Security seems to be one of the best ones available at the moment.
Can I Unroot my device if I change my mind?
Hey guys, check out my other post on How To Root Any Android Device Using Root Genius.
|
import subprocess
import pytest
from threema.gateway import ReceptionCapability
from threema.gateway import __version__ as _version
from threema.gateway import feature_level
from threema.gateway.key import Key
class TestCLI:
@pytest.mark.asyncio
async def test_invalid_command(self, cli):
with pytest.raises(subprocess.CalledProcessError):
await cli('meow')
@pytest.mark.asyncio
async def test_get_version(self, cli):
output = await cli('version')
assert 'Version: {}'.format(_version) in output
assert 'Feature Level: {}'.format(feature_level) in output
@pytest.mark.asyncio
async def test_invalid_key(self, cli):
with pytest.raises(subprocess.CalledProcessError) as exc_info:
await cli('encrypt', 'meow', 'meow', input='meow')
assert 'Invalid key format' in exc_info.value.output
with pytest.raises(subprocess.CalledProcessError) as exc_info:
await cli(
'encrypt', pytest.msgapi.public, pytest.msgapi.private, input='meow')
assert 'Invalid key type' in exc_info.value.output
@pytest.mark.asyncio
async def test_encrypt_decrypt(self, cli):
input = '私はガラスを食べられます。それは私を傷つけません。'
output = await cli(
'encrypt', pytest.msgapi.private, pytest.msgapi.public, input=input)
nonce, data = output.splitlines()
output = await cli(
'decrypt', pytest.msgapi.private, pytest.msgapi.public, nonce, input=data)
assert input in output
@pytest.mark.asyncio
async def test_encrypt_decrypt_by_file(self, cli, private_key_file, public_key_file):
input = '私はガラスを食べられます。それは私を傷つけません。'
output = await cli(
'encrypt', private_key_file, public_key_file, input=input)
nonce, data = output.splitlines()
output = await cli(
'decrypt', private_key_file, public_key_file, nonce, input=data)
assert input in output
@pytest.mark.asyncio
async def test_generate(self, cli, tmpdir):
private_key_file = tmpdir.join('tmp_private_key')
public_key_file = tmpdir.join('tmp_public_key')
await cli('generate', str(private_key_file), str(public_key_file))
private_key = Key.decode(private_key_file.read().strip(), Key.Type.private)
public_key = Key.decode(public_key_file.read().strip(), Key.Type.public)
assert private_key
assert public_key
@pytest.mark.asyncio
async def test_hash_no_option(self, cli):
with pytest.raises(subprocess.CalledProcessError):
await cli('hash')
@pytest.mark.asyncio
async def test_hash_valid_email(self, cli):
hash_ = '1ea093239cc5f0e1b6ec81b866265b921f26dc4033025410063309f4d1a8ee2c'
output = await cli('hash', '-e', 'test@threema.ch')
assert hash_ in output
output = await cli('hash', '--email', 'test@threema.ch')
assert hash_ in output
@pytest.mark.asyncio
async def test_hash_valid_phone_number(self, cli):
hash_ = 'ad398f4d7ebe63c6550a486cc6e07f9baa09bd9d8b3d8cb9d9be106d35a7fdbc'
output = await cli('hash', '-p', '41791234567')
assert hash_ in output
output = await cli('hash', '--phone', '41791234567')
assert hash_ in output
@pytest.mark.asyncio
async def test_derive(self, cli):
output = await cli('derive', pytest.msgapi.private)
assert pytest.msgapi.public in output
@pytest.mark.asyncio
async def test_send_simple(self, cli):
id_, secret = pytest.msgapi.id, pytest.msgapi.secret
output = await cli('send_simple', 'ECHOECHO', id_, secret, input='Hello!')
assert output
@pytest.mark.asyncio
async def test_send_e2e(self, cli, server):
output_1 = await cli(
'send_e2e', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, input='Hello!')
assert output_1
output_2 = await cli(
'send_e2e', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
pytest.msgapi.private, '-k', server.echoecho_encoded_key, input='Hello!')
assert output_2
assert output_1 == output_2
@pytest.mark.asyncio
async def test_send_image(self, cli, server):
    # Each image send must upload exactly one blob; results with and without
    # an explicit recipient key ('-k') must match.
    server.latest_blob_ids = []
    output_1 = await cli(
        'send_image', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg)
    assert output_1
    assert len(server.latest_blob_ids) == 1
    output_2 = await cli(
        'send_image', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key)
    assert output_2
    assert output_1 == output_2
    assert len(server.latest_blob_ids) == 2
@pytest.mark.asyncio
async def test_send_video(self, cli, server):
    # Each video send uploads two blobs (video + thumbnail). Covers the
    # default path, an explicit recipient key ('-k'), and a duration ('-d').
    server.latest_blob_ids = []
    output_1 = await cli(
        'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_mp4, server.threema_jpg)
    assert output_1
    assert len(server.latest_blob_ids) == 2
    output_2 = await cli(
        'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_mp4, server.threema_jpg,
        '-k', server.echoecho_encoded_key)
    assert output_2
    assert output_1 == output_2
    assert len(server.latest_blob_ids) == 4
    output = await cli(
        'send_video', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_mp4, server.threema_jpg,
        '-d', '1337')
    assert output
    assert len(server.latest_blob_ids) == 6
@pytest.mark.asyncio
async def test_send_file(self, cli, server):
    # A plain file send uploads one blob; adding a thumbnail ('-t') doubles
    # that to two blobs per send. '-k' supplies the recipient key explicitly.
    server.latest_blob_ids = []
    output_1 = await cli(
        'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg)
    assert output_1
    assert len(server.latest_blob_ids) == 1
    output_2 = await cli(
        'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key)
    assert output_2
    assert output_1 == output_2
    assert len(server.latest_blob_ids) == 2
    output = await cli(
        'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg, '-t', server.threema_jpg)
    assert output
    assert len(server.latest_blob_ids) == 4
    output = await cli(
        'send_file', 'ECHOECHO', pytest.msgapi.id, pytest.msgapi.secret,
        pytest.msgapi.private, server.threema_jpg, '-k', server.echoecho_encoded_key,
        '-t', server.threema_jpg)
    assert output
    assert len(server.latest_blob_ids) == 6
@pytest.mark.asyncio
async def test_lookup_no_option(self, cli):
    # 'lookup' without any criterion option (-e/-p/-i) must exit non-zero.
    with pytest.raises(subprocess.CalledProcessError):
        await cli('lookup', pytest.msgapi.id, pytest.msgapi.secret)
@pytest.mark.asyncio
async def test_lookup_id_by_email(self, cli):
    # Looking up by e-mail (short and long option form) must return the id.
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret,
        '-e', 'echoecho@example.com')
    assert 'ECHOECHO' in output
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret,
        '--email', 'echoecho@example.com')
    assert 'ECHOECHO' in output
@pytest.mark.asyncio
async def test_lookup_id_by_phone(self, cli):
    # Looking up by phone number (short and long option form) must return the id.
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret, '-p', '44123456789')
    assert 'ECHOECHO' in output
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret, '--phone', '44123456789')
    assert 'ECHOECHO' in output
@pytest.mark.asyncio
async def test_lookup_pk_by_id(self, cli, server):
    # Looking up by id (short and long option form) must return the public key.
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret, '-i', 'ECHOECHO')
    assert server.echoecho_encoded_key in output
    output = await cli(
        'lookup', pytest.msgapi.id, pytest.msgapi.secret, '--id', 'ECHOECHO')
    assert server.echoecho_encoded_key in output
@pytest.mark.asyncio
async def test_capabilities(self, cli):
    # The capabilities listing must mention every expected reception
    # capability value for ECHOECHO.
    output = await cli(
        'capabilities', pytest.msgapi.id, pytest.msgapi.secret, 'ECHOECHO')
    capabilities = {
        ReceptionCapability.text,
        ReceptionCapability.image,
        ReceptionCapability.video,
        ReceptionCapability.file
    }
    assert all((capability.value in output for capability in capabilities))
@pytest.mark.asyncio
async def test_credits(self, cli):
    # 'credits' reports the remaining balance; the nocredit account shows 0.
    output = await cli('credits', pytest.msgapi.id, pytest.msgapi.secret)
    assert '100' in output
    output = await cli(
        'credits', pytest.msgapi.nocredit_id, pytest.msgapi.secret)
    assert '0' in output
@pytest.mark.asyncio
async def test_invalid_id(self, cli):
    # An unknown API identity must fail with the server's explanation
    # present in the captured process output.
    with pytest.raises(subprocess.CalledProcessError) as exc_info:
        await cli(
            'credits', pytest.msgapi.noexist_id, pytest.msgapi.secret)
    assert 'API identity or secret incorrect' in exc_info.value.output
@pytest.mark.asyncio
async def test_insufficient_credits(self, cli):
    # Sending from the zero-balance account must fail with a credit error.
    with pytest.raises(subprocess.CalledProcessError) as exc_info:
        id_, secret = pytest.msgapi.nocredit_id, pytest.msgapi.secret
        await cli('send_simple', 'ECHOECHO', id_, secret, input='!')
    assert 'Insufficient credits' in exc_info.value.output
|
1 What is the formal word of "chance"?
2 What is the opposite word of "success"?
3 I p----- coffee to tea. Fill in the blank.
4 Thank you so much. I really a-------- it. Fill in the blank.
5 What is the difference between "mail" and "email"?
3 I p----- coffee to tea. Fill in the blank.
|
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Copyright 2014, Deutsche Telekom AG - Laboratories (T-Labs)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.contrib import auth
from django.contrib.auth import login
from rest_framework import generics
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
from rest_framework import status
import permissions
import serializers
class SessionView(APIView):
    """Login/logout endpoint that exposes the Django session as a resource."""

    def get_serializer(self, *args, **kw):
        # POST (login) validates credentials; every other verb just renders
        # the current session.
        if self.request.method == 'POST':
            serializer_class = serializers.LoginSerializer
        else:
            serializer_class = serializers.SessionSerializer
        kw['context'] = {
            'request': self.request
        }
        return serializer_class(*args, **kw)

    def get_object(self):
        # The "object" backing this view is the request's session itself.
        return self.request.session

    def get(self, request, *args, **kw):
        # An anonymous user has no session resource to show.
        if not request.user.is_authenticated():
            return Response(status=status.HTTP_404_NOT_FOUND)
        return Response(self.get_serializer(self.get_object()).data)

    def post(self, request, *args, **kw):
        serializer = self.get_serializer(data=request.DATA)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # Credentials checked out: start the session and point the client at it.
        login(request, serializer.object)
        location_header = {'Location': reverse('session', request=request)}
        return Response(status=status.HTTP_201_CREATED, headers=location_header)

    def delete(self, request, *args, **kw):
        # Logging out destroys the session; nothing to return.
        auth.logout(request)
        return Response(status=status.HTTP_204_NO_CONTENT)
class SignupView(generics.CreateAPIView):
    # Account creation endpoint; only unauthenticated clients may sign up.
    serializer_class = serializers.SignupSerializer
    permission_classes = (permissions.UnauthenticatedUser,)
    def post_save(self, user, **kw):
        # Log the freshly created user in right after the account is saved.
        login(self.request, user)
|
Smoke generator ED-20 by B & G from Poland. The ED-20 is able to produce a big smoke cloud in a short time!
Ideal for AirSoft, Military, Paintball, etc.
Burn time: +/- 40 sec.
|
#coding=utf-8
from math import hypot
class Vector:
    """A simple two-dimensional vector supporting +, *, abs() and truthiness."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        """Return an unambiguous string representation.

        repr() obtains an object's string form through __repr__.  The
        difference from __str__ is that __str__ is used by str() and by
        print(), and its result is meant to be friendlier to end users.
        When a class has no __str__ and Python needs one, the interpreter
        falls back to __repr__.
        """
        return "Vector(%r, %r)" % (self.x, self.y)

    def __abs__(self):
        """Return the Euclidean length of the vector.

        Infix operators follow the basic principle of not mutating their
        operands but producing a new value instead.
        """
        return hypot(self.x, self.y)

    def __bool__(self):
        """Return False only for the zero vector.

        By default, instances of user-defined classes are always truthy,
        unless the class implements __bool__ or __len__.  bool(x) calls
        x.__bool__(); if that is missing, it tries x.__len__() and treats a
        result of 0 as False.

        BUG FIX: this method was previously named ``__bool_`` (one trailing
        underscore), so it was never invoked by bool() and every Vector --
        including Vector(0, 0) -- evaluated as truthy.
        """
        # return bool(abs(self))  -- equivalent, but the form below is faster
        return bool(self.x or self.y)

    def __add__(self, other):
        # Component-wise addition; returns a new Vector, leaves operands intact.
        x = self.x + other.x
        y = self.y + other.y
        return Vector(x, y)

    def __mul__(self, scalar):
        # Scalar multiplication (vector * number); returns a new Vector.
        return Vector(self.x * scalar, self.y * scalar)
|
The search for Nien Nunb concludes -- and a Star Wars circle is complete!
Actor Christian Simpson concludes his detective work-meets-Star Wars magic true story of trying to track down Kipsang Rotich, the original voice of Nien Nunb in Return of the Jedi, to reprise his role in Star Wars: The Force Awakens. Be sure to read part one in case you missed it!
On my search for Kipsang Rotich, someone had seen my original local forum post that biographer L.D. Goldberg’s info had led me to, and within hours I was on the phone to a “Mr. Bill Rotich.” Was it the original voice of Nien Nunb in Return of the Jedi?
“Bill” was very understanding as I explained things in broad strokes and asked if he’d mind verifying some due diligence checks. I asked Bill about the wedding date from Kipsang Rotich’s postcard, Kipsang’s wife’s name (I figured he should know that), and more.
I texted supervising sound editor Matthew Wood just two words as Bill told me his answers in a friendly and distinctive voice.
Bill had answered it all 100 percent correctly! Within minutes he was telling me tales of Pat Welsh, of Hemingway safaris, how his family knew fellow Kenyan Lupita Nyong’o, and how Ben Burtt had offered him the voice of E.T., but he had graciously passed it along to Ms. Welsh as his senior!
The Nunb had awakened! Instantly the response came back from Matthew.
And so I arranged for Kipsang Rotich to set up a call with Matthew Wood to welcome him formally back to the Star Wars family. And after that, a bonus message came in to me.
I later asked Mr. Abrams how he felt when he heard the news. “I was relieved that fans would have that authentic voice they knew and loved back in the Star Wars universe,” he said.
But we weren’t there yet! One small thing remained. To actually record Nien Nunb’s lines.
Matthew gave awesome, specific direction as to tone, dialect, that it must match Return of the Jedi, and that there were just two lines needed.
He also detailed that the dialogue should be similar to the situation in the Falcon like last time. But also everything had to be able to match the puppeteering mouth work, necessitating the lines being two to five syllables, in Kikuyu. Not much to ask for a remote recording in Africa, then!
We found a studio near Kipsang that could provide the required 24bit/48khz/WAV file (for us audio geeks), could record on a shotgun mic with a Lavalier backup (for us super geeks), and could do it that same day (for deadline geeks). I tried writing out everything for Kipsang, but it quickly started to look too confusing for failsafe transcontinental communication. It seemed the right approach for the most reliable end result would be to simply give the lines to actor Kipsang Rotich in the format he would be used to.
Suddenly I realized that I was in a way writing — well okay, typing — a tiny part of the screenplay for Episode VII. I took pause at that moment, and thought back over all the ways Star Wars had held me, kept me connected to it, since I was seven years old watching Empire with my dad for the first time. And those incredible “coincidences” I wrote about in my other articles here. It was almost too much. I shook myself out of it, for there was “work” to do and a deadline several million people were waiting for.
Within hours I had the final audio files on my computer. And there he was playing over the speakers. The distinctive “Nien Nunb” voice and laugh! And you can’t help but laugh at moments like that. Yoda had been right. We tried not. We did it!
Then the real magic happened. Matthew cut the dialect so it fit the on-screen mouth movements perfectly, whilst retaining the unedited Haya that sounds like the “real” Sullustan we knew and loved from 1983. The next day a new TV spot aired featuring that very recording!
Before I knew it, the movie was out. Matthew kindly invited the Hollywood voice actors to the cast and crew screening. How brilliant to see Nunb up on screen, with those vocals that had been files in my inbox days before.
And there at the end of the movie, Kipsang Rotich finally got his credit after 32 years.
That moved me and that alone makes it all worthwhile.
But the most curious twist was still to come. I visited my parents (hi mum!) near London this Christmas. There, my young nephew Alfie, who is a recent huge Star Wars fan, had heard about my old Kenner AT-AT and other toys still in my parents’ attic. And so he requested we take him up to see them. Of course, we did. And there, waiting patiently under the Millennium Falcon cockpit canopy, as it must have been hiding for countless years, was just one, lone, presumed-missing action figure.
I think he’s right. I just call it the will of the Force.
StarWars.com thanks Christian Simpson for sharing this amazing story!
|
from __future__ import absolute_import
import unittest
import os
import sys
import warnings
sys.path.insert(1, os.path.abspath('..'))
import numpy as np
import burnman
from burnman.mineral import Mineral
from burnman.processchemistry import dictionarize_formula, formula_mass
from util import BurnManTest
class forsterite (Mineral):
    # Forsterite (Mg2SiO4) endmember using the 'hp_tmt' equation of state.
    # Parameter values presumably follow the Holland & Powell dataset
    # conventions used elsewhere in burnman -- confirm against the dataset.
    def __init__(self):
        formula = 'Mg2.0Si1.0O4.0'
        formula = dictionarize_formula(formula)
        self.params = {
            'name': 'fo',
            'formula': formula,
            'equation_of_state': 'hp_tmt',
            'H_0': -2172590.0,
            'S_0': 95.1,
            'V_0': 4.366e-05,
            'Cp': [233.3, 0.001494, -603800.0, -1869.7],
            'a_0': 2.85e-05,
            'K_0': 1.285e+11,
            'Kprime_0': 3.84,
            'Kdprime_0': -3e-11,
            'n': sum(formula.values()),          # atoms per formula unit
            'molar_mass': formula_mass(formula)}
        Mineral.__init__(self)
class fayalite (Mineral):
    # Fayalite (Fe2SiO4) endmember, companion to forsterite above; same
    # 'hp_tmt' equation of state and parameter layout.
    def __init__(self):
        formula = 'Fe2.0Si1.0O4.0'
        formula = dictionarize_formula(formula)
        self.params = {
            'name': 'fa',
            'formula': formula,
            'equation_of_state': 'hp_tmt',
            'H_0': -1477720.0,
            'S_0': 151.0,
            'V_0': 4.631e-05,
            'Cp': [201.1, 0.01733, -1960600.0, -900.9],
            'a_0': 2.82e-05,
            'K_0': 1.256e+11,
            'Kprime_0': 4.68,
            'Kdprime_0': -3.7e-11,
            'n': sum(formula.values()),          # atoms per formula unit
            'molar_mass': formula_mass(formula)}
        Mineral.__init__(self)
# One-mineral solid solution
class forsterite_ss(burnman.SolidSolution):
    # Degenerate one-endmember "solution": its thermodynamic properties
    # should reduce to those of the pure forsterite endmember.
    def __init__(self, molar_fractions=None):
        self.name = 'Dummy solid solution'
        self.solution_type = 'symmetric'
        self.endmembers = [[forsterite(), '[Mg]2SiO4']]
        self.energy_interaction = []
        burnman.SolidSolution.__init__(self, molar_fractions)
# Two-mineral solid solution
class forsterite_forsterite_ss(burnman.SolidSolution):
    # Two identical endmembers with zero interaction energy: any composition
    # should behave exactly like pure forsterite.
    def __init__(self, molar_fractions=None):
        self.name = 'Fo-Fo solid solution'
        self.solution_type = 'symmetric'
        self.endmembers = [[forsterite(), '[Mg]2SiO4'], [
            forsterite(), '[Mg]2SiO4']]
        self.energy_interaction = [[0.]]
        burnman.SolidSolution.__init__(self, molar_fractions)
# Ideal solid solution
class olivine_ideal_ss(burnman.SolidSolution):
    # Ideal Fo-Fa olivine join (no interaction parameters).
    # NOTE(review): the name string says 'Fo-Fo' although the endmembers are
    # Fo and Fa -- looks like a copy-paste leftover; behaviour unaffected.
    def __init__(self, molar_fractions=None):
        self.name = 'Fo-Fo solid solution'
        self.solution_type = 'ideal'
        self.endmembers = [[
            forsterite(), '[Mg]2SiO4'], [fayalite(), '[Fe]2SiO4']]
        burnman.SolidSolution.__init__(self, molar_fractions)
# Olivine solid solution
class olivine_ss(burnman.SolidSolution):
    # Symmetric (regular) Fo-Fa olivine with a single 8.4 kJ/mol-looking
    # interaction term -- units presumably J/mol, confirm with burnman docs.
    def __init__(self, molar_fractions=None):
        self.name = 'Olivine'
        self.solution_type = 'symmetric'
        self.endmembers = [[
            forsterite(), '[Mg]2SiO4'], [fayalite(), '[Fe]2SiO4']]
        self.energy_interaction = [[8.4e3]]
        burnman.SolidSolution.__init__(self, molar_fractions)
# Orthopyroxene solid solution
class orthopyroxene(burnman.SolidSolution):
    # Two-site opx-style solution. The interaction energy R*1000 is
    # presumably chosen so the dilute-limit activity coefficient is exp(1)
    # at 1000 K (see test_activity_coefficients_non_ideal below).
    def __init__(self, molar_fractions=None):
        # Name
        self.name = 'orthopyroxene'
        self.solution_type = 'symmetric'
        self.endmembers = [[forsterite(), '[Mg][Mg]Si2O6'], [
            forsterite(), '[Mg1/2Al1/2][Mg1/2Al1/2]AlSiO6']]
        self.energy_interaction = [[burnman.constants.gas_constant * 1.0e3]]
        burnman.SolidSolution.__init__(self, molar_fractions)
# Three-endmember, two site solid solution
class two_site_ss(burnman.SolidSolution):
    # Three-endmember, two-site symmetric solution; the mixing-site
    # occupancies are encoded in the bracketed formula strings.
    def __init__(self, molar_fractions=None):
        self.name = 'two_site_ss'
        self.solution_type = 'symmetric'
        self.endmembers = [[forsterite(), '[Mg]3[Al]2Si3O12'], [
            forsterite(), '[Fe]3[Al]2Si3O12'], [forsterite(), '[Mg]3[Mg1/2Si1/2]2Si3O12']]
        self.energy_interaction = [[10.0e3, 5.0e3], [-10.0e3]]
        burnman.SolidSolution.__init__(self, molar_fractions)
# Three-endmember, two site solid solution
class two_site_ss_subregular(burnman.SolidSolution):
    # Same solution as two_site_ss, but expressed in the subregular
    # formalism with each pairwise interaction duplicated symmetrically --
    # the two models should give identical excess properties (see
    # test_subregular below).
    def __init__(self, molar_fractions=None):
        # Name
        self.name = 'two_site_ss (subregular symmetric)'
        self.solution_type = 'subregular'
        self.endmembers = [[forsterite(), '[Mg]3[Al]2Si3O12'], [
            forsterite(), '[Fe]3[Al]2Si3O12'], [forsterite(), '[Mg]3[Mg1/2Si1/2]2Si3O12']]
        # Interaction parameters
        self.energy_interaction = [
            [[10.e3, 10.e3], [5.e3, 5.e3]], [[-10.e3, -10.e3]]]
        burnman.SolidSolution.__init__(self, molar_fractions)
class test_solidsolution(BurnManTest):
    # Verifies that burnman's SolidSolution machinery reproduces endmember
    # thermodynamics in degenerate cases and correct mixing behaviour in
    # real ones.

    def setup_1min_ss(self):
        # One-endmember "solution" and the pure mineral at the same P, T.
        P = 1.e5
        T = 1000.
        fo = forsterite()
        fo.set_state(P, T)
        fo_ss = forsterite_ss()
        fo_ss.set_composition([1.0])
        fo_ss.set_state(P, T)
        return fo, fo_ss

    def setup_2min_ss(self):
        # Two identical endmembers; any composition should equal pure fo.
        P = 1.e5
        T = 1000.
        fo = forsterite()
        fo.set_state(P, T)
        fo_fo_ss = forsterite_forsterite_ss()
        fo_fo_ss.set_composition([0.3, 0.7])
        fo_fo_ss.set_state(P, T)
        return fo, fo_fo_ss

    def setup_ol_ss(self):
        # Real Fo-Fa olivine evaluated at the pure-forsterite end of the join.
        P = 1.e5
        T = 1000.
        fo = forsterite()
        fo.set_state(P, T)
        ol_ss = olivine_ss()
        ol_ss.set_composition([1.0, 0.0])
        ol_ss.set_state(P, T)
        return fo, ol_ss

    def test_1_gibbs(self):
        # One-endmember solution must match the pure phase property-by-property.
        fo, fo_ss = self.setup_1min_ss()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            endmember_properties = [fo.gibbs, fo.H, fo.S, fo.V, fo.C_p,
                                    fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr, fo.G]
            ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V, fo_ss.C_p,
                             fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr, fo_ss.G]
        assert len(w) == 3  # we expect to trigger 3 shear modulus warnings
        self.assertArraysAlmostEqual(endmember_properties, ss_properties)

    def test_2_gibbs(self):
        # Fo-Fo solution with zero interaction must also match pure fo.
        fo, fo_ss = self.setup_2min_ss()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            endmember_properties = [fo.gibbs, fo.H, fo.S, fo.V, fo.C_p,
                                    fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr, fo.G]
            ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V, fo_ss.C_p,
                             fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr, fo_ss.G]
        assert len(w) == 4  # we expect to trigger 4 shear modulus warnings
        self.assertArraysAlmostEqual(endmember_properties, ss_properties)

    def test_ol_gibbs(self):
        # Pure-forsterite composition of the real olivine join matches fo.
        fo, fo_ss = self.setup_ol_ss()
        endmember_properties = [
            fo.gibbs, fo.H, fo.S, fo.V, fo.C_p, fo.C_v, fo.alpha, fo.K_T, fo.K_S, fo.gr]
        ss_properties = [fo_ss.gibbs, fo_ss.H, fo_ss.S, fo_ss.V,
                         fo_ss.C_p, fo_ss.C_v, fo_ss.alpha, fo_ss.K_T, fo_ss.K_S, fo_ss.gr]
        self.assertArraysAlmostEqual(endmember_properties, ss_properties)

    def test_ol_Wh(self):
        # At x=0.5 a regular solution's excess enthalpy is W/4.
        ol_ss = olivine_ss()
        H_excess = ol_ss.solution_model.excess_enthalpy(
            1.e5, 1000., [0.5, 0.5])  # Hxs = Exs if Vxs=0
        We = ol_ss.solution_model.We[0][1]
        self.assertArraysAlmostEqual([We / 4.0], [H_excess])

    def test_order_disorder(self):
        # A pure endmember composition has zero excess Gibbs energy.
        opx = orthopyroxene()
        opx.set_composition(np.array([0.0, 1.0]))
        opx.set_state(1.e5, 300.)
        self.assertArraysAlmostEqual([opx.excess_gibbs], [0.])

    def test_site_totals(self):
        # Site occupancies implied by the composition must sum to 1 per site.
        ss = two_site_ss()
        ss.set_composition([0.3, 0.3, 0.4])
        ss.set_state(1.e5, 300.)
        site_fractions = np.dot(
            ss.molar_fractions, ss.solution_model.endmember_occupancies)
        i = 0
        site_fill = []
        ones = [1.] * ss.solution_model.n_sites
        for site in ss.solution_model.sites:
            site_fill.append(sum(site_fractions[i:i + len(site)]))
            i += len(site)
        self.assertArraysAlmostEqual(site_fill, ones)

    def test_set_method(self):
        # Changing the EOS method on a solution must not raise.
        ss = olivine_ss()
        ss.set_method('hp_tmt')

    def test_molar_mass(self):
        # Solution molar mass is the molar-fraction-weighted endmember average.
        ss = olivine_ss()
        ss.set_composition(np.array([0.5, 0.5]))
        self.assertArraysAlmostEqual([ss.molar_mass], [0.5 *
                                                       forsterite().params['molar_mass'] + 0.5 * fayalite().params['molar_mass']])

    def test_subregular(self):
        # Symmetric model and its duplicated-subregular form must agree.
        ss0 = two_site_ss()
        ss1 = two_site_ss_subregular()
        ss0.set_composition([0.3, 0.3, 0.4])
        ss0.set_state(1.e5, 300.)
        ss1.set_composition([0.3, 0.3, 0.4])
        ss1.set_state(1.e5, 300.)
        self.assertArraysAlmostEqual(
            ss0.excess_partial_gibbs, ss1.excess_partial_gibbs)

    def test_activities_ideal(self):
        # Ideal two-site olivine at x=0.5: activity = (0.5)^2 per endmember.
        ol = olivine_ideal_ss()
        ol.set_composition(np.array([0.5, 0.5]))
        ol.set_state(1.e5, 1000.)
        self.assertArraysAlmostEqual(ol.activities, [0.25, 0.25])

    def test_activity_coefficients_ideal(self):
        # Ideal solutions have unit activity coefficients by definition.
        ol = olivine_ideal_ss()
        ol.set_composition(np.array([0.5, 0.5]))
        ol.set_state(1.e5, 1000.)
        self.assertArraysAlmostEqual(ol.activity_coefficients, [1., 1.])

    def test_activity_coefficients_non_ideal(self):
        # With W = R*1000 at T = 1000 K the dilute endmember has gamma = e.
        opx = orthopyroxene()
        opx.set_composition(np.array([0.0, 1.0]))
        opx.set_state(1.e5, 1000.)
        self.assertArraysAlmostEqual(
            opx.activity_coefficients, [np.exp(1.), 1.])
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
|
Dutchmen Aspen Trail 2610RKS travel trailer highlights: Private Bedroom Dual Entry Outdoor Kitchen Large Slide Grab the family and your luggage and head out on the open road in this Aspen Trail travel trailer! With the two entry doors that have been included in this trailer, you will be able to sneak out each morning to your favorite fishing hole from the front private bedroom without waking any of the other sleeping campers, and the outdoor kitchen will be an excellent spot to grill up the catch of the day later on. You will also find the large pantry to be helpful for storing all of your snacks and spices, and the large slide will open up the interior living space. With each Aspen Trail travel trailer by Dutchmen RV you can travel during any season thanks to the heated and enclosed underbelly, and you will find the mega exterior pass-through storage quite handy for your larger items, such as lawn chairs, a hammock, and campfire grilling utensils. The interior luxuries, such as the Everdream mattress, laundry chute, and 82" ceiling height, are sure to impress, and the two exterior speakers give you the perfect reason to gather outside underneath the awning with friends.
|
import io
import os
from functools import partial
from tlz import concat
from ..bytes import open_files, read_bytes
from ..delayed import delayed
from ..utils import parse_bytes, system_encoding
from .core import from_delayed
delayed = delayed(pure=True)
def read_text(
    urlpath,
    blocksize=None,
    compression="infer",
    encoding=system_encoding,
    errors="strict",
    linedelimiter=os.linesep,
    collection=True,
    storage_options=None,
    files_per_partition=None,
    include_path=False,
):
    """Read lines from text files

    Parameters
    ----------
    urlpath : string or list
        Absolute or relative filepath(s). Prefix with a protocol like ``s3://``
        to read from alternative filesystems. To read from multiple files you
        can pass a globstring or a list of paths, with the caveat that they
        must all have the same protocol.
    blocksize: None, int, or str
        Size (in bytes) to cut up larger files. Streams by default.
        Can be ``None`` for streaming, an integer number of bytes, or a string
        like "128MiB"
    compression: string
        Compression format like 'gzip' or 'xz'. Defaults to 'infer'
    encoding: string
    errors: string
    linedelimiter: string
    collection: bool, optional
        Return dask.bag if True, or list of delayed values if false
    storage_options: dict
        Extra options that make sense to a particular storage connection, e.g.
        host, port, username, password, etc.
    files_per_partition: None or int
        If set, group input files into partitions of the requested size,
        instead of one partition per file. Mutually exclusive with blocksize.
    include_path: bool
        Whether or not to include the path in the bag.
        If true, elements are tuples of (line, path).
        Default is False.

    Examples
    --------
    >>> b = read_text('myfiles.1.txt')  # doctest: +SKIP
    >>> b = read_text('myfiles.*.txt')  # doctest: +SKIP
    >>> b = read_text('myfiles.*.txt.gz')  # doctest: +SKIP
    >>> b = read_text('s3://bucket/myfiles.*.txt')  # doctest: +SKIP
    >>> b = read_text('s3://key:secret@bucket/myfiles.*.txt')  # doctest: +SKIP
    >>> b = read_text('hdfs://namenode.example.com/myfiles.*.txt')  # doctest: +SKIP

    Parallelize a large file by providing the number of uncompressed bytes to
    load into each partition.

    >>> b = read_text('largefile.txt', blocksize='10MB')  # doctest: +SKIP

    Get file paths of the bag by setting include_path=True

    >>> b = read_text('myfiles.*.txt', include_path=True) # doctest: +SKIP
    >>> b.take(1) # doctest: +SKIP
    (('first line of the first file', '/home/dask/myfiles.0.txt'),)

    Returns
    -------
    dask.bag.Bag or list
        dask.bag.Bag if collection is True or list of Delayed lists otherwise.

    See Also
    --------
    from_sequence: Build bag from Python sequence
    """
    # blocksize splits individual files; files_per_partition groups whole
    # files -- the two partitioning strategies cannot be combined.
    if blocksize is not None and files_per_partition is not None:
        raise ValueError("Only one of blocksize or files_per_partition can be set")
    if isinstance(blocksize, str):
        blocksize = parse_bytes(blocksize)

    # Text-mode lazy file handles; decoding happens inside the workers.
    files = open_files(
        urlpath,
        mode="rt",
        encoding=encoding,
        errors=errors,
        compression=compression,
        **(storage_options or {})
    )
    if blocksize is None:
        if files_per_partition is None:
            # One partition per file: each delayed task materialises the
            # full line list of a single file.
            blocks = [
                delayed(list)(delayed(partial(file_to_blocks, include_path))(fil))
                for fil in files
            ]
        else:
            # Group files into fixed-size batches, concatenating their lines
            # lazily so each batch becomes one partition.
            blocks = []
            for start in range(0, len(files), files_per_partition):
                block_files = files[start : (start + files_per_partition)]
                block_lines = delayed(concat)(
                    delayed(map)(
                        partial(file_to_blocks, include_path),
                        block_files,
                    )
                )
                blocks.append(block_lines)
    else:
        # Byte-range partitioning: read_bytes splits files on the line
        # delimiter; each raw block is decoded to a list of lines.
        o = read_bytes(
            urlpath,
            delimiter=linedelimiter.encode(),
            blocksize=blocksize,
            sample=False,
            compression=compression,
            include_path=include_path,
            **(storage_options or {})
        )
        raw_blocks = o[1]
        blocks = [delayed(decode)(b, encoding, errors) for b in concat(raw_blocks)]
        if include_path:
            # read_bytes returns per-file block lists plus paths (o[2]);
            # repeat each path once per block of its file, then attach it
            # to every line of the corresponding decoded block.
            paths = list(
                concat([[path] * len(raw_blocks[i]) for i, path in enumerate(o[2])])
            )
            blocks = [
                delayed(attach_path)(entry, path) for entry, path in zip(blocks, paths)
            ]

    if not blocks:
        raise ValueError("No files found", urlpath)

    if collection:
        blocks = from_delayed(blocks)

    return blocks
def file_to_blocks(include_path, lazy_file):
    """Lazily yield the lines of *lazy_file*.

    When *include_path* is true, each line is paired with the file's path
    as a ``(line, path)`` tuple; otherwise bare lines are yielded.
    """
    with lazy_file as handle:
        if include_path:
            for line in handle:
                yield line, lazy_file.path
        else:
            for line in handle:
                yield line
def attach_path(block, path):
    """Yield every item of *block* paired with *path* as an (item, path) tuple."""
    for item in block:
        yield (item, path)
def decode(block, encoding, errors):
    """Decode a bytes block and split it into lines, keeping line endings."""
    return io.StringIO(block.decode(encoding, errors)).readlines()
|
Wheat beers have a long history in this world. A popular staple of the European diet, and a grain that is readily available around the world, wheat has been an important agricultural product to civilization. Its pairing with yeast has created two different kinds of bread, solid and liquid, which have been a staple of many civilizations’ diets. Amphoras of beer have been archaeologically recovered all over the world that provide evidence of its production since the age of the Sumerians, and in many cases wheat grain residue has been found. Beer is old, it’s best fresh, but in concept it has shared a symbiosis with humanity.
German wheat beers have been archaeologically dated as far back as 400 years ago, and we can presume that they were brewed long before that as well. Wheat, barley, and rye were a staple of the Bavarian crop region and were all used to brew different kinds of beer depending on their availability. As populations grew, the wheat and rye began to become more sparse and brewers came in conflict with bakers who needed the grains to make bread. The Reinheitsgebot, the Bavarian Purity Law of 1516, was signed into effect partially because of the lack of wheat grain available to bakers, and partially as a food safety law, as some brewers were putting nasty and unsafe adjuncts into their beer.
According to the Reinheitsgebot, wheat beers should have been completely banned in Bavaria. Due to some political loopholes, a single brewery located near the Czech border was given the sole right to brew wheat beers, for a hefty fee. This right continued until the Duke who owned the brewery died without heir, leaving the brewery and the rights to brew wheat beer to a more powerful duke in Munich who took the opportunity and created a vast monopoly of wheat beers. At one point, wheat beer was nearly one-third of Bavaria’s domestic revenue and helped fund the 30-year war between Catholic and Protestant Kings of Bavaria and Austria.
As with many of the older beer styles, the wheat beer has had several rises and falls in popularity. It has had to fight with the rise of lagers, with the export of English and Scottish styles of beer, with World War restrictions on crops and taxation, and with the changes in popular consumer demand. Hefeweizen, as a distinct style, is a product of the last 200 years that has been very popular since the 1960’s. Hefeweizen is the combination of the words “yeast” and “wheat” to provide a very exact definition of what the beer is — yeast and wheat. It is hazy from the wheat’s large protein content and from a large amount of yeast remaining in suspension within the beer. This causes the distinctive rock-foam head when hefeweizens are poured. Known for their distinctiveness in flavor, hefeweizens can have notes ranging from banana, clove, and fruit, to sour and bready. Definitely worth a look into.
Banana and clove are abundant in the aroma with a sturdy malty backbone. Cloudy in appearance, the hefeweizen is an interplay between the distinctness of the wheat malt and the spicy German yeast. A subtle bitterness helps to keep this beer bright and refreshing, perfect for spring afternoons and new moons.
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
sys.path.append('/corpus/python')
from Annotation import AnnotationEditor
CONFIG_PATH = "/corpus/config.ini"
STR_NONE = 'NONE'
STR_ALL = 'ALL'
def choose_annotators(dbh, only_moderated):
    """Pick one responsible user per book.

    A book's syntax moderator (from the books table) always wins when one is
    recorded. Otherwise, unless only_moderated is set, the first annotator
    with completed status (status = 2) is chosen.
    Returns a dict mapping book_id -> user_id.
    """
    moderators = {}
    if only_moderated:
        dbh.execute("""
            SELECT book_id, old_syntax_moder_id
            FROM books
            WHERE syntax_on > 0
        """)
        for row in dbh.fetchall():
            moderators[row['book_id']] = row['old_syntax_moder_id']
    dbh.execute("""
        SELECT user_id, book_id
        FROM anaphora_syntax_annotators
        WHERE status = 2
        ORDER BY book_id, user_id
    """)
    chosen = {}
    for row in dbh.fetchall():
        book_id = row['book_id']
        if book_id in chosen:
            continue  # first match per book wins
        if book_id in moderators:
            chosen[book_id] = moderators[book_id]
        elif not only_moderated:
            chosen[book_id] = row['user_id']
    return chosen
def export_simple_groups(dbh, annotators):
    """Print one tab-separated line per accepted simple group.

    Columns: book id, group id, comma-joined token ids, head (or the
    NONE/ALL placeholder), and group type. Groups marked 'bad' are dropped.
    """
    groups = get_simple_groups(dbh, annotators, include_dummy=True)
    for gid, group in sorted(groups.items()):
        if group['marks'] == 'bad':
            continue
        if group['marks'] == 'no head':
            head_str = STR_NONE
        elif group['marks'] == 'all':
            head_str = STR_ALL
        else:
            head_str = group['head']
        print("{4}\t{0}\t{1}\t{2}\t{3}".format(
            gid, ','.join(map(str, group['tokens'])), head_str, group['type'], group['book_id'])
        )
def get_simple_groups(dbh, annotators, include_dummy=False):
    """Collect token-level (simple) groups made by the chosen annotators.

    Returns a dict: group_id -> {head, type, tokens, marks, book_id}.
    Groups made by users other than the book's chosen annotator are skipped.
    include_dummy keeps groups of type 16 (presumably a placeholder/dummy
    group type -- confirm against the schema).
    """
    groups = {}
    q = """
        SELECT group_id, group_type, user_id, head_id, book_id, token_id, marks
        FROM anaphora_syntax_groups g
        JOIN anaphora_syntax_groups_simple gs
        USING (group_id)
        LEFT JOIN tokens tf
        ON (gs.token_id = tf.tf_id)
        JOIN sentences USING (sent_id)
        JOIN paragraphs USING (par_id)
    """
    if not include_dummy:
        q += " WHERE group_type != 16 "
    q += " ORDER BY group_id, token_id"
    dbh.execute(q)
    for row in dbh.fetchall():
        # Keep only groups created by the annotator chosen for that book.
        if row['book_id'] not in annotators or annotators[row['book_id']] != row['user_id']:
            continue
        if row['group_id'] in groups:
            # Subsequent rows of the same group just add more tokens.
            groups[row['group_id']]['tokens'].append(row['token_id'])
        else:
            groups[row['group_id']] = {
                'head': row['head_id'],
                'type': row['group_type'],
                'tokens': [row['token_id']],
                'marks': row['marks'],
                'book_id': row['book_id']  # we expect they are all the same
            }
    return groups
def export_complex_groups(dbh, annotators):
    """Print the complex-group section in the same format as the simple one.

    Starts with a 'COMPLEX' marker line; token ids are sorted because they
    are accumulated as a set. Groups marked 'bad' are dropped.
    """
    print("COMPLEX")
    groups = get_complex_groups(dbh, annotators)
    for gid, group in sorted(groups.items()):
        if group['marks'] == 'bad':
            continue
        if group['marks'] == 'no head':
            head_str = STR_NONE
        elif group['marks'] == 'all':
            head_str = STR_ALL
        else:
            head_str = group['head']
        print("{4}\t{0}\t{1}\t{2}\t{3}".format(
            gid, ','.join(map(str, sorted(group['tokens']))), head_str, group['type'], group['book_id']
        ))
def get_complex_groups(dbh, annotators):
    """Load complex (nested) syntax groups and resolve them down to tokens.

    A complex group's children may be simple groups or other complex
    groups. Returns a dict: group_id -> {head, type, children, user_id,
    tokens, book_id, marks}, where 'tokens' is the flattened token set and
    'head' has been resolved to a concrete token id.
    """
    simple = get_simple_groups(dbh, annotators, include_dummy=True)
    groups = {}
    dbh.execute("""
    SELECT parent_gid, child_gid, group_type, head_id, user_id, marks
    FROM anaphora_syntax_groups_complex gc
    LEFT JOIN anaphora_syntax_groups g ON (gc.parent_gid = g.group_id)
    ORDER BY parent_gid, child_gid
    """)
    for row in dbh.fetchall():
        if row['parent_gid'] not in groups:
            groups[row['parent_gid']] = {
                'head': row['head_id'],
                'type': row['group_type'],
                'children': [row['child_gid']],
                'user_id': row['user_id'],
                'tokens': set(),
                'book_id': 0,
                'marks': row['marks']
            }
        else:
            groups[row['parent_gid']]['children'].append(row['child_gid'])
    # Remove groups whose subgroup chain is broken (e.g. children that
    # belong to other annotators and were filtered out of `simple`).
    # BUGFIX: materialize the key list first -- on Python 3, deleting from
    # a dict while iterating its live .keys() view raises RuntimeError.
    gids = list(groups.keys())
    for gid in gids:
        if not check_subgroups(gid, simple, groups):
            del groups[gid]
    # Flatten token lists and propagate book ids onto each group.
    for gid in groups:
        update_token_list(groups[gid], simple, groups)
        assign_book_id(groups[gid], simple, groups)
    # Resolve each group's head (a group id) down to a token id.
    for gid in groups:
        groups[gid]['head'] = get_head_token_id(groups[gid]['head'], simple, groups)
    return groups
def check_subgroups(gid, simple_groups, complex_groups):
    """Return True iff every leaf reachable from `gid` is a known simple group.

    Complex groups are valid only when all of their children (recursively)
    are valid; an id found in neither mapping is invalid.
    """
    if gid in complex_groups:
        children = complex_groups[gid]['children']
        return all(
            check_subgroups(child, simple_groups, complex_groups)
            for child in children
        )
    return gid in simple_groups
def assign_book_id(group, simple_groups, complex_groups):
    """Propagate a book id onto `group` from one of its children (in place).

    Simple children provide the book id directly; complex children are
    resolved recursively first. Raises KeyError for an unknown child id.
    """
    if group['book_id']:
        return  # already assigned
    for child in group['children']:
        if child in simple_groups:
            group['book_id'] = simple_groups[child]['book_id']
            return
        if child in complex_groups:
            nested = complex_groups[child]
            assign_book_id(nested, simple_groups, complex_groups)
            group['book_id'] = nested['book_id']
        else:
            raise KeyError("group #{0} not found".format(child))
def update_token_list(group, simple_groups, complex_groups):
    """Fill `group['tokens']` with the tokens of all (nested) children.

    Mutates `group` in place; does nothing if tokens were already
    collected. Raises KeyError for an unknown child id.
    """
    if len(group['tokens']) > 0:
        return  # already computed
    for child in group['children']:
        if child in simple_groups:
            group['tokens'].update(simple_groups[child]['tokens'])
        elif child in complex_groups:
            nested = complex_groups[child]
            # Resolve the nested group first, then absorb its tokens.
            update_token_list(nested, simple_groups, complex_groups)
            group['tokens'].update(nested['tokens'])
        else:
            raise KeyError("group #{0} not found".format(child))
def get_head_token_id(old_id, simple_groups, complex_groups):
    """Follow head references through complex groups down to a token id.

    Returns 0 when the chain starts at 0 or ends at an unknown group
    (head groups are sometimes deleted).
    """
    node = old_id
    while True:
        if node == 0:
            return 0
        if node in complex_groups:
            # A complex group's head is itself a group id: keep resolving.
            node = complex_groups[node]['head']
        elif node in simple_groups:
            return simple_groups[node]['head']
        else:
            return 0  # sometimes head groups get deleted
def do_export(dbh, gtype, only_moderated):
    """Export groups of the requested kind ('simple', 'complex' or 'both').

    Any gtype other than 'complex' triggers the simple export and any
    gtype other than 'simple' triggers the complex export, so 'both'
    runs both.
    """
    annotators = choose_annotators(dbh, only_moderated)
    if gtype != 'complex':
        export_simple_groups(dbh, annotators)
    if gtype != 'simple':
        export_complex_groups(dbh, annotators)
def main():
    """Command-line entry point: validate arguments and run the export."""
    editor = AnnotationEditor(CONFIG_PATH)
    only_moderated = False
    # First argument selects which group kinds to export.
    if len(sys.argv) < 2 or sys.argv[1] not in ['simple', 'complex', 'both']:
        sys.stderr.write("""Usage: {0} {{simple|complex|both}} [mod]\n\tmod: export only moderators' groups, otherwise first user's annotation for each text\n""".format(sys.argv[0]))
        sys.exit(1)
    # Optional second argument 'mod' restricts the export to moderated texts.
    if len(sys.argv) > 2 and sys.argv[2] == 'mod':
        only_moderated = True
    do_export(editor.db_cursor, sys.argv[1], only_moderated)
# Run the export when invoked as a script.
if __name__ == "__main__":
    main()
|
The above fantastic pillow was made by my 8 year old daughter!
This is a picture of her on Christmas Day. She could not wait to try it out. I conveniently have a bag of rectangles that are scraps (and rejects). I gave her the bag and gave her a quick lesson. It did not take her long to discover that she loved it.
I love looking at her concentration face as she sews! Cute.
One more of her face:) Her Dad would say she gets that from me.
She is a very quick learner. I told her the goal was to have a straight seam and you know what, by the end of her project she had mastered the straight seam. She designed and pieced the pillow top on her own, then she quilted it with a layer of fleece on the back (no batting). I trimmed it for her because I love my rotary cutter and was not ready for her to touch it.
I asked what type of backing she wanted and she chose an envelope fold so that she could do it all by herself. She hemmed each edge, we pinned it to the pillow front and she sewed around. The inside edges are finished with pinking shears, so they won't fray.
She enjoyed herself so much that she is thinking of a new project. Last night I roped her into helping me with a rag blanket I started 2 years ago. She did a great job. Hopefully she can help me finish it!
|
#coding:utf-8
import GJDB
db = GJDB.GJDB()
db.rp()
db.selectDB('house_report')
#dts = []
dts = ['2013_01','2013_02','2013_03','2014_01','2014_02','2014_03']
result1 = open('result1.txt','w')
result2 = open('result2.txt','w')
for dt in dts:
print dt
sql1 = 'SELECT AccountCityId, FROM_UNIXTIME(ReportDate), SUM(ClickCount) ,SUM(HouseCount) FROM house_account_generalstat_report_' + dt + ' WHERE AccountCityId IN (0,100,401,400,800,801,1300,1400,1900,900,901,903,902,1000,1001,600,601,300,2800,500,402,2600,1800,1700,2500,2505,1200,1600,1501,1506,1502,1500,1507,1503,1513,200,2900,2200,2000,1100,2300,2100) AND CountType IN (1,3) GROUP BY AccountCityId, ReportDate;'
sql2 = 'SELECT AccountCityId, FROM_UNIXTIME(ReportDate), HouseType, SUM(ClickCount) ,SUM(HouseCount) FROM house_account_generalstat_report_' + dt + ' WHERE AccountCityId IN (0,100,401,400,800,801,1300,1400,1900,900,901,903,902,1000,1001,600,601,300,2800,500,402,2600,1800,1700,2500,2505,1200,1600,1501,1506,1502,1500,1507,1503,1513,200,2900,2200,2000,1100,2300,2100) AND CountType IN (1,3) AND housetype IN (1,3,5) GROUP BY AccountCityId,ReportDate,HouseType;'
data1 = db.selectData(sql1)
data2 = db.selectData(sql2)
for row in data1:
result1.write(str(row[0]) + '\t' + str(row[1]) + '\t' + str(row[2]) + '\t' + str(row[3]) + '\n')
for row in data2:
result2.write(str(row[0]) + '\t' + str(row[1]) + '\t' + str(row[2]) + '\t' + str(row[3]) + '\t' + str(row[4]) + '\n')
|
This Rotary Claw Latch is spring loaded, “slams” shut just like a bear claw latch and can be used in a variety of applications like trunks and hoods.
This Rotary Claw Latch is spring loaded, “slams” shut just like a bear claw latch and can be used in a variety of applications like trunks and hoods. It is 1.8″ high from the mounting surface to the top, 1.25″ from the mounting surface to the striker center and the attachment holes are 3.25″ center-to-center. The finish is zinc yellow chromate. Latch has cable release mechanism below the mounting surface. Striker included.
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 Simone F. <groppo8@gmail.com>
#
# This file is part of wikipedia-tags-in-osm.
# wikipedia-tags-in-osm is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# wikipedia-tags-in-osm is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with wikipedia-tags-in-osm. If not, see <http://www.gnu.org/licenses/>.
"""Download coordinates of Wikipedia articles from MediaWiki API
https://www.mediawiki.org/wiki/Extension:GeoData#prop.3Dcoordinates
"""
import os
import urllib
import urllib2
import json
class CoordsDownloader:
    """Download and cache article coordinates from the Wikipedia GeoData API.

    Coordinates already recorded in `coords_file` are reused; only missing
    titles are queried (50 per request, the API batch limit), and the merged
    result is written back to `coords_file` as tab-separated
    "title<TAB>lat<TAB>lon" lines.
    """

    def __init__(self, user_agent, coords_file, answer_file, wikipedia_lang,
                 titles):
        # `answer_file` is a scratch file holding the raw JSON of the last
        # API response; it is re-read by parse_answer().
        self.user_agent = user_agent
        self.coords_file = coords_file
        self.wikipedia_lang = wikipedia_lang
        self.titles = sorted(titles)
        self.titles_coords, titles_coords_num = self.read_previous_coords()
        # Only query titles never checked before.
        titles_to_check = [
            t for t in self.titles if t not in self.titles_coords]
        # with open("titlestodownload", "w") as f:
        #     f.write("\n".join([t.encode("utf-8") for t in titles_to_check]))
        print "Titles:", len(self.titles)
        print "checked in the past: {0}, with coordinates {1}".format(
            len(self.titles_coords), titles_coords_num)
        if len(titles_to_check) == 0:
            print ("The coordinates of all the articles have already been "
                   "downloaded.")
            return
        print "to be checked:", len(titles_to_check)
        # Query Wikipedia for coordinates
        self.query_wikipedia(titles_to_check, answer_file)
        # Save updated titles' coordinates
        self.save_titles_coords()

    def read_previous_coords(self):
        """Read the titles whose coordinates were downloaded in the past.

        Returns (titles_coords, titles_coords_num): a dict mapping
        title -> [lat, lon] ([] when the article has no coordinates) and
        the count of titles that actually have coordinates.
        """
        titles_coords = {}
        titles_coords_num = 0
        if os.path.isfile(self.coords_file):
            lines = [line.rstrip('\n').split("\t")
                     for line in open(self.coords_file)]
            for line in lines:
                # Titles are stored with spaces; underscores are used internally.
                title = line[0].decode("utf-8").replace(" ", "_")
                if len(line) == 1:
                    # Pad rows written without lat/lon columns.
                    # NOTE(review): a 2-column row would still raise
                    # IndexError below -- confirm the writer always emits
                    # 1 or 3 columns.
                    line.append("")
                    line.append("")
                lat = line[1]
                lon = line[2]
                titles_coords[title] = []
                if (lat, lon) != ("", ""):
                    titles_coords_num += 1
                    titles_coords[title] = [lat, lon]
        return titles_coords, titles_coords_num

    def query_wikipedia(self, titles_to_check, answer_file):
        """Query Wikipedia API for coordinates, batching 50 titles per request."""
        # Create titles_strings with 50 titles each to query Wikipedia API
        titles_strings = []
        for fifty_titles in [titles_to_check[i:i + 50] for i in range(
                0, len(titles_to_check), 50)]:
            titles_string = "|".join(fifty_titles)
            titles_strings.append(titles_string)
        print "{0} queries of 50 titles each will be necessay".format(
            len(titles_strings))
        # Download
        print "\n- Download coordinates from Wikipedia"
        for i, titles_string in enumerate(titles_strings):
            continue_string = ""
            cocontinue_string = ""
            print "\nrequest: {0}/{1}".format(i + 1, len(titles_strings))
            # Debugging
            # answer = raw_input("\n Download 50 titles' coordinates "
            #                    "from Wikipedia?\n    [y/N]")
            answer = "y"
            if answer.lower() != "y":
                print " Download stopped."
                break
            # Keep requesting until the API stops returning a continuation.
            while True:
                wikipedia_answer = self.download_coordinates(answer_file,
                                                            titles_string,
                                                            continue_string,
                                                            cocontinue_string)
                if not wikipedia_answer:
                    break
                # Parsing
                continue_string, cocontinue_string = self.parse_answer(
                    answer_file)
                if (continue_string, cocontinue_string) == ("", ""):
                    break
                else:
                    print "continue", continue_string, cocontinue_string
            # A failed download aborts the whole run, not just this batch.
            if not wikipedia_answer:
                break

    def download_coordinates(self, answer_file, titles_string, continue_string,
                             cocontinue_string):
        """Request one batch of coordinates and write the raw JSON answer
        to `answer_file`.

        Returns True on success, False if the HTTP request failed.
        """
        titles = urllib.quote_plus(
            titles_string.replace("_", " ").encode("utf-8"))
        url = ('http://{0}.wikipedia.org/w/api.php?action=query'
               '&format=json'
               '&titles={1}'
               '&prop=coordinates'
               '&coprimary=primary'
               '&maxlag=5'
               '&continue='.format(self.wikipedia_lang, titles))
        if continue_string != "":
            url += '{0}&cocontinue={1}'.format(
                urllib.quote_plus(continue_string),
                urllib.quote_plus(cocontinue_string))
        request = urllib2.Request(url, None, {'User-Agent': self.user_agent})
        try:
            wikipedia_answer = urllib2.urlopen(request)
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit; catching urllib2.URLError would be safer -- confirm.
        except:
            print ("\n* a problem occurred during download:\n"
                   "{0}, {1}, {2}\ncontinue...".format(
                       titles_string.encode("utf-8"),
                       continue_string.encode("utf-8"),
                       cocontinue_string.encode("utf-8")))
            return False
        else:
            with open(answer_file, "w") as f:
                f.write(wikipedia_answer.read())
            return True

    def parse_answer(self, answer_file):
        """Read coordinates from Wikipedia API answer.

        Updates self.titles_coords in place and returns the
        (continue, cocontinue) pair for the next request, or ("", "")
        when the batch is complete.
        """
        with open(answer_file, "r") as f:
            data = json.load(f)
        for page in data["query"]["pages"].values():
            title = page["title"].replace(" ", "_")
            if title not in self.titles_coords:
                # Mark the title as checked even when it has no coordinates.
                self.titles_coords[title] = ["", ""]
            if "coordinates" in page:
                for coords in page["coordinates"]:
                    self.titles_coords[title] = [coords["lat"], coords["lon"]]
            print "{0}/{1} {2} {3}".format(len(self.titles_coords),
                                           len(self.titles),
                                           title.encode("utf-8"),
                                           self.titles_coords[title])
        if "continue" in data:
            return (data["continue"]["continue"],
                    data["continue"]["cocontinue"])
        else:
            return ("", "")

    def save_titles_coords(self):
        """Save the updated list of articles with coordinates."""
        with open(self.coords_file, "w") as f:
            for i, (title, coordinates) in enumerate(
                    self.titles_coords.iteritems()):
                if len(coordinates) == 2:
                    lat, lon = coordinates
                else:
                    lat, lon = "", ""
                f.write("{0}\t{1}\t{2}".format(title.encode("utf-8"),
                                               lat,
                                               lon))
                # No trailing newline after the last record.
                if i < len(self.titles_coords) - 1:
                    f.write("\n")
if __name__ == "__main__":
    # Self-test: download coordinates for a fixed list of Italian libraries.
    user_agent = "Some coordinates download test"
    coords_file = "articles_coords_test.csv"
    titles = ["Archivio Storico Capitolino",
              "Biblioteca Universitaria Alessandrina",
              "Biblioteca Vallicelliana",
              "Biblioteca apostolica vaticana",
              "Biblioteca centrale della Facoltà di Architettura",
              "Biblioteca del Ministero degli Affari Esteri",
              "Biblioteca dell'Accademia Nazionale dei Lincei e Corsiniana",
              "Biblioteca dell'Istituto dell'Enciclopedia Italiana",
              "Biblioteca di papa Agapito I",
              "Biblioteca di storia moderna e contemporanea",
              "Biblioteca e museo teatrale del Burcardo",
              "Biblioteca comunale Augusto Tersenghi",
              "Biblioteca Civica Centrale",
              "Biblioteca Nazionale del Club Alpino Italiano",
              "Biblioteca Reale",
              "Biblioteca capitolare (Vercelli)",
              "Biblioteca civica Italo Calvino",
              "Biblioteca civica Luigi Carluccio",
              "Biblioteca internazionale di cinema e fotografia Mario Gromo",
              "Biblioteca della Libera Università di Bolzano"]
    # Titles are byte strings in this py2 script; decode before use.
    CoordsDownloader(user_agent,
                     coords_file,
                     "answer.json",
                     "it",
                     [t.decode("utf-8") for t in titles])
    print "\nDone."
|
Egypt-brokered deal comes after Israeli fighter jets attack 80 sites in Gaza in retaliation for rockets fired at Israel.
The Palestinian group Islamic Jihad says it has agreed to stop firing rockets towards Israel after talks with an Egyptian delegation.
The announcement on Saturday came hours after Israel launched air raids hitting more than 80 locations in the besieged Gaza Strip after it said rockets were fired from there into southern Israel.
"We announce a ceasefire with Israel after Egyptian contacts with us," Dawoud Shihab, Islamic Jihad spokesperson, told Anadolu Agency.
He added the group "will be committed to the deal as long as Israel is committed to".
Khaled al-Batsh, a top official of the group, told The Associated Press news agency on Saturday that it was grateful to Egypt for brokering the agreement "to restore calm to the Gaza Strip".
"The Egyptians thankfully intervened and exerted huge efforts between the resistance [factions] in Gaza and the Zionist [Israeli] occupation," Batsh said.
"The Egyptian efforts led to understandings to restore calm to the Gaza Strip. We appreciate the Egyptian endeavour and hope for all kinds of [Israeli] aggression to stop."
Israel did not immediately respond, and past ceasefires in the conflict have not always stuck.
Egyptian mediators have been attempting to negotiate a truce between Hamas, which rules the Gaza Strip, and Israel.
The Islamic Jihad movement, which was founded in the late 1970s, is much smaller than Hamas and maintains that Palestine, with its borders marked from the Jordan river to the Mediterranean sea, is Islamic land and it is religiously prohibited to compromise or surrender any of it.
Its military wing, the Al-Quds Brigades, became active during the 1987 Intifada by launching operations against Israeli military and non-military targets.
Earlier on Saturday, an Israeli military spokesperson said 34 rockets had been fired at Israel throughout the night.
Israel's Iron Dome rocket-defence system reportedly intercepted 13 rockets, two of which landed in Gaza and the remainder fell in open spaces in southern Israel.
In response, the Israeli military said it struck about 80 sites across Gaza by early Saturday morning, including a security headquarters building.
Among the targets was Hamas' new headquarters for the General Security Services, the military said.
Health officials in Gaza said nine Palestinians were wounded in one of the Israeli attacks and a hospital was badly damaged.
No Israelis were reported injured from the rockets.
Hamas has disavowed the recent rocket attacks and blamed fringe groups bent on sabotaging United Nations and Egyptian efforts to broker a long-term truce.
But Israel has said it holds Hamas responsible for any rocket fire regardless of who launched it.
Hamas has been pushing for the end of an 11-year siege on Gaza, imposed by Israel and Egypt, that has trapped more than two million Palestinians, and left residents with limited access to water and electricity.
|
"""
inputs:
4 cap sensors on I2C
3 rotary encoders on SPI
output topics:
pitch_key_event - integer from 0 to 47
voice_key_1_position - float from 0.0 to 1.0
voice_key_2_position - float from 0.0 to 1.0
voice_key_3_position - float from 0.0 to 1.0
"""
import Adafruit_MPR121.MPR121 as MPR121
import importlib
import json
import os
import Queue
import random
import settings
import sys
import threading
import time
from thirtybirds_2_0.Network.manager import init as network_init
from thirtybirds_2_0.Network.email_simple import init as email_init
from thirtybirds_2_0.Adaptors.Sensors import AMT203
# Filesystem layout used to locate host code and the thirtybirds framework.
BASE_PATH = os.path.dirname(os.path.realpath(__file__))
UPPER_PATH = os.path.split(os.path.dirname(os.path.realpath(__file__)))[0]
DEVICES_PATH = "%s/Hosts/" % (BASE_PATH )
THIRTYBIRDS_PATH = "%s/thirtybirds" % (UPPER_PATH )
# Make local packages importable regardless of the working directory.
sys.path.append(BASE_PATH)
sys.path.append(UPPER_PATH)
class Main(threading.Thread):
    """Background sender: drains a queue of (topic, message) pairs and
    publishes each one on the thirtybirds network."""

    def __init__(self, hostname):
        threading.Thread.__init__(self)
        self.hostname = hostname
        self.queue = Queue.Queue()

    def add_to_queue(self, topic, msg):
        """Enqueue a (topic, msg) pair for asynchronous sending."""
        self.queue.put([topic, msg])

    def run(self):
        # Block on the queue forever, forwarding each item to the network.
        while True:
            topic, msg = self.queue.get(True)
            network.send(topic, msg)
class MPR121Array(threading.Thread):
    # Polls four MPR121 capacitive-touch sensors (12 pins each) and queues
    # a "pitch_key_event" carrying a global key position.
    def __init__(self, i2c_address):
        threading.Thread.__init__(self)
        position_raw = 0  # NOTE(review): local, never used -- leftover?
        self.i2c_address = i2c_address  # list of 4 I2C addresses
        self.capsensors = []
        self.last_touched = [0,0,0,0]  # previous touch bitmask per sensor
        for sensor_id in range(4):
            self.capsensors.append(MPR121.MPR121())
            if not self.capsensors[sensor_id].begin(self.i2c_address[sensor_id]):
                print('Error initializing MPR121 @{}'.format(self.i2c_address[sensor_id]))
            print repr(self.capsensors[sensor_id])
        print "class CapSensorArray instantiated with values", self.i2c_address
    def run(self):
        print "class CapSensorArray thread started"
        # Seed per-sensor touch state so the first pass only reports changes.
        for sensor_id in range(4):
            self.last_touched[sensor_id] = self.capsensors[sensor_id].touched()
        global_position = 1
        while True:
            for sensor_id in range(4):
                current_touched = self.capsensors[sensor_id].touched()
                for i in range(12):
                    pin_bit = 1 << i
                    # Rising edge: pin touched now but not before.
                    if current_touched & pin_bit and not self.last_touched[sensor_id] & pin_bit:
                        print('{0} touched!'.format(i))
                        global_position = i + (12 * sensor_id)
                    # Falling edge: pin released.
                    if not current_touched & pin_bit and self.last_touched[sensor_id] & pin_bit:
                        print('{0} released!'.format(i))
                self.last_touched[sensor_id] = current_touched
            # NOTE(review): once any key > 1 has been touched, the last
            # position is re-sent every polling cycle -- confirm intended.
            if global_position > 1:
                time.sleep(0.01)
                main.add_to_queue("pitch_key_event", global_position)
class Key(threading.Thread):
    # Polls an AMT203 absolute encoder over SPI and queues normalized key
    # positions under the topic given by `name`.
    def __init__(self, name, bus, deviceId):
        threading.Thread.__init__(self)
        self.name = name  # output topic, e.g. "voice_key_1_position"
        self.bus = bus
        self.deviceId = deviceId
        print "creating amt203 object"
        self.encoder = AMT203.AMT203(bus, deviceId)
        print "setting zero ", self.bus, self.deviceId
        self.encoder.set_zero()
        print "after zero ", self.bus, self.deviceId
        print "class Key instantiated with values", name, bus, deviceId
        # Encoder travel (raw counts) mapped onto the output range.
        self.encoder_min = 0.0
        self.encoder_max = 120.0
        self.last_pos = 0.0
    def run(self):
        print "class Key thread started"
        while True:
            pos = self.encoder.get_position()
            if self.last_pos != pos:  # only report changes
                mapped_pos = self.map_key(self.name, pos)
                main.add_to_queue(self.name, mapped_pos)
                self.last_pos = pos
            time.sleep(0.01)
    def map_key(self, name, value):
        """Clamp a raw encoder reading and scale it into [0.0, 0.8]."""
        # Readings above 1000 are zeroed -- presumably filtering encoder
        # wrap-around glitches; confirm against the AMT203 data sheet.
        value = value if value <= 1000 else 0
        value = value if value <= self.encoder_max else self.encoder_max
        value = value if value >= self.encoder_min else self.encoder_min
        mapped_value = 0.8*(((value - self.encoder_min))/(self.encoder_max - self.encoder_min))
        return mapped_value
def network_status_handler(msg):
    """Log thirtybirds network status changes."""
    print "network_status_handler", msg
def network_message_handler(msg):
    """Log incoming network messages; msg is a (topic, payload) pair."""
    print "network_message_handler", msg
    topic = msg[0]
    #host, sensor, data = yaml.safe_load(msg[1])
    if topic == "__heartbeat__":
        print "heartbeat received", msg
# Module-level singletons, assigned in init().
network = None  # thirtybirds network connection
main = None  # Main sender thread
def init(HOSTNAME):
    """Bring up networking and start the input-polling threads.

    Starts the Main sender thread plus one Key thread per rotary encoder.
    The MPR121 cap-sensor array is currently disabled (commented out).
    """
    global network
    global main
    network = network_init(
        hostname=HOSTNAME,
        role="client",
        discovery_multicastGroup=settings.discovery_multicastGroup,
        discovery_multicastPort=settings.discovery_multicastPort,
        discovery_responsePort=settings.discovery_responsePort,
        pubsub_pubPort=settings.pubsub_pubPort,
        message_callback=network_message_handler,
        status_callback=network_status_handler
    )
    network.subscribe_to_topic("system")  # subscribe to all system messages
    #network.subscribe_to_topic("sensor_data")
    main = Main(HOSTNAME)
    main.daemon = True
    main.start()
    #mpr121array = MPR121Array([0x5a, 0x5b, 0x5c, 0x5d])
    #mpr121array.start()
    key_0 = Key("voice_key_2_position",0,0)
    key_1 = Key("voice_key_1_position",0,1)
    #key_2 = Key("voice_key_3_position",1,1)
    key_0.daemon = True
    key_0.start()
    # Staggered startup -- apparently the SPI zeroing needs settling time;
    # TODO confirm whether the 5 s delays are actually required.
    time.sleep(5)
    key_1.daemon = True
    key_1.start()
    time.sleep(5)
    #key_2.start()
|
Image Title: Free Standing Bathroom Sink Designs And Ideas In Decor 8. Filename: free-standing-bathroom-sink-designs-and-ideas-in-decor-8.jpg. Image Dimension: 787 x 787 pixels. Images Format: jpg/jpeg. Publisher/Author: Zackary Balistreri. Uploaded Date: Wednesday - May 09th. 2018 16:47:12 PM. Category: Architecture. Image Source: bhg.com.
Tap The Thumbnail Bellow to See Related Gallery of "Free Standing Bathroom Sink Designs And Ideas In Decor 8"
|
import os
import wx
from . import input
import api
import nvwave
import tones
import speech
import ctypes
import braille
import inputCore
import versionInfo
import logging
logger = logging.getLogger('local_machine')
def setSpeechCancelledToFalse():
    """Reset NVDA's "speech cancelled" flag so queued speech is spoken.

    NVDA does not officially support mutating internal speech state, so
    this remains a fragile workaround.
    """
    if versionInfo.version_year < 2021:
        speech.beenCanceled = False
        return
    # beenCanceled became read-only in NVDA#12395; poke the internal
    # speech state object directly instead.
    speech.speech._speechState.beenCanceled = False
class LocalMachine:
    """Executes commands received from a remote NVDA instance on the local
    machine: speech, tones, braille, keyboard input and clipboard."""

    def __init__(self):
        self.is_muted = False  # when True, remote audio/speech is ignored
        self.receiving_braille = False  # when True, remote braille is shown locally

    def play_wave(self, fileName):
        """Instructed by remote machine to play a wave file."""
        if self.is_muted:
            return
        if os.path.exists(fileName):
            # ignore async / asynchronous from kwargs:
            # playWaveFile should play asynchronously from NVDA remote.
            nvwave.playWaveFile(fileName=fileName, asynchronous=True)

    def beep(self, hz, length, left, right, **kwargs):
        """Play a beep locally unless muted."""
        if self.is_muted:
            return
        tones.beep(hz, length, left, right)

    def cancel_speech(self, **kwargs):
        """Stop any in-progress speech (on the wx main thread)."""
        if self.is_muted:
            return
        wx.CallAfter(speech._manager.cancel)

    def speak(
            self,
            sequence,
            priority=speech.priorities.Spri.NORMAL,
            **kwargs
    ):
        """Speak a remote speech sequence locally unless muted."""
        if self.is_muted:
            return
        setSpeechCancelledToFalse()
        wx.CallAfter(speech._manager.speak, sequence, priority)

    def display(self, cells, **kwargs):
        """Show remote braille cells on the local display.

        Only shown when remote braille is enabled and the cells fit the
        local display; short rows are padded with blank cells.
        """
        if self.receiving_braille and braille.handler.displaySize > 0 and len(cells) <= braille.handler.displaySize:
            # We use braille.handler._writeCells since this respects thread safe displays and automatically falls back to noBraille if desired
            cells = cells + [0] * (braille.handler.displaySize - len(cells))
            wx.CallAfter(braille.handler._writeCells, cells)

    def braille_input(self, **kwargs):
        """Inject remote braille keyboard input as a local gesture."""
        try:
            inputCore.manager.executeGesture(input.BrailleInputGesture(**kwargs))
        except inputCore.NoInputGestureAction:
            pass  # gesture has no bound action locally; ignore

    def set_braille_display_size(self, sizes, **kwargs):
        """Negotiate a shared display size: the smallest positive size among
        all connected machines, including the local display."""
        sizes.append(braille.handler.display.numCells)
        try:
            size = min(i for i in sizes if i > 0)
        except ValueError:
            # No machine reported a positive size; fall back to local.
            size = braille.handler.display.numCells
        braille.handler.displaySize = size
        braille.handler.enabled = bool(size)

    def send_key(self, vk_code=None, extended=None, pressed=None, **kwargs):
        """Replay a remote key event locally."""
        wx.CallAfter(input.send_key, vk_code, None, extended, pressed)

    def set_clipboard_text(self, text, **kwargs):
        """Copy text received from the remote machine to the local clipboard."""
        api.copyToClip(text=text)

    def send_SAS(self, **kwargs):
        """Send a Secure Attention Sequence (Ctrl+Alt+Del) locally."""
        ctypes.windll.sas.SendSAS(0)
|
"Tickle" on LP double-row chime tree. Recorded in my closet on Yamaha AW16-G using a cheap Shure mic.
A seamless trap style hip hop loop. Use it for whatever. Just give me some form of credit.
|
import cPickle
import multiprocessing
import os
import logging
import subprocess
import types
import leveldb
import simplejson as json
logger = logging.getLogger('mapreduce')
class MROutput(object):
    """Base class for map-reduce output sinks.

    Subclasses provide a nested Writer class; create_writer() instantiates
    one per shard, writing to `<prefix>/shard-XXXXX-of-YYYYY.<suffix>`.
    """
    suffix = 'db'

    class Writer(object):
        """Per-shard writer interface; subclasses must override put()."""
        def __init__(self, filename, **kw):
            self.filename = filename

        def put(self, key, value):
            assert False, "Don't use this class directly: use an output like LevelDBOutput"

        def flush(self):
            pass

    def __init__(self, **kw):
        # Extra keyword arguments are forwarded to each Writer.
        self.writer_args = kw

    def create_writer(self, prefix, shard_idx, num_shards):
        """Create the Writer for shard `shard_idx`, ensuring `prefix` exists."""
        assert prefix, 'No output prefix specified for output'
        assert shard_idx < num_shards, 'Invalid shard index (%d > %d)' % (shard_idx, num_shards)
        # BUGFIX: create the directory via os.makedirs instead of shelling
        # out with os.system, which broke on prefixes containing quotes or
        # other shell metacharacters.
        if not os.path.isdir(prefix):
            os.makedirs(prefix)
        return self.__class__.Writer(
            prefix + '/shard-%05d-of-%05d.%s' % (shard_idx, num_shards, self.suffix),
            **self.writer_args)

    def recommended_shards(self):
        """Default shard count: one shard per CPU."""
        return multiprocessing.cpu_count()

    def finalize(self, tmp_dir, final_dir):
        """Move finished shard output from the temporary to the final directory."""
        logger.info('Moving results from %s -> %s', tmp_dir, final_dir)
        subprocess.check_output('mv "%s" "%s"' % (tmp_dir, final_dir), shell=True)
class LevelDBOutput(MROutput):
    """Output sink that writes pickled values into a LevelDB database."""
    class Writer(MROutput.Writer):
        def __init__(self, filename):
            self.db = leveldb.LevelDB(filename)
            self._last_key = None  # last key written, for duplicate detection

        def put(self, key, value):
            """Store `value` (pickled) under `key`; consecutive duplicate
            keys are rejected because this output keeps one value per key."""
            assert isinstance(key, str)
            assert key != self._last_key, (
                'Duplicate keys (%s) passed to LevelDBOutput.'
                'This output does not support multiple keys!' % key
            )
            self.db.Put(key, cPickle.dumps(value, -1))
            # BUGFIX: _last_key was never updated, so the duplicate-key
            # assertion above could never fire.
            self._last_key = key
class JSONOutput(MROutput):
    """Output sink that buffers all pairs and dumps one JSON file."""
    suffix = 'json'

    class Writer(MROutput.Writer):
        def __init__(self, filename, **kw):
            self.filename = filename
            self.json_args = kw  # forwarded to json.dump
            self.db = {}

        def put(self, key, value):
            assert isinstance(key, str)
            self.db[key] = value

        def flush(self):
            # Write the accumulated dictionary out in one shot.
            with open(self.filename, 'w') as fh:
                json.dump(self.db, fh, **self.json_args)

    def create_writer(self, prefix, shard_idx, num_shards):
        assert num_shards == 1, 'JSONOutput only works with a single output shard!'
        return MROutput.create_writer(self, prefix, shard_idx, num_shards)

    def recommended_shards(self):
        # A single JSON file implies a single shard.
        return 1

    def finalize(self, tmp_dir, final_dir):
        """Move the single output JSON file to the final location.

        There should only be one file -- this will fail if the user
        specified multiple shards!
        """
        import glob
        matches = glob.glob('%s/*.json' % tmp_dir)
        assert len(matches) == 1, 'JSONOutput expected one temporary file, got: %s' % matches
        source = matches[0]
        logger.info('Moving temporary file: %s to final destination: %s', source, final_dir)
        subprocess.check_output('mv "%s" "%s"' % (source, final_dir), shell=True)
class JSONLineOutput(MROutput):
    '''
    Writes values as JSON, with one value per line.
    The result is a single file.
    '''
    suffix = 'jsonline'

    def finalize(self, tmp_dir, final_dir):
        """Concatenate all shard files into the single final output file."""
        # BUGFIX: removed a leftover debugging `ls` call, and quoted the
        # temporary directory so paths with spaces do not break the shell
        # command (the glob itself must stay outside the quotes).
        subprocess.check_output('cat "%s"/*.jsonline > "%s"' % (tmp_dir, final_dir), shell=True)

    class Writer(MROutput.Writer):
        def __init__(self, filename):
            MROutput.Writer.__init__(self, filename)
            self.output_file = open(filename, 'w')

        def put(self, key, value):
            # The key is intentionally ignored: only values are written.
            self.output_file.write(json.dumps(value))
            self.output_file.write('\n')

        def flush(self):
            logger.info('Flushing: %s', self.filename)
            self.output_file.close()
class NullOutput(MROutput):
    """Discards every output pair and produces no output files."""

    def finalize(self, tmp_dir, final_dir):
        # Nothing was written; just remove the temporary directory.
        os.system('rm -rf "%s"' % tmp_dir)

    class Writer(MROutput.Writer):
        def put(self, key, value):
            # Intentionally a no-op.
            pass
|
trinity --seqType fq --max_memory 100G --left ~/Dp_RNAseq/raw_data/All_reads_R1.fastq --right ~/Dp_RNAseq/raw_data/All_reads_R2.fastq --CPU 8 --output ~/Dp_RNAseq/Trinity/ --quality_trimming_params "LEADING:2 TRAILING:2 MINLEN:25"
"Error, do not understand options: TRAILING:2 MINLEN:25"
What am I doing wrong? I checked several examples and my command seems identical to them.
If you want to change parameters follow the command structure above.
Explanation for trimmomatic parameters is here.
thanks, I don't know how that is different from what I did. Using the ILLUMINACLIP and SLIDINGWINDOWS options doesn't change anything, it still give me the same error.
It appears that you have forgotten to include --trimmomatic option in your trinity command to indicate to trinity that you want to run that program.
I did run with the --trimmomatic option in the beginning and it didn't work, trying things I deleted it.
Can you try with following options? Replace $TRIMMOMATIC_DIR with real path to the adapters file on your system.
--trimmomatic --quality_trimming_params "ILLUMINACLIP:$TRIMMOMATIC_DIR/adapters/TruSeq3-PE.fa:2:30:10 TRAILING:2 MINLEN:25"
Thank you so much for your help @genomax2. I did try that. I don't really need the ILLUMINACLIP option because I don't want to use it — my reads were trimmed for adapters at the sequencing center — but seeing that writing only the two other options wasn't working, I tried writing the command exactly like in most examples out there (with the ILLUMINACLIP option), and yes, I made sure the path to the adapters was correct. It didn't work. I decided to run trimmomatic outside trinity and it worked fine; now I am running the assembly, still not knowing what was wrong.
I guess we won't know what the problem was then .. but as long as you got it to work.
|
import json
from mock import patch
from django.test import TestCase
from django.core.urlresolvers import reverse
from rapidsms.backends.vumi.outgoing import VumiBackend
from rapidsms.backends.vumi.forms import VumiForm
from rapidsms.tests.harness import RapidTest, CreateDataMixin
class VumiFormTest(TestCase):
    """Unit tests for VumiForm, which parses inbound Vumi HTTP payloads."""
    def setUp(self):
        # A representative inbound Vumi "user_message" payload.
        self.valid_data = {
            "transport_name": "transport",
            "in_reply_to": None,
            "group": None,
            "from_addr": "127.0.0.1:38634",
            "message_type": "user_message",
            "helper_metadata": {},
            "to_addr": "0.0.0.0:8005",
            "content": "ping",
            "message_version": "20110921",
            "transport_type": "telnet",
            "timestamp": "2012-07-06 14:08:20.845715",
            "transport_metadata": {},
            "session_event": "resume",
            "message_id": "56047985ceec40da908ca064f2fd59d3"
        }
    def test_valid_form(self):
        """Form should be valid if GET keys match configuration."""
        form = VumiForm(self.valid_data, backend_name='vumi-backend')
        self.assertTrue(form.is_valid())
    def test_invalid_form(self):
        """Form is invalid if POST keys don't match configuration."""
        data = {'invalid-phone': '1112223333', 'invalid-message': 'hi there'}
        form = VumiForm(data, backend_name='vumi-backend')
        self.assertFalse(form.is_valid())
    def test_get_incoming_data(self):
        """get_incoming_data should return matching text and connection."""
        form = VumiForm(self.valid_data, backend_name='vumi-backend')
        # is_valid() is called for its side effect -- presumably to
        # populate cleaned_data before get_incoming_data(); confirm.
        form.is_valid()
        incoming_data = form.get_incoming_data()
        self.assertEqual(self.valid_data['content'], incoming_data['text'])
        self.assertEqual(self.valid_data['from_addr'],
                         incoming_data['connection'].identity)
        self.assertEqual('vumi-backend',
                         incoming_data['connection'].backend.name)
class VumiViewTest(RapidTest):
    """HTTP-level tests for the Vumi backend view."""
    urls = 'rapidsms.backends.vumi.urls'
    disable_phases = True
    def setUp(self):
        # Same representative inbound payload as VumiFormTest.
        self.valid_data = {
            "transport_name": "transport",
            "in_reply_to": None,
            "group": None,
            "from_addr": "127.0.0.1:38634",
            "message_type": "user_message",
            "helper_metadata": {},
            "to_addr": "0.0.0.0:8005",
            "content": "ping",
            "message_version": "20110921",
            "transport_type": "telnet",
            "timestamp": "2012-07-06 14:08:20.845715",
            "transport_metadata": {},
            "session_event": "resume",
            "message_id": "56047985ceec40da908ca064f2fd59d3"
        }
    def test_valid_response_post(self):
        """HTTP 200 should return if data is valid."""
        response = self.client.post(reverse('vumi-backend'),
                                    json.dumps(self.valid_data),
                                    content_type='text/json')
        self.assertEqual(response.status_code, 200)
    def test_invalid_response(self):
        """HTTP 400 should return if data is invalid."""
        data = {'invalid-phone': '1112223333', 'message': 'hi there'}
        response = self.client.post(reverse('vumi-backend'), json.dumps(data),
                                    content_type='text/json')
        self.assertEqual(response.status_code, 400)
    def test_invalid_json(self):
        """HTTP 400 should return if JSON is invalid."""
        data = "{bad json, , lala}"
        response = self.client.post(reverse('vumi-backend'), data,
                                    content_type='text/json')
        self.assertEqual(response.status_code, 400)
    def test_valid_post_message(self):
        """Valid POSTs should pass message object to router."""
        self.client.post(reverse('vumi-backend'), json.dumps(self.valid_data),
                         content_type='text/json')
        message = self.inbound[0]
        self.assertEqual(self.valid_data['content'], message.text)
        self.assertEqual(self.valid_data['from_addr'],
                         message.connection.identity)
        self.assertEqual('vumi-backend',
                         message.connection.backend.name)
    def test_blank_message_is_valid(self):
        """Blank messages should be considered valid."""
        # Three flavors of "blank": empty string, explicit null, key absent.
        empty = self.valid_data.copy()
        empty.update({'content': ''})
        null = self.valid_data.copy()
        null.update({'content': None})
        no_content = self.valid_data.copy()
        del no_content['content']
        for blank_msg in [empty, null, no_content]:
            self.client.post(reverse('vumi-backend'), json.dumps(blank_msg),
                             content_type='text/json')
            # NOTE(review): this always inspects self.inbound[0], the first
            # message received -- later iterations may not be checking the
            # message they just posted. Confirm intended.
            message = self.inbound[0]
            self.assertEqual('', message.text)
class VumiSendTest(CreateDataMixin, TestCase):
    """Tests for outgoing-message preparation in the Vumi backend."""

    SEND_URL = "http://example.com"

    def _backend(self, **extra_config):
        """Build a VumiBackend pointed at the test gateway URL."""
        config = {"sendsms_url": self.SEND_URL}
        config.update(extra_config)
        return VumiBackend(None, "vumi", **config)

    def test_required_fields(self):
        """Vumi backend requires Gateway URL and credentials."""
        self.assertRaises(TypeError, VumiBackend, None, "vumi")

    def test_outgoing_keys(self):
        """Vumi requires JSON to include to_addr and content."""
        message = self.create_outgoing_message()
        backend = self._backend()
        identity = message.connections[0].identity
        kwargs = backend.prepare_request(message.id, message.text,
                                         [identity], {})
        self.assertEqual(kwargs['url'], self.SEND_URL)
        payload = json.loads(kwargs['data'])
        self.assertEqual(payload['to_addr'], [identity])
        self.assertEqual(payload['content'], message.text)

    def test_response_external_id(self):
        """Make sure external_id context is sent to Vumi."""
        message = self.create_outgoing_message()
        backend = self._backend()
        kwargs = backend.prepare_request(message.id, message.text,
                                         [message.connections[0].identity],
                                         {'external_id': 'ASDF1234'})
        payload = json.loads(kwargs['data'])
        self.assertEqual("ASDF1234", payload['in_reply_to'])

    def test_bulk_response_external_id(self):
        """Only single messages should include in_response_to."""
        conn1 = self.create_connection()
        conn2 = self.create_connection()
        backend = self._backend()
        kwargs = backend.prepare_request("1234", "foo",
                                         [conn1.identity, conn2.identity],
                                         {'external_id': 'ASDF1234'})
        payload = json.loads(kwargs['data'])
        self.assertNotIn('in_reply_to', payload)

    def test_message_id_in_metadata(self):
        """Make sure our uuid is sent to Vumi."""
        message = self.create_outgoing_message()
        backend = self._backend()
        kwargs = backend.prepare_request(message.id, message.text,
                                         [message.connections[0].identity], {})
        payload = json.loads(kwargs['data'])
        self.assertIn(message.id, payload.get('metadata', {}).values())

    def test_from_addr_and_endpoint_in_payload(self):
        """from_addr and endpoint pass through from context, but only those keys."""
        message = self.create_outgoing_message()
        backend = self._backend()
        context = {'from_addr': '5551212',
                   'endpoint': '12345',
                   'other': 'not included'}
        kwargs = backend.prepare_request(message.id, message.text,
                                         [message.connections[0].identity],
                                         context)
        payload = json.loads(kwargs['data'])
        self.assertEqual(context['from_addr'], payload['from_addr'])
        self.assertEqual(context['endpoint'], payload['endpoint'])
        self.assertNotIn('other', payload)

    def test_send(self):
        """Test successful send."""
        message = self.create_outgoing_message()
        backend = self._backend()
        identities = [message.connections[0].identity]
        kwargs = backend.prepare_request(message.id, message.text,
                                         identities, {})
        with patch('rapidsms.backends.vumi.outgoing.requests.post') as mock_post:
            backend.send(message.id, message.text, identities, {})
        # send() must POST with exactly the kwargs prepare_request builds.
        mock_post.assert_called_once_with(**kwargs)

    def test_auth(self):
        """Vumi backend should use basic authentication if given user/pass."""
        message = self.create_outgoing_message()
        backend = self._backend(sendsms_user="username",
                                sendsms_pass="password")
        kwargs = backend.prepare_request(message.id, message.text,
                                         [message.connections[0].identity], {})
        self.assertIn('auth', kwargs)
|
find out more about choosing your dissertation topic? Then, you'll clarify the purpose of the study, as well as the research question. Find the right places to look for sources The Internet is a good starting place during the research stage. Pages are glued with a clear acetate front cover and black card back cover with cloth spine. We have a guide that will show you the right direction. Delivery and Shipping Information, shipping Method, delivery Time. Incorporating the finest goatskin leather, handmade marble endpapers and extensive spine detailing and printed on archival quality paper. It's not easy to write the best dissertation. It is a bit more resistant to wear and tear, and if you scan/photocopy pages it will be less likely to have the effect where the "next" page shows through. I couldnt bear the thought of waiting in queues at uni so I decided to get my dissertation bound here and was not disappointed!
Heavier paper has a better feel when you flip pages (think about the difference between a high-quality hardcover book and a cheap paperback).
Fill them in with more details you collected through the research stage. You will also expose your assumptions and expectations of the final results. Librarians are really helpful at this point of the project development. Check out our blog post on how to make a hook for an essay, and see some top tips for preparing your dissertation for binding! Step 1: Write a winning dissertation proposal. Why is it a problem for the research, academic, and scientific community you'll belong to?
|
#!/usr/bin/env python
from __future__ import print_function
import os
import sys
import time
# make sure it's running the mincss here and not anything installed
sys.path.insert(0, os.path.dirname(__file__))
from mincss.processor import Processor
def run(args):
    """Run mincss against ``args.url`` and report/write the results.

    Prints a before/after dump for every inline <style> block, then writes
    the optimized (and original) content of every linked stylesheet into
    ``args.outputdir``, printing a size-savings summary per stylesheet.

    :param args: argparse namespace with url, outputdir, verbose,
        phantomjs and phantomjs_path attributes.
    :returns: 0, so the result can be used directly as a process exit code.
    """
    # Local import keeps the module header untouched; io.open gives the
    # same unicode-text semantics on both Python 2 and 3.
    import io

    options = {'debug': args.verbose}
    # An explicit phantomjs path wins over the bare --phantomjs flag.
    if args.phantomjs_path:
        options['phantomjs'] = args.phantomjs_path
    elif args.phantomjs:
        options['phantomjs'] = True
    p = Processor(**options)
    t0 = time.time()
    p.process(args.url)
    t1 = time.time()
    print("TOTAL TIME ", t1 - t0)

    for inline in p.inlines:
        print("ON", inline.url)
        print("AT line", inline.line)
        print("BEFORE ".ljust(79, '-'))
        print(inline.before)
        print("AFTER ".ljust(79, '-'))
        print(inline.after)
        print()

    output_dir = args.outputdir
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)

    for link in p.links:
        print("FOR", link.href)
        # Derive the output filename from the stylesheet URL once.
        orig_name = link.href.split('/')[-1]
        # BUGFIX: the original wrote link.after as text but link.before as
        # UTF-8-encoded bytes to a text-mode file -- a TypeError on Python 3
        # and a potential UnicodeEncodeError on Python 2. Write both files
        # as UTF-8 text consistently.
        with io.open(os.path.join(output_dir, orig_name),
                     'w', encoding='utf-8') as f:
            f.write(link.after)
        before_name = 'before_' + orig_name
        with io.open(os.path.join(output_dir, before_name),
                     'w', encoding='utf-8') as f:
            f.write(link.before)
        print("Files written to", output_dir)
        print()
        print(
            '(from %d to %d saves %d)' %
            (len(link.before), len(link.after),
             len(link.before) - len(link.after))
        )
    return 0
if __name__ == '__main__':
    # Command-line entry point: build the option parser and hand off to run().
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "url", type=str,
        help="URL to process")
    arg_parser.add_argument(
        "--outputdir", action="store", default="./output",
        help="directory where to put output (default ./output)")
    arg_parser.add_argument(
        "-v", "--verbose", action="store_true",
        help="increase output verbosity")
    arg_parser.add_argument(
        "--phantomjs", action="store_true",
        help="Use PhantomJS to download the source")
    arg_parser.add_argument(
        "--phantomjs-path", action="store", default="",
        help="Where is the phantomjs executable")
    sys.exit(run(arg_parser.parse_args()))
|
"Liquid Crystal Based Optical Switches" by Yuliya Semenova, S. Dovgalets et al.
Optical switches using two electro-optical effects in liquid crystals are proposed and investigated for incorporation in a switch matrix for optical networks. These two devices employ selective reflection in cholesteric layers and total reflection at the border between glass and nematic liquid crystal. Prototypes of these switches have been designed and their parameters have been investigated. Initial results suggest switching contrast ratios of 38.5 dB and 42 dB, respectively, with insertion losses of about 2.3–2.9 dB for polarized light.
|
# Copyright 2006-2012 Canonical Ltd. This software is licensed under the
# GNU Affero General Public License version 3 (see the file LICENSE).
"""Bug comment browser view classes."""
__metaclass__ = type
__all__ = [
'BugComment',
'BugCommentBoxExpandedReplyView',
'BugCommentBoxView',
'BugCommentBreadcrumb',
'BugCommentView',
'BugCommentXHTMLRepresentation',
'build_comments_from_chunks',
'group_comments_with_activity',
]
from datetime import timedelta
from itertools import (
chain,
groupby,
)
from operator import itemgetter
from lazr.delegates import delegates
from lazr.restful.interfaces import IWebServiceClientRequest
from zope.component import (
adapts,
getMultiAdapter,
getUtility,
)
from zope.interface import (
implements,
Interface,
)
from zope.security.proxy import removeSecurityProxy
from lp.bugs.interfaces.bugattachment import BugAttachmentType
from lp.bugs.interfaces.bugmessage import IBugComment
from lp.services.comments.browser.comment import download_body
from lp.services.comments.browser.messagecomment import MessageComment
from lp.services.config import config
from lp.services.librarian.browser import ProxiedLibraryFileAlias
from lp.services.messages.interfaces.message import IMessage
from lp.services.propertycache import (
cachedproperty,
get_property_cache,
)
from lp.services.webapp import (
canonical_url,
LaunchpadView,
)
from lp.services.webapp.breadcrumb import Breadcrumb
from lp.services.webapp.interfaces import ILaunchBag
COMMENT_ACTIVITY_GROUPING_WINDOW = timedelta(minutes=5)
def build_comments_from_chunks(
        bugtask, truncate=False, slice_info=None, show_spam_controls=False,
        user=None, hide_first=False):
    """Build BugComments from MessageChunks.

    Returns a dict mapping message id -> `BugComment`.

    :param bugtask: The bugtask whose bug's messages are rendered; each
        `BugComment` also keeps it so its URLs stay task-specific.
    :param truncate: Perform truncation of large messages.
    :param slice_info: If not None, an iterable of slices to retrieve.
    :param show_spam_controls: Show spam controls on every comment (they
        may still be enabled per-comment for the comment's owner).
    :param user: The user viewing the comments, or None.
    :param hide_first: Hide the text of the message at index 0 instead of
        rendering it.
    """
    chunks = bugtask.bug.getMessagesForView(slice_info=slice_info)
    # This would be better as part of indexed_messages eager loading.
    comments = {}
    for bugmessage, message, chunk in chunks:
        # Stash each chunk on the message's property cache so later text
        # rendering does not need to go back to the database.
        cache = get_property_cache(message)
        if getattr(cache, 'chunks', None) is None:
            cache.chunks = []
        cache.chunks.append(removeSecurityProxy(chunk))
        bug_comment = comments.get(message.id)
        if bug_comment is None:
            # First chunk seen for this message: decide its display mode.
            if bugmessage.index == 0 and hide_first:
                display = 'hide'
            elif truncate:
                display = 'truncate'
            else:
                display = 'full'
            bug_comment = BugComment(
                bugmessage.index, message, bugtask,
                show_spam_controls=show_spam_controls, user=user,
                display=display)
            comments[message.id] = bug_comment
            # This code path is currently only used from a BugTask view which
            # has already loaded all the bug watches. If we start lazy loading
            # those, or not needing them we will need to batch lookup watches
            # here.
            if bugmessage.bugwatchID is not None:
                bug_comment.bugwatch = bugmessage.bugwatch
                bug_comment.synchronized = (
                    bugmessage.remote_comment_id is not None)
    return comments
def group_comments_with_activity(comments, activities):
    """Group comments and activity together for human consumption.

    Generates a stream of comment instances (with the activity grouped within)
    or `list`s of grouped activities.

    :param comments: An iterable of `BugComment` instances, which should be
        sorted by index already.
    :param activities: An iterable of `BugActivity` instances.
    """
    window = COMMENT_ACTIVITY_GROUPING_WINDOW
    comment_kind = "comment"
    if comments:
        max_index = comments[-1].index + 1
    else:
        max_index = 0
    # Decorate every event as a (date, index, actor, kind, event) tuple so
    # comments and activities can be merged into one sortable stream.
    comments = (
        (comment.datecreated, comment.index,
            comment.owner, comment_kind, comment)
        for comment in comments)
    activity_kind = "activity"
    # Every activity gets max_index so that, on a date tie, it sorts after
    # any comment (see the sort key below).
    activity = (
        (activity.datechanged, max_index,
            activity.person, activity_kind, activity)
        for activity in activities)
    # when an action and a comment happen at the same time, the action comes
    # second, when two events are tied the comment index is used to
    # disambiguate.
    events = sorted(chain(comments, activity), key=itemgetter(0, 1, 2))

    def gen_event_windows(events):
        """Generate event windows.

        Yields `(window_index, kind, event)` tuples, where `window_index` is
        an integer, and is incremented each time the windowing conditions are
        triggered.

        :param events: An iterable of `(date, ignored, actor, kind, event)`
            tuples in order.
        """
        window_comment, window_actor = None, None
        window_index, window_end = 0, None
        for date, _, actor, kind, event in events:
            window_ended = (
                # A window may contain only one comment.
                (window_comment is not None and kind is comment_kind) or
                # All events must have happened within a given timeframe.
                (window_end is None or date >= window_end) or
                # All events within the window must belong to the same actor.
                (window_actor is None or actor != window_actor))
            if window_ended:
                window_comment, window_actor = None, actor
                window_index, window_end = window_index + 1, date + window
            if kind is comment_kind:
                window_comment = event
            yield window_index, kind, event

    event_windows = gen_event_windows(events)
    event_windows_grouper = groupby(event_windows, itemgetter(0))
    for window_index, window_group in event_windows_grouper:
        window_group = [
            (kind, event) for (index, kind, event) in window_group]
        for kind, event in window_group:
            if kind is comment_kind:
                # Attach all of this window's activities to its comment and
                # emit the comment itself.
                window_comment = event
                window_comment.activity.extend(
                    event for (kind, event) in window_group
                    if kind is activity_kind)
                yield window_comment
                # There's only one comment per window.
                break
        else:
            # No comment in this window: emit the bare list of activities.
            yield [event for (kind, event) in window_group]
class BugComment(MessageComment):
    """Data structure that holds all data pertaining to a bug comment.

    It keeps track of which index it has in the bug comment list and
    also provides functionality to truncate the comment.

    Note that although this class is called BugComment it really takes
    as an argument a bugtask. The reason for this is to allow
    canonical_url()s of BugComments to take you to the correct
    (task-specific) location.
    """

    implements(IBugComment)
    delegates(IMessage, '_message')

    def __init__(
            self, index, message, bugtask, activity=None,
            show_spam_controls=False, user=None, display='full'):
        # display: 'truncate' limits the rendered size, 'hide' suppresses
        # the text entirely, 'full' shows everything.
        if display == 'truncate':
            comment_limit = config.malone.max_comment_size
        else:
            comment_limit = None
        super(BugComment, self).__init__(comment_limit)
        self.index = index
        self.bugtask = bugtask
        self.bugwatch = None
        self._message = message
        self.display_title = False
        self.patches = []
        if activity is None:
            activity = []
        self.activity = activity
        self.synchronized = False
        # We use a feature flag to control users deleting their own comments.
        user_owns_comment = user is not None and user == self.owner
        self.show_spam_controls = show_spam_controls or user_owns_comment
        self.hide_text = (display == 'hide')

    @cachedproperty
    def bugattachments(self):
        # Non-patch attachments only; patches are tracked in self.patches.
        return [attachment for attachment in self._message.bugattachments if
                attachment.type != BugAttachmentType.PATCH]

    @property
    def show_for_admin(self):
        """Show hidden comments for Launchpad admins.

        This is used in templates to add a class to hidden
        comments to enable display for admins, so the admin
        can see the comment even after it is hidden. Since comments
        aren't published unless the user is registry or admin, this
        can just check if the comment is visible.
        """
        return not self.visible

    @cachedproperty
    def text_for_display(self):
        # Hidden comments (e.g. the bug description when hide_first was
        # requested) render as empty text.
        if self.hide_text:
            return ''
        else:
            return super(BugComment, self).text_for_display

    def isIdenticalTo(self, other):
        """Compare this BugComment to another and return True if they are
        identical.
        """
        if self.owner != other.owner:
            return False
        if self.text_for_display != other.text_for_display:
            return False
        if self.title != other.title:
            return False
        if (self.bugattachments or self.patches or other.bugattachments or
                other.patches):
            # We shouldn't collapse comments which have attachments;
            # there's really no possible identity in that case.
            return False
        return True

    def isEmpty(self):
        """Return True if text_for_display is empty."""
        return (len(self.text_for_display) == 0 and
                len(self.bugattachments) == 0 and len(self.patches) == 0)

    @property
    def add_comment_url(self):
        # Reply link target on the comment's bugtask.
        return canonical_url(self.bugtask, view_name='+addcomment')

    @property
    def download_url(self):
        # Raw download target for comments too long to render inline.
        return canonical_url(self, view_name='+download')

    @property
    def show_footer(self):
        """Return True if the footer should be shown for this comment."""
        return bool(
            len(self.activity) > 0 or
            self.bugwatch or
            self.show_spam_controls)
class BugCommentView(LaunchpadView):
    """View for a single bug comment."""

    def __init__(self, context, request):
        # The current bugtask (from the launch bag) becomes the view
        # context so menus and portlets resolve correctly; the comment
        # itself is kept on an attribute instead.
        LaunchpadView.__init__(
            self, getUtility(ILaunchBag).bugtask, request)
        self.comment = context

    def __call__(self):
        """View redirects to +download if comment is too long to render."""
        comment = self.comment
        if comment.too_long_to_render:
            return self.request.response.redirect(comment.download_url)
        return super(BugCommentView, self).__call__()

    def download(self):
        """Serve the raw comment body as a download."""
        return download_body(self.comment, self.request)

    @property
    def show_spam_controls(self):
        return self.comment.show_spam_controls

    def page_title(self):
        return 'Comment %d for bug %d' % (
            self.comment.index, self.context.bug.id)

    @property
    def page_description(self):
        return self.comment.text_contents

    @property
    def privacy_notice_classes(self):
        # The notice is hidden via CSS class unless the bug is private.
        return '' if self.context.bug.private else 'hidden'
class BugCommentBoxViewMixin:
    """Shared helpers for the bug comment box views."""

    @property
    def show_spam_controls(self):
        """Whether spam controls should be rendered.

        The flag is looked up on the context first, then on an optional
        ``comment`` attribute; defaults to False when neither carries it.
        """
        if hasattr(self.context, 'show_spam_controls'):
            return self.context.show_spam_controls
        comment = getattr(self, 'comment', None)
        if comment is not None and hasattr(comment, 'show_spam_controls'):
            return comment.show_spam_controls
        return False

    def proxiedUrlOfLibraryFileAlias(self, attachment):
        """Return the proxied URL for the Librarian file of the attachment."""
        alias = ProxiedLibraryFileAlias(attachment.libraryfile, attachment)
        return alias.http_url
class BugCommentBoxView(LaunchpadView, BugCommentBoxViewMixin):
    """Render a comment box with reply field collapsed."""
    # Templates read this flag to decide whether the inline reply form
    # starts expanded.
    expand_reply_box = False
class BugCommentBoxExpandedReplyView(LaunchpadView, BugCommentBoxViewMixin):
    """Render a comment box with reply field expanded."""
    # Same view as BugCommentBoxView except the reply form starts open.
    expand_reply_box = True
class BugCommentXHTMLRepresentation:
    """Adapt an `IBugComment` to its XHTML webservice representation."""

    # Registered as a multi-adapter from (IBugComment,
    # IWebServiceClientRequest).
    adapts(IBugComment, IWebServiceClientRequest)
    implements(Interface)

    def __init__(self, comment, request):
        self.comment = comment
        self.request = request

    def __call__(self):
        """Render `BugComment` as XHTML using the webservice."""
        # Delegate rendering to the registered "+box" view for the comment.
        comment_view = getMultiAdapter(
            (self.comment, self.request), name="+box")
        return comment_view()
class BugCommentBreadcrumb(Breadcrumb):
    """Breadcrumb for an `IBugComment`."""

    def __init__(self, context):
        # NOTE(review): this __init__ only forwards to the base class and
        # could be removed without changing behaviour.
        super(BugCommentBreadcrumb, self).__init__(context)

    @property
    def text(self):
        # Label shown in the breadcrumb trail, e.g. "Comment #4".
        return "Comment #%d" % self.context.index
|
With this hot weather (not that I am complaining) I have been living in dresses. My work is pretty conservative when it comes to the dress code, and I am 5’9’’, so finding nice yet professional dresses can be a challenge. I knew this dress was perfect the second I laid eyes on it. Reitmans is one of the six banners belonging to Reitmans Canada Limited, the largest women’s fashion retailer in Canada. I could never find anything that really worked for me at Reitmans.
However, over the last year or so Reitmans has refreshed its look and I was pleasantly surprised how many pieces I actually loved. I picked up this dress along with few other things. The dress is well made, comfortable and even with my height fit perfectly. The lace detail and the colour are a perfect combination.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-01-02 07:04
from __future__ import unicode_literals
from django.db import migrations
from django.db.models import functions, Case, IntegerField, Sum, When
class TranslationActionCodes(object):
    """Numeric codes used in ScoreLog.action_code.

    Apparently duplicated into this migration (rather than imported) so the
    migration stays stable if the live module changes -- TODO confirm.
    """
    NEW = 0  # 'TA' unit translated
    EDITED = 1  # 'TE' unit edited after someone else
    EDITED_OWN = 2  # 'TX' unit edited after themselves
    DELETED = 3  # 'TD' translation deleted by admin
    REVIEWED = 4  # 'R' translation reviewed
    MARKED_FUZZY = 5  # 'TF' translation's fuzzy flag is set by admin
    EDIT_PENALTY = 6  # 'XE' translation penalty [when translation deleted]
    REVIEW_PENALTY = 7  # 'XR' translation penalty [when review canceled]
    SUGG_ADDED = 8  # 'S' suggestion added
    # 'SA' suggestion accepted (counted towards the suggestion author)
    SUGG_ACCEPTED = 9
    # 'SR' suggestion rejected (counted towards the suggestion author)
    SUGG_REJECTED = 10
    # 'RA' suggestion accepted (counted towards the reviewer)
    SUGG_REVIEWED_ACCEPTED = 11
    # 'RR' suggestion rejected (counted towards the reviewer)
    SUGG_REVIEWED_REJECTED = 12
def set_user_scores(apps, schema_editor):
    """Data migration: aggregate ScoreLog rows into daily UserTPScore rows.

    For every (day, user, translation project) combination, sums the score
    deltas and buckets word counts into suggested/translated/reviewed
    totals, then bulk-creates the corresponding UserTPScore rows.
    Score logs belonging to the meta (system) users are excluded.
    """
    # NOTE(review): importing the live User model inside a migration couples
    # it to the current codebase; only META_USERS is read from it here.
    from accounts.models import User
    UserTPScore = apps.get_model("pootle_score.UserTPScore")
    scorelogs = apps.get_model("pootle_statistics.ScoreLog").objects.exclude(
        user__username__in=User.objects.META_USERS)
    # Group by calendar day, user and translation project, then aggregate.
    scorelogs = scorelogs.annotate(
        day=functions.TruncDay("creation_time")).values(
        "day", "user", "submission__translation_project").annotate(
        score=Sum("score_delta"),
        # Words suggested: logs recording a new suggestion.
        suggested=Sum(
            Case(
                When(
                    action_code=TranslationActionCodes.SUGG_ADDED,
                    then='wordcount'),
                default=0,
                output_field=IntegerField())),
        # Words translated: any log carrying a translated_wordcount.
        translated=Sum(
            Case(
                When(
                    translated_wordcount__isnull=False,
                    then='translated_wordcount'),
                default=0,
                output_field=IntegerField())),
        # Words reviewed: review-type actions without a translated count.
        reviewed=Sum(
            Case(
                When(
                    action_code__in=[
                        TranslationActionCodes.SUGG_REVIEWED_ACCEPTED,
                        TranslationActionCodes.REVIEWED,
                        TranslationActionCodes.EDITED],
                    translated_wordcount__isnull=True,
                    then='wordcount'),
                default=0,
                output_field=IntegerField())))
    UserTPScore.objects.bulk_create(
        UserTPScore(
            date=score["day"],
            user_id=score["user"],
            tp_id=score["submission__translation_project"],
            score=score["score"],
            reviewed=score["reviewed"],
            suggested=score["suggested"],
            translated=score["translated"])
        for score in scorelogs.iterator())
class Migration(migrations.Migration):
    """Populate pootle_score.UserTPScore from historical score logs."""

    dependencies = [
        ('pootle_score', '0001_initial'),
        ('pootle_statistics', '0005_index_ordering')
    ]

    operations = [
        # NOTE(review): no reverse function is supplied, so this data
        # migration is irreversible as written.
        migrations.RunPython(set_user_scores),
    ]
|
CBP Initiates Inquiries on Nationality of Workers in Supply Chain: International Trade Law Firm | Sandler, Travis & Rosenberg, P.A.
Importers need to act immediately to ensure no producers in their supply chains in any foreign country are employing North Korean nationals as forced laborers to produce goods imported into the United States. U.S. Customs and Border Protection has initiated inquiries to importers enforcing new legislation on this issue and has the authority to detain shipments for admissibility. Goods made with North Korean labor are banned effective Sept. 21, 2017, with the burden of proof on importers to prove either no North Korean nationals were used in the production of the detained goods or, if North Koreans were used, they were not forced to labor.
The new requirement was buried in the recently enacted Countering America’s Adversaries Through Sanctions Act. While CAATSA primarily involved changes to economic sanctions against Iran, Russia, and North Korea, it also contains a provision that prohibits the importation into the U.S. of goods produced in whole or in part by North Korean nationals employed anywhere in the world. The statute places the burden of proof on importers to prove either that no North Korean nationals were involved in the production of the imported goods or that any North Korean nationals employed by the producer of those goods are not forced laborers.
The U.S. government is monitoring media reports of North Koreans working in such varied countries as Poland, Mexico, Angola, Equatorial Guinea, China, and Russia.
ST&R professionals are ready to assist you in acting quickly to implement new compliance measures and initiate immediate reviews of your supply chain. For assistance, please contact Tom Travis at (305) 894-1001, Marilyn-Joy Cerny at (212) 549-0161, or Elise Shibles at (415) 986-1088 x 1403.
|
import sys
import numpy as np
import snap
# Enumerate all of the directed 3 cycles in a graph.
# When True, each cycle is additionally written with its first two node ids
# swapped, doubling the output (see EnumerateDir3Cycles).
symmetrize = True
def EnumerateDir3Cycles(G, name):
    ''' Write every directed 3-cycle of G to the file ``name``.

    For each node u, each out-neighbor v and in-neighbor w such that the
    edge v -> w exists (so w -> u -> v -> w is a cycle), a line "w v u" is
    written; with the module-level ``symmetrize`` flag set, "v w u" is
    written as well. Returns nothing -- the previous docstring's claim of
    returning per-edge cycle counts was inaccurate.
    '''
    total = 0
    with open(name, 'w') as output:
        for u_ in G.Nodes():
            u = u_.GetId()
            for v in u_.GetOutEdges():
                for w in u_.GetInEdges():
                    # Skip degenerate triples where any two nodes coincide.
                    if (u == v or v == w or w == u):
                        continue
                    # The closing edge v -> w must exist for a 3-cycle.
                    if not G.IsEdge(v, w):
                        continue
                    # w -> u -> v -> w
                    output.write('%d %d %d\n' % (w, v, u))
                    total += 1
                    if symmetrize:
                        output.write('%d %d %d\n' % (v, w, u))
                        total += 1
    print 'number of dir. 3 cycles (no reciprocated edges): ', total
def main():
    """Load the edge list named on the command line and dump its 3-cycles."""
    in_name = sys.argv[1]
    # Output goes next to the input, with a -d3c suffix before .txt.
    out_name = in_name.split('.txt')[0] + '-d3c.txt'
    graph = snap.LoadEdgeList(snap.PNGraph, in_name, 0, 1)
    EnumerateDir3Cycles(graph, out_name)
if __name__ == '__main__':
    # Run only when executed as a script, not on import.
    main()
|
I have just survived the renovation of an upstairs bathroom. You who have gone through that sort of thing will understand why I use the word “survive.” Any renovation is disruptive, but one involving a bathroom can be particularly awkward. Among other inconveniences was the fact that I had to vacate my bedroom for eight weeks, taking with me to the guest room everything I might need during that time so that my regular room could be sealed off against the bathroom project dust.
Before I go further, I should say that I am grateful for any bathroom at all, and I feel enormous gratitude for the resources that made this renovation possible—including a wonderful contractor with a cadre of skilled carpenters, plumbers, electricians, and tile men. I am also grateful for having a spare room into which I could move. And now that the project is finished, I am delighted with the results, which include better light and more efficient storage space.
Still, the dust, the noise, the workmen tromping in and out, and the lack of access to many of my belongings—those things were disruptive. I sometimes wondered if the end result would be worth the expense and the bother.
But here’s the happy surprise of it: Putting a sustained and focused effort into improving the smallest room in the house has effected changes for the better in almost every other room in the house. A small free-standing cabinet displaced by the renovation ended up in the kitchen, providing much needed storage space there and inspiring a general re-organization of all the kitchen shelves. The loss of bathroom wall space to new built-in cabinetry meant that several pieces of framed needlework required relocation—and that precipitated the reorganization of things hanging in other rooms in the house. Ultimately, I so much enjoyed the simplicity of life in the guest room—with only a couple of pairs of jeans, several turtlenecks and sweaters, and a few other necessities—that before I moved back into my bedroom, I combed its closets and drawers for things to throw out or give away.
I wonder if the current season of Lent can operate in my inner spiritual rooms in a similar way. Has something in me grown too small, too ineffectual, too cluttered, or too complacent? Do I feel called to toss something out or to give something away? Is there a new habit I long to foster? In any case, a decision to observe Lent means that I commit to a Lenten discipline—a kind of sustained and focused inner “renovation.” I may find the process costly, disruptive, inconvenient, awkward. I may become discouraged by how messy things become before they “improve.” But if I see the project through, I will also find that renovating some small room in my inner house effects healthy changes in other rooms as well. I may live into the happy surprise of Easter—that out of the trials of “renovation” can come the gifts and the grace of resurrection.
|
# -*- coding: utf-8 -*-
#***************************************************************************
#* *
#* Copyright (c) 2015 Pierre Vacher <prrvchr@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
""" Gui workbench initialization """
from __future__ import unicode_literals
class UsbWorkbench(Workbench):
    """USB workbench object.

    NOTE(review): ``Workbench``, ``Gui``, ``App`` and ``Log`` appear to be
    injected into this module's namespace by FreeCAD at load time --
    confirm against the FreeCAD version in use.
    """
    # XPM icon data embedded as a bytes literal so no external image file
    # is needed.
    Icon = b"""
    /* XPM */
    static const char * const usbwb_xpm[]={
    "16 16 3 1",
    ". c None",
    "# c #FFFFFF",
    "$ c #000000",
    "................",
    ".......$$#......",
    "......$$$$#.#...",
    "....#..$$#.$$#..",
    "...$$#.$$#$$$$#.",
    "..$$$$#$$#.$$#..",
    "...$$#.$$#.$$#..",
    "...$$#.$$#.$$#..",
    "...$$#.$$#$$#...",
    "...$$#.$$$##....",
    "....$$#$$#......",
    "......$$$#......",
    ".......$$##.....",
    ".....$$$$$$#....",
    ".....$$$$$$#....",
    "................"};
    """
    # Label and tooltip shown in FreeCAD's workbench selector.
    MenuText = "USB"
    ToolTip = "Python USB workbench"

    def Initialize(self):
        # Called once by FreeCAD when the workbench is first loaded.
        from PySide import QtCore
        from Gui import Script
        from App import DocumentObserver, UsbPool, UsbCommand, TinyG2
        Script.initIcons()
        commands = [b"Usb_Pool", b"Usb_Refresh", b"Usb_Open", b"Usb_Start", b"Usb_Pause"]
        # Add commands to menu and toolbar
        self.appendToolbar("Commands for Usb", commands)
        self.appendMenu([b"USB"], commands)
        App.addDocumentObserver(DocumentObserver.DocumentObserver())
        Log('Loading USB workbench... done\n')

    def GetClassName(self):
        # Tells FreeCAD this workbench is implemented in Python.
        return "Gui::PythonWorkbench"

    def Activated(self):
        # Called each time the user switches to this workbench.
        from Gui import PySerialPanel, UsbPoolPanel, TinyG2Panel
        Gui.Control.addTaskWatcher([PySerialPanel.TaskWatcher(),
                                    UsbPoolPanel.TaskWatcher(),
                                    TinyG2Panel.TaskWatcher()])
        Log("USB workbench activated\n")

    def Deactivated(self):
        # Called when the user switches away from this workbench.
        Gui.Control.clearTaskWatcher()
        Log("USB workbench deactivated\n")


# Register the workbench with FreeCAD at import time.
Gui.addWorkbench(UsbWorkbench())
|
ROAD TOWN, Tortola, VI - VIslanders Trevia and Tynelle Gumbs signed off on Letters of Intent with the University of Central Florida (UCF) in Orlando last week Tuesday November 15, 2016.
ROAD TOWN, Tortola, VI- Twin sisters and Central Arizona College sophomores Tynelle and Trevia Gumbs, ended their Jr College career by grabbing All America honours during the National Jr Colleges Athletics Association Championships in Levelland, Texas.
ROAD TOWN, Tortola, VI- Virgin Islander Tynelle Gumbs shattered the National Junior Colleges Athletic Association Indoor Weight Throw record at the March 4 – 5, 2016 Championships in North Carolina, USA.
ROAD TOWN, Tortola, VI- Long Jumper Chantel E. Malone and field events specialist Eldred Henry 3-peated as the British Virgin Islands Athletics Association's (BVIAA) Senior Female and Male Athletes of the Year 2015, when the BVIAA 7th Athletes Awards Gala presented by Sol was held on Tuesday evening December 29, 2015 at The Moorings’ Mariner Inn on Wickham's Cay II.
ROAD TOWN, Tortola, VI – As action began at the Pan Am Junior Track and Field Championships in Edmonton, Canada on Friday July 31, 2015, athletes from the territory had a mixed bag of results.
ROAD TOWN, Tortola, VI - Central Arizona College sophomore and Virgin Islander, Eldred Henry not only repeated as National Jr Colleges Athletics Association Championships' Discus champion, but added the Shot Put crown as well, while Trevia Gumbs claimed Shot Put silverware, during the competition held in Hutchinson, Kansas at the weekend.
ROAD TOWN, Tortola, VI - Central Arizona College freshman Trevia Gumbs erased twin sister Tynelle's Discus Throw national standard after improving her own Shot Put mark, in the Arizona Jr Colleges Region I Championships, held in Mesa, last weekend.
ROAD TOWN, Tortola, VI - Trevia Gumbs, who missed the podium last year, won the (British) Virgin Islands’ first medal at the 44th Carifta Games on Sunday morning, April 5, 2015 in the Kim Collins Athletic Stadium, while Jonel Lacey picked up bronze in the 400m Intermediate Hurdles during the afternoon session.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.